Diffstat (limited to 'target/linux/oxnas/files/drivers')
-rw-r--r--  target/linux/oxnas/files/drivers/ata/sata_oxnas.c              | 1889
-rw-r--r--  target/linux/oxnas/files/drivers/clk/clk-oxnas.c               |  262
-rw-r--r--  target/linux/oxnas/files/drivers/clocksource/oxnas_rps_timer.c |   96
-rw-r--r--  target/linux/oxnas/files/drivers/irqchip/irq-rps.c             |  146
-rw-r--r--  target/linux/oxnas/files/drivers/mtd/nand/oxnas_nand.c         |  102
-rw-r--r--  target/linux/oxnas/files/drivers/pci/host/pcie-oxnas.c         |  676
-rw-r--r--  target/linux/oxnas/files/drivers/pinctrl/pinctrl-oxnas.c       | 1480
-rw-r--r--  target/linux/oxnas/files/drivers/reset/reset-ox820.c           |  107
-rw-r--r--  target/linux/oxnas/files/drivers/usb/host/ehci-oxnas.c         |  316
9 files changed, 5074 insertions, 0 deletions
diff --git a/target/linux/oxnas/files/drivers/ata/sata_oxnas.c b/target/linux/oxnas/files/drivers/ata/sata_oxnas.c
new file mode 100644
index 0000000000..b3685bdda0
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/ata/sata_oxnas.c
@@ -0,0 +1,1889 @@
+#include <linux/ata.h>
+#include <linux/libata.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+#include <mach/utils.h>
+
+/* sgdma request structure */
+struct sgdma_request {
+ volatile u32 qualifier;
+ volatile u32 control;
+ dma_addr_t src_pa;
+ dma_addr_t dst_pa;
+} __packed __aligned(4);
+
+
+/* Controller information */
+enum {
+ SATA_OXNAS_MAX_PRD = 254,
+ SATA_OXNAS_DMA_SIZE = SATA_OXNAS_MAX_PRD *
+ sizeof(struct ata_bmdma_prd) +
+ sizeof(struct sgdma_request),
+ SATA_OXNAS_MAX_PORTS = 1,
+ /** The different Oxsemi SATA core version numbers */
+ SATA_OXNAS_CORE_VERSION = 0x1f3,
+ SATA_OXNAS_IRQ_FLAG = IRQF_SHARED,
+ SATA_OXNAS_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
+ ATA_FLAG_NO_ATAPI /*| ATA_FLAG_NCQ*/),
+ SATA_OXNAS_QUEUE_DEPTH = 32,
+
+ SATA_OXNAS_DMA_BOUNDARY = 0xFFFFFFFF,
+};
+
+
+/*
+ * SATA Port Registers
+ */
+enum {
+ /** sata host port register offsets */
+ ORB1 = 0x00,
+ ORB2 = 0x04,
+ ORB3 = 0x08,
+ ORB4 = 0x0C,
+ ORB5 = 0x10,
+ MASTER_STATUS = 0x10,
+ FIS_CTRL = 0x18,
+ FIS_DATA = 0x1C,
+ INT_STATUS = 0x30,
+ INT_CLEAR = 0x30,
+ INT_ENABLE = 0x34,
+ INT_DISABLE = 0x38,
+ VERSION = 0x3C,
+ SATA_CONTROL = 0x5C,
+ SATA_COMMAND = 0x60,
+ HID_FEATURES = 0x64,
+ PORT_CONTROL = 0x68,
+ DRIVE_CONTROL = 0x6C,
+ /** These registers allow access to the link layer registers
+ that reside in a different clock domain to the processor bus */
+ LINK_DATA = 0x70,
+ LINK_RD_ADDR = 0x74,
+ LINK_WR_ADDR = 0x78,
+ LINK_CONTROL = 0x7C,
+ /* window control */
+ WIN1LO = 0x80,
+ WIN1HI = 0x84,
+ WIN2LO = 0x88,
+ WIN2HI = 0x8C,
+ WIN0_CONTROL = 0x90,
+
+};
+
+/** sata port register bits */
+enum {
+ /**
+ * commands to issue in the master status register to tell it to move
+ * the shadow registers to the actual device
+ */
+ SATA_OPCODE_MASK = 0x00000007,
+ CMD_WRITE_TO_ORB_REGS_NO_COMMAND = 0x4,
+ CMD_WRITE_TO_ORB_REGS = 0x2,
+ CMD_SYNC_ESCAPE = 0x7,
+ CMD_CORE_BUSY = (1 << 7),
+ CMD_DRIVE_SELECT_SHIFT = 12,
+ CMD_DRIVE_SELECT_MASK = (0xf << CMD_DRIVE_SELECT_SHIFT),
+
+ /** interrupt bits */
+ INT_END_OF_CMD = 1 << 0,
+ INT_LINK_SERROR = 1 << 1,
+ INT_ERROR = 1 << 2,
+ INT_LINK_IRQ = 1 << 3,
+ INT_REG_ACCESS_ERR = 1 << 7,
+ INT_BIST_FIS = 1 << 11,
+ INT_MASKABLE = INT_END_OF_CMD |
+ INT_LINK_SERROR |
+ INT_ERROR |
+ INT_LINK_IRQ |
+ INT_REG_ACCESS_ERR |
+ INT_BIST_FIS,
+ INT_WANT = INT_END_OF_CMD |
+ INT_LINK_SERROR |
+ INT_REG_ACCESS_ERR |
+ INT_ERROR,
+ INT_ERRORS = INT_LINK_SERROR |
+ INT_REG_ACCESS_ERR |
+ INT_ERROR,
+
+ /** raw interrupt bits, unmaskable, but do not generate interrupts */
+ RAW_END_OF_CMD = INT_END_OF_CMD << 16,
+ RAW_LINK_SERROR = INT_LINK_SERROR << 16,
+ RAW_ERROR = INT_ERROR << 16,
+ RAW_LINK_IRQ = INT_LINK_IRQ << 16,
+ RAW_REG_ACCESS_ERR = INT_REG_ACCESS_ERR << 16,
+ RAW_BIST_FIS = INT_BIST_FIS << 16,
+ RAW_WANT = INT_WANT << 16,
+ RAW_ERRORS = INT_ERRORS << 16,
+
+ /**
+ * values to write to the drive control register to select 48-bit or
+ * 28-bit addressing for the current device
+ */
+ DR_CON_48 = 2,
+ DR_CON_28 = 0,
+
+ SATA_CTL_ERR_MASK = 0x00000016,
+
+};
+
+/* ATA SGDMA register offsets */
+enum {
+ SGDMA_CONTROL = 0x0,
+ SGDMA_STATUS = 0x4,
+ SGDMA_REQUESTPTR = 0x8,
+ SGDMA_RESETS = 0xC,
+ SGDMA_CORESIZE = 0x10,
+};
+
+enum {
+ /* see DMA core docs for the values. Out means from memory (bus A) out
+ * to disk (bus B) */
+ SGDMA_REQCTL0OUT = 0x0497c03d,
+ /* burst mode disabled when no micro code used */
+ SGDMA_REQCTL0IN = 0x0493a3c1,
+ SGDMA_REQCTL1OUT = 0x0497c07d,
+ SGDMA_REQCTL1IN = 0x0497a3c5,
+ SGDMA_CONTROL_NOGO = 0x3e,
+ SGDMA_CONTROL_GO = SGDMA_CONTROL_NOGO | 1,
+ SGDMA_ERRORMASK = 0x3f,
+ SGDMA_BUSY = 0x80,
+
+ SGDMA_RESETS_CTRL = 1 << 0,
+ SGDMA_RESETS_ARBT = 1 << 1,
+ SGDMA_RESETS_AHB = 1 << 2,
+ SGDMA_RESETS_ALL = SGDMA_RESETS_CTRL |
+ SGDMA_RESETS_ARBT |
+ SGDMA_RESETS_AHB,
+
+ /* Final EOTs */
+ SGDMA_REQQUAL = 0x00220001,
+
+};
+
+/** SATA core register offsets */
+enum {
+ DM_DBG1 = 0x000,
+ RAID_SET = 0x004,
+ DM_DBG2 = 0x008,
+ DATACOUNT_PORT0 = 0x010,
+ DATACOUNT_PORT1 = 0x014,
+ CORE_INT_STATUS = 0x030,
+ CORE_INT_CLEAR = 0x030,
+ CORE_INT_ENABLE = 0x034,
+ CORE_INT_DISABLE = 0x038,
+ CORE_REBUILD_ENABLE = 0x050,
+ CORE_FAILED_PORT_R = 0x054,
+ DEVICE_CONTROL = 0x068,
+ EXCESS = 0x06C,
+ RAID_SIZE_LOW = 0x070,
+ RAID_SIZE_HIGH = 0x074,
+ PORT_ERROR_MASK = 0x078,
+ IDLE_STATUS = 0x07C,
+ RAID_CONTROL = 0x090,
+ DATA_PLANE_CTRL = 0x0AC,
+ CORE_DATAPLANE_STAT = 0x0b8,
+ PROC_PC = 0x100,
+ CONFIG_IN = 0x3d8,
+ PROC_START = 0x3f0,
+ PROC_RESET = 0x3f4,
+ UCODE_STORE = 0x1000,
+ RAID_WP_BOT_LOW = 0x1FF0,
+ RAID_WP_BOT_HIGH = 0x1FF4,
+ RAID_WP_TOP_LOW = 0x1FF8,
+ RAID_WP_TOP_HIGH = 0x1FFC,
+ DATA_MUX_RAM0 = 0x8000,
+ DATA_MUX_RAM1 = 0xA000,
+};
+
+enum {
+ /* Sata core debug1 register bits */
+ CORE_PORT0_DATA_DIR_BIT = 20,
+ CORE_PORT1_DATA_DIR_BIT = 21,
+ CORE_PORT0_DATA_DIR = 1 << CORE_PORT0_DATA_DIR_BIT,
+ CORE_PORT1_DATA_DIR = 1 << CORE_PORT1_DATA_DIR_BIT,
+
+ /** sata core control register bits */
+ SCTL_CLR_ERR = 0x00003016,
+ RAID_CLR_ERR = 0x0000011e,
+
+ /* Interrupts direct from the ports */
+ NORMAL_INTS_WANTED = 0x00000303,
+
+ /* shift these left by port number */
+ COREINT_HOST = 0x00000001,
+ COREINT_END = 0x00000100,
+ CORERAW_HOST = COREINT_HOST << 16,
+ CORERAW_END = COREINT_END << 16,
+
+ /* Interrupts from the RAID controller only */
+ RAID_INTS_WANTED = 0x00008300,
+
+ /* The bits in the IDLE_STATUS that, when set, indicate an idle core */
+ IDLE_CORES = (1 << 18) | (1 << 19),
+
+ /* Data plane control error-mask bits: these bits in the data
+ * plane control register mask out errors from the ports that prevent
+ * the SGDMA core from sending an interrupt */
+ DPC_ERROR_MASK = 0x00000300,
+ DPC_ERROR_MASK_BIT = 0x00000100,
+ /* enable jbod micro-code */
+ DPC_JBOD_UCODE = 1 << 0,
+ DPC_FIS_SWCH = 1 << 1,
+
+ /** Device Control register bits */
+ DEVICE_CONTROL_DMABT = 1 << 4,
+ DEVICE_CONTROL_ABORT = 1 << 2,
+ DEVICE_CONTROL_PAD = 1 << 3,
+ DEVICE_CONTROL_PADPAT = 1 << 16,
+ DEVICE_CONTROL_PRTRST = 1 << 8,
+ DEVICE_CONTROL_RAMRST = 1 << 12,
+ DEVICE_CONTROL_ATA_ERR_OVERRIDE = 1 << 28,
+
+ /** oxsemi HW raid modes */
+ OXNASSATA_NOTRAID = 0,
+ OXNASSATA_RAID0 = 1,
+ OXNASSATA_RAID1 = 2,
+ /** OX820 specific HW-RAID register values */
+ RAID_TWODISKS = 3,
+ UNKNOWN_MODE = ~0,
+};
+
+/* SATA PHY Registers */
+enum {
+ PHY_STAT = 0x00,
+ PHY_DATA = 0x04,
+};
+
+enum {
+ STAT_READ_VALID = (1 << 21),
+ STAT_CR_ACK = (1 << 20),
+ STAT_CR_READ = (1 << 19),
+ STAT_CR_WRITE = (1 << 18),
+ STAT_CAP_DATA = (1 << 17),
+ STAT_CAP_ADDR = (1 << 16),
+
+ STAT_ACK_ANY = STAT_CR_ACK |
+ STAT_CR_READ |
+ STAT_CR_WRITE |
+ STAT_CAP_DATA |
+ STAT_CAP_ADDR,
+
+ CR_READ_ENABLE = (1 << 16),
+ CR_WRITE_ENABLE = (1 << 17),
+ CR_CAP_DATA = (1 << 18),
+};
+
+enum {
+ /* Link layer registers */
+ SERROR_IRQ_MASK = 5,
+};
+
+enum {
+ OXNAS_SATA_SOFTRESET = 1,
+ OXNAS_SATA_REINIT = 2,
+};
+
+enum {
+ OXNAS_SATA_UCODE_RAID0,
+ OXNAS_SATA_UCODE_RAID1,
+ OXNAS_SATA_UCODE_JBOD,
+ OXNAS_SATA_UCODE_NONE,
+};
+
+struct sata_oxnas_host_priv {
+ void __iomem *port_base[SATA_OXNAS_MAX_PORTS];
+ void __iomem *sgdma_base[SATA_OXNAS_MAX_PORTS];
+ void __iomem *core_base;
+ void __iomem *phy_base;
+ dma_addr_t dma_base;
+ void __iomem *dma_base_va;
+ size_t dma_size;
+ int irq;
+ u32 port_in_eh;
+ struct clk *clk;
+ struct reset_control *rst_sata;
+ struct reset_control *rst_link;
+ struct reset_control *rst_phy;
+};
+
+
+struct sata_oxnas_port_priv {
+ void __iomem *port_base;
+ void __iomem *sgdma_base;
+ void __iomem *core_base;
+ struct sgdma_request *sgdma_request;
+ dma_addr_t sgdma_request_pa;
+};
+
+static u8 sata_oxnas_check_status(struct ata_port *ap);
+static int sata_oxnas_cleanup(struct ata_host *ah);
+static void sata_oxnas_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf);
+static void sata_oxnas_irq_on(struct ata_port *ap);
+static void sata_oxnas_post_reset_init(struct ata_port *ap);
+
+/* helpers for indirect access to the SATA PHY CR (control register) bus */
+static void wait_cr_ack(void __iomem *phy_base)
+{
+ while ((ioread32(phy_base + PHY_STAT) >> 16) & 0x1f)
+ ; /* wait for an ack bit to be set */
+}
+
+static u16 read_cr(void __iomem *phy_base, u16 address)
+{
+ iowrite32((u32)address, phy_base + PHY_STAT);
+ wait_cr_ack(phy_base);
+ iowrite32(CR_READ_ENABLE, phy_base + PHY_DATA);
+ wait_cr_ack(phy_base);
+ return (u16)ioread32(phy_base + PHY_STAT);
+}
+
+static void write_cr(void __iomem *phy_base, u16 data, u16 address)
+{
+ iowrite32((u32)address, phy_base + PHY_STAT);
+ wait_cr_ack(phy_base);
+ iowrite32((data | CR_CAP_DATA), phy_base + PHY_DATA);
+ wait_cr_ack(phy_base);
+ iowrite32(CR_WRITE_ENABLE, phy_base + PHY_DATA);
+ wait_cr_ack(phy_base);
+}
+
+#define PH_GAIN 2
+#define FR_GAIN 3
+#define PH_GAIN_OFFSET 6
+#define FR_GAIN_OFFSET 8
+#define PH_GAIN_MASK (0x3 << PH_GAIN_OFFSET)
+#define FR_GAIN_MASK (0x3 << FR_GAIN_OFFSET)
+#define USE_INT_SETTING (1<<5)
+
+void workaround5458(struct ata_host *ah)
+{
+ struct sata_oxnas_host_priv *hd = ah->private_data;
+ void __iomem *phy_base = hd->phy_base;
+ u16 rx_control;
+ unsigned i;
+
+ for (i = 0; i < 2; i++) {
+ rx_control = read_cr(phy_base, 0x201d + (i << 8));
+ rx_control &= ~(PH_GAIN_MASK | FR_GAIN_MASK);
+ rx_control |= PH_GAIN << PH_GAIN_OFFSET;
+ rx_control |= (FR_GAIN << FR_GAIN_OFFSET) | USE_INT_SETTING;
+ write_cr(phy_base, rx_control, 0x201d+(i<<8));
+ }
+}
+
+/**
+ * allows access to the link layer registers
+ * @param link_reg the link layer register to access (oxsemi indexing ie
+ * 00 = static config, 04 = phy ctrl)
+ */
+void sata_oxnas_link_write(struct ata_port *ap, unsigned int link_reg, u32 val)
+{
+ struct sata_oxnas_port_priv *port_priv = ap->private_data;
+ void __iomem *port_base = port_priv->port_base;
+ u32 patience;
+
+ DPRINTK("[0x%02x]->0x%08x\n", link_reg, val);
+
+ iowrite32(val, port_base + LINK_DATA);
+
+ /* accessed twice as a workaround for a bug in the SATA APB bridge
+ * hardware (bug 6828) */
+ iowrite32(link_reg , port_base + LINK_WR_ADDR);
+ ioread32(port_base + LINK_WR_ADDR);
+
+ for (patience = 0x100000; patience > 0; --patience) {
+ if (ioread32(port_base + LINK_CONTROL) & 0x00000001)
+ break;
+ }
+}
+
+static int sata_oxnas_scr_write_port(struct ata_port *ap, unsigned int sc_reg,
+ u32 val)
+{
+ sata_oxnas_link_write(ap, 0x20 + (sc_reg * 4), val);
+ return 0;
+}
+
+static int sata_oxnas_scr_write(struct ata_link *link, unsigned int sc_reg,
+ u32 val)
+{
+ return sata_oxnas_scr_write_port(link->ap, sc_reg, val);
+}
+
+/* FIXME lock */
+u32 sata_oxnas_link_read(struct ata_port *ap, unsigned int link_reg)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ void __iomem *port_base = pd->port_base;
+ u32 result;
+ u32 patience;
+
+ /* accessed twice as a workaround for a bug in the SATA APB bridge
+ * hardware (bug 6828) */
+ iowrite32(link_reg, port_base + LINK_RD_ADDR);
+ ioread32(port_base + LINK_RD_ADDR);
+
+ for (patience = 0x100000; patience > 0; --patience) {
+ if (ioread32(port_base + LINK_CONTROL) & 0x00000001)
+ break;
+ }
+ if (patience == 0)
+ DPRINTK("link read timed out\n");
+
+ result = readl(port_base + LINK_DATA);
+
+ return result;
+}
+
+static int sata_oxnas_scr_read_port(struct ata_port *ap, unsigned int sc_reg,
+ u32 *val)
+{
+ *val = sata_oxnas_link_read(ap, 0x20 + (sc_reg*4));
+ return 0;
+}
+
+static int sata_oxnas_scr_read(struct ata_link *link,
+ unsigned int sc_reg, u32 *val)
+{
+
+ return sata_oxnas_scr_read_port(link->ap, sc_reg, val);
+}
+
+/**
+ * sata_oxnas_irq_clear is called during probe just before the interrupt handler is
+ * registered, to be sure hardware is quiet. It clears and masks interrupt bits
+ * in the SATA core.
+ *
+ * @param ap hardware with the registers in
+ */
+static void sata_oxnas_irq_clear(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *port_priv = ap->private_data;
+
+ /* clear pending interrupts */
+ iowrite32(~0, port_priv->port_base + INT_CLEAR);
+ iowrite32(COREINT_END, port_priv->core_base + CORE_INT_CLEAR);
+}
+
+/**
+ * qc_issue is used to make a command active, once the hardware and S/G tables
+ * have been prepared. IDE BMDMA drivers use the helper function
+ * ata_qc_issue_prot() for taskfile protocol-based dispatch. More advanced
+ * drivers roll their own ->qc_issue implementation, using this as the
+ * "issue new ATA command to hardware" hook.
+ * @param qc the queued command to issue
+ */
+static unsigned int sata_oxnas_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct sata_oxnas_port_priv *pd = qc->ap->private_data;
+ void __iomem *port_base = pd->port_base;
+ void __iomem *core_base = pd->core_base;
+ int port_no = qc->ap->port_no;
+ u32 reg;
+
+ /* check the core is idle */
+ if (ioread32(port_base + SATA_COMMAND) & CMD_CORE_BUSY) {
+ int count = 0;
+
+ DPRINTK("core busy for a command on port %d\n",
+ qc->ap->port_no);
+ do {
+ mdelay(1);
+ if (++count > 100) {
+ DPRINTK("core busy for a command on port %d\n",
+ qc->ap->port_no);
+ /* CrazyDumpDebug(); */
+ sata_oxnas_cleanup(qc->ap->host);
+ }
+ } while (ioread32(port_base + SATA_COMMAND) & CMD_CORE_BUSY);
+ }
+
+ /* enable passing of error signals to DMA sub-core by clearing the
+ * appropriate bit (all transfers are on dma channel 0) */
+ reg = ioread32(core_base + DATA_PLANE_CTRL);
+ reg &= ~(DPC_ERROR_MASK_BIT << port_no);
+ iowrite32(reg, core_base + DATA_PLANE_CTRL);
+
+ /* Disable all interrupts for ports and RAID controller */
+ iowrite32(~0, port_base + INT_DISABLE);
+
+ /* Disable all interrupts for core */
+ iowrite32(~0, core_base + CORE_INT_DISABLE);
+ wmb();
+
+ /* Load the command settings into the orb registers */
+ sata_oxnas_tf_load(qc->ap, &qc->tf);
+
+ /* both pio and dma commands use dma */
+ if (ata_is_dma(qc->tf.protocol) || ata_is_pio(qc->tf.protocol)) {
+ /* Start the DMA */
+ iowrite32(SGDMA_CONTROL_GO, pd->sgdma_base + SGDMA_CONTROL);
+ wmb();
+ }
+
+ /* enable End of command interrupt */
+ iowrite32(INT_WANT, port_base + INT_ENABLE);
+ iowrite32(COREINT_END, core_base + CORE_INT_ENABLE);
+ wmb();
+
+ /* Start the command */
+ reg = ioread32(port_base + SATA_COMMAND);
+ reg &= ~SATA_OPCODE_MASK;
+ reg |= CMD_WRITE_TO_ORB_REGS;
+ iowrite32(reg , port_base + SATA_COMMAND);
+ wmb();
+
+ return 0;
+}
+
+/**
+ * Will schedule the libATA error handler on the premise that there has
+ * been a hotplug event on the port specified
+ */
+void sata_oxnas_checkforhotplug(struct ata_port *ap)
+{
+ DPRINTK("ENTER\n");
+
+ ata_ehi_hotplugged(&ap->link.eh_info);
+ ata_port_freeze(ap);
+}
+
+
+static inline int sata_oxnas_is_host_frozen(struct ata_host *ah)
+{
+ struct sata_oxnas_host_priv *hd = ah->private_data;
+
+ smp_rmb();
+ return hd->port_in_eh;
+}
+
+static inline u32 sata_oxnas_hostportbusy(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+
+ return ioread32(pd->port_base + SATA_COMMAND) & CMD_CORE_BUSY;
+}
+
+static inline u32 sata_oxnas_hostdmabusy(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+
+ return ioread32(pd->sgdma_base + SGDMA_STATUS) & SGDMA_BUSY;
+}
+
+
+/**
+ * Turns on the core's clock and resets it
+ */
+static void sata_oxnas_reset_core(struct ata_host *ah)
+{
+ struct sata_oxnas_host_priv *host_priv = ah->private_data;
+
+ DPRINTK("ENTER\n");
+ clk_prepare_enable(host_priv->clk);
+
+ reset_control_assert(host_priv->rst_sata);
+ reset_control_assert(host_priv->rst_link);
+ reset_control_assert(host_priv->rst_phy);
+
+ udelay(50);
+
+ /* un-reset the PHY, then Link and Controller */
+ reset_control_deassert(host_priv->rst_phy);
+ udelay(50);
+
+ reset_control_deassert(host_priv->rst_sata);
+ reset_control_deassert(host_priv->rst_link);
+ udelay(50);
+
+ workaround5458(ah);
+ /* tune for sata compatibility */
+ sata_oxnas_link_write(ah->ports[0], 0x60, 0x2988);
+
+ /* each port in turn */
+ sata_oxnas_link_write(ah->ports[0], 0x70, 0x55629);
+ udelay(50);
+}
+
+
+/**
+ * Called after an identify device command has worked out what kind of device
+ * is on the port
+ *
+ * @param port The port to configure
+ * @param pdev The hardware associated with controlling the port
+ */
+static void sata_oxnas_dev_config(struct ata_device *pdev)
+{
+ struct sata_oxnas_port_priv *pd = pdev->link->ap->private_data;
+ void __iomem *port_base = pd->port_base;
+ u32 reg;
+
+ DPRINTK("ENTER\n");
+ /* Set the bits to put the port into 28 or 48-bit mode */
+ reg = ioread32(port_base + DRIVE_CONTROL);
+ reg &= ~3;
+ reg |= (pdev->flags & ATA_DFLAG_LBA48) ? DR_CON_48 : DR_CON_28;
+ iowrite32(reg, port_base + DRIVE_CONTROL);
+
+ /* if this is an ATA-6 disk, put port into ATA-5 auto translate mode */
+ if (pdev->flags & ATA_DFLAG_LBA48) {
+ reg = ioread32(port_base + PORT_CONTROL);
+ reg |= 2;
+ iowrite32(reg, port_base + PORT_CONTROL);
+ }
+}
+/**
+ * called to write a taskfile into the ORB registers
+ * @param ap hardware with the registers in
+ * @param tf taskfile to write to the registers
+ */
+static void sata_oxnas_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ u32 count = 0;
+ u32 Orb1 = 0;
+ u32 Orb2 = 0;
+ u32 Orb3 = 0;
+ u32 Orb4 = 0;
+ u32 Command_Reg;
+
+ struct sata_oxnas_port_priv *port_priv = ap->private_data;
+ void __iomem *port_base = port_priv->port_base;
+ unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+ /* wait a maximum of 10ms for the core to be idle */
+ do {
+ Command_Reg = ioread32(port_base + SATA_COMMAND);
+ if (!(Command_Reg & CMD_CORE_BUSY))
+ break;
+ count++;
+ udelay(50);
+ } while (count < 200);
+
+ /* check if the ctl register has interrupts disabled or enabled and
+ * modify the interrupt enable registers on the ata core as required */
+ if (tf->ctl & ATA_NIEN) {
+ /* interrupts disabled */
+ u32 mask = (COREINT_END << ap->port_no);
+
+ iowrite32(mask, port_priv->core_base + CORE_INT_DISABLE);
+ sata_oxnas_irq_clear(ap);
+ } else {
+ sata_oxnas_irq_on(ap);
+ }
+
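+ /* ORB register packing, as implemented by the assignments below:
+ * ORB1 carries the device byte, ORB2 the command/feature/sector
+ * counts, ORB3 the LBA low/mid/high bytes (plus hob_lbal or the
+ * device head bits), and ORB4 the remaining HOB bytes and the
+ * control byte */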
+ Orb2 |= (tf->command) << 24;
+
+ /* write 48 or 28 bit tf parameters */
+ if (is_addr) {
+ /* set LBA bit as it's an address */
+ Orb1 |= (tf->device & ATA_LBA) << 24;
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ Orb1 |= ATA_LBA << 24;
+ Orb2 |= (tf->hob_nsect) << 8;
+ Orb3 |= (tf->hob_lbal) << 24;
+ Orb4 |= (tf->hob_lbam) << 0;
+ Orb4 |= (tf->hob_lbah) << 8;
+ Orb4 |= (tf->hob_feature) << 16;
+ } else {
+ Orb3 |= (tf->device & 0xf) << 24;
+ }
+
+ /* write 28-bit lba */
+ Orb2 |= (tf->nsect) << 0;
+ Orb2 |= (tf->feature) << 16;
+ Orb3 |= (tf->lbal) << 0;
+ Orb3 |= (tf->lbam) << 8;
+ Orb3 |= (tf->lbah) << 16;
+ Orb4 |= (tf->ctl) << 24;
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ Orb1 |= (tf->device) << 24;
+
+ ap->last_ctl = tf->ctl;
+
+ /* write values to registers */
+ iowrite32(Orb1, port_base + ORB1);
+ iowrite32(Orb2, port_base + ORB2);
+ iowrite32(Orb3, port_base + ORB3);
+ iowrite32(Orb4, port_base + ORB4);
+}
+
+
+void sata_oxnas_set_mode(struct ata_port *ap, u32 mode, u32 force)
+{
+ struct sata_oxnas_port_priv *port_priv = ap->private_data;
+ void __iomem *core_base = port_priv->core_base;
+
+ unsigned int *src;
+ void __iomem *dst;
+ unsigned int progmicrocode = 0;
+ unsigned int changeparameters = 0;
+ static u32 previous_mode = UNKNOWN_MODE;
+
+ /* these micro-code programs _should_ include the version word */
+
+ /* JBOD */
+ static const unsigned int jbod[] = {
+ 0x07B400AC, 0x0228A280, 0x00200001, 0x00204002, 0x00224001,
+ 0x00EE0009, 0x00724901, 0x01A24903, 0x00E40009, 0x00224001,
+ 0x00621120, 0x0183C908, 0x00E20005, 0x00718908, 0x0198A206,
+ 0x00621124, 0x0183C908, 0x00E20046, 0x00621104, 0x0183C908,
+ 0x00E20015, 0x00EE009D, 0x01A3E301, 0x00E2001B, 0x0183C900,
+ 0x00E2001B, 0x00210001, 0x00EE0020, 0x01A3E302, 0x00E2009D,
+ 0x0183C901, 0x00E2009D, 0x00210002, 0x0235D700, 0x0208A204,
+ 0x0071C908, 0x000F8207, 0x000FC207, 0x0071C920, 0x000F8507,
+ 0x000FC507, 0x0228A240, 0x02269A40, 0x00094004, 0x00621104,
+ 0x0180C908, 0x00E40031, 0x00621112, 0x01A3C801, 0x00E2002B,
+ 0x00294000, 0x0228A220, 0x01A69ABF, 0x002F8000, 0x002FC000,
+ 0x0198A204, 0x0001C022, 0x01B1A220, 0x0001C106, 0x00088007,
+ 0x0183C903, 0x00E2009D, 0x0228A220, 0x0071890C, 0x0208A206,
+ 0x0198A206, 0x0001C022, 0x01B1A220, 0x0001C106, 0x00088007,
+ 0x00EE009D, 0x00621104, 0x0183C908, 0x00E2004A, 0x00EE009D,
+ 0x01A3C901, 0x00E20050, 0x0021E7FF, 0x0183E007, 0x00E2009D,
+ 0x00EE0054, 0x0061600B, 0x0021E7FF, 0x0183C507, 0x00E2009D,
+ 0x01A3E301, 0x00E2005A, 0x0183C900, 0x00E2005A, 0x00210001,
+ 0x00EE005F, 0x01A3E302, 0x00E20005, 0x0183C901, 0x00E20005,
+ 0x00210002, 0x0235D700, 0x0208A204, 0x000F8109, 0x000FC109,
+ 0x0071C918, 0x000F8407, 0x000FC407, 0x0001C022, 0x01A1A2BF,
+ 0x0001C106, 0x00088007, 0x02269A40, 0x00094004, 0x00621112,
+ 0x01A3C801, 0x00E4007F, 0x00621104, 0x0180C908, 0x00E4008D,
+ 0x00621128, 0x0183C908, 0x00E2006C, 0x01A3C901, 0x00E2007B,
+ 0x0021E7FF, 0x0183E007, 0x00E2007F, 0x00EE006C, 0x0061600B,
+ 0x0021E7FF, 0x0183C507, 0x00E4006C, 0x00621111, 0x01A3C801,
+ 0x00E2007F, 0x00621110, 0x01A3C801, 0x00E20082, 0x0228A220,
+ 0x00621119, 0x01A3C801, 0x00E20086, 0x0001C022, 0x01B1A220,
+ 0x0001C106, 0x00088007, 0x0198A204, 0x00294000, 0x01A69ABF,
+ 0x002F8000, 0x002FC000, 0x0183C903, 0x00E20005, 0x0228A220,
+ 0x0071890C, 0x0208A206, 0x0198A206, 0x0001C022, 0x01B1A220,
+ 0x0001C106, 0x00088007, 0x00EE009D, 0x00621128, 0x0183C908,
+ 0x00E20005, 0x00621104, 0x0183C908, 0x00E200A6, 0x0062111C,
+ 0x0183C908, 0x00E20005, 0x0071890C, 0x0208A206, 0x0198A206,
+ 0x00718908, 0x0208A206, 0x00EE0005, ~0
+ };
+
+ /* Bi-Modal RAID-0/1 */
+ static const unsigned int raid[] = {
+ 0x00F20145, 0x00EE20FA, 0x00EE20A7, 0x0001C009, 0x00EE0004,
+ 0x00220000, 0x0001000B, 0x037003FF, 0x00700018, 0x037003FE,
+ 0x037043FD, 0x00704118, 0x037043FC, 0x01A3D240, 0x00E20017,
+ 0x00B3C235, 0x00E40018, 0x0093C104, 0x00E80014, 0x0093C004,
+ 0x00E80017, 0x01020000, 0x00274020, 0x00EE0083, 0x0080C904,
+ 0x0093C104, 0x00EA0020, 0x0093C103, 0x00EC001F, 0x00220002,
+ 0x00924104, 0x0005C009, 0x00EE0058, 0x0093CF04, 0x00E80026,
+ 0x00900F01, 0x00600001, 0x00910400, 0x00EE0058, 0x00601604,
+ 0x01A00003, 0x00E2002C, 0x01018000, 0x00274040, 0x00EE0083,
+ 0x0093CF03, 0x00EC0031, 0x00220003, 0x00924F04, 0x0005C009,
+ 0x00810104, 0x00B3C235, 0x00E20037, 0x0022C000, 0x00218210,
+ 0x00EE0039, 0x0022C001, 0x00218200, 0x00600401, 0x00A04901,
+ 0x00604101, 0x01A0C401, 0x00E20040, 0x00216202, 0x00EE0041,
+ 0x00216101, 0x02018506, 0x00EE2141, 0x00904901, 0x00E20049,
+ 0x00A00401, 0x00600001, 0x02E0C301, 0x00EE2141, 0x00216303,
+ 0x037003EE, 0x01A3C001, 0x00E40105, 0x00250080, 0x00204000,
+ 0x002042F1, 0x0004C001, 0x00230001, 0x00100006, 0x02C18605,
+ 0x00100006, 0x01A3D502, 0x00E20055, 0x00EE0053, 0x00004009,
+ 0x00000004, 0x00B3C235, 0x00E40062, 0x0022C001, 0x0020C000,
+ 0x00EE2141, 0x0020C001, 0x00EE2141, 0x00EE006B, 0x0022C000,
+ 0x0060D207, 0x00EE2141, 0x00B3C242, 0x00E20069, 0x01A3D601,
+ 0x00E2006E, 0x02E0C301, 0x00EE2141, 0x00230001, 0x00301303,
+ 0x00EE007B, 0x00218210, 0x01A3C301, 0x00E20073, 0x00216202,
+ 0x00EE0074, 0x00216101, 0x02018506, 0x00214000, 0x037003EE,
+ 0x01A3C001, 0x00E40108, 0x00230001, 0x00100006, 0x00250080,
+ 0x00204000, 0x002042F1, 0x0004C001, 0x00EE007F, 0x0024C000,
+ 0x01A3D1F0, 0x00E20088, 0x00230001, 0x00300000, 0x01A3D202,
+ 0x00E20085, 0x00EE00A5, 0x00B3C800, 0x00E20096, 0x00218000,
+ 0x00924709, 0x0005C009, 0x00B20802, 0x00E40093, 0x037103FD,
+ 0x00710418, 0x037103FC, 0x00EE0006, 0x00220000, 0x0001000F,
+ 0x00EE0006, 0x00800B0C, 0x00B00001, 0x00204000, 0x00208550,
+ 0x00208440, 0x002083E0, 0x00208200, 0x00208100, 0x01008000,
+ 0x037083EE, 0x02008212, 0x02008216, 0x01A3C201, 0x00E400A5,
+ 0x0100C000, 0x00EE20FA, 0x02800000, 0x00208000, 0x00B24C00,
+ 0x00E400AD, 0x00224001, 0x00724910, 0x0005C009, 0x00B3CDC4,
+ 0x00E200D5, 0x00B3CD29, 0x00E200D5, 0x00B3CD20, 0x00E200D5,
+ 0x00B3CD24, 0x00E200D5, 0x00B3CDC5, 0x00E200D2, 0x00B3CD39,
+ 0x00E200D2, 0x00B3CD30, 0x00E200D2, 0x00B3CD34, 0x00E200D2,
+ 0x00B3CDCA, 0x00E200CF, 0x00B3CD35, 0x00E200CF, 0x00B3CDC8,
+ 0x00E200CC, 0x00B3CD25, 0x00E200CC, 0x00B3CD40, 0x00E200CB,
+ 0x00B3CD42, 0x00E200CB, 0x01018000, 0x00EE0083, 0x0025C000,
+ 0x036083EE, 0x0000800D, 0x00EE00D8, 0x036083EE, 0x00208035,
+ 0x00EE00DA, 0x036083EE, 0x00208035, 0x00EE00DA, 0x00208007,
+ 0x036083EE, 0x00208025, 0x036083EF, 0x02400000, 0x01A3D208,
+ 0x00E200D8, 0x0067120A, 0x0021C000, 0x0021C224, 0x00220000,
+ 0x00404B1C, 0x00600105, 0x00800007, 0x0020C00E, 0x00214000,
+ 0x01004000, 0x01A0411F, 0x00404E01, 0x01A3C101, 0x00E200F1,
+ 0x00B20800, 0x00E400D8, 0x00220001, 0x0080490B, 0x00B04101,
+ 0x0040411C, 0x00EE00E1, 0x02269A01, 0x01020000, 0x02275D80,
+ 0x01A3D202, 0x00E200F4, 0x01B75D80, 0x01030000, 0x01B69A01,
+ 0x00EE00D8, 0x01A3D204, 0x00E40104, 0x00224000, 0x0020C00E,
+ 0x0020001E, 0x00214000, 0x01004000, 0x0212490E, 0x00214001,
+ 0x01004000, 0x02400000, 0x00B3D702, 0x00E80112, 0x00EE010E,
+ 0x00B3D702, 0x00E80112, 0x00B3D702, 0x00E4010E, 0x00230001,
+ 0x00EE0140, 0x00200005, 0x036003EE, 0x00204001, 0x00EE0116,
+ 0x00230001, 0x00100006, 0x02C18605, 0x00100006, 0x01A3D1F0,
+ 0x00E40083, 0x037003EE, 0x01A3C002, 0x00E20121, 0x0020A300,
+ 0x0183D102, 0x00E20124, 0x037003EE, 0x01A00005, 0x036003EE,
+ 0x01A0910F, 0x00B3C20F, 0x00E2012F, 0x01A3D502, 0x00E20116,
+ 0x01A3C002, 0x00E20116, 0x00B3D702, 0x00E4012C, 0x00300000,
+ 0x00EE011F, 0x02C18605, 0x00100006, 0x00EE0116, 0x01A3D1F0,
+ 0x00E40083, 0x037003EE, 0x01A3C004, 0x00E20088, 0x00200003,
+ 0x036003EE, 0x01A3D502, 0x00E20136, 0x00230001, 0x00B3C101,
+ 0x00E4012C, 0x00100006, 0x02C18605, 0x00100006, 0x00204000,
+ 0x00EE0116, 0x00100006, 0x01A3D1F0, 0x00E40083, 0x01000000,
+ 0x02400000, ~0
+ };
+
+ if (force)
+ previous_mode = UNKNOWN_MODE;
+
+ if (mode == previous_mode)
+ return;
+
+ /* decide what needs to be done based on the mode state-transition diagram */
+ switch (previous_mode) {
+ case OXNASSATA_RAID1:
+ switch (mode) {
+ case OXNASSATA_RAID0:
+ changeparameters = 1;
+ break;
+ case OXNASSATA_NOTRAID:
+ changeparameters = 1;
+ progmicrocode = 1;
+ break;
+ }
+ break;
+ case OXNASSATA_RAID0:
+ switch (mode) {
+ case OXNASSATA_RAID1:
+ changeparameters = 1;
+ break;
+ case OXNASSATA_NOTRAID:
+ changeparameters = 1;
+ progmicrocode = 1;
+ break;
+ }
+ break;
+ case OXNASSATA_NOTRAID:
+ case UNKNOWN_MODE:
+ changeparameters = 1;
+ progmicrocode = 1;
+ break;
+ }
+
+ /* no need to reprogram everything if already in the right mode */
+ if (progmicrocode) {
+ /* reset micro-code processor */
+ iowrite32(1, core_base + PROC_RESET);
+ wmb();
+
+ /* select micro-code */
+ switch (mode) {
+ case OXNASSATA_RAID1:
+ case OXNASSATA_RAID0:
+ VPRINTK("Loading RAID micro-code\n");
+ src = (unsigned int *)&raid[1];
+ break;
+ case OXNASSATA_NOTRAID:
+ VPRINTK("Loading JBOD micro-code\n");
+ src = (unsigned int *)&jbod[1];
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ /* load micro code */
+ dst = core_base + UCODE_STORE;
+ while (*src != ~0) {
+ iowrite32(*src, dst);
+ src++;
+ dst += sizeof(*src);
+ }
+ wmb();
+ }
+
+ if (changeparameters) {
+ u32 reg;
+ /* set other mode dependent flags */
+ switch (mode) {
+ case OXNASSATA_RAID1:
+ /* clear JBOD mode */
+ reg = ioread32(core_base + DATA_PLANE_CTRL);
+ reg |= DPC_JBOD_UCODE;
+ reg &= ~DPC_FIS_SWCH;
+ iowrite32(reg, core_base + DATA_PLANE_CTRL);
+ wmb();
+
+ /* set the hardware up for RAID-1 */
+ iowrite32(0, core_base + RAID_WP_BOT_LOW);
+ iowrite32(0, core_base + RAID_WP_BOT_HIGH);
+ iowrite32(0xffffffff, core_base + RAID_WP_TOP_LOW);
+ iowrite32(0x7fffffff, core_base + RAID_WP_TOP_HIGH);
+ iowrite32(0, core_base + RAID_SIZE_LOW);
+ iowrite32(0, core_base + RAID_SIZE_HIGH);
+ wmb();
+ break;
+ case OXNASSATA_RAID0:
+ /* clear JBOD mode */
+ reg = ioread32(core_base + DATA_PLANE_CTRL);
+ reg |= DPC_JBOD_UCODE;
+ reg &= ~DPC_FIS_SWCH;
+ iowrite32(reg, core_base + DATA_PLANE_CTRL);
+ wmb();
+
+ /* set the hardware up for RAID-0 */
+ iowrite32(0, core_base + RAID_WP_BOT_LOW);
+ iowrite32(0, core_base + RAID_WP_BOT_HIGH);
+ iowrite32(0xffffffff, core_base + RAID_WP_TOP_LOW);
+ iowrite32(0x7fffffff, core_base + RAID_WP_TOP_HIGH);
+ iowrite32(0xffffffff, core_base + RAID_SIZE_LOW);
+ iowrite32(0x7fffffff, core_base + RAID_SIZE_HIGH);
+ wmb();
+ break;
+ case OXNASSATA_NOTRAID:
+ /* enable jbod mode */
+ reg = ioread32(core_base + DATA_PLANE_CTRL);
+ reg &= ~DPC_JBOD_UCODE;
+ reg |= DPC_FIS_SWCH;
+ iowrite32(reg, core_base + DATA_PLANE_CTRL);
+ wmb();
+
+ /* start micro-code processor*/
+ iowrite32(1, core_base + PROC_START);
+ break;
+ default:
+ break;
+ }
+ }
+
+ previous_mode = mode;
+}
+
+/**
+ * sends a sync-escape if there is a link present
+ */
+static inline void sata_oxnas_send_sync_escape(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ u32 reg;
+
+ /* read the SSTATUS register and only send a sync escape if there is a
+ * link active */
+ if ((sata_oxnas_link_read(ap, 0x20) & 3) == 3) {
+ reg = ioread32(pd->port_base + SATA_COMMAND);
+ reg &= ~SATA_OPCODE_MASK;
+ reg |= CMD_SYNC_ESCAPE;
+ iowrite32(reg, pd->port_base + SATA_COMMAND);
+ }
+}
+
+/* clears errors */
+static inline void sata_oxnas_clear_CS_error(void __iomem *base)
+{
+ u32 reg;
+
+ reg = ioread32(base + SATA_CONTROL);
+ reg &= SATA_CTL_ERR_MASK;
+ iowrite32(reg, base + SATA_CONTROL);
+}
+
+/**
+ * Clears the error caused by the core's registers being accessed when the
+ * core is busy.
+ */
+static inline void sata_oxnas_clear_reg_access_error(void __iomem *base)
+{
+ u32 reg;
+
+ reg = ioread32(base + INT_STATUS);
+
+ DPRINTK("ENTER\n");
+ if (reg & INT_REG_ACCESS_ERR) {
+ printk(KERN_INFO "clearing register access error\n");
+ iowrite32(INT_REG_ACCESS_ERR, base + INT_STATUS);
+ }
+ /* re-read to check that the write actually cleared the error */
+ reg = ioread32(base + INT_STATUS);
+ if (reg & INT_REG_ACCESS_ERR)
+ printk(KERN_INFO "register access error didn't clear\n");
+}
+
+/**
+ * Clean up all the state machines in the sata core.
+ * @return post cleanup action required
+ */
+static int sata_oxnas_cleanup(struct ata_host *ah)
+{
+ int actions_required = 0;
+
+ printk(KERN_INFO "ox820sata: resetting SATA core\n");
+
+ /* core not recovering, reset it */
+ mdelay(5);
+ sata_oxnas_reset_core(ah);
+ mdelay(5);
+ actions_required |= OXNAS_SATA_REINIT;
+ /* Perform any SATA core re-initialisation after reset; post-reset
+ * init needs to be called for both ports as there's one reset for
+ * both ports */
+
+ sata_oxnas_post_reset_init(ah->ports[0]);
+
+ return actions_required;
+}
+
+void sata_oxnas_freeze_host(struct ata_port *ap)
+{
+ struct sata_oxnas_host_priv *hd = ap->host->private_data;
+
+ DPRINTK("ENTER\n");
+ hd->port_in_eh |= BIT(ap->port_no);
+ smp_wmb();
+}
+
+void sata_oxnas_thaw_host(struct ata_port *ap)
+{
+ struct sata_oxnas_host_priv *hd = ap->host->private_data;
+
+ DPRINTK("ENTER\n");
+ hd->port_in_eh &= ~BIT(ap->port_no);
+ smp_wmb();
+}
+
+static void sata_oxnas_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ DPRINTK("ENTER\n");
+ /* If the core is busy here, make it idle */
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ sata_oxnas_cleanup(qc->ap->host);
+}
+
+
+/**
+ * turn on the interrupts
+ *
+ * @param ap Hardware with the registers in
+ */
+static void sata_oxnas_irq_on(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ u32 mask = (COREINT_END << ap->port_no);
+
+ /* Clear pending interrupts */
+ iowrite32(~0, pd->port_base + INT_CLEAR);
+ iowrite32(mask, pd->core_base + CORE_INT_STATUS);
+ wmb();
+
+ /* enable End of command interrupt */
+ iowrite32(INT_WANT, pd->port_base + INT_ENABLE);
+ iowrite32(mask, pd->core_base + CORE_INT_ENABLE);
+}
+
+
+/** @return true if the port has a cable connected */
+int sata_oxnas_check_link(struct ata_port *ap)
+{
+ u32 reg;
+
+ sata_oxnas_scr_read_port(ap, SCR_STATUS, &reg);
+ /* Check for the cable present indicated by SCR status bit-0 set */
+ return reg & 0x1;
+}
+
+/**
+ * sata_oxnas_postreset - standard postreset callback
+ * @link: the target ata_link
+ * @classes: classes of attached devices
+ *
+ * This function is invoked after a successful reset. Note that
+ * the device might have been reset more than once using
+ * different reset methods before postreset is invoked.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep)
+ */
+static void sata_oxnas_postreset(struct ata_link *link, unsigned int *classes)
+{
+ struct ata_port *ap = link->ap;
+
+ unsigned int dev;
+
+ DPRINTK("ENTER\n");
+ ata_std_postreset(link, classes);
+
+ /* turn on phy error detection by removing the masks */
+ sata_oxnas_link_write(ap , 0x0c, 0x30003);
+
+ /* bail out if no device is present */
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
+ DPRINTK("EXIT, no device\n");
+ return;
+ }
+
+ /* go through all the devices and configure them */
+ for (dev = 0; dev < ATA_MAX_DEVICES; ++dev) {
+ if (ap->link.device[dev].class == ATA_DEV_ATA)
+ sata_oxnas_dev_config(&(ap->link.device[dev]));
+ }
+
+ DPRINTK("EXIT\n");
+}
+
+/**
+ * Called to read the hardware registers / DMA buffers, to
+ * obtain the current set of taskfile register values.
+ * @param ap hardware with the registers in
+ * @param tf taskfile to read the registers into
+ */
+static void sata_oxnas_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct sata_oxnas_port_priv *port_priv = ap->private_data;
+ void __iomem *port_base = port_priv->port_base;
+ /* read the orb registers */
+ u32 Orb1 = ioread32(port_base + ORB1);
+ u32 Orb2 = ioread32(port_base + ORB2);
+ u32 Orb3 = ioread32(port_base + ORB3);
+ u32 Orb4 = ioread32(port_base + ORB4);
+
+ /* read common 28/48 bit tf parameters */
+ tf->device = (Orb1 >> 24);
+ tf->nsect = (Orb2 >> 0);
+ tf->feature = (Orb2 >> 16);
+ tf->command = sata_oxnas_check_status(ap);
+
+ /* read 48 or 28 bit tf parameters */
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ tf->hob_nsect = (Orb2 >> 8);
+ tf->lbal = (Orb3 >> 0);
+ tf->lbam = (Orb3 >> 8);
+ tf->lbah = (Orb3 >> 16);
+ tf->hob_lbal = (Orb3 >> 24);
+ tf->hob_lbam = (Orb4 >> 0);
+ tf->hob_lbah = (Orb4 >> 8);
+ /* feature ext and control are write only */
+ } else {
+ /* read 28-bit lba */
+ tf->lbal = (Orb3 >> 0);
+ tf->lbam = (Orb3 >> 8);
+ tf->lbah = (Orb3 >> 16);
+ }
+}
+
+/**
+ * Read a result task-file from the sata core registers.
+ */
+static bool sata_oxnas_qc_fill_rtf(struct ata_queued_cmd *qc)
+{
+ /* Read the most recently received FIS from the SATA core ORB registers
+ and convert to an ATA taskfile */
+ sata_oxnas_tf_read(qc->ap, &qc->result_tf);
+ return true;
+}
+
+/**
+ * Reads the Status ATA shadow register from hardware.
+ *
+ * @return The status register
+ */
+static u8 sata_oxnas_check_status(struct ata_port *ap)
+{
+ u32 Reg;
+ u8 status;
+ struct sata_oxnas_port_priv *port_priv = ap->private_data;
+ void __iomem *port_base = port_priv->port_base;
+
+ /* read byte 3 of Orb2 register */
+ status = ioread32(port_base + ORB2) >> 24;
+
+ /* check for the drive going missing indicated by SCR status bits
+ * 0-3 = 0 */
+ sata_oxnas_scr_read_port(ap, SCR_STATUS, &Reg);
+
+ if (!(Reg & 0x1)) {
+ status |= ATA_DF;
+ status |= ATA_ERR;
+ }
+
+ return status;
+}
+
+/**
+ * Prepare as much as possible for a command without involving anything that is
+ * shared between ports.
+ */
+static void sata_oxnas_qc_prep(struct ata_queued_cmd *qc)
+{
+ struct sata_oxnas_port_priv *pd;
+ int port_no = qc->ap->port_no;
+
+ /* if the port's not connected, complete now with an error */
+ /*
+ if (!sata_oxnas_check_link(qc->ap)) {
+ printk(KERN_ERR "port %d not connected completing with error\n",
+ qc->ap->port_no);
+ qc->err_mask |= AC_ERR_ATA_BUS;
+ ata_qc_complete(qc);
+ }
+ */
+ /* both pio and dma commands use dma */
+ if (ata_is_dma(qc->tf.protocol) || ata_is_pio(qc->tf.protocol)) {
+ /* program the scatterlist into the prd table */
+ ata_bmdma_qc_prep(qc);
+
+ /* point the sgdma controller at the dma request structure */
+ pd = qc->ap->private_data;
+
+ iowrite32(pd->sgdma_request_pa,
+ pd->sgdma_base + SGDMA_REQUESTPTR);
+
+ /* setup the request table */
+ if (port_no == 0) {
+ pd->sgdma_request->control =
+ (qc->dma_dir == DMA_FROM_DEVICE) ?
+ SGDMA_REQCTL0IN : SGDMA_REQCTL0OUT;
+ } else {
+ pd->sgdma_request->control =
+ (qc->dma_dir == DMA_FROM_DEVICE) ?
+ SGDMA_REQCTL1IN : SGDMA_REQCTL1OUT;
+ }
+ pd->sgdma_request->qualifier = SGDMA_REQQUAL;
+ pd->sgdma_request->src_pa = qc->ap->bmdma_prd_dma;
+ pd->sgdma_request->dst_pa = qc->ap->bmdma_prd_dma;
+ smp_wmb();
+
+ /* tell it to wait */
+ iowrite32(SGDMA_CONTROL_NOGO, pd->sgdma_base + SGDMA_CONTROL);
+ }
+}
+
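+/*
+ * Per-port DMA area layout (SATA_OXNAS_DMA_SIZE bytes): a single struct
+ * sgdma_request immediately followed by the BMDMA PRD table of up to
+ * SATA_OXNAS_MAX_PRD entries. The memory comes either from an optional
+ * preallocated region (the fifth "reg" resource of the device tree node)
+ * or from coherent DMA memory allocated here.
+ */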
+static int sata_oxnas_port_start(struct ata_port *ap)
+{
+ struct sata_oxnas_host_priv *host_priv = ap->host->private_data;
+ struct device *dev = ap->host->dev;
+ struct sata_oxnas_port_priv *pp;
+ void *mem;
+ dma_addr_t mem_dma;
+
+ DPRINTK("ENTER\n");
+
+ pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
+ pp->port_base = host_priv->port_base[ap->port_no];
+ pp->sgdma_base = host_priv->sgdma_base[ap->port_no];
+ pp->core_base = host_priv->core_base;
+
+ /* preallocated */
+ if (host_priv->dma_size >= SATA_OXNAS_DMA_SIZE * SATA_OXNAS_MAX_PORTS) {
+ mem_dma = host_priv->dma_base +
+ ap->port_no * SATA_OXNAS_DMA_SIZE;
+ mem = ioremap(mem_dma, SATA_OXNAS_DMA_SIZE);
+
+ } else {
+ mem = dma_alloc_coherent(dev, SATA_OXNAS_DMA_SIZE, &mem_dma,
+ GFP_KERNEL);
+ }
+ if (!mem)
+ goto err_ret;
+
+ pp->sgdma_request_pa = mem_dma;
+ pp->sgdma_request = mem;
+ ap->bmdma_prd_dma = mem_dma + sizeof(struct sgdma_request);
+ ap->bmdma_prd = mem + sizeof(struct sgdma_request);
+
+ ap->private_data = pp;
+
+ sata_oxnas_post_reset_init(ap);
+
+ return 0;
+
+err_ret:
+ kfree(pp);
+ return -ENOMEM;
+
+}
+
+static void sata_oxnas_port_stop(struct ata_port *ap)
+{
+ struct device *dev = ap->host->dev;
+ struct sata_oxnas_port_priv *pp = ap->private_data;
+ struct sata_oxnas_host_priv *host_priv = ap->host->private_data;
+
+ DPRINTK("ENTER\n");
+ ap->private_data = NULL;
+ if (host_priv->dma_size) {
+ iounmap(pp->sgdma_request);
+ } else {
+ dma_free_coherent(dev, SATA_OXNAS_DMA_SIZE,
+ pp->sgdma_request, pp->sgdma_request_pa);
+ }
+
+ kfree(pp);
+}
+
+static void sata_oxnas_post_reset_init(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ uint dev;
+ int no_microcode = 0;
+
+ DPRINTK("ENTER\n");
+ if (no_microcode) {
+ u32 reg;
+ sata_oxnas_set_mode(ap, UNKNOWN_MODE, 1);
+ reg = readl(pd->core_base + DEVICE_CONTROL);
+ reg |= DEVICE_CONTROL_ATA_ERR_OVERRIDE;
+ writel(reg, pd->core_base + DEVICE_CONTROL);
+ } else {
+ /* JBOD uCode */
+ sata_oxnas_set_mode(ap, OXNASSATA_NOTRAID, 1);
+ /* Turn the work around off as it may have been left on by any
+ * HW-RAID code that we've been working with */
+ writel(0x0, pd->core_base + PORT_ERROR_MASK);
+ }
+ /* turn on phy error detection by removing the masks */
+ sata_oxnas_link_write(ap, 0x0C, 0x30003);
+
+ /* enable hotplug event detection */
+ sata_oxnas_scr_write_port(ap, SCR_ERROR, ~0);
+ sata_oxnas_scr_write_port(ap, SERROR_IRQ_MASK, 0x03feffff);
+ sata_oxnas_scr_write_port(ap, SCR_ACTIVE, ~0 & ~(1 << 26) & ~(1 << 16));
+
+ /* enable interrupts for ports */
+ sata_oxnas_irq_on(ap);
+
+ /* go through all the devices and configure them */
+ for (dev = 0; dev < ATA_MAX_DEVICES; ++dev) {
+ if (ap->link.device[dev].class == ATA_DEV_ATA) {
+ sata_std_hardreset(&ap->link, NULL, jiffies + HZ);
+ sata_oxnas_dev_config(&(ap->link.device[dev]));
+ }
+ }
+
+ /* clean up any remaining errors */
+ sata_oxnas_scr_write_port(ap, SCR_ERROR, ~0);
+ VPRINTK("done\n");
+}
+
+/**
+ * host_stop() is called when the rmmod or hot unplug process begins. The
+ * hook must stop all hardware interrupts, DMA engines, etc.
+ *
+ * @param host_set the ata host being stopped
+ */
+static void sata_oxnas_host_stop(struct ata_host *host_set)
+{
+ DPRINTK("\n");
+}
+
+
+#define ERROR_HW_ACQUIRE_TIMEOUT_JIFFIES (10 * HZ)
+static void sata_oxnas_error_handler(struct ata_port *ap)
+{
+ DPRINTK("Enter port_no %d\n", ap->port_no);
+ sata_oxnas_freeze_host(ap);
+
+ /* If the core is busy here, make it idle */
+ sata_oxnas_cleanup(ap->host);
+
+ ata_std_error_handler(ap);
+
+ sata_oxnas_thaw_host(ap);
+}
+
+static int sata_oxnas_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ void __iomem *port_base = pd->port_base;
+ int rc;
+
+ struct ata_taskfile tf;
+ u32 Command_Reg;
+
+ DPRINTK("ENTER\n");
+
+ port_base = pd->port_base;
+
+ if (ata_link_offline(link)) {
+ DPRINTK("PHY reports no device\n");
+ *class = ATA_DEV_NONE;
+ goto out;
+ }
+
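+ /* The sequence below mirrors a standard ATA soft reset issued over
+ * the wire: a device control FIS with the current ctl value, one
+ * with SRST set, then one with SRST cleared again, before waiting
+ * for the device to become ready */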
+ /* write value to register */
+ iowrite32((ap->ctl) << 24, port_base + ORB4);
+
+ /* command the core to send a control FIS */
+ Command_Reg = ioread32(port_base + SATA_COMMAND);
+ Command_Reg &= ~SATA_OPCODE_MASK;
+ Command_Reg |= CMD_WRITE_TO_ORB_REGS_NO_COMMAND;
+ iowrite32(Command_Reg, port_base + SATA_COMMAND);
+ udelay(20); /* FIXME: flush */
+
+ /* write value to register */
+ iowrite32((ap->ctl | ATA_SRST) << 24, port_base + ORB4);
+
+ /* command the core to send a control FIS */
+ Command_Reg &= ~SATA_OPCODE_MASK;
+ Command_Reg |= CMD_WRITE_TO_ORB_REGS_NO_COMMAND;
+ iowrite32(Command_Reg, port_base + SATA_COMMAND);
+ udelay(20); /* FIXME: flush */
+
+ /* write value to register */
+ iowrite32((ap->ctl) << 24, port_base + ORB4);
+
+ /* command the core to send a control FIS */
+ Command_Reg &= ~SATA_OPCODE_MASK;
+ Command_Reg |= CMD_WRITE_TO_ORB_REGS_NO_COMMAND;
+ iowrite32(Command_Reg, port_base + SATA_COMMAND);
+
+ msleep(150);
+
+ rc = ata_sff_wait_ready(link, deadline);
+
+ /* if link is occupied, -ENODEV too is an error */
+ if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
+ ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ sata_oxnas_tf_read(ap, &tf);
+ *class = ata_dev_classify(&tf);
+
+ if (*class == ATA_DEV_UNKNOWN)
+ *class = ATA_DEV_NONE;
+
+out:
+ DPRINTK("EXIT, class=%u\n", *class);
+ return 0;
+}
+
+
+int sata_oxnas_init_controller(struct ata_host *host)
+{
+ return 0;
+}
+
+/**
+ * Ref bug-6320
+ *
+ * This code is a workaround for a DMA hardware bug that will repeat the
+ * penultimate 8 bytes on some reads. This code will check that the amount
+ * of data transferred is a multiple of 512 bytes; if not, it will fetch
+ * the correct data from a buffer in the SATA core and copy it into
+ * memory.
+ *
+ * @param ap SATA port to check and, if necessary, correct.
+ */
+static int ox820sata_bug_6320_workaround(struct ata_port *ap)
+{
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ void __iomem *core_base = pd->core_base;
+ int is_read;
+ int quads_transferred;
+ int remainder;
+ int sector_quads_remaining;
+ int bug_present = 0;
+
+ /* Only want to apply fix to reads */
+ is_read = !(readl(core_base + DM_DBG1) & (ap->port_no ?
+ BIT(CORE_PORT1_DATA_DIR_BIT) :
+ BIT(CORE_PORT0_DATA_DIR_BIT)));
+
+ /* Check for an incomplete transfer, i.e. not a multiple of 512 bytes
+ transferred (datacount_port register counts quads transferred) */
+ quads_transferred =
+ readl(core_base + (ap->port_no ?
+ DATACOUNT_PORT1 : DATACOUNT_PORT0));
+
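+ /* the count is in quads (32-bit words), so a full 512-byte sector
+ * is 0x80 quads */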
+ remainder = quads_transferred & 0x7f;
+ sector_quads_remaining = remainder ? (0x80 - remainder) : 0;
+
+ if (is_read && (sector_quads_remaining == 2)) {
+ bug_present = 1;
+ } else if (sector_quads_remaining) {
+ if (is_read) {
+ printk(KERN_WARNING "SATA read fixup cannot deal with" \
+ " %d quads remaining\n",
+ sector_quads_remaining);
+ } else {
+ printk(KERN_WARNING "SATA write fixup of %d quads" \
+ " remaining not supported\n",
+ sector_quads_remaining);
+ }
+ }
+
+ return bug_present;
+}
+
+/* This port generated an interrupt */
+static void sata_oxnas_port_irq(struct ata_port *ap, int force_error)
+{
+ struct ata_queued_cmd *qc;
+ struct sata_oxnas_port_priv *pd = ap->private_data;
+ void __iomem *port_base = pd->port_base;
+
+ u32 int_status;
+ unsigned long flags = 0;
+
+ /* DPRINTK("ENTER irqstatus %x\n", ioread32(port_base + INT_STATUS)); */
+/*
+ if (ap->qc_active & (1 << ATA_TAG_INTERNAL)) {
+ qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
+ DPRINTK("completing non-ncq cmd\n");
+
+ if (qc) {
+ ata_qc_complete(qc);
+ }
+ return;
+ }
+*/
+
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+
+ /* record the port's interrupt */
+ int_status = ioread32(port_base + INT_STATUS);
+
+ /* If there's no command associated with this IRQ, ignore it. We may get
+ * spurious interrupts when cleaning-up after a failed command, ignore
+ * these too. */
+ if (likely(qc)) {
+ /* get the status before any error cleanup */
+ qc->err_mask = ac_err_mask(sata_oxnas_check_status(ap));
+ if (force_error) {
+ /* Pretend there has been a link error */
+ qc->err_mask |= AC_ERR_ATA_BUS;
+ DPRINTK(" ####force error####\n");
+ }
+ /* tell libata we're done */
+ local_irq_save(flags);
+ sata_oxnas_irq_clear(ap);
+ local_irq_restore(flags);
+ ata_qc_complete(qc);
+ } else {
+ VPRINTK("Ignoring interrupt, can't find the command tag=" \
+ "%d %08x\n", ap->link.active_tag, ap->qc_active);
+ }
+
+ /* maybe a hotplug event */
+ if (unlikely(int_status & INT_LINK_SERROR)) {
+ u32 serror;
+
+ sata_oxnas_scr_read_port(ap, SCR_ERROR, &serror);
+ if (serror & (SERR_DEV_XCHG | SERR_PHYRDY_CHG)) {
+ ata_ehi_hotplugged(&ap->link.eh_info);
+ ata_port_freeze(ap);
+ }
+ }
+}
+
+/**
+ * irq_handler is the interrupt handling routine registered with the
+ * system by libata.
+ */
+static irqreturn_t sata_oxnas_interrupt(int irq, void *dev_instance)
+{
+ struct ata_host *ah = dev_instance;
+ struct sata_oxnas_host_priv *hd = ah->private_data;
+ void __iomem *core_base = hd->core_base;
+
+ u32 int_status;
+ irqreturn_t ret = IRQ_NONE;
+ u32 port_no;
+ u32 mask;
+ int bug_present;
+
+ /* loop until there are no more interrupts */
+ while ((int_status = ioread32(core_base + CORE_INT_STATUS)) &
+ COREINT_END) {
+
+ /* clear any interrupt */
+ iowrite32(int_status, core_base + CORE_INT_CLEAR);
+
+ /* Only need this workaround for single disk systems as dual
+ * disk will use uCode which prevents this read underrun problem
+ * from occurring.
+ * All single disk systems will use port 0 */
+
+ for (port_no = 0; port_no < SATA_OXNAS_MAX_PORTS; ++port_no) {
+ /* check the raw end of command interrupt to see if the
+ * port is done */
+ mask = (CORERAW_HOST << port_no);
+ if (int_status & mask) {
+ /* this port had an interrupt, clear it */
+ iowrite32(mask, core_base + CORE_INT_CLEAR);
+ bug_present = ox820sata_bug_6320_workaround(
+ ah->ports[port_no]);
+ sata_oxnas_port_irq(ah->ports[port_no],
+ bug_present);
+ ret = IRQ_HANDLED;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_oxnas_sht = {
+ ATA_NCQ_SHT("sata_oxnas"),
+ .can_queue = SATA_OXNAS_QUEUE_DEPTH,
+ .sg_tablesize = SATA_OXNAS_MAX_PRD,
+ .dma_boundary = ATA_DMA_BOUNDARY,
+ .unchecked_isa_dma = 0,
+};
+
+
+static struct ata_port_operations sata_oxnas_ops = {
+ .inherits = &sata_port_ops,
+ .qc_prep = sata_oxnas_qc_prep,
+ .qc_issue = sata_oxnas_qc_issue,
+ .qc_fill_rtf = sata_oxnas_qc_fill_rtf,
+
+ .scr_read = sata_oxnas_scr_read,
+ .scr_write = sata_oxnas_scr_write,
+
+ /* .freeze = sata_oxnas_freeze, */
+ /* .thaw = sata_oxnas_thaw, */
+ .softreset = sata_oxnas_softreset,
+ /* .hardreset = sata_oxnas_hardreset, */
+ .postreset = sata_oxnas_postreset,
+ .error_handler = sata_oxnas_error_handler,
+ .post_internal_cmd = sata_oxnas_post_internal_cmd,
+
+ .port_start = sata_oxnas_port_start,
+ .port_stop = sata_oxnas_port_stop,
+
+ .host_stop = sata_oxnas_host_stop,
+ /* .pmp_attach = sata_oxnas_pmp_attach, */
+ /* .pmp_detach = sata_oxnas_pmp_detach, */
+ .sff_check_status = sata_oxnas_check_status,
+};
+
+static const struct ata_port_info sata_oxnas_port_info = {
+ .flags = SATA_OXNAS_HOST_FLAGS,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sata_oxnas_ops,
+};
+
+static int sata_oxnas_probe(struct platform_device *ofdev)
+{
+ int retval = -ENXIO;
+ void __iomem *port_base = NULL;
+ void __iomem *sgdma_base = NULL;
+ void __iomem *core_base = NULL;
+ void __iomem *phy_base = NULL;
+ struct reset_control *rstc;
+
+ struct resource res = {};
+ struct sata_oxnas_host_priv *host_priv = NULL;
+ int irq = 0;
+ struct ata_host *host = NULL;
+ struct clk *clk = NULL;
+
+ const struct ata_port_info *ppi[] = { &sata_oxnas_port_info, NULL };
+
+ port_base = of_iomap(ofdev->dev.of_node, 0);
+ if (!port_base)
+ goto error_exit_with_cleanup;
+
+ sgdma_base = of_iomap(ofdev->dev.of_node, 1);
+ if (!sgdma_base)
+ goto error_exit_with_cleanup;
+
+ core_base = of_iomap(ofdev->dev.of_node, 2);
+ if (!core_base)
+ goto error_exit_with_cleanup;
+
+ phy_base = of_iomap(ofdev->dev.of_node, 3);
+ if (!phy_base)
+ goto error_exit_with_cleanup;
+
+ host_priv = devm_kzalloc(&ofdev->dev,
+ sizeof(struct sata_oxnas_host_priv),
+ GFP_KERNEL);
+ if (!host_priv)
+ goto error_exit_with_cleanup;
+
+ host_priv->port_base[0] = port_base;
+ host_priv->sgdma_base[0] = sgdma_base;
+ host_priv->core_base = core_base;
+ host_priv->phy_base = phy_base;
+
+ if (!of_address_to_resource(ofdev->dev.of_node, 4, &res)) {
+ host_priv->dma_base = res.start;
+ host_priv->dma_size = resource_size(&res);
+ }
+
+ irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+ if (!irq) {
+ dev_err(&ofdev->dev, "invalid irq from platform\n");
+ goto error_exit_with_cleanup;
+ }
+ host_priv->irq = irq;
+
+ clk = of_clk_get(ofdev->dev.of_node, 0);
+ if (IS_ERR(clk)) {
+ retval = PTR_ERR(clk);
+ clk = NULL;
+ goto error_exit_with_cleanup;
+ }
+ host_priv->clk = clk;
+
+ rstc = devm_reset_control_get(&ofdev->dev, "sata");
+ if (IS_ERR(rstc)) {
+ retval = PTR_ERR(rstc);
+ goto error_exit_with_cleanup;
+ }
+ host_priv->rst_sata = rstc;
+
+ rstc = devm_reset_control_get(&ofdev->dev, "link");
+ if (IS_ERR(rstc)) {
+ retval = PTR_ERR(rstc);
+ goto error_exit_with_cleanup;
+ }
+ host_priv->rst_link = rstc;
+
+ rstc = devm_reset_control_get(&ofdev->dev, "phy");
+ if (IS_ERR(rstc)) {
+ retval = PTR_ERR(rstc);
+ goto error_exit_with_cleanup;
+ }
+ host_priv->rst_phy = rstc;
+
+ /* allocate host structure */
+ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_OXNAS_MAX_PORTS);
+ if (!host) {
+ retval = -ENOMEM;
+ goto error_exit_with_cleanup;
+ }
+ host->private_data = host_priv;
+ host->iomap = port_base;
+
+ /* initialize host controller */
+ retval = sata_oxnas_init_controller(host);
+ if (retval)
+ goto error_exit_with_cleanup;
+
+ /*
+ * Now, register with libATA core, this will also initiate the
+ * device discovery process, invoking our port_start() handler &
+ * error_handler() to execute a dummy softreset EH session
+ */
+ ata_host_activate(host, irq, sata_oxnas_interrupt, SATA_OXNAS_IRQ_FLAG,
+ &sata_oxnas_sht);
+
+ return 0;
+
+error_exit_with_cleanup:
+ if (irq)
+ irq_dispose_mapping(host_priv->irq);
+ if (clk)
+ clk_put(clk);
+ if (host)
+ ata_host_detach(host);
+ if (port_base)
+ iounmap(port_base);
+ if (sgdma_base)
+ iounmap(sgdma_base);
+ if (core_base)
+ iounmap(core_base);
+ if (phy_base)
+ iounmap(phy_base);
+ return retval;
+}
+
+
+static int sata_oxnas_remove(struct platform_device *ofdev)
+{
+ struct ata_host *host = dev_get_drvdata(&ofdev->dev);
+ struct sata_oxnas_host_priv *host_priv = host->private_data;
+
+ ata_host_detach(host);
+
+ irq_dispose_mapping(host_priv->irq);
+ iounmap(host_priv->port_base[0]);
+ iounmap(host_priv->sgdma_base[0]);
+ iounmap(host_priv->core_base);
+ iounmap(host_priv->phy_base);
+
+ /* reset Controller, Link and PHY */
+ reset_control_assert(host_priv->rst_sata);
+ reset_control_assert(host_priv->rst_link);
+ reset_control_assert(host_priv->rst_phy);
+
+ /* Disable the clock to the SATA block */
+ clk_disable_unprepare(host_priv->clk);
+ clk_put(host_priv->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sata_oxnas_suspend(struct platform_device *op, pm_message_t state)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+
+ return ata_host_suspend(host, state);
+}
+
+static int sata_oxnas_resume(struct platform_device *op)
+{
+ struct ata_host *host = dev_get_drvdata(&op->dev);
+ int ret;
+
+ ret = sata_oxnas_init_controller(host);
+ if (ret) {
+ dev_err(&op->dev, "Error initializing hardware\n");
+ return ret;
+ }
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
+
+
+static struct of_device_id oxnas_sata_match[] = {
+ {
+ .compatible = "plxtech,nas782x-sata",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, oxnas_sata_match);
+
+static struct platform_driver oxnas_sata_driver = {
+ .driver = {
+ .name = "oxnas-sata",
+ .owner = THIS_MODULE,
+ .of_match_table = oxnas_sata_match,
+ },
+ .probe = sata_oxnas_probe,
+ .remove = sata_oxnas_remove,
+#ifdef CONFIG_PM
+ .suspend = sata_oxnas_suspend,
+ .resume = sata_oxnas_resume,
+#endif
+};
+
+module_platform_driver(oxnas_sata_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Oxford Semiconductor Ltd.");
+MODULE_DESCRIPTION("934 SATA core controller");
diff --git a/target/linux/oxnas/files/drivers/clk/clk-oxnas.c b/target/linux/oxnas/files/drivers/clk/clk-oxnas.c
new file mode 100644
index 0000000000..8d80c4f2b8
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/clk/clk-oxnas.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2010 Broadcom
+ * Copyright (C) 2012 Stephen Warren
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+#include <linux/reset.h>
+#include <linux/io.h>
+#include <mach/hardware.h>
+#include <mach/utils.h>
+
+#define MHZ (1000 * 1000)
+
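+/*
+ * The recalc_rate callback below computes
+ * Fout = Fin * (fbdiv / 32768) / (refdiv * outdiv)
+ * where fbdiv appears to be a fixed-point feedback divider scaled by 2^15;
+ * dividing by MHZ first and multiplying back at the end presumably keeps
+ * the intermediate values within 32 bits.
+ */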
+static unsigned long plla_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ unsigned long fin = parent_rate;
+ unsigned long pll0;
+ unsigned long fbdiv, refdiv, outdiv;
+
+ pll0 = readl_relaxed(SYS_CTRL_PLLA_CTRL0);
+ refdiv = (pll0 >> PLLA_REFDIV_SHIFT) & PLLA_REFDIV_MASK;
+ refdiv += 1;
+ outdiv = (pll0 >> PLLA_OUTDIV_SHIFT) & PLLA_OUTDIV_MASK;
+ outdiv += 1;
+ fbdiv = readl_relaxed(SYS_CTRL_PLLA_CTRL1);
+
+ /* it seems we never get here while the PLL is bypassed, so ignore
+ * that case */
+
+ return fin / MHZ * fbdiv / (refdiv * outdiv) / 32768 * MHZ;
+}
+
+static const char *pll_clk_parents[] = {
+ "oscillator",
+};
+
+static struct clk_ops plla_ops = {
+ .recalc_rate = plla_clk_recalc_rate,
+};
+
+static struct clk_init_data clk_plla_init = {
+ .name = "plla",
+ .ops = &plla_ops,
+ .parent_names = pll_clk_parents,
+ .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_hw plla_hw = {
+ .init = &clk_plla_init,
+};
+
+static struct device_node *node_pllb;
+
+int pllb_clk_enable(struct clk_hw *hw)
+{
+ struct reset_control *rstc;
+
+ rstc = of_reset_control_get(node_pllb, NULL);
+ if (IS_ERR(rstc))
+ return PTR_ERR(rstc);
+
+ /* put PLL into bypass */
+ oxnas_register_set_mask(SEC_CTRL_PLLB_CTRL0, BIT(PLLB_BYPASS));
+ wmb();
+ udelay(10);
+ reset_control_assert(rstc);
+ udelay(10);
+ /* set PLL B control information */
+ writel((1 << PLLB_ENSAT) | (1 << PLLB_OUTDIV) | (2 << PLLB_REFDIV),
+ SEC_CTRL_PLLB_CTRL0);
+ reset_control_deassert(rstc);
+ reset_control_put(rstc);
+ udelay(100);
+ oxnas_register_clear_mask(SEC_CTRL_PLLB_CTRL0, BIT(PLLB_BYPASS));
+
+ return 0;
+}
+
+void pllb_clk_disable(struct clk_hw *hw)
+{
+ struct reset_control *rstc;
+
+ /* put PLL into bypass */
+ oxnas_register_set_mask(SEC_CTRL_PLLB_CTRL0, BIT(PLLB_BYPASS));
+ wmb();
+ udelay(10);
+
+ rstc = of_reset_control_get(node_pllb, NULL);
+ if (!IS_ERR(rstc))
+ reset_control_assert(rstc);
+}
+
+static struct clk_ops pllb_ops = {
+ .enable = pllb_clk_enable,
+ .disable = pllb_clk_disable,
+};
+
+static struct clk_init_data clk_pllb_init = {
+ .name = "pllb",
+ .ops = &pllb_ops,
+ .parent_names = pll_clk_parents,
+ .num_parents = ARRAY_SIZE(pll_clk_parents),
+};
+
+static struct clk_hw pllb_hw = {
+ .init = &clk_pllb_init,
+};
+
+/* standard gate clock */
+struct clk_std {
+ struct clk_hw hw;
+ signed char bit;
+};
+
+#define NUM_STD_CLKS 17
+#define to_stdclk(_hw) container_of(_hw, struct clk_std, hw)
+
+static int std_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_std *std = to_stdclk(hw);
+
+ return readl_relaxed(SYSCTRL_CLK_STAT) & BIT(std->bit);
+}
+
+static int std_clk_enable(struct clk_hw *hw)
+{
+ struct clk_std *std = to_stdclk(hw);
+
+ writel(BIT(std->bit), SYS_CTRL_CLK_SET_CTRL);
+ return 0;
+}
+
+static void std_clk_disable(struct clk_hw *hw)
+{
+ struct clk_std *std = to_stdclk(hw);
+
+ writel(BIT(std->bit), SYS_CTRL_CLK_CLR_CTRL);
+}
+
+static struct clk_ops std_clk_ops = {
+ .enable = std_clk_enable,
+ .disable = std_clk_disable,
+ .is_enabled = std_clk_is_enabled,
+};
+
+static const char *std_clk_parents[] = {
+ "oscillator",
+};
+
+static const char *eth_parents[] = {
+ "gmacclk",
+};
+
+#define DECLARE_STD_CLKP(__clk, __bit, __parent) \
+static struct clk_init_data clk_##__clk##_init = { \
+ .name = __stringify(__clk), \
+ .ops = &std_clk_ops, \
+ .parent_names = __parent, \
+ .num_parents = ARRAY_SIZE(__parent), \
+}; \
+ \
+static struct clk_std clk_##__clk = { \
+ .bit = __bit, \
+ .hw = { \
+ .init = &clk_##__clk##_init, \
+ }, \
+}
+
+#define DECLARE_STD_CLK(__clk, __bit) DECLARE_STD_CLKP(__clk, __bit, \
+ std_clk_parents)
+
+DECLARE_STD_CLK(leon, 0);
+DECLARE_STD_CLK(dma_sgdma, 1);
+DECLARE_STD_CLK(cipher, 2);
+DECLARE_STD_CLK(sd, 3);
+DECLARE_STD_CLK(sata, 4);
+DECLARE_STD_CLK(audio, 5);
+DECLARE_STD_CLK(usbmph, 6);
+DECLARE_STD_CLKP(etha, 7, eth_parents);
+DECLARE_STD_CLK(pciea, 8);
+DECLARE_STD_CLK(static, 9);
+DECLARE_STD_CLK(ethb, 10);
+DECLARE_STD_CLK(pcieb, 11);
+DECLARE_STD_CLK(ref600, 12);
+DECLARE_STD_CLK(usbdev, 13);
+
+struct clk_hw *std_clk_hw_tbl[] = {
+ &clk_leon.hw,
+ &clk_dma_sgdma.hw,
+ &clk_cipher.hw,
+ &clk_sd.hw,
+ &clk_sata.hw,
+ &clk_audio.hw,
+ &clk_usbmph.hw,
+ &clk_etha.hw,
+ &clk_pciea.hw,
+ &clk_static.hw,
+ &clk_ethb.hw,
+ &clk_pcieb.hw,
+ &clk_ref600.hw,
+ &clk_usbdev.hw,
+};
+
+struct clk *std_clk_tbl[ARRAY_SIZE(std_clk_hw_tbl)];
+
+static struct clk_onecell_data std_clk_data;
+
+void __init oxnas_init_stdclk(struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(std_clk_hw_tbl); i++) {
+ std_clk_tbl[i] = clk_register(NULL, std_clk_hw_tbl[i]);
+ BUG_ON(IS_ERR(std_clk_tbl[i]));
+ }
+ std_clk_data.clks = std_clk_tbl;
+ std_clk_data.clk_num = ARRAY_SIZE(std_clk_tbl);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &std_clk_data);
+}
+CLK_OF_DECLARE(oxnas_pllstd, "plxtech,nas782x-stdclk", oxnas_init_stdclk);
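+
+/*
+ * Consumers select one of these gates by its index in std_clk_hw_tbl via
+ * of_clk_src_onecell_get, e.g. "clocks = <&stdclk 4>" for the sata gate
+ * (the "stdclk" label is illustrative only).
+ */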
+
+void __init oxnas_init_plla(struct device_node *np)
+{
+ struct clk *clk;
+
+ clk = clk_register(NULL, &plla_hw);
+ BUG_ON(IS_ERR(clk));
+ /* mark it as enabled */
+ clk_prepare_enable(clk);
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+CLK_OF_DECLARE(oxnas_plla, "plxtech,nas782x-plla", oxnas_init_plla);
+
+void __init oxnas_init_pllb(struct device_node *np)
+{
+ struct clk *clk;
+
+ node_pllb = np;
+
+ clk = clk_register(NULL, &pllb_hw);
+ BUG_ON(IS_ERR(clk));
+ of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+CLK_OF_DECLARE(oxnas_pllb, "plxtech,nas782x-pllb", oxnas_init_pllb);
diff --git a/target/linux/oxnas/files/drivers/clocksource/oxnas_rps_timer.c b/target/linux/oxnas/files/drivers/clocksource/oxnas_rps_timer.c
new file mode 100644
index 0000000000..7c8c4cf435
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/clocksource/oxnas_rps_timer.c
@@ -0,0 +1,96 @@
+/*
+ * arch/arm/mach-ox820/rps-time.c
+ *
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clockchips.h>
+#include <linux/clk.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/sched_clock.h>
+#include <mach/hardware.h>
+
+enum {
+ TIMER_LOAD = 0,
+ TIMER_CURR = 4,
+ TIMER_CTRL = 8,
+ TIMER_CLRINT = 0xC,
+
+ TIMER_BITS = 24,
+
+ TIMER_MAX_VAL = (1 << TIMER_BITS) - 1,
+
+ TIMER_PERIODIC = (1 << 6),
+ TIMER_ENABLE = (1 << 7),
+
+ TIMER_DIV1 = (0 << 2),
+ TIMER_DIV16 = (1 << 2),
+ TIMER_DIV256 = (2 << 2),
+
+ TIMER1_OFFSET = 0,
+ TIMER2_OFFSET = 0x20,
+
+};
+
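+/*
+ * The RPS timer counts down; the value is inverted here (and
+ * clocksource_mmio_readl_down is used below) so that the core sees an
+ * up-counting clock.
+ */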
+static u64 notrace rps_read_sched_clock(void)
+{
+ return ~readl_relaxed(RPSA_TIMER2_VAL);
+}
+
+static void __init rps_clocksource_init(void __iomem *base, ulong ref_rate)
+{
+ int ret;
+ ulong clock_rate;
+ /* use prescale 16 */
+ clock_rate = ref_rate / 16;
+
+ iowrite32(TIMER_MAX_VAL, base + TIMER_LOAD);
+ iowrite32(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
+ base + TIMER_CTRL);
+
+ ret = clocksource_mmio_init(base + TIMER_CURR, "rps_clocksource_timer",
+ clock_rate, 250, TIMER_BITS,
+ clocksource_mmio_readl_down);
+ if (ret)
+ panic("can't register clocksource\n");
+
+ sched_clock_register(rps_read_sched_clock, TIMER_BITS, clock_rate);
+}
+
+static void __init rps_timer_init(struct device_node *np)
+{
+ struct clk *refclk;
+ unsigned long ref_rate;
+ void __iomem *base;
+
+ refclk = of_clk_get(np, 0);
+
+ if (IS_ERR(refclk) || clk_prepare_enable(refclk))
+		panic("rps_timer_init: failed to get or enable refclk\n");
+ ref_rate = clk_get_rate(refclk);
+
+ base = of_iomap(np, 0);
+ if (!base)
+ panic("rps_timer_init: failed to map io\n");
+
+ rps_clocksource_init(base + TIMER2_OFFSET, ref_rate);
+}
+
+CLOCKSOURCE_OF_DECLARE(nas782x, "plxtech,nas782x-rps-timer", rps_timer_init);
diff --git a/target/linux/oxnas/files/drivers/irqchip/irq-rps.c b/target/linux/oxnas/files/drivers/irqchip/irq-rps.c
new file mode 100644
index 0000000000..5795406fef
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/irqchip/irq-rps.c
@@ -0,0 +1,146 @@
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include "irqchip.h"
+
+struct rps_chip_data {
+ void __iomem *base;
+ struct irq_chip chip;
+ struct irq_domain *domain;
+} rps_data;
+
+enum {
+ RPS_IRQ_BASE = 64,
+ RPS_IRQ_COUNT = 32,
+ PRS_HWIRQ_BASE = 0,
+
+ RPS_STATUS = 0,
+ RPS_RAW_STATUS = 4,
+ RPS_UNMASK = 8,
+ RPS_MASK = 0xc,
+};
+
+/*
+ * Routines to acknowledge, disable and enable interrupts
+ */
+static void rps_mask_irq(struct irq_data *d)
+{
+ struct rps_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq);
+
+ iowrite32(mask, chip_data->base + RPS_MASK);
+}
+
+static void rps_unmask_irq(struct irq_data *d)
+{
+ struct rps_chip_data *chip_data = irq_data_get_irq_chip_data(d);
+ u32 mask = BIT(d->hwirq);
+
+ iowrite32(mask, chip_data->base + RPS_UNMASK);
+}
+
+static struct irq_chip rps_chip = {
+ .name = "RPS",
+ .irq_mask = rps_mask_irq,
+ .irq_unmask = rps_unmask_irq,
+};
+
+static int rps_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *controller,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (d->of_node != controller)
+ return -EINVAL;
+ if (intsize < 1)
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+	/* the trigger type is not known from the hardware; default to level high */
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int rps_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &rps_chip, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chip_data(irq, d->host_data);
+ return 0;
+}
+
+const struct irq_domain_ops rps_irq_domain_ops = {
+ .map = rps_irq_domain_map,
+ .xlate = rps_irq_domain_xlate,
+};
+
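+/*
+ * Only the lowest pending source is dispatched per invocation; while other
+ * sources remain pending the (presumably level-triggered) parent interrupt
+ * should fire again and bring us back here.
+ */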
+static void rps_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct rps_chip_data *chip_data = irq_get_handler_data(irq);
+ struct irq_chip *chip = irq_get_chip(irq);
+ unsigned int cascade_irq, rps_irq;
+ u32 status;
+
+ chained_irq_enter(chip, desc);
+
+ status = ioread32(chip_data->base + RPS_STATUS);
+ rps_irq = __ffs(status);
+ cascade_irq = irq_find_mapping(chip_data->domain, rps_irq);
+
+ if (unlikely(rps_irq >= RPS_IRQ_COUNT))
+ handle_bad_irq(cascade_irq, desc);
+ else
+ generic_handle_irq(cascade_irq);
+
+ chained_irq_exit(chip, desc);
+}
+
+#ifdef CONFIG_OF
+int __init rps_of_init(struct device_node *node, struct device_node *parent)
+{
+ void __iomem *rps_base;
+ int irq_start = RPS_IRQ_BASE;
+ int irq_base;
+ int irq;
+
+ if (WARN_ON(!node))
+ return -ENODEV;
+
+ rps_base = of_iomap(node, 0);
+ WARN(!rps_base, "unable to map rps registers\n");
+ rps_data.base = rps_base;
+
+ irq_base = irq_alloc_descs(irq_start, 0, RPS_IRQ_COUNT, numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+ WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
+ irq_start);
+ irq_base = irq_start;
+ }
+
+ rps_data.domain = irq_domain_add_legacy(node, RPS_IRQ_COUNT, irq_base,
+ PRS_HWIRQ_BASE, &rps_irq_domain_ops, &rps_data);
+
+ if (WARN_ON(!rps_data.domain))
+ return -ENOMEM;
+
+ if (parent) {
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq_set_handler_data(irq, &rps_data) != 0)
+ BUG();
+ irq_set_chained_handler(irq, rps_handle_cascade_irq);
+ }
+ return 0;
+}
+
+IRQCHIP_DECLARE(nas782x, "plxtech,nas782x-rps", rps_of_init);
+#endif
diff --git a/target/linux/oxnas/files/drivers/mtd/nand/oxnas_nand.c b/target/linux/oxnas/files/drivers/mtd/nand/oxnas_nand.c
new file mode 100644
index 0000000000..55eb009bac
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/mtd/nand/oxnas_nand.c
@@ -0,0 +1,102 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/nand.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <mach/utils.h>
+
+/* nand commands */
+#define NAND_CMD_ALE BIT(18)
+#define NAND_CMD_CLE BIT(19)
+#define NAND_CMD_CS 0
+#define NAND_CMD_RESET 0xff
+#define NAND_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_DATA (NAND_CMD_CS)
+
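+/*
+ * The controller appears to decode address bits 18/19 of the NAND window as
+ * CLE/ALE, so commands and addresses are issued by writing the byte to the
+ * data window with the corresponding bit set in the address.
+ */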
+static void oxnas_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ nandaddr &= ~(NAND_CMD | NAND_ADDR);
+ if (ctrl & NAND_CLE)
+ nandaddr |= NAND_CMD;
+ else if (ctrl & NAND_ALE)
+ nandaddr |= NAND_ADDR;
+ this->IO_ADDR_W = (void __iomem *) nandaddr;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, (void __iomem *) nandaddr);
+}
+
+static int oxnas_nand_probe(struct platform_device *pdev)
+{
+ /* enable clock and release static block reset */
+ struct clk *clk = of_clk_get(pdev->dev.of_node, 0);
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_prepare_enable(clk);
+ device_reset(&pdev->dev);
+
+ return 0;
+}
+
+/* allow users to override the partitions defined in DT via the cmdline */
+static const char * part_probes[] = { "cmdlinepart", "ofpart", NULL };
+
+static struct platform_nand_data oxnas_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_delay = 30,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .probe = oxnas_nand_probe,
+ .cmd_ctrl = oxnas_cmd_ctrl,
+ }
+};
+
+/*
+ * Try to find the node in the DT. If it is available, attach our
+ * platform_nand_data to it.
+ */
+static int __init oxnas_register_nand(void)
+{
+ struct device_node *node;
+ struct platform_device *pdev;
+
+ node = of_find_compatible_node(NULL, NULL, "plxtech,nand-nas782x");
+ if (!node)
+ return -ENOENT;
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -EINVAL;
+ pdev->dev.platform_data = &oxnas_nand_data;
+ of_node_put(node);
+ return 0;
+}
+
+subsys_initcall(oxnas_register_nand);
+
+static const struct of_device_id oxnas_nand_ids[] = {
+ { .compatible = "plxtech,nand-nas782x"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, oxnas_nand_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ma Haijun");
+MODULE_DESCRIPTION("NAND glue for Oxnas platforms");
+MODULE_ALIAS("platform:oxnas_nand");
diff --git a/target/linux/oxnas/files/drivers/pci/host/pcie-oxnas.c b/target/linux/oxnas/files/drivers/pci/host/pcie-oxnas.c
new file mode 100644
index 0000000000..9e8d6d9f93
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/pci/host/pcie-oxnas.c
@@ -0,0 +1,676 @@
+/*
+ * PCIe driver for PLX NAS782X SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/reset.h>
+#include <mach/iomap.h>
+#include <mach/hardware.h>
+#include <mach/utils.h>
+
+#define VERSION_ID_MAGIC 0x082510b5
+#define LINK_UP_TIMEOUT_SECONDS 1
+#define NUM_CONTROLLERS 1
+
+enum {
+ PCIE_DEVICE_TYPE_MASK = 0x0F,
+ PCIE_DEVICE_TYPE_ENDPOINT = 0,
+ PCIE_DEVICE_TYPE_LEGACY_ENDPOINT = 1,
+ PCIE_DEVICE_TYPE_ROOT = 4,
+
+ PCIE_LTSSM = BIT(4),
+ PCIE_READY_ENTR_L23 = BIT(9),
+ PCIE_LINK_UP = BIT(11),
+ PCIE_OBTRANS = BIT(12),
+};
+
+enum {
+ HCSL_BIAS_ON = BIT(0),
+ HCSL_PCIE_EN = BIT(1),
+ HCSL_PCIEA_EN = BIT(2),
+ HCSL_PCIEB_EN = BIT(3),
+};
+
+enum {
+ /* pcie phy reg offset */
+ PHY_ADDR = 0,
+ PHY_DATA = 4,
+ /* phy data reg bits */
+ READ_EN = BIT(16),
+ WRITE_EN = BIT(17),
+ CAP_DATA = BIT(18),
+};
+
+/* core config registers */
+enum {
+ PCI_CONFIG_VERSION_DEVICEID = 0,
+ PCI_CONFIG_COMMAND_STATUS = 4,
+};
+
+/* inbound config registers */
+enum {
+ IB_ADDR_XLATE_ENABLE = 0xFC,
+
+ /* bits */
+ ENABLE_IN_ADDR_TRANS = BIT(0),
+};
+
+/* outbound config registers, offset relative to PCIE_POM0_MEM_ADDR */
+enum {
+ PCIE_POM0_MEM_ADDR = 0,
+ PCIE_POM1_MEM_ADDR = 4,
+ PCIE_IN0_MEM_ADDR = 8,
+ PCIE_IN1_MEM_ADDR = 12,
+ PCIE_IN_IO_ADDR = 16,
+ PCIE_IN_CFG0_ADDR = 20,
+ PCIE_IN_CFG1_ADDR = 24,
+ PCIE_IN_MSG_ADDR = 28,
+ PCIE_IN0_MEM_LIMIT = 32,
+ PCIE_IN1_MEM_LIMIT = 36,
+ PCIE_IN_IO_LIMIT = 40,
+ PCIE_IN_CFG0_LIMIT = 44,
+ PCIE_IN_CFG1_LIMIT = 48,
+ PCIE_IN_MSG_LIMIT = 52,
+ PCIE_AHB_SLAVE_CTRL = 56,
+
+ PCIE_SLAVE_BE_SHIFT = 22,
+};
+
+#define ADDR_VAL(val) ((val) & 0xFFFF)
+#define DATA_VAL(val) ((val) & 0xFFFF)
+
+#define PCIE_SLAVE_BE(val) ((val) << PCIE_SLAVE_BE_SHIFT)
+#define PCIE_SLAVE_BE_MASK PCIE_SLAVE_BE(0xF)
+
+struct oxnas_pcie_shared {
+	/* all accesses appear to be serialized, so no lock is required */
+ int refcount;
+};
+
+/* Structure representing one PCIe interface */
+struct oxnas_pcie {
+ void __iomem *cfgbase;
+ void __iomem *base;
+ void __iomem *inbound;
+ void __iomem *outbound;
+ void __iomem *pcie_ctrl;
+
+ int haslink;
+ struct platform_device *pdev;
+ struct resource io;
+ struct resource cfg;
+ struct resource pre_mem; /* prefetchable */
+ struct resource non_mem; /* non-prefetchable */
+ struct resource busn; /* max available bus numbers */
+ int card_reset; /* gpio pin, optional */
+ unsigned hcsl_en; /* hcsl pci enable bit */
+ struct clk *clk;
+ struct clk *busclk; /* for pcie bus, actually the PLLB */
+ void *private_data[1];
+ spinlock_t lock;
+};
+
+static struct oxnas_pcie_shared pcie_shared = {
+ .refcount = 0,
+};
+
+static inline struct oxnas_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+
+static inline void set_out_lanes(struct oxnas_pcie *pcie, unsigned lanes)
+{
+ oxnas_register_value_mask(pcie->outbound + PCIE_AHB_SLAVE_CTRL,
+ PCIE_SLAVE_BE_MASK, PCIE_SLAVE_BE(lanes));
+ wmb();
+}
+
+static int oxnas_pcie_link_up(struct oxnas_pcie *pcie)
+{
+ unsigned long end;
+
+ /* Poll for PCIE link up */
+ end = jiffies + (LINK_UP_TIMEOUT_SECONDS * HZ);
+ while (!time_after(jiffies, end)) {
+ if (readl(pcie->pcie_ctrl) & PCIE_LINK_UP)
+ return 1;
+ }
+ return 0;
+}
+
+static void __init oxnas_pcie_setup_hw(struct oxnas_pcie *pcie)
+{
+ /* We won't have any inbound address translation. This allows PCI
+ * devices to access anywhere in the AHB address map. Might be regarded
+ * as a bit dangerous, but let's get things working before we worry
+ * about that
+ */
+ oxnas_register_clear_mask(pcie->inbound + IB_ADDR_XLATE_ENABLE,
+ ENABLE_IN_ADDR_TRANS);
+ wmb();
+
+ /*
+ * Program outbound translation windows
+ *
+ * Outbound window is what is referred to as "PCI client" region in HRM
+ *
+ * Could use the larger alternative address space to get >>64M regions
+ * for graphics cards etc., but will not bother at this point.
+ *
+ * IP bug means that AMBA window size must be a power of 2
+ *
+ * Set mem0 window for first 16MB of outbound window non-prefetchable
+ * Set mem1 window for second 16MB of outbound window prefetchable
+ * Set io window for next 16MB of outbound window
+ * Set cfg0 for final 1MB of outbound window
+ *
+ * Ignore mem1, cfg1 and msg windows for now as no obvious use cases for
+ * 820 that would need them
+ *
+ * Probably ideally want no offset between mem0 window start as seen by
+ * ARM and as seen on PCI bus and get Linux to assign memory regions to
+ * PCI devices using the same "PCI client" region start address as seen
+ * by ARM
+ */
+
+ /* Set PCIeA mem0 region to be 1st 16MB of the 64MB PCIeA window */
+ writel_relaxed(pcie->non_mem.start, pcie->outbound + PCIE_IN0_MEM_ADDR);
+ writel_relaxed(pcie->non_mem.end, pcie->outbound + PCIE_IN0_MEM_LIMIT);
+ writel_relaxed(pcie->non_mem.start, pcie->outbound + PCIE_POM0_MEM_ADDR);
+
+ /* Set PCIeA mem1 region to be 2nd 16MB of the 64MB PCIeA window */
+ writel_relaxed(pcie->pre_mem.start, pcie->outbound + PCIE_IN1_MEM_ADDR);
+ writel_relaxed(pcie->pre_mem.end, pcie->outbound + PCIE_IN1_MEM_LIMIT);
+ writel_relaxed(pcie->pre_mem.start, pcie->outbound + PCIE_POM1_MEM_ADDR);
+
+	/* Set PCIeA io to be the third 16M region of the 64MB PCIeA window */
+ writel_relaxed(pcie->io.start, pcie->outbound + PCIE_IN_IO_ADDR);
+ writel_relaxed(pcie->io.end, pcie->outbound + PCIE_IN_IO_LIMIT);
+
+	/* Set PCIeA cfg0 to be the last 16M region of the 64MB PCIeA window */
+ writel_relaxed(pcie->cfg.start, pcie->outbound + PCIE_IN_CFG0_ADDR);
+ writel_relaxed(pcie->cfg.end, pcie->outbound + PCIE_IN_CFG0_LIMIT);
+ wmb();
+
+ /* Enable outbound address translation */
+ oxnas_register_set_mask(pcie->pcie_ctrl, PCIE_OBTRANS);
+ wmb();
+
+ /*
+ * Program PCIe command register for core to:
+ * enable memory space
+ * enable bus master
+ * enable io
+ */
+ writel_relaxed(7, pcie->base + PCI_CONFIG_COMMAND_STATUS);
+ /* which is which */
+ wmb();
+}
+
+static unsigned oxnas_pcie_cfg_to_offset(
+ struct pci_sys_data *sys,
+ unsigned char bus_number,
+ unsigned int devfn,
+ int where)
+{
+ unsigned int function = PCI_FUNC(devfn);
+ unsigned int slot = PCI_SLOT(devfn);
+ unsigned char bus_number_offset;
+
+ bus_number_offset = bus_number - sys->busnr;
+
+ /*
+ * We'll assume for now that the offset, function, slot, bus encoding
+ * should map onto linear, contiguous addresses in PCIe config space,
+ * albeit that the majority will be unused as only slot 0 is valid for
+ * any PCIe bus and most devices have only function 0
+ *
+ * Could be that PCIe in fact works by not encoding the slot number into
+ * the config space address as it's known that only slot 0 is valid.
+ * We'll have to experiment if/when we get a PCIe switch connected to
+ * the PCIe host
+ */
+ return (bus_number_offset << 20) | (slot << 15) | (function << 12) |
+ (where & ~3);
+}
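+/*
+ * Worked example for oxnas_pcie_cfg_to_offset(): bus offset 1, slot 0,
+ * function 0, dword at 0x10 maps to
+ * (1 << 20) | (0 << 15) | (0 << 12) | 0x10 = 0x100010 within the cfg window.
+ */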
+
+/* PCI configuration space write function */
+static int oxnas_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+ unsigned long flags;
+ struct oxnas_pcie *pcie = sys_to_pcie(bus->sysdata);
+ unsigned offset;
+ u32 value;
+ u32 lanes;
+
+ /* Only a single device per bus for PCIe point-to-point links */
+ if (PCI_SLOT(devfn) > 0)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (!pcie->haslink)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ offset = oxnas_pcie_cfg_to_offset(bus->sysdata, bus->number, devfn,
+ where);
+
+ value = val << (8 * (where & 3));
+ lanes = (0xf >> (4-size)) << (where & 3);
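+	/* e.g. a 2-byte write at (where & 3) == 2 yields lanes == 0xc */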
+	/* This can race with mem and io writes, but the chance is low: config
+	 * writes normally happen during driver initialisation and do not
+	 * interleave with other accesses. Many PCIe cards also restrict
+	 * themselves to dword (4-byte) mem/io accesses, so we do not bother
+	 * copying the ugly work-around here. */
+ spin_lock_irqsave(&pcie->lock, flags);
+ set_out_lanes(pcie, lanes);
+ writel_relaxed(value, pcie->cfgbase + offset);
+ set_out_lanes(pcie, 0xf);
+ spin_unlock_irqrestore(&pcie->lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCI configuration space read function */
+static int oxnas_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ struct oxnas_pcie *pcie = sys_to_pcie(bus->sysdata);
+ unsigned offset;
+ u32 value;
+ u32 left_bytes, right_bytes;
+
+ /* Only a single device per bus for PCIe point-to-point links */
+ if (PCI_SLOT(devfn) > 0) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (!pcie->haslink) {
+ *val = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ offset = oxnas_pcie_cfg_to_offset(bus->sysdata, bus->number, devfn,
+ where);
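+	/* extract the requested bytes from the 32-bit read below; e.g. a
+	 * 1-byte read at (where & 3) == 1 extracts bits 15:8 of the dword */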
+ value = readl_relaxed(pcie->cfgbase + offset);
+ left_bytes = where & 3;
+ right_bytes = 4 - left_bytes - size;
+ value <<= right_bytes * 8;
+ value >>= (left_bytes + right_bytes) * 8;
+ *val = value;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops oxnas_pcie_ops = {
+ .read = oxnas_pcie_rd_conf,
+ .write = oxnas_pcie_wr_conf,
+};
+
+static int __init oxnas_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct oxnas_pcie *pcie = sys_to_pcie(sys);
+
+ pci_add_resource_offset(&sys->resources, &pcie->non_mem, sys->mem_offset);
+ pci_add_resource_offset(&sys->resources, &pcie->pre_mem, sys->mem_offset);
+ pci_add_resource_offset(&sys->resources, &pcie->io, sys->io_offset);
+ pci_add_resource(&sys->resources, &pcie->busn);
+ if (sys->busnr == 0) { /* default one */
+ sys->busnr = pcie->busn.start;
+ }
+	/* do not use devm_ioremap_resource(); it does not accept the cfg resource */
+ pcie->cfgbase = devm_ioremap(&pcie->pdev->dev, pcie->cfg.start,
+ resource_size(&pcie->cfg));
+ if (!pcie->cfgbase)
+ return -ENOMEM;
+
+ oxnas_pcie_setup_hw(pcie);
+
+ return 1;
+}
+
+static void __init oxnas_pcie_enable(struct device *dev, struct oxnas_pcie *pcie)
+{
+ struct hw_pci hw;
+ int i;
+
+ memset(&hw, 0, sizeof(hw));
+ for (i = 0; i < NUM_CONTROLLERS; i++)
+ pcie->private_data[i] = pcie;
+
+ hw.nr_controllers = NUM_CONTROLLERS;
+	/* using a pointer to stack data is questionable, though it is valid in this case */
+ hw.private_data = pcie->private_data;
+ hw.setup = oxnas_pcie_setup;
+ hw.map_irq = of_irq_parse_and_map_pci;
+ hw.ops = &oxnas_pcie_ops;
+
+	/* pass dev so the OF tree is maintained; interrupt mapping relies on this */
+ pci_common_init_dev(dev, &hw);
+}
+
+void oxnas_pcie_init_shared_hw(struct platform_device *pdev,
+ void __iomem *phybase)
+{
+ struct reset_control *rstc;
+ int ret;
+
+ /* generate clocks from HCSL buffers, shared parts */
+ writel(HCSL_BIAS_ON|HCSL_PCIE_EN, SYS_CTRL_HCSL_CTRL);
+
+ /* Ensure PCIe PHY is properly reset */
+ rstc = reset_control_get(&pdev->dev, "phy");
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ } else {
+ ret = reset_control_reset(rstc);
+ reset_control_put(rstc);
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "phy reset failed %d\n", ret);
+ return;
+ }
+
+	/* Enable PCIe pre-emphasis; the meaning of these values is not documented */
+
+ writel(ADDR_VAL(0x0014), phybase + PHY_ADDR);
+ writel(DATA_VAL(0xce10) | CAP_DATA, phybase + PHY_DATA);
+ writel(DATA_VAL(0xce10) | WRITE_EN, phybase + PHY_DATA);
+
+ writel(ADDR_VAL(0x2004), phybase + PHY_ADDR);
+ writel(DATA_VAL(0x82c7) | CAP_DATA, phybase + PHY_DATA);
+ writel(DATA_VAL(0x82c7) | WRITE_EN, phybase + PHY_DATA);
+}
+
+static int oxnas_pcie_shared_init(struct platform_device *pdev)
+{
+ if (++pcie_shared.refcount == 1) {
+ /* we are the first */
+ struct device_node *np = pdev->dev.of_node;
+ void __iomem *phy = of_iomap(np, 2);
+ if (!phy) {
+ --pcie_shared.refcount;
+ return -ENOMEM;
+ }
+ oxnas_pcie_init_shared_hw(pdev, phy);
+ iounmap(phy);
+ return 0;
+ } else {
+ return 0;
+ }
+}
+
+#if 0
+/* maybe we will call it when enter low power state */
+static void oxnas_pcie_shared_deinit(struct platform_device *pdev)
+{
+ if (--pcie_shared.refcount == 0) {
+ /* no cleanup needed */;
+ }
+}
+#endif
+
+static int __init
+oxnas_pcie_map_registers(struct platform_device *pdev,
+ struct device_node *np,
+ struct oxnas_pcie *pcie)
+{
+ struct resource regs;
+ int ret = 0;
+ u32 outbound_ctrl_offset;
+ u32 pcie_ctrl_offset;
+
+ /* 2 is reserved for shared phy */
+ ret = of_address_to_resource(np, 0, &regs);
+ if (ret)
+ return -EINVAL;
+	pcie->base = devm_ioremap_resource(&pdev->dev, &regs);
+	if (IS_ERR(pcie->base))
+		return PTR_ERR(pcie->base);
+
+ ret = of_address_to_resource(np, 1, &regs);
+ if (ret)
+ return -EINVAL;
+	pcie->inbound = devm_ioremap_resource(&pdev->dev, &regs);
+	if (IS_ERR(pcie->inbound))
+		return PTR_ERR(pcie->inbound);
+
+
+ if (of_property_read_u32(np, "plxtech,pcie-outbound-offset",
+ &outbound_ctrl_offset))
+ return -EINVAL;
+	/* SYSCTRL is shared by many drivers, so it is mapped by the board file */
+ pcie->outbound = IOMEM(OXNAS_SYSCRTL_BASE_VA + outbound_ctrl_offset);
+
+ if (of_property_read_u32(np, "plxtech,pcie-ctrl-offset",
+ &pcie_ctrl_offset))
+ return -EINVAL;
+ pcie->pcie_ctrl = IOMEM(OXNAS_SYSCRTL_BASE_VA + pcie_ctrl_offset);
+
+ return 0;
+}
+
+static int __init oxnas_pcie_init_res(struct platform_device *pdev,
+ struct oxnas_pcie *pcie,
+ struct device_node *np)
+{
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int ret;
+
+ if (of_pci_range_parser_init(&parser, np))
+ return -EINVAL;
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+
+ unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
+ if (restype == IORESOURCE_IO) {
+ of_pci_range_to_resource(&range, np, &pcie->io);
+ pcie->io.name = "I/O";
+ }
+ if (restype == IORESOURCE_MEM) {
+ if (range.flags & IORESOURCE_PREFETCH) {
+ of_pci_range_to_resource(&range, np, &pcie->pre_mem);
+ pcie->pre_mem.name = "PRE MEM";
+ } else {
+ of_pci_range_to_resource(&range, np, &pcie->non_mem);
+ pcie->non_mem.name = "NON MEM";
+ }
+
+ }
+ if (restype == 0)
+ of_pci_range_to_resource(&range, np, &pcie->cfg);
+ }
+
+ /* Get the bus range */
+ ret = of_pci_parse_bus_range(np, &pcie->busn);
+
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
+ ret);
+ return ret;
+ }
+
+ pcie->card_reset = of_get_gpio(np, 0);
+ if (pcie->card_reset < 0)
+		dev_info(&pdev->dev, "no card reset gpio specified\n");
+
+ if (of_property_read_u32(np, "plxtech,pcie-hcsl-bit", &pcie->hcsl_en))
+ return -EINVAL;
+
+ pcie->clk = of_clk_get_by_name(np, "pcie");
+ if (IS_ERR(pcie->clk)) {
+ return PTR_ERR(pcie->clk);
+ }
+
+ pcie->busclk = of_clk_get_by_name(np, "busclk");
+ if (IS_ERR(pcie->busclk)) {
+ clk_put(pcie->clk);
+ return PTR_ERR(pcie->busclk);
+ }
+
+ return 0;
+}
+
+static void oxnas_pcie_init_hw(struct platform_device *pdev,
+ struct oxnas_pcie *pcie)
+{
+ u32 version_id;
+ int ret;
+
+ clk_prepare_enable(pcie->busclk);
+
+	/* reset the PCIe card via the hard-wired gpio pin, if present */
+ if (pcie->card_reset >= 0 &&
+ !gpio_direction_output(pcie->card_reset, 0)) {
+ wmb();
+ mdelay(10);
+ /* must tri-state the pin to pull it up */
+ gpio_direction_input(pcie->card_reset);
+ wmb();
+ mdelay(100);
+ }
+
+ oxnas_register_set_mask(SYS_CTRL_HCSL_CTRL, BIT(pcie->hcsl_en));
+
+ /* core */
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "core reset failed %d\n", ret);
+ return;
+ }
+
+ /* Start PCIe core clocks */
+ clk_prepare_enable(pcie->clk);
+
+ version_id = readl_relaxed(pcie->base + PCI_CONFIG_VERSION_DEVICEID);
+ dev_info(&pdev->dev, "PCIe version/deviceID 0x%x\n", version_id);
+
+ if (version_id != VERSION_ID_MAGIC) {
+ dev_info(&pdev->dev, "PCIe controller not found\n");
+ pcie->haslink = 0;
+ return;
+ }
+
+ /* allow entry to L23 state */
+ oxnas_register_set_mask(pcie->pcie_ctrl, PCIE_READY_ENTR_L23);
+
+ /* Set PCIe core into RootCore mode */
+ oxnas_register_value_mask(pcie->pcie_ctrl, PCIE_DEVICE_TYPE_MASK,
+ PCIE_DEVICE_TYPE_ROOT);
+ wmb();
+
+ /* Bring up the PCI core */
+ oxnas_register_set_mask(pcie->pcie_ctrl, PCIE_LTSSM);
+ wmb();
+}
+
+static int __init oxnas_pcie_probe(struct platform_device *pdev)
+{
+ struct oxnas_pcie *pcie;
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(struct oxnas_pcie),
+ GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->pdev = pdev;
+ pcie->haslink = 1;
+ spin_lock_init(&pcie->lock);
+
+ ret = oxnas_pcie_init_res(pdev, pcie, np);
+ if (ret)
+ return ret;
+ if (pcie->card_reset >= 0) {
+ ret = gpio_request_one(pcie->card_reset, GPIOF_DIR_IN,
+ dev_name(&pdev->dev));
+ if (ret) {
+ dev_err(&pdev->dev, "cannot request gpio pin %d\n",
+ pcie->card_reset);
+ return ret;
+ }
+ }
+
+ ret = oxnas_pcie_map_registers(pdev, np, pcie);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot map registers\n");
+ goto err_free_gpio;
+ }
+
+ ret = oxnas_pcie_shared_init(pdev);
+ if (ret)
+ goto err_free_gpio;
+
+ /* if hw not found, haslink cleared */
+ oxnas_pcie_init_hw(pdev, pcie);
+
+ if (pcie->haslink && oxnas_pcie_link_up(pcie)) {
+ pcie->haslink = 1;
+ dev_info(&pdev->dev, "link up\n");
+ } else {
+ pcie->haslink = 0;
+ dev_info(&pdev->dev, "link down\n");
+ }
+ /* should we register our controller even when pcie->haslink is 0 ? */
+ /* register the controller with framework */
+ oxnas_pcie_enable(&pdev->dev, pcie);
+
+ return 0;
+
+err_free_gpio:
+	if (pcie->card_reset >= 0)
+ gpio_free(pcie->card_reset);
+
+ return ret;
+}
+
+static const struct of_device_id oxnas_pcie_of_match_table[] = {
+ { .compatible = "plxtech,nas782x-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, oxnas_pcie_of_match_table);
+
+static struct platform_driver oxnas_pcie_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "oxnas-pcie",
+ .of_match_table =
+ of_match_ptr(oxnas_pcie_of_match_table),
+ },
+};
+
+static int __init oxnas_pcie_init(void)
+{
+ return platform_driver_probe(&oxnas_pcie_driver,
+ oxnas_pcie_probe);
+}
+
+subsys_initcall(oxnas_pcie_init);
+
+MODULE_AUTHOR("Ma Haijun <mahaijuns@gmail.com>");
+MODULE_DESCRIPTION("NAS782x PCIe driver");
+MODULE_LICENSE("GPLv2");
diff --git a/target/linux/oxnas/files/drivers/pinctrl/pinctrl-oxnas.c b/target/linux/oxnas/files/drivers/pinctrl/pinctrl-oxnas.c
new file mode 100644
index 0000000000..6cc8f72c60
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/pinctrl/pinctrl-oxnas.c
@@ -0,0 +1,1480 @@
+/*
+ * oxnas pinctrl driver based on at91 pinctrl driver
+ *
+ * Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
+ *
+ * Under GPLv2 only
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+/* Since we request GPIOs from ourself */
+#include <linux/pinctrl/consumer.h>
+
+#include "core.h"
+
+#include <mach/utils.h>
+
+#define MAX_NB_GPIO_PER_BANK 32
+#define MAX_GPIO_BANKS 2
+
+struct oxnas_gpio_chip {
+ struct gpio_chip chip;
+ struct pinctrl_gpio_range range;
+ void __iomem *regbase; /* GPIOA/B virtual address */
+ void __iomem *ctrlbase; /* SYS/SEC_CTRL virtual address */
+ struct irq_domain *domain; /* associated irq domain */
+};
+
+#define to_oxnas_gpio_chip(c) container_of(c, struct oxnas_gpio_chip, chip)
+
+static struct oxnas_gpio_chip *gpio_chips[MAX_GPIO_BANKS];
+
+static int gpio_banks;
+
+#define PULL_UP (1 << 0)
+#define PULL_DOWN (1 << 1)
+#define DEBOUNCE (1 << 2)
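+
+/*
+ * A pin config word packs these flags in its low bits and, when DEBOUNCE is
+ * set, the debounce clock divider in the DEBOUNCE_MASK field (bits 29:16),
+ * mirroring the hardware CLOCK_DIV layout.
+ */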
+
+/**
+ * struct oxnas_pmx_func - describes pinmux functions
+ * @name: the name of this specific function
+ * @groups: corresponding pin groups
+ * @ngroups: the number of groups
+ */
+struct oxnas_pmx_func {
+ const char *name;
+ const char **groups;
+ unsigned ngroups;
+};
+
+enum oxnas_mux {
+ OXNAS_PINMUX_GPIO,
+ OXNAS_PINMUX_FUNC2,
+ OXNAS_PINMUX_FUNC3,
+ OXNAS_PINMUX_FUNC4,
+ OXNAS_PINMUX_DEBUG,
+ OXNAS_PINMUX_ALT,
+};
+
+enum {
+ INPUT_VALUE = 0,
+ OUTPUT_ENABLE = 4,
+ IRQ_PENDING = 0xC,
+ OUTPUT_VALUE = 0x10,
+ OUTPUT_SET = 0x14,
+ OUTPUT_CLEAR = 0x18,
+ OUTPUT_EN_SET = 0x1C,
+ OUTPUT_EN_CLEAR = 0x20,
+ DEBOUNCE_ENABLE = 0x24,
+ RE_IRQ_ENABLE = 0x28, /* rising edge */
+ FE_IRQ_ENABLE = 0x2C, /* falling edge */
+ RE_IRQ_PENDING = 0x30, /* rising edge */
+ FE_IRQ_PENDING = 0x34, /* falling edge */
+ CLOCK_DIV = 0x48,
+ PULL_ENABLE = 0x50,
+ PULL_SENSE = 0x54, /* 1 up, 0 down */
+
+
+ DEBOUNCE_MASK = 0x3FFF0000,
+	/* put hw debounce and soft config at the same bit position */
+ DEBOUNCE_SHIFT = 16
+};
+
+enum {
+ PINMUX_SECONDARY_SEL = 0x14,
+ PINMUX_TERTIARY_SEL = 0x8c,
+ PINMUX_QUATERNARY_SEL = 0x94,
+ PINMUX_DEBUG_SEL = 0x9c,
+ PINMUX_ALTERNATIVE_SEL = 0xa4,
+ PINMUX_PULLUP_SEL = 0xac,
+};
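+
+/*
+ * Each select register holds one bit per pin in the bank; a pin reverts to
+ * plain GPIO when none of its select bits are set (see oxnas_mux_get_func()).
+ */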
+
+/**
+ * struct oxnas_pmx_pin - describes a pin mux
+ * @bank: the bank of the pin
+ * @pin: the pin number in the @bank
+ * @mux: the mux mode: gpio or periph_x of the pin, i.e. the alternate function.
+ * @conf: the configuration of the pin: PULL_UP, MULTIDRIVE etc...
+ */
+struct oxnas_pmx_pin {
+ uint32_t bank;
+ uint32_t pin;
+ enum oxnas_mux mux;
+ unsigned long conf;
+};
+
+/**
+ * struct oxnas_pin_group - describes a pin group
+ * @name: the name of this specific pin group
+ * @pins_conf: the mux mode for each pin in this group. The size of this
+ * array is the same as pins.
+ * @pins: an array of discrete physical pins used in this group, taken
+ * from the driver-local pin enumeration space
+ * @npins: the number of pins in this group array, i.e. the number of
+ * elements in .pins so we can iterate over that array
+ */
+struct oxnas_pin_group {
+ const char *name;
+ struct oxnas_pmx_pin *pins_conf;
+ unsigned int *pins;
+ unsigned npins;
+};
+
+struct oxnas_pinctrl {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+
+ int nbanks;
+
+ uint32_t *mux_mask;
+ int nmux;
+
+ struct oxnas_pmx_func *functions;
+ int nfunctions;
+
+ struct oxnas_pin_group *groups;
+ int ngroups;
+};
+
+static inline const struct oxnas_pin_group *oxnas_pinctrl_find_group_by_name(
+ const struct oxnas_pinctrl *info,
+ const char *name)
+{
+ const struct oxnas_pin_group *grp = NULL;
+ int i;
+
+ for (i = 0; i < info->ngroups; i++) {
+ if (strcmp(info->groups[i].name, name))
+ continue;
+
+ grp = &info->groups[i];
+ dev_dbg(info->dev, "%s: %d 0:%d\n", name, grp->npins,
+ grp->pins[0]);
+ break;
+ }
+
+ return grp;
+}
+
+static int oxnas_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->ngroups;
+}
+
+static const char *oxnas_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->groups[selector].name;
+}
+
+static int oxnas_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
+ const unsigned **pins,
+ unsigned *npins)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ if (selector >= info->ngroups)
+ return -EINVAL;
+
+ *pins = info->groups[selector].pins;
+ *npins = info->groups[selector].npins;
+
+ return 0;
+}
+
+static void oxnas_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, "%s", dev_name(pctldev->dev));
+}
+
+static int oxnas_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ const struct oxnas_pin_group *grp;
+ struct pinctrl_map *new_map;
+ struct device_node *parent;
+ int map_num = 1;
+ int i;
+
+ /*
+ * first find the group of this node and check if we need create
+ * config maps for pins
+ */
+ grp = oxnas_pinctrl_find_group_by_name(info, np->name);
+ if (!grp) {
+ dev_err(info->dev, "unable to find group for node %s\n",
+ np->name);
+ return -EINVAL;
+ }
+
+ map_num += grp->npins;
+ new_map = devm_kzalloc(pctldev->dev, sizeof(*new_map) * map_num,
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+
+ *map = new_map;
+ *num_maps = map_num;
+
+ /* create mux map */
+ parent = of_get_parent(np);
+ if (!parent) {
+ devm_kfree(pctldev->dev, new_map);
+ return -EINVAL;
+ }
+ new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
+ new_map[0].data.mux.function = parent->name;
+ new_map[0].data.mux.group = np->name;
+ of_node_put(parent);
+
+ /* create config map */
+ new_map++;
+ for (i = 0; i < grp->npins; i++) {
+ new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
+ new_map[i].data.configs.group_or_pin =
+ pin_get_name(pctldev, grp->pins[i]);
+ new_map[i].data.configs.configs = &grp->pins_conf[i].conf;
+ new_map[i].data.configs.num_configs = 1;
+ }
+
+ dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+ (*map)->data.mux.function, (*map)->data.mux.group, map_num);
+
+ return 0;
+}
+
+static void oxnas_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+}
+
+static const struct pinctrl_ops oxnas_pctrl_ops = {
+ .get_groups_count = oxnas_get_groups_count,
+ .get_group_name = oxnas_get_group_name,
+ .get_group_pins = oxnas_get_group_pins,
+ .pin_dbg_show = oxnas_pin_dbg_show,
+ .dt_node_to_map = oxnas_dt_node_to_map,
+ .dt_free_map = oxnas_dt_free_map,
+};
+
+static void __iomem *pin_to_gpioctrl(struct oxnas_pinctrl *info,
+ unsigned int bank)
+{
+ return gpio_chips[bank]->regbase;
+}
+
+static void __iomem *pin_to_muxctrl(struct oxnas_pinctrl *info,
+ unsigned int bank)
+{
+ return gpio_chips[bank]->ctrlbase;
+}
+
+
+static inline int pin_to_bank(unsigned pin)
+{
+ return pin / MAX_NB_GPIO_PER_BANK;
+}
+
+static unsigned pin_to_mask(unsigned int pin)
+{
+ return 1 << pin;
+}
+
+static void oxnas_mux_disable_interrupt(void __iomem *pio, unsigned mask)
+{
+ oxnas_register_clear_mask(pio + RE_IRQ_ENABLE, mask);
+ oxnas_register_clear_mask(pio + FE_IRQ_ENABLE, mask);
+}
+
+static unsigned oxnas_mux_get_pullup(void __iomem *pio, unsigned pin)
+{
+ return (readl_relaxed(pio + PULL_ENABLE) & BIT(pin)) &&
+ (readl_relaxed(pio + PULL_SENSE) & BIT(pin));
+}
+
+static void oxnas_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
+{
+ if (on) {
+ oxnas_register_set_mask(pio + PULL_SENSE, mask);
+ oxnas_register_set_mask(pio + PULL_ENABLE, mask);
+ } else {
+ oxnas_register_clear_mask(pio + PULL_ENABLE, mask);
+ }
+}
+
+static bool oxnas_mux_get_pulldown(void __iomem *pio, unsigned pin)
+{
+ return (readl_relaxed(pio + PULL_ENABLE) & BIT(pin)) &&
+ (!(readl_relaxed(pio + PULL_SENSE) & BIT(pin)));
+}
+
+static void oxnas_mux_set_pulldown(void __iomem *pio, unsigned mask, bool on)
+{
+ if (on) {
+ oxnas_register_clear_mask(pio + PULL_SENSE, mask);
+ oxnas_register_set_mask(pio + PULL_ENABLE, mask);
+ } else {
+ oxnas_register_clear_mask(pio + PULL_ENABLE, mask);
+	}
+}
+
+/* unfortunately the debounce controls are shared */
+static bool oxnas_mux_get_debounce(void __iomem *pio, unsigned pin, u32 *div)
+{
+ *div = __raw_readl(pio + CLOCK_DIV) & DEBOUNCE_MASK;
+ return __raw_readl(pio + DEBOUNCE_ENABLE) & BIT(pin);
+}
+
+static void oxnas_mux_set_debounce(void __iomem *pio, unsigned mask,
+ bool is_on, u32 div)
+{
+ if (is_on) {
+ oxnas_register_value_mask(pio + CLOCK_DIV, DEBOUNCE_MASK, div);
+ oxnas_register_set_mask(pio + DEBOUNCE_ENABLE, mask);
+ } else {
+ oxnas_register_clear_mask(pio + DEBOUNCE_ENABLE, mask);
+ }
+}
+
+
+static void oxnas_mux_set_func2(void __iomem *cio, unsigned mask)
+{
+	/* SECONDARY takes precedence, so clearing the others is not strictly necessary */
+ oxnas_register_set_mask(cio + PINMUX_SECONDARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_TERTIARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_QUATERNARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_DEBUG_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_ALTERNATIVE_SEL, mask);
+}
+
+static void oxnas_mux_set_func3(void __iomem *cio, unsigned mask)
+{
+ oxnas_register_clear_mask(cio + PINMUX_SECONDARY_SEL, mask);
+ oxnas_register_set_mask(cio + PINMUX_TERTIARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_QUATERNARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_DEBUG_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_ALTERNATIVE_SEL, mask);
+}
+
+static void oxnas_mux_set_func4(void __iomem *cio, unsigned mask)
+{
+ oxnas_register_clear_mask(cio + PINMUX_SECONDARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_TERTIARY_SEL, mask);
+ oxnas_register_set_mask(cio + PINMUX_QUATERNARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_DEBUG_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_ALTERNATIVE_SEL, mask);
+}
+
+static void oxnas_mux_set_func_dbg(void __iomem *cio, unsigned mask)
+{
+ oxnas_register_clear_mask(cio + PINMUX_SECONDARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_TERTIARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_QUATERNARY_SEL, mask);
+ oxnas_register_set_mask(cio + PINMUX_DEBUG_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_ALTERNATIVE_SEL, mask);
+}
+
+static void oxnas_mux_set_func_alt(void __iomem *cio, unsigned mask)
+{
+ oxnas_register_clear_mask(cio + PINMUX_SECONDARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_TERTIARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_QUATERNARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_DEBUG_SEL, mask);
+ oxnas_register_set_mask(cio + PINMUX_ALTERNATIVE_SEL, mask);
+}
+
+static void oxnas_mux_set_gpio(void __iomem *cio, unsigned mask)
+{
+ oxnas_register_clear_mask(cio + PINMUX_SECONDARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_TERTIARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_QUATERNARY_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_DEBUG_SEL, mask);
+ oxnas_register_clear_mask(cio + PINMUX_ALTERNATIVE_SEL, mask);
+}
+
+static enum oxnas_mux oxnas_mux_get_func(void __iomem *cio, unsigned mask)
+{
+ if (readl_relaxed(cio + PINMUX_SECONDARY_SEL) & mask)
+ return OXNAS_PINMUX_FUNC2;
+ if (readl_relaxed(cio + PINMUX_TERTIARY_SEL) & mask)
+ return OXNAS_PINMUX_FUNC3;
+ if (readl_relaxed(cio + PINMUX_QUATERNARY_SEL) & mask)
+ return OXNAS_PINMUX_FUNC4;
+ if (readl_relaxed(cio + PINMUX_DEBUG_SEL) & mask)
+ return OXNAS_PINMUX_DEBUG;
+ if (readl_relaxed(cio + PINMUX_ALTERNATIVE_SEL) & mask)
+ return OXNAS_PINMUX_ALT;
+ return OXNAS_PINMUX_GPIO;
+}
+
+
+static void oxnas_pin_dbg(const struct device *dev,
+ const struct oxnas_pmx_pin *pin)
+{
+ if (pin->mux) {
+ dev_dbg(dev,
+ "MF_%c%d configured as periph%c with conf = 0x%lu\n",
+ pin->bank + 'A', pin->pin, pin->mux - 1 + 'A',
+ pin->conf);
+ } else {
+ dev_dbg(dev, "MF_%c%d configured as gpio with conf = 0x%lu\n",
+ pin->bank + 'A', pin->pin, pin->conf);
+ }
+}
+
+static int pin_check_config(struct oxnas_pinctrl *info, const char *name,
+ int index, const struct oxnas_pmx_pin *pin)
+{
+ int mux;
+
+ /* check if it's a valid config */
+ if (pin->bank >= info->nbanks) {
+ dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
+ name, index, pin->bank, info->nbanks);
+ return -EINVAL;
+ }
+
+ if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
+ dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
+ name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
+ return -EINVAL;
+ }
+ /* gpio always allowed */
+ if (!pin->mux)
+ return 0;
+
+ mux = pin->mux - 1;
+
+ if (mux >= info->nmux) {
+ dev_err(info->dev, "%s: pin conf %d mux_id %d >= nmux %d\n",
+ name, index, mux, info->nmux);
+ return -EINVAL;
+ }
+
+ if (!(info->mux_mask[pin->bank * info->nmux + mux] & 1 << pin->pin)) {
+ dev_err(info->dev, "%s: pin conf %d mux_id %d not supported for MF_%c%d\n",
+ name, index, mux, pin->bank + 'A', pin->pin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void oxnas_mux_gpio_enable(void __iomem *cio, void __iomem *pio,
+ unsigned mask, bool input)
+{
+ oxnas_mux_set_gpio(cio, mask);
+ if (input)
+ writel_relaxed(mask, pio + OUTPUT_EN_CLEAR);
+ else
+ writel_relaxed(mask, pio + OUTPUT_EN_SET);
+}
+
+static void oxnas_mux_gpio_disable(void __iomem *cio, void __iomem *pio,
+ unsigned mask)
+{
+	/* when switching to another function, the gpio is disabled automatically */
+ return;
+}
+
+static int oxnas_pmx_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ const struct oxnas_pmx_pin *pins_conf = info->groups[group].pins_conf;
+ const struct oxnas_pmx_pin *pin;
+ uint32_t npins = info->groups[group].npins;
+ int i, ret;
+ unsigned mask;
+ void __iomem *pio;
+ void __iomem *cio;
+
+ dev_dbg(info->dev, "enable function %s group %s\n",
+ info->functions[selector].name, info->groups[group].name);
+
+	/* first check that all the pins of the group are valid and have a
+	 * valid parameter */
+ for (i = 0; i < npins; i++) {
+ pin = &pins_conf[i];
+ ret = pin_check_config(info, info->groups[group].name, i, pin);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < npins; i++) {
+ pin = &pins_conf[i];
+ oxnas_pin_dbg(info->dev, pin);
+
+ pio = pin_to_gpioctrl(info, pin->bank);
+ cio = pin_to_muxctrl(info, pin->bank);
+
+ mask = pin_to_mask(pin->pin);
+ oxnas_mux_disable_interrupt(pio, mask);
+
+ switch (pin->mux) {
+ case OXNAS_PINMUX_GPIO:
+ oxnas_mux_gpio_enable(cio, pio, mask, 1);
+ break;
+ case OXNAS_PINMUX_FUNC2:
+ oxnas_mux_set_func2(cio, mask);
+ break;
+ case OXNAS_PINMUX_FUNC3:
+ oxnas_mux_set_func3(cio, mask);
+ break;
+ case OXNAS_PINMUX_FUNC4:
+ oxnas_mux_set_func4(cio, mask);
+ break;
+ case OXNAS_PINMUX_DEBUG:
+ oxnas_mux_set_func_dbg(cio, mask);
+ break;
+ case OXNAS_PINMUX_ALT:
+ oxnas_mux_set_func_alt(cio, mask);
+ break;
+ }
+ if (pin->mux)
+ oxnas_mux_gpio_disable(cio, pio, mask);
+ }
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+static void oxnas_pmx_disable(struct pinctrl_dev *pctldev, unsigned selector,
+ unsigned group)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ const struct oxnas_pmx_pin *pins_conf = info->groups[group].pins_conf;
+ const struct oxnas_pmx_pin *pin;
+ uint32_t npins = info->groups[group].npins;
+ int i;
+ unsigned mask;
+ void __iomem *pio;
+ void __iomem *cio;
+
+ for (i = 0; i < npins; i++) {
+ pin = &pins_conf[i];
+ oxnas_pin_dbg(info->dev, pin);
+ pio = pin_to_gpioctrl(info, pin->bank);
+ cio = pin_to_muxctrl(info, pin->bank);
+ mask = pin_to_mask(pin->pin);
+ oxnas_mux_gpio_enable(cio, pio, mask, 1);
+ }
+}
+#endif
+
+static int oxnas_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->nfunctions;
+}
+
+static const char *oxnas_pmx_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ return info->functions[selector].name;
+}
+
+static int oxnas_pmx_get_groups(struct pinctrl_dev *pctldev, unsigned selector,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = info->functions[selector].groups;
+ *num_groups = info->functions[selector].ngroups;
+
+ return 0;
+}
+
+static int oxnas_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct oxnas_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
+ struct oxnas_gpio_chip *oxnas_chip;
+ struct gpio_chip *chip;
+ unsigned mask;
+
+ if (!range) {
+ dev_err(npct->dev, "invalid range\n");
+ return -EINVAL;
+ }
+ if (!range->gc) {
+ dev_err(npct->dev, "missing GPIO chip in range\n");
+ return -EINVAL;
+ }
+ chip = range->gc;
+ oxnas_chip = container_of(chip, struct oxnas_gpio_chip, chip);
+
+ dev_dbg(npct->dev, "enable pin %u as GPIO\n", offset);
+
+ mask = 1 << (offset - chip->base);
+
+ dev_dbg(npct->dev, "enable pin %u as MF_%c%d 0x%x\n",
+ offset, 'A' + range->id, offset - chip->base, mask);
+
+ oxnas_mux_set_gpio(oxnas_chip->ctrlbase, mask);
+
+ return 0;
+}
+
+static void oxnas_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct oxnas_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
+
+ dev_dbg(npct->dev, "disable pin %u as GPIO\n", offset);
+ /* Set the pin to some default state, GPIO is usually default */
+}
+
+static const struct pinmux_ops oxnas_pmx_ops = {
+ .get_functions_count = oxnas_pmx_get_funcs_count,
+ .get_function_name = oxnas_pmx_get_func_name,
+ .get_function_groups = oxnas_pmx_get_groups,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ .set_mux = oxnas_pmx_set_mux,
+#else
+ .enable = oxnas_pmx_set_mux,
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+ .disable = oxnas_pmx_disable,
+#endif
+ .gpio_request_enable = oxnas_gpio_request_enable,
+ .gpio_disable_free = oxnas_gpio_disable_free,
+};
+
+static int oxnas_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned pin_id, unsigned long *config)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ void __iomem *pio;
+ unsigned pin;
+	u32 div;
+
+ dev_dbg(info->dev, "%s:%d, pin_id=%d, config=0x%lx", __func__,
+ __LINE__, pin_id, *config);
+ pio = pin_to_gpioctrl(info, pin_to_bank(pin_id));
+ pin = pin_id % MAX_NB_GPIO_PER_BANK;
+
+ if (oxnas_mux_get_pullup(pio, pin))
+ *config |= PULL_UP;
+
+ if (oxnas_mux_get_pulldown(pio, pin))
+ *config |= PULL_DOWN;
+
+ if (oxnas_mux_get_debounce(pio, pin, &div))
+ *config |= DEBOUNCE | div;
+ return 0;
+}
+
+static int oxnas_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned pin_id, unsigned long *configs,
+ unsigned num_configs)
+{
+ struct oxnas_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ unsigned mask;
+ void __iomem *pio;
+ int i;
+ unsigned long config;
+
+ pio = pin_to_gpioctrl(info, pin_to_bank(pin_id));
+ mask = pin_to_mask(pin_id % MAX_NB_GPIO_PER_BANK);
+
+ for (i = 0; i < num_configs; i++) {
+ config = configs[i];
+
+ dev_dbg(info->dev,
+ "%s:%d, pin_id=%d, config=0x%lx",
+ __func__, __LINE__, pin_id, config);
+
+ if ((config & PULL_UP) && (config & PULL_DOWN))
+ return -EINVAL;
+
+ oxnas_mux_set_pullup(pio, mask, config & PULL_UP);
+ oxnas_mux_set_pulldown(pio, mask, config & PULL_DOWN);
+ oxnas_mux_set_debounce(pio, mask, config & DEBOUNCE,
+ config & DEBOUNCE_MASK);
+
+ } /* for each config */
+
+ return 0;
+}
+
+static void oxnas_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned pin_id)
+{
+
+}
+
+static void oxnas_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned group)
+{
+}
+
+static const struct pinconf_ops oxnas_pinconf_ops = {
+ .pin_config_get = oxnas_pinconf_get,
+ .pin_config_set = oxnas_pinconf_set,
+ .pin_config_dbg_show = oxnas_pinconf_dbg_show,
+ .pin_config_group_dbg_show = oxnas_pinconf_group_dbg_show,
+};
+
+static struct pinctrl_desc oxnas_pinctrl_desc = {
+ .pctlops = &oxnas_pctrl_ops,
+ .pmxops = &oxnas_pmx_ops,
+ .confops = &oxnas_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+static const char *gpio_compat = "plxtech,nas782x-gpio";
+
+static void oxnas_pinctrl_child_count(struct oxnas_pinctrl *info,
+ struct device_node *np)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(np, child) {
+ if (of_device_is_compatible(child, gpio_compat)) {
+ info->nbanks++;
+ } else {
+ info->nfunctions++;
+ info->ngroups += of_get_child_count(child);
+ }
+ }
+}
+
+static int oxnas_pinctrl_mux_mask(struct oxnas_pinctrl *info,
+ struct device_node *np)
+{
+ int ret = 0;
+ int size;
+ const __be32 *list;
+
+ list = of_get_property(np, "plxtech,mux-mask", &size);
+ if (!list) {
+		dev_err(info->dev,
+			"cannot read the mux-mask property (size %d)\n", size);
+ return -EINVAL;
+ }
+
+ size /= sizeof(*list);
+ if (!size || size % info->nbanks) {
+		dev_err(info->dev,
+			"mux-mask array size must be a multiple of %d\n",
+			info->nbanks);
+ return -EINVAL;
+ }
+ info->nmux = size / info->nbanks;
+
+ info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
+ if (!info->mux_mask) {
+ dev_err(info->dev, "could not alloc mux_mask\n");
+ return -ENOMEM;
+ }
+
+ ret = of_property_read_u32_array(np, "plxtech,mux-mask",
+ info->mux_mask, size);
+ if (ret)
+		dev_err(info->dev,
+			"cannot read the mux-mask property (size %d)\n", size);
+ return ret;
+}
+
+static int oxnas_pinctrl_parse_groups(struct device_node *np,
+ struct oxnas_pin_group *grp,
+ struct oxnas_pinctrl *info, u32 index)
+{
+ struct oxnas_pmx_pin *pin;
+ int size;
+ const __be32 *list;
+ int i, j;
+
+ dev_dbg(info->dev, "group(%d): %s\n", index, np->name);
+
+ /* Initialise group */
+ grp->name = np->name;
+
+ /*
+ * the binding format is plxtech,pins = <bank pin mux CONFIG ...>,
+ * do sanity check and calculate pins number
+ */
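+	/*
+	 * Hypothetical example (pin numbers chosen for illustration only):
+	 *	plxtech,pins = <0 30 1 0>, <0 31 1 0>;
+	 * i.e. bank 0, pins 30 and 31, muxed to OXNAS_PINMUX_FUNC2 with no
+	 * extra pin configuration.
+	 */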
+ list = of_get_property(np, "plxtech,pins", &size);
+	/* do not check the return value: the node passed down is known to be safe */
+ size /= sizeof(*list);
+ if (!size || size % 4) {
+		dev_err(info->dev,
+			"wrong number of pin entries: must come in groups of 4\n");
+ return -EINVAL;
+ }
+
+ grp->npins = size / 4;
+ pin = grp->pins_conf = devm_kzalloc(info->dev,
+ grp->npins * sizeof(struct oxnas_pmx_pin),
+ GFP_KERNEL);
+ grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!grp->pins_conf || !grp->pins)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < size; i += 4, j++) {
+ pin->bank = be32_to_cpu(*list++);
+ pin->pin = be32_to_cpu(*list++);
+ grp->pins[j] = pin->bank * MAX_NB_GPIO_PER_BANK + pin->pin;
+ pin->mux = be32_to_cpu(*list++);
+ pin->conf = be32_to_cpu(*list++);
+
+ oxnas_pin_dbg(info->dev, pin);
+ pin++;
+ }
+
+ return 0;
+}
+
+static int oxnas_pinctrl_parse_functions(struct device_node *np,
+ struct oxnas_pinctrl *info, u32 index)
+{
+ struct device_node *child;
+ struct oxnas_pmx_func *func;
+ struct oxnas_pin_group *grp;
+ int ret;
+ static u32 grp_index;
+ u32 i = 0;
+
+ dev_dbg(info->dev, "parse function(%d): %s\n", index, np->name);
+
+ func = &info->functions[index];
+
+ /* Initialise function */
+ func->name = np->name;
+ func->ngroups = of_get_child_count(np);
+ if (func->ngroups <= 0) {
+ dev_err(info->dev, "no groups defined\n");
+ return -EINVAL;
+ }
+ func->groups = devm_kzalloc(info->dev,
+ func->ngroups * sizeof(char *), GFP_KERNEL);
+ if (!func->groups)
+ return -ENOMEM;
+
+ for_each_child_of_node(np, child) {
+ func->groups[i] = child->name;
+ grp = &info->groups[grp_index++];
+ ret = oxnas_pinctrl_parse_groups(child, grp, info, i++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct of_device_id oxnas_pinctrl_of_match[] = {
+ { .compatible = "plxtech,nas782x-pinctrl"},
+ { /* sentinel */ }
+};
+
+static int oxnas_pinctrl_probe_dt(struct platform_device *pdev,
+ struct oxnas_pinctrl *info)
+{
+ int ret = 0;
+ int i, j;
+ uint32_t *tmp;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+
+ if (!np)
+ return -ENODEV;
+
+ info->dev = &pdev->dev;
+
+ oxnas_pinctrl_child_count(info, np);
+
+ if (info->nbanks < 1) {
+		dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
+ return -EINVAL;
+ }
+
+ ret = oxnas_pinctrl_mux_mask(info, np);
+ if (ret)
+ return ret;
+
+ dev_dbg(&pdev->dev, "nmux = %d\n", info->nmux);
+
+ dev_dbg(&pdev->dev, "mux-mask\n");
+ tmp = info->mux_mask;
+ for (i = 0; i < info->nbanks; i++)
+ for (j = 0; j < info->nmux; j++, tmp++)
+ dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
+
+ dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
+ dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ info->functions = devm_kzalloc(&pdev->dev, info->nfunctions *
+ sizeof(struct oxnas_pmx_func),
+ GFP_KERNEL);
+ if (!info->functions)
+ return -ENOMEM;
+
+ info->groups = devm_kzalloc(&pdev->dev, info->ngroups *
+ sizeof(struct oxnas_pin_group),
+ GFP_KERNEL);
+ if (!info->groups)
+ return -ENOMEM;
+
+ dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
+ dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+
+ i = 0;
+
+ for_each_child_of_node(np, child) {
+ if (of_device_is_compatible(child, gpio_compat))
+ continue;
+ ret = oxnas_pinctrl_parse_functions(child, info, i++);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse function\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int oxnas_pinctrl_probe(struct platform_device *pdev)
+{
+ struct oxnas_pinctrl *info;
+ struct pinctrl_pin_desc *pdesc;
+ int ret, i, j, k;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = oxnas_pinctrl_probe_dt(pdev, info);
+ if (ret)
+ return ret;
+
+ /*
+ * We need all the GPIO drivers to probe FIRST, or we will not be able
+ * to obtain references to the struct gpio_chip * for them, and we
+ * need this to proceed.
+ */
+ for (i = 0; i < info->nbanks; i++) {
+ if (!gpio_chips[i]) {
+ dev_warn(&pdev->dev,
+ "GPIO chip %d not registered yet\n", i);
+ devm_kfree(&pdev->dev, info);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ oxnas_pinctrl_desc.name = dev_name(&pdev->dev);
+ oxnas_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+ oxnas_pinctrl_desc.pins = pdesc =
+ devm_kzalloc(&pdev->dev, sizeof(*pdesc) *
+ oxnas_pinctrl_desc.npins, GFP_KERNEL);
+
+ if (!oxnas_pinctrl_desc.pins)
+ return -ENOMEM;
+
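+ /* name each pin MF_<bank letter><index> (MF_A0, MF_A1, ...), matching the
+ * line names assigned in oxnas_gpio_probe() */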
+ for (i = 0, k = 0; i < info->nbanks; i++) {
+ for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
+ pdesc->number = k;
+ pdesc->name = kasprintf(GFP_KERNEL, "MF_%c%d", i + 'A',
+ j);
+ pdesc++;
+ }
+ }
+
+ platform_set_drvdata(pdev, info);
+ info->pctl = pinctrl_register(&oxnas_pinctrl_desc, &pdev->dev, info);
+
+ if (!info->pctl) {
+ dev_err(&pdev->dev, "could not register OX820 pinctrl driver\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* We will handle a range of GPIO pins */
+ for (i = 0; i < info->nbanks; i++)
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+
+ dev_info(&pdev->dev, "initialized OX820 pinctrl driver\n");
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int oxnas_pinctrl_remove(struct platform_device *pdev)
+{
+ struct oxnas_pinctrl *info = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(info->pctl);
+
+ return 0;
+}
+
+static int oxnas_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ /*
+ * Map back to the global GPIO number space and request muxing from
+ * pinctrl; the pin direction is configured separately.
+ */
+ int gpio = chip->base + offset;
+ int bank = chip->base / chip->ngpio;
+
+ dev_dbg(chip->dev, "%s:%d MF_%c%d(%d)\n", __func__, __LINE__,
+ 'A' + bank, offset, gpio);
+
+ return pinctrl_request_gpio(gpio);
+}
+
+static void oxnas_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ pinctrl_free_gpio(gpio);
+}
+
+static int oxnas_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = to_oxnas_gpio_chip(chip);
+ void __iomem *pio = oxnas_gpio->regbase;
+
+ writel_relaxed(BIT(offset), pio + OUTPUT_EN_CLEAR);
+ return 0;
+}
+
+static int oxnas_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = to_oxnas_gpio_chip(chip);
+ void __iomem *pio = oxnas_gpio->regbase;
+ unsigned mask = 1 << offset;
+ u32 pdsr;
+
+ pdsr = readl_relaxed(pio + INPUT_VALUE);
+ return (pdsr & mask) != 0;
+}
+
+static void oxnas_gpio_set(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = to_oxnas_gpio_chip(chip);
+ void __iomem *pio = oxnas_gpio->regbase;
+
+ if (val)
+ writel_relaxed(BIT(offset), pio + OUTPUT_SET);
+ else
+ writel_relaxed(BIT(offset), pio + OUTPUT_CLEAR);
+
+}
+
+static int oxnas_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
+ int val)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = to_oxnas_gpio_chip(chip);
+ void __iomem *pio = oxnas_gpio->regbase;
+
+ if (val)
+ writel_relaxed(BIT(offset), pio + OUTPUT_SET);
+ else
+ writel_relaxed(BIT(offset), pio + OUTPUT_CLEAR);
+
+ writel_relaxed(BIT(offset), pio + OUTPUT_EN_SET);
+
+ return 0;
+}
+
+static int oxnas_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = to_oxnas_gpio_chip(chip);
+ int virq;
+
+ if (offset < chip->ngpio)
+ virq = irq_create_mapping(oxnas_gpio->domain, offset);
+ else
+ virq = -ENXIO;
+
+ dev_dbg(chip->dev, "%s: request IRQ for GPIO %d, return %d\n",
+ chip->label, offset + chip->base, virq);
+ return virq;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void oxnas_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ enum oxnas_mux mode;
+ int i;
+ struct oxnas_gpio_chip *oxnas_gpio = to_oxnas_gpio_chip(chip);
+ void __iomem *pio = oxnas_gpio->regbase;
+ void __iomem *cio = oxnas_gpio->ctrlbase;
+
+ for (i = 0; i < chip->ngpio; i++) {
+ unsigned pin = chip->base + i;
+ unsigned mask = pin_to_mask(pin);
+ const char *gpio_label;
+ u32 pdsr;
+
+ gpio_label = gpiochip_is_requested(chip, i);
+ if (!gpio_label)
+ continue;
+ /* FIXME */
+ mode = oxnas_mux_get_func(cio, mask);
+ seq_printf(s, "[%s] GPIO%s%d: ",
+ gpio_label, chip->label, i);
+ if (mode == OXNAS_PINMUX_GPIO) {
+ pdsr = readl_relaxed(pio + INPUT_VALUE);
+
+ seq_printf(s, "[gpio] %s\n",
+ pdsr & mask ?
+ "set" : "clear");
+ } else {
+ seq_printf(s, "[periph %c]\n",
+ mode + 'A' - 1);
+ }
+ }
+}
+#else
+#define oxnas_gpio_dbg_show NULL
+#endif
+
+/*
+ * GPIO interrupts are dispatched through this chained handler. To use a pin
+ * as an externally triggered IRQ, configure it as an input (for instance via
+ * gpio_direction_input()), then request_irq() with the IRQ returned by
+ * gpio_to_irq(); it works like any other IRQ handler.
+ */
+
+static void gpio_irq_mask(struct irq_data *d)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = irq_data_get_irq_chip_data(d);
+ void __iomem *pio = oxnas_gpio->regbase;
+ unsigned mask = 1 << d->hwirq;
+ unsigned type = irqd_get_trigger_type(d);
+
+ /* FIXME: need proper lock */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ oxnas_register_clear_mask(pio + RE_IRQ_ENABLE, mask);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ oxnas_register_clear_mask(pio + FE_IRQ_ENABLE, mask);
+}
+
+static void gpio_irq_unmask(struct irq_data *d)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = irq_data_get_irq_chip_data(d);
+ void __iomem *pio = oxnas_gpio->regbase;
+ unsigned mask = 1 << d->hwirq;
+ unsigned type = irqd_get_trigger_type(d);
+
+ /* FIXME: need proper lock */
+ if (type & IRQ_TYPE_EDGE_RISING)
+ oxnas_register_set_mask(pio + RE_IRQ_ENABLE, mask);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ oxnas_register_set_mask(pio + FE_IRQ_ENABLE, mask);
+}
+
+
+static int gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ if ((type & IRQ_TYPE_EDGE_BOTH) == 0) {
+ pr_warn("OX820: Unsupported type for irq %d\n",
+ gpio_to_irq(d->irq));
+ return -EINVAL;
+ }
+ /* there appears to be no way to program the trigger type without also
+ * enabling the interrupt, so it is applied at unmask time (gpio_irq_unmask) */
+
+ return 0;
+}
+
+static struct irq_chip gpio_irqchip = {
+ .name = "GPIO",
+ .irq_disable = gpio_irq_mask,
+ .irq_mask = gpio_irq_mask,
+ .irq_unmask = gpio_irq_unmask,
+ .irq_set_type = gpio_irq_type,
+};
+
+static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ struct irq_data *idata = irq_desc_get_irq_data(desc);
+ struct oxnas_gpio_chip *oxnas_gpio = irq_data_get_irq_chip_data(idata);
+ void __iomem *pio = oxnas_gpio->regbase;
+ unsigned long isr;
+ int n;
+
+ chained_irq_enter(chip, desc);
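+ /* read, ack and dispatch until the pending register reads back empty */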
+ for (;;) {
+ /* TODO: see if it works */
+ isr = readl_relaxed(pio + IRQ_PENDING);
+ if (!isr)
+ break;
+ /* acks pending interrupts */
+ writel_relaxed(isr, pio + IRQ_PENDING);
+
+ for_each_set_bit(n, &isr, BITS_PER_LONG) {
+ generic_handle_irq(irq_find_mapping(oxnas_gpio->domain,
+ n));
+ }
+ }
+ chained_irq_exit(chip, desc);
+ /* now it may re-trigger */
+}
+
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
+static int oxnas_gpio_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = h->host_data;
+
+ irq_set_lockdep_class(virq, &gpio_lock_class);
+
+ irq_set_chip_and_handler(virq, &gpio_irqchip, handle_edge_irq);
+ set_irq_flags(virq, IRQF_VALID);
+ irq_set_chip_data(virq, oxnas_gpio);
+
+ return 0;
+}
+
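+/*
+ * The interrupt specifier is <pin flags>. Besides translating it, claim the
+ * GPIO and switch it to input so that a consumer referencing the interrupt
+ * directly from DT ends up with a usable pin.
+ */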
+static int oxnas_gpio_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec,
+ unsigned int intsize,
+ irq_hw_number_t *out_hwirq,
+ unsigned int *out_type)
+{
+ struct oxnas_gpio_chip *oxnas_gpio = d->host_data;
+ int ret;
+ int pin = oxnas_gpio->chip.base + intspec[0];
+
+ if (WARN_ON(intsize < 2))
+ return -EINVAL;
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+ ret = gpio_request(pin, ctrlr->full_name);
+ if (ret)
+ return ret;
+
+ ret = gpio_direction_input(pin);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct irq_domain_ops oxnas_gpio_ops = {
+ .map = oxnas_gpio_irq_map,
+ .xlate = oxnas_gpio_irq_domain_xlate,
+};
+
+static int oxnas_gpio_of_irq_setup(struct device_node *node,
+ struct oxnas_gpio_chip *oxnas_gpio,
+ unsigned int irq)
+{
+ /* Disable irqs of this controller */
+ writel_relaxed(0, oxnas_gpio->regbase + RE_IRQ_ENABLE);
+ writel_relaxed(0, oxnas_gpio->regbase + FE_IRQ_ENABLE);
+
+ /* Setup irq domain */
+ oxnas_gpio->domain = irq_domain_add_linear(node, oxnas_gpio->chip.ngpio,
+ &oxnas_gpio_ops, oxnas_gpio);
+ if (!oxnas_gpio->domain)
+ panic("oxnas_gpio: couldn't allocate irq domain (DT).\n");
+
+ irq_set_chip_data(irq, oxnas_gpio);
+ irq_set_chained_handler(irq, gpio_irq_handler);
+
+ return 0;
+}
+
+/* This structure is replicated for each GPIO block allocated at probe time */
+static struct gpio_chip oxnas_gpio_template = {
+ .request = oxnas_gpio_request,
+ .free = oxnas_gpio_free,
+ .direction_input = oxnas_gpio_direction_input,
+ .get = oxnas_gpio_get,
+ .direction_output = oxnas_gpio_direction_output,
+ .set = oxnas_gpio_set,
+ .to_irq = oxnas_gpio_to_irq,
+ .dbg_show = oxnas_gpio_dbg_show,
+ .can_sleep = 0,
+ .ngpio = MAX_NB_GPIO_PER_BANK,
+};
+
+static struct of_device_id oxnas_gpio_of_match[] = {
+ { .compatible = "plxtech,nas782x-gpio"},
+ { /* sentinel */ }
+};
+
+static int oxnas_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
+ struct oxnas_gpio_chip *oxnas_chip = NULL;
+ struct gpio_chip *chip;
+ struct pinctrl_gpio_range *range;
+ int ret = 0;
+ int irq, i;
+ int alias_idx = of_alias_get_id(np, "gpio");
+ uint32_t ngpio;
+ char **names;
+
+ BUG_ON(alias_idx >= ARRAY_SIZE(gpio_chips));
+ if (gpio_chips[alias_idx]) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err;
+ }
+
+ oxnas_chip = devm_kzalloc(&pdev->dev, sizeof(*oxnas_chip), GFP_KERNEL);
+ if (!oxnas_chip) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ oxnas_chip->regbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oxnas_chip->regbase)) {
+ ret = PTR_ERR(oxnas_chip->regbase);
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ oxnas_chip->ctrlbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oxnas_chip->ctrlbase)) {
+ ret = PTR_ERR(oxnas_chip->ctrlbase);
+ goto err;
+ }
+
+ oxnas_chip->chip = oxnas_gpio_template;
+
+ chip = &oxnas_chip->chip;
+ chip->of_node = np;
+ chip->label = dev_name(&pdev->dev);
+ chip->dev = &pdev->dev;
+ chip->owner = THIS_MODULE;
+ chip->base = alias_idx * MAX_NB_GPIO_PER_BANK;
+
+ if (!of_property_read_u32(np, "#gpio-lines", &ngpio)) {
+ if (ngpio > MAX_NB_GPIO_PER_BANK)
+ pr_err("oxnas_gpio.%d, gpio-nb >= %d failback to %d\n",
+ alias_idx, MAX_NB_GPIO_PER_BANK,
+ MAX_NB_GPIO_PER_BANK);
+ else
+ chip->ngpio = ngpio;
+ }
+
+ names = devm_kzalloc(&pdev->dev, sizeof(char *) * chip->ngpio,
+ GFP_KERNEL);
+
+ if (!names) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < chip->ngpio; i++)
+ names[i] = kasprintf(GFP_KERNEL, "MF_%c%d", alias_idx + 'A', i);
+
+ chip->names = (const char *const *)names;
+
+ range = &oxnas_chip->range;
+ range->name = chip->label;
+ range->id = alias_idx;
+ range->pin_base = range->base = range->id * MAX_NB_GPIO_PER_BANK;
+
+ range->npins = chip->ngpio;
+ range->gc = chip;
+
+ ret = gpiochip_add(chip);
+ if (ret)
+ goto err;
+
+ gpio_chips[alias_idx] = oxnas_chip;
+ gpio_banks = max(gpio_banks, alias_idx + 1);
+
+ oxnas_gpio_of_irq_setup(np, oxnas_chip, irq);
+
+ dev_info(&pdev->dev, "at address %p\n", oxnas_chip->regbase);
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Failure %i for GPIO %i\n", ret, alias_idx);
+
+ return ret;
+}
+
+static struct platform_driver oxnas_gpio_driver = {
+ .driver = {
+ .name = "gpio-oxnas",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(oxnas_gpio_of_match),
+ },
+ .probe = oxnas_gpio_probe,
+};
+
+static struct platform_driver oxnas_pinctrl_driver = {
+ .driver = {
+ .name = "pinctrl-oxnas",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(oxnas_pinctrl_of_match),
+ },
+ .probe = oxnas_pinctrl_probe,
+ .remove = oxnas_pinctrl_remove,
+};
+
+static int __init oxnas_pinctrl_init(void)
+{
+ int ret;
+
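+ /* register the GPIO banks first: the pinctrl probe defers until every
+ * bank it needs has been registered (see oxnas_pinctrl_probe) */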
+ ret = platform_driver_register(&oxnas_gpio_driver);
+ if (ret)
+ return ret;
+ return platform_driver_register(&oxnas_pinctrl_driver);
+}
+arch_initcall(oxnas_pinctrl_init);
+
+static void __exit oxnas_pinctrl_exit(void)
+{
+ platform_driver_unregister(&oxnas_pinctrl_driver);
+}
+
+module_exit(oxnas_pinctrl_exit);
+MODULE_AUTHOR("Ma Hajun <mahaijuns@gmail.com>");
+MODULE_DESCRIPTION("Plxtech Nas782x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/oxnas/files/drivers/reset/reset-ox820.c b/target/linux/oxnas/files/drivers/reset/reset-ox820.c
new file mode 100644
index 0000000000..0a28de55f4
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/reset/reset-ox820.c
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <mach/hardware.h>
+
+static int ox820_reset_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
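+ /* pulse the reset line: assert via the SET register, then release via CLR */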
+ writel(BIT(id), SYS_CTRL_RST_SET_CTRL);
+ writel(BIT(id), SYS_CTRL_RST_CLR_CTRL);
+ return 0;
+}
+
+static int ox820_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ writel(BIT(id), SYS_CTRL_RST_SET_CTRL);
+
+ return 0;
+}
+
+static int ox820_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ writel(BIT(id), SYS_CTRL_RST_CLR_CTRL);
+
+ return 0;
+}
+
+static struct reset_control_ops ox820_reset_ops = {
+ .reset = ox820_reset_reset,
+ .assert = ox820_reset_assert,
+ .deassert = ox820_reset_deassert,
+};
+
+static const struct of_device_id ox820_reset_dt_ids[] = {
+ { .compatible = "plxtech,nas782x-reset", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ox820_reset_dt_ids);
+
+static int ox820_reset_probe(struct platform_device *pdev)
+{
+ struct reset_controller_dev *rcdev;
+
+ rcdev = devm_kzalloc(&pdev->dev, sizeof(*rcdev), GFP_KERNEL);
+ if (!rcdev)
+ return -ENOMEM;
+
+ /* note: reset controller is statically mapped */
+
+ rcdev->owner = THIS_MODULE;
+ rcdev->nr_resets = 32;
+ rcdev->ops = &ox820_reset_ops;
+ rcdev->of_node = pdev->dev.of_node;
+ reset_controller_register(rcdev);
+ platform_set_drvdata(pdev, rcdev);
+
+ return 0;
+}
+
+static int ox820_reset_remove(struct platform_device *pdev)
+{
+ struct reset_controller_dev *rcdev = platform_get_drvdata(pdev);
+
+ reset_controller_unregister(rcdev);
+
+ return 0;
+}
+
+static struct platform_driver ox820_reset_driver = {
+ .probe = ox820_reset_probe,
+ .remove = ox820_reset_remove,
+ .driver = {
+ .name = "ox820-reset",
+ .owner = THIS_MODULE,
+ .of_match_table = ox820_reset_dt_ids,
+ },
+};
+
+static int __init ox820_reset_init(void)
+{
+ return platform_driver_probe(&ox820_reset_driver,
+ ox820_reset_probe);
+}
+/*
+ * The reset controller does not support probe deferral, so it has to be
+ * initialized before any of its users; PCIe, in particular, registers at
+ * subsys_initcall.
+ */
+arch_initcall(ox820_reset_init);
+
+MODULE_AUTHOR("Ma Haijun");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/oxnas/files/drivers/usb/host/ehci-oxnas.c b/target/linux/oxnas/files/drivers/usb/host/ehci-oxnas.c
new file mode 100644
index 0000000000..23c5061ec5
--- /dev/null
+++ b/target/linux/oxnas/files/drivers/usb/host/ehci-oxnas.c
@@ -0,0 +1,316 @@
+/*
+ * drivers/usb/host/ehci-oxnas.c
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <mach/hardware.h>
+#include <mach/utils.h>
+
+#include "ehci.h"
+
+struct oxnas_hcd {
+ struct clk *clk;
+ struct clk *refsrc;
+ struct clk *phyref;
+ int use_pllb;
+ int use_phya;
+ struct reset_control *rst_host;
+ struct reset_control *rst_phya;
+ struct reset_control *rst_phyb;
+};
+
+#define DRIVER_DESC "Oxnas On-Chip EHCI Host Controller"
+
+static struct hc_driver __read_mostly oxnas_hc_driver;
+
+static void start_oxnas_usb_ehci(struct oxnas_hcd *oxnas)
+{
+ u32 reg;
+
+ if (oxnas->use_pllb) {
+ /* enable pllb */
+ clk_prepare_enable(oxnas->refsrc);
+ /* enable ref600 */
+ clk_prepare_enable(oxnas->phyref);
+ /* 600MHz pllb divider for 12MHz */
+ writel(PLLB_DIV_INT(50) | PLLB_DIV_FRAC(0),
+ SEC_CTRL_PLLB_DIV_CTRL);
+
+ } else {
+ /* ref 300 divider for 12MHz */
+ writel(REF300_DIV_INT(25) | REF300_DIV_FRAC(0),
+ SYS_CTRL_REF300_DIV);
+ }
+
+ /* Ensure the USB block is properly reset */
+ reset_control_reset(oxnas->rst_host);
+ reset_control_reset(oxnas->rst_phya);
+ reset_control_reset(oxnas->rst_phyb);
+
+ /* Force the high speed clock to be generated all the time, via serial
+ programming of the USB HS PHY */
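+ /* each value is presented on TEST_ADD/TEST_DIN and then clocked in by a
+ second write with TEST_CLK set */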
+ writel((2UL << USBHSPHY_TEST_ADD) |
+ (0xe0UL << USBHSPHY_TEST_DIN), SYS_CTRL_USBHSPHY_CTRL);
+
+ writel((1UL << USBHSPHY_TEST_CLK) |
+ (2UL << USBHSPHY_TEST_ADD) |
+ (0xe0UL << USBHSPHY_TEST_DIN), SYS_CTRL_USBHSPHY_CTRL);
+
+ writel((0xfUL << USBHSPHY_TEST_ADD) |
+ (0xaaUL << USBHSPHY_TEST_DIN), SYS_CTRL_USBHSPHY_CTRL);
+
+ writel((1UL << USBHSPHY_TEST_CLK) |
+ (0xfUL << USBHSPHY_TEST_ADD) |
+ (0xaaUL << USBHSPHY_TEST_DIN), SYS_CTRL_USBHSPHY_CTRL);
+
+ if (oxnas->use_pllb) /* use pllb clock */
+ writel(USB_CLK_INTERNAL | USB_INT_CLK_PLLB, SYS_CTRL_USB_CTRL);
+ else /* use ref300 derived clock */
+ writel(USB_CLK_INTERNAL | USB_INT_CLK_REF300,
+ SYS_CTRL_USB_CTRL);
+
+ if (oxnas->use_phya) {
+ /* Configure USB PHYA as a host */
+ reg = readl(SYS_CTRL_USB_CTRL);
+ reg &= ~USBAMUX_DEVICE;
+ writel(reg, SYS_CTRL_USB_CTRL);
+ }
+
+ /* Enable the clock to the USB block */
+ clk_prepare_enable(oxnas->clk);
+}
+
+static void stop_oxnas_usb_ehci(struct oxnas_hcd *oxnas)
+{
+ reset_control_assert(oxnas->rst_host);
+ reset_control_assert(oxnas->rst_phya);
+ reset_control_assert(oxnas->rst_phyb);
+
+ if (oxnas->use_pllb) {
+ clk_disable_unprepare(oxnas->phyref);
+ clk_disable_unprepare(oxnas->refsrc);
+ }
+ clk_disable_unprepare(oxnas->clk);
+}
+
+static int ehci_oxnas_reset(struct usb_hcd *hcd)
+{
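+ /*
+ * struct ehci_regs has no txttfill_tuning member; on this controller the
+ * register apparently occupies the slot declared as reserved2[0], so it is
+ * aliased locally here.
+ */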
+ #define txttfill_tuning reserved2[0]
+
+ struct ehci_hcd *ehci;
+ u32 tmp;
+ int retval = ehci_setup(hcd);
+ if (retval)
+ return retval;
+
+ ehci = hcd_to_ehci(hcd);
+ tmp = ehci_readl(ehci, &ehci->regs->txfill_tuning);
+ tmp &= ~0x00ff0000;
+ tmp |= 0x003f0000; /* set burst pre-load count to 0x3f (63 * 4 bytes) */
+ tmp |= 0x16; /* set scheduler overhead to 22 * 1.267us (HS) or 22 * 6.33us (FS/LS) */
+ ehci_writel(ehci, tmp, &ehci->regs->txfill_tuning);
+
+ tmp = ehci_readl(ehci, &ehci->regs->txttfill_tuning);
+ tmp |= 0x2; /* set scheduler overhead to 2 * 6.333us */
+ ehci_writel(ehci, tmp, &ehci->regs->txttfill_tuning);
+
+ return retval;
+}
+
+static int ehci_oxnas_drv_probe(struct platform_device *ofdev)
+{
+ struct device_node *np = ofdev->dev.of_node;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct resource res;
+ struct oxnas_hcd *oxnas;
+ int irq, err;
+ struct reset_control *rstc;
+
+ if (usb_disabled())
+ return -ENODEV;
+
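+ /* DT-probed devices may arrive without a DMA mask; default to 32 bits */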
+ if (!ofdev->dev.dma_mask)
+ ofdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
+ if (!ofdev->dev.coherent_dma_mask)
+ ofdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+ hcd = usb_create_hcd(&oxnas_hc_driver, &ofdev->dev,
+ dev_name(&ofdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ err = of_address_to_resource(np, 0, &res);
+ if (err)
+ goto err_res;
+
+ hcd->rsrc_start = res.start;
+ hcd->rsrc_len = resource_size(&res);
+
+ hcd->regs = devm_ioremap_resource(&ofdev->dev, &res);
+ if (IS_ERR(hcd->regs)) {
+ dev_err(&ofdev->dev, "devm_ioremap_resource failed\n");
+ err = PTR_ERR(hcd->regs);
+ goto err_ioremap;
+ }
+
+ oxnas = (struct oxnas_hcd *)hcd_to_ehci(hcd)->priv;
+
+ oxnas->use_pllb = of_property_read_bool(np, "plxtch,ehci_use_pllb");
+ oxnas->use_phya = of_property_read_bool(np, "plxtch,ehci_use_phya");
+
+ oxnas->clk = of_clk_get_by_name(np, "usb");
+ if (IS_ERR(oxnas->clk)) {
+ err = PTR_ERR(oxnas->clk);
+ goto err_clk;
+ }
+
+ if (oxnas->use_pllb) {
+ oxnas->refsrc = of_clk_get_by_name(np, "refsrc");
+ if (IS_ERR(oxnas->refsrc)) {
+ err = PTR_ERR(oxnas->refsrc);
+ goto err_refsrc;
+ }
+ oxnas->phyref = of_clk_get_by_name(np, "phyref");
+ if (IS_ERR(oxnas->phyref)) {
+ err = PTR_ERR(oxnas->phyref);
+ goto err_phyref;
+ }
+
+ } else {
+ oxnas->refsrc = NULL;
+ oxnas->phyref = NULL;
+ }
+
+ rstc = devm_reset_control_get(&ofdev->dev, "host");
+ if (IS_ERR(rstc)) {
+ err = PTR_ERR(rstc);
+ goto err_rst;
+ }
+ oxnas->rst_host = rstc;
+
+ rstc = devm_reset_control_get(&ofdev->dev, "phya");
+ if (IS_ERR(rstc)) {
+ err = PTR_ERR(rstc);
+ goto err_rst;
+ }
+ oxnas->rst_phya = rstc;
+
+ rstc = devm_reset_control_get(&ofdev->dev, "phyb");
+ if (IS_ERR(rstc)) {
+ err = PTR_ERR(rstc);
+ goto err_rst;
+ }
+ oxnas->rst_phyb = rstc;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
+ err = -EBUSY;
+ goto err_irq;
+ }
+
+ hcd->has_tt = 1;
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+
+ start_oxnas_usb_ehci(oxnas);
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+ if (err)
+ goto err_hcd;
+
+ return 0;
+
+err_hcd:
+ stop_oxnas_usb_ehci(oxnas);
+err_irq:
+err_rst:
+ if (oxnas->phyref)
+ clk_put(oxnas->phyref);
+err_phyref:
+ if (oxnas->refsrc)
+ clk_put(oxnas->refsrc);
+err_refsrc:
+ clk_put(oxnas->clk);
+err_clk:
+err_ioremap:
+err_res:
+ usb_put_hcd(hcd);
+
+ return err;
+}
+
+static int ehci_oxnas_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct oxnas_hcd *oxnas = (struct oxnas_hcd *)hcd_to_ehci(hcd)->priv;
+
+ usb_remove_hcd(hcd);
+ if (oxnas->use_pllb) {
+ clk_disable_unprepare(oxnas->phyref);
+ clk_put(oxnas->phyref);
+ clk_disable_unprepare(oxnas->refsrc);
+ clk_put(oxnas->refsrc);
+ }
+ clk_disable_unprepare(oxnas->clk);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static const struct of_device_id oxnas_ehci_dt_ids[] = {
+ { .compatible = "plxtch,nas782x-ehci" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, oxnas_ehci_dt_ids);
+
+static struct platform_driver ehci_oxnas_driver = {
+ .probe = ehci_oxnas_drv_probe,
+ .remove = ehci_oxnas_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver.name = "oxnas-ehci",
+ .driver.of_match_table = oxnas_ehci_dt_ids,
+};
+
+static const struct ehci_driver_overrides oxnas_overrides __initconst = {
+ .reset = ehci_oxnas_reset,
+ .extra_priv_size = sizeof(struct oxnas_hcd),
+};
+
+static int __init ehci_oxnas_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ ehci_init_driver(&oxnas_hc_driver, &oxnas_overrides);
+ return platform_driver_register(&ehci_oxnas_driver);
+}
+module_init(ehci_oxnas_init);
+
+static void __exit ehci_oxnas_cleanup(void)
+{
+ platform_driver_unregister(&ehci_oxnas_driver);
+}
+module_exit(ehci_oxnas_cleanup);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_ALIAS("platform:oxnas-ehci");
+MODULE_LICENSE("GPL");