Diffstat (limited to 'target/linux/adm5120/files-3.18/drivers')
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/ata/pata_rb153_cf.c              267
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/leds/ledtrig-adm5120-switch.c    149
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/mtd/maps/adm5120-flash.c         482
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/mtd/trxsplit.c                   216
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/net/adm5120sw.c                 1219
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/net/adm5120sw.h                   23
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-dbg.c           836
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-drv.c           228
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hcd.c           843
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hub.c           430
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-mem.c           202
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-pm.c            449
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-q.c             964
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/usb/host/adm5120.h               755
-rw-r--r--   target/linux/adm5120/files-3.18/drivers/watchdog/adm5120_wdt.c           202
15 files changed, 7265 insertions, 0 deletions
diff --git a/target/linux/adm5120/files-3.18/drivers/ata/pata_rb153_cf.c b/target/linux/adm5120/files-3.18/drivers/ata/pata_rb153_cf.c
new file mode 100644
index 0000000..92a4d13
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/ata/pata_rb153_cf.c
@@ -0,0 +1,267 @@
+/*
+ * A low-level PATA driver to handle a Compact Flash connected on the
+ * Mikrotik's RouterBoard 153 board.
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was based on: drivers/ata/pata_ixp4xx_cf.c
+ * Copyright (C) 2006-07 Tower Technologies
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * Also was based on the driver for Linux 2.4.xx published by Mikrotik for
+ * their RouterBoard 1xx and 5xx series devices. The original Mikrotik code
+ * seems not to have a license.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+
+#include <linux/libata.h>
+#include <scsi/scsi_host.h>
+
+#define DRV_NAME "pata-rb153-cf"
+#define DRV_VERSION "0.5.0"
+#define DRV_DESC "PATA driver for RouterBOARD 153 Compact Flash"
+
+#define RB153_CF_MAXPORTS 1
+#define RB153_CF_IO_DELAY 100
+
+#define RB153_CF_REG_CMD 0x0800
+#define RB153_CF_REG_CTRL 0x080E
+#define RB153_CF_REG_DATA 0x0C00
+
+struct rb153_cf_info {
+ void __iomem *iobase;
+ unsigned int gpio_line;
+ int frozen;
+ unsigned int irq;
+};
+
+static inline void rb153_pata_finish_io(struct ata_port *ap)
+{
+ struct rb153_cf_info *info = ap->host->private_data;
+
+ /* FIXME: Keep previous delay. If this is merely a fence then
+ * ata_sff_sync might be sufficient. */
+ ata_sff_dma_pause(ap);
+ ndelay(RB153_CF_IO_DELAY);
+
+ irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static void rb153_pata_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ writeb(tf->command, ap->ioaddr.command_addr);
+ rb153_pata_finish_io(ap);
+}
+
+static unsigned int rb153_pata_data_xfer(struct ata_device *adev,
+ unsigned char *buf,
+ unsigned int buflen,
+ int write_data)
+{
+ void __iomem *ioaddr = adev->link->ap->ioaddr.data_addr;
+ unsigned int t;
+
+ t = buflen;
+ if (write_data) {
+ for (; t > 0; t--, buf++)
+ writeb(*buf, ioaddr);
+ } else {
+ for (; t > 0; t--, buf++)
+ *buf = readb(ioaddr);
+ }
+
+ rb153_pata_finish_io(adev->link->ap);
+ return buflen;
+}
+
+static void rb153_pata_freeze(struct ata_port *ap)
+{
+ struct rb153_cf_info *info = ap->host->private_data;
+
+ info->frozen = 1;
+}
+
+static void rb153_pata_thaw(struct ata_port *ap)
+{
+ struct rb153_cf_info *info = ap->host->private_data;
+
+ info->frozen = 0;
+}
+
+static irqreturn_t rb153_pata_irq_handler(int irq, void *dev_instance)
+{
+ struct ata_host *ah = dev_instance;
+ struct rb153_cf_info *info = ah->private_data;
+
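+ /*
+ * The CF card signals interrupts on a plain GPIO line and only level
+ * triggers are available, so flip the trigger polarity to match the
+ * current line state; this keeps the handler from firing continuously
+ * while the line stays asserted, and the event is only forwarded to
+ * libata when the port is not frozen.
+ */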
+ if (gpio_get_value(info->gpio_line)) {
+ irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
+ if (!info->frozen)
+ ata_sff_interrupt(irq, dev_instance);
+ } else {
+ irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ata_port_operations rb153_pata_port_ops = {
+ .inherits = &ata_sff_port_ops,
+ .sff_exec_command = rb153_pata_exec_command,
+ .sff_data_xfer = rb153_pata_data_xfer,
+ .freeze = rb153_pata_freeze,
+ .thaw = rb153_pata_thaw,
+};
+
+static struct scsi_host_template rb153_pata_sht = {
+ ATA_PIO_SHT(DRV_NAME),
+};
+
+static void rb153_pata_setup_port(struct ata_host *ah)
+{
+ struct rb153_cf_info *info = ah->private_data;
+ struct ata_port *ap;
+
+ ap = ah->ports[0];
+
+ ap->ops = &rb153_pata_port_ops;
+ ap->pio_mask = 0x1f; /* PIO4 */
+
+ ap->ioaddr.cmd_addr = info->iobase + RB153_CF_REG_CMD;
+ ap->ioaddr.ctl_addr = info->iobase + RB153_CF_REG_CTRL;
+ ap->ioaddr.altstatus_addr = info->iobase + RB153_CF_REG_CTRL;
+
+ ata_sff_std_ports(&ap->ioaddr);
+
+ ap->ioaddr.data_addr = info->iobase + RB153_CF_REG_DATA;
+}
+
+static int rb153_pata_driver_probe(struct platform_device *pdev)
+{
+ int irq;
+ int gpio;
+ struct resource *res;
+ struct ata_host *ah;
+ struct rb153_cf_info *info;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no IOMEM resource found\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "no IRQ resource found\n");
+ return -ENOENT;
+ }
+
+ gpio = irq_to_gpio(irq);
+ if (gpio < 0) {
+ dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
+ return -ENOENT;
+ }
+
+ ret = gpio_request(gpio, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "GPIO request failed\n");
+ return ret;
+ }
+
+ ah = ata_host_alloc(&pdev->dev, RB153_CF_MAXPORTS);
+ if (!ah) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ platform_set_drvdata(pdev, ah);
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ ah->private_data = info;
+ info->gpio_line = gpio;
+ info->irq = irq;
+
+ info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!info->iobase) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n",
+ ret);
+ goto err_free_gpio;
+ }
+
+ rb153_pata_setup_port(ah);
+
+ ret = ata_host_activate(ah, irq, rb153_pata_irq_handler,
+ IRQF_TRIGGER_LOW, &rb153_pata_sht);
+ if (ret)
+ goto err_free_gpio;
+
+ return 0;
+
+err_free_gpio:
+ gpio_free(gpio);
+
+ return ret;
+}
+
+static int rb153_pata_driver_remove(struct platform_device *pdev)
+{
+ struct ata_host *ah = platform_get_drvdata(pdev);
+ struct rb153_cf_info *info = ah->private_data;
+
+ ata_host_detach(ah);
+ gpio_free(info->gpio_line);
+
+ return 0;
+}
+
+static struct platform_driver rb153_pata_platform_driver = {
+ .probe = rb153_pata_driver_probe,
+ .remove = rb153_pata_driver_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* ------------------------------------------------------------------------ */
+
+#define DRV_INFO DRV_DESC " version " DRV_VERSION
+
+static int __init rb153_pata_module_init(void)
+{
+ printk(KERN_INFO DRV_INFO "\n");
+
+ return platform_driver_register(&rb153_pata_platform_driver);
+}
+
+static void __exit rb153_pata_module_exit(void)
+{
+ platform_driver_unregister(&rb153_pata_platform_driver);
+}
+
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL v2");
+
+module_init(rb153_pata_module_init);
+module_exit(rb153_pata_module_exit);
diff --git a/target/linux/adm5120/files-3.18/drivers/leds/ledtrig-adm5120-switch.c b/target/linux/adm5120/files-3.18/drivers/leds/ledtrig-adm5120-switch.c
new file mode 100644
index 0000000..23a54a0
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/leds/ledtrig-adm5120-switch.c
@@ -0,0 +1,149 @@
+/*
+ * LED ADM5120 Switch Port State Trigger
+ *
+ * Copyright (C) 2007 Bernhard Held <bernhard at bernhardheld.de>
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was based on: drivers/leds/ledtrig-timer.c
+ * Copyright 2005-2006 Openedhand Ltd.
+ * Author: Richard Purdie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <linux/gpio.h>
+
+#include "leds.h"
+
+#define DRV_NAME "port_state"
+#define DRV_DESC "LED ADM5120 Switch Port State Trigger"
+
+struct port_state {
+ char *name;
+ unsigned int value;
+};
+
+#define PORT_STATE(n, v) {.name = (n), .value = (v)}
+
+static struct port_state port_states[] = {
+ PORT_STATE("off", LED_OFF),
+ PORT_STATE("on", LED_FULL),
+ PORT_STATE("flash", ADM5120_GPIO_FLASH),
+ PORT_STATE("link", ADM5120_GPIO_LINK),
+ PORT_STATE("speed", ADM5120_GPIO_SPEED),
+ PORT_STATE("duplex", ADM5120_GPIO_DUPLEX),
+ PORT_STATE("act", ADM5120_GPIO_ACT),
+ PORT_STATE("coll", ADM5120_GPIO_COLL),
+ PORT_STATE("link_act", ADM5120_GPIO_LINK_ACT),
+ PORT_STATE("duplex_coll", ADM5120_GPIO_DUPLEX_COLL),
+ PORT_STATE("10M_act", ADM5120_GPIO_10M_ACT),
+ PORT_STATE("100M_act", ADM5120_GPIO_100M_ACT),
+};
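+
+/*
+ * The trigger exposes a "port_state" sysfs attribute on each LED device:
+ * writing one of the names above selects the corresponding hardware
+ * controlled state, and reading the attribute shows the current selection
+ * in brackets.
+ */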
+
+static ssize_t led_port_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct port_state *state = led_cdev->trigger_data;
+ int len = 0;
+ int i;
+
+ *buf = '\0';
+ for (i = 0; i < ARRAY_SIZE(port_states); i++) {
+ if (&port_states[i] == state)
+ len += sprintf(buf+len, "[%s] ", port_states[i].name);
+ else
+ len += sprintf(buf+len, "%s ", port_states[i].name);
+ }
+ len += sprintf(buf+len, "\n");
+
+ return len;
+}
+
+static ssize_t led_port_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ size_t len;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(port_states); i++) {
+ len = strlen(port_states[i].name);
+ if (strncmp(port_states[i].name, buf, len) != 0)
+ continue;
+
+ if (buf[len] != '\0' && buf[len] != '\n')
+ continue;
+
+ led_cdev->trigger_data = &port_states[i];
+ led_set_brightness(led_cdev, port_states[i].value);
+ return size;
+ }
+
+ return -EINVAL;
+}
+
+static DEVICE_ATTR(port_state, 0644, led_port_state_show,
+ led_port_state_store);
+
+static void adm5120_switch_trig_activate(struct led_classdev *led_cdev)
+{
+ struct port_state *state = port_states;
+ int rc;
+
+ led_cdev->trigger_data = state;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_port_state);
+ if (rc)
+ goto err;
+
+ led_set_brightness(led_cdev, state->value);
+
+ return;
+err:
+ led_cdev->trigger_data = NULL;
+}
+
+static void adm5120_switch_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct port_state *state = led_cdev->trigger_data;
+
+ if (!state)
+ return;
+
+ device_remove_file(led_cdev->dev, &dev_attr_port_state);
+}
+
+static struct led_trigger adm5120_switch_led_trigger = {
+ .name = DRV_NAME,
+ .activate = adm5120_switch_trig_activate,
+ .deactivate = adm5120_switch_trig_deactivate,
+};
+
+static int __init adm5120_switch_trig_init(void)
+{
+ led_trigger_register(&adm5120_switch_led_trigger);
+ return 0;
+}
+
+static void __exit adm5120_switch_trig_exit(void)
+{
+ led_trigger_unregister(&adm5120_switch_led_trigger);
+}
+
+module_init(adm5120_switch_trig_init);
+module_exit(adm5120_switch_trig_exit);
+
+MODULE_AUTHOR("Bernhard Held <bernhard at bernhardheld.de>, "
+ "Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/adm5120/files-3.18/drivers/mtd/maps/adm5120-flash.c b/target/linux/adm5120/files-3.18/drivers/mtd/maps/adm5120-flash.c
new file mode 100644
index 0000000..f6a86f4
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/mtd/maps/adm5120-flash.c
@@ -0,0 +1,482 @@
+/*
+ * Platform driver for NOR flash devices on ADM5120 based boards
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/mtd/map/physmap.c
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-adm5120/adm5120_defs.h>
+#include <asm/mach-adm5120/adm5120_switch.h>
+#include <asm/mach-adm5120/adm5120_mpmc.h>
+#include <asm/mach-adm5120/adm5120_platform.h>
+
+#define DRV_NAME "adm5120-flash"
+#define DRV_DESC "ADM5120 flash MAP driver"
+#define MAX_PARSED_PARTS 8
+
+#ifdef ADM5120_FLASH_DEBUG
+#define MAP_DBG(m, f, a...) printk(KERN_INFO "%s: " f, (m->name) , ## a)
+#else
+#define MAP_DBG(m, f, a...) do {} while (0)
+#endif
+#define MAP_ERR(m, f, a...) printk(KERN_ERR "%s: " f, (m->name) , ## a)
+#define MAP_INFO(m, f, a...) printk(KERN_INFO "%s: " f, (m->name) , ## a)
+
+struct adm5120_map_info {
+ struct map_info map;
+ void (*switch_bank)(unsigned);
+ unsigned long window_size;
+};
+
+struct adm5120_flash_info {
+ struct mtd_info *mtd;
+ struct resource *res;
+ struct platform_device *dev;
+ struct adm5120_map_info amap;
+};
+
+struct flash_desc {
+ u32 phys;
+ u32 srs_shift;
+};
+
+/*
+ * Globals
+ */
+static DEFINE_SPINLOCK(adm5120_flash_spin);
+#define FLASH_LOCK() spin_lock(&adm5120_flash_spin)
+#define FLASH_UNLOCK() spin_unlock(&adm5120_flash_spin)
+
+static u32 flash_bankwidths[4] = { 1, 2, 4, 0 };
+
+static u32 flash_sizes[8] = {
+ 0, 512*1024, 1024*1024, 2*1024*1024,
+ 4*1024*1024, 0, 0, 0
+};
+
+static struct flash_desc flash_descs[2] = {
+ {
+ .phys = ADM5120_SRAM0_BASE,
+ .srs_shift = MEMCTRL_SRS0_SHIFT,
+ }, {
+ .phys = ADM5120_SRAM1_BASE,
+ .srs_shift = MEMCTRL_SRS1_SHIFT,
+ }
+};
+
+static const char * const probe_types[] = {
+ "cfi_probe",
+ "jedec_probe",
+ "map_rom",
+ NULL
+};
+
+static const char * const parse_types[] = {
+ "cmdlinepart",
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ "RedBoot",
+#endif
+#ifdef CONFIG_MTD_MYLOADER_PARTS
+ "MyLoader",
+#endif
+ NULL,
+};
+
+#define BANK_SIZE (2<<20)
+#define BANK_SIZE_MAX (4<<20)
+#define BANK_OFFS_MASK (BANK_SIZE-1)
+#define BANK_START_MASK (~BANK_OFFS_MASK)
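+
+/*
+ * Only BANK_SIZE (2 MiB) of the flash is visible through the memory window
+ * at a time; chips up to BANK_SIZE_MAX are reached by switching the active
+ * bank via the board specific switch_bank() callback from platform data.
+ */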
+
+static inline struct adm5120_map_info *map_to_amap(struct map_info *map)
+{
+ return (struct adm5120_map_info *)map;
+}
+
+static void adm5120_flash_switchbank(struct map_info *map,
+ unsigned long ofs)
+{
+ struct adm5120_map_info *amap = map_to_amap(map);
+ unsigned bank;
+
+ if (amap->switch_bank == NULL)
+ return;
+
+ bank = (ofs & BANK_START_MASK) >> 21;
+ if (bank > 1)
+ BUG();
+
+ MAP_DBG(map, "switching to bank %u, ofs=%lX\n", bank, ofs);
+ amap->switch_bank(bank);
+}
+
+static map_word adm5120_flash_read(struct map_info *map, unsigned long ofs)
+{
+ struct adm5120_map_info *amap = map_to_amap(map);
+ map_word ret;
+
+ MAP_DBG(map, "reading from ofs %lX\n", ofs);
+
+ if (ofs >= amap->window_size)
+ return map_word_ff(map);
+
+ FLASH_LOCK();
+ adm5120_flash_switchbank(map, ofs);
+ ret = inline_map_read(map, (ofs & (amap->window_size-1)));
+ FLASH_UNLOCK();
+
+ return ret;
+}
+
+static void adm5120_flash_write(struct map_info *map, const map_word datum,
+ unsigned long ofs)
+{
+ struct adm5120_map_info *amap = map_to_amap(map);
+
+ MAP_DBG(map, "writing to ofs %lX\n", ofs);
+
+ if (ofs >= amap->window_size)
+ return;
+
+ FLASH_LOCK();
+ adm5120_flash_switchbank(map, ofs);
+ inline_map_write(map, datum, (ofs & (amap->window_size-1)));
+ FLASH_UNLOCK();
+}
+
+static void adm5120_flash_copy_from(struct map_info *map, void *to,
+ unsigned long from, ssize_t len)
+{
+ struct adm5120_map_info *amap = map_to_amap(map);
+ char *p;
+ ssize_t t;
+
+ MAP_DBG(map, "copy_from, to=%lX, from=%lX, len=%lX\n",
+ (unsigned long)to, from, (unsigned long)len);
+
+ if (from >= amap->window_size)
+ return;
+
+ p = (char *)to;
+ while (len > 0) {
+ t = len;
+ if ((from < BANK_SIZE) && ((from+len) > BANK_SIZE))
+ t = BANK_SIZE-from;
+
+ FLASH_LOCK();
+ MAP_DBG(map, "copying %lu byte(s) from %lX to %lX\n",
+ (unsigned long)t, (from & (amap->window_size-1)),
+ (unsigned long)p);
+ adm5120_flash_switchbank(map, from);
+ inline_map_copy_from(map, p, (from & (amap->window_size-1)), t);
+ FLASH_UNLOCK();
+ p += t;
+ from += t;
+ len -= t;
+ }
+}
+
+static int adm5120_flash_initres(struct adm5120_flash_info *info)
+{
+ struct map_info *map = &info->amap.map;
+ int err = 0;
+
+ info->res = request_mem_region(map->phys, info->amap.window_size,
+ map->name);
+ if (info->res == NULL) {
+ MAP_ERR(map, "could not reserve memory region\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ map->virt = ioremap_nocache(map->phys, info->amap.window_size);
+ if (map->virt == NULL) {
+ MAP_ERR(map, "failed to ioremap flash region\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+static int adm5120_flash_initinfo(struct adm5120_flash_info *info,
+ struct platform_device *dev)
+{
+ struct map_info *map = &info->amap.map;
+ struct adm5120_flash_platform_data *pdata = dev->dev.platform_data;
+ struct flash_desc *fdesc;
+ u32 t = 0;
+
+ map->name = dev_name(&dev->dev);
+
+ if (dev->id > 1) {
+ MAP_ERR(map, "invalid flash id\n");
+ goto err_out;
+ }
+
+ fdesc = &flash_descs[dev->id];
+
+ if (pdata)
+ info->amap.window_size = pdata->window_size;
+
+ if (info->amap.window_size == 0) {
+ /* get memory window size */
+ t = SW_READ_REG(SWITCH_REG_MEMCTRL) >> fdesc->srs_shift;
+ t &= MEMCTRL_SRS_MASK;
+ info->amap.window_size = flash_sizes[t];
+ }
+
+ if (info->amap.window_size == 0) {
+ MAP_ERR(map, "unable to determine window size\n");
+ goto err_out;
+ }
+
+ /* get flash bus width */
+ switch (dev->id) {
+ case 0:
+ t = MPMC_READ_REG(SC1) & SC_MW_MASK;
+ break;
+ case 1:
+ t = MPMC_READ_REG(SC0) & SC_MW_MASK;
+ break;
+ }
+ map->bankwidth = flash_bankwidths[t];
+ if (map->bankwidth == 0) {
+ MAP_ERR(map, "invalid bus width detected\n");
+ goto err_out;
+ }
+
+ map->phys = fdesc->phys;
+ map->size = BANK_SIZE_MAX;
+
+ simple_map_init(map);
+ map->read = adm5120_flash_read;
+ map->write = adm5120_flash_write;
+ map->copy_from = adm5120_flash_copy_from;
+
+ if (pdata) {
+ map->set_vpp = pdata->set_vpp;
+ info->amap.switch_bank = pdata->switch_bank;
+ }
+
+ info->dev = dev;
+
+ MAP_INFO(map, "probing at 0x%lX, size:%ldKiB, width:%d bits\n",
+ (unsigned long)map->phys,
+ (unsigned long)info->amap.window_size >> 10,
+ map->bankwidth*8);
+
+ return 0;
+
+err_out:
+ return -ENODEV;
+}
+
+static void adm5120_flash_initbanks(struct adm5120_flash_info *info)
+{
+ struct map_info *map = &info->amap.map;
+
+ if (info->mtd->size <= BANK_SIZE)
+ /* no bank switching needed */
+ return;
+
+ if (info->amap.switch_bank) {
+ info->amap.window_size = info->mtd->size;
+ return;
+ }
+
+ MAP_ERR(map, "reduce visibility from %ldKiB to %ldKiB\n",
+ (unsigned long)map->size >> 10,
+ (unsigned long)info->mtd->size >> 10);
+
+ info->mtd->size = info->amap.window_size;
+}
+
+static int adm5120_flash_remove(struct platform_device *dev)
+{
+ struct adm5120_flash_info *info;
+
+ info = platform_get_drvdata(dev);
+ if (info == NULL)
+ return 0;
+
+ platform_set_drvdata(dev, NULL);
+
+ if (info->mtd != NULL) {
+ mtd_device_unregister(info->mtd);
+ map_destroy(info->mtd);
+ }
+
+ if (info->amap.map.virt != NULL)
+ iounmap(info->amap.map.virt);
+
+ if (info->res != NULL) {
+ release_resource(info->res);
+ kfree(info->res);
+ }
+
+ return 0;
+}
+
+static int adm5120_flash_probe(struct platform_device *dev)
+{
+ struct adm5120_flash_platform_data *pdata;
+ struct adm5120_flash_info *info;
+ struct map_info *map;
+ const char * const *probe_type;
+ int err;
+
+ pdata = dev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&dev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (info == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ platform_set_drvdata(dev, info);
+
+ err = adm5120_flash_initinfo(info, dev);
+ if (err)
+ goto err_out;
+
+ err = adm5120_flash_initres(info);
+ if (err)
+ goto err_out;
+
+ map = &info->amap.map;
+ for (probe_type = probe_types; info->mtd == NULL && *probe_type != NULL;
+ probe_type++)
+ info->mtd = do_map_probe(*probe_type, map);
+
+ if (info->mtd == NULL) {
+ MAP_ERR(map, "map_probe failed\n");
+ err = -ENXIO;
+ goto err_out;
+ }
+
+ adm5120_flash_initbanks(info);
+
+ if (info->mtd->size < info->amap.window_size) {
+ /* readjust resources */
+ iounmap(map->virt);
+ release_resource(info->res);
+ kfree(info->res);
+
+ info->amap.window_size = info->mtd->size;
+ map->size = info->mtd->size;
+ MAP_INFO(map, "reducing map size to %ldKiB\n",
+ (unsigned long)map->size >> 10);
+ err = adm5120_flash_initres(info);
+ if (err)
+ goto err_out;
+ }
+
+ MAP_INFO(map, "found at 0x%lX, size:%ldKiB, width:%d bits\n",
+ (unsigned long)map->phys, (unsigned long)info->mtd->size >> 10,
+ map->bankwidth*8);
+
+ info->mtd->owner = THIS_MODULE;
+
+ err = mtd_device_parse_register(info->mtd, parse_types, 0,
+ pdata->parts, pdata->nr_parts);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ adm5120_flash_remove(dev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static int adm5120_flash_suspend(struct platform_device *dev,
+ pm_message_t state)
+{
+ struct adm5120_flash_info *info = platform_get_drvdata(dev);
+ int ret = 0;
+
+ if (info)
+ ret = mtd_suspend(info->mtd);
+
+ return ret;
+}
+
+static int adm5120_flash_resume(struct platform_device *dev)
+{
+ struct adm5120_flash_info *info = platform_get_drvdata(dev);
+
+ if (info)
+ mtd_resume(info->mtd);
+
+ return 0;
+}
+
+static void adm5120_flash_shutdown(struct platform_device *dev)
+{
+ struct adm5120_flash_info *info = platform_get_drvdata(dev);
+
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
+}
+#endif
+
+static struct platform_driver adm5120_flash_driver = {
+ .probe = adm5120_flash_probe,
+ .remove = adm5120_flash_remove,
+#ifdef CONFIG_PM
+ .suspend = adm5120_flash_suspend,
+ .resume = adm5120_flash_resume,
+ .shutdown = adm5120_flash_shutdown,
+#endif
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+static int __init adm5120_flash_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&adm5120_flash_driver);
+
+ return err;
+}
+
+static void __exit adm5120_flash_exit(void)
+{
+ platform_driver_unregister(&adm5120_flash_driver);
+}
+
+module_init(adm5120_flash_init);
+module_exit(adm5120_flash_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/target/linux/adm5120/files-3.18/drivers/mtd/trxsplit.c b/target/linux/adm5120/files-3.18/drivers/mtd/trxsplit.c
new file mode 100644
index 0000000..76cbdc7
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/mtd/trxsplit.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (C) Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/kmod.h>
+#include <linux/root_dev.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/byteorder/generic.h>
+
+#define PFX "trxsplit: "
+
+#define TRX_MAGIC 0x30524448 /* "HDR0" */
+#define TRX_VERSION 1
+#define TRX_MAX_LEN 0x3A0000
+#define TRX_NO_HEADER 0x1 /* do not write TRX header */
+#define TRX_GZ_FILES 0x2 /* contains individual gzip files */
+#define TRX_MAX_OFFSET 3
+#define TRX_MIN_KERNEL_SIZE (256 * 1024)
+
+struct trx_header {
+ u32 magic; /* "HDR0" */
+ u32 len; /* Length of file including header */
+ u32 crc32; /* 32-bit CRC from flag_version to end of file */
+ u32 flag_version; /* 0:15 flags, 16:31 version */
+ u32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions */
+};
+
+#define TRX_ALIGN 0x1000
+
+static int trx_nr_parts;
+static unsigned long trx_offset;
+static struct mtd_info *trx_mtd;
+static struct mtd_partition trx_parts[TRX_MAX_OFFSET];
+static struct trx_header trx_hdr;
+
+static int trxsplit_refresh_partitions(struct mtd_info *mtd);
+
+static int trxsplit_checktrx(struct mtd_info *mtd, unsigned long offset)
+{
+ size_t retlen;
+ int err;
+
+ err = mtd_read(mtd, offset, sizeof(trx_hdr), &retlen, (void *)&trx_hdr);
+ if (err) {
+ printk(KERN_ALERT PFX "unable to read from '%s'\n", mtd->name);
+ goto err_out;
+ }
+
+ if (retlen != sizeof(trx_hdr)) {
+ printk(KERN_ALERT PFX "reading failed on '%s'\n", mtd->name);
+ goto err_out;
+ }
+
+ trx_hdr.magic = le32_to_cpu(trx_hdr.magic);
+ trx_hdr.len = le32_to_cpu(trx_hdr.len);
+ trx_hdr.crc32 = le32_to_cpu(trx_hdr.crc32);
+ trx_hdr.flag_version = le32_to_cpu(trx_hdr.flag_version);
+ trx_hdr.offsets[0] = le32_to_cpu(trx_hdr.offsets[0]);
+ trx_hdr.offsets[1] = le32_to_cpu(trx_hdr.offsets[1]);
+ trx_hdr.offsets[2] = le32_to_cpu(trx_hdr.offsets[2]);
+
+ /* sanity checks */
+ if (trx_hdr.magic != TRX_MAGIC)
+ goto err_out;
+
+ if (trx_hdr.len > mtd->size - offset)
+ goto err_out;
+
+ /* TODO: add crc32 checking too? */
+
+ return 0;
+
+err_out:
+ return -1;
+}
+
+static void trxsplit_findtrx(struct mtd_info *mtd)
+{
+ unsigned long offset;
+ int err;
+
+ printk(KERN_INFO PFX "searching TRX header in '%s'\n", mtd->name);
+
+ err = 0;
+ for (offset = 0; offset < mtd->size; offset += TRX_ALIGN) {
+ err = trxsplit_checktrx(mtd, offset);
+ if (err == 0)
+ break;
+ }
+
+ if (err)
+ return;
+
+ printk(KERN_INFO PFX "TRX header found at 0x%lX\n", offset);
+
+ trx_mtd = mtd;
+ trx_offset = offset;
+}
+
+static void trxsplit_create_partitions(struct mtd_info *mtd)
+{
+ struct mtd_partition *part = trx_parts;
+ int err;
+ int i;
+
+ for (i = 0; i < TRX_MAX_OFFSET; i++) {
+ part = &trx_parts[i];
+ if (trx_hdr.offsets[i] == 0)
+ continue;
+ part->offset = trx_offset + trx_hdr.offsets[i];
+ trx_nr_parts++;
+ }
+
+ for (i = 0; i < trx_nr_parts-1; i++)
+ trx_parts[i].size = trx_parts[i+1].offset - trx_parts[i].offset;
+
+ trx_parts[i].size = mtd->size - trx_parts[i].offset;
+
+ i = 0;
+ part = &trx_parts[i];
+ if (part->size < TRX_MIN_KERNEL_SIZE) {
+ part->name = "loader";
+ i++;
+ }
+
+ part = &trx_parts[i];
+ part->name = "kernel";
+ i++;
+
+ part = &trx_parts[i];
+ part->name = "rootfs";
+
+ err = mtd_device_register(mtd, trx_parts, trx_nr_parts);
+ if (err) {
+ printk(KERN_ALERT PFX "adding TRX partitions failed\n");
+ return;
+ }
+
+ mtd->refresh_device = trxsplit_refresh_partitions;
+}
+
+static int trxsplit_refresh_partitions(struct mtd_info *mtd)
+{
+ printk(KERN_INFO PFX "refreshing TRX partitions in '%s' (%d,%d)\n",
+ mtd->name, MTD_BLOCK_MAJOR, mtd->index);
+
+ /* remove old partitions */
+ mtd_device_unregister(mtd);
+
+ trxsplit_findtrx(mtd);
+ if (!trx_mtd)
+ goto err;
+
+ trxsplit_create_partitions(trx_mtd);
+ return 1;
+
+err:
+ return 0;
+}
+
+static void __init trxsplit_add_mtd(struct mtd_info *mtd)
+{
+ if (mtd->type != MTD_NORFLASH) {
+ printk(KERN_INFO PFX "'%s' is not a NOR flash, skipped\n",
+ mtd->name);
+ return;
+ }
+
+ if (!trx_mtd)
+ trxsplit_findtrx(mtd);
+}
+
+static void __init trxsplit_remove_mtd(struct mtd_info *mtd)
+{
+ /* nothing to do */
+}
+
+static struct mtd_notifier trxsplit_notifier __initdata = {
+ .add = trxsplit_add_mtd,
+ .remove = trxsplit_remove_mtd,
+};
+
+static void __init trxsplit_scan(void)
+{
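+ /*
+ * register_mtd_user() replays the .add callback for every MTD device
+ * that is already registered, so registering and immediately
+ * unregistering the notifier acts as a one-shot scan of all devices.
+ */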
+ register_mtd_user(&trxsplit_notifier);
+ unregister_mtd_user(&trxsplit_notifier);
+}
+
+static int __init trxsplit_init(void)
+{
+ trxsplit_scan();
+
+ if (trx_mtd) {
+ printk(KERN_INFO PFX "creating TRX partitions in '%s' "
+ "(%d,%d)\n", trx_mtd->name, MTD_BLOCK_MAJOR,
+ trx_mtd->index);
+ trxsplit_create_partitions(trx_mtd);
+ }
+
+ return 0;
+}
+
+late_initcall(trxsplit_init);
diff --git a/target/linux/adm5120/files-3.18/drivers/net/adm5120sw.c b/target/linux/adm5120/files-3.18/drivers/net/adm5120sw.c
new file mode 100644
index 0000000..7fbabb0
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/net/adm5120sw.c
@@ -0,0 +1,1219 @@
+/*
+ * ADM5120 built-in ethernet switch driver
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This code was based on a driver for Linux 2.6.xx by Jeroen Vreeken.
+ * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
+ * NAPI extension for the Jeroen's driver
+ * Copyright Thomas Langer (Thomas.Langer@infineon.com), 2007
+ * Copyright Friedrich Beckmann (Friedrich.Beckmann@infineon.com), 2007
+ * Inspiration for the Jeroen's driver came from the ADMtek 2.4 driver.
+ * Copyright ADMtek Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/mipsregs.h>
+
+#include <asm/mach-adm5120/adm5120_info.h>
+#include <asm/mach-adm5120/adm5120_defs.h>
+#include <asm/mach-adm5120/adm5120_switch.h>
+
+#include "adm5120sw.h"
+#include <linux/dma-mapping.h>
+
+#define DRV_NAME "adm5120-switch"
+#define DRV_DESC "ADM5120 built-in ethernet switch driver"
+#define DRV_VERSION "0.1.1"
+
+#define CONFIG_ADM5120_SWITCH_NAPI 1
+#undef CONFIG_ADM5120_SWITCH_DEBUG
+
+/* ------------------------------------------------------------------------ */
+
+#ifdef CONFIG_ADM5120_SWITCH_DEBUG
+#define SW_DBG(f, a...) printk(KERN_DEBUG "%s: " f, DRV_NAME , ## a)
+#else
+#define SW_DBG(f, a...) do {} while (0)
+#endif
+#define SW_ERR(f, a...) printk(KERN_ERR "%s: " f, DRV_NAME , ## a)
+#define SW_INFO(f, a...) printk(KERN_INFO "%s: " f, DRV_NAME , ## a)
+
+#define SWITCH_NUM_PORTS 6
+#define ETH_CSUM_LEN 4
+
+#define RX_MAX_PKTLEN 1550
+#define RX_RING_SIZE 64
+
+#define TX_RING_SIZE 32
+#define TX_QUEUE_LEN 28 /* Limit ring entries actually used. */
+#define TX_TIMEOUT (HZ * 400)
+
+#define RX_DESCS_SIZE (RX_RING_SIZE * sizeof(struct dma_desc))
+#define RX_SKBS_SIZE (RX_RING_SIZE * sizeof(struct sk_buff *))
+#define TX_DESCS_SIZE (TX_RING_SIZE * sizeof(struct dma_desc))
+#define TX_SKBS_SIZE (TX_RING_SIZE * sizeof(struct sk_buff *))
+
+#define SKB_ALLOC_LEN (RX_MAX_PKTLEN + 32)
+#define SKB_RESERVE_LEN (NET_IP_ALIGN + NET_SKB_PAD)
+
+#define SWITCH_INTS_HIGH (SWITCH_INT_SHD | SWITCH_INT_RHD | SWITCH_INT_HDF)
+#define SWITCH_INTS_LOW (SWITCH_INT_SLD | SWITCH_INT_RLD | SWITCH_INT_LDF)
+#define SWITCH_INTS_ERR (SWITCH_INT_RDE | SWITCH_INT_SDE | SWITCH_INT_CPUH)
+#define SWITCH_INTS_Q (SWITCH_INT_P0QF | SWITCH_INT_P1QF | SWITCH_INT_P2QF | \
+ SWITCH_INT_P3QF | SWITCH_INT_P4QF | SWITCH_INT_P5QF | \
+ SWITCH_INT_CPQF | SWITCH_INT_GQF)
+
+#define SWITCH_INTS_ALL (SWITCH_INTS_HIGH | SWITCH_INTS_LOW | \
+ SWITCH_INTS_ERR | SWITCH_INTS_Q | \
+ SWITCH_INT_MD | SWITCH_INT_PSC)
+
+#define SWITCH_INTS_USED (SWITCH_INTS_LOW | SWITCH_INT_PSC)
+#define SWITCH_INTS_POLL (SWITCH_INT_RLD | SWITCH_INT_LDF | SWITCH_INT_SLD)
+
+/* ------------------------------------------------------------------------ */
+
+struct adm5120_if_priv {
+ struct net_device *dev;
+
+ unsigned int vlan_no;
+ unsigned int port_mask;
+
+#ifdef CONFIG_ADM5120_SWITCH_NAPI
+ struct napi_struct napi;
+#endif
+};
+
+struct dma_desc {
+ __u32 buf1;
+#define DESC_OWN (1UL << 31) /* Owned by the switch */
+#define DESC_EOR (1UL << 28) /* End of Ring */
+#define DESC_ADDR_MASK 0x1FFFFFF
+#define DESC_ADDR(x) ((__u32)(x) & DESC_ADDR_MASK)
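+/* The descriptor address field is 25 bits wide; DESC_ADDR() masks a KSEG0
+ * virtual address (which maps 1:1 onto physical memory) down to the physical
+ * buffer address the switch expects. */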
+ __u32 buf2;
+#define DESC_BUF2_EN (1UL << 31) /* Buffer 2 enable */
+ __u32 buflen;
+ __u32 misc;
+/* definitions for tx/rx descriptors */
+#define DESC_PKTLEN_SHIFT 16
+#define DESC_PKTLEN_MASK 0x7FF
+/* tx descriptor specific part */
+#define DESC_CSUM (1UL << 31) /* Append checksum */
+#define DESC_DSTPORT_SHIFT 8
+#define DESC_DSTPORT_MASK 0x3F
+#define DESC_VLAN_MASK 0x3F
+/* rx descriptor specific part */
+#define DESC_SRCPORT_SHIFT 12
+#define DESC_SRCPORT_MASK 0x7
+#define DESC_DA_MASK 0x3
+#define DESC_DA_SHIFT 4
+#define DESC_IPCSUM_FAIL (1UL << 3) /* IP checksum fail */
+#define DESC_VLAN_TAG (1UL << 2) /* VLAN tag present */
+#define DESC_TYPE_MASK 0x3 /* mask for Packet type */
+#define DESC_TYPE_IP 0x0 /* IP packet */
+#define DESC_TYPE_PPPoE 0x1 /* PPPoE packet */
+} __attribute__ ((aligned(16)));
+
+/* ------------------------------------------------------------------------ */
+
+static int adm5120_nrdevs;
+
+static struct net_device *adm5120_devs[SWITCH_NUM_PORTS];
+/* Lookup table port -> device */
+static struct net_device *adm5120_port[SWITCH_NUM_PORTS];
+
+static struct dma_desc *txl_descs;
+static struct dma_desc *rxl_descs;
+
+static dma_addr_t txl_descs_dma;
+static dma_addr_t rxl_descs_dma;
+
+static struct sk_buff **txl_skbuff;
+static struct sk_buff **rxl_skbuff;
+
+static unsigned int cur_rxl, dirty_rxl; /* producer/consumer ring indices */
+static unsigned int cur_txl, dirty_txl;
+
+static unsigned int sw_used;
+
+static DEFINE_SPINLOCK(tx_lock);
+
+/* ------------------------------------------------------------------------ */
+
+static inline u32 sw_read_reg(u32 reg)
+{
+ return __raw_readl((void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
+}
+
+static inline void sw_write_reg(u32 reg, u32 val)
+{
+ __raw_writel(val, (void __iomem *)KSEG1ADDR(ADM5120_SWITCH_BASE)+reg);
+}
+
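+/* A set bit in SWITCH_REG_INT_MASK disables the corresponding interrupt
+ * source, so sw_int_mask() disables sources and sw_int_unmask() enables
+ * them. */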
+static inline void sw_int_mask(u32 mask)
+{
+ u32 t;
+
+ t = sw_read_reg(SWITCH_REG_INT_MASK);
+ t |= mask;
+ sw_write_reg(SWITCH_REG_INT_MASK, t);
+}
+
+static inline void sw_int_unmask(u32 mask)
+{
+ u32 t;
+
+ t = sw_read_reg(SWITCH_REG_INT_MASK);
+ t &= ~mask;
+ sw_write_reg(SWITCH_REG_INT_MASK, t);
+}
+
+static inline void sw_int_ack(u32 mask)
+{
+ sw_write_reg(SWITCH_REG_INT_STATUS, mask);
+}
+
+static inline u32 sw_int_status(void)
+{
+ u32 t;
+
+ t = sw_read_reg(SWITCH_REG_INT_STATUS);
+ t &= ~sw_read_reg(SWITCH_REG_INT_MASK);
+ return t;
+}
+
+static inline u32 desc_get_srcport(struct dma_desc *desc)
+{
+ return (desc->misc >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK;
+}
+
+static inline u32 desc_get_pktlen(struct dma_desc *desc)
+{
+ return (desc->misc >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK;
+}
+
+static inline int desc_ipcsum_fail(struct dma_desc *desc)
+{
+ return ((desc->misc & DESC_IPCSUM_FAIL) != 0);
+}
+
+/* ------------------------------------------------------------------------ */
+
+#ifdef CONFIG_ADM5120_SWITCH_DEBUG
+static void sw_dump_desc(char *label, struct dma_desc *desc, int tx)
+{
+ u32 t;
+
+ SW_DBG("%s %s desc/%p\n", label, tx ? "tx" : "rx", desc);
+
+ t = desc->buf1;
+ SW_DBG(" buf1 %08X addr=%08X; len=%08X %s%s\n", t,
+ t & DESC_ADDR_MASK,
+ desc->buflen,
+ (t & DESC_OWN) ? "SWITCH" : "CPU",
+ (t & DESC_EOR) ? " RE" : "");
+
+ t = desc->buf2;
+ SW_DBG(" buf2 %08X addr=%08X%s\n", desc->buf2,
+ t & DESC_ADDR_MASK,
+ (t & DESC_BUF2_EN) ? " EN" : "");
+
+ t = desc->misc;
+ if (tx)
+ SW_DBG(" misc %08X%s pktlen=%04X ports=%02X vlan=%02X\n", t,
+ (t & DESC_CSUM) ? " CSUM" : "",
+ (t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
+ (t >> DESC_DSTPORT_SHIFT) & DESC_DSTPORT_MASK,
+ t & DESC_VLAN_MASK);
+ else
+ SW_DBG(" misc %08X pktlen=%04X port=%d DA=%d%s%s type=%d\n",
+ t,
+ (t >> DESC_PKTLEN_SHIFT) & DESC_PKTLEN_MASK,
+ (t >> DESC_SRCPORT_SHIFT) & DESC_SRCPORT_MASK,
+ (t >> DESC_DA_SHIFT) & DESC_DA_MASK,
+ (t & DESC_IPCSUM_FAIL) ? " IPCF" : "",
+ (t & DESC_VLAN_TAG) ? " VLAN" : "",
+ (t & DESC_TYPE_MASK));
+}
+
+static void sw_dump_intr_mask(char *label, u32 mask)
+{
+ SW_DBG("%s %08X%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ label, mask,
+ (mask & SWITCH_INT_SHD) ? " SHD" : "",
+ (mask & SWITCH_INT_SLD) ? " SLD" : "",
+ (mask & SWITCH_INT_RHD) ? " RHD" : "",
+ (mask & SWITCH_INT_RLD) ? " RLD" : "",
+ (mask & SWITCH_INT_HDF) ? " HDF" : "",
+ (mask & SWITCH_INT_LDF) ? " LDF" : "",
+ (mask & SWITCH_INT_P0QF) ? " P0QF" : "",
+ (mask & SWITCH_INT_P1QF) ? " P1QF" : "",
+ (mask & SWITCH_INT_P2QF) ? " P2QF" : "",
+ (mask & SWITCH_INT_P3QF) ? " P3QF" : "",
+ (mask & SWITCH_INT_P4QF) ? " P4QF" : "",
+ (mask & SWITCH_INT_CPQF) ? " CPQF" : "",
+ (mask & SWITCH_INT_GQF) ? " GQF" : "",
+ (mask & SWITCH_INT_MD) ? " MD" : "",
+ (mask & SWITCH_INT_BCS) ? " BCS" : "",
+ (mask & SWITCH_INT_PSC) ? " PSC" : "",
+ (mask & SWITCH_INT_ID) ? " ID" : "",
+ (mask & SWITCH_INT_W0TE) ? " W0TE" : "",
+ (mask & SWITCH_INT_W1TE) ? " W1TE" : "",
+ (mask & SWITCH_INT_RDE) ? " RDE" : "",
+ (mask & SWITCH_INT_SDE) ? " SDE" : "",
+ (mask & SWITCH_INT_CPUH) ? " CPUH" : "");
+}
+
+static void sw_dump_regs(void)
+{
+ u32 t;
+
+ t = sw_read_reg(SWITCH_REG_PHY_STATUS);
+ SW_DBG("phy_status: %08X\n", t);
+
+ t = sw_read_reg(SWITCH_REG_CPUP_CONF);
+ SW_DBG("cpup_conf: %08X%s%s%s\n", t,
+ (t & CPUP_CONF_DCPUP) ? " DCPUP" : "",
+ (t & CPUP_CONF_CRCP) ? " CRCP" : "",
+ (t & CPUP_CONF_BTM) ? " BTM" : "");
+
+ t = sw_read_reg(SWITCH_REG_PORT_CONF0);
+ SW_DBG("port_conf0: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_PORT_CONF1);
+ SW_DBG("port_conf1: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_PORT_CONF2);
+ SW_DBG("port_conf2: %08X\n", t);
+
+ t = sw_read_reg(SWITCH_REG_VLAN_G1);
+ SW_DBG("vlan g1: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_VLAN_G2);
+ SW_DBG("vlan g2: %08X\n", t);
+
+ t = sw_read_reg(SWITCH_REG_BW_CNTL0);
+ SW_DBG("bw_cntl0: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_BW_CNTL1);
+ SW_DBG("bw_cntl1: %08X\n", t);
+
+ t = sw_read_reg(SWITCH_REG_PHY_CNTL0);
+ SW_DBG("phy_cntl0: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_PHY_CNTL1);
+ SW_DBG("phy_cntl1: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_PHY_CNTL2);
+ SW_DBG("phy_cntl2: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
+ SW_DBG("phy_cntl3: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_PHY_CNTL4);
+ SW_DBG("phy_cntl4: %08X\n", t);
+
+ t = sw_read_reg(SWITCH_REG_INT_STATUS);
+ sw_dump_intr_mask("int_status: ", t);
+
+ t = sw_read_reg(SWITCH_REG_INT_MASK);
+ sw_dump_intr_mask("int_mask: ", t);
+
+ t = sw_read_reg(SWITCH_REG_SHDA);
+ SW_DBG("shda: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_SLDA);
+ SW_DBG("slda: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_RHDA);
+ SW_DBG("rhda: %08X\n", t);
+ t = sw_read_reg(SWITCH_REG_RLDA);
+ SW_DBG("rlda: %08X\n", t);
+}
+#else
+static inline void sw_dump_desc(char *label, struct dma_desc *desc, int tx) {}
+static void sw_dump_intr_mask(char *label, u32 mask) {}
+static inline void sw_dump_regs(void) {}
+#endif /* CONFIG_ADM5120_SWITCH_DEBUG */
+
+/* ------------------------------------------------------------------------ */
+
+static inline void adm5120_rx_dma_update(struct dma_desc *desc,
+ struct sk_buff *skb, int end)
+{
+ desc->misc = 0;
+ desc->buf2 = 0;
+ desc->buflen = RX_MAX_PKTLEN;
+ desc->buf1 = DESC_ADDR(skb->data) |
+ DESC_OWN | (end ? DESC_EOR : 0);
+}
+
+static void adm5120_switch_rx_refill(void)
+{
+ unsigned int entry;
+
+ for (; cur_rxl - dirty_rxl > 0; dirty_rxl++) {
+ struct dma_desc *desc;
+ struct sk_buff *skb;
+
+ entry = dirty_rxl % RX_RING_SIZE;
+ desc = &rxl_descs[entry];
+
+ skb = rxl_skbuff[entry];
+ if (skb == NULL) {
+ skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
+ if (skb) {
+ skb_reserve(skb, SKB_RESERVE_LEN);
+ rxl_skbuff[entry] = skb;
+ } else {
+ SW_ERR("no memory for skb\n");
+ desc->buflen = 0;
+ desc->buf2 = 0;
+ desc->misc = 0;
+ desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN;
+ break;
+ }
+ }
+
+ desc->buf2 = 0;
+ desc->buflen = RX_MAX_PKTLEN;
+ desc->misc = 0;
+ desc->buf1 = (desc->buf1 & DESC_EOR) | DESC_OWN |
+ DESC_ADDR(skb->data);
+ }
+}
+
+static int adm5120_switch_rx(int limit)
+{
+ unsigned int done = 0;
+
+ SW_DBG("rx start, limit=%d, cur_rxl=%u, dirty_rxl=%u\n",
+ limit, cur_rxl, dirty_rxl);
+
+ while (done < limit) {
+ int entry = cur_rxl % RX_RING_SIZE;
+ struct dma_desc *desc = &rxl_descs[entry];
+ struct net_device *rdev;
+ unsigned int port;
+
+ if (desc->buf1 & DESC_OWN)
+ break;
+
+ if (dirty_rxl + RX_RING_SIZE == cur_rxl)
+ break;
+
+ port = desc_get_srcport(desc);
+ rdev = adm5120_port[port];
+
+ SW_DBG("rx descriptor %u, desc=%p, skb=%p\n", entry, desc,
+ rxl_skbuff[entry]);
+
+ if ((rdev) && netif_running(rdev)) {
+ struct sk_buff *skb = rxl_skbuff[entry];
+ int pktlen;
+
+ pktlen = desc_get_pktlen(desc);
+ pktlen -= ETH_CSUM_LEN;
+
+ if ((pktlen == 0) || desc_ipcsum_fail(desc)) {
+ rdev->stats.rx_errors++;
+ if (pktlen == 0)
+ rdev->stats.rx_length_errors++;
+ if (desc_ipcsum_fail(desc))
+ rdev->stats.rx_crc_errors++;
+ SW_DBG("rx error, recycling skb %u\n", entry);
+ } else {
+ skb_put(skb, pktlen);
+
+ skb->dev = rdev;
+ skb->protocol = eth_type_trans(skb, rdev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ dma_cache_wback_inv((unsigned long)skb->data,
+ skb->len);
+
+#ifdef CONFIG_ADM5120_SWITCH_NAPI
+ netif_receive_skb(skb);
+#else
+ netif_rx(skb);
+#endif
+
+ rdev->last_rx = jiffies;
+ rdev->stats.rx_packets++;
+ rdev->stats.rx_bytes += pktlen;
+
+ rxl_skbuff[entry] = NULL;
+ done++;
+ }
+ } else {
+ SW_DBG("no rx device, recycling skb %u\n", entry);
+ }
+
+ cur_rxl++;
+ if (cur_rxl - dirty_rxl > RX_RING_SIZE / 4)
+ adm5120_switch_rx_refill();
+ }
+
+ adm5120_switch_rx_refill();
+
+ SW_DBG("rx finished, cur_rxl=%u, dirty_rxl=%u, processed %d\n",
+ cur_rxl, dirty_rxl, done);
+
+ return done;
+}
+
+static void adm5120_switch_tx(void)
+{
+ unsigned int entry;
+
+ spin_lock(&tx_lock);
+ entry = dirty_txl % TX_RING_SIZE;
+ while (dirty_txl != cur_txl) {
+ struct dma_desc *desc = &txl_descs[entry];
+ struct sk_buff *skb = txl_skbuff[entry];
+
+ if (desc->buf1 & DESC_OWN)
+ break;
+
+ if (netif_running(skb->dev)) {
+ skb->dev->stats.tx_bytes += skb->len;
+ skb->dev->stats.tx_packets++;
+ }
+
+ dev_kfree_skb_irq(skb);
+ txl_skbuff[entry] = NULL;
+ entry = (++dirty_txl) % TX_RING_SIZE;
+ }
+
+ if ((cur_txl - dirty_txl) < TX_QUEUE_LEN - 4) {
+ int i;
+ for (i = 0; i < SWITCH_NUM_PORTS; i++) {
+ if (!adm5120_devs[i])
+ continue;
+ netif_wake_queue(adm5120_devs[i]);
+ }
+ }
+ spin_unlock(&tx_lock);
+}
+
+#ifdef CONFIG_ADM5120_SWITCH_NAPI
+static int adm5120_if_poll(struct napi_struct *napi, int limit)
+{
+ struct adm5120_if_priv *priv = container_of(napi,
+ struct adm5120_if_priv, napi);
+ struct net_device *dev __maybe_unused = priv->dev;
+ int done;
+ u32 status;
+
+ sw_int_ack(SWITCH_INTS_POLL);
+
+ SW_DBG("%s: processing TX ring\n", dev->name);
+ adm5120_switch_tx();
+
+ SW_DBG("%s: processing RX ring\n", dev->name);
+ done = adm5120_switch_rx(limit);
+
+ status = sw_int_status() & SWITCH_INTS_POLL;
+ if ((done < limit) && (!status)) {
+ SW_DBG("disable polling mode for %s\n", dev->name);
+ napi_complete(napi);
+ sw_int_unmask(SWITCH_INTS_POLL);
+ return done;
+ }
+
+ SW_DBG("%s still in polling mode, done=%d, status=%x\n",
+ dev->name, done, status);
+ return limit;
+}
+#endif /* CONFIG_ADM5120_SWITCH_NAPI */
+
+
+static irqreturn_t adm5120_switch_irq(int irq, void *dev_id)
+{
+ u32 status;
+
+ status = sw_int_status();
+ status &= SWITCH_INTS_ALL;
+ if (!status)
+ return IRQ_NONE;
+
+#ifdef CONFIG_ADM5120_SWITCH_NAPI
+ sw_int_ack(status & ~SWITCH_INTS_POLL);
+
+ if (status & SWITCH_INTS_POLL) {
+ struct net_device *dev = dev_id;
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+
+ sw_dump_intr_mask("poll ints", status);
+ SW_DBG("enable polling mode for %s\n", dev->name);
+ sw_int_mask(SWITCH_INTS_POLL);
+ napi_schedule(&priv->napi);
+ }
+#else
+ sw_int_ack(status);
+
+ if (status & (SWITCH_INT_RLD | SWITCH_INT_LDF))
+ adm5120_switch_rx(RX_RING_SIZE);
+
+ if (status & SWITCH_INT_SLD)
+ adm5120_switch_tx();
+#endif
+
+ return IRQ_HANDLED;
+}
+
+static void adm5120_set_bw(char *matrix)
+{
+ unsigned long val;
+
+ /* Port 0 to 3 are set using the bandwidth control 0 register */
+ val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
+ sw_write_reg(SWITCH_REG_BW_CNTL0, val);
+
+ /* Port 4 and 5 are set using the bandwidth control 1 register */
+ val = matrix[4];
+ if (matrix[5] == 1)
+ sw_write_reg(SWITCH_REG_BW_CNTL1, val | 0x80000000);
+ else
+ sw_write_reg(SWITCH_REG_BW_CNTL1, val & ~0x80000000);
+
+ SW_DBG("D: ctl0 0x%ux, ctl1 0x%ux\n", sw_read_reg(SWITCH_REG_BW_CNTL0),
+ sw_read_reg(SWITCH_REG_BW_CNTL1));
+}
+
+static void adm5120_switch_tx_ring_reset(struct dma_desc *desc,
+ struct sk_buff **skbl, int num)
+{
+ memset(desc, 0, num * sizeof(*desc));
+ desc[num-1].buf1 |= DESC_EOR;
+ memset(skbl, 0, sizeof(struct sk_buff *) * num);
+
+ cur_txl = 0;
+ dirty_txl = 0;
+}
+
+static void adm5120_switch_rx_ring_reset(struct dma_desc *desc,
+ struct sk_buff **skbl, int num)
+{
+ int i;
+
+ memset(desc, 0, num * sizeof(*desc));
+ for (i = 0; i < num; i++) {
+ skbl[i] = dev_alloc_skb(SKB_ALLOC_LEN);
+ if (!skbl[i]) {
+ i = num;
+ break;
+ }
+ skb_reserve(skbl[i], SKB_RESERVE_LEN);
+ adm5120_rx_dma_update(&desc[i], skbl[i], (num - 1 == i));
+ }
+
+ cur_rxl = 0;
+ dirty_rxl = 0;
+}
+
+static int adm5120_switch_tx_ring_alloc(void)
+{
+ int err;
+
+ txl_descs = dma_alloc_coherent(NULL, TX_DESCS_SIZE, &txl_descs_dma,
+ GFP_ATOMIC);
+ if (!txl_descs) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ txl_skbuff = kzalloc(TX_SKBS_SIZE, GFP_KERNEL);
+ if (!txl_skbuff) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return err;
+}
+
+static void adm5120_switch_tx_ring_free(void)
+{
+ int i;
+
+ if (txl_skbuff) {
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (txl_skbuff[i])
+ kfree_skb(txl_skbuff[i]);
+ kfree(txl_skbuff);
+ }
+
+ if (txl_descs)
+ dma_free_coherent(NULL, TX_DESCS_SIZE, txl_descs,
+ txl_descs_dma);
+}
+
+static int adm5120_switch_rx_ring_alloc(void)
+{
+ int err;
+ int i;
+
+ /* init RX ring */
+ rxl_descs = dma_alloc_coherent(NULL, RX_DESCS_SIZE, &rxl_descs_dma,
+ GFP_ATOMIC);
+ if (!rxl_descs) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ rxl_skbuff = kzalloc(RX_SKBS_SIZE, GFP_KERNEL);
+ if (!rxl_skbuff) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ skb = alloc_skb(SKB_ALLOC_LEN, GFP_ATOMIC);
+ if (!skb) {
+ err = -ENOMEM;
+ goto err;
+ }
+ rxl_skbuff[i] = skb;
+ skb_reserve(skb, SKB_RESERVE_LEN);
+ }
+
+ return 0;
+
+err:
+ return err;
+}
+
+static void adm5120_switch_rx_ring_free(void)
+{
+ int i;
+
+ if (rxl_skbuff) {
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (rxl_skbuff[i])
+ kfree_skb(rxl_skbuff[i]);
+ kfree(rxl_skbuff);
+ }
+
+ if (rxl_descs)
+ dma_free_coherent(NULL, RX_DESCS_SIZE, rxl_descs,
+ rxl_descs_dma);
+}
+
+static void adm5120_write_mac(struct net_device *dev)
+{
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+ unsigned char *mac = dev->dev_addr;
+ u32 t;
+
+ t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT) |
+ (mac[4] << MAC_WT1_MAC4_SHIFT) | (mac[5] << MAC_WT1_MAC5_SHIFT);
+ sw_write_reg(SWITCH_REG_MAC_WT1, t);
+
+ t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
+ MAC_WT0_MAWC | MAC_WT0_WVE | (priv->vlan_no<<3);
+
+ sw_write_reg(SWITCH_REG_MAC_WT0, t);
+
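+ /* busy-wait until the switch signals completion of the MAC table write */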
+ while (!(sw_read_reg(SWITCH_REG_MAC_WT0) & MAC_WT0_MWD))
+ ;
+}
+
+static void adm5120_set_vlan(char *matrix)
+{
+ unsigned long val;
+ int vlan_port, port;
+
+ val = matrix[0] + (matrix[1]<<8) + (matrix[2]<<16) + (matrix[3]<<24);
+ sw_write_reg(SWITCH_REG_VLAN_G1, val);
+ val = matrix[4] + (matrix[5]<<8);
+ sw_write_reg(SWITCH_REG_VLAN_G2, val);
+
+ /* Now set/update the port vs. device lookup table */
+ for (port = 0; port < SWITCH_NUM_PORTS; port++) {
+ for (vlan_port = 0; vlan_port < SWITCH_NUM_PORTS; vlan_port++)
+ if (matrix[vlan_port] & (1 << port))
+ break;
+ if (vlan_port < SWITCH_NUM_PORTS)
+ adm5120_port[port] = adm5120_devs[vlan_port];
+ else
+ adm5120_port[port] = NULL;
+ }
+}
+
+static void adm5120_switch_set_vlan_mac(unsigned int vlan, unsigned char *mac)
+{
+ u32 t;
+
+ t = mac[2] | (mac[3] << MAC_WT1_MAC3_SHIFT)
+ | (mac[4] << MAC_WT1_MAC4_SHIFT)
+ | (mac[5] << MAC_WT1_MAC5_SHIFT);
+ sw_write_reg(SWITCH_REG_MAC_WT1, t);
+
+ t = (mac[0] << MAC_WT0_MAC0_SHIFT) | (mac[1] << MAC_WT0_MAC1_SHIFT) |
+ MAC_WT0_MAWC | MAC_WT0_WVE | (vlan << MAC_WT0_WVN_SHIFT) |
+ (MAC_WT0_WAF_STATIC << MAC_WT0_WAF_SHIFT);
+ sw_write_reg(SWITCH_REG_MAC_WT0, t);
+
+ do {
+ t = sw_read_reg(SWITCH_REG_MAC_WT0);
+ } while ((t & MAC_WT0_MWD) == 0);
+}
+
+static void adm5120_switch_set_vlan_ports(unsigned int vlan, u32 ports)
+{
+ unsigned int reg;
+ u32 t;
+
+ if (vlan < 4)
+ reg = SWITCH_REG_VLAN_G1;
+ else {
+ vlan -= 4;
+ reg = SWITCH_REG_VLAN_G2;
+ }
+
+ t = sw_read_reg(reg);
+ t &= ~(0xFF << (vlan*8));
+ t |= (ports << (vlan*8));
+ sw_write_reg(reg, t);
+}
+
+/* ------------------------------------------------------------------------ */
+
+#ifdef CONFIG_ADM5120_SWITCH_NAPI
+static inline void adm5120_if_napi_enable(struct net_device *dev)
+{
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+ napi_enable(&priv->napi);
+}
+
+static inline void adm5120_if_napi_disable(struct net_device *dev)
+{
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+ napi_disable(&priv->napi);
+}
+#else
+static inline void adm5120_if_napi_enable(struct net_device *dev) {}
+static inline void adm5120_if_napi_disable(struct net_device *dev) {}
+#endif /* CONFIG_ADM5120_SWITCH_NAPI */
+
+static int adm5120_if_open(struct net_device *dev)
+{
+ u32 t;
+ int err;
+ int i;
+
+ adm5120_if_napi_enable(dev);
+
+ err = request_irq(dev->irq, adm5120_switch_irq, IRQF_SHARED,
+ dev->name, dev);
+ if (err) {
+ SW_ERR("unable to get irq for %s\n", dev->name);
+ goto err;
+ }
+
+ if (!sw_used++)
+ /* enable interrupts on first open */
+ sw_int_unmask(SWITCH_INTS_USED);
+
+ /* enable (additional) port */
+ t = sw_read_reg(SWITCH_REG_PORT_CONF0);
+ for (i = 0; i < SWITCH_NUM_PORTS; i++) {
+ if (dev == adm5120_devs[i])
+ t &= ~adm5120_eth_vlans[i];
+ }
+ sw_write_reg(SWITCH_REG_PORT_CONF0, t);
+
+ netif_start_queue(dev);
+
+ return 0;
+
+err:
+ adm5120_if_napi_disable(dev);
+ return err;
+}
+
+static int adm5120_if_stop(struct net_device *dev)
+{
+ u32 t;
+ int i;
+
+ netif_stop_queue(dev);
+ adm5120_if_napi_disable(dev);
+
+ /* disable port if not assigned to other devices */
+ t = sw_read_reg(SWITCH_REG_PORT_CONF0);
+ t |= SWITCH_PORTS_NOCPU;
+ for (i = 0; i < SWITCH_NUM_PORTS; i++) {
+ if ((dev != adm5120_devs[i]) && netif_running(adm5120_devs[i]))
+ t &= ~adm5120_eth_vlans[i];
+ }
+ sw_write_reg(SWITCH_REG_PORT_CONF0, t);
+
+ if (!--sw_used)
+ sw_int_mask(SWITCH_INTS_USED);
+
+ free_irq(dev->irq, dev);
+
+ return 0;
+}
+
+static int adm5120_if_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dma_desc *desc;
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+ unsigned int entry;
+ unsigned long data;
+ int i;
+
+ /* lock switch irq */
+ spin_lock_irq(&tx_lock);
+
+ /* calculate the next TX descriptor entry. */
+ entry = cur_txl % TX_RING_SIZE;
+
+ desc = &txl_descs[entry];
+ if (desc->buf1 & DESC_OWN) {
+ /* We want to write a packet but the TX queue is still
+ * occupied by the DMA. We are faster than the DMA... */
+ SW_DBG("%s unable to transmit, packet dropped\n", dev->name);
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ spin_unlock_irq(&tx_lock);
+ return 0;
+ }
+
+ txl_skbuff[entry] = skb;
+ data = (desc->buf1 & DESC_EOR);
+ data |= DESC_ADDR(skb->data);
+
+ desc->misc =
+ ((skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len) << DESC_PKTLEN_SHIFT) |
+ (0x1 << priv->vlan_no);
+
+ desc->buflen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+ desc->buf1 = data | DESC_OWN;
+ sw_write_reg(SWITCH_REG_SEND_TRIG, SEND_TRIG_STL);
+
+ cur_txl++;
+ if (cur_txl == dirty_txl + TX_QUEUE_LEN) {
+ for (i = 0; i < SWITCH_NUM_PORTS; i++) {
+ if (!adm5120_devs[i])
+ continue;
+ netif_stop_queue(adm5120_devs[i]);
+ }
+ }
+
+ dev->trans_start = jiffies;
+
+ spin_unlock_irq(&tx_lock);
+
+ return 0;
+}
+
+static void adm5120_if_tx_timeout(struct net_device *dev)
+{
+ SW_INFO("TX timeout on %s\n", dev->name);
+}
+
+static void adm5120_if_set_rx_mode(struct net_device *dev)
+{
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+ u32 ports;
+ u32 t;
+
+ ports = adm5120_eth_vlans[priv->vlan_no] & SWITCH_PORTS_NOCPU;
+
+ t = sw_read_reg(SWITCH_REG_CPUP_CONF);
+ if (dev->flags & IFF_PROMISC)
+ /* enable unknown packets */
+ t &= ~(ports << CPUP_CONF_DUNP_SHIFT);
+ else
+ /* disable unknown packets */
+ t |= (ports << CPUP_CONF_DUNP_SHIFT);
+
+ if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev))
+ /* enable multicast packets */
+ t &= ~(ports << CPUP_CONF_DMCP_SHIFT);
+ else
+ /* disable multicast packets */
+ t |= (ports << CPUP_CONF_DMCP_SHIFT);
+
+ /*
+ * If there is any port configured to be in promiscuous mode, then the
+ * Bridge Test Mode has to be activated. This will result in
+ * transporting also packets learned in another VLAN to be forwarded
+ * to the CPU.
+ *
+ * The difficult scenario is when we want to build a bridge on the CPU.
+ * Assume we have port0 and the CPU port in VLAN0, and port1 and the
+ * CPU port in VLAN1. Now we build a bridge on the CPU between
+ * VLAN0 and VLAN1. Both ports of the VLANs are set in promisc mode.
+ * Now assume a packet with ethernet source address 99 enters port 0.
+ * It will be forwarded to the CPU because it is unknown. Then the
+ * bridge in the CPU will send it to VLAN1 and it goes out at port 1.
+ * When a packet with ethernet destination address 99 later comes in at
+ * port 1 in VLAN1, the switch has learned that this address is located
+ * at port 0 in VLAN0, so it would drop the packet. In order to still
+ * deliver it to the CPU, the Bridge Test Mode has to be activated.
+ */
+
+ /* Check if there is any vlan in promisc mode. */
+ if (~t & (SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT))
+ t |= CPUP_CONF_BTM; /* Enable Bridge Testing Mode */
+ else
+ t &= ~CPUP_CONF_BTM; /* Disable Bridge Testing Mode */
+
+ sw_write_reg(SWITCH_REG_CPUP_CONF, t);
+}
+
+static int adm5120_if_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (ret)
+ return ret;
+
+ adm5120_write_mac(dev);
+ return 0;
+}
+
+static int adm5120_if_do_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+ int err;
+ struct adm5120_sw_info info;
+ struct adm5120_if_priv *priv = netdev_priv(dev);
+
+ switch (cmd) {
+ case SIOCGADMINFO:
+ info.magic = 0x5120;
+ info.ports = adm5120_nrdevs;
+ info.vlan = priv->vlan_no;
+ err = copy_to_user(rq->ifr_data, &info, sizeof(info));
+ if (err)
+ return -EFAULT;
+ break;
+ case SIOCSMATRIX:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ err = copy_from_user(adm5120_eth_vlans, rq->ifr_data,
+ sizeof(adm5120_eth_vlans));
+ if (err)
+ return -EFAULT;
+ adm5120_set_vlan(adm5120_eth_vlans);
+ break;
+ case SIOCGMATRIX:
+ err = copy_to_user(rq->ifr_data, adm5120_eth_vlans,
+ sizeof(adm5120_eth_vlans));
+ if (err)
+ return -EFAULT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static const struct net_device_ops adm5120sw_netdev_ops = {
+ .ndo_open = adm5120_if_open,
+ .ndo_stop = adm5120_if_stop,
+ .ndo_start_xmit = adm5120_if_hard_start_xmit,
+ .ndo_set_rx_mode = adm5120_if_set_rx_mode,
+ .ndo_do_ioctl = adm5120_if_do_ioctl,
+ .ndo_tx_timeout = adm5120_if_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = adm5120_if_set_mac_address,
+};
+
+static struct net_device *adm5120_if_alloc(void)
+{
+ struct net_device *dev;
+ struct adm5120_if_priv *priv;
+
+ dev = alloc_etherdev(sizeof(*priv));
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+ priv->dev = dev;
+
+ dev->irq = ADM5120_IRQ_SWITCH;
+ dev->netdev_ops = &adm5120sw_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+#ifdef CONFIG_ADM5120_SWITCH_NAPI
+ netif_napi_add(dev, &priv->napi, adm5120_if_poll, 64);
+#endif
+
+ return dev;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static void adm5120_switch_cleanup(void)
+{
+ int i;
+
+ /* disable interrupts */
+ sw_int_mask(SWITCH_INTS_ALL);
+
+ for (i = 0; i < SWITCH_NUM_PORTS; i++) {
+ struct net_device *dev = adm5120_devs[i];
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ }
+
+ adm5120_switch_tx_ring_free();
+ adm5120_switch_rx_ring_free();
+}
+
+static int adm5120_switch_probe(struct platform_device *pdev)
+{
+ u32 t;
+ int i, err;
+
+ adm5120_nrdevs = adm5120_eth_num_ports;
+
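+	/* Initial CPU port setup: keep the CPU port disabled (DCPUP) and
+	 * let the hardware drop unknown (DUNP) and multicast (DMCP)
+	 * frames from the non-CPU ports. The CPU port is enabled once
+	 * all interfaces have been registered below. */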
+ t = CPUP_CONF_DCPUP | CPUP_CONF_CRCP |
+ SWITCH_PORTS_NOCPU << CPUP_CONF_DUNP_SHIFT |
+		SWITCH_PORTS_NOCPU << CPUP_CONF_DMCP_SHIFT;
+ sw_write_reg(SWITCH_REG_CPUP_CONF, t);
+
+ t = (SWITCH_PORTS_NOCPU << PORT_CONF0_EMCP_SHIFT) |
+ (SWITCH_PORTS_NOCPU << PORT_CONF0_BP_SHIFT) |
+ (SWITCH_PORTS_NOCPU);
+ sw_write_reg(SWITCH_REG_PORT_CONF0, t);
+
+ /* setup ports to Autoneg/100M/Full duplex/Auto MDIX */
+ t = SWITCH_PORTS_PHY |
+ (SWITCH_PORTS_PHY << PHY_CNTL2_SC_SHIFT) |
+ (SWITCH_PORTS_PHY << PHY_CNTL2_DC_SHIFT) |
+ (SWITCH_PORTS_PHY << PHY_CNTL2_PHYR_SHIFT) |
+ (SWITCH_PORTS_PHY << PHY_CNTL2_AMDIX_SHIFT) |
+ PHY_CNTL2_RMAE;
+ sw_write_reg(SWITCH_REG_PHY_CNTL2, t);
+
+ t = sw_read_reg(SWITCH_REG_PHY_CNTL3);
+ t |= PHY_CNTL3_RNT;
+ sw_write_reg(SWITCH_REG_PHY_CNTL3, t);
+
+	/* Force all packets from all ports to low priority */
+ sw_write_reg(SWITCH_REG_PRI_CNTL, 0);
+
+ sw_int_mask(SWITCH_INTS_ALL);
+ sw_int_ack(SWITCH_INTS_ALL);
+
+ err = adm5120_switch_rx_ring_alloc();
+ if (err)
+ goto err;
+
+ err = adm5120_switch_tx_ring_alloc();
+ if (err)
+ goto err;
+
+ adm5120_switch_tx_ring_reset(txl_descs, txl_skbuff, TX_RING_SIZE);
+ adm5120_switch_rx_ring_reset(rxl_descs, rxl_skbuff, RX_RING_SIZE);
+
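+	/* Hand the descriptor list bases to the hardware: only the
+	 * low-priority TX/RX rings are used, their uncached (KSEG1)
+	 * addresses go into SLDA/RLDA while the other list heads stay 0. */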
+ sw_write_reg(SWITCH_REG_SHDA, 0);
+ sw_write_reg(SWITCH_REG_SLDA, KSEG1ADDR(txl_descs));
+ sw_write_reg(SWITCH_REG_RHDA, 0);
+ sw_write_reg(SWITCH_REG_RLDA, KSEG1ADDR(rxl_descs));
+
+ for (i = 0; i < SWITCH_NUM_PORTS; i++) {
+ struct net_device *dev;
+ struct adm5120_if_priv *priv;
+
+ dev = adm5120_if_alloc();
+ if (!dev) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ adm5120_devs[i] = dev;
+ priv = netdev_priv(dev);
+
+ priv->vlan_no = i;
+ priv->port_mask = adm5120_eth_vlans[i];
+
+		memcpy(dev->dev_addr, adm5120_eth_macs[i], ETH_ALEN);
+ adm5120_write_mac(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ SW_INFO("%s register failed, error=%d\n",
+ dev->name, err);
+ goto err;
+ }
+ }
+
+ /* setup vlan/port mapping after devs are filled up */
+ adm5120_set_vlan(adm5120_eth_vlans);
+
+ /* enable CPU port */
+ t = sw_read_reg(SWITCH_REG_CPUP_CONF);
+ t &= ~CPUP_CONF_DCPUP;
+ sw_write_reg(SWITCH_REG_CPUP_CONF, t);
+
+ return 0;
+
+err:
+ adm5120_switch_cleanup();
+
+ SW_ERR("init failed\n");
+ return err;
+}
+
+static int adm5120_switch_remove(struct platform_device *pdev)
+{
+ adm5120_switch_cleanup();
+ return 0;
+}
+
+static struct platform_driver adm5120_switch_driver = {
+ .probe = adm5120_switch_probe,
+ .remove = adm5120_switch_remove,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+
+/* -------------------------------------------------------------------------- */
+
+static int __init adm5120_switch_mod_init(void)
+{
+ int err;
+
+ pr_info(DRV_DESC " version " DRV_VERSION "\n");
+ err = platform_driver_register(&adm5120_switch_driver);
+
+ return err;
+}
+
+static void __exit adm5120_switch_mod_exit(void)
+{
+ platform_driver_unregister(&adm5120_switch_driver);
+}
+
+module_init(adm5120_switch_mod_init);
+module_exit(adm5120_switch_mod_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
diff --git a/target/linux/adm5120/files-3.18/drivers/net/adm5120sw.h b/target/linux/adm5120/files-3.18/drivers/net/adm5120sw.h
new file mode 100644
index 0000000..fa9e503
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/net/adm5120sw.h
@@ -0,0 +1,23 @@
+/*
+ * Defines for ADM5120 built in ethernet switch driver
+ *
+ * Copyright Jeroen Vreeken (pe1rxq@amsat.org), 2005
+ *
+ * Values come from ADM5120 datasheet and original ADMtek 2.4 driver,
+ * Copyright ADMtek Inc.
+ */
+
+#ifndef _INCLUDE_ADM5120SW_H_
+#define _INCLUDE_ADM5120SW_H_
+
+#define SIOCSMATRIX SIOCDEVPRIVATE
+#define SIOCGMATRIX (SIOCDEVPRIVATE + 1)
+#define SIOCGADMINFO (SIOCDEVPRIVATE + 2)
+
+struct adm5120_sw_info {
+ u16 magic;
+ u16 ports;
+ u16 vlan;
+};
+
+#endif /* _INCLUDE_ADM5120SW_H_ */
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-dbg.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-dbg.c
new file mode 100644
index 0000000..2d5dc2a
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-dbg.c
@@ -0,0 +1,836 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-dbg.c
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+/*-------------------------------------------------------------------------*/
+
+static inline char *ed_typestring(int ed_type)
+{
+ switch (ed_type) {
+ case PIPE_CONTROL:
+ return "ctrl";
+ case PIPE_BULK:
+ return "bulk";
+ case PIPE_INTERRUPT:
+ return "intr";
+ case PIPE_ISOCHRONOUS:
+ return "isoc";
+ }
+ return "(bad ed_type)";
+}
+
+static inline char *ed_statestring(int state)
+{
+ switch (state) {
+ case ED_IDLE:
+ return "IDLE";
+ case ED_UNLINK:
+ return "UNLINK";
+ case ED_OPER:
+ return "OPER";
+ }
+ return "?STATE";
+}
+
+static inline char *pipestring(int pipe)
+{
+ return ed_typestring(usb_pipetype(pipe));
+}
+
+static inline char *td_pidstring(u32 info)
+{
+ switch (info & TD_DP) {
+ case TD_DP_SETUP:
+ return "SETUP";
+ case TD_DP_IN:
+ return "IN";
+ case TD_DP_OUT:
+ return "OUT";
+ }
+ return "?PID";
+}
+
+static inline char *td_togglestring(u32 info)
+{
+ switch (info & TD_T) {
+ case TD_T_DATA0:
+ return "DATA0";
+ case TD_T_DATA1:
+ return "DATA1";
+ case TD_T_CARRY:
+ return "CARRY";
+ }
+ return "?TOGGLE";
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef DEBUG
+
+/* debug: print the main components of an URB
+ * small: 0) header + data packets, 1) just header
+ */
+static void __attribute__((unused))
+urb_print(struct admhcd *ahcd, struct urb *urb, char *str, int small, int status)
+{
+ unsigned int pipe = urb->pipe;
+
+ if (!urb->dev || !urb->dev->bus) {
+ admhc_dbg(ahcd, "%s URB: no dev", str);
+ return;
+ }
+
+#ifndef ADMHC_VERBOSE_DEBUG
+ if (status != 0)
+#endif
+ admhc_dbg(ahcd, "URB-%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d "
+ "stat=%d\n",
+ str,
+ urb,
+ usb_pipedevice(pipe),
+ usb_pipeendpoint(pipe),
+ usb_pipeout(pipe) ? "out" : "in",
+ pipestring(pipe),
+ urb->transfer_flags,
+ urb->actual_length,
+ urb->transfer_buffer_length,
+ status);
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ if (!small) {
+ int i, len;
+
+ if (usb_pipecontrol(pipe)) {
+ admhc_dbg(ahcd, "setup(8):");
+			for (i = 0; i < 8; i++)
+				printk(KERN_CONT " %02x", ((__u8 *)urb->setup_packet)[i]);
+			printk(KERN_CONT "\n");
+ }
+ if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
+ admhc_dbg(ahcd, "data(%d/%d):",
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ len = usb_pipeout(pipe) ?
+ urb->transfer_buffer_length : urb->actual_length;
+ for (i = 0; i < 16 && i < len; i++)
+				printk(KERN_CONT " %02x", ((__u8 *)urb->transfer_buffer)[i]);
+			printk(KERN_CONT "%s stat:%d\n", i < len ? "..." : "", status);
+ }
+ }
+#endif /* ADMHC_VERBOSE_DEBUG */
+}
+
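+/* Emit either into the caller-supplied buffer (for the debugfs files)
+ * or, when no buffer is given, straight to the log via admhc_dbg(). */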
+#define admhc_dbg_sw(ahcd, next, size, format, arg...) \
+ do { \
+ if (next) { \
+ unsigned s_len; \
+ s_len = scnprintf(*next, *size, format, ## arg); \
+ *size -= s_len; *next += s_len; \
+ } else \
+ admhc_dbg(ahcd, format, ## arg); \
+	} while (0)
+
+
+static void admhc_dump_intr_mask(struct admhcd *ahcd, char *label, u32 mask,
+ char **next, unsigned *size)
+{
+ admhc_dbg_sw(ahcd, next, size, "%s 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ label,
+ mask,
+ (mask & ADMHC_INTR_INTA) ? " INTA" : "",
+ (mask & ADMHC_INTR_FATI) ? " FATI" : "",
+ (mask & ADMHC_INTR_SWI) ? " SWI" : "",
+ (mask & ADMHC_INTR_TDC) ? " TDC" : "",
+ (mask & ADMHC_INTR_FNO) ? " FNO" : "",
+ (mask & ADMHC_INTR_SO) ? " SO" : "",
+ (mask & ADMHC_INTR_INSM) ? " INSM" : "",
+ (mask & ADMHC_INTR_BABI) ? " BABI" : "",
+ (mask & ADMHC_INTR_7) ? " !7!" : "",
+ (mask & ADMHC_INTR_6) ? " !6!" : "",
+ (mask & ADMHC_INTR_RESI) ? " RESI" : "",
+ (mask & ADMHC_INTR_SOFI) ? " SOFI" : ""
+ );
+}
+
+static void maybe_print_eds(struct admhcd *ahcd, char *label, u32 value,
+ char **next, unsigned *size)
+{
+ if (value)
+ admhc_dbg_sw(ahcd, next, size, "%s %08x\n", label, value);
+}
+
+static char *buss2string(int state)
+{
+ switch (state) {
+ case ADMHC_BUSS_RESET:
+ return "reset";
+ case ADMHC_BUSS_RESUME:
+ return "resume";
+ case ADMHC_BUSS_OPER:
+ return "operational";
+ case ADMHC_BUSS_SUSPEND:
+ return "suspend";
+ }
+ return "?state";
+}
+
+static void
+admhc_dump_status(struct admhcd *ahcd, char **next, unsigned *size)
+{
+ struct admhcd_regs __iomem *regs = ahcd->regs;
+ u32 temp;
+
+ temp = admhc_readl(ahcd, &regs->gencontrol);
+ admhc_dbg_sw(ahcd, next, size,
+ "gencontrol 0x%08x%s%s%s%s\n",
+ temp,
+ (temp & ADMHC_CTRL_UHFE) ? " UHFE" : "",
+ (temp & ADMHC_CTRL_SIR) ? " SIR" : "",
+ (temp & ADMHC_CTRL_DMAA) ? " DMAA" : "",
+ (temp & ADMHC_CTRL_SR) ? " SR" : ""
+ );
+
+ temp = admhc_readl(ahcd, &regs->host_control);
+ admhc_dbg_sw(ahcd, next, size,
+ "host_control 0x%08x BUSS=%s%s\n",
+ temp,
+ buss2string(temp & ADMHC_HC_BUSS),
+ (temp & ADMHC_HC_DMAE) ? " DMAE" : ""
+ );
+
+ admhc_dump_intr_mask(ahcd, "int_status",
+ admhc_readl(ahcd, &regs->int_status),
+ next, size);
+ admhc_dump_intr_mask(ahcd, "int_enable",
+ admhc_readl(ahcd, &regs->int_enable),
+ next, size);
+
+ maybe_print_eds(ahcd, "hosthead",
+ admhc_readl(ahcd, &regs->hosthead), next, size);
+}
+
+#define dbg_port_sw(hc, num, value, next, size) \
+ admhc_dbg_sw(hc, next, size, \
+ "portstatus [%d] " \
+ "0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
+ num, temp, \
+ (temp & ADMHC_PS_PRSC) ? " PRSC" : "", \
+ (temp & ADMHC_PS_OCIC) ? " OCIC" : "", \
+ (temp & ADMHC_PS_PSSC) ? " PSSC" : "", \
+ (temp & ADMHC_PS_PESC) ? " PESC" : "", \
+ (temp & ADMHC_PS_CSC) ? " CSC" : "", \
+ \
+ (temp & ADMHC_PS_LSDA) ? " LSDA" : "", \
+ (temp & ADMHC_PS_PPS) ? " PPS" : "", \
+ (temp & ADMHC_PS_PRS) ? " PRS" : "", \
+ (temp & ADMHC_PS_POCI) ? " POCI" : "", \
+ (temp & ADMHC_PS_PSS) ? " PSS" : "", \
+ \
+ (temp & ADMHC_PS_PES) ? " PES" : "", \
+ (temp & ADMHC_PS_CCS) ? " CCS" : "" \
+ );
+
+
+static void
+admhc_dump_roothub(
+ struct admhcd *ahcd,
+ int verbose,
+ char **next,
+ unsigned *size)
+{
+ u32 temp, i;
+
+ temp = admhc_read_rhdesc(ahcd);
+ if (temp == ~(u32)0)
+ return;
+
+ if (verbose) {
+ admhc_dbg_sw(ahcd, next, size,
+ "rhdesc %08x%s%s%s%s%s%s PPCM=%02x%s%s%s%s NUMP=%d(%d)\n",
+ temp,
+ (temp & ADMHC_RH_CRWE) ? " CRWE" : "",
+ (temp & ADMHC_RH_OCIC) ? " OCIC" : "",
+ (temp & ADMHC_RH_LPSC) ? " LPSC" : "",
+			(temp & ADMHC_RH_DRWE) ? " DRWE" : "",
+			(temp & ADMHC_RH_OCI) ? " OCI" : "",
+			(temp & ADMHC_RH_LPS) ? " LPS" : "",
+ ((temp & ADMHC_RH_PPCM) >> 16),
+ (temp & ADMHC_RH_NOCP) ? " NOCP" : "",
+ (temp & ADMHC_RH_OCPM) ? " OCPM" : "",
+ (temp & ADMHC_RH_NPS) ? " NPS" : "",
+ (temp & ADMHC_RH_PSM) ? " PSM" : "",
+ (temp & ADMHC_RH_NUMP), ahcd->num_ports
+ );
+ }
+
+ for (i = 0; i < ahcd->num_ports; i++) {
+ temp = admhc_read_portstatus(ahcd, i);
+ dbg_port_sw(ahcd, i, temp, next, size);
+ }
+}
+
+static void admhc_dump(struct admhcd *ahcd, int verbose)
+{
+ admhc_dbg(ahcd, "ADMHC ahcd state\n");
+
+ /* dumps some of the state we know about */
+ admhc_dump_status(ahcd, NULL, NULL);
+ admhc_dbg(ahcd, "current frame #%04x\n",
+ admhc_frame_no(ahcd));
+
+ admhc_dump_roothub(ahcd, verbose, NULL, NULL);
+}
+
+static const char data0[] = "DATA0";
+static const char data1[] = "DATA1";
+
+static void admhc_dump_td(const struct admhcd *ahcd, const char *label,
+ const struct td *td)
+{
+ u32 tmp;
+
+ admhc_dbg(ahcd, "%s td %p; urb %p index %d; hwNextTD %08x\n",
+ label, td,
+ td->urb, td->index,
+ hc32_to_cpup(ahcd, &td->hwNextTD));
+
+ tmp = hc32_to_cpup(ahcd, &td->hwINFO);
+ admhc_dbg(ahcd, " status %08x%s CC=%x EC=%d %s %s ISI=%x FN=%x\n",
+ tmp,
+ (tmp & TD_OWN) ? " OWN" : "",
+ TD_CC_GET(tmp),
+ TD_EC_GET(tmp),
+ td_togglestring(tmp),
+ td_pidstring(tmp),
+ TD_ISI_GET(tmp),
+ TD_FN_GET(tmp));
+
+ tmp = hc32_to_cpup(ahcd, &td->hwCBL);
+ admhc_dbg(ahcd, " dbp %08x; cbl %08x; LEN=%d%s\n",
+ hc32_to_cpup(ahcd, &td->hwDBP),
+ tmp,
+ TD_BL_GET(tmp),
+ (tmp & TD_IE) ? " IE" : "");
+}
+
+/* caller MUST own hcd spinlock if verbose is set! */
+static void __attribute__((unused))
+admhc_dump_ed(const struct admhcd *ahcd, const char *label,
+ const struct ed *ed, int verbose)
+{
+ u32 tmp = hc32_to_cpu(ahcd, ed->hwINFO);
+
+ admhc_dbg(ahcd, "%s ed %p %s type %s; next ed %08x\n",
+ label,
+ ed, ed_statestring(ed->state), ed_typestring(ed->type),
+ hc32_to_cpup(ahcd, &ed->hwNextED));
+
+ admhc_dbg(ahcd, " info %08x MAX=%d%s%s%s%s EP=%d DEV=%d\n", tmp,
+ ED_MPS_GET(tmp),
+ (tmp & ED_ISO) ? " ISO" : "",
+ (tmp & ED_SKIP) ? " SKIP" : "",
+ (tmp & ED_SPEED_FULL) ? " FULL" : " LOW",
+ (tmp & ED_INT) ? " INT" : "",
+ ED_EN_GET(tmp),
+ ED_FA_GET(tmp));
+
+ tmp = hc32_to_cpup(ahcd, &ed->hwHeadP);
+ admhc_dbg(ahcd, " tds: head %08x tail %08x %s%s%s\n",
+ tmp & TD_MASK,
+ hc32_to_cpup(ahcd, &ed->hwTailP),
+ (tmp & ED_C) ? data1 : data0,
+ (tmp & ED_H) ? " HALT" : "",
+ verbose ? " td list follows" : " (not listing)");
+
+ if (verbose) {
+ struct list_head *tmp;
+
+ /* use ed->td_list because HC concurrently modifies
+ * hwNextTD as it accumulates ed_donelist.
+ */
+ list_for_each(tmp, &ed->td_list) {
+ struct td *td;
+ td = list_entry(tmp, struct td, td_list);
+ admhc_dump_td(ahcd, " ->", td);
+ }
+ }
+}
+
+#else /* ifdef DEBUG */
+
+static inline void urb_print(struct admhcd *ahcd, struct urb *urb, char *str,
+ int small, int status) {}
+static inline void admhc_dump_ed(const struct admhcd *ahcd, const char *label,
+ const struct ed *ed, int verbose) {}
+static inline void admhc_dump_td(const struct admhcd *ahcd, const char *label,
+ const struct td *td) {}
+static inline void admhc_dump(struct admhcd *ahcd, int verbose) {}
+
+#undef ADMHC_VERBOSE_DEBUG
+
+#endif /* DEBUG */
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef STUB_DEBUG_FILES
+
+static inline void create_debug_files(struct admhcd *bus) { }
+static inline void remove_debug_files(struct admhcd *bus) { }
+
+#else
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+static ssize_t debug_output(struct file *, char __user *, size_t, loff_t *);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_async_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_periodic_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_registers_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+
+static struct dentry *admhc_debug_root;
+
+struct debug_buffer {
+ ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
+ struct admhcd *ahcd;
+ struct mutex mutex; /* protect filling of buffer */
+ size_t count; /* number of characters filled into buffer */
+ char *page;
+};
+
+static ssize_t
+show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed)
+{
+ unsigned temp;
+ unsigned size = count;
+
+ if (!ed)
+ return 0;
+
+ /* dump a snapshot of the bulk or control schedule */
+ while (ed) {
+ u32 info = hc32_to_cpu(ahcd, ed->hwINFO);
+ u32 headp = hc32_to_cpu(ahcd, ed->hwHeadP);
+ u32 tailp = hc32_to_cpu(ahcd, ed->hwTailP);
+ struct list_head *entry;
+ struct td *td;
+
+ temp = scnprintf(buf, size,
+ "ed/%p %s %s %cs dev%d ep%d %s%smax %d %08x%s%s %s"
+ " h:%08x t:%08x",
+ ed,
+ ed_statestring(ed->state),
+ ed_typestring(ed->type),
+ (info & ED_SPEED_FULL) ? 'f' : 'l',
+ info & ED_FA_MASK,
+ (info >> ED_EN_SHIFT) & ED_EN_MASK,
+ (info & ED_INT) ? "INT " : "",
+ (info & ED_ISO) ? "ISO " : "",
+			(info >> ED_MPS_SHIFT) & ED_MPS_MASK,
+ info,
+ (info & ED_SKIP) ? " S" : "",
+ (headp & ED_H) ? " H" : "",
+ (headp & ED_C) ? data1 : data0,
+ headp & ED_MASK, tailp);
+ size -= temp;
+ buf += temp;
+
+ list_for_each(entry, &ed->td_list) {
+ u32 dbp, cbl;
+
+ td = list_entry(entry, struct td, td_list);
+ info = hc32_to_cpup(ahcd, &td->hwINFO);
+ dbp = hc32_to_cpup(ahcd, &td->hwDBP);
+ cbl = hc32_to_cpup(ahcd, &td->hwCBL);
+
+ temp = scnprintf(buf, size,
+ "\n\ttd/%p %s %d %s%scc=%x urb %p (%08x,%08x)",
+ td,
+ td_pidstring(info),
+ TD_BL_GET(cbl),
+ (info & TD_OWN) ? "" : "DONE ",
+ (cbl & TD_IE) ? "IE " : "",
+ TD_CC_GET(info), td->urb, info, cbl);
+ size -= temp;
+ buf += temp;
+ }
+
+ temp = scnprintf(buf, size, "\n");
+ size -= temp;
+ buf += temp;
+
+ ed = ed->ed_next;
+ }
+
+ return count - size;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+ struct admhcd *ahcd;
+ size_t temp;
+ unsigned long flags;
+
+ ahcd = buf->ahcd;
+
+ spin_lock_irqsave(&ahcd->lock, flags);
+ temp = show_list(ahcd, buf->page, PAGE_SIZE, ahcd->ed_head);
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+
+ return temp;
+}
+
+
+#define DBG_SCHED_LIMIT 64
+
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+ struct admhcd *ahcd;
+ struct ed **seen, *ed;
+ unsigned long flags;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+
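+	/* 'seen' records EDs that were already expanded, so an ED linked
+	 * into several periodic slots is only dumped in full once. */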
+ seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC);
+ if (!seen)
+ return 0;
+ seen_count = 0;
+
+ ahcd = buf->ahcd;
+ next = buf->page;
+ size = PAGE_SIZE;
+
+ temp = scnprintf(next, size, "size = %d\n", NUM_INTS);
+ size -= temp;
+ next += temp;
+
+ /* dump a snapshot of the periodic schedule (and load) */
+ spin_lock_irqsave(&ahcd->lock, flags);
+ for (i = 0; i < NUM_INTS; i++) {
+ ed = ahcd->periodic[i];
+ if (!ed)
+ continue;
+
+ temp = scnprintf(next, size, "%2d [%3d]:", i, ahcd->load[i]);
+ size -= temp;
+ next += temp;
+
+ do {
+ temp = scnprintf(next, size, " ed%d/%p",
+ ed->interval, ed);
+ size -= temp;
+ next += temp;
+ for (temp = 0; temp < seen_count; temp++) {
+ if (seen[temp] == ed)
+ break;
+ }
+
+ /* show more info the first time around */
+ if (temp == seen_count) {
+ u32 info = hc32_to_cpu(ahcd, ed->hwINFO);
+ struct list_head *entry;
+ unsigned qlen = 0;
+
+ /* qlen measured here in TDs, not urbs */
+ list_for_each(entry, &ed->td_list)
+ qlen++;
+ temp = scnprintf(next, size,
+ " (%cs dev%d ep%d%s qlen %u"
+ " max %d %08x%s%s)",
+ (info & ED_SPEED_FULL) ? 'f' : 'l',
+ ED_FA_GET(info),
+ ED_EN_GET(info),
+ (info & ED_ISO) ? "iso" : "int",
+ qlen,
+ ED_MPS_GET(info),
+ info,
+ (info & ED_SKIP) ? " K" : "",
+ (ed->hwHeadP &
+ cpu_to_hc32(ahcd, ED_H)) ?
+ " H" : "");
+ size -= temp;
+ next += temp;
+
+ if (seen_count < DBG_SCHED_LIMIT)
+ seen[seen_count++] = ed;
+
+ ed = ed->ed_next;
+
+ } else {
+ /* we've seen it and what's after */
+ temp = 0;
+ ed = NULL;
+ }
+
+ } while (ed);
+
+ temp = scnprintf(next, size, "\n");
+ size -= temp;
+ next += temp;
+ }
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+ kfree(seen);
+
+ return PAGE_SIZE - size;
+}
+
+
+#undef DBG_SCHED_LIMIT
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct admhcd *ahcd;
+ struct admhcd_regs __iomem *regs;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ u32 rdata;
+
+ ahcd = buf->ahcd;
+	hcd = admhcd_to_hcd(ahcd);
+ regs = ahcd->regs;
+ next = buf->page;
+ size = PAGE_SIZE;
+
+ spin_lock_irqsave(&ahcd->lock, flags);
+
+ /* dump driver info, then registers in spec order */
+
+ admhc_dbg_sw(ahcd, &next, &size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "%s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ hcd_name);
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ size -= scnprintf(next, size,
+ "SUSPENDED (no register access)\n");
+ goto done;
+ }
+
+ admhc_dump_status(ahcd, &next, &size);
+
+ /* other registers mostly affect frame timings */
+ rdata = admhc_readl(ahcd, &regs->fminterval);
+ temp = scnprintf(next, size,
+ "fmintvl 0x%08x %sFSLDP=0x%04x FI=0x%04x\n",
+ rdata, (rdata & ADMHC_SFI_FIT) ? "FIT " : "",
+ (rdata >> ADMHC_SFI_FSLDP_SHIFT) & ADMHC_SFI_FSLDP_MASK,
+ rdata & ADMHC_SFI_FI_MASK);
+ size -= temp;
+ next += temp;
+
+ rdata = admhc_readl(ahcd, &regs->fmnumber);
+ temp = scnprintf(next, size, "fmnumber 0x%08x %sFR=0x%04x FN=%04x\n",
+ rdata, (rdata & ADMHC_SFN_FRT) ? "FRT " : "",
+ (rdata >> ADMHC_SFN_FR_SHIFT) & ADMHC_SFN_FR_MASK,
+ rdata & ADMHC_SFN_FN_MASK);
+ size -= temp;
+ next += temp;
+
+ /* TODO: use predefined bitmask */
+ rdata = admhc_readl(ahcd, &regs->lsthresh);
+ temp = scnprintf(next, size, "lsthresh 0x%04x\n",
+ rdata & 0x3fff);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "hub poll timer: %s\n",
+ admhcd_to_hcd(ahcd)->poll_rh ? "ON" : "OFF");
+ size -= temp;
+ next += temp;
+
+ /* roothub */
+ admhc_dump_roothub(ahcd, 1, &next, &size);
+
+done:
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+ return PAGE_SIZE - size;
+}
+
+
+static struct debug_buffer *alloc_buffer(struct admhcd *ahcd,
+ ssize_t (*fill_func)(struct debug_buffer *))
+{
+ struct debug_buffer *buf;
+
+ buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+ if (buf) {
+ buf->ahcd = ahcd;
+ buf->fill_func = fill_func;
+ mutex_init(&buf->mutex);
+ }
+
+ return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+ int ret = 0;
+
+ if (!buf->page)
+ buf->page = (char *)get_zeroed_page(GFP_KERNEL);
+
+ if (!buf->page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = buf->fill_func(buf);
+
+ if (ret >= 0) {
+ buf->count = ret;
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+ size_t len, loff_t *offset)
+{
+ struct debug_buffer *buf = file->private_data;
+ int ret = 0;
+
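+	/* Generate the snapshot on the first read; subsequent reads just
+	 * copy out of the cached page. */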
+ mutex_lock(&buf->mutex);
+ if (buf->count == 0) {
+ ret = fill_buffer(buf);
+ if (ret != 0) {
+ mutex_unlock(&buf->mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&buf->mutex);
+
+ ret = simple_read_from_buffer(user_buf, len, offset,
+ buf->page, buf->count);
+
+out:
+ return ret;
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf = file->private_data;
+
+ if (buf) {
+ if (buf->page)
+ free_page((unsigned long)buf->page);
+ kfree(buf);
+ }
+
+ return 0;
+}
+
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_periodic_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_registers_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files(struct admhcd *ahcd)
+{
+ struct usb_bus *bus = &admhcd_to_hcd(ahcd)->self;
+
+ ahcd->debug_dir = debugfs_create_dir(bus->bus_name, admhc_debug_root);
+ if (!ahcd->debug_dir)
+ goto dir_error;
+
+ ahcd->debug_async = debugfs_create_file("async", S_IRUGO,
+ ahcd->debug_dir, ahcd,
+ &debug_async_fops);
+ if (!ahcd->debug_async)
+ goto async_error;
+
+ ahcd->debug_periodic = debugfs_create_file("periodic", S_IRUGO,
+ ahcd->debug_dir, ahcd,
+ &debug_periodic_fops);
+ if (!ahcd->debug_periodic)
+ goto periodic_error;
+
+ ahcd->debug_registers = debugfs_create_file("registers", S_IRUGO,
+ ahcd->debug_dir, ahcd,
+ &debug_registers_fops);
+ if (!ahcd->debug_registers)
+ goto registers_error;
+
+ admhc_dbg(ahcd, "created debug files\n");
+ return;
+
+registers_error:
+ debugfs_remove(ahcd->debug_periodic);
+periodic_error:
+ debugfs_remove(ahcd->debug_async);
+async_error:
+ debugfs_remove(ahcd->debug_dir);
+dir_error:
+ ahcd->debug_periodic = NULL;
+ ahcd->debug_async = NULL;
+ ahcd->debug_dir = NULL;
+}
+
+static inline void remove_debug_files(struct admhcd *ahcd)
+{
+ debugfs_remove(ahcd->debug_registers);
+ debugfs_remove(ahcd->debug_periodic);
+ debugfs_remove(ahcd->debug_async);
+ debugfs_remove(ahcd->debug_dir);
+}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-drv.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-drv.c
new file mode 100644
index 0000000..798fd22
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-drv.c
@@ -0,0 +1,228 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-au1xxx.c
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ * (C) Copyright 2002 Hewlett-Packard Company
+ *
+ * Written by Christopher Hoover <ch@hpl.hp.com>
+ * Based on fragments of previous driver by Russell King et al.
+ *
+ * Modified for LH7A404 from ahcd-sa1111.c
+ * by Durgesh Pattamatta <pattamattad@sharpsec.com>
+ * Modified for AMD Alchemy Au1xxx
+ * by Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/signal.h>
+
+#include <asm/bootinfo.h>
+#include <asm/mach-adm5120/adm5120_defs.h>
+
+#ifdef DEBUG
+#define HCD_DBG(f, a...) printk(KERN_DEBUG "%s: " f, hcd_name, ## a)
+#else
+#define HCD_DBG(f, a...) do {} while (0)
+#endif
+#define HCD_ERR(f, a...) printk(KERN_ERR "%s: " f, hcd_name, ## a)
+#define HCD_INFO(f, a...) printk(KERN_INFO "%s: " f, hcd_name, ## a)
+
+/*-------------------------------------------------------------------------*/
+
+static int admhc_adm5120_probe(const struct hc_driver *driver,
+ struct platform_device *dev)
+{
+ int retval;
+ struct usb_hcd *hcd;
+ int irq;
+ struct resource *regs;
+
+ /* sanity checks */
+ regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ HCD_DBG("no IOMEM resource found\n");
+ return -ENODEV;
+ }
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ HCD_DBG("no IRQ resource found\n");
+ return -ENODEV;
+ }
+
+ hcd = usb_create_hcd(driver, &dev->dev, "ADM5120");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = regs->start;
+	hcd->rsrc_len = resource_size(regs);
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ HCD_DBG("request_mem_region failed\n");
+ retval = -EBUSY;
+ goto err_dev;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ HCD_DBG("ioremap failed\n");
+ retval = -ENOMEM;
+ goto err_mem;
+ }
+
+ admhc_hcd_init(hcd_to_admhcd(hcd));
+
+ retval = usb_add_hcd(hcd, irq, 0);
+ if (retval)
+ goto err_io;
+
+ return 0;
+
+err_io:
+ iounmap(hcd->regs);
+err_mem:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err_dev:
+ usb_put_hcd(hcd);
+ return retval;
+}
+
+
+/* may be called without controller electrically present */
+/* may be called with controller, bus, and devices active */
+
+static void admhc_adm5120_remove(struct usb_hcd *hcd,
+ struct platform_device *dev)
+{
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+}
+
+static int admhc_adm5120_start(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ int ret;
+
+ ret = admhc_init(ahcd);
+ if (ret < 0) {
+ HCD_ERR("unable to init %s\n", hcd->self.bus_name);
+ goto err;
+ }
+
+ ret = admhc_run(ahcd);
+ if (ret < 0) {
+ HCD_ERR("unable to run %s\n", hcd->self.bus_name);
+ goto err_stop;
+ }
+
+ return 0;
+
+err_stop:
+ admhc_stop(hcd);
+err:
+ return ret;
+}
+
+static const struct hc_driver adm5120_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "ADM5120 built-in USB 1.1 Host Controller",
+ .hcd_priv_size = sizeof(struct admhcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = admhc_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .start = admhc_adm5120_start,
+ .stop = admhc_stop,
+ .shutdown = admhc_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = admhc_urb_enqueue,
+ .urb_dequeue = admhc_urb_dequeue,
+ .endpoint_disable = admhc_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = admhc_get_frame_number,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = admhc_hub_status_data,
+ .hub_control = admhc_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = admhc_bus_suspend,
+ .bus_resume = admhc_bus_resume,
+#endif
+ .start_port_reset = admhc_start_port_reset,
+};
+
+static int usb_hcd_adm5120_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = admhc_adm5120_probe(&adm5120_hc_driver, pdev);
+
+ return ret;
+}
+
+static int usb_hcd_adm5120_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+ admhc_adm5120_remove(hcd, pdev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* TODO */
+static int usb_hcd_adm5120_suspend(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ return 0;
+}
+
+static int usb_hcd_adm5120_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ return 0;
+}
+#else
+#define usb_hcd_adm5120_suspend NULL
+#define usb_hcd_adm5120_resume NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver usb_hcd_adm5120_driver = {
+ .probe = usb_hcd_adm5120_probe,
+ .remove = usb_hcd_adm5120_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .suspend = usb_hcd_adm5120_suspend,
+ .resume = usb_hcd_adm5120_resume,
+ .driver = {
+ .name = "adm5120-hcd",
+ .owner = THIS_MODULE,
+ },
+};
+
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hcd.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hcd.c
new file mode 100644
index 0000000..f721ec1
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hcd.c
@@ -0,0 +1,843 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-hcd.c
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * [ Initialisation is based on Linus' ]
+ * [ uhci code and gregs ahcd fragments ]
+ * [ (C) Copyright 1999 Linus Torvalds ]
+ * [ (C) Copyright 1999 Gregory P. Smith]
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/hcd.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+#define DRIVER_VERSION "0.27.0"
+#define DRIVER_AUTHOR "Gabor Juhos <juhosg@openwrt.org>"
+#define DRIVER_DESC "ADMtek USB 1.1 Host Controller Driver"
+
+/*-------------------------------------------------------------------------*/
+
+#undef ADMHC_VERBOSE_DEBUG /* not always helpful */
+
+/* For initializing controller (mask in an HCFS mode too) */
+#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
+
+#define ADMHC_INTR_INIT \
+ (ADMHC_INTR_MIE | ADMHC_INTR_INSM | ADMHC_INTR_FATI \
+ | ADMHC_INTR_RESI | ADMHC_INTR_TDC | ADMHC_INTR_BABI)
+
+/*-------------------------------------------------------------------------*/
+
+static const char hcd_name[] = "admhc-hcd";
+
+#define STATECHANGE_DELAY msecs_to_jiffies(300)
+
+#include "adm5120.h"
+
+static void admhc_dump(struct admhcd *ahcd, int verbose);
+static int admhc_init(struct admhcd *ahcd);
+static void admhc_stop(struct usb_hcd *hcd);
+
+#include "adm5120-dbg.c"
+#include "adm5120-mem.c"
+#include "adm5120-pm.c"
+#include "adm5120-hub.c"
+#include "adm5120-q.c"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * queue up an urb for anything except the root hub
+ */
+static int admhc_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ struct ed *ed;
+ struct urb_priv *urb_priv;
+ unsigned int pipe = urb->pipe;
+ int td_cnt = 0;
+ unsigned long flags;
+ int ret = 0;
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ spin_lock_irqsave(&ahcd->lock, flags);
+	urb_print(ahcd, urb, "ENQUEUE", usb_pipein(pipe), -EINPROGRESS);
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+#endif
+
+ /* every endpoint has an ed, locate and maybe (re)initialize it */
+ ed = ed_get(ahcd, urb->ep, urb->dev, pipe, urb->interval);
+ if (!ed)
+ return -ENOMEM;
+
+ /* for the private part of the URB we need the number of TDs */
+ switch (ed->type) {
+ case PIPE_CONTROL:
+ if (urb->transfer_buffer_length > TD_DATALEN_MAX)
+ /* td_submit_urb() doesn't yet handle these */
+ return -EMSGSIZE;
+
+ /* 1 TD for setup, 1 for ACK, plus ... */
+ td_cnt = 2;
+ /* FALLTHROUGH */
+ case PIPE_BULK:
+ /* one TD for every 4096 Bytes (can be up to 8K) */
+ td_cnt += urb->transfer_buffer_length / TD_DATALEN_MAX;
+ /* ... and for any remaining bytes ... */
+ if ((urb->transfer_buffer_length % TD_DATALEN_MAX) != 0)
+ td_cnt++;
+ /* ... and maybe a zero length packet to wrap it up */
+ if (td_cnt == 0)
+ td_cnt++;
+ else if ((urb->transfer_flags & URB_ZERO_PACKET) != 0
+ && (urb->transfer_buffer_length
+ % usb_maxpacket(urb->dev, pipe,
+ usb_pipeout(pipe))) == 0)
+ td_cnt++;
+ break;
+ case PIPE_INTERRUPT:
+ /*
+ * for Interrupt IN/OUT transactions, each ED contains
+ * only 1 TD.
+ * TODO: check transfer_buffer_length?
+ */
+ td_cnt = 1;
+ break;
+ case PIPE_ISOCHRONOUS:
+ /* number of packets from URB */
+ td_cnt = urb->number_of_packets;
+ break;
+ }
+
+ urb_priv = urb_priv_alloc(ahcd, td_cnt, mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ urb_priv->ed = ed;
+
+ spin_lock_irqsave(&ahcd->lock, flags);
+ /* don't submit to a dead HC */
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ if (!HC_IS_RUNNING(hcd->state)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto fail;
+
+ /* schedule the ed if needed */
+ if (ed->state == ED_IDLE) {
+ ret = ed_schedule(ahcd, ed);
+ if (ret < 0) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ goto fail;
+ }
+ if (ed->type == PIPE_ISOCHRONOUS) {
+ u16 frame = admhc_frame_no(ahcd);
+
+ /* delay a few frames before the first TD */
+			frame += max_t(u16, 8, ed->interval);
+ frame &= ~(ed->interval - 1);
+ frame |= ed->branch;
+ urb->start_frame = frame;
+
+ /* yes, only URB_ISO_ASAP is supported, and
+ * urb->start_frame is never used as input.
+ */
+ }
+ } else if (ed->type == PIPE_ISOCHRONOUS)
+ urb->start_frame = ed->last_iso + ed->interval;
+
+ /* fill the TDs and link them to the ed; and
+ * enable that part of the schedule, if needed
+ * and update count of queued periodic urbs
+ */
+ urb->hcpriv = urb_priv;
+ td_submit_urb(ahcd, urb);
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ admhc_dump_ed(ahcd, "admhc_urb_enqueue", urb_priv->ed, 1);
+#endif
+
+fail:
+ if (ret)
+ urb_priv_free(ahcd, urb_priv);
+
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+ return ret;
+}
+
+/*
+ * decouple the URB from the HC queues (TDs, urb_priv);
+ * reporting is always done
+ * asynchronously, and we might be dealing with an urb that's
+ * partially transferred, or an ED with other urbs being unlinked.
+ */
+static int admhc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ahcd->lock, flags);
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ urb_print(ahcd, urb, "DEQUEUE", 1, status);
+#endif
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret) {
+ /* Do nothing */
+ ;
+ } else if (HC_IS_RUNNING(hcd->state)) {
+ struct urb_priv *urb_priv;
+
+ /* Unless an IRQ completed the unlink while it was being
+ * handed to us, flag it for unlink and giveback, and force
+ * some upcoming INTR_SF to call finish_unlinks()
+ */
+ urb_priv = urb->hcpriv;
+ if (urb_priv) {
+ if (urb_priv->ed->state == ED_OPER)
+ start_ed_unlink(ahcd, urb_priv->ed);
+ }
+ } else {
+ /*
+ * with HC dead, we won't respect hc queue pointers
+ * any more ... just clean up every urb's memory.
+ */
+ if (urb->hcpriv)
+ finish_urb(ahcd, urb, status);
+ }
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* frees config/altsetting state for endpoints,
+ * including ED memory, dummy TD, and bulk/intr data toggle
+ */
+
+static void admhc_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ unsigned long flags;
+ struct ed *ed = ep->hcpriv;
+ unsigned limit = 1000;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+ if (!ed)
+ return;
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ spin_lock_irqsave(&ahcd->lock, flags);
+ admhc_dump_ed(ahcd, "EP-DISABLE", ed, 1);
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+#endif
+
+rescan:
+ spin_lock_irqsave(&ahcd->lock, flags);
+
+ if (!HC_IS_RUNNING(hcd->state)) {
+sanitize:
+ ed->state = ED_IDLE;
+ finish_unlinks(ahcd, 0);
+ }
+
+ switch (ed->state) {
+ case ED_UNLINK: /* wait for hw to finish? */
+ /* major IRQ delivery trouble loses INTR_SOFI too... */
+ if (limit-- == 0) {
+ admhc_warn(ahcd, "IRQ INTR_SOFI lossage\n");
+ goto sanitize;
+ }
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case ED_IDLE: /* fully unlinked */
+ if (list_empty(&ed->td_list)) {
+ td_free(ahcd, ed->dummy);
+ ed_free(ahcd, ed);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. can't recover; must leak ed.
+ */
+ admhc_err(ahcd, "leak ed %p (#%02x) state %d%s\n",
+ ed, ep->desc.bEndpointAddress, ed->state,
+ list_empty(&ed->td_list) ? "" : " (has tds)");
+ td_free(ahcd, ed->dummy);
+ break;
+ }
+
+ ep->hcpriv = NULL;
+
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+}
+
+static int admhc_get_frame_number(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+
+ return admhc_frame_no(ahcd);
+}
+
+static void admhc_usb_reset(struct admhcd *ahcd)
+{
+#if 0
+ ahcd->hc_control = admhc_readl(ahcd, &ahcd->regs->control);
+ ahcd->hc_control &= OHCI_CTRL_RWC;
+ admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+#else
+ /* FIXME */
+ ahcd->host_control = ADMHC_BUSS_RESET;
+ admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control);
+#endif
+}
+
+/* admhc_shutdown forcibly disables IRQs and DMA, helping kexec and
+ * other cases where the next software may expect clean state from the
+ * "firmware". this is bus-neutral, unlike shutdown() methods.
+ */
+static void
+admhc_shutdown(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd;
+
+ ahcd = hcd_to_admhcd(hcd);
+ admhc_intr_disable(ahcd, ADMHC_INTR_MIE);
+ admhc_dma_disable(ahcd);
+ admhc_usb_reset(ahcd);
+ /* flush the writes */
+ admhc_writel_flush(ahcd);
+}
+
+/*-------------------------------------------------------------------------*
+ * HC functions
+ *-------------------------------------------------------------------------*/
+
+static void admhc_eds_cleanup(struct admhcd *ahcd)
+{
+ if (ahcd->ed_tails[PIPE_INTERRUPT]) {
+ ed_free(ahcd, ahcd->ed_tails[PIPE_INTERRUPT]);
+ ahcd->ed_tails[PIPE_INTERRUPT] = NULL;
+ }
+
+ if (ahcd->ed_tails[PIPE_ISOCHRONOUS]) {
+ ed_free(ahcd, ahcd->ed_tails[PIPE_ISOCHRONOUS]);
+ ahcd->ed_tails[PIPE_ISOCHRONOUS] = NULL;
+ }
+
+ if (ahcd->ed_tails[PIPE_CONTROL]) {
+ ed_free(ahcd, ahcd->ed_tails[PIPE_CONTROL]);
+ ahcd->ed_tails[PIPE_CONTROL] = NULL;
+ }
+
+ if (ahcd->ed_tails[PIPE_BULK]) {
+ ed_free(ahcd, ahcd->ed_tails[PIPE_BULK]);
+ ahcd->ed_tails[PIPE_BULK] = NULL;
+ }
+
+ ahcd->ed_head = NULL;
+}
+
+#define ED_DUMMY_INFO (ED_SPEED_FULL | ED_SKIP)
+
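+/* Create one dummy (skip) ED per transfer type and chain them in the
+ * order INT -> ISO -> CTRL -> BULK; they act as the per-type tails of
+ * the ED list, with ed_head pointing at the interrupt dummy. */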
+static int admhc_eds_init(struct admhcd *ahcd)
+{
+ struct ed *ed;
+
+ ed = ed_create(ahcd, PIPE_INTERRUPT, ED_DUMMY_INFO);
+ if (!ed)
+ goto err;
+
+ ahcd->ed_tails[PIPE_INTERRUPT] = ed;
+
+ ed = ed_create(ahcd, PIPE_ISOCHRONOUS, ED_DUMMY_INFO);
+ if (!ed)
+ goto err;
+
+ ahcd->ed_tails[PIPE_ISOCHRONOUS] = ed;
+ ed->ed_prev = ahcd->ed_tails[PIPE_INTERRUPT];
+ ahcd->ed_tails[PIPE_INTERRUPT]->ed_next = ed;
+ ahcd->ed_tails[PIPE_INTERRUPT]->hwNextED = cpu_to_hc32(ahcd, ed->dma);
+
+ ed = ed_create(ahcd, PIPE_CONTROL, ED_DUMMY_INFO);
+ if (!ed)
+ goto err;
+
+ ahcd->ed_tails[PIPE_CONTROL] = ed;
+ ed->ed_prev = ahcd->ed_tails[PIPE_ISOCHRONOUS];
+ ahcd->ed_tails[PIPE_ISOCHRONOUS]->ed_next = ed;
+ ahcd->ed_tails[PIPE_ISOCHRONOUS]->hwNextED = cpu_to_hc32(ahcd, ed->dma);
+
+ ed = ed_create(ahcd, PIPE_BULK, ED_DUMMY_INFO);
+ if (!ed)
+ goto err;
+
+ ahcd->ed_tails[PIPE_BULK] = ed;
+ ed->ed_prev = ahcd->ed_tails[PIPE_CONTROL];
+ ahcd->ed_tails[PIPE_CONTROL]->ed_next = ed;
+ ahcd->ed_tails[PIPE_CONTROL]->hwNextED = cpu_to_hc32(ahcd, ed->dma);
+
+ ahcd->ed_head = ahcd->ed_tails[PIPE_INTERRUPT];
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ admhc_dump_ed(ahcd, "ed intr", ahcd->ed_tails[PIPE_INTERRUPT], 1);
+ admhc_dump_ed(ahcd, "ed isoc", ahcd->ed_tails[PIPE_ISOCHRONOUS], 1);
+ admhc_dump_ed(ahcd, "ed ctrl", ahcd->ed_tails[PIPE_CONTROL], 1);
+ admhc_dump_ed(ahcd, "ed bulk", ahcd->ed_tails[PIPE_BULK], 1);
+#endif
+
+ return 0;
+
+err:
+ admhc_eds_cleanup(ahcd);
+ return -ENOMEM;
+}
+
+/* init memory and the dummy EDs; no BIOS/SMM handoff is needed on this SoC */
+
+static int admhc_init(struct admhcd *ahcd)
+{
+ struct usb_hcd *hcd = admhcd_to_hcd(ahcd);
+ int ret;
+
+ admhc_disable(ahcd);
+ ahcd->regs = hcd->regs;
+
+ /* Disable HC interrupts */
+ admhc_intr_disable(ahcd, ADMHC_INTR_MIE);
+
+ /* Read the number of ports unless overridden */
+ if (ahcd->num_ports == 0)
+ ahcd->num_ports = admhc_read_rhdesc(ahcd) & ADMHC_RH_NUMP;
+
+ ret = admhc_mem_init(ahcd);
+ if (ret)
+ goto err;
+
+ /* init dummy endpoints */
+ ret = admhc_eds_init(ahcd);
+ if (ret)
+ goto err;
+
+ create_debug_files(ahcd);
+
+ return 0;
+
+err:
+ admhc_stop(hcd);
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Start the controller: set the bus operational,
+ * reset the USB bus and the controller,
+ * and enable interrupts.
+ */
+static int admhc_run(struct admhcd *ahcd)
+{
+ u32 val;
+ int first = ahcd->fminterval == 0;
+ struct usb_hcd *hcd = admhcd_to_hcd(ahcd);
+
+ admhc_disable(ahcd);
+
+ /* boot firmware should have set this up (5.1.1.3.1) */
+ if (first) {
+ val = admhc_readl(ahcd, &ahcd->regs->fminterval);
+ ahcd->fminterval = val & ADMHC_SFI_FI_MASK;
+ if (ahcd->fminterval != FI)
+ admhc_dbg(ahcd, "fminterval delta %d\n",
+ ahcd->fminterval - FI);
+ ahcd->fminterval |=
+ (FSLDP(ahcd->fminterval) << ADMHC_SFI_FSLDP_SHIFT);
+ /* also: power/overcurrent flags in rhdesc */
+ }
+
+#if 0 /* TODO: not applicable */
+ /* Reset USB nearly "by the book". RemoteWakeupConnected has
+ * to be checked in case boot firmware (BIOS/SMM/...) has set up
+ * wakeup in a way the bus isn't aware of (e.g., legacy PCI PM).
+ * If the bus glue detected wakeup capability then it should
+ * already be enabled; if so we'll just enable it again.
+ */
+ if ((ahcd->hc_control & OHCI_CTRL_RWC) != 0)
+ device_set_wakeup_capable(hcd->self.controller, 1);
+#endif
+
+ switch (ahcd->host_control & ADMHC_HC_BUSS) {
+ case ADMHC_BUSS_OPER:
+ val = 0;
+ break;
+ case ADMHC_BUSS_SUSPEND:
+ /* FALLTHROUGH ? */
+ case ADMHC_BUSS_RESUME:
+ ahcd->host_control = ADMHC_BUSS_RESUME;
+ val = 10 /* msec wait */;
+ break;
+ /* case ADMHC_BUSS_RESET: */
+ default:
+ ahcd->host_control = ADMHC_BUSS_RESET;
+ val = 50 /* msec wait */;
+ break;
+ }
+ admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control);
+
+ /* flush the writes */
+ admhc_writel_flush(ahcd);
+
+ msleep(val);
+ val = admhc_read_rhdesc(ahcd);
+ if (!(val & ADMHC_RH_NPS)) {
+ /* power down each port */
+ for (val = 0; val < ahcd->num_ports; val++)
+ admhc_write_portstatus(ahcd, val, ADMHC_PS_CPP);
+ }
+ /* flush those writes */
+ admhc_writel_flush(ahcd);
+
+ /* 2msec timelimit here means no irqs/preempt */
+ spin_lock_irq(&ahcd->lock);
+
+ admhc_writel(ahcd, ADMHC_CTRL_SR, &ahcd->regs->gencontrol);
+ val = 30; /* ... allow extra time */
+ while ((admhc_readl(ahcd, &ahcd->regs->gencontrol) & ADMHC_CTRL_SR) != 0) {
+ if (--val == 0) {
+ spin_unlock_irq(&ahcd->lock);
+ admhc_err(ahcd, "USB HC reset timed out!\n");
+ return -1;
+ }
+ udelay(1);
+ }
+
+ /* enable HOST mode, before access any host specific register */
+ admhc_writel(ahcd, ADMHC_CTRL_UHFE, &ahcd->regs->gencontrol);
+
+ /* Tell the controller where the descriptor list is */
+ admhc_writel(ahcd, (u32)ahcd->ed_head->dma, &ahcd->regs->hosthead);
+
+ periodic_reinit(ahcd);
+
+ /* use rhsc irqs after khubd is fully initialized */
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ hcd->uses_new_polling = 1;
+
+#if 0
+ /* wake on ConnectStatusChange, matching external hubs */
+ admhc_writel(ahcd, RH_HS_DRWE, &ahcd->regs->roothub.status);
+#else
+ /* FIXME roothub_write_status (ahcd, ADMHC_RH_DRWE); */
+#endif
+
+ /* Choose the interrupts we care about now, others later on demand */
+ admhc_intr_ack(ahcd, ~0);
+ admhc_intr_enable(ahcd, ADMHC_INTR_INIT);
+
+ admhc_writel(ahcd, ADMHC_RH_NPS | ADMHC_RH_LPSC, &ahcd->regs->rhdesc);
+
+ /* flush those writes */
+ admhc_writel_flush(ahcd);
+
+ /* start controller operations */
+ ahcd->host_control = ADMHC_BUSS_OPER;
+ admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control);
+
+ val = 20;
+ while ((admhc_readl(ahcd, &ahcd->regs->host_control)
+ & ADMHC_HC_BUSS) != ADMHC_BUSS_OPER) {
+ if (--val == 0) {
+ spin_unlock_irq(&ahcd->lock);
+ admhc_err(ahcd, "unable to setup operational mode!\n");
+ return -1;
+ }
+ mdelay(1);
+ }
+
+ hcd->state = HC_STATE_RUNNING;
+
+ ahcd->next_statechange = jiffies + STATECHANGE_DELAY;
+
+#if 0
+ /* FIXME: enabling DMA is always failed here for an unknown reason */
+ admhc_dma_enable(ahcd);
+
+ val = 200;
+ while ((admhc_readl(ahcd, &ahcd->regs->host_control)
+ & ADMHC_HC_DMAE) != ADMHC_HC_DMAE) {
+ if (--val == 0) {
+ spin_unlock_irq(&ahcd->lock);
+ admhc_err(ahcd, "unable to enable DMA!\n");
+ admhc_dump(ahcd, 1);
+ return -1;
+ }
+ mdelay(1);
+ }
+
+#endif
+
+ spin_unlock_irq(&ahcd->lock);
+
+ mdelay(ADMHC_POTPGT);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* an interrupt happens */
+
+static irqreturn_t admhc_irq(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ struct admhcd_regs __iomem *regs = ahcd->regs;
+ u32 ints;
+
+ ints = admhc_readl(ahcd, &regs->int_status);
+ if ((ints & ADMHC_INTR_INTA) == 0) {
+ /* no unmasked interrupt status is set */
+ return IRQ_NONE;
+ }
+
+ ints &= admhc_readl(ahcd, &regs->int_enable);
+
+ if (ints & ADMHC_INTR_FATI) {
+ /* e.g. due to PCI Master/Target Abort */
+ admhc_disable(ahcd);
+ admhc_err(ahcd, "Fatal Error, controller disabled\n");
+ admhc_dump(ahcd, 1);
+ admhc_usb_reset(ahcd);
+ }
+
+ if (ints & ADMHC_INTR_BABI) {
+ admhc_intr_disable(ahcd, ADMHC_INTR_BABI);
+ admhc_intr_ack(ahcd, ADMHC_INTR_BABI);
+ admhc_err(ahcd, "Babble Detected\n");
+ }
+
+ if (ints & ADMHC_INTR_INSM) {
+ admhc_vdbg(ahcd, "Root Hub Status Change\n");
+ ahcd->next_statechange = jiffies + STATECHANGE_DELAY;
+ admhc_intr_ack(ahcd, ADMHC_INTR_RESI | ADMHC_INTR_INSM);
+
+ /* NOTE: Vendors didn't always make the same implementation
+ * choices for RHSC. Many followed the spec; RHSC triggers
+ * on an edge, like setting and maybe clearing a port status
+ * change bit. With others it's level-triggered, active
+ * until khubd clears all the port status change bits. We'll
+ * always disable it here and rely on polling until khubd
+ * re-enables it.
+ */
+ admhc_intr_disable(ahcd, ADMHC_INTR_INSM);
+ usb_hcd_poll_rh_status(hcd);
+ } else if (ints & ADMHC_INTR_RESI) {
+ /* For connect and disconnect events, we expect the controller
+ * to turn on RHSC along with RD. But for remote wakeup events
+ * this might not happen.
+ */
+ admhc_vdbg(ahcd, "Resume Detect\n");
+ admhc_intr_ack(ahcd, ADMHC_INTR_RESI);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ if (ahcd->autostop) {
+ spin_lock(&ahcd->lock);
+ admhc_rh_resume(ahcd);
+ spin_unlock(&ahcd->lock);
+ } else
+ usb_hcd_resume_root_hub(hcd);
+ }
+
+ if (ints & ADMHC_INTR_TDC) {
+ admhc_vdbg(ahcd, "Transfer Descriptor Complete\n");
+ admhc_intr_ack(ahcd, ADMHC_INTR_TDC);
+ if (HC_IS_RUNNING(hcd->state))
+ admhc_intr_disable(ahcd, ADMHC_INTR_TDC);
+ spin_lock(&ahcd->lock);
+ admhc_td_complete(ahcd);
+ spin_unlock(&ahcd->lock);
+ if (HC_IS_RUNNING(hcd->state))
+ admhc_intr_enable(ahcd, ADMHC_INTR_TDC);
+ }
+
+ if (ints & ADMHC_INTR_SO) {
+ /* could track INTR_SO to reduce available PCI/... bandwidth */
+ admhc_vdbg(ahcd, "Schedule Overrun\n");
+ }
+
+#if 1
+ spin_lock(&ahcd->lock);
+ if (ahcd->ed_rm_list)
+ finish_unlinks(ahcd, admhc_frame_no(ahcd));
+
+ if ((ints & ADMHC_INTR_SOFI) != 0 && !ahcd->ed_rm_list
+ && HC_IS_RUNNING(hcd->state))
+ admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
+ spin_unlock(&ahcd->lock);
+#else
+ if (ints & ADMHC_INTR_SOFI) {
+ admhc_vdbg(ahcd, "Start Of Frame\n");
+ spin_lock(&ahcd->lock);
+
+ /* handle any pending ED removes */
+ finish_unlinks(ahcd, admhc_frameno(ahcd));
+
+ /* leaving INTR_SOFI enabled when there's still unlinking
+ * to be done in the (next frame).
+ */
+ if ((ahcd->ed_rm_list == NULL) ||
+ HC_IS_RUNNING(hcd->state) == 0)
+ /*
+ * disable INTR_SOFI if there are no unlinking to be
+ * done (in the next frame)
+ */
+ admhc_intr_disable(ahcd, ADMHC_INTR_SOFI);
+
+ spin_unlock(&ahcd->lock);
+ }
+#endif
+
+ if (HC_IS_RUNNING(hcd->state)) {
+ admhc_intr_ack(ahcd, ints);
+ admhc_intr_enable(ahcd, ADMHC_INTR_MIE);
+ admhc_writel_flush(ahcd);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void admhc_stop(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+
+ admhc_dump(ahcd, 1);
+
+ flush_scheduled_work();
+
+ admhc_usb_reset(ahcd);
+ admhc_intr_disable(ahcd, ADMHC_INTR_MIE);
+
+ free_irq(hcd->irq, hcd);
+ hcd->irq = -1;
+
+ remove_debug_files(ahcd);
+ admhc_eds_cleanup(ahcd);
+ admhc_mem_cleanup(ahcd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ADM5120
+#include "adm5120-drv.c"
+#define PLATFORM_DRIVER usb_hcd_adm5120_driver
+#endif
+
+#if !defined(PLATFORM_DRIVER)
+#error "missing bus glue for admhc-hcd"
+#endif
+
+#define DRIVER_INFO DRIVER_DESC " version " DRIVER_VERSION
+
+static int __init admhc_hcd_mod_init(void)
+{
+ int ret = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_INFO "\n", hcd_name);
+	pr_info("%s: block sizes: ed %zd td %zd\n", hcd_name,
+ sizeof(struct ed), sizeof(struct td));
+ set_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
+
+#ifdef DEBUG
+ admhc_debug_root = debugfs_create_dir("admhc", usb_debug_root);
+ if (!admhc_debug_root) {
+ ret = -ENOENT;
+ goto error_debug;
+ }
+#endif
+
+#ifdef PLATFORM_DRIVER
+ ret = platform_driver_register(&PLATFORM_DRIVER);
+ if (ret < 0)
+ goto error_platform;
+#endif
+
+ return ret;
+
+#ifdef PLATFORM_DRIVER
+ platform_driver_unregister(&PLATFORM_DRIVER);
+error_platform:
+#endif
+
+#ifdef DEBUG
+ debugfs_remove(admhc_debug_root);
+ admhc_debug_root = NULL;
+error_debug:
+#endif
+ clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
+ return ret;
+}
+module_init(admhc_hcd_mod_init);
+
+static void __exit admhc_hcd_mod_exit(void)
+{
+ platform_driver_unregister(&PLATFORM_DRIVER);
+#ifdef DEBUG
+ debugfs_remove(admhc_debug_root);
+#endif
+ clear_bit(USB_OHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(admhc_hcd_mod_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_INFO);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hub.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hub.c
new file mode 100644
index 0000000..8cabaf9
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-hub.c
@@ -0,0 +1,430 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-hub.c
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * ADM5120 Root Hub ... the nonsharable stuff
+ */
+
+#define dbg_port(hc, label, num, value) \
+ admhc_dbg(hc, \
+ "%s port%d " \
+ "= 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
+ label, num, value, \
+ (value & ADMHC_PS_PRSC) ? " PRSC" : "", \
+ (value & ADMHC_PS_OCIC) ? " OCIC" : "", \
+ (value & ADMHC_PS_PSSC) ? " PSSC" : "", \
+ (value & ADMHC_PS_PESC) ? " PESC" : "", \
+ (value & ADMHC_PS_CSC) ? " CSC" : "", \
+ \
+ (value & ADMHC_PS_LSDA) ? " LSDA" : "", \
+ (value & ADMHC_PS_PPS) ? " PPS" : "", \
+ (value & ADMHC_PS_PRS) ? " PRS" : "", \
+ (value & ADMHC_PS_POCI) ? " POCI" : "", \
+ (value & ADMHC_PS_PSS) ? " PSS" : "", \
+ \
+ (value & ADMHC_PS_PES) ? " PES" : "", \
+ (value & ADMHC_PS_CCS) ? " CCS" : "" \
+ );
+
+#define dbg_port_write(hc, label, num, value) \
+ admhc_dbg(hc, \
+ "%s port%d " \
+ "= 0x%08x%s%s%s%s%s%s%s%s%s%s%s%s\n", \
+ label, num, value, \
+ (value & ADMHC_PS_PRSC) ? " PRSC" : "", \
+ (value & ADMHC_PS_OCIC) ? " OCIC" : "", \
+ (value & ADMHC_PS_PSSC) ? " PSSC" : "", \
+ (value & ADMHC_PS_PESC) ? " PESC" : "", \
+ (value & ADMHC_PS_CSC) ? " CSC" : "", \
+ \
+ (value & ADMHC_PS_CPP) ? " CPP" : "", \
+ (value & ADMHC_PS_SPP) ? " SPP" : "", \
+ (value & ADMHC_PS_SPR) ? " SPR" : "", \
+ (value & ADMHC_PS_CPS) ? " CPS" : "", \
+ (value & ADMHC_PS_SPS) ? " SPS" : "", \
+ \
+ (value & ADMHC_PS_SPE) ? " SPE" : "", \
+ (value & ADMHC_PS_CPE) ? " CPE" : "" \
+ );
+
+/*-------------------------------------------------------------------------*/
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+admhc_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ int i, changed = 0, length = 1;
+ int any_connected = 0;
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&ahcd->lock, flags);
+ if (!HCD_HW_ACCESSIBLE(hcd))
+ goto done;
+
+ /* init status */
+ status = admhc_read_rhdesc(ahcd);
+ if (status & (ADMHC_RH_LPSC | ADMHC_RH_OCIC))
+ buf[0] = changed = 1;
+ else
+ buf[0] = 0;
+ if (ahcd->num_ports > 7) {
+ buf[1] = 0;
+ length++;
+ }
+
+ /* look at each port */
+ for (i = 0; i < ahcd->num_ports; i++) {
+ status = admhc_read_portstatus(ahcd, i);
+
+ /* can't autostop if ports are connected */
+ any_connected |= (status & ADMHC_PS_CCS);
+
+ if (status & (ADMHC_PS_CSC | ADMHC_PS_PESC | ADMHC_PS_PSSC
+ | ADMHC_PS_OCIC | ADMHC_PS_PRSC)) {
+ changed = 1;
+ if (i < 7)
+ buf[0] |= 1 << (i + 1);
+ else
+ buf[1] |= 1 << (i - 7);
+ }
+ }
+
+ if (admhc_root_hub_state_changes(ahcd, changed,
+ any_connected))
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ else
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+
+done:
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+
+ return changed ? length : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int admhc_get_hub_descriptor(struct admhcd *ahcd, char *buf)
+{
+ struct usb_hub_descriptor *desc = (struct usb_hub_descriptor *)buf;
+ u32 rh = admhc_read_rhdesc(ahcd);
+ u16 temp;
+
+ desc->bDescriptorType = USB_DT_HUB; /* Hub-descriptor */
+ desc->bPwrOn2PwrGood = ADMHC_POTPGT/2; /* use default value */
+ desc->bHubContrCurrent = 0x00; /* 0mA */
+
+ desc->bNbrPorts = ahcd->num_ports;
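+	/* the descriptor carries one DeviceRemovable bit per port plus bit 0,
+	 * followed by an equally sized legacy PortPwrCtrlMask bitmap */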
+ temp = 1 + (ahcd->num_ports / 8);
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
+
+ /* FIXME */
+ temp = 0;
+ if (rh & ADMHC_RH_NPS) /* no power switching? */
+ temp |= 0x0002;
+ if (rh & ADMHC_RH_PSM) /* per-port power switching? */
+ temp |= 0x0001;
+ if (rh & ADMHC_RH_NOCP) /* no overcurrent reporting? */
+ temp |= 0x0010;
+ else if (rh & ADMHC_RH_OCPM) /* per-port overcurrent reporting? */
+ temp |= 0x0008;
+ desc->wHubCharacteristics = (__force __u16)cpu_to_hc16(ahcd, temp);
+
+ /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+	desc->u.hs.DeviceRemovable[0] = 0;
+	desc->u.hs.DeviceRemovable[1] = ~0;
+
+ return 0;
+}
+
+static int admhc_get_hub_status(struct admhcd *ahcd, char *buf)
+{
+ struct usb_hub_status *hs = (struct usb_hub_status *)buf;
+ u32 t = admhc_read_rhdesc(ahcd);
+ u16 status, change;
+
+ status = 0;
+ status |= (t & ADMHC_RH_LPS) ? HUB_STATUS_LOCAL_POWER : 0;
+ status |= (t & ADMHC_RH_OCI) ? HUB_STATUS_OVERCURRENT : 0;
+
+ change = 0;
+ change |= (t & ADMHC_RH_LPSC) ? HUB_CHANGE_LOCAL_POWER : 0;
+ change |= (t & ADMHC_RH_OCIC) ? HUB_CHANGE_OVERCURRENT : 0;
+
+ hs->wHubStatus = (__force __u16)cpu_to_hc16(ahcd, status);
+ hs->wHubChange = (__force __u16)cpu_to_hc16(ahcd, change);
+
+ return 0;
+}
+
+static int admhc_get_port_status(struct admhcd *ahcd, unsigned port, char *buf)
+{
+ struct usb_port_status *ps = (struct usb_port_status *)buf;
+ u32 t = admhc_read_portstatus(ahcd, port);
+ u16 status, change;
+
+ status = 0;
+ status |= (t & ADMHC_PS_CCS) ? USB_PORT_STAT_CONNECTION : 0;
+ status |= (t & ADMHC_PS_PES) ? USB_PORT_STAT_ENABLE : 0;
+ status |= (t & ADMHC_PS_PSS) ? USB_PORT_STAT_SUSPEND : 0;
+ status |= (t & ADMHC_PS_POCI) ? USB_PORT_STAT_OVERCURRENT : 0;
+ status |= (t & ADMHC_PS_PRS) ? USB_PORT_STAT_RESET : 0;
+ status |= (t & ADMHC_PS_PPS) ? USB_PORT_STAT_POWER : 0;
+ status |= (t & ADMHC_PS_LSDA) ? USB_PORT_STAT_LOW_SPEED : 0;
+
+ change = 0;
+ change |= (t & ADMHC_PS_CSC) ? USB_PORT_STAT_C_CONNECTION : 0;
+ change |= (t & ADMHC_PS_PESC) ? USB_PORT_STAT_C_ENABLE : 0;
+ change |= (t & ADMHC_PS_PSSC) ? USB_PORT_STAT_C_SUSPEND : 0;
+ change |= (t & ADMHC_PS_OCIC) ? USB_PORT_STAT_C_OVERCURRENT : 0;
+ change |= (t & ADMHC_PS_PRSC) ? USB_PORT_STAT_C_RESET : 0;
+
+ ps->wPortStatus = (__force __u16)cpu_to_hc16(ahcd, status);
+ ps->wPortChange = (__force __u16)cpu_to_hc16(ahcd, change);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_USB_OTG
+
+static int admhc_start_port_reset(struct usb_hcd *hcd, unsigned port)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ u32 status;
+
+ if (!port)
+ return -EINVAL;
+ port--;
+
+ /* start port reset before HNP protocol times out */
+ status = admhc_read_portstatus(ahcd, port);
+ if (!(status & ADMHC_PS_CCS))
+ return -ENODEV;
+
+ /* khubd will finish the reset later */
+ admhc_write_portstatus(ahcd, port, ADMHC_PS_PRS);
+ return 0;
+}
+
+static void start_hnp(struct admhcd *ahcd);
+
+#else
+
+#define admhc_start_port_reset NULL
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+
+/* See usb 7.1.7.5: root hubs must issue at least 50 msec reset signaling,
+ * not necessarily continuous ... to guard against resume signaling.
+ * The short timeout is safe for non-root hubs, and is backward-compatible
+ * with earlier Linux hosts.
+ */
+#ifdef CONFIG_USB_SUSPEND
+#define PORT_RESET_MSEC 50
+#else
+#define PORT_RESET_MSEC 10
+#endif
+
+/* this timer value might be vendor-specific ... */
+#define PORT_RESET_HW_MSEC 10
+
+/* wrap-aware logic morphed from <linux/jiffies.h> */
+#define tick_before(t1, t2) ((s16)(((s16)(t1)) - ((s16)(t2))) < 0)
+
+/* called from some task, normally khubd */
+static inline int admhc_port_reset(struct admhcd *ahcd, unsigned port)
+{
+ u32 t;
+
+ admhc_vdbg(ahcd, "reset port%d\n", port);
+ t = admhc_read_portstatus(ahcd, port);
+ if (!(t & ADMHC_PS_CCS))
+ return -ENODEV;
+
+ admhc_write_portstatus(ahcd, port, ADMHC_PS_SPR);
+ mdelay(10);
+ admhc_write_portstatus(ahcd, port, (ADMHC_PS_SPE | ADMHC_PS_CSC));
+ mdelay(100);
+
+ return 0;
+}
+
+static inline int admhc_port_enable(struct admhcd *ahcd, unsigned port)
+{
+ u32 t;
+
+ admhc_vdbg(ahcd, "enable port%d\n", port);
+ t = admhc_read_portstatus(ahcd, port);
+ if (!(t & ADMHC_PS_CCS))
+ return -ENODEV;
+
+ admhc_write_portstatus(ahcd, port, ADMHC_PS_SPE);
+
+ return 0;
+}
+
+static inline int admhc_port_disable(struct admhcd *ahcd, unsigned port)
+{
+ u32 t;
+
+ admhc_vdbg(ahcd, "disable port%d\n", port);
+ t = admhc_read_portstatus(ahcd, port);
+ if (!(t & ADMHC_PS_CCS))
+ return -ENODEV;
+
+ admhc_write_portstatus(ahcd, port, ADMHC_PS_CPE);
+
+ return 0;
+}
+
+static inline int admhc_port_write(struct admhcd *ahcd, unsigned port,
+ u32 val)
+{
+#ifdef ADMHC_VERBOSE_DEBUG
+ dbg_port_write(ahcd, "write", port, val);
+#endif
+ admhc_write_portstatus(ahcd, port, val);
+
+ return 0;
+}
+
+static int admhc_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ int ports = ahcd->num_ports;
+ int ret = 0;
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+ return -ESHUTDOWN;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+#if 0 /* FIXME */
+ admhc_writel(ahcd, ADMHC_RH_OCIC,
+ &ahcd->regs->roothub.status);
+#endif
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ ret = admhc_port_disable(ahcd, wIndex);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_CPS);
+ break;
+ case USB_PORT_FEAT_POWER:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_CPP);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_CSC);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_PESC);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_PSSC);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_OCIC);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_PRSC);
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case GetHubDescriptor:
+ ret = admhc_get_hub_descriptor(ahcd, buf);
+ break;
+ case GetHubStatus:
+ ret = admhc_get_hub_status(ahcd, buf);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+
+ ret = admhc_get_port_status(ahcd, wIndex, buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ /* FIXME: this can be cleared, yes? */
+ case C_HUB_LOCAL_POWER:
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ ret = admhc_port_enable(ahcd, wIndex);
+ break;
+ case USB_PORT_FEAT_RESET:
+ ret = admhc_port_reset(ahcd, wIndex);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+#ifdef CONFIG_USB_OTG
+ if (hcd->self.otg_port == (wIndex + 1)
+ && hcd->self.b_hnp_enable)
+ start_hnp(ahcd);
+ else
+#endif
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_SPS);
+ break;
+ case USB_PORT_FEAT_POWER:
+ ret = admhc_port_write(ahcd, wIndex, ADMHC_PS_SPP);
+ break;
+ default:
+ goto error;
+ }
+ break;
+
+ default:
+error:
+ /* "protocol stall" on error */
+ ret = -EPIPE;
+ }
+
+ return ret;
+}
+
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-mem.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-mem.c
new file mode 100644
index 0000000..79fff70
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-mem.c
@@ -0,0 +1,202 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-mem.c
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * OHCI deals with three types of memory:
+ * - data used only by the HCD ... kmalloc is fine
+ * - async and periodic schedules, shared by HC and HCD ... these
+ * need to use dma_pool or dma_alloc_coherent
+ * - driver buffers, read/written by HC ... the hcd glue or the
+ * device driver provides us with dma addresses
+ *
+ * There's also "register" data, which is memory mapped.
+ * No memory seen by this driver (or any HCD) may be paged out.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+static void admhc_hcd_init(struct admhcd *ahcd)
+{
+ ahcd->next_statechange = jiffies;
+ spin_lock_init(&ahcd->lock);
+ INIT_LIST_HEAD(&ahcd->pending);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int admhc_mem_init(struct admhcd *ahcd)
+{
+ ahcd->td_cache = dma_pool_create("admhc_td",
+ admhcd_to_hcd(ahcd)->self.controller,
+ sizeof(struct td),
+ TD_ALIGN, /* byte alignment */
+ 0 /* no page-crossing issues */
+ );
+ if (!ahcd->td_cache)
+ goto err;
+
+ ahcd->ed_cache = dma_pool_create("admhc_ed",
+ admhcd_to_hcd(ahcd)->self.controller,
+ sizeof(struct ed),
+ ED_ALIGN, /* byte alignment */
+ 0 /* no page-crossing issues */
+ );
+ if (!ahcd->ed_cache)
+ goto err_td_cache;
+
+ return 0;
+
+err_td_cache:
+ dma_pool_destroy(ahcd->td_cache);
+ ahcd->td_cache = NULL;
+err:
+ return -ENOMEM;
+}
+
+static void admhc_mem_cleanup(struct admhcd *ahcd)
+{
+ if (ahcd->td_cache) {
+ dma_pool_destroy(ahcd->td_cache);
+ ahcd->td_cache = NULL;
+ }
+
+ if (ahcd->ed_cache) {
+ dma_pool_destroy(ahcd->ed_cache);
+ ahcd->ed_cache = NULL;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ahcd "done list" processing needs this mapping */
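+/* The controller hands back completed TDs by DMA address; a small hash table
+ * (chained through td->td_hash) maps those addresses back to struct td. */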
+static inline struct td *dma_to_td(struct admhcd *ahcd, dma_addr_t td_dma)
+{
+ struct td *td;
+
+ td_dma &= TD_MASK;
+ td = ahcd->td_hash[TD_HASH_FUNC(td_dma)];
+ while (td && td->td_dma != td_dma)
+ td = td->td_hash;
+
+ return td;
+}
+
+/* TDs ... */
+static struct td *td_alloc(struct admhcd *ahcd, gfp_t mem_flags)
+{
+ dma_addr_t dma;
+ struct td *td;
+
+ td = dma_pool_alloc(ahcd->td_cache, mem_flags, &dma);
+ if (!td)
+ return NULL;
+
+ /* in case ahcd fetches it, make it look dead */
+ memset(td, 0, sizeof *td);
+ td->hwNextTD = cpu_to_hc32(ahcd, dma);
+ td->td_dma = dma;
+ /* hashed in td_fill */
+
+ return td;
+}
+
+static void td_free(struct admhcd *ahcd, struct td *td)
+{
+ struct td **prev = &ahcd->td_hash[TD_HASH_FUNC(td->td_dma)];
+
+ while (*prev && *prev != td)
+ prev = &(*prev)->td_hash;
+ if (*prev)
+ *prev = td->td_hash;
+#if 0
+ /* TODO: remove */
+ else if ((td->hwINFO & cpu_to_hc32(ahcd, TD_DONE)) != 0)
+ admhc_dbg(ahcd, "no hash for td %p\n", td);
+#else
+ else if ((td->flags & TD_FLAG_DONE) != 0)
+ admhc_dbg(ahcd, "no hash for td %p\n", td);
+#endif
+ dma_pool_free(ahcd->td_cache, td, td->td_dma);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EDs ... */
+static struct ed *ed_alloc(struct admhcd *ahcd, gfp_t mem_flags)
+{
+ dma_addr_t dma;
+ struct ed *ed;
+
+ ed = dma_pool_alloc(ahcd->ed_cache, mem_flags, &dma);
+ if (!ed)
+ return NULL;
+
+ memset(ed, 0, sizeof(*ed));
+ ed->dma = dma;
+
+ INIT_LIST_HEAD(&ed->td_list);
+ INIT_LIST_HEAD(&ed->urb_list);
+
+ return ed;
+}
+
+static void ed_free(struct admhcd *ahcd, struct ed *ed)
+{
+ dma_pool_free(ahcd->ed_cache, ed, ed->dma);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* URB priv ... */
+static void urb_priv_free(struct admhcd *ahcd, struct urb_priv *urb_priv)
+{
+ int i;
+
+ for (i = 0; i < urb_priv->td_cnt; i++)
+ if (urb_priv->td[i])
+ td_free(ahcd, urb_priv->td[i]);
+
+ list_del(&urb_priv->pending);
+ kfree(urb_priv);
+}
+
+static struct urb_priv *urb_priv_alloc(struct admhcd *ahcd, int num_tds,
+ gfp_t mem_flags)
+{
+ struct urb_priv *priv;
+
+ /* allocate the private part of the URB */
+ priv = kzalloc(sizeof(*priv) + sizeof(struct td) * num_tds, mem_flags);
+ if (!priv)
+ goto err;
+
+ /* allocate the TDs (deferring hash chain updates) */
+ for (priv->td_cnt = 0; priv->td_cnt < num_tds; priv->td_cnt++) {
+ priv->td[priv->td_cnt] = td_alloc(ahcd, mem_flags);
+ if (priv->td[priv->td_cnt] == NULL)
+ goto err_free;
+ }
+
+ INIT_LIST_HEAD(&priv->pending);
+
+ return priv;
+
+err_free:
+ urb_priv_free(ahcd, priv);
+err:
+ return NULL;
+}
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-pm.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-pm.c
new file mode 100644
index 0000000..7d7fc24
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-pm.c
@@ -0,0 +1,449 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from fragments of the OHCI driver.
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2004 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#define OHCI_SCHED_ENABLES \
+ (OHCI_CTRL_CLE|OHCI_CTRL_BLE|OHCI_CTRL_PLE|OHCI_CTRL_IE)
+
+#ifdef CONFIG_PM
+static int admhc_restart(struct admhcd *ahcd);
+
+static int admhc_rh_suspend(struct admhcd *ahcd, int autostop)
+__releases(ahcd->lock)
+__acquires(ahcd->lock)
+{
+ int status = 0;
+
+ ahcd->hc_control = admhc_readl(ahcd, &ahcd->regs->control);
+ switch (ahcd->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_RESUME:
+ admhc_dbg(ahcd, "resume/suspend?\n");
+ ahcd->hc_control &= ~OHCI_CTRL_HCFS;
+ ahcd->hc_control |= OHCI_USB_RESET;
+		admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+ (void) admhc_readl(ahcd, &ahcd->regs->control);
+ /* FALL THROUGH */
+ case OHCI_USB_RESET:
+ status = -EBUSY;
+ admhc_dbg(ahcd, "needs reinit!\n");
+ goto done;
+ case OHCI_USB_SUSPEND:
+ if (!ahcd->autostop) {
+ admhc_dbg(ahcd, "already suspended\n");
+ goto done;
+ }
+ }
+ admhc_dbg(ahcd, "%s root hub\n",
+ autostop ? "auto-stop" : "suspend");
+
+ /* First stop any processing */
+ if (!autostop && (ahcd->hc_control & OHCI_SCHED_ENABLES)) {
+ ahcd->hc_control &= ~OHCI_SCHED_ENABLES;
+		admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+ ahcd->hc_control = admhc_readl(ahcd, &ahcd->regs->control);
+ admhc_writel(ahcd, OHCI_INTR_SF, &ahcd->regs->intrstatus);
+
+ /* sched disables take effect on the next frame,
+ * then the last WDH could take 6+ msec
+ */
+ admhc_dbg(ahcd, "stopping schedules ...\n");
+ ahcd->autostop = 0;
+		spin_unlock_irq(&ahcd->lock);
+		msleep(8);
+		spin_lock_irq(&ahcd->lock);
+	}
+	dl_done_list(ahcd);
+	finish_unlinks(ahcd, admhc_frame_no(ahcd));
+
+ /* maybe resume can wake root hub */
+ if (device_may_wakeup(&admhcd_to_hcd(ahcd)->self.root_hub->dev) ||
+ autostop)
+ ahcd->hc_control |= OHCI_CTRL_RWE;
+ else {
+ admhc_writel(ahcd, OHCI_INTR_RHSC, &ahcd->regs->intrdisable);
+ ahcd->hc_control &= ~OHCI_CTRL_RWE;
+ }
+
+ /* Suspend hub ... this is the "global (to this bus) suspend" mode,
+ * which doesn't imply ports will first be individually suspended.
+ */
+ ahcd->hc_control &= ~OHCI_CTRL_HCFS;
+ ahcd->hc_control |= OHCI_USB_SUSPEND;
+	admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+ (void) admhc_readl(ahcd, &ahcd->regs->control);
+
+ /* no resumes until devices finish suspending */
+ if (!autostop) {
+		ahcd->next_statechange = jiffies + msecs_to_jiffies(5);
+ ahcd->autostop = 0;
+ }
+
+done:
+ return status;
+}
+
+static inline struct ed *find_head(struct ed *ed)
+{
+ /* for bulk and control lists */
+ while (ed->ed_prev)
+ ed = ed->ed_prev;
+ return ed;
+}
+
+/* caller has locked the root hub */
+static int admhc_rh_resume(struct admhcd *ahcd)
+__releases(ahcd->lock)
+__acquires(ahcd->lock)
+{
+	struct usb_hcd *hcd = admhcd_to_hcd(ahcd);
+ u32 temp, enables;
+ int status = -EINPROGRESS;
+ int autostopped = ahcd->autostop;
+
+ ahcd->autostop = 0;
+ ahcd->hc_control = admhc_readl(ahcd, &ahcd->regs->control);
+
+ if (ahcd->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
+ /* this can happen after resuming a swsusp snapshot */
+ if (hcd->state == HC_STATE_RESUMING) {
+ admhc_dbg(ahcd, "BIOS/SMM active, control %03x\n",
+ ahcd->hc_control);
+ status = -EBUSY;
+ /* this happens when pmcore resumes HC then root */
+ } else {
+ admhc_dbg(ahcd, "duplicate resume\n");
+ status = 0;
+ }
+ } else switch (ahcd->hc_control & OHCI_CTRL_HCFS) {
+ case OHCI_USB_SUSPEND:
+ ahcd->hc_control &= ~(OHCI_CTRL_HCFS|OHCI_SCHED_ENABLES);
+ ahcd->hc_control |= OHCI_USB_RESUME;
+		admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+ (void) admhc_readl(ahcd, &ahcd->regs->control);
+ admhc_dbg(ahcd, "%s root hub\n",
+ autostopped ? "auto-start" : "resume");
+ break;
+ case OHCI_USB_RESUME:
+ /* HCFS changes sometime after INTR_RD */
+ admhc_dbg(ahcd, "%swakeup root hub\n",
+ autostopped ? "auto-" : "");
+ break;
+ case OHCI_USB_OPER:
+ /* this can happen after resuming a swsusp snapshot */
+ admhc_dbg(ahcd, "snapshot resume? reinit\n");
+ status = -EBUSY;
+ break;
+ default: /* RESET, we lost power */
+ admhc_dbg(ahcd, "lost power\n");
+ status = -EBUSY;
+ }
+ if (status == -EBUSY) {
+ if (!autostopped) {
+			spin_unlock_irq(&ahcd->lock);
+			(void) ahcd_init(ahcd);
+			status = admhc_restart(ahcd);
+ spin_lock_irq(&ahcd->lock);
+ }
+ return status;
+ }
+ if (status != -EINPROGRESS)
+ return status;
+ if (autostopped)
+ goto skip_resume;
+	spin_unlock_irq(&ahcd->lock);
+
+ /* Some controllers (lucent erratum) need extra-long delays */
+	msleep(20 /* usb 11.5.1.10 */ + 12 /* 32 msec counter */ + 1);
+
+ temp = admhc_readl(ahcd, &ahcd->regs->control);
+ temp &= OHCI_CTRL_HCFS;
+ if (temp != OHCI_USB_RESUME) {
+		admhc_err(ahcd, "controller won't resume\n");
+ spin_lock_irq(&ahcd->lock);
+ return -EBUSY;
+ }
+
+ /* disable old schedule state, reinit from scratch */
+ admhc_writel(ahcd, 0, &ahcd->regs->ed_controlhead);
+ admhc_writel(ahcd, 0, &ahcd->regs->ed_controlcurrent);
+ admhc_writel(ahcd, 0, &ahcd->regs->ed_bulkhead);
+ admhc_writel(ahcd, 0, &ahcd->regs->ed_bulkcurrent);
+ admhc_writel(ahcd, 0, &ahcd->regs->ed_periodcurrent);
+	admhc_writel(ahcd, (u32) ahcd->hcca_dma, &ahcd->regs->hcca);
+
+ /* Sometimes PCI D3 suspend trashes frame timings ... */
+ periodic_reinit(ahcd);
+
+ /* the following code is executed with ahcd->lock held and
+ * irqs disabled if and only if autostopped is true
+ */
+
+skip_resume:
+ /* interrupts might have been disabled */
+ admhc_writel(ahcd, OHCI_INTR_INIT, &ahcd->regs->int_enable);
+ if (ahcd->ed_rm_list)
+ admhc_writel(ahcd, OHCI_INTR_SF, &ahcd->regs->int_enable);
+
+ /* Then re-enable operations */
+ admhc_writel(ahcd, OHCI_USB_OPER, &ahcd->regs->control);
+ (void) admhc_readl(ahcd, &ahcd->regs->control);
+ if (!autostopped)
+		msleep(3);
+
+ temp = ahcd->hc_control;
+ temp &= OHCI_CTRL_RWC;
+ temp |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
+ ahcd->hc_control = temp;
+ admhc_writel(ahcd, temp, &ahcd->regs->control);
+ (void) admhc_readl(ahcd, &ahcd->regs->control);
+
+ /* TRSMRCY */
+ if (!autostopped) {
+		msleep(10);
+ spin_lock_irq(&ahcd->lock);
+ }
+ /* now ahcd->lock is always held and irqs are always disabled */
+
+ /* keep it alive for more than ~5x suspend + resume costs */
+ ahcd->next_statechange = jiffies + STATECHANGE_DELAY;
+
+ /* maybe turn schedules back on */
+ enables = 0;
+ temp = 0;
+ if (!ahcd->ed_rm_list) {
+ if (ahcd->ed_controltail) {
+ admhc_writel(ahcd,
+				find_head(ahcd->ed_controltail)->dma,
+ &ahcd->regs->ed_controlhead);
+ enables |= OHCI_CTRL_CLE;
+ temp |= OHCI_CLF;
+ }
+ if (ahcd->ed_bulktail) {
+			admhc_writel(ahcd, find_head(ahcd->ed_bulktail)->dma,
+ &ahcd->regs->ed_bulkhead);
+ enables |= OHCI_CTRL_BLE;
+ temp |= OHCI_BLF;
+ }
+ }
+ if (hcd->self.bandwidth_isoc_reqs || hcd->self.bandwidth_int_reqs)
+ enables |= OHCI_CTRL_PLE|OHCI_CTRL_IE;
+ if (enables) {
+ admhc_dbg(ahcd, "restarting schedules ... %08x\n", enables);
+ ahcd->hc_control |= enables;
+		admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control);
+ if (temp)
+ admhc_writel(ahcd, temp, &ahcd->regs->cmdstatus);
+ (void) admhc_readl(ahcd, &ahcd->regs->control);
+ }
+
+ return 0;
+}
+
+static int admhc_bus_suspend(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ int rc;
+
+ spin_lock_irq(&ahcd->lock);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+ rc = -ESHUTDOWN;
+ else
+ rc = admhc_rh_suspend(ahcd, 0);
+ spin_unlock_irq(&ahcd->lock);
+ return rc;
+}
+
+static int admhc_bus_resume(struct usb_hcd *hcd)
+{
+ struct admhcd *ahcd = hcd_to_admhcd(hcd);
+ int rc;
+
+ if (time_before(jiffies, ahcd->next_statechange))
+ msleep(5);
+
+ spin_lock_irq(&ahcd->lock);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(hcd)))
+ rc = -ESHUTDOWN;
+ else
+ rc = admhc_rh_resume(ahcd);
+ spin_unlock_irq(&ahcd->lock);
+
+ /* poll until we know a device is connected or we autostop */
+ if (rc == 0)
+ usb_hcd_poll_rh_status(hcd);
+ return rc;
+}
+
+/* Carry out polling-, autostop-, and autoresume-related state changes */
+static int admhc_root_hub_state_changes(struct admhcd *ahcd, int changed,
+ int any_connected)
+{
+ int poll_rh = 1;
+
+ switch (ahcd->hc_control & OHCI_CTRL_HCFS) {
+
+ case OHCI_USB_OPER:
+ /* keep on polling until we know a device is connected
+ * and RHSC is enabled */
+ if (!ahcd->autostop) {
+ if (any_connected ||
+ !device_may_wakeup(&admhcd_to_hcd(ahcd)
+ ->self.root_hub->dev)) {
+ if (admhc_readl(ahcd, &ahcd->regs->int_enable) &
+ OHCI_INTR_RHSC)
+ poll_rh = 0;
+ } else {
+ ahcd->autostop = 1;
+ ahcd->next_statechange = jiffies + HZ;
+ }
+
+ /* if no devices have been attached for one second, autostop */
+ } else {
+ if (changed || any_connected) {
+ ahcd->autostop = 0;
+ ahcd->next_statechange = jiffies +
+ STATECHANGE_DELAY;
+ } else if (time_after_eq(jiffies,
+ ahcd->next_statechange)
+ && !ahcd->ed_rm_list
+ && !(ahcd->hc_control &
+ OHCI_SCHED_ENABLES)) {
+				admhc_rh_suspend(ahcd, 1);
+ }
+ }
+ break;
+
+ /* if there is a port change, autostart or ask to be resumed */
+ case OHCI_USB_SUSPEND:
+ case OHCI_USB_RESUME:
+ if (changed) {
+ if (ahcd->autostop)
+ admhc_rh_resume(ahcd);
+ else
+ usb_hcd_resume_root_hub(admhcd_to_hcd(ahcd));
+ } else {
+ /* everything is idle, no need for polling */
+ poll_rh = 0;
+ }
+ break;
+ }
+ return poll_rh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* must not be called from interrupt context */
+static int admhc_restart(struct admhcd *ahcd)
+{
+ int temp;
+ int i;
+ struct urb_priv *priv;
+
+ /* mark any devices gone, so they do nothing till khubd disconnects.
+ * recycle any "live" eds/tds (and urbs) right away.
+ * later, khubd disconnect processing will recycle the other state,
+ * (either as disconnect/reconnect, or maybe someday as a reset).
+ */
+ spin_lock_irq(&ahcd->lock);
+ admhc_disable(ahcd);
+ usb_root_hub_lost_power(admhcd_to_hcd(ahcd)->self.root_hub);
+ if (!list_empty(&ahcd->pending))
+ admhc_dbg(ahcd, "abort schedule...\n");
+ list_for_each_entry(priv, &ahcd->pending, pending) {
+ struct urb *urb = priv->td[0]->urb;
+ struct ed *ed = priv->ed;
+
+ switch (ed->state) {
+ case ED_OPER:
+ ed->state = ED_UNLINK;
+ ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
+			ed_deschedule(ahcd, ed);
+
+ ed->ed_next = ahcd->ed_rm_list;
+ ed->ed_prev = NULL;
+ ahcd->ed_rm_list = ed;
+ /* FALLTHROUGH */
+ case ED_UNLINK:
+ break;
+ default:
+ admhc_dbg(ahcd, "bogus ed %p state %d\n",
+ ed, ed->state);
+ }
+
+ if (!urb->unlinked)
+ urb->unlinked = -ESHUTDOWN;
+ }
+ finish_unlinks(ahcd, 0);
+ spin_unlock_irq(&ahcd->lock);
+
+ /* paranoia, in case that didn't work: */
+
+ /* empty the interrupt branches */
+	for (i = 0; i < NUM_INTS; i++)
+		ahcd->load[i] = 0;
+	for (i = 0; i < NUM_INTS; i++)
+		ahcd->hcca->int_table[i] = 0;
+
+ /* no EDs to remove */
+ ahcd->ed_rm_list = NULL;
+
+ /* empty control and bulk lists */
+ ahcd->ed_controltail = NULL;
+ ahcd->ed_bulktail = NULL;
+
+	temp = admhc_run(ahcd);
+	if (temp < 0) {
+ admhc_err(ahcd, "can't restart, %d\n", temp);
+ return temp;
+ } else {
+ /* here we "know" root ports should always stay powered,
+ * and that if we try to turn them back on the root hub
+ * will respond to CSC processing.
+ */
+ i = ahcd->num_ports;
+ while (i--)
+ admhc_writel(ahcd, RH_PS_PSS,
+ &ahcd->regs->portstatus[i]);
+ admhc_dbg(ahcd, "restart complete\n");
+ }
+ return 0;
+}
+
+#else /* CONFIG_PM */
+
+static inline int admhc_rh_resume(struct admhcd *ahcd)
+{
+ return 0;
+}
+
+/* Carry out polling-related state changes.
+ * autostop isn't used when CONFIG_PM is turned off.
+ */
+static int admhc_root_hub_state_changes(struct admhcd *ahcd, int changed,
+ int any_connected)
+{
+ /* If INSM is enabled, don't poll */
+ if (admhc_readl(ahcd, &ahcd->regs->int_enable) & ADMHC_INTR_INSM)
+ return 0;
+
+ /* If no status changes are pending, enable status-change interrupts */
+ if (!changed) {
+ admhc_intr_enable(ahcd, ADMHC_INTR_INSM);
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif /* CONFIG_PM */
+
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-q.c b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-q.c
new file mode 100644
index 0000000..cd9c892
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-q.c
@@ -0,0 +1,964 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci-q.c
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/irq.h>
+#include <linux/slab.h>
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * URB goes back to driver, and isn't reissued.
+ * It's completely gone from HC data structures.
+ * PRECONDITION: ahcd lock held, irqs blocked.
+ */
+static void
+finish_urb(struct admhcd *ahcd, struct urb *urb, int status)
+__releases(ahcd->lock)
+__acquires(ahcd->lock)
+{
+ urb_priv_free(ahcd, urb->hcpriv);
+
+ if (likely(status == -EINPROGRESS))
+ status = 0;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
+ break;
+ case PIPE_INTERRUPT:
+ admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
+ break;
+ }
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe), status);
+#endif
+
+ /* urb->complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(admhcd_to_hcd(ahcd), urb);
+ spin_unlock(&ahcd->lock);
+ usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb, status);
+ spin_lock(&ahcd->lock);
+}
+
+
+/*-------------------------------------------------------------------------*
+ * ED handling functions
+ *-------------------------------------------------------------------------*/
+
+#if 0 /* FIXME */
+/* search for the right schedule branch to use for a periodic ed.
+ * does some load balancing; returns the branch, or negative errno.
+ */
+static int balance(struct admhcd *ahcd, int interval, int load)
+{
+ int i, branch = -ENOSPC;
+
+ /* iso periods can be huge; iso tds specify frame numbers */
+ if (interval > NUM_INTS)
+ interval = NUM_INTS;
+
+ /* search for the least loaded schedule branch of that period
+ * that has enough bandwidth left unreserved.
+ */
+ for (i = 0; i < interval ; i++) {
+ if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
+ int j;
+
+ /* usb 1.1 says 90% of one frame */
+ for (j = i; j < NUM_INTS; j += interval) {
+ if ((ahcd->load[j] + load) > 900)
+ break;
+ }
+ if (j < NUM_INTS)
+ continue;
+ branch = i;
+ }
+ }
+ return branch;
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#if 0 /* FIXME */
+/* both iso and interrupt requests have periods; this routine puts them
+ * into the schedule tree in the appropriate place. Most iso devices use
+ * 1msec periods, but that's not required.
+ */
+static void periodic_link(struct admhcd *ahcd, struct ed *ed)
+{
+ unsigned i;
+
+ admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
+ (ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
+ ed, ed->branch, ed->load, ed->interval);
+
+ for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
+ struct ed **prev = &ahcd->periodic[i];
+ __hc32 *prev_p = &ahcd->hcca->int_table[i];
+ struct ed *here = *prev;
+
+ /* sorting each branch by period (slow before fast)
+ * lets us share the faster parts of the tree.
+ * (plus maybe: put interrupt eds before iso)
+ */
+ while (here && ed != here) {
+ if (ed->interval > here->interval)
+ break;
+ prev = &here->ed_next;
+ prev_p = &here->hwNextED;
+ here = *prev;
+ }
+ if (ed != here) {
+ ed->ed_next = here;
+ if (here)
+ ed->hwNextED = *prev_p;
+ wmb();
+ *prev = ed;
+ *prev_p = cpu_to_hc32(ahcd, ed->dma);
+ wmb();
+ }
+ ahcd->load[i] += ed->load;
+ }
+ admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
+}
+#endif
+
+/* link an ed into the HC chain */
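+/* The ED is appended behind the current tail of its type's list, both in the
+ * driver's software list and in the hardware hwNextED chain, and descriptor
+ * processing is then (re)enabled. */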
+
+static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
+{
+ struct ed *old_tail;
+
+ if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
+ return -EAGAIN;
+
+ ed->state = ED_OPER;
+
+ old_tail = ahcd->ed_tails[ed->type];
+
+ ed->ed_next = old_tail->ed_next;
+ if (ed->ed_next) {
+ ed->ed_next->ed_prev = ed;
+ ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
+ }
+ ed->ed_prev = old_tail;
+
+ old_tail->ed_next = ed;
+ old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);
+
+ ahcd->ed_tails[ed->type] = ed;
+
+ admhc_dma_enable(ahcd);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if 0 /* FIXME */
+/* scan the periodic table to find and unlink this ED */
+static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
+{
+ int i;
+
+ for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
+ struct ed *temp;
+ struct ed **prev = &ahcd->periodic[i];
+ __hc32 *prev_p = &ahcd->hcca->int_table[i];
+
+ while (*prev && (temp = *prev) != ed) {
+ prev_p = &temp->hwNextED;
+ prev = &temp->ed_next;
+ }
+ if (*prev) {
+ *prev_p = ed->hwNextED;
+ *prev = ed->ed_next;
+ }
+ ahcd->load[i] -= ed->load;
+ }
+
+ admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
+ admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
+ (ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
+ ed, ed->branch, ed->load, ed->interval);
+}
+#endif
+
+/* unlink an ed from the HC chain.
+ * just the link to the ed is unlinked.
+ * the link from the ed still points to another operational ed or 0
+ * so the HC can eventually finish the processing of the unlinked ed
+ * (assuming it already started that, which needn't be true).
+ *
+ * ED_UNLINK is a transient state: the HC may still see this ED, but soon
+ * it won't. ED_SKIP means the HC will finish its current transaction,
+ * but won't start anything new. The TD queue may still grow; device
+ * drivers don't know about this HCD-internal state.
+ *
+ * When the HC can't see the ED, something changes ED_UNLINK to one of:
+ *
+ * - ED_OPER: when there's any request queued, the ED gets rescheduled
+ * immediately. HC should be working on them.
+ *
+ * - ED_IDLE: when there's no TD queue. there's no reason for the HC
+ * to care about this ED; safe to disable the endpoint.
+ *
+ * When finish_unlinks() runs later, after SOF interrupt, it will often
+ * complete one or more URB unlinks before making that state change.
+ */
+static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
+{
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
+#endif
+
+ ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
+ wmb();
+ ed->state = ED_UNLINK;
+
+ /* remove this ED from the HC list */
+ ed->ed_prev->hwNextED = ed->hwNextED;
+
+ /* and remove it from our list also */
+ ed->ed_prev->ed_next = ed->ed_next;
+
+ if (ed->ed_next)
+ ed->ed_next->ed_prev = ed->ed_prev;
+
+ if (ahcd->ed_tails[ed->type] == ed)
+ ahcd->ed_tails[ed->type] = ed->ed_prev;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
+{
+ struct ed *ed;
+ struct td *td;
+
+ ed = ed_alloc(ahcd, GFP_ATOMIC);
+ if (!ed)
+ goto err;
+
+ /* dummy td; end of td list for this ed */
+ td = td_alloc(ahcd, GFP_ATOMIC);
+ if (!td)
+ goto err_free_ed;
+
+ switch (type) {
+ case PIPE_INTERRUPT:
+ info |= ED_INT;
+ break;
+ case PIPE_ISOCHRONOUS:
+ info |= ED_ISO;
+ break;
+ }
+
+ ed->dummy = td;
+ ed->state = ED_IDLE;
+ ed->type = type;
+
+ ed->hwINFO = cpu_to_hc32(ahcd, info);
+ ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
+ ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */
+
+ return ed;
+
+err_free_ed:
+ ed_free(ahcd, ed);
+err:
+ return NULL;
+}
+
+/* get and maybe (re)init an endpoint. init _should_ be done only as part
+ * of enumeration, usb_set_configuration() or usb_set_interface().
+ */
+static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
+ struct usb_device *udev, unsigned int pipe, int interval)
+{
+ struct ed *ed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ahcd->lock, flags);
+
+ ed = ep->hcpriv;
+ if (!ed) {
+ u32 info;
+
+ /* FIXME: usbcore changes dev->devnum before SET_ADDRESS
+ * succeeds ... otherwise we wouldn't need "pipe".
+ */
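+		/* ED info word: function address in the low bits, endpoint
+		 * number at ED_EN_SHIFT, max packet size at ED_MPS_SHIFT,
+		 * plus the full-speed flag (see the field layout in
+		 * adm5120.h) */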
+ info = usb_pipedevice(pipe);
+ info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
+ info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
+ if (udev->speed == USB_SPEED_FULL)
+ info |= ED_SPEED_FULL;
+
+ ed = ed_create(ahcd, usb_pipetype(pipe), info);
+ if (ed)
+ ep->hcpriv = ed;
+ }
+
+ spin_unlock_irqrestore(&ahcd->lock, flags);
+
+ return ed;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* request unlinking of an endpoint from an operational HC.
+ * put the ep on the rm_list
+ * real work is done at the next start frame (SOFI) hardware interrupt
+ * caller guarantees HCD is running, so hardware access is safe,
+ * and that ed->state is ED_OPER
+ */
+static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
+{
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
+#endif
+
+ ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
+ ed_deschedule(ahcd, ed);
+
+ /* add this ED into the remove list */
+ ed->ed_rm_next = ahcd->ed_rm_list;
+ ahcd->ed_rm_list = ed;
+
+ /* enable SOF interrupt */
+ admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
+ admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
+ /* flush those writes */
+ admhc_writel_flush(ahcd);
+
+ /* SOF interrupt might get delayed; record the frame counter value that
+ * indicates when the HC isn't looking at it, so concurrent unlinks
+ * behave. frame_no wraps every 2^16 msec, and changes right before
+ * SOF is triggered.
+ */
+ ed->tick = admhc_frame_no(ahcd) + 1;
+}
+
+/*-------------------------------------------------------------------------*
+ * TD handling functions
+ *-------------------------------------------------------------------------*/
+
+/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
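+/*
+ * The ED's tail always points at an empty "dummy" TD: the new transfer is
+ * written into the old dummy, and the freshly allocated TD becomes the next
+ * dummy, so the controller never sees a partially built descriptor.
+ */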
+
+static void
+td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
+ struct urb *urb, int index)
+{
+ struct td *td, *td_pt;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int hash;
+ u32 cbl = 0;
+
+#if 1
+ if (index == (urb_priv->td_cnt - 1) &&
+ ((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
+ cbl |= TD_IE;
+#else
+ if (index == (urb_priv->td_cnt - 1))
+ cbl |= TD_IE;
+#endif
+
+ /* use this td as the next dummy */
+ td_pt = urb_priv->td[index];
+
+ /* fill the old dummy TD */
+ td = urb_priv->td[index] = urb_priv->ed->dummy;
+ urb_priv->ed->dummy = td_pt;
+
+ td->ed = urb_priv->ed;
+ td->next_dl_td = NULL;
+ td->index = index;
+ td->urb = urb;
+ td->data_dma = data;
+ if (!len)
+ data = 0;
+
+ if (data)
+ cbl |= (len & TD_BL_MASK);
+
+ info |= TD_OWN;
+
+ /* setup hardware specific fields */
+ td->hwINFO = cpu_to_hc32(ahcd, info);
+ td->hwDBP = cpu_to_hc32(ahcd, data);
+ td->hwCBL = cpu_to_hc32(ahcd, cbl);
+ td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);
+
+ /* append to queue */
+ list_add_tail(&td->td_list, &td->ed->td_list);
+
+ /* hash it for later reverse mapping */
+ hash = TD_HASH_FUNC(td->td_dma);
+ td->td_hash = ahcd->td_hash[hash];
+ ahcd->td_hash[hash] = td;
+
+ /* HC might read the TD (or cachelines) right away ... */
+ wmb();
+ td->ed->hwTailP = td->hwNextTD;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare all TDs of a transfer, and queue them onto the ED.
+ * Caller guarantees HC is active.
+ * Usually the ED is already on the schedule, so TDs might be
+ * processed as soon as they're queued.
+ */
+static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+ dma_addr_t data;
+ int data_len = urb->transfer_buffer_length;
+ int cnt = 0;
+ u32 info = 0;
+ int is_out = usb_pipeout(urb->pipe);
+ u32 toggle = 0;
+
+ /* OHCI handles the bulk/interrupt data toggles itself. We just
+ * use the device toggle bits for resetting, and rely on the fact
+ * that resetting toggle is meaningless if the endpoint is active.
+ */
+
+ if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
+ toggle = TD_T_CARRY;
+ } else {
+ toggle = TD_T_DATA0;
+		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ is_out, 1);
+ }
+
+ urb_priv->td_idx = 0;
+ list_add(&urb_priv->pending, &ahcd->pending);
+
+ if (data_len)
+ data = urb->transfer_dma;
+ else
+ data = 0;
+
+ /* NOTE: TD_CC is set so we can tell which TDs the HC processed by
+ * using TD_CC_GET, as well as by seeing them on the done list.
+ * (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
+ */
+ switch (urb_priv->ed->type) {
+ case PIPE_INTERRUPT:
+ info = is_out
+ ? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
+ : TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;
+
+ /* setup service interval and starting frame number */
+ info |= (urb->start_frame & TD_FN_MASK);
+ info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;
+
+ td_fill(ahcd, info, data, data_len, urb, cnt);
+ cnt++;
+
+ admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
+ break;
+
+ case PIPE_BULK:
+ info = is_out
+ ? TD_SCC_NOTACCESSED | TD_DP_OUT
+ : TD_SCC_NOTACCESSED | TD_DP_IN;
+
+		/* split the request into TDs of at most TD_DATALEN_MAX bytes */
+ while (data_len > TD_DATALEN_MAX) {
+ td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
+ data, TD_DATALEN_MAX, urb, cnt);
+ data += TD_DATALEN_MAX;
+ data_len -= TD_DATALEN_MAX;
+ cnt++;
+ }
+
+ td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
+ data_len, urb, cnt);
+ cnt++;
+
+ if ((urb->transfer_flags & URB_ZERO_PACKET)
+ && (cnt < urb_priv->td_cnt)) {
+ td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
+ 0, 0, urb, cnt);
+ cnt++;
+ }
+ break;
+
+ /* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
+ * any DATA phase works normally, and the STATUS ack is special.
+ */
+ case PIPE_CONTROL:
+ /* fill a TD for the setup */
+ info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
+ td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);
+
+ if (data_len > 0) {
+ /* fill a TD for the data */
+ info = TD_SCC_NOTACCESSED | TD_T_DATA1;
+ info |= is_out ? TD_DP_OUT : TD_DP_IN;
+ /* NOTE: mishandles transfers >8K, some >4K */
+ td_fill(ahcd, info, data, data_len, urb, cnt++);
+ }
+
+ /* fill a TD for the ACK */
+ info = (is_out || data_len == 0)
+ ? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
+ : TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
+ td_fill(ahcd, info, data, 0, urb, cnt++);
+
+ break;
+
+ /* ISO has no retransmit, so no toggle;
+ * Each TD could handle multiple consecutive frames (interval 1);
+ * we could often reduce the number of TDs here.
+ */
+ case PIPE_ISOCHRONOUS:
+ info = is_out
+ ? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
+ : TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;
+
+ for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
+ int frame = urb->start_frame;
+
+ frame += cnt * urb->interval;
+ frame &= TD_FN_MASK;
+ td_fill(ahcd, info | frame,
+ data + urb->iso_frame_desc[cnt].offset,
+ urb->iso_frame_desc[cnt].length, urb, cnt);
+ }
+ admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
+ break;
+ }
+
+ if (urb_priv->td_cnt != cnt)
+ admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
+}
+
+/*-------------------------------------------------------------------------*
+ * Done List handling functions
+ *-------------------------------------------------------------------------*/
+
+/* calculate transfer length/status and update the urb */
+static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+ u32 info;
+ u32 bl;
+ u32 tdDBP;
+ int type = usb_pipetype(urb->pipe);
+ int cc;
+ int status = -EINPROGRESS;
+
+ info = hc32_to_cpup(ahcd, &td->hwINFO);
+ tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
+ bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
+ cc = TD_CC_GET(info);
+
+ /* ISO ... drivers see per-TD length/status */
+ if (type == PIPE_ISOCHRONOUS) {
+ /* TODO */
+ int dlen = 0;
+
+ /* NOTE: assumes FC in tdINFO == 0, and that
+ * only the first of 0..MAXPSW psws is used.
+ */
+ if (info & TD_CC) /* hc didn't touch? */
+ return status;
+
+ if (usb_pipeout(urb->pipe))
+ dlen = urb->iso_frame_desc[td->index].length;
+ else {
+ /* short reads are always OK for ISO */
+ if (cc == TD_CC_DATAUNDERRUN)
+ cc = TD_CC_NOERROR;
+ dlen = tdDBP - td->data_dma + bl;
+ }
+
+ urb->actual_length += dlen;
+ urb->iso_frame_desc[td->index].actual_length = dlen;
+ urb->iso_frame_desc[td->index].status = cc_to_error[cc];
+
+ if (cc != TD_CC_NOERROR)
+ admhc_vdbg(ahcd,
+ "urb %p iso td %p (%d) len %d cc %d\n",
+ urb, td, 1 + td->index, dlen, cc);
+
+ /* BULK, INT, CONTROL ... drivers see aggregate length/status,
+ * except that "setup" bytes aren't counted and "short" transfers
+ * might not be reported as errors.
+ */
+ } else {
+ /* update packet status if needed (short is normally ok) */
+ if (cc == TD_CC_DATAUNDERRUN
+ && !(urb->transfer_flags & URB_SHORT_NOT_OK))
+ cc = TD_CC_NOERROR;
+
+ if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
+ status = cc_to_error[cc];
+
+ /* count all non-empty packets except control SETUP packet */
+ if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0)
+ urb->actual_length += tdDBP - td->data_dma + bl;
+
+ if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
+ admhc_vdbg(ahcd,
+ "urb %p td %p (%d) cc %d, len=%d/%d\n",
+ urb, td, td->index, cc,
+ urb->actual_length,
+ urb->transfer_buffer_length);
+ }
+
+ list_del(&td->td_list);
+ urb_priv->td_idx++;
+
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ed_halted(struct admhcd *ahcd, struct td *td, int cc)
+{
+ struct urb *urb = td->urb;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct ed *ed = td->ed;
+ struct list_head *tmp = td->td_list.next;
+ __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
+
+ admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
+ /* clear ed halt; this is the td that caused it, but keep it inactive
+ * until its urb->complete() has a chance to clean up.
+ */
+ ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
+ wmb();
+ ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
+
+ /* Get rid of all later tds from this urb. We don't have
+ * to be careful: no errors and nothing was transferred.
+ * Also patch the ed so it looks as if those tds completed normally.
+ */
+ while (tmp != &ed->td_list) {
+ struct td *next;
+
+ next = list_entry(tmp, struct td, td_list);
+ tmp = next->td_list.next;
+
+ if (next->urb != urb)
+ break;
+
+ /* NOTE: if multi-td control DATA segments get supported,
+ * this urb had one of them, this td wasn't the last td
+ * in that segment (TD_R clear), this ed halted because
+ * of a short read, _and_ URB_SHORT_NOT_OK is clear ...
+ * then we need to leave the control STATUS packet queued
+ * and clear ED_SKIP.
+ */
+ list_del(&next->td_list);
+ urb_priv->td_cnt++;
+ ed->hwHeadP = next->hwNextTD | toggle;
+ }
+
+ /* help for troubleshooting: report anything that
+ * looks odd ... that doesn't include protocol stalls
+ * (or maybe some other things)
+ */
+ switch (cc) {
+ case TD_CC_DATAUNDERRUN:
+ if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
+ break;
+ /* fallthrough */
+ case TD_CC_STALL:
+ if (usb_pipecontrol(urb->pipe))
+ break;
+ /* fallthrough */
+ default:
+ admhc_dbg(ahcd,
+ "urb %p path %s ep%d%s %08x cc %d --> status %d\n",
+ urb, urb->dev->devpath,
+			usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ hc32_to_cpu(ahcd, td->hwINFO),
+ cc, cc_to_error[cc]);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
+static void
+finish_unlinks(struct admhcd *ahcd, u16 tick)
+{
+ struct ed *ed, **last;
+
+rescan_all:
+ for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
+ struct list_head *entry, *tmp;
+ int completed, modified;
+ __hc32 *prev;
+
+ /* only take off EDs that the HC isn't using, accounting for
+ * frame counter wraps and EDs with partially retired TDs
+ */
+ if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
+ if (tick_before(tick, ed->tick)) {
+skip_ed:
+ last = &ed->ed_rm_next;
+ continue;
+ }
+#if 0
+ if (!list_empty(&ed->td_list)) {
+ struct td *td;
+ u32 head;
+
+ td = list_entry(ed->td_list.next, struct td,
+ td_list);
+ head = hc32_to_cpu(ahcd, ed->hwHeadP) &
+ TD_MASK;
+
+ /* INTR_WDH may need to clean up first */
+ if (td->td_dma != head)
+ goto skip_ed;
+ }
+#endif
+ }
+
+ /* reentrancy: if we drop the schedule lock, someone might
+ * have modified this list. normally it's just prepending
+ * entries (which we'd ignore), but paranoia won't hurt.
+ */
+ *last = ed->ed_rm_next;
+ ed->ed_rm_next = NULL;
+ modified = 0;
+
+ /* unlink urbs as requested, but rescan the list after
+ * we call a completion since it might have unlinked
+ * another (earlier) urb
+ *
+ * When we get here, the HC doesn't see this ed. But it
+ * must not be rescheduled until all completed URBs have
+ * been given back to the driver.
+ */
+rescan_this:
+ completed = 0;
+ prev = &ed->hwHeadP;
+ list_for_each_safe(entry, tmp, &ed->td_list) {
+ struct td *td;
+ struct urb *urb;
+ struct urb_priv *urb_priv;
+ __hc32 savebits;
+ u32 tdINFO;
+ int status;
+
+ td = list_entry(entry, struct td, td_list);
+ urb = td->urb;
+ urb_priv = td->urb->hcpriv;
+
+ if (!urb->unlinked) {
+ prev = &td->hwNextTD;
+ continue;
+ }
+
+			if (urb_priv == NULL)
+ continue;
+
+ /* patch pointer hc uses */
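+			/* (preserve the low control bits of the link while
+			 * re-pointing it past the removed TD) */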
+ savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
+ *prev = td->hwNextTD | savebits;
+ /* If this was unlinked, the TD may not have been
+			 * retired ... so manually save the data toggle.
+ * The controller ignores the value we save for
+ * control and ISO endpoints.
+ */
+ tdINFO = hc32_to_cpup(ahcd, &td->hwINFO);
+ if ((tdINFO & TD_T) == TD_T_DATA0)
+ ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_C);
+ else if ((tdINFO & TD_T) == TD_T_DATA1)
+ ed->hwHeadP |= cpu_to_hc32(ahcd, ED_C);
+
+ /* HC may have partly processed this TD */
+#ifdef ADMHC_VERBOSE_DEBUG
+ urb_print(ahcd, urb, "PARTIAL", 0);
+#endif
+ status = td_done(ahcd, urb, td);
+
+ /* if URB is done, clean up */
+ if (urb_priv->td_idx == urb_priv->td_cnt) {
+ modified = completed = 1;
+ finish_urb(ahcd, urb, status);
+ }
+ }
+ if (completed && !list_empty(&ed->td_list))
+ goto rescan_this;
+
+ /* ED's now officially unlinked, hc doesn't see */
+ ed->state = ED_IDLE;
+ ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
+ ed->hwNextED = 0;
+ wmb();
+ ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);
+
+ /* but if there's work queued, reschedule */
+ if (!list_empty(&ed->td_list)) {
+ if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
+ ed_schedule(ahcd, ed);
+ }
+
+ if (modified)
+ goto rescan_all;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Process normal completions (error or success) and clean the schedules.
+ *
+ * This is the main path for handing urbs back to drivers. The only other
+ * normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
+ * instead of scanning the (re-reversed) donelist as this does.
+ */
+
+static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
+{
+ struct list_head *entry, *tmp;
+ __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ admhc_dump_ed(ahcd, "UNHALT", ed, 0);
+#endif
+ /* clear ed halt; this is the td that caused it, but keep it inactive
+ * until its urb->complete() has a chance to clean up.
+ */
+ ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
+ wmb();
+ ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
+
+ list_for_each_safe(entry, tmp, &ed->td_list) {
+ struct td *td = list_entry(entry, struct td, td_list);
+ __hc32 info;
+
+ if (td->urb != urb)
+ break;
+
+ info = td->hwINFO;
+ info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
+ td->hwINFO = info;
+
+ ed->hwHeadP = td->hwNextTD | toggle;
+ wmb();
+ }
+
+}
+
+static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
+{
+ __hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
+
+ ed->hwHeadP = ed->hwTailP | toggle;
+}
+
+
+static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
+{
+ return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
+}
+
+static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
+ struct td *td)
+{
+ return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
+ (hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
+}
+
+static void ed_update(struct admhcd *ahcd, struct ed *ed)
+{
+ struct list_head *entry, *tmp;
+
+#ifdef ADMHC_VERBOSE_DEBUG
+ admhc_dump_ed(ahcd, "UPDATE", ed, 1);
+#endif
+
+ list_for_each_safe(entry, tmp, &ed->td_list) {
+ struct td *td = list_entry(entry, struct td, td_list);
+ struct urb *urb = td->urb;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int status;
+
+ if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
+ break;
+
+ /* update URB's length and status from TD */
+ status = td_done(ahcd, urb, td);
+ if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
+ ed_unhalt(ahcd, ed, urb);
+
+ if (ed->type == PIPE_INTERRUPT)
+ ed_intr_refill(ahcd, ed);
+
+ /* If all this urb's TDs are done, call complete() */
+ if (urb_priv->td_idx == urb_priv->td_cnt)
+ finish_urb(ahcd, urb, status);
+
+ /* clean schedule: unlink EDs that are no longer busy */
+ if (list_empty(&ed->td_list)) {
+ if (ed->state == ED_OPER)
+ start_ed_unlink(ahcd, ed);
+
+ /* ... reenabling halted EDs only after fault cleanup */
+ } else if ((ed->hwINFO & cpu_to_hc32(ahcd,
+ ED_SKIP | ED_DEQUEUE))
+ == cpu_to_hc32(ahcd, ED_SKIP)) {
+ td = list_entry(ed->td_list.next, struct td, td_list);
+#if 0
+ if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
+ ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
+ /* ... hc may need waking-up */
+ switch (ed->type) {
+ case PIPE_CONTROL:
+ admhc_writel(ahcd, OHCI_CLF,
+ &ahcd->regs->cmdstatus);
+ break;
+ case PIPE_BULK:
+ admhc_writel(ahcd, OHCI_BLF,
+ &ahcd->regs->cmdstatus);
+ break;
+ }
+ }
+#else
+ if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
+ ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
+#endif
+ }
+
+ }
+}
+
+/* there are some tds completed; called in_irq(), with HCD locked */
+static void admhc_td_complete(struct admhcd *ahcd)
+{
+ struct ed *ed;
+
+ for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
+ if (ed->state != ED_OPER)
+ continue;
+
+ ed_update(ahcd, ed);
+ }
+}
diff --git a/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120.h b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120.h
new file mode 100644
index 0000000..e47aac8
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/usb/host/adm5120.h
@@ -0,0 +1,755 @@
+/*
+ * ADM5120 HCD (Host Controller Driver) for USB
+ *
+ * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This file was derived from: drivers/usb/host/ohci.h
+ * (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
+ * (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given OHCI_BIG_ENDIAN), depending on the
+ * host controller implementation.
+ */
+typedef __u32 __bitwise __hc32;
+typedef __u16 __bitwise __hc16;
+
+/*
+ * OHCI Endpoint Descriptor (ED) ... holds TD queue
+ * See OHCI spec, section 4.2
+ *
+ * This is a "Queue Head" for those transfers, which is why
+ * both EHCI and UHCI call similar structures a "QH".
+ */
+
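+/* largest payload a single TD is allowed to carry; bulk transfers larger
+ * than this are split across multiple TDs in td_submit_urb() */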
+#define TD_DATALEN_MAX 4096
+
+#define ED_ALIGN 16
+#define ED_MASK ((u32)~(ED_ALIGN-1)) /* strip hw status in low addr bits */
+
+struct ed {
+ /* first fields are hardware-specified */
+ __hc32 hwINFO; /* endpoint config bitmap */
+ /* info bits defined by hcd */
+#define ED_DEQUEUE (1 << 27)
+ /* info bits defined by the hardware */
+#define ED_MPS_SHIFT 16
+#define ED_MPS_MASK ((1 << 11)-1)
+#define ED_MPS_GET(x) (((x) >> ED_MPS_SHIFT) & ED_MPS_MASK)
+#define ED_ISO (1 << 15) /* isochronous endpoint */
+#define ED_SKIP (1 << 14)
+#define ED_SPEED_FULL (1 << 13) /* fullspeed device */
+#define ED_INT (1 << 11) /* interrupt endpoint */
+#define ED_EN_SHIFT 7 /* endpoint shift */
+#define ED_EN_MASK ((1 << 4)-1) /* endpoint mask */
+#define ED_EN_GET(x) (((x) >> ED_EN_SHIFT) & ED_EN_MASK)
+#define ED_FA_MASK ((1 << 7)-1) /* function address mask */
+#define ED_FA_GET(x) ((x) & ED_FA_MASK)
+ __hc32 hwTailP; /* tail of TD list */
+ __hc32 hwHeadP; /* head of TD list (hc r/w) */
+#define ED_C (0x02) /* toggle carry */
+#define ED_H (0x01) /* halted */
+ __hc32 hwNextED; /* next ED in list */
+
+ /* rest are purely for the driver's use */
+ dma_addr_t dma; /* addr of ED */
+ struct td *dummy; /* next TD to activate */
+
+ struct list_head urb_list; /* list of our URBs */
+
+ /* host's view of schedule */
+ struct ed *ed_next; /* on schedule list */
+ struct ed *ed_prev; /* for non-interrupt EDs */
+ struct ed *ed_rm_next; /* on rm list */
+ struct list_head td_list; /* "shadow list" of our TDs */
+
+ /* create --> IDLE --> OPER --> ... --> IDLE --> destroy
+ * usually: OPER --> UNLINK --> (IDLE | OPER) --> ...
+ */
+ u8 state; /* ED_{IDLE,UNLINK,OPER} */
+#define ED_IDLE 0x00 /* NOT linked to HC */
+#define ED_UNLINK 0x01 /* being unlinked from hc */
+#define ED_OPER 0x02 /* IS linked to hc */
+
+ u8 type; /* PIPE_{BULK,...} */
+
+ /* periodic scheduling params (for intr and iso) */
+ u8 branch;
+ u16 interval;
+ u16 load;
+ u16 last_iso; /* iso only */
+
+ /* HC may see EDs on rm_list until next frame (frame_no == tick) */
+ u16 tick;
+} __attribute__ ((aligned(ED_ALIGN)));
+
+/*
+ * OHCI Transfer Descriptor (TD) ... one per transfer segment
+ * See OHCI spec, sections 4.3.1 (general = control/bulk/interrupt)
+ * and 4.3.2 (iso)
+ */
+
+#define TD_ALIGN 32
+#define TD_MASK ((u32)~(TD_ALIGN-1)) /* strip hw status in low addr bits */
+
+struct td {
+ /* first fields are hardware-specified */
+ __hc32 hwINFO; /* transfer info bitmask */
+
+ /* hwINFO bits */
+#define TD_OWN (1 << 31) /* owner of the descriptor */
+#define TD_CC_SHIFT 27 /* condition code */
+#define TD_CC_MASK 0xf
+#define TD_CC (TD_CC_MASK << TD_CC_SHIFT)
+#define TD_CC_GET(x) (((x) >> TD_CC_SHIFT) & TD_CC_MASK)
+
+#define TD_EC_SHIFT 25 /* error count */
+#define TD_EC_MASK 0x3
+#define TD_EC (TD_EC_MASK << TD_EC_SHIFT)
+#define TD_EC_GET(x) ((x >> TD_EC_SHIFT) & TD_EC_MASK)
+#define TD_T_SHIFT 23 /* data toggle state */
+#define TD_T_MASK 0x3
+#define TD_T (TD_T_MASK << TD_T_SHIFT)
+#define TD_T_DATA0 (0x2 << TD_T_SHIFT) /* DATA0 */
+#define TD_T_DATA1 (0x3 << TD_T_SHIFT) /* DATA1 */
+#define TD_T_CARRY (0x0 << TD_T_SHIFT) /* uses ED_C */
+#define TD_T_GET(x) (((x) >> TD_T_SHIFT) & TD_T_MASK)
+#define TD_DP_SHIFT 21 /* direction/pid */
+#define TD_DP_MASK 0x3
+#define TD_DP (TD_DP_MASK << TD_DP_SHIFT)
+#define TD_DP_GET(x)	(((x) >> TD_DP_SHIFT) & TD_DP_MASK)
+#define TD_DP_SETUP (0x0 << TD_DP_SHIFT) /* SETUP pid */
+#define TD_DP_OUT (0x1 << TD_DP_SHIFT) /* OUT pid */
+#define TD_DP_IN (0x2 << TD_DP_SHIFT) /* IN pid */
+#define TD_ISI_SHIFT 8 /* Interrupt Service Interval */
+#define TD_ISI_MASK 0x3f
+#define TD_ISI_GET(x) (((x) >> TD_ISI_SHIFT) & TD_ISI_MASK)
+#define TD_FN_MASK 0x3f /* frame number */
+#define TD_FN_GET(x) ((x) & TD_FN_MASK)
+
+ __hc32 hwDBP; /* Data Buffer Pointer (or 0) */
+ __hc32 hwCBL; /* Controller/Buffer Length */
+
+ /* hwCBL bits */
+#define TD_BL_MASK 0xffff /* buffer length */
+#define TD_BL_GET(x) ((x) & TD_BL_MASK)
+#define TD_IE (1 << 16) /* interrupt enable */
+ __hc32 hwNextTD; /* Next TD Pointer */
+
+ /* rest are purely for the driver's use */
+ __u8 index;
+ struct ed *ed;
+ struct td *td_hash; /* dma-->td hashtable */
+ struct td *next_dl_td;
+ struct urb *urb;
+
+ dma_addr_t td_dma; /* addr of this TD */
+ dma_addr_t data_dma; /* addr of data it points to */
+
+ struct list_head td_list; /* "shadow list", TDs on same ED */
+
+ u32 flags;
+#define TD_FLAG_DONE (1 << 17) /* retired to done list */
+#define TD_FLAG_ISO (1 << 16) /* copy of ED_ISO */
+} __attribute__ ((aligned(TD_ALIGN))); /* c/b/i need 16; only iso needs 32 */
+
+/*
+ * Hardware transfer status codes -- CC from td->hwINFO
+ */
+#define TD_CC_NOERROR 0x00
+#define TD_CC_CRC 0x01
+#define TD_CC_BITSTUFFING 0x02
+#define TD_CC_DATATOGGLEM 0x03
+#define TD_CC_STALL 0x04
+#define TD_CC_DEVNOTRESP 0x05
+#define TD_CC_PIDCHECKFAIL 0x06
+#define TD_CC_UNEXPECTEDPID 0x07
+#define TD_CC_DATAOVERRUN 0x08
+#define TD_CC_DATAUNDERRUN 0x09
+ /* 0x0A, 0x0B reserved for hardware */
+#define TD_CC_BUFFEROVERRUN 0x0C
+#define TD_CC_BUFFERUNDERRUN 0x0D
+ /* 0x0E, 0x0F reserved for HCD */
+#define TD_CC_HCD0 0x0E
+#define TD_CC_NOTACCESSED 0x0F
+
+/*
+ * preshifted status codes
+ */
+#define TD_SCC_NOTACCESSED (TD_CC_NOTACCESSED << TD_CC_SHIFT)
+
+
+/* map OHCI TD status codes (CC) to errno values */
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIME,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -EALREADY,
+ /* (for HCD) */ -EALREADY
+};
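+/*
+ * Sketch only: how the TD macros above are typically combined. Presetting
+ * the condition code to "not accessed" and handing the descriptor to the
+ * controller via TD_OWN mirror common OHCI practice and are assumptions
+ * here, not taken verbatim from this driver.
+ */
+#if 0
+static inline u32 td_info_bulk_out_example(void)
+{
+	/* HC owns the TD, OUT pid, data toggle carried from the ED */
+	return TD_OWN | TD_DP_OUT | TD_T_CARRY | TD_SCC_NOTACCESSED;
+}
+
+static inline int td_cc_to_errno_example(u32 info)
+{
+	/* map a retired TD's condition code to a Linux errno */
+	return cc_to_error[TD_CC_GET(info)];
+}
+#endif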
+
+#define NUM_INTS 32
+
+/*
+ * This is the structure of the ADMHC controller's memory-mapped I/O region.
+ * These fields must be accessed with readl() and writel() (from <asm/io.h>),
+ * normally through the admhc_readl()/admhc_writel() helpers below. The
+ * layout is specific to the ADM5120 and only loosely resembles the OHCI
+ * operational registers described in section 7 of the OHCI spec.
+ */
+struct admhcd_regs {
+ __hc32 gencontrol; /* General Control */
+ __hc32 int_status; /* Interrupt Status */
+ __hc32 int_enable; /* Interrupt Enable */
+ __hc32 reserved00;
+ __hc32 host_control; /* Host General Control */
+ __hc32 reserved01;
+ __hc32 fminterval; /* Frame Interval */
+ __hc32 fmnumber; /* Frame Number */
+ __hc32 reserved02;
+ __hc32 reserved03;
+ __hc32 reserved04;
+ __hc32 reserved05;
+ __hc32 reserved06;
+ __hc32 reserved07;
+ __hc32 reserved08;
+ __hc32 reserved09;
+ __hc32 reserved10;
+ __hc32 reserved11;
+ __hc32 reserved12;
+ __hc32 reserved13;
+ __hc32 reserved14;
+ __hc32 reserved15;
+ __hc32 reserved16;
+ __hc32 reserved17;
+ __hc32 reserved18;
+ __hc32 reserved19;
+ __hc32 reserved20;
+ __hc32 reserved21;
+ __hc32 lsthresh; /* Low Speed Threshold */
+ __hc32 rhdesc; /* Root Hub Descriptor */
+#define MAX_ROOT_PORTS 2
+ __hc32 portstatus[MAX_ROOT_PORTS]; /* Port Status */
+ __hc32 hosthead; /* Host Descriptor Head */
+} __attribute__ ((aligned(32)));
+
+/*
+ * General Control register bits
+ */
+#define ADMHC_CTRL_UHFE (1 << 0) /* USB Host Function Enable */
+#define ADMHC_CTRL_SIR (1 << 1) /* Software Interrupt request */
+#define ADMHC_CTRL_DMAA (1 << 2) /* DMA Arbitration Control */
+#define ADMHC_CTRL_SR (1 << 3) /* Software Reset */
+
+/*
+ * Host General Control register bits
+ */
+#define ADMHC_HC_BUSS 0x3 /* USB bus state */
+#define ADMHC_BUSS_RESET 0x0
+#define ADMHC_BUSS_RESUME 0x1
+#define ADMHC_BUSS_OPER 0x2
+#define ADMHC_BUSS_SUSPEND 0x3
+#define ADMHC_HC_DMAE (1 << 2) /* DMA enable */
+
+/*
+ * Interrupt Status/Enable register bits
+ */
+#define ADMHC_INTR_SOFI (1 << 4) /* start of frame */
+#define ADMHC_INTR_RESI (1 << 5) /* resume detected */
+#define ADMHC_INTR_6 (1 << 6) /* unknown */
+#define ADMHC_INTR_7 (1 << 7) /* unknown */
+#define ADMHC_INTR_BABI (1 << 8) /* babble detected */
+#define ADMHC_INTR_INSM (1 << 9) /* root hub status change */
+#define ADMHC_INTR_SO (1 << 10) /* scheduling overrun */
+#define ADMHC_INTR_FNO (1 << 11) /* frame number overflow */
+#define ADMHC_INTR_TDC (1 << 20) /* transfer descriptor completed */
+#define ADMHC_INTR_SWI (1 << 29) /* software interrupt */
+#define ADMHC_INTR_FATI (1 << 30) /* fatal error */
+#define ADMHC_INTR_INTA (1 << 31) /* interrupt active */
+
+#define ADMHC_INTR_MIE (1 << 31) /* master interrupt enable */
+
+/*
+ * SOF Frame Interval register bits
+ */
+#define ADMHC_SFI_FI_MASK ((1 << 14)-1) /* Frame Interval value */
+#define ADMHC_SFI_FSLDP_SHIFT 16
+#define ADMHC_SFI_FSLDP_MASK ((1 << 15)-1)
+#define ADMHC_SFI_FIT (1 << 31) /* Frame Interval Toggle */
+
+/*
+ * SOF Frame Number register bits
+ */
+#define ADMHC_SFN_FN_MASK ((1 << 16)-1) /* Frame Number Mask */
+#define ADMHC_SFN_FR_SHIFT 16 /* Frame Remaining Shift */
+#define ADMHC_SFN_FR_MASK ((1 << 14)-1) /* Frame Remaining Mask */
+#define ADMHC_SFN_FRT (1 << 31) /* Frame Remaining Toggle */
+
+/*
+ * Root Hub Descriptor register bits
+ */
+#define ADMHC_RH_NUMP 0xff /* number of ports */
+#define ADMHC_RH_PSM (1 << 8) /* power switching mode */
+#define ADMHC_RH_NPS (1 << 9) /* no power switching */
+#define ADMHC_RH_OCPM (1 << 10) /* over current protection mode */
+#define ADMHC_RH_NOCP (1 << 11) /* no over current protection */
+#define ADMHC_RH_PPCM (0xff << 16) /* port power control */
+
+#define ADMHC_RH_LPS (1 << 24) /* local power switch */
+#define ADMHC_RH_OCI (1 << 25) /* over current indicator */
+
+/* status change bits */
+#define ADMHC_RH_LPSC (1 << 26) /* local power switch change */
+#define ADMHC_RH_OCIC (1 << 27) /* over current indicator change */
+
+#define ADMHC_RH_DRWE (1 << 28) /* device remote wakeup enable */
+#define ADMHC_RH_CRWE (1 << 29) /* clear remote wakeup enable */
+
+#define ADMHC_RH_CGP (1 << 24) /* clear global power */
+#define ADMHC_RH_SGP (1 << 26) /* set global power */
+
+/*
+ * Port Status register bits
+ */
+#define ADMHC_PS_CCS (1 << 0) /* current connect status */
+#define ADMHC_PS_PES (1 << 1) /* port enable status */
+#define ADMHC_PS_PSS (1 << 2) /* port suspend status */
+#define ADMHC_PS_POCI (1 << 3) /* port over current indicator */
+#define ADMHC_PS_PRS (1 << 4) /* port reset status */
+#define ADMHC_PS_PPS (1 << 8) /* port power status */
+#define ADMHC_PS_LSDA (1 << 9) /* low speed device attached */
+
+/* status change bits */
+#define ADMHC_PS_CSC (1 << 16) /* connect status change */
+#define ADMHC_PS_PESC (1 << 17) /* port enable status change */
+#define ADMHC_PS_PSSC (1 << 18) /* port suspend status change */
+#define ADMHC_PS_OCIC (1 << 19) /* over current indicator change */
+#define ADMHC_PS_PRSC (1 << 20) /* port reset status change */
+
+/* port feature bits */
+#define ADMHC_PS_CPE (1 << 0) /* clear port enable */
+#define ADMHC_PS_SPE (1 << 1) /* set port enable */
+#define ADMHC_PS_SPS (1 << 2) /* set port suspend */
+#define ADMHC_PS_CPS (1 << 3) /* clear suspend status */
+#define ADMHC_PS_SPR (1 << 4) /* set port reset */
+#define ADMHC_PS_SPP (1 << 8) /* set port power */
+#define ADMHC_PS_CPP (1 << 9) /* clear port power */
+
+/*
+ * the POTPGT value is not defined in the ADMHC, so define a dummy value
+ */
+#define ADMHC_POTPGT 2 /* in ms */
+
+/* hcd-private per-urb state */
+struct urb_priv {
+ struct ed *ed;
+ struct list_head pending; /* URBs on the same ED */
+
+ u32 td_cnt; /* # tds in this request */
+ u32 td_idx; /* index of the current td */
+ struct td *td[0]; /* all TDs in this request */
+};
+
+#define TD_HASH_SIZE 64 /* power'o'two */
+/* sizeof (struct td) ~= 64 == 2^6 ... */
+#define TD_HASH_FUNC(td_dma) ((td_dma ^ (td_dma >> 6)) % TD_HASH_SIZE)
+
+/*
+ * This is the full ADMHCD controller description
+ *
+ * Note how the "proper" USB information is just
+ * a subset of what the full implementation needs. (Linus)
+ */
+
+struct admhcd {
+ spinlock_t lock;
+
+ /*
+ * I/O memory used to communicate with the HC (dma-consistent)
+ */
+ struct admhcd_regs __iomem *regs;
+
+ /*
+ * hcd adds to schedule for a live hc any time, but removals finish
+ * only at the start of the next frame.
+ */
+
+ struct ed *ed_head;
+ struct ed *ed_tails[4];
+
+ struct ed *ed_rm_list; /* to be removed */
+
+ struct ed *periodic[NUM_INTS]; /* shadow int_table */
+
+#if 0 /* TODO: remove? */
+ /*
+ * OTG controllers and transceivers need software interaction;
+ * other external transceivers should be software-transparent
+ */
+ struct otg_transceiver *transceiver;
+ void (*start_hnp)(struct admhcd *ahcd);
+#endif
+
+ /*
+ * memory management for queue data structures
+ */
+ struct dma_pool *td_cache;
+ struct dma_pool *ed_cache;
+ struct td *td_hash[TD_HASH_SIZE];
+ struct list_head pending;
+
+ /*
+ * driver state
+ */
+ int num_ports;
+ int load[NUM_INTS];
+ u32 host_control; /* copy of the host_control reg */
+ unsigned long next_statechange; /* suspend/resume */
+ u32 fminterval; /* saved register */
+ unsigned autostop:1; /* rh auto stopping/stopped */
+
+ unsigned long flags; /* for HC bugs */
+#define OHCI_QUIRK_AMD756 0x01 /* erratum #4 */
+#define OHCI_QUIRK_SUPERIO 0x02 /* natsemi */
+#define OHCI_QUIRK_INITRESET 0x04 /* SiS, OPTi, ... */
+#define OHCI_QUIRK_BE_DESC 0x08 /* BE descriptors */
+#define OHCI_QUIRK_BE_MMIO 0x10 /* BE registers */
+#define OHCI_QUIRK_ZFMICRO 0x20 /* Compaq ZFMicro chipset*/
+ /* there are also chip quirks/bugs in init logic */
+
+#ifdef DEBUG
+ struct dentry *debug_dir;
+ struct dentry *debug_async;
+ struct dentry *debug_periodic;
+ struct dentry *debug_registers;
+#endif
+};
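+/*
+ * Sketch of a dma-address --> TD lookup over the td_hash chains declared
+ * above: the bucket is picked with TD_HASH_FUNC() and the chain is walked
+ * through td->td_hash. The real lookup lives elsewhere in the HCD; this is
+ * only meant to show how the table is intended to be used.
+ */
+#if 0
+static struct td *td_hash_find_example(struct admhcd *ahcd, dma_addr_t td_dma)
+{
+	struct td *td;
+
+	for (td = ahcd->td_hash[TD_HASH_FUNC(td_dma)]; td; td = td->td_hash)
+		if (td->td_dma == td_dma)
+			return td;
+
+	return NULL;
+}
+#endif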
+
+/* convert between an hcd pointer and the corresponding ahcd_hcd */
+static inline struct admhcd *hcd_to_admhcd(struct usb_hcd *hcd)
+{
+ return (struct admhcd *)(hcd->hcd_priv);
+}
+static inline struct usb_hcd *admhcd_to_hcd(const struct admhcd *ahcd)
+{
+ return container_of((void *)ahcd, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifndef DEBUG
+#define STUB_DEBUG_FILES
+#endif /* DEBUG */
+
+#ifdef DEBUG
+# define admhc_dbg(ahcd, fmt, args...) \
+ printk(KERN_DEBUG "adm5120-hcd: " fmt, ## args)
+#else
+# define admhc_dbg(ahcd, fmt, args...) do { } while (0)
+#endif
+
+#define admhc_err(ahcd, fmt, args...) \
+ printk(KERN_ERR "adm5120-hcd: " fmt, ## args)
+#define admhc_info(ahcd, fmt, args...) \
+ printk(KERN_INFO "adm5120-hcd: " fmt, ## args)
+#define admhc_warn(ahcd, fmt, args...) \
+ printk(KERN_WARNING "adm5120-hcd: " fmt, ## args)
+
+#ifdef ADMHC_VERBOSE_DEBUG
+# define admhc_vdbg admhc_dbg
+#else
+# define admhc_vdbg(ahcd, fmt, args...) do { } while (0)
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * While most USB host controllers implement their registers and
+ * in-memory communication descriptors in little-endian format,
+ * a minority (notably the IBM STB04XXX and the Motorola MPC5200
+ * processors) implement them in big endian format.
+ *
+ * In addition some more exotic implementations like the Toshiba
+ * Spider (aka SCC) cell southbridge are "mixed" endian, that is,
+ * they have a different endianness for registers vs. in-memory
+ * descriptors.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ * That leads to some tricky Kconfig rules, however. There are
+ * different defaults based on some arch/ppc platforms, though
+ * the basic rules are:
+ *
+ * Controller type Kconfig options needed
+ * --------------- ----------------------
+ * little endian CONFIG_USB_ADMHC_LITTLE_ENDIAN
+ *
+ * fully big endian CONFIG_USB_ADMHC_BIG_ENDIAN_DESC _and_
+ * CONFIG_USB_ADMHC_BIG_ENDIAN_MMIO
+ *
+ * mixed endian CONFIG_USB_ADMHC_LITTLE_ENDIAN _and_
+ *				CONFIG_USB_ADMHC_BIG_ENDIAN_{MMIO,DESC}
+ *
+ * (If you have a mixed endian controller, you -must- also define
+ * CONFIG_USB_ADMHC_LITTLE_ENDIAN or things will not work when building
+ * both your mixed endian and a fully big endian controller support in
+ * the same kernel image).
+ */
+
+#ifdef CONFIG_USB_ADMHC_BIG_ENDIAN_DESC
+#ifdef CONFIG_USB_ADMHC_LITTLE_ENDIAN
+#define big_endian_desc(ahcd) (ahcd->flags & OHCI_QUIRK_BE_DESC)
+#else
+#define big_endian_desc(ahcd) 1 /* only big endian */
+#endif
+#else
+#define big_endian_desc(ahcd) 0 /* only little endian */
+#endif
+
+#ifdef CONFIG_USB_ADMHC_BIG_ENDIAN_MMIO
+#ifdef CONFIG_USB_ADMHC_LITTLE_ENDIAN
+#define big_endian_mmio(ahcd) (ahcd->flags & OHCI_QUIRK_BE_MMIO)
+#else
+#define big_endian_mmio(ahcd) 1 /* only big endian */
+#endif
+#else
+#define big_endian_mmio(ahcd) 0 /* only little endian */
+#endif
+
+/*
+ * Big-endian read/write functions are arch-specific.
+ * Other arches can be added if/when they're needed.
+ *
+ */
+static inline unsigned int admhc_readl(const struct admhcd *ahcd,
+ __hc32 __iomem *regs)
+{
+#ifdef CONFIG_USB_ADMHC_BIG_ENDIAN_MMIO
+ return big_endian_mmio(ahcd) ?
+ readl_be(regs) :
+ readl(regs);
+#else
+ return readl(regs);
+#endif
+}
+
+static inline void admhc_writel(const struct admhcd *ahcd,
+ const unsigned int val, __hc32 __iomem *regs)
+{
+#ifdef CONFIG_USB_ADMHC_BIG_ENDIAN_MMIO
+ big_endian_mmio(ahcd) ?
+ writel_be(val, regs) :
+ writel(val, regs);
+#else
+ writel(val, regs);
+#endif
+}
+
+static inline void admhc_writel_flush(const struct admhcd *ahcd)
+{
+#if 0
+ /* TODO: remove? */
+ (void) admhc_readl(ahcd, &ahcd->regs->gencontrol);
+#endif
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* cpu to ahcd */
+static inline __hc16 cpu_to_hc16(const struct admhcd *ahcd, const u16 x)
+{
+ return big_endian_desc(ahcd) ?
+ (__force __hc16)cpu_to_be16(x) :
+ (__force __hc16)cpu_to_le16(x);
+}
+
+static inline __hc16 cpu_to_hc16p(const struct admhcd *ahcd, const u16 *x)
+{
+ return big_endian_desc(ahcd) ?
+ cpu_to_be16p(x) :
+ cpu_to_le16p(x);
+}
+
+static inline __hc32 cpu_to_hc32(const struct admhcd *ahcd, const u32 x)
+{
+ return big_endian_desc(ahcd) ?
+ (__force __hc32)cpu_to_be32(x) :
+ (__force __hc32)cpu_to_le32(x);
+}
+
+static inline __hc32 cpu_to_hc32p(const struct admhcd *ahcd, const u32 *x)
+{
+ return big_endian_desc(ahcd) ?
+ cpu_to_be32p(x) :
+ cpu_to_le32p(x);
+}
+
+/* ahcd to cpu */
+static inline u16 hc16_to_cpu(const struct admhcd *ahcd, const __hc16 x)
+{
+ return big_endian_desc(ahcd) ?
+ be16_to_cpu((__force __be16)x) :
+ le16_to_cpu((__force __le16)x);
+}
+
+static inline u16 hc16_to_cpup(const struct admhcd *ahcd, const __hc16 *x)
+{
+ return big_endian_desc(ahcd) ?
+ be16_to_cpup((__force __be16 *)x) :
+ le16_to_cpup((__force __le16 *)x);
+}
+
+static inline u32 hc32_to_cpu(const struct admhcd *ahcd, const __hc32 x)
+{
+ return big_endian_desc(ahcd) ?
+ be32_to_cpu((__force __be32)x) :
+ le32_to_cpu((__force __le32)x);
+}
+
+static inline u32 hc32_to_cpup(const struct admhcd *ahcd, const __hc32 *x)
+{
+ return big_endian_desc(ahcd) ?
+ be32_to_cpup((__force __be32 *)x) :
+ le32_to_cpup((__force __le32 *)x);
+}
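+/*
+ * Example (sketch, assumptions noted in-line): the low bits of an ED's
+ * hwHeadP carry the ED_H/ED_C status flags, so the TD address should be
+ * masked out after the byte-order conversion; TD_MASK is assumed to be the
+ * right mask here since head pointers reference TD-aligned addresses.
+ */
+#if 0
+static inline u32 ed_head_td_example(const struct admhcd *ahcd,
+		const struct ed *ed)
+{
+	return hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK;
+}
+
+static inline int ed_halted_example(const struct admhcd *ahcd,
+		const struct ed *ed)
+{
+	return (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) != 0;
+}
+#endif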
+
+/*-------------------------------------------------------------------------*/
+
+static inline u16 admhc_frame_no(const struct admhcd *ahcd)
+{
+ u32 t;
+
+ t = admhc_readl(ahcd, &ahcd->regs->fmnumber) & ADMHC_SFN_FN_MASK;
+ return (u16)t;
+}
+
+static inline u16 admhc_frame_remain(const struct admhcd *ahcd)
+{
+ u32 t;
+
+ t = admhc_readl(ahcd, &ahcd->regs->fmnumber) >> ADMHC_SFN_FR_SHIFT;
+ t &= ADMHC_SFN_FR_MASK;
+ return (u16)t;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void admhc_disable(struct admhcd *ahcd)
+{
+ admhcd_to_hcd(ahcd)->state = HC_STATE_HALT;
+}
+
+#define FI 0x2edf /* 12000 bits per frame (-1) */
+#define FSLDP(fi) (0x7fff & ((6 * ((fi) - 1200)) / 7))
+#define FIT ADMHC_SFI_FIT
+#define LSTHRESH 0x628 /* lowspeed bit threshold */
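+/*
+ * Worked example: the default FI of 0x2edf is 11999, i.e. 12000 bit times
+ * per 1 ms frame minus one. FSLDP(0x2edf) then evaluates to
+ * (6 * (11999 - 1200)) / 7 = 9256 bit times for the largest full-speed
+ * data packet field; treating the 1200 bit times as per-frame overhead is
+ * an assumption carried over from the OHCI FSMPS calculation.
+ */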
+
+static inline void periodic_reinit(struct admhcd *ahcd)
+{
+#if 0
+ u32 fi = ahcd->fminterval & ADMHC_SFI_FI_MASK;
+ u32 fit = admhc_readl(ahcd, &ahcd->regs->fminterval) & FIT;
+
+ /* TODO: adjust FSLargestDataPacket value too? */
+ admhc_writel(ahcd, (fit ^ FIT) | ahcd->fminterval,
+ &ahcd->regs->fminterval);
+#else
+ u32 fit = admhc_readl(ahcd, &ahcd->regs->fminterval) & FIT;
+
+ /* TODO: adjust FSLargestDataPacket value too? */
+ admhc_writel(ahcd, (fit ^ FIT) | ahcd->fminterval,
+ &ahcd->regs->fminterval);
+#endif
+}
+
+static inline u32 admhc_read_rhdesc(struct admhcd *ahcd)
+{
+ return admhc_readl(ahcd, &ahcd->regs->rhdesc);
+}
+
+static inline u32 admhc_read_portstatus(struct admhcd *ahcd, int port)
+{
+ return admhc_readl(ahcd, &ahcd->regs->portstatus[port]);
+}
+
+static inline void admhc_write_portstatus(struct admhcd *ahcd, int port,
+ u32 value)
+{
+ admhc_writel(ahcd, value, &ahcd->regs->portstatus[port]);
+}
+
+static inline void roothub_write_status(struct admhcd *ahcd, u32 value)
+{
+ /* FIXME: read-only bits must be masked out */
+ admhc_writel(ahcd, value, &ahcd->regs->rhdesc);
+}
+
+static inline void admhc_intr_disable(struct admhcd *ahcd, u32 ints)
+{
+ u32 t;
+
+ t = admhc_readl(ahcd, &ahcd->regs->int_enable);
+ t &= ~(ints);
+ admhc_writel(ahcd, t, &ahcd->regs->int_enable);
+ /* TODO: flush writes ?*/
+}
+
+static inline void admhc_intr_enable(struct admhcd *ahcd, u32 ints)
+{
+ u32 t;
+
+ t = admhc_readl(ahcd, &ahcd->regs->int_enable);
+ t |= ints;
+ admhc_writel(ahcd, t, &ahcd->regs->int_enable);
+ /* TODO: flush writes ?*/
+}
+
+static inline void admhc_intr_ack(struct admhcd *ahcd, u32 ints)
+{
+ admhc_writel(ahcd, ints, &ahcd->regs->int_status);
+}
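+/*
+ * Sketch (assumed ordering, not taken from this file): a typical interrupt
+ * path reads the status register, masks it against the currently enabled
+ * sources, and acknowledges only what it is about to handle.
+ */
+#if 0
+static inline u32 admhc_irq_pending_example(struct admhcd *ahcd)
+{
+	u32 ints = admhc_readl(ahcd, &ahcd->regs->int_status);
+
+	ints &= admhc_readl(ahcd, &ahcd->regs->int_enable);
+	if (ints)
+		admhc_intr_ack(ahcd, ints);
+
+	return ints;
+}
+#endif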
+
+static inline void admhc_dma_enable(struct admhcd *ahcd)
+{
+ u32 t;
+
+ t = admhc_readl(ahcd, &ahcd->regs->host_control);
+ if (t & ADMHC_HC_DMAE)
+ return;
+
+ t |= ADMHC_HC_DMAE;
+ admhc_writel(ahcd, t, &ahcd->regs->host_control);
+ admhc_vdbg(ahcd, "DMA enabled\n");
+}
+
+static inline void admhc_dma_disable(struct admhcd *ahcd)
+{
+ u32 t;
+
+ t = admhc_readl(ahcd, &ahcd->regs->host_control);
+ if (!(t & ADMHC_HC_DMAE))
+ return;
+
+ t &= ~ADMHC_HC_DMAE;
+ admhc_writel(ahcd, t, &ahcd->regs->host_control);
+ admhc_vdbg(ahcd, "DMA disabled\n");
+}
diff --git a/target/linux/adm5120/files-3.18/drivers/watchdog/adm5120_wdt.c b/target/linux/adm5120/files-3.18/drivers/watchdog/adm5120_wdt.c
new file mode 100644
index 0000000..d5d63b2
--- /dev/null
+++ b/target/linux/adm5120/files-3.18/drivers/watchdog/adm5120_wdt.c
@@ -0,0 +1,202 @@
+/*
+ * ADM5120_WDT 0.01: Infineon ADM5120 SoC watchdog driver
+ * Copyright (c) Ondrej Zajicek <santiago@crfreenet.org>, 2007
+ *
+ * based on
+ *
+ * RC32434_WDT 0.01: IDT Interprise 79RC32434 watchdog driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/irq.h>
+
+#include <asm/bootinfo.h>
+
+#include <asm/mach-adm5120/adm5120_info.h>
+#include <asm/mach-adm5120/adm5120_defs.h>
+#include <asm/mach-adm5120/adm5120_switch.h>
+
+#define DEFAULT_TIMEOUT 15 /* (secs) Default is 15 seconds */
+#define MAX_TIMEOUT 327
+/* Max is 327 seconds, counter is 15-bit integer, step is 10 ms */
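+/*
+ * Sanity check on the limit: a 15-bit counter holds at most 0x7FFF = 32767
+ * ticks of 10 ms each, roughly 327.67 seconds, hence MAX_TIMEOUT = 327.
+ */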
+
+#define NAME "adm5120_wdt"
+#define VERSION "0.1"
+
+static int expect_close;
+static int access;
+static unsigned int timeout = DEFAULT_TIMEOUT;
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+MODULE_LICENSE("GPL");
+
+
+static inline void wdt_set_timeout(void)
+{
+ u32 val = (1 << 31) | (((timeout * 100) & 0x7FFF) << 16);
+ SW_WRITE_REG(SWITCH_REG_WDOG0, val);
+}
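+/*
+ * Worked example: with the default 15 second timeout the counter is loaded
+ * with 15 * 100 = 1500 ticks, so the value written above is
+ * (1 << 31) | (1500 << 16) = 0x85DC0000; bit 31 appears to act as the
+ * enable bit (wdt_disable() below clears it).
+ */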
+
+/*
+ * A write to the WDOG0 register does not appear to modify the counter,
+ * while a read from WDOG0 resets it.
+ */
+
+static inline void wdt_reset_counter(void)
+{
+ SW_READ_REG(SWITCH_REG_WDOG0);
+}
+
+static inline void wdt_disable(void)
+{
+ SW_WRITE_REG(SWITCH_REG_WDOG0, 0x7FFF0000);
+}
+
+
+
+static int wdt_open(struct inode *inode, struct file *file)
+{
+ /* Allow only one person to hold it open */
+ if (access)
+ return -EBUSY;
+
+ if (nowayout)
+ __module_get(THIS_MODULE);
+
+ /* Activate timer */
+ wdt_reset_counter();
+ wdt_set_timeout();
+ printk(KERN_INFO NAME ": enabling watchdog timer\n");
+ access = 1;
+ return 0;
+}
+
+static int wdt_release(struct inode *inode, struct file *file)
+{
+ /*
+ * Shut off the timer.
+ * Lock it in if it's a module and we set nowayout
+ */
+ if (expect_close && (nowayout == 0)) {
+ wdt_disable();
+ printk(KERN_INFO NAME ": disabling watchdog timer\n");
+ module_put(THIS_MODULE);
+ } else
+ printk(KERN_CRIT NAME ": device closed unexpectedly. WDT will not stop!\n");
+
+ access = 0;
+ return 0;
+}
+
+static ssize_t wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
+{
+ /* Refresh the timer. */
+ if (len) {
+ if (!nowayout) {
+ size_t i;
+
+ /* In case it was set long ago */
+ expect_close = 0;
+
+ for (i = 0; i != len; i++) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ if (c == 'V')
+ expect_close = 1;
+ }
+ }
+ wdt_reset_counter();
+ return len;
+ }
+ return 0;
+}
+
+static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int new_timeout;
+ static struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = "ADM5120_WDT Watchdog",
+ };
+ switch (cmd) {
+ default:
+ return -ENOTTY;
+ case WDIOC_GETSUPPORT:
+ if (copy_to_user((struct watchdog_info *)arg, &ident, sizeof(ident)))
+ return -EFAULT;
+ return 0;
+ case WDIOC_GETSTATUS:
+ case WDIOC_GETBOOTSTATUS:
+ return put_user(0, (int *)arg);
+ case WDIOC_KEEPALIVE:
+ wdt_reset_counter();
+ return 0;
+ case WDIOC_SETTIMEOUT:
+ if (get_user(new_timeout, (int *)arg))
+ return -EFAULT;
+ if (new_timeout < 1)
+ return -EINVAL;
+ if (new_timeout > MAX_TIMEOUT)
+ return -EINVAL;
+ timeout = new_timeout;
+ wdt_set_timeout();
+		/* fall through to return the new value */
+ case WDIOC_GETTIMEOUT:
+ return put_user(timeout, (int *)arg);
+ }
+}
+
+static const struct file_operations wdt_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .write = wdt_write,
+ .unlocked_ioctl = wdt_ioctl,
+ .open = wdt_open,
+ .release = wdt_release,
+};
+
+static struct miscdevice wdt_miscdev = {
+ .minor = WATCHDOG_MINOR,
+ .name = "watchdog",
+ .fops = &wdt_fops,
+};
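+/*
+ * Usage sketch (user space, not part of this driver): any write refreshes
+ * the timer, and the magic-close character 'V' must be written before
+ * close() for the timer to be disarmed (unless nowayout is set), e.g.:
+ *
+ *	int fd = open("/dev/watchdog", O_WRONLY);
+ *	int secs = 30;
+ *
+ *	ioctl(fd, WDIOC_SETTIMEOUT, &secs);
+ *	write(fd, "\0", 1);		// keepalive ping
+ *	write(fd, "V", 1);		// enable magic close
+ *	close(fd);			// disarms unless nowayout
+ */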
+
+static char banner[] __initdata = KERN_INFO NAME ": Watchdog Timer version " VERSION "\n";
+
+static int __init watchdog_init(void)
+{
+ int ret;
+
+ ret = misc_register(&wdt_miscdev);
+
+ if (ret)
+ return ret;
+
+ wdt_disable();
+ printk(banner);
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ misc_deregister(&wdt_miscdev);
+}
+
+module_init(watchdog_init);
+module_exit(watchdog_exit);