From 523794c90a44a9a7dba32caf9ac2d305d0fe5c2d Mon Sep 17 00:00:00 2001 From: Imre Kaloz Date: Sat, 11 Feb 2012 15:59:23 +0000 Subject: [cns3xxx]: rename 3.1 patches directory as it will be used for newer kernels as well git-svn-id: svn://svn.openwrt.org/openwrt/trunk@30454 3c298f89-4303-0410-b956-a3cf2f4a3e73 --- .../cns3xxx/patches-3.1/002-cns3xxx_wdt.patch | 69 - .../patches-3.1/049-cns3xxx_smp_support.patch | 415 - .../patches-3.1/050-cns3xxx_i2c_controller.patch | 421 - .../cns3xxx/patches-3.1/051-cns3xxx_gigabit.patch | 1328 -- .../cns3xxx/patches-3.1/052-cns3xxx_spi.patch | 509 - .../patches-3.1/054-cns3xxx_pcie_clock.patch | 11 - .../cns3xxx/patches-3.1/100-laguna_support.patch | 992 - .../patches-3.1/101-laguna_sdhci_card_detect.patch | 16 - .../cns3xxx/patches-3.1/102-cns3xxx_timers.patch | 109 - .../cns3xxx/patches-3.1/104-cns3xxx_gpio.patch | 141 - .../cns3xxx/patches-3.1/105-cns3xxx_pcie_io.patch | 88 - .../patches-3.1/106-cns3xxx_sata_support.patch | 190 - .../patches-3.1/110-gateworks_gsp_support.patch | 339 - target/linux/cns3xxx/patches-3.1/200-dwc_otg.patch | 22680 ------------------- target/linux/cns3xxx/patches/002-cns3xxx_wdt.patch | 69 + .../cns3xxx/patches/049-cns3xxx_smp_support.patch | 415 + .../patches/050-cns3xxx_i2c_controller.patch | 421 + .../cns3xxx/patches/051-cns3xxx_gigabit.patch | 1328 ++ target/linux/cns3xxx/patches/052-cns3xxx_spi.patch | 509 + .../cns3xxx/patches/054-cns3xxx_pcie_clock.patch | 11 + .../linux/cns3xxx/patches/100-laguna_support.patch | 992 + .../patches/101-laguna_sdhci_card_detect.patch | 16 + .../linux/cns3xxx/patches/102-cns3xxx_timers.patch | 109 + .../linux/cns3xxx/patches/104-cns3xxx_gpio.patch | 141 + .../cns3xxx/patches/105-cns3xxx_pcie_io.patch | 88 + .../cns3xxx/patches/106-cns3xxx_sata_support.patch | 190 + .../patches/110-gateworks_gsp_support.patch | 339 + target/linux/cns3xxx/patches/200-dwc_otg.patch | 22680 +++++++++++++++++++ 28 files changed, 27308 insertions(+), 27308 deletions(-) delete mode 100644 target/linux/cns3xxx/patches-3.1/002-cns3xxx_wdt.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/049-cns3xxx_smp_support.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/050-cns3xxx_i2c_controller.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/051-cns3xxx_gigabit.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/052-cns3xxx_spi.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/054-cns3xxx_pcie_clock.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/100-laguna_support.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/101-laguna_sdhci_card_detect.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/102-cns3xxx_timers.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/104-cns3xxx_gpio.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/105-cns3xxx_pcie_io.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/106-cns3xxx_sata_support.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/110-gateworks_gsp_support.patch delete mode 100644 target/linux/cns3xxx/patches-3.1/200-dwc_otg.patch create mode 100644 target/linux/cns3xxx/patches/002-cns3xxx_wdt.patch create mode 100644 target/linux/cns3xxx/patches/049-cns3xxx_smp_support.patch create mode 100644 target/linux/cns3xxx/patches/050-cns3xxx_i2c_controller.patch create mode 100644 target/linux/cns3xxx/patches/051-cns3xxx_gigabit.patch create mode 100644 target/linux/cns3xxx/patches/052-cns3xxx_spi.patch create mode 100644 target/linux/cns3xxx/patches/054-cns3xxx_pcie_clock.patch create mode 
100644 target/linux/cns3xxx/patches/100-laguna_support.patch create mode 100644 target/linux/cns3xxx/patches/101-laguna_sdhci_card_detect.patch create mode 100644 target/linux/cns3xxx/patches/102-cns3xxx_timers.patch create mode 100644 target/linux/cns3xxx/patches/104-cns3xxx_gpio.patch create mode 100644 target/linux/cns3xxx/patches/105-cns3xxx_pcie_io.patch create mode 100644 target/linux/cns3xxx/patches/106-cns3xxx_sata_support.patch create mode 100644 target/linux/cns3xxx/patches/110-gateworks_gsp_support.patch create mode 100644 target/linux/cns3xxx/patches/200-dwc_otg.patch (limited to 'target') diff --git a/target/linux/cns3xxx/patches-3.1/002-cns3xxx_wdt.patch b/target/linux/cns3xxx/patches-3.1/002-cns3xxx_wdt.patch deleted file mode 100644 index 0f9004dc6d..0000000000 --- a/target/linux/cns3xxx/patches-3.1/002-cns3xxx_wdt.patch +++ /dev/null @@ -1,69 +0,0 @@ -1. Made the connection between CNS3xxx SOCs(ARCH_CNS3xxx) and MPcore watchdog - since the CNS3xxx SOCs have ARM11 MPcore CPU. -2. Enable mpcore_watchdog option as module to default configuration at - arch/arm/configs/cns3420vb_defconfig. - -Signed-off-by: Tommy Lin - ---- -arch/arm/Kconfig | 1 + - arch/arm/configs/cns3420vb_defconfig | 2 ++ - arch/arm/mach-cns3xxx/cns3420vb.c | 22 ++++++++++++++++++++++ - 3 files changed, 25 insertions(+), 0 deletions(-) - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -333,6 +333,7 @@ config ARCH_CNS3XXX - select ARM_GIC - select MIGHT_HAVE_PCI - select PCI_DOMAINS if PCI -+ select HAVE_ARM_TWD - help - Support for Cavium Networks CNS3XXX platform. - ---- a/arch/arm/configs/cns3420vb_defconfig -+++ b/arch/arm/configs/cns3420vb_defconfig -@@ -53,6 +53,8 @@ CONFIG_LEGACY_PTY_COUNT=16 - # CONFIG_HW_RANDOM is not set - # CONFIG_HWMON is not set - # CONFIG_VGA_CONSOLE is not set -+CONFIG_WATCHDOG=y -+CONFIG_MPCORE_WATCHDOG=m - # CONFIG_HID_SUPPORT is not set - # CONFIG_USB_SUPPORT is not set - CONFIG_MMC=y ---- a/arch/arm/mach-cns3xxx/cns3420vb.c -+++ b/arch/arm/mach-cns3xxx/cns3420vb.c -@@ -158,10 +158,32 @@ static struct platform_device cns3xxx_us - }, - }; - -+/* Watchdog */ -+static struct resource cns3xxx_watchdog_resources[] = { -+ [0] = { -+ .start = CNS3XXX_TC11MP_TWD_BASE, -+ .end = CNS3XXX_TC11MP_TWD_BASE + PAGE_SIZE - 1, -+ .flags = IORESOURCE_MEM, -+ }, -+ [1] = { -+ .start = IRQ_LOCALWDOG, -+ .end = IRQ_LOCALWDOG, -+ .flags = IORESOURCE_IRQ, -+ } -+}; -+ -+static struct platform_device cns3xxx_watchdog_device = { -+ .name = "mpcore_wdt", -+ .id = -1, -+ .num_resources = ARRAY_SIZE(cns3xxx_watchdog_resources), -+ .resource = cns3xxx_watchdog_resources, -+}; -+ - /* - * Initialization - */ - static struct platform_device *cns3420_pdevs[] __initdata = { -+ &cns3xxx_watchdog_device, - &cns3420_nor_pdev, - &cns3xxx_usb_ehci_device, - &cns3xxx_usb_ohci_device, diff --git a/target/linux/cns3xxx/patches-3.1/049-cns3xxx_smp_support.patch b/target/linux/cns3xxx/patches-3.1/049-cns3xxx_smp_support.patch deleted file mode 100644 index e34134055e..0000000000 --- a/target/linux/cns3xxx/patches-3.1/049-cns3xxx_smp_support.patch +++ /dev/null @@ -1,415 +0,0 @@ ---- a/arch/arm/mach-cns3xxx/Makefile -+++ b/arch/arm/mach-cns3xxx/Makefile -@@ -1,3 +1,6 @@ - obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o devices.o - obj-$(CONFIG_PCI) += pcie.o - obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o -+obj-$(CONFIG_SMP) += platsmp.o headsmp.o -+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o -+obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/headsmp.S -@@ -0,0 +1,42 @@ -+/* -+ * 
linux/arch/arm/mach-cns3xxx/headsmp.S -+ * -+ * Cloned from linux/arch/arm/plat-versatile/headsmp.S -+ * -+ * Copyright (c) 2003 ARM Limited -+ * All Rights Reserved -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+#include -+#include -+ -+ __INIT -+ -+/* -+ * CNS3XXX specific entry point for secondary CPUs. This provides -+ * a "holding pen" into which all secondary cores are held until we're -+ * ready for them to initialise. -+ */ -+ENTRY(cns3xxx_secondary_startup) -+ mrc p15, 0, r0, c0, c0, 5 -+ and r0, r0, #15 -+ adr r4, 1f -+ ldmia r4, {r5, r6} -+ sub r4, r4, r5 -+ add r6, r6, r4 -+pen: ldr r7, [r6] -+ cmp r7, r0 -+ bne pen -+ -+ /* -+ * we've been released from the holding pen: secondary_stack -+ * should now contain the SVC stack for this core -+ */ -+ b secondary_startup -+ -+ .align -+1: .long . -+ .long pen_release ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/hotplug.c -@@ -0,0 +1,130 @@ -+/* linux arch/arm/mach-cns3xxx/hotplug.c -+ * -+ * Cloned from linux/arch/arm/mach-realview/hotplug.c -+ * -+ * Copyright (C) 2002 ARM Ltd. -+ * All Rights Reserved -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+*/ -+ -+#include -+#include -+#include -+ -+#include -+ -+extern volatile int pen_release; -+ -+static inline void cpu_enter_lowpower(void) -+{ -+ unsigned int v; -+ -+ flush_cache_all(); -+ asm volatile( -+ " mcr p15, 0, %1, c7, c5, 0\n" -+ " mcr p15, 0, %1, c7, c10, 4\n" -+ /* -+ * Turn off coherency -+ */ -+ " mrc p15, 0, %0, c1, c0, 1\n" -+ " bic %0, %0, %3\n" -+ " mcr p15, 0, %0, c1, c0, 1\n" -+ " mrc p15, 0, %0, c1, c0, 0\n" -+ " bic %0, %0, %2\n" -+ " mcr p15, 0, %0, c1, c0, 0\n" -+ : "=&r" (v) -+ : "r" (0), "Ir" (CR_C), "Ir" (0x40) -+ : "cc"); -+} -+ -+static inline void cpu_leave_lowpower(void) -+{ -+ unsigned int v; -+ -+ asm volatile( -+ "mrc p15, 0, %0, c1, c0, 0\n" -+ " orr %0, %0, %1\n" -+ " mcr p15, 0, %0, c1, c0, 0\n" -+ " mrc p15, 0, %0, c1, c0, 1\n" -+ " orr %0, %0, %2\n" -+ " mcr p15, 0, %0, c1, c0, 1\n" -+ : "=&r" (v) -+ : "Ir" (CR_C), "Ir" (0x40) -+ : "cc"); -+} -+ -+static inline void platform_do_lowpower(unsigned int cpu, int *spurious) -+{ -+ /* -+ * there is no power-control hardware on this platform, so all -+ * we can do is put the core into WFI; this is safe as the calling -+ * code will have already disabled interrupts -+ */ -+ for (;;) { -+ /* -+ * here's the WFI -+ */ -+ asm(".word 0xe320f003\n" -+ : -+ : -+ : "memory", "cc"); -+ -+ if (pen_release == cpu) { -+ /* -+ * OK, proper wakeup, we're done -+ */ -+ break; -+ } -+ -+ /* -+ * Getting here, means that we have come out of WFI without -+ * having been woken up - this shouldn't happen -+ * -+ * Just note it happening - when we're woken, we can report -+ * its occurrence. 
-+ */ -+ (*spurious)++; -+ } -+} -+ -+int platform_cpu_kill(unsigned int cpu) -+{ -+ return 1; -+} -+ -+/* -+ * platform-specific code to shutdown a CPU -+ * -+ * Called with IRQs disabled -+ */ -+void platform_cpu_die(unsigned int cpu) -+{ -+ int spurious = 0; -+ -+ /* -+ * we're ready for shutdown now, so do it -+ */ -+ cpu_enter_lowpower(); -+ platform_do_lowpower(cpu, &spurious); -+ -+ /* -+ * bring this CPU back into the world of cache -+ * coherency, and then restore interrupts -+ */ -+ cpu_leave_lowpower(); -+ -+ if (spurious) -+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); -+} -+ -+int platform_cpu_disable(unsigned int cpu) -+{ -+ /* -+ * we don't allow CPU 0 to be shutdown (it is still too special -+ * e.g. clock tick interrupts) -+ */ -+ return cpu == 0 ? -EPERM : 0; -+} ---- a/arch/arm/mach-cns3xxx/Kconfig -+++ b/arch/arm/mach-cns3xxx/Kconfig -@@ -3,6 +3,7 @@ menu "CNS3XXX platform type" - - config MACH_CNS3420VB - bool "Support for CNS3420 Validation Board" -+ select HAVE_ARM_SCU if SMP - select MIGHT_HAVE_PCI - help - Include support for the Cavium Networks CNS3420 MPCore Platform ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/localtimer.c -@@ -0,0 +1,26 @@ -+/* linux/arch/arm/mach-cns3xxx/localtimer.c -+ * -+ * Cloned from linux/arch/arm/mach-realview/localtimer.c -+ * -+ * Copyright (C) 2002 ARM Ltd. -+ * All Rights Reserved -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+*/ -+ -+#include -+ -+#include -+#include -+ -+/* -+ * Setup the local clock events for a CPU. -+ */ -+int __cpuinit local_timer_setup(struct clock_event_device *evt) -+{ -+ evt->irq = IRQ_LOCALTIMER; -+ twd_timer_setup(evt); -+ return 0; -+} ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/platsmp.c -@@ -0,0 +1,175 @@ -+/* linux/arch/arm/mach-cns3xxx/platsmp.c -+ * -+ * Copyright 2011 Gateworks Corporation -+ * Chris Lang -+ * -+ * Cloned from linux/arch/arm/mach-vexpress/platsmp.c -+ * -+ * Copyright (C) 2002 ARM Ltd. -+ * All Rights Reserved -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+*/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+ -+extern void cns3xxx_secondary_startup(void); -+ -+/* -+ * control for which core is the next to come out of the secondary -+ * boot "holding pen" -+ */ -+ -+volatile int __cpuinitdata pen_release = -1; -+ -+/* -+ * Write pen_release in a way that is guaranteed to be visible to all -+ * observers, irrespective of whether they're taking part in coherency -+ * or not. This is necessary for the hotplug code to work reliably. -+ */ -+static void write_pen_release(int val) -+{ -+ pen_release = val; -+ smp_wmb(); -+ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); -+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); -+} -+ -+static void __iomem *scu_base_addr(void) -+{ -+ return (void __iomem *)(CNS3XXX_TC11MP_SCU_BASE_VIRT); -+} -+ -+static DEFINE_SPINLOCK(boot_lock); -+ -+void __cpuinit platform_secondary_init(unsigned int cpu) -+{ -+ /* -+ * if any interrupts are already enabled for the primary -+ * core (e.g. 
timer irq), then they will not have been enabled -+ * for us: do so -+ */ -+ gic_secondary_init(0); -+ -+ /* -+ * let the primary processor know we're out of the -+ * pen, then head off into the C entry point -+ */ -+ write_pen_release(-1); -+ -+ /* -+ * Synchronise with the boot thread. -+ */ -+ spin_lock(&boot_lock); -+ spin_unlock(&boot_lock); -+} -+ -+int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) -+{ -+ unsigned long timeout; -+ -+ /* -+ * Set synchronisation state between this boot processor -+ * and the secondary one -+ */ -+ spin_lock(&boot_lock); -+ -+ /* -+ * The secondary processor is waiting to be released from -+ * the holding pen - release it, then wait for it to flag -+ * that it has been released by resetting pen_release. -+ * -+ * Note that "pen_release" is the hardware CPU ID, whereas -+ * "cpu" is Linux's internal ID. -+ */ -+ write_pen_release(cpu); -+ -+ /* -+ * Send the secondary CPU a soft interrupt, thereby causing -+ * the boot monitor to read the system wide flags register, -+ * and branch to the address found there. -+ */ -+ gic_raise_softirq(cpumask_of(cpu), 1); -+ -+ timeout = jiffies + (1 * HZ); -+ while (time_before(jiffies, timeout)) { -+ smp_rmb(); -+ if (pen_release == -1) -+ break; -+ -+ udelay(10); -+ } -+ -+ /* -+ * now the secondary core is starting up let it run its -+ * calibrations, then wait for it to finish -+ */ -+ spin_unlock(&boot_lock); -+ -+ return pen_release != -1 ? -ENOSYS : 0; -+} -+ -+/* -+ * Initialise the CPU possible map early - this describes the CPUs -+ * which may be present or become present in the system. -+ */ -+ -+void __init smp_init_cpus(void) -+{ -+ void __iomem *scu_base = scu_base_addr(); -+ unsigned int i, ncores; -+ -+ ncores = scu_base ? scu_get_core_count(scu_base) : 1; -+ -+ /* sanity check */ -+ if (ncores > NR_CPUS) { -+ printk(KERN_WARNING -+ "cns3xxx: no. of cores (%d) greater than configured " -+ "maximum of %d - clipping\n", -+ ncores, NR_CPUS); -+ ncores = NR_CPUS; -+ } -+ -+ for (i = 0; i < ncores; i++) -+ set_cpu_possible(i, true); -+ -+ set_smp_cross_call(gic_raise_softirq); -+} -+ -+void __init platform_smp_prepare_cpus(unsigned int max_cpus) -+{ -+ int i; -+ -+ /* -+ * Initialise the present map, which describes the set of CPUs -+ * actually populated at the present time. -+ */ -+ for (i = 0; i < max_cpus; i++) -+ set_cpu_present(i, true); -+ -+ scu_enable(scu_base_addr()); -+ -+ /* -+ * Write the address of secondary startup into the -+ * system-wide flags register. The boot monitor waits -+ * until it receives a soft interrupt, and then the -+ * secondary CPU branches to this address. 
-+ */ -+ __raw_writel(virt_to_phys(cns3xxx_secondary_startup), -+ (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0600)); -+} ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -1388,7 +1388,7 @@ config SMP - depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \ - MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \ - ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \ -- ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE -+ ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE || ARCH_CNS3XXX - select USE_GENERIC_SMP_HELPERS - select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP - help diff --git a/target/linux/cns3xxx/patches-3.1/050-cns3xxx_i2c_controller.patch b/target/linux/cns3xxx/patches-3.1/050-cns3xxx_i2c_controller.patch deleted file mode 100644 index febbc1ecba..0000000000 --- a/target/linux/cns3xxx/patches-3.1/050-cns3xxx_i2c_controller.patch +++ /dev/null @@ -1,421 +0,0 @@ ---- a/drivers/i2c/busses/Kconfig -+++ b/drivers/i2c/busses/Kconfig -@@ -327,6 +327,18 @@ config I2C_BLACKFIN_TWI_CLK_KHZ - help - The unit of the TWI clock is kHz. - -+config I2C_CNS3XXX -+ tristate "Cavium CNS3xxx I2C driver" -+ depends on ARCH_CNS3XXX -+ help -+ Support for Cavium CNS3xxx I2C controller driver. -+ -+ This driver can also be built as a module. If so, the module -+ will be called i2c-cns3xxx. -+ -+ Please note that this driver might be needed to bring up other -+ devices such as Cavium CNS3xxx Ethernet. -+ - config I2C_CPM - tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" - depends on (CPM1 || CPM2) && OF_I2C ---- a/drivers/i2c/busses/Makefile -+++ b/drivers/i2c/busses/Makefile -@@ -80,6 +80,7 @@ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor - obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o - obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o - obj-$(CONFIG_I2C_STUB) += i2c-stub.o -+obj-$(CONFIG_I2C_CNS3XXX) += i2c-cns3xxx.o - obj-$(CONFIG_SCx200_ACB) += scx200_acb.o - obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o - ---- /dev/null -+++ b/drivers/i2c/busses/i2c-cns3xxx.c -@@ -0,0 +1,387 @@ -+/* -+ * Cavium CNS3xxx I2C Host Controller -+ * -+ * Copyright 2010 Cavium Network -+ * Copyright 2011 Gateworks Corporation -+ * Chris Lang -+ * -+ * This file is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License, Version 2, as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * We need the memory map -+ */ -+ -+ -+#define MISC_MEM_MAP_VALUE(reg_offset) (*((uint32_t volatile *)(CNS3XXX_MISC_BASE_VIRT + reg_offset))) -+#define MISC_IOCDB_CTRL MISC_MEM_MAP_VALUE(0x020) -+ -+#define I2C_MEM_MAP_ADDR(x) (CNS3XXX_SSP_BASE_VIRT + x) -+#define I2C_MEM_MAP_VALUE(x) (*((unsigned int volatile*)I2C_MEM_MAP_ADDR(x))) -+ -+#define I2C_CONTROLLER_REG I2C_MEM_MAP_VALUE(0x20) -+#define I2C_TIME_OUT_REG I2C_MEM_MAP_VALUE(0x24) -+#define I2C_SLAVE_ADDRESS_REG I2C_MEM_MAP_VALUE(0x28) -+#define I2C_WRITE_DATA_REG I2C_MEM_MAP_VALUE(0x2C) -+#define I2C_READ_DATA_REG I2C_MEM_MAP_VALUE(0x30) -+#define I2C_INTERRUPT_STATUS_REG I2C_MEM_MAP_VALUE(0x34) -+#define I2C_INTERRUPT_ENABLE_REG I2C_MEM_MAP_VALUE(0x38) -+#define I2C_TWI_OUT_DLY_REG I2C_MEM_MAP_VALUE(0x3C) -+ -+#define I2C_BUS_ERROR_FLAG (0x1) -+#define I2C_ACTION_DONE_FLAG (0x2) -+ -+#define CNS3xxx_I2C_ENABLE() (I2C_CONTROLLER_REG) |= ((unsigned int)0x1 << 31) -+#define CNS3xxx_I2C_DISABLE() (I2C_CONTROLLER_REG) &= ~((unsigned int)0x1 << 31) -+#define CNS3xxx_I2C_ENABLE_INTR() (I2C_INTERRUPT_ENABLE_REG) |= 0x03 -+#define CNS3xxx_I2C_DISABLE_INTR() (I2C_INTERRUPT_ENABLE_REG) &= 0xfc -+ -+#define TWI_TIMEOUT (10*HZ) -+#define I2C_100KHZ 100000 -+#define I2C_200KHZ 200000 -+#define I2C_300KHZ 300000 -+#define I2C_400KHZ 400000 -+ -+#define CNS3xxx_I2C_CLK I2C_100KHZ -+ -+#define STATE_DONE 1 -+#define STATE_ERROR 2 -+ -+struct cns3xxx_i2c { -+ void __iomem *base; -+ wait_queue_head_t wait; -+ struct i2c_adapter adap; -+ struct i2c_msg *msg; -+ int state; /* see STATE_ */ -+ int rd_wr_len; -+ u8 *buf; -+}; -+ -+static u32 cns3xxx_i2c_func(struct i2c_adapter *adap) -+{ -+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -+} -+ -+static int -+cns3xxx_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg) -+{ -+ struct cns3xxx_i2c *i2c = i2c_get_adapdata(adap); -+ int i, j; -+ u8 buf[1] = { 0 }; -+ -+ if (msg->len == 0) { -+ /* -+ * We are probably doing a probe for a device here, -+ * so set the length to one, and data to 0 -+ */ -+ msg->len = 1; -+ i2c->buf = buf; -+ } else { -+ i2c->buf = msg->buf; -+ } -+ -+ if (msg->flags & I2C_M_TEN) { -+ printk -+ ("%s:%d: Presently the driver does not handle extended addressing\n", -+ __FUNCTION__, __LINE__); -+ return -EINVAL; -+ } -+ i2c->msg = msg; -+ -+ for (i = 0; i < msg->len; i++) { -+ if (msg->len - i >= 4) -+ i2c->rd_wr_len = 3; -+ else -+ i2c->rd_wr_len = msg->len - i - 1; -+ -+ // Set Data Width and TWI_EN -+ I2C_CONTROLLER_REG = 0x80000000 | (i2c->rd_wr_len << 2) | (i2c->rd_wr_len); -+ -+ // Clear Write Reg -+ I2C_WRITE_DATA_REG = 0; -+ -+ // Set the slave address -+ I2C_SLAVE_ADDRESS_REG = (msg->addr << 1); -+ -+ // Are we Writing -+ if (!(msg->flags & I2C_M_RD)) { -+ I2C_CONTROLLER_REG |= (1 << 4); -+ if (i != 0) { -+ /* -+ * We need to set the address in the first byte. -+ * The base address is going to be in buf[0] and then -+ * it needs to be incremented by i - 1. 
-+ */ -+ i2c->buf--; -+ *i2c->buf = buf[0] + i - 1; -+ -+ if (i2c->rd_wr_len < 3) { -+ i += i2c->rd_wr_len; -+ i2c->rd_wr_len++; -+ I2C_CONTROLLER_REG = 0x80000000 | (1 << 4) | (i2c->rd_wr_len << 2) | (i2c->rd_wr_len); -+ } else { -+ i += i2c->rd_wr_len - 1; -+ } -+ } else { -+ i += i2c->rd_wr_len; -+ buf[0] = *i2c->buf; -+ } -+ for (j = 0; j <= i2c->rd_wr_len; j++) { -+ I2C_WRITE_DATA_REG |= ((*i2c->buf++) << (8 * j)); -+ } -+ } else { -+ i += i2c->rd_wr_len; -+ } -+ -+ // Start the Transfer -+ i2c->state = 0; // Clear out the State -+ I2C_CONTROLLER_REG |= (1 << 6); -+ -+ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) || -+ (i2c->state == STATE_DONE), TWI_TIMEOUT)) { -+ if (i2c->state == STATE_ERROR) { -+ return -EIO; -+ } -+ } else { -+ return -ETIMEDOUT; -+ } -+ } -+ return 0; -+} -+ -+static int -+cns3xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) -+{ -+ int i; -+ int ret; -+ for (i = 0; i < num; i++) -+ { -+ ret = cns3xxx_i2c_xfer_msg(adap, &msgs[i]); -+ if (ret < 0) { -+ return ret; -+ } -+ } -+ return num; -+} -+ -+ -+static struct i2c_algorithm cns3xxx_i2c_algo = { -+ .master_xfer = cns3xxx_i2c_xfer, -+ .functionality = cns3xxx_i2c_func, -+}; -+ -+static struct i2c_adapter cns3xxx_i2c_adapter = { -+ .owner = THIS_MODULE, -+ .algo = &cns3xxx_i2c_algo, -+ .algo_data = NULL, -+ .nr = 0, -+ .name = "CNS3xxx I2C 0", -+ .retries = 5, -+}; -+ -+static void cns3xxx_i2c_adapter_init(struct cns3xxx_i2c *i2c) -+{ -+ cns3xxx_pwr_clk_en(1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); -+ cns3xxx_pwr_power_up(1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); -+ cns3xxx_pwr_soft_rst(1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); -+ -+ /* Disable the I2C */ -+ I2C_CONTROLLER_REG = 0; /* Disabled the I2C */ -+ -+ //enable SCL and SDA which share pin with GPIOB_PIN_EN(0x18) -+ //GPIOB[12]: SCL -+ //GPIOB[13]: SDA -+ (*(u32*)(CNS3XXX_MISC_BASE_VIRT+0x18)) |= ((1<<12)|(1<<13)); -+ -+ MISC_IOCDB_CTRL &= ~0x300; -+ MISC_IOCDB_CTRL |= 0x300; //21mA... 
-+ -+ /* Check the Reg Dump when testing */ -+ I2C_TIME_OUT_REG = -+ ((((((cns3xxx_cpu_clock()*(1000000/8)) / (2 * CNS3xxx_I2C_CLK)) - -+ 1) & 0x3FF) << 8) | (1 << 7) | 0x7F); -+ I2C_TWI_OUT_DLY_REG |= 0x3; -+ -+ /* Enable The Interrupt */ -+ CNS3xxx_I2C_ENABLE_INTR(); -+ -+ /* Clear Interrupt Status (0x2 | 0x1) */ -+ I2C_INTERRUPT_STATUS_REG |= (I2C_ACTION_DONE_FLAG | I2C_BUS_ERROR_FLAG); -+ -+ /* Enable the I2C Controller */ -+ CNS3xxx_I2C_ENABLE(); -+} -+ -+static irqreturn_t cns3xxx_i2c_isr(int irq, void *dev_id) -+{ -+ struct cns3xxx_i2c *i2c = dev_id; -+ int i; -+ uint32_t stat = I2C_INTERRUPT_STATUS_REG; -+ -+ /* Clear Interrupt */ -+ I2C_INTERRUPT_STATUS_REG |= 0x1; -+ -+ if (stat & I2C_BUS_ERROR_FLAG) { -+ i2c->state = STATE_ERROR; -+ } else { -+ if (i2c->msg->flags & I2C_M_RD) { -+ for (i = 0; i <= i2c->rd_wr_len; i++) -+ { -+ *i2c->buf++ = ((I2C_READ_DATA_REG >> (8 * i)) & 0xff); -+ } -+ } -+ i2c->state = STATE_DONE; -+ } -+ wake_up(&i2c->wait); -+ return IRQ_HANDLED; -+} -+ -+static int __devinit cns3xxx_i2c_probe(struct platform_device *pdev) -+{ -+ struct cns3xxx_i2c *i2c; -+ struct resource *res, *res2; -+ int ret; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!res) { -+ printk("%s: IORESOURCE_MEM not defined \n", __FUNCTION__); -+ return -ENODEV; -+ } -+ -+ res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); -+ if (!res2) { -+ printk("%s: IORESOURCE_IRQ not defined \n", __FUNCTION__); -+ return -ENODEV; -+ } -+ -+ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); -+ if (!i2c) -+ return -ENOMEM; -+ -+ if (!request_mem_region(res->start, res->end - res->start + 1, -+ pdev->name)) { -+ dev_err(&pdev->dev, "Memory region busy\n"); -+ ret = -EBUSY; -+ goto request_mem_failed; -+ } -+ -+ i2c->base = ioremap(res->start, res->end - res->start + 1); -+ if (!i2c->base) { -+ dev_err(&pdev->dev, "Unable to map registers\n"); -+ ret = -EIO; -+ goto map_failed; -+ } -+ -+ cns3xxx_i2c_adapter_init(i2c); -+ -+ init_waitqueue_head(&i2c->wait); -+ ret = request_irq(res2->start, cns3xxx_i2c_isr, 0, pdev->name, i2c); -+ if (ret) { -+ dev_err(&pdev->dev, "Cannot claim IRQ\n"); -+ goto request_irq_failed; -+ } -+ -+ platform_set_drvdata(pdev, i2c); -+ i2c->adap = cns3xxx_i2c_adapter; -+ i2c_set_adapdata(&i2c->adap, i2c); -+ i2c->adap.dev.parent = &pdev->dev; -+ -+ /* add i2c adapter to i2c tree */ -+ ret = i2c_add_numbered_adapter(&i2c->adap); -+ if (ret) { -+ dev_err(&pdev->dev, "Failed to add adapter\n"); -+ goto add_adapter_failed; -+ } -+ -+ return 0; -+ -+ add_adapter_failed: -+ free_irq(res2->start, i2c); -+ request_irq_failed: -+ iounmap(i2c->base); -+ map_failed: -+ release_mem_region(res->start, res->end - res->start + 1); -+ request_mem_failed: -+ kfree(i2c); -+ -+ return ret; -+} -+ -+static int __devexit cns3xxx_i2c_remove(struct platform_device *pdev) -+{ -+ struct cns3xxx_i2c *i2c = platform_get_drvdata(pdev); -+ struct resource *res; -+ -+ /* disable i2c logic */ -+ CNS3xxx_I2C_DISABLE_INTR(); -+ CNS3xxx_I2C_DISABLE(); -+ /* remove adapter & data */ -+ i2c_del_adapter(&i2c->adap); -+ platform_set_drvdata(pdev, NULL); -+ -+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); -+ if (res) -+ free_irq(res->start, i2c); -+ -+ iounmap(i2c->base); -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (res) -+ release_mem_region(res->start, res->end - res->start + 1); -+ -+ kfree(i2c); -+ -+ return 0; -+} -+ -+#ifdef CONFIG_PM -+#warning "CONFIG_PM defined: suspend and resume not implemented" -+#define cns3xxx_i2c_suspend NULL -+#define cns3xxx_i2c_resume NULL 
-+#else -+#define cns3xxx_i2c_suspend NULL -+#define cns3xxx_i2c_resume NULL -+#endif -+ -+static struct platform_driver cns3xxx_i2c_driver = { -+ .probe = cns3xxx_i2c_probe, -+ .remove = cns3xxx_i2c_remove, -+ .suspend = cns3xxx_i2c_suspend, -+ .resume = cns3xxx_i2c_resume, -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = "cns3xxx-i2c", -+ }, -+}; -+ -+static int __init cns3xxx_i2c_init(void) -+{ -+ return platform_driver_register(&cns3xxx_i2c_driver); -+} -+ -+static void __exit cns3xxx_i2c_exit(void) -+{ -+ platform_driver_unregister(&cns3xxx_i2c_driver); -+} -+ -+module_init(cns3xxx_i2c_init); -+module_exit(cns3xxx_i2c_exit); -+ -+MODULE_AUTHOR("Cavium Networks"); -+MODULE_DESCRIPTION("Cavium CNS3XXX I2C Controller"); -+MODULE_LICENSE("GPL"); diff --git a/target/linux/cns3xxx/patches-3.1/051-cns3xxx_gigabit.patch b/target/linux/cns3xxx/patches-3.1/051-cns3xxx_gigabit.patch deleted file mode 100644 index cf67e92dd6..0000000000 --- a/target/linux/cns3xxx/patches-3.1/051-cns3xxx_gigabit.patch +++ /dev/null @@ -1,1328 +0,0 @@ ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -2071,6 +2071,14 @@ config ACENIC_OMIT_TIGON_I - - The safe and default value for this is N. - -+config CNS3XXX_ETH -+ tristate "Cavium CNS3xxx Ethernet support" -+ depends on ARCH_CNS3XXX -+ select PHYLIB -+ help -+ Say Y here if you want to use built-in Ethernet ports -+ on CNS3XXX processor. -+ - config DL2K - tristate "DL2000/TC902x-based Gigabit Ethernet support" - depends on PCI ---- a/drivers/net/Makefile -+++ b/drivers/net/Makefile -@@ -240,6 +240,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o - obj-$(CONFIG_TUN) += tun.o - obj-$(CONFIG_VETH) += veth.o - obj-$(CONFIG_NET_NETX) += netx-eth.o -+obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o - obj-$(CONFIG_DL2K) += dl2k.o - obj-$(CONFIG_R8169) += r8169.o - obj-$(CONFIG_AMD8111_ETH) += amd8111e.o ---- /dev/null -+++ b/drivers/net/cns3xxx_eth.c -@@ -0,0 +1,1269 @@ -+/* -+ * Cavium CNS3xxx Gigabit driver for Linux -+ * -+ * Copyright 2011 Gateworks Corporation -+ * Chris Lang -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of version 2 of the GNU General Public License -+ * as published by the Free Software Foundation. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRV_NAME "cns3xxx_eth" -+ -+#define RX_DESCS 512 -+#define TX_DESCS 512 -+#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) -+ -+#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS) -+#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS) -+#define REGS_SIZE 336 -+#define MAX_MRU 9500 -+ -+#define NAPI_WEIGHT 64 -+ -+/* MDIO Defines */ -+#define MDIO_CMD_COMPLETE 0x00008000 -+#define MDIO_WRITE_COMMAND 0x00002000 -+#define MDIO_READ_COMMAND 0x00004000 -+#define MDIO_REG_OFFSET 8 -+#define MDIO_VALUE_OFFSET 16 -+ -+/* Descritor Defines */ -+#define END_OF_RING 0x40000000 -+#define FIRST_SEGMENT 0x20000000 -+#define LAST_SEGMENT 0x10000000 -+#define FORCE_ROUTE 0x04000000 -+#define IP_CHECKSUM 0x00040000 -+#define UDP_CHECKSUM 0x00020000 -+#define TCP_CHECKSUM 0x00010000 -+ -+/* Port Config Defines */ -+#define PORT_DISABLE 0x00040000 -+#define PROMISC_OFFSET 29 -+ -+/* Global Config Defines */ -+#define UNKNOWN_VLAN_TO_CPU 0x02000000 -+#define ACCEPT_CRC_PACKET 0x00200000 -+#define CRC_STRIPPING 0x00100000 -+ -+/* VLAN Config Defines */ -+#define NIC_MODE 0x00008000 -+#define VLAN_UNAWARE 0x00000001 -+ -+/* DMA AUTO Poll Defines */ -+#define TS_POLL_EN 0x00000020 -+#define TS_SUSPEND 0x00000010 -+#define FS_POLL_EN 0x00000002 -+#define FS_SUSPEND 0x00000001 -+ -+/* DMA Ring Control Defines */ -+#define QUEUE_THRESHOLD 0x000000f0 -+#define CLR_FS_STATE 0x80000000 -+ -+struct tx_desc -+{ -+ u32 sdp; /* segment data pointer */ -+ -+ union { -+ struct { -+ u32 sdl:16; /* segment data length */ -+ u32 tco:1; -+ u32 uco:1; -+ u32 ico:1; -+ u32 rsv_1:3; /* reserve */ -+ u32 pri:3; -+ u32 fp:1; /* force priority */ -+ u32 fr:1; -+ u32 interrupt:1; -+ u32 lsd:1; -+ u32 fsd:1; -+ u32 eor:1; -+ u32 cown:1; -+ }; -+ u32 config0; -+ }; -+ -+ union { -+ struct { -+ u32 ctv:1; -+ u32 stv:1; -+ u32 sid:4; -+ u32 inss:1; -+ u32 dels:1; -+ u32 rsv_2:9; -+ u32 pmap:5; -+ u32 mark:3; -+ u32 ewan:1; -+ u32 fewan:1; -+ u32 rsv_3:5; -+ }; -+ u32 config1; -+ }; -+ -+ union { -+ struct { -+ u32 c_vid:12; -+ u32 c_cfs:1; -+ u32 c_pri:3; -+ u32 s_vid:12; -+ u32 s_dei:1; -+ u32 s_pri:3; -+ }; -+ u32 config2; -+ }; -+ -+ u8 alignment[16]; /* for 32 byte */ -+}; -+ -+struct rx_desc -+{ -+ u32 sdp; /* segment data pointer */ -+ -+ union { -+ struct { -+ u32 sdl:16; /* segment data length */ -+ u32 l4f:1; -+ u32 ipf:1; -+ u32 prot:4; -+ u32 hr:6; -+ u32 lsd:1; -+ u32 fsd:1; -+ u32 eor:1; -+ u32 cown:1; -+ }; -+ u32 config0; -+ }; -+ -+ union { -+ struct { -+ u32 ctv:1; -+ u32 stv:1; -+ u32 unv:1; -+ u32 iwan:1; -+ u32 exdv:1; -+ u32 e_wan:1; -+ u32 rsv_1:2; -+ u32 sp:3; -+ u32 crc_err:1; -+ u32 un_eth:1; -+ u32 tc:2; -+ u32 rsv_2:1; -+ u32 ip_offset:5; -+ u32 rsv_3:11; -+ }; -+ u32 config1; -+ }; -+ -+ union { -+ struct { -+ u32 c_vid:12; -+ u32 c_cfs:1; -+ u32 c_pri:3; -+ u32 s_vid:12; -+ u32 s_dei:1; -+ u32 s_pri:3; -+ }; -+ u32 config2; -+ }; -+ -+ u8 alignment[16]; /* for 32 byte alignment */ -+}; -+ -+struct switch_regs { -+ u32 phy_control; -+ u32 phy_auto_addr; -+ u32 mac_glob_cfg; -+ u32 mac_cfg[4]; -+ u32 mac_pri_ctrl[5], __res; -+ u32 etype[2]; -+ u32 udp_range[4]; -+ u32 prio_etype_udp; -+ u32 prio_ipdscp[8]; -+ u32 tc_ctrl; -+ u32 rate_ctrl; -+ u32 fc_glob_thrs; -+ u32 fc_port_thrs; -+ u32 mc_fc_glob_thrs; -+ u32 dc_glob_thrs; -+ u32 arl_vlan_cmd; -+ u32 arl_ctrl[3]; -+ u32 vlan_cfg; -+ u32 pvid[2]; -+ u32 
vlan_ctrl[3]; -+ u32 session_id[8]; -+ u32 intr_stat; -+ u32 intr_mask; -+ u32 sram_test; -+ u32 mem_queue; -+ u32 farl_ctrl; -+ u32 fc_input_thrs, __res1[2]; -+ u32 clk_skew_ctrl; -+ u32 mac_glob_cfg_ext, __res2[2]; -+ u32 dma_ring_ctrl; -+ u32 dma_auto_poll_cfg; -+ u32 delay_intr_cfg, __res3; -+ u32 ts_dma_ctrl0; -+ u32 ts_desc_ptr0; -+ u32 ts_desc_base_addr0, __res4; -+ u32 fs_dma_ctrl0; -+ u32 fs_desc_ptr0; -+ u32 fs_desc_base_addr0, __res5; -+ u32 ts_dma_ctrl1; -+ u32 ts_desc_ptr1; -+ u32 ts_desc_base_addr1, __res6; -+ u32 fs_dma_ctrl1; -+ u32 fs_desc_ptr1; -+ u32 fs_desc_base_addr1; -+}; -+ -+struct _tx_ring { -+ struct tx_desc *desc; -+ dma_addr_t phys_addr; -+ struct tx_desc *cur_addr; -+ struct sk_buff *buff_tab[TX_DESCS]; -+ u32 free_index; -+ u32 count_index; -+ u32 cur_index; -+ int num_used; -+ int num_count; -+}; -+ -+struct _rx_ring { -+ struct rx_desc *desc; -+ dma_addr_t phys_addr; -+ struct rx_desc *cur_addr; -+ struct sk_buff *buff_tab[RX_DESCS]; -+ u32 cur_index; -+ u32 alloc_index; -+ int alloc_count; -+}; -+ -+struct sw { -+ struct resource *mem_res; -+ struct switch_regs __iomem *regs; -+ struct napi_struct napi; -+ struct cns3xxx_plat_info *plat; -+ struct _tx_ring *tx_ring; -+ struct _rx_ring *rx_ring; -+ u32 mtu; -+}; -+ -+struct port { -+ struct net_device *netdev; -+ struct phy_device *phydev; -+ struct sw *sw; -+ int id; /* logical port ID */ -+ int speed, duplex; -+ u32 mtu; -+}; -+ -+static spinlock_t mdio_lock; -+static spinlock_t tx_lock; -+static spinlock_t stat_lock; -+static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */ -+struct mii_bus *mdio_bus; -+static int ports_open; -+static struct port *switch_port_tab[3]; -+static struct dma_pool *rx_dma_pool; -+static struct dma_pool *tx_dma_pool; -+struct net_device *napi_dev; -+ -+static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, -+ int write, u16 cmd) -+{ -+ int cycles = 0; -+ u32 temp = 0; -+ -+ temp = __raw_readl(&mdio_regs->phy_control); -+ temp |= MDIO_CMD_COMPLETE; -+ __raw_writel(temp, &mdio_regs->phy_control); -+ udelay(10); -+ -+ if (write) { -+ temp = (cmd << MDIO_VALUE_OFFSET); -+ temp |= MDIO_WRITE_COMMAND; -+ } else { -+ temp = MDIO_READ_COMMAND; -+ } -+ temp |= ((location & 0x1f) << MDIO_REG_OFFSET); -+ temp |= (phy_id & 0x1f); -+ -+ __raw_writel(temp, &mdio_regs->phy_control); -+ -+ while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0) -+ && cycles < 5000) { -+ udelay(1); -+ cycles++; -+ } -+ -+ if (cycles == 5000) { -+ printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name, -+ phy_id); -+ return -1; -+ } -+ -+ temp = __raw_readl(&mdio_regs->phy_control); -+ temp |= MDIO_CMD_COMPLETE; -+ __raw_writel(temp, &mdio_regs->phy_control); -+ -+ if (write) -+ return 0; -+ -+ return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF); -+} -+ -+static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location) -+{ -+ unsigned long flags; -+ int ret; -+ -+ spin_lock_irqsave(&mdio_lock, flags); -+ ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0); -+ spin_unlock_irqrestore(&mdio_lock, flags); -+ return ret; -+} -+ -+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location, -+ u16 val) -+{ -+ unsigned long flags; -+ int ret; -+ -+ spin_lock_irqsave(&mdio_lock, flags); -+ ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val); -+ spin_unlock_irqrestore(&mdio_lock, flags); -+ return ret; -+} -+ -+static int cns3xxx_mdio_register(void) -+{ -+ int err; -+ -+ if (!(mdio_bus = mdiobus_alloc())) -+ return -ENOMEM; -+ -+ 
mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT; -+ -+ spin_lock_init(&mdio_lock); -+ mdio_bus->name = "CNS3xxx MII Bus"; -+ mdio_bus->read = &cns3xxx_mdio_read; -+ mdio_bus->write = &cns3xxx_mdio_write; -+ strcpy(mdio_bus->id, "0"); -+ -+ if ((err = mdiobus_register(mdio_bus))) -+ mdiobus_free(mdio_bus); -+ return err; -+} -+ -+static void cns3xxx_mdio_remove(void) -+{ -+ mdiobus_unregister(mdio_bus); -+ mdiobus_free(mdio_bus); -+} -+ -+static void cns3xxx_adjust_link(struct net_device *dev) -+{ -+ struct port *port = netdev_priv(dev); -+ struct phy_device *phydev = port->phydev; -+ -+ if (!phydev->link) { -+ if (port->speed) { -+ port->speed = 0; -+ printk(KERN_INFO "%s: link down\n", dev->name); -+ } -+ return; -+ } -+ -+ if (port->speed == phydev->speed && port->duplex == phydev->duplex) -+ return; -+ -+ port->speed = phydev->speed; -+ port->duplex = phydev->duplex; -+ -+ printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n", -+ dev->name, port->speed, port->duplex ? "full" : "half"); -+} -+ -+irqreturn_t eth_rx_irq(int irq, void *pdev) -+{ -+ struct net_device *dev = pdev; -+ struct sw *sw = netdev_priv(dev); -+ if (likely(napi_schedule_prep(&sw->napi))) { -+ disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC); -+ __napi_schedule(&sw->napi); -+ } -+ return (IRQ_HANDLED); -+} -+ -+static void cns3xxx_alloc_rx_buf(struct sw *sw, int received) -+{ -+ struct _rx_ring *rx_ring = sw->rx_ring; -+ unsigned int i = rx_ring->alloc_index; -+ struct rx_desc *desc; -+ struct sk_buff *skb; -+ u32 mtu = sw->mtu; -+ -+ rx_ring->alloc_count += received; -+ -+ for (received = rx_ring->alloc_count; received > 0; received--) { -+ desc = &(rx_ring)->desc[i]; -+ -+ if ((skb = dev_alloc_skb(mtu))) { -+ if (SKB_DMA_REALIGN) -+ skb_reserve(skb, SKB_DMA_REALIGN); -+ skb_reserve(skb, NET_IP_ALIGN); -+ desc->sdp = dma_map_single(NULL, skb->data, -+ mtu, DMA_FROM_DEVICE); -+ if (dma_mapping_error(NULL, desc->sdp)) { -+ dev_kfree_skb(skb); -+ /* Failed to map, better luck next time */ -+ goto out;; -+ } -+ } else { -+ /* Failed to allocate skb, try again next time */ -+ goto out; -+ } -+ -+ /* put the new buffer on RX-free queue */ -+ rx_ring->buff_tab[i] = skb; -+ -+ if (++i == RX_DESCS) { -+ i = 0; -+ desc->config0 = END_OF_RING | FIRST_SEGMENT | -+ LAST_SEGMENT | mtu; -+ } else { -+ desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu; -+ } -+ } -+out: -+ rx_ring->alloc_count = received; -+ rx_ring->alloc_index = i; -+} -+ -+static void update_tx_stats(struct sw *sw) -+{ -+ struct _tx_ring *tx_ring = sw->tx_ring; -+ struct tx_desc *desc; -+ struct tx_desc *next_desc; -+ struct sk_buff *skb; -+ int i; -+ int index; -+ int num_count; -+ -+ spin_lock_bh(&stat_lock); -+ -+ num_count = tx_ring->num_count; -+ -+ if (!num_count) { -+ spin_unlock_bh(&stat_lock); -+ return; -+ } -+ -+ index = tx_ring->count_index; -+ desc = &(tx_ring)->desc[index]; -+ for (i = 0; i < num_count; i++) { -+ skb = tx_ring->buff_tab[index]; -+ if (desc->cown) { -+ tx_ring->buff_tab[index] = 0; -+ if (unlikely(++index == TX_DESCS)) index = 0; -+ next_desc = &(tx_ring)->desc[index]; -+ prefetch(next_desc + 4); -+ if (likely(skb)) { -+ skb->dev->stats.tx_packets++; -+ skb->dev->stats.tx_bytes += skb->len; -+ dev_kfree_skb_any(skb); -+ } -+ desc = next_desc; -+ } else { -+ break; -+ } -+ } -+ tx_ring->num_count -= i; -+ tx_ring->count_index = index; -+ -+ spin_unlock_bh(&stat_lock); -+} -+ -+static void clear_tx_desc(struct sw *sw) -+{ -+ struct _tx_ring *tx_ring = sw->tx_ring; -+ struct tx_desc *desc; -+ struct tx_desc *next_desc; -+ 
int i; -+ int index; -+ int num_used = tx_ring->num_used - tx_ring->num_count; -+ -+ if (num_used < (TX_DESCS >> 1)) -+ return; -+ -+ index = tx_ring->free_index; -+ desc = &(tx_ring)->desc[index]; -+ for (i = 0; i < num_used; i++) { -+ if (desc->cown) { -+ if (unlikely(++index == TX_DESCS)) index = 0; -+ next_desc = &(tx_ring)->desc[index]; -+ prefetch(next_desc); -+ prefetch(next_desc + 4); -+ if (likely(desc->sdp)) -+ dma_unmap_single(NULL, desc->sdp, -+ desc->sdl, DMA_TO_DEVICE); -+ desc = next_desc; -+ } else { -+ break; -+ } -+ } -+ tx_ring->free_index = index; -+ tx_ring->num_used -= i; -+} -+ -+static int eth_poll(struct napi_struct *napi, int budget) -+{ -+ struct sw *sw = container_of(napi, struct sw, napi); -+ struct net_device *dev; -+ struct _rx_ring *rx_ring = sw->rx_ring; -+ int received = 0; -+ unsigned int length; -+ unsigned int i = rx_ring->cur_index; -+ struct rx_desc *next_desc; -+ struct rx_desc *desc = &(rx_ring)->desc[i]; -+ int port_id; -+ -+ while (desc->cown) { -+ struct sk_buff *skb; -+ -+ if (received >= budget) -+ break; -+ -+ skb = rx_ring->buff_tab[i]; -+ -+ if (++i == RX_DESCS) i = 0; -+ next_desc = &(rx_ring)->desc[i]; -+ prefetch(next_desc); -+ -+ port_id = desc->sp; -+ if (port_id == 4) -+ dev = switch_port_tab[2]->netdev; -+ else -+ dev = switch_port_tab[port_id]->netdev; -+ -+ length = desc->sdl; -+ /* process received frame */ -+ dma_unmap_single(&dev->dev, desc->sdp, -+ length, DMA_FROM_DEVICE); -+ -+ skb_put(skb, length); -+ -+ skb->dev = dev; -+ skb->protocol = eth_type_trans(skb, dev); -+ -+ dev->stats.rx_packets++; -+ dev->stats.rx_bytes += length; -+ -+ switch (desc->prot) { -+ case 1: -+ case 2: -+ case 5: -+ case 6: -+ case 13: -+ case 14: -+ if (desc->l4f) -+ skb->ip_summed = CHECKSUM_NONE; -+ else -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ break; -+ default: -+ skb->ip_summed = CHECKSUM_NONE; -+ break; -+ } -+ -+ napi_gro_receive(napi, skb); -+ -+ received++; -+ desc = next_desc; -+ } -+ -+ cns3xxx_alloc_rx_buf(sw, received); -+ rx_ring->cur_index = i; -+ -+ if (received != budget) { -+ napi_complete(napi); -+ enable_irq(IRQ_CNS3XXX_SW_R0RXC); -+ } -+ -+ return received; -+} -+ -+static int eth_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct port *port = netdev_priv(dev); -+ struct sw *sw = port->sw; -+ struct _tx_ring *tx_ring = sw->tx_ring; -+ struct tx_desc *tx_desc; -+ int index; -+ int len = skb->len; -+ char pmap = (1 << port->id); -+ -+ if (pmap == 8) -+ pmap = (1 << 4); -+ -+ if (unlikely(len > sw->mtu)) { -+ dev_kfree_skb(skb); -+ dev->stats.tx_errors++; -+ return NETDEV_TX_OK; -+ } -+ -+ update_tx_stats(sw); -+ -+ spin_lock_bh(&tx_lock); -+ -+ clear_tx_desc(sw); -+ -+ if (unlikely(tx_ring->num_used == TX_DESCS)) { -+ spin_unlock_bh(&tx_lock); -+ return NETDEV_TX_BUSY; -+ } -+ -+ index = tx_ring->cur_index; -+ -+ if (unlikely(++tx_ring->cur_index == TX_DESCS)) -+ tx_ring->cur_index = 0; -+ -+ tx_ring->num_used++; -+ tx_ring->num_count++; -+ -+ spin_unlock_bh(&tx_lock); -+ -+ tx_desc = &(tx_ring)->desc[index]; -+ -+ tx_desc->sdp = dma_map_single(NULL, skb->data, len, -+ DMA_TO_DEVICE); -+ -+ if (dma_mapping_error(NULL, tx_desc->sdp)) { -+ dev_kfree_skb(skb); -+ dev->stats.tx_errors++; -+ return NETDEV_TX_OK; -+ } -+ -+ tx_desc->pmap = pmap; -+ tx_ring->buff_tab[index] = skb; -+ -+ if (index == TX_DESCS - 1) { -+ tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT | -+ FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM | -+ TCP_CHECKSUM | len; -+ } else { -+ tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | -+ 
FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM | -+ TCP_CHECKSUM | len; -+ } -+ -+ return NETDEV_TX_OK; -+} -+ -+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) -+{ -+ struct port *port = netdev_priv(dev); -+ -+ if (!netif_running(dev)) -+ return -EINVAL; -+ return phy_mii_ioctl(port->phydev, req, cmd); -+} -+ -+/* ethtool support */ -+ -+static void cns3xxx_get_drvinfo(struct net_device *dev, -+ struct ethtool_drvinfo *info) -+{ -+ strcpy(info->driver, DRV_NAME); -+ strcpy(info->bus_info, "internal"); -+} -+ -+static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -+{ -+ struct port *port = netdev_priv(dev); -+ return phy_ethtool_gset(port->phydev, cmd); -+} -+ -+static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -+{ -+ struct port *port = netdev_priv(dev); -+ return phy_ethtool_sset(port->phydev, cmd); -+} -+ -+static int cns3xxx_nway_reset(struct net_device *dev) -+{ -+ struct port *port = netdev_priv(dev); -+ return phy_start_aneg(port->phydev); -+} -+ -+static struct ethtool_ops cns3xxx_ethtool_ops = { -+ .get_drvinfo = cns3xxx_get_drvinfo, -+ .get_settings = cns3xxx_get_settings, -+ .set_settings = cns3xxx_set_settings, -+ .nway_reset = cns3xxx_nway_reset, -+ .get_link = ethtool_op_get_link, -+}; -+ -+ -+static int init_rings(struct sw *sw) -+{ -+ int i; -+ struct _rx_ring *rx_ring = sw->rx_ring; -+ struct _tx_ring *tx_ring = sw->tx_ring; -+ -+ __raw_writel(0, &sw->regs->fs_dma_ctrl0); -+ __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg); -+ __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); -+ __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); -+ -+ __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); -+ -+ if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL, -+ RX_POOL_ALLOC_SIZE, 32, 0))) -+ return -ENOMEM; -+ -+ if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL, -+ &rx_ring->phys_addr))) -+ return -ENOMEM; -+ memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE); -+ -+ /* Setup RX buffers */ -+ for (i = 0; i < RX_DESCS; i++) { -+ struct rx_desc *desc = &(rx_ring)->desc[i]; -+ struct sk_buff *skb; -+ if (!(skb = dev_alloc_skb(sw->mtu))) -+ return -ENOMEM; -+ if (SKB_DMA_REALIGN) -+ skb_reserve(skb, SKB_DMA_REALIGN); -+ skb_reserve(skb, NET_IP_ALIGN); -+ desc->sdl = sw->mtu; -+ if (i == (RX_DESCS - 1)) -+ desc->eor = 1; -+ desc->fsd = 1; -+ desc->lsd = 1; -+ -+ desc->sdp = dma_map_single(NULL, skb->data, -+ sw->mtu, DMA_FROM_DEVICE); -+ if (dma_mapping_error(NULL, desc->sdp)) { -+ return -EIO; -+ } -+ rx_ring->buff_tab[i] = skb; -+ desc->cown = 0; -+ } -+ __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0); -+ __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0); -+ -+ if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL, -+ TX_POOL_ALLOC_SIZE, 32, 0))) -+ return -ENOMEM; -+ -+ if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL, -+ &tx_ring->phys_addr))) -+ return -ENOMEM; -+ memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE); -+ -+ /* Setup TX buffers */ -+ for (i = 0; i < TX_DESCS; i++) { -+ struct tx_desc *desc = &(tx_ring)->desc[i]; -+ tx_ring->buff_tab[i] = 0; -+ -+ if (i == (TX_DESCS - 1)) -+ desc->eor = 1; -+ desc->cown = 1; -+ } -+ __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0); -+ __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0); -+ -+ return 0; -+} -+ -+static void destroy_rings(struct sw *sw) -+{ -+ int i; -+ if (sw->rx_ring->desc) { -+ for (i = 0; i < RX_DESCS; i++) { -+ struct _rx_ring *rx_ring = 
sw->rx_ring; -+ struct rx_desc *desc = &(rx_ring)->desc[i]; -+ struct sk_buff *skb = sw->rx_ring->buff_tab[i]; -+ if (skb) { -+ dma_unmap_single(NULL, -+ desc->sdp, -+ sw->mtu, DMA_FROM_DEVICE); -+ dev_kfree_skb(skb); -+ } -+ } -+ dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr); -+ dma_pool_destroy(rx_dma_pool); -+ rx_dma_pool = 0; -+ sw->rx_ring->desc = 0; -+ } -+ if (sw->tx_ring->desc) { -+ for (i = 0; i < TX_DESCS; i++) { -+ struct _tx_ring *tx_ring = sw->tx_ring; -+ struct tx_desc *desc = &(tx_ring)->desc[i]; -+ struct sk_buff *skb = sw->tx_ring->buff_tab[i]; -+ if (skb) { -+ dma_unmap_single(NULL, desc->sdp, -+ skb->len, DMA_TO_DEVICE); -+ dev_kfree_skb(skb); -+ } -+ } -+ dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr); -+ dma_pool_destroy(tx_dma_pool); -+ tx_dma_pool = 0; -+ sw->tx_ring->desc = 0; -+ } -+} -+ -+static int eth_open(struct net_device *dev) -+{ -+ struct port *port = netdev_priv(dev); -+ struct sw *sw = port->sw; -+ u32 temp; -+ -+ port->speed = 0; /* force "link up" message */ -+ phy_start(port->phydev); -+ -+ netif_start_queue(dev); -+ -+ if (!ports_open) { -+ request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev); -+ napi_enable(&sw->napi); -+ netif_start_queue(napi_dev); -+ //enable_irq(IRQ_CNS3XXX_SW_R0RXC); -+ -+ temp = __raw_readl(&sw->regs->mac_cfg[2]); -+ temp &= ~(PORT_DISABLE); -+ __raw_writel(temp, &sw->regs->mac_cfg[2]); -+ -+ temp = __raw_readl(&sw->regs->dma_auto_poll_cfg); -+ temp &= ~(TS_SUSPEND | FS_SUSPEND); -+ __raw_writel(temp, &sw->regs->dma_auto_poll_cfg); -+ -+ __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg); -+ } -+ temp = __raw_readl(&sw->regs->mac_cfg[port->id]); -+ temp &= ~(PORT_DISABLE); -+ __raw_writel(temp, &sw->regs->mac_cfg[port->id]); -+ -+ ports_open++; -+ netif_carrier_on(dev); -+ -+ return 0; -+} -+ -+static int eth_close(struct net_device *dev) -+{ -+ struct port *port = netdev_priv(dev); -+ struct sw *sw = port->sw; -+ u32 temp; -+ -+ ports_open--; -+ -+ temp = __raw_readl(&sw->regs->mac_cfg[port->id]); -+ temp |= (PORT_DISABLE); -+ __raw_writel(temp, &sw->regs->mac_cfg[port->id]); -+ -+ netif_stop_queue(dev); -+ -+ phy_stop(port->phydev); -+ -+ if (!ports_open) { -+ disable_irq(IRQ_CNS3XXX_SW_R0RXC); -+ free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev); -+ napi_disable(&sw->napi); -+ netif_stop_queue(napi_dev); -+ temp = __raw_readl(&sw->regs->mac_cfg[2]); -+ temp |= (PORT_DISABLE); -+ __raw_writel(temp, &sw->regs->mac_cfg[2]); -+ -+ __raw_writel(TS_SUSPEND | FS_SUSPEND, -+ &sw->regs->dma_auto_poll_cfg); -+ } -+ -+ netif_carrier_off(dev); -+ return 0; -+} -+ -+static void eth_rx_mode(struct net_device *dev) -+{ -+ struct port *port = netdev_priv(dev); -+ struct sw *sw = port->sw; -+ u32 temp; -+ -+ temp = __raw_readl(&sw->regs->mac_glob_cfg); -+ -+ if (dev->flags & IFF_PROMISC) { -+ if (port->id == 3) -+ temp |= ((1 << 2) << PROMISC_OFFSET); -+ else -+ temp |= ((1 << port->id) << PROMISC_OFFSET); -+ } else { -+ if (port->id == 3) -+ temp &= ~((1 << 2) << PROMISC_OFFSET); -+ else -+ temp &= ~((1 << port->id) << PROMISC_OFFSET); -+ } -+ __raw_writel(temp, &sw->regs->mac_glob_cfg); -+} -+ -+static int eth_set_mac(struct net_device *netdev, void *p) -+{ -+ struct port *port = netdev_priv(netdev); -+ struct sw *sw = port->sw; -+ struct sockaddr *addr = p; -+ u32 cycles = 0; -+ -+ if (!is_valid_ether_addr(addr->sa_data)) -+ return -EADDRNOTAVAIL; -+ -+ /* Invalidate old ARL Entry */ -+ if (port->id == 3) -+ __raw_writel((port->id << 16) | (0x4 << 9), 
&sw->regs->arl_ctrl[0]); -+ else -+ __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); -+ __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) | -+ (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])), -+ &sw->regs->arl_ctrl[1]); -+ -+ __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) | -+ (1 << 1)), -+ &sw->regs->arl_ctrl[2]); -+ __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd); -+ -+ while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) -+ && cycles < 5000) { -+ udelay(1); -+ cycles++; -+ } -+ -+ cycles = 0; -+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); -+ -+ if (port->id == 3) -+ __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); -+ else -+ __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); -+ __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) | -+ (addr->sa_data[2] << 8) | (addr->sa_data[3])), -+ &sw->regs->arl_ctrl[1]); -+ -+ __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) | -+ (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]); -+ __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd); -+ -+ while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) -+ && cycles < 5000) { -+ udelay(1); -+ cycles++; -+ } -+ return 0; -+} -+ -+static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu) -+{ -+ struct port *port = netdev_priv(netdev); -+ struct sw *sw = port->sw; -+ u32 temp; -+ int i; -+ struct _rx_ring *rx_ring = sw->rx_ring; -+ struct rx_desc *desc; -+ struct sk_buff *skb; -+ -+ if (new_mtu > MAX_MRU) -+ return -EINVAL; -+ -+ netdev->mtu = new_mtu; -+ -+ new_mtu += 36 + SKB_DMA_REALIGN; -+ port->mtu = new_mtu; -+ -+ new_mtu = 0; -+ for (i = 0; i < 3; i++) { -+ if (switch_port_tab[i]) { -+ if (switch_port_tab[i]->mtu > new_mtu) -+ new_mtu = switch_port_tab[i]->mtu; -+ } -+ } -+ -+ -+ if (new_mtu == sw->mtu) -+ return 0; -+ -+ disable_irq(IRQ_CNS3XXX_SW_R0RXC); -+ -+ sw->mtu = new_mtu; -+ -+ /* Disable DMA */ -+ __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg); -+ -+ for (i = 0; i < RX_DESCS; i++) { -+ desc = &(rx_ring)->desc[i]; -+ /* Check if we own it, if we do, it will get set correctly -+ * when it is re-used */ -+ if (!desc->cown) { -+ skb = rx_ring->buff_tab[i]; -+ dma_unmap_single(NULL, desc->sdp, desc->sdl, -+ DMA_FROM_DEVICE); -+ dev_kfree_skb(skb); -+ -+ if ((skb = dev_alloc_skb(new_mtu))) { -+ if (SKB_DMA_REALIGN) -+ skb_reserve(skb, SKB_DMA_REALIGN); -+ skb_reserve(skb, NET_IP_ALIGN); -+ desc->sdp = dma_map_single(NULL, skb->data, -+ new_mtu, DMA_FROM_DEVICE); -+ if (dma_mapping_error(NULL, desc->sdp)) { -+ dev_kfree_skb(skb); -+ skb = NULL; -+ } -+ } -+ -+ /* put the new buffer on RX-free queue */ -+ rx_ring->buff_tab[i] = skb; -+ -+ if (i == RX_DESCS - 1) -+ desc->config0 = END_OF_RING | FIRST_SEGMENT | -+ LAST_SEGMENT | new_mtu; -+ else -+ desc->config0 = FIRST_SEGMENT | -+ LAST_SEGMENT | new_mtu; -+ } -+ } -+ -+ /* Re-ENABLE DMA */ -+ temp = __raw_readl(&sw->regs->dma_auto_poll_cfg); -+ temp &= ~(TS_SUSPEND | FS_SUSPEND); -+ __raw_writel(temp, &sw->regs->dma_auto_poll_cfg); -+ -+ __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg); -+ -+ enable_irq(IRQ_CNS3XXX_SW_R0RXC); -+ -+ return 0; -+} -+ -+static const struct net_device_ops cns3xxx_netdev_ops = { -+ .ndo_open = eth_open, -+ .ndo_stop = eth_close, -+ .ndo_start_xmit = eth_xmit, -+ .ndo_set_rx_mode = eth_rx_mode, -+ .ndo_do_ioctl = eth_ioctl, -+ .ndo_change_mtu = cns3xxx_change_mtu, -+ .ndo_set_mac_address = 
eth_set_mac, -+ .ndo_validate_addr = eth_validate_addr, -+}; -+ -+static int __devinit eth_init_one(struct platform_device *pdev) -+{ -+ int i; -+ struct port *port; -+ struct sw *sw; -+ struct net_device *dev; -+ struct cns3xxx_plat_info *plat = pdev->dev.platform_data; -+ u32 regs_phys; -+ char phy_id[MII_BUS_ID_SIZE + 3]; -+ int err; -+ u32 temp; -+ -+ spin_lock_init(&tx_lock); -+ spin_lock_init(&stat_lock); -+ -+ if (!(napi_dev = alloc_etherdev(sizeof(struct sw)))) -+ return -ENOMEM; -+ strcpy(napi_dev->name, "switch%d"); -+ -+ SET_NETDEV_DEV(napi_dev, &pdev->dev); -+ sw = netdev_priv(napi_dev); -+ memset(sw, 0, sizeof(struct sw)); -+ sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT; -+ regs_phys = CNS3XXX_SWITCH_BASE; -+ sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name); -+ if (!sw->mem_res) { -+ err = -EBUSY; -+ goto err_free; -+ } -+ -+ sw->mtu = 1536 + SKB_DMA_REALIGN; -+ -+ for (i = 0; i < 4; i++) { -+ temp = __raw_readl(&sw->regs->mac_cfg[i]); -+ temp |= (PORT_DISABLE) | 0x80000000; -+ __raw_writel(temp, &sw->regs->mac_cfg[i]); -+ } -+ -+ temp = PORT_DISABLE; -+ __raw_writel(temp, &sw->regs->mac_cfg[2]); -+ -+ temp = __raw_readl(&sw->regs->vlan_cfg); -+ temp |= NIC_MODE | VLAN_UNAWARE; -+ __raw_writel(temp, &sw->regs->vlan_cfg); -+ -+ __raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET | -+ CRC_STRIPPING, &sw->regs->mac_glob_cfg); -+ -+ if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) { -+ err = -ENOMEM; -+ goto err_free; -+ } -+ memset(sw->rx_ring, 0, sizeof(struct _rx_ring)); -+ -+ if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) { -+ err = -ENOMEM; -+ goto err_free_rx; -+ } -+ memset(sw->tx_ring, 0, sizeof(struct _tx_ring)); -+ -+ if ((err = init_rings(sw)) != 0) { -+ destroy_rings(sw); -+ err = -ENOMEM; -+ goto err_free_rings; -+ } -+ platform_set_drvdata(pdev, napi_dev); -+ -+ netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT); -+ -+ for (i = 0; i < 3; i++) { -+ if (!(plat->ports & (1 << i))) { -+ continue; -+ } -+ -+ if (!(dev = alloc_etherdev(sizeof(struct port)))) { -+ goto free_ports; -+ } -+ -+ //SET_NETDEV_DEV(dev, &pdev->dev); -+ port = netdev_priv(dev); -+ port->netdev = dev; -+ if (i == 2) -+ port->id = 3; -+ else -+ port->id = i; -+ port->sw = sw; -+ port->mtu = sw->mtu; -+ -+ temp = __raw_readl(&sw->regs->mac_cfg[port->id]); -+ temp |= (PORT_DISABLE); -+ __raw_writel(temp, &sw->regs->mac_cfg[port->id]); -+ -+ dev->netdev_ops = &cns3xxx_netdev_ops; -+ dev->ethtool_ops = &cns3xxx_ethtool_ops; -+ dev->tx_queue_len = 1000; -+ dev->features = NETIF_F_HW_CSUM; -+ -+ dev->vlan_features = NETIF_F_HW_CSUM; -+ -+ switch_port_tab[i] = port; -+ memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN); -+ -+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]); -+ port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0, -+ PHY_INTERFACE_MODE_RGMII); -+ if ((err = IS_ERR(port->phydev))) { -+ switch_port_tab[i] = 0; -+ free_netdev(dev); -+ goto free_ports; -+ } -+ -+ port->phydev->irq = PHY_POLL; -+ -+ if ((err = register_netdev(dev))) { -+ phy_disconnect(port->phydev); -+ switch_port_tab[i] = 0; -+ free_netdev(dev); -+ goto free_ports; -+ } -+ -+ printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]); -+ netif_carrier_off(dev); -+ dev = 0; -+ } -+ -+ return 0; -+ -+free_ports: -+ err = -ENOMEM; -+ for (--i; i >= 0; i--) { -+ if (switch_port_tab[i]) { -+ port = switch_port_tab[i]; -+ dev = port->netdev; -+ unregister_netdev(dev); -+ 
phy_disconnect(port->phydev); -+ switch_port_tab[i] = 0; -+ free_netdev(dev); -+ } -+ } -+err_free_rings: -+ kfree(sw->tx_ring); -+err_free_rx: -+ kfree(sw->rx_ring); -+err_free: -+ free_netdev(napi_dev); -+ return err; -+} -+ -+static int __devexit eth_remove_one(struct platform_device *pdev) -+{ -+ struct net_device *dev = platform_get_drvdata(pdev); -+ struct sw *sw = netdev_priv(dev); -+ int i; -+ destroy_rings(sw); -+ -+ for (i = 2; i >= 0; i--) { -+ if (switch_port_tab[i]) { -+ struct port *port = switch_port_tab[i]; -+ struct net_device *dev = port->netdev; -+ unregister_netdev(dev); -+ phy_disconnect(port->phydev); -+ switch_port_tab[i] = 0; -+ free_netdev(dev); -+ } -+ } -+ -+ release_resource(sw->mem_res); -+ free_netdev(napi_dev); -+ return 0; -+} -+ -+static struct platform_driver cns3xxx_eth_driver = { -+ .driver.name = DRV_NAME, -+ .probe = eth_init_one, -+ .remove = eth_remove_one, -+}; -+ -+static int __init eth_init_module(void) -+{ -+ int err; -+ if ((err = cns3xxx_mdio_register())) -+ return err; -+ return platform_driver_register(&cns3xxx_eth_driver); -+} -+ -+static void __exit eth_cleanup_module(void) -+{ -+ platform_driver_unregister(&cns3xxx_eth_driver); -+ cns3xxx_mdio_remove(); -+} -+ -+module_init(eth_init_module); -+module_exit(eth_cleanup_module); -+ -+MODULE_AUTHOR("Chris Lang"); -+MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver"); -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS("platform:cns3xxx_eth"); ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/include/mach/platform.h -@@ -0,0 +1,26 @@ -+/* -+ * arch/arm/mach-cns3xxx/include/mach/platform.h -+ * -+ * Copyright 2011 Gateworks Corporation -+ * Chris Lang -+ * -+ * This file is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License, Version 2, as -+ * published by the Free Software Foundation. -+ * -+ * This file is distributed in the hope that it will be useful, -+ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or -+ * NONINFRINGEMENT. See the GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this file; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or -+ * visit http://www.gnu.org/licenses/. -+ * -+ * This file may also be available under a different license from Cavium. 
-+ * Contact Cavium Networks for more information -+ * -+ ******************************************************************************/ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+ * define access macros -+ */ -+#define SPI_MEM_MAP_VALUE(reg_offset) (*((u32 volatile *)(CNS3XXX_SSP_BASE_VIRT + reg_offset))) -+ -+#define SPI_CONFIGURATION_REG SPI_MEM_MAP_VALUE(0x40) -+#define SPI_SERVICE_STATUS_REG SPI_MEM_MAP_VALUE(0x44) -+#define SPI_BIT_RATE_CONTROL_REG SPI_MEM_MAP_VALUE(0x48) -+#define SPI_TRANSMIT_CONTROL_REG SPI_MEM_MAP_VALUE(0x4C) -+#define SPI_TRANSMIT_BUFFER_REG SPI_MEM_MAP_VALUE(0x50) -+#define SPI_RECEIVE_CONTROL_REG SPI_MEM_MAP_VALUE(0x54) -+#define SPI_RECEIVE_BUFFER_REG SPI_MEM_MAP_VALUE(0x58) -+#define SPI_FIFO_TRANSMIT_CONFIG_REG SPI_MEM_MAP_VALUE(0x5C) -+#define SPI_FIFO_TRANSMIT_CONTROL_REG SPI_MEM_MAP_VALUE(0x60) -+#define SPI_FIFO_RECEIVE_CONFIG_REG SPI_MEM_MAP_VALUE(0x64) -+#define SPI_INTERRUPT_STATUS_REG SPI_MEM_MAP_VALUE(0x68) -+#define SPI_INTERRUPT_ENABLE_REG SPI_MEM_MAP_VALUE(0x6C) -+ -+#define SPI_TRANSMIT_BUFFER_REG_ADDR (CNS3XXX_SSP_BASE +0x50) -+#define SPI_RECEIVE_BUFFER_REG_ADDR (CNS3XXX_SSP_BASE +0x58) -+ -+/* Structure for SPI controller of CNS3XXX SOCs */ -+struct cns3xxx_spi { -+ /* bitbang has to be first */ -+ struct spi_bitbang bitbang; -+ struct completion done; -+ wait_queue_head_t wait; -+ -+ int len; -+ int count; -+ int last_in_message_list; -+ -+ /* data buffers */ -+ const unsigned char *tx; -+ unsigned char *rx; -+ -+ struct spi_master *master; -+ struct platform_device *pdev; -+ struct device *dev; -+}; -+ -+static inline u8 cns3xxx_spi_bus_idle(void) -+{ -+ return ((SPI_SERVICE_STATUS_REG & 0x1) ? 0 : 1); -+} -+ -+static inline u8 cns3xxx_spi_tx_buffer_empty(void) -+{ -+ return ((SPI_INTERRUPT_STATUS_REG & (0x1 << 3)) ? 1 : 0); -+} -+ -+static inline u8 cns3xxx_spi_rx_buffer_full(void) -+{ -+ return ((SPI_INTERRUPT_STATUS_REG & (0x1 << 2)) ? 1 : 0); -+} -+ -+u8 cns3xxx_spi_tx_rx(u8 tx_channel, u8 tx_eof, u32 tx_data, -+ u32 * rx_data) -+{ -+ u8 rx_channel; -+ u8 rx_eof; -+ -+ while (!cns3xxx_spi_bus_idle()) ; // do nothing -+ -+ while (!cns3xxx_spi_tx_buffer_empty()) ; // do nothing -+ -+ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); -+ SPI_TRANSMIT_CONTROL_REG |= (tx_channel & 0x3) | ((tx_eof & 0x1) << 2); -+ -+ SPI_TRANSMIT_BUFFER_REG = tx_data; -+ -+ while (!cns3xxx_spi_rx_buffer_full()) ; // do nothing -+ -+ rx_channel = SPI_RECEIVE_CONTROL_REG & 0x3; -+ rx_eof = (SPI_RECEIVE_CONTROL_REG & (0x1 << 2)) ? 
1 : 0; -+ -+ *rx_data = SPI_RECEIVE_BUFFER_REG; -+ -+ if ((tx_channel != rx_channel) || (tx_eof != rx_eof)) { -+ return 0; -+ } else { -+ return 1; -+ } -+} -+ -+u8 cns3xxx_spi_tx(u8 tx_channel, u8 tx_eof, u32 tx_data) -+{ -+ -+ while (!cns3xxx_spi_bus_idle()) ; // do nothing -+ -+ while (!cns3xxx_spi_tx_buffer_empty()) ; // do nothing -+ -+ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); -+ SPI_TRANSMIT_CONTROL_REG |= (tx_channel & 0x3) | ((tx_eof & 0x1) << 2); -+ -+ SPI_TRANSMIT_BUFFER_REG = tx_data; -+ -+ return 1; -+} -+ -+static inline struct cns3xxx_spi *to_hw(struct spi_device *sdev) -+{ -+ return spi_master_get_devdata(sdev->master); -+} -+ -+static int cns3xxx_spi_setup_transfer(struct spi_device *spi, -+ struct spi_transfer *t) -+{ -+ return 0; -+} -+ -+static void cns3xxx_spi_chipselect(struct spi_device *spi, int value) -+{ -+ unsigned int spi_config; -+ -+ switch (value) { -+ case BITBANG_CS_INACTIVE: -+ break; -+ -+ case BITBANG_CS_ACTIVE: -+ spi_config = SPI_CONFIGURATION_REG; -+ -+ if (spi->mode & SPI_CPHA) -+ spi_config |= (0x1 << 13); -+ else -+ spi_config &= ~(0x1 << 13); -+ -+ if (spi->mode & SPI_CPOL) -+ spi_config |= (0x1 << 14); -+ else -+ spi_config &= ~(0x1 << 14); -+ -+ /* write new configration */ -+ SPI_CONFIGURATION_REG = spi_config; -+ -+ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); -+ SPI_TRANSMIT_CONTROL_REG |= (spi->chip_select & 0x3); -+ -+ break; -+ } -+} -+ -+static int cns3xxx_spi_setup(struct spi_device *spi) -+{ -+ if (!spi->bits_per_word) -+ spi->bits_per_word = 8; -+ -+ return 0; -+} -+ -+static int cns3xxx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) -+{ -+ struct cns3xxx_spi *hw = to_hw(spi); -+ -+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", t->tx_buf, t->rx_buf, -+ t->len); -+ -+ hw->tx = t->tx_buf; -+ hw->rx = t->rx_buf; -+ hw->len = t->len; -+ hw->count = 0; -+ hw->last_in_message_list = t->last_in_message_list; -+ -+ init_completion(&hw->done); -+ -+ if (hw->tx) { -+ int i; -+ u32 rx_data; -+ for (i = 0; i < (hw->len - 1); i++) { -+ dev_dbg(&spi->dev, -+ "[SPI_CNS3XXX_DEBUG] hw->tx[%02d]: 0x%02x\n", i, -+ hw->tx[i]); -+ cns3xxx_spi_tx_rx(spi->chip_select, 0, hw->tx[i], -+ &rx_data); -+ if (hw->rx) { -+ hw->rx[i] = rx_data; -+ dev_dbg(&spi->dev, -+ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", -+ i, hw->rx[i]); -+ } -+ } -+ -+ if (t->last_in_message_list) { -+ cns3xxx_spi_tx_rx(spi->chip_select, 1, hw->tx[i], -+ &rx_data); -+ if (hw->rx) { -+ hw->rx[i] = rx_data; -+ dev_dbg(&spi->dev, -+ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", -+ i, hw->rx[i]); -+ } -+ } else { -+ cns3xxx_spi_tx_rx(spi->chip_select, 0, hw->tx[i], -+ &rx_data); -+ } -+ goto done; -+ } -+ -+ if (hw->rx) { -+ int i; -+ u32 rx_data; -+ for (i = 0; i < (hw->len - 1); i++) { -+ cns3xxx_spi_tx_rx(spi->chip_select, 0, 0xff, &rx_data); -+ hw->rx[i] = rx_data; -+ dev_dbg(&spi->dev, -+ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", i, -+ hw->rx[i]); -+ } -+ -+ if (t->last_in_message_list) { -+ cns3xxx_spi_tx_rx(spi->chip_select, 1, 0xff, &rx_data); -+ } else { -+ cns3xxx_spi_tx_rx(spi->chip_select, 0, 0xff, &rx_data); -+ } -+ hw->rx[i] = rx_data; -+ dev_dbg(&spi->dev, "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", -+ i, hw->rx[i]); -+ } -+done: -+ return hw->len; -+} -+ -+static void __init cns3xxx_spi_initial(void) -+{ -+ u32 __iomem *gpiob = __io(CNS3XXX_MISC_BASE_VIRT + 0x0018); -+ u32 gpiob_pins = __raw_readl(gpiob); -+ -+ /* MMC/SD pins share with GPIOA */ -+ gpiob_pins |= 0xf80; -+ __raw_writel(gpiob_pins, gpiob); -+ -+ /* share pin config. 
*/ -+ //PM_PLL_HM_PD_CTRL_REG &= ~(0x1 << 5); -+ //HAL_MISC_ENABLE_SPI_PINS(); -+ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SPI_PCM_I2C)); -+ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SPI_PCM_I2C)); -+ -+ SPI_CONFIGURATION_REG = (((0x0 & 0x3) << 0) | /* 8bits shift length */ -+ (0x0 << 9) | /* SPI mode */ -+ (0x0 << 10) | /* disable FIFO */ -+ (0x1 << 11) | /* SPI master mode */ -+ (0x0 << 12) | /* disable SPI loopback mode */ -+ (0x1 << 13) | /* clock phase */ -+ (0x1 << 14) | /* clock polarity */ -+ (0x0 << 24) | /* disable - SPI data swap */ -+ (0x1 << 29) | /* enable - 2IO Read mode */ -+ (0x0 << 30) | /* disable - SPI high speed read for system boot up */ -+ (0x0 << 31)); /* disable - SPI */ -+ -+ /* Set SPI bit rate PCLK/2 */ -+ SPI_BIT_RATE_CONTROL_REG = 0x1; -+ -+ /* Set SPI Tx channel 0 */ -+ SPI_TRANSMIT_CONTROL_REG = 0x0; -+ -+ /* Set Tx FIFO Threshold, Tx FIFO has 2 words */ -+ SPI_FIFO_TRANSMIT_CONFIG_REG &= ~(0x03 << 4); -+ SPI_FIFO_TRANSMIT_CONFIG_REG |= ((0x0 & 0x03) << 4); -+ -+ /* Set Rx FIFO Threshold, Rx FIFO has 2 words */ -+ SPI_FIFO_RECEIVE_CONFIG_REG &= ~(0x03 << 4); -+ SPI_FIFO_RECEIVE_CONFIG_REG |= ((0x0 & 0x03) << 4); -+ -+ /* Disable all interrupt */ -+ SPI_INTERRUPT_ENABLE_REG = 0x0; -+ -+ /* Clear spurious interrupt sources */ -+ SPI_INTERRUPT_STATUS_REG = (0x0F << 4); -+ -+ /* Enable SPI */ -+ SPI_CONFIGURATION_REG |= (0x1 << 31); -+ -+ return; -+} -+ -+static int __init cns3xxx_spi_probe(struct platform_device *pdev) -+{ -+ struct spi_master *master; -+ struct cns3xxx_spi *hw; -+ int err = 0; -+ -+ printk("%s: setup CNS3XXX SPI Controller\n", __FUNCTION__); -+ -+ /* Allocate master with space for cns3xxx_spi */ -+ master = spi_alloc_master(&pdev->dev, sizeof(struct cns3xxx_spi)); -+ if (master == NULL) { -+ dev_err(&pdev->dev, "No memory for spi_master\n"); -+ err = -ENOMEM; -+ goto err_nomem; -+ } -+ -+ hw = spi_master_get_devdata(master); -+ memset(hw, 0, sizeof(struct cns3xxx_spi)); -+ -+ hw->master = spi_master_get(master); -+ hw->dev = &pdev->dev; -+ -+ platform_set_drvdata(pdev, hw); -+ init_completion(&hw->done); -+ -+ /* setup the master state. 
*/ -+ -+ master->num_chipselect = 4; -+ master->bus_num = 1; -+ -+ /* setup the state for the bitbang driver */ -+ -+ hw->bitbang.master = hw->master; -+ hw->bitbang.setup_transfer = cns3xxx_spi_setup_transfer; -+ hw->bitbang.chipselect = cns3xxx_spi_chipselect; -+ hw->bitbang.txrx_bufs = cns3xxx_spi_txrx; -+ hw->bitbang.master->setup = cns3xxx_spi_setup; -+ -+ dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); -+ -+ /* SPI controller initializations */ -+ cns3xxx_spi_initial(); -+ -+ /* register SPI controller */ -+ -+ err = spi_bitbang_start(&hw->bitbang); -+ if (err) { -+ dev_err(&pdev->dev, "Failed to register SPI master\n"); -+ goto err_register; -+ } -+ -+ return 0; -+ -+err_register: -+ spi_master_put(hw->master);; -+ -+err_nomem: -+ return err; -+} -+ -+static int __devexit cns3xxx_spi_remove(struct platform_device *dev) -+{ -+ struct cns3xxx_spi *hw = platform_get_drvdata(dev); -+ -+ platform_set_drvdata(dev, NULL); -+ -+ spi_unregister_master(hw->master); -+ -+ spi_master_put(hw->master); -+ return 0; -+} -+ -+#ifdef CONFIG_PM -+ -+static int cns3xxx_spi_suspend(struct platform_device *pdev, pm_message_t msg) -+{ -+ struct cns3xxx_spi *hw = platform_get_drvdata(pdev); -+ -+ return 0; -+} -+ -+static int cns3xxx_spi_resume(struct platform_device *pdev) -+{ -+ struct cns3xxx_spi *hw = platform_get_drvdata(pdev); -+ -+ return 0; -+} -+ -+#else -+#define cns3xxx_spi_suspend NULL -+#define cns3xxx_spi_resume NULL -+#endif -+ -+static struct platform_driver cns3xxx_spi_driver = { -+ .probe = cns3xxx_spi_probe, -+ .remove = __devexit_p(cns3xxx_spi_remove), -+ .suspend = cns3xxx_spi_suspend, -+ .resume = cns3xxx_spi_resume, -+ .driver = { -+ .name = "cns3xxx_spi", -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init cns3xxx_spi_init(void) -+{ -+ return platform_driver_register(&cns3xxx_spi_driver); -+} -+ -+static void __exit cns3xxx_spi_exit(void) -+{ -+ platform_driver_unregister(&cns3xxx_spi_driver); -+} -+ -+module_init(cns3xxx_spi_init); -+module_exit(cns3xxx_spi_exit); -+ -+MODULE_AUTHOR("Cavium Networks"); -+MODULE_DESCRIPTION("CNS3XXX SPI Controller Driver"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("platform:cns3xxx_spi"); -+ -+EXPORT_SYMBOL_GPL(cns3xxx_spi_tx_rx); ---- a/include/linux/spi/spi.h -+++ b/include/linux/spi/spi.h -@@ -446,6 +446,13 @@ struct spi_transfer { - u32 speed_hz; - - struct list_head transfer_list; -+ -+#ifdef CONFIG_ARCH_CNS3XXX -+ unsigned last_in_message_list; -+#ifdef CONFIG_SPI_CNS3XXX_2IOREAD -+ u8 dio_read; -+#endif -+#endif - }; - - /** ---- a/drivers/spi/spi-bitbang.c -+++ b/drivers/spi/spi-bitbang.c -@@ -329,6 +329,12 @@ static void bitbang_work(struct work_str - */ - if (!m->is_dma_mapped) - t->rx_dma = t->tx_dma = 0; -+ -+ if (t->transfer_list.next == &m->transfers) -+ t->last_in_message_list = 1; -+ else -+ t->last_in_message_list = 0; -+ - status = bitbang->txrx_bufs(spi, t); - } - if (status > 0) diff --git a/target/linux/cns3xxx/patches-3.1/054-cns3xxx_pcie_clock.patch b/target/linux/cns3xxx/patches-3.1/054-cns3xxx_pcie_clock.patch deleted file mode 100644 index 0c6c525039..0000000000 --- a/target/linux/cns3xxx/patches-3.1/054-cns3xxx_pcie_clock.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/arch/arm/mach-cns3xxx/pcie.c -+++ b/arch/arm/mach-cns3xxx/pcie.c -@@ -378,8 +378,6 @@ static int __init cns3xxx_pcie_init(void - for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) { - iotable_init(cns3xxx_pcie[i].cfg_bases, - ARRAY_SIZE(cns3xxx_pcie[i].cfg_bases)); -- cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_PCIE(i)); -- cns3xxx_pwr_soft_rst(0x1 << 
PM_SOFT_RST_REG_OFFST_PCIE(i)); - cns3xxx_pcie_check_link(&cns3xxx_pcie[i]); - cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]); - pci_common_init(&cns3xxx_pcie[i].hw_pci); diff --git a/target/linux/cns3xxx/patches-3.1/100-laguna_support.patch b/target/linux/cns3xxx/patches-3.1/100-laguna_support.patch deleted file mode 100644 index 12fe8005e9..0000000000 --- a/target/linux/cns3xxx/patches-3.1/100-laguna_support.patch +++ /dev/null @@ -1,992 +0,0 @@ ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/laguna.c -@@ -0,0 +1,761 @@ -+/* -+ * Gateworks Corporation Laguna Platform -+ * -+ * Copyright 2000 Deep Blue Solutions Ltd -+ * Copyright 2008 ARM Limited -+ * Copyright 2008 Cavium Networks -+ * Scott Shu -+ * Copyright 2010 MontaVista Software, LLC. -+ * Anton Vorontsov -+ * Copyright 2011 Gateworks Corporation -+ * Chris Lang -+ * -+ * This file is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License, Version 2, as -+ * published by the Free Software Foundation. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "core.h" -+#include "devices.h" -+ -+// Config 1 Bitmap -+#define ETH0_LOAD BIT(0) -+#define ETH1_LOAD BIT(1) -+#define ETH2_LOAD BIT(2) -+#define SATA0_LOAD BIT(3) -+#define SATA1_LOAD BIT(4) -+#define PCM_LOAD BIT(5) -+#define I2S_LOAD BIT(6) -+#define SPI0_LOAD BIT(7) -+#define SPI1_LOAD BIT(8) -+#define PCIE0_LOAD BIT(9) -+#define PCIE1_LOAD BIT(10) -+#define USB0_LOAD BIT(11) -+#define USB1_LOAD BIT(12) -+#define USB1_ROUTE BIT(13) -+#define SD_LOAD BIT(14) -+#define UART0_LOAD BIT(15) -+#define UART1_LOAD BIT(16) -+#define UART2_LOAD BIT(17) -+#define MPCI0_LOAD BIT(18) -+#define MPCI1_LOAD BIT(19) -+#define MPCI2_LOAD BIT(20) -+#define MPCI3_LOAD BIT(21) -+#define FP_BUT_LOAD BIT(22) -+#define FP_BUT_HEADER_LOAD BIT(23) -+#define FP_LED_LOAD BIT(24) -+#define FP_LED_HEADER_LOAD BIT(25) -+#define FP_TAMPER_LOAD BIT(26) -+#define HEADER_33V_LOAD BIT(27) -+#define SATA_POWER_LOAD BIT(28) -+#define FP_POWER_LOAD BIT(29) -+#define GPIO_HEADER_LOAD BIT(30) -+#define GSP_BAT_LOAD BIT(31) -+ -+// Config 2 Bitmap -+#define FAN_LOAD BIT(0) -+#define SPI_FLASH_LOAD BIT(1) -+#define NOR_FLASH_LOAD BIT(2) -+#define GPS_LOAD BIT(3) -+#define SUPPLY_5V_LOAD BIT(6) -+#define SUPPLY_33V_LOAD BIT(7) -+ -+struct laguna_board_info { -+ char model[16]; -+ u32 config_bitmap; -+ u32 config2_bitmap; -+ u8 nor_flash_size; -+ u8 spi_flash_size; -+}; -+ -+static struct laguna_board_info laguna_info __initdata; -+ -+/* -+ * NOR Flash -+ */ -+static struct mtd_partition laguna_nor_partitions[] = { -+ { -+ .name = "uboot", -+ .size = SZ_256K, -+ .offset = 0, -+ .mask_flags = MTD_WRITEABLE, -+ }, { -+ .name = "params", -+ .size = SZ_128K, -+ .offset = SZ_256K, -+ }, { -+ .name = "kernel", -+ .size = SZ_2M, -+ .offset = SZ_256K + SZ_128K, -+ }, { -+ .name = "rootfs", -+ .size = SZ_16M - SZ_256K - SZ_128K - SZ_2M, -+ .offset = SZ_256K + SZ_128K + SZ_2M, -+ }, -+}; -+ -+static struct physmap_flash_data laguna_nor_pdata = { -+ .width = 2, -+ .parts = laguna_nor_partitions, -+ .nr_parts = ARRAY_SIZE(laguna_nor_partitions), -+}; -+ -+static struct resource laguna_nor_res = { -+ .start = CNS3XXX_FLASH_BASE, -+ .end = CNS3XXX_FLASH_BASE + SZ_128M - 1, -+ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, -+}; -+ 
-+static struct platform_device laguna_nor_pdev = { -+ .name = "physmap-flash", -+ .id = 0, -+ .resource = &laguna_nor_res, -+ .num_resources = 1, -+ .dev = { -+ .platform_data = &laguna_nor_pdata, -+ }, -+}; -+ -+/* -+ * SPI -+ */ -+static struct mtd_partition laguna_spi_partitions[] = { -+ { -+ .name = "uboot", -+ .size = SZ_256K, -+ .offset = 0, -+ .mask_flags = MTD_WRITEABLE, -+ }, { -+ .name = "params", -+ .size = SZ_256K, -+ .offset = SZ_256K, -+ }, { -+ .name = "kernel", -+ .size = SZ_1M + SZ_512K, -+ .offset = SZ_512K, -+ }, { -+ .name = "rootfs", -+ .size = SZ_16M - SZ_2M, -+ .offset = SZ_2M, -+ }, -+}; -+ -+static struct flash_platform_data laguna_spi_pdata = { -+ .parts = laguna_spi_partitions, -+ .nr_parts = ARRAY_SIZE(laguna_spi_partitions), -+}; -+ -+static struct spi_board_info __initdata laguna_spi_devices[] = { -+ { -+ .modalias = "m25p80", -+ .platform_data = &laguna_spi_pdata, -+ .max_speed_hz = 50000000, -+ .bus_num = 1, -+ .chip_select = 0, -+ }, -+}; -+ -+static struct platform_device laguna_spi_controller = { -+ .name = "cns3xxx_spi", -+}; -+ -+/* -+ * LED's -+ */ -+static struct gpio_led laguna_gpio_leds[] = { -+ { -+ .name = "user1", /* Green Led */ -+ .gpio = 115, -+ .active_low = 1, -+ },{ -+ .name = "user2", /* Red Led */ -+ .gpio = 114, -+ .active_low = 1, -+ },{ -+ .name = "pwr1", /* Green Led */ -+ .gpio = 116, -+ .active_low = 1, -+ },{ -+ .name = "pwr2", /* Yellow Led */ -+ .gpio = 117, -+ .active_low = 1, -+ },{ -+ .name = "txd1", /* Green Led */ -+ .gpio = 118, -+ .active_low = 1, -+ },{ -+ .name = "txd2", /* Yellow Led */ -+ .gpio = 119, -+ .active_low = 1, -+ },{ -+ .name = "rxd1", /* Green Led */ -+ .gpio = 120, -+ .active_low = 1, -+ },{ -+ .name = "rxd2", /* Yellow Led */ -+ .gpio = 121, -+ .active_low = 1, -+ },{ -+ .name = "ser1", /* Green Led */ -+ .gpio = 122, -+ .active_low = 1, -+ },{ -+ .name = "ser2", /* Yellow Led */ -+ .gpio = 123, -+ .active_low = 1, -+ },{ -+ .name = "enet1", /* Green Led */ -+ .gpio = 124, -+ .active_low = 1, -+ },{ -+ .name = "enet2", /* Yellow Led */ -+ .gpio = 125, -+ .active_low = 1, -+ },{ -+ .name = "sig1_1", /* Green Led */ -+ .gpio = 126, -+ .active_low = 1, -+ },{ -+ .name = "sig1_2", /* Yellow Led */ -+ .gpio = 127, -+ .active_low = 1, -+ },{ -+ .name = "sig2_1", /* Green Led */ -+ .gpio = 128, -+ .active_low = 1, -+ },{ -+ .name = "sig2_2", /* Yellow Led */ -+ .gpio = 129, -+ .active_low = 1, -+ },{ -+ .name = "sig3_1", /* Green Led */ -+ .gpio = 130, -+ .active_low = 1, -+ },{ -+ .name = "sig3_2", /* Yellow Led */ -+ .gpio = 131, -+ .active_low = 1, -+ },{ -+ .name = "net1", /*Green Led */ -+ .gpio = 109, -+ .active_low = 1, -+ },{ -+ .name = "net2", /* Red Led */ -+ .gpio = 110, -+ .active_low = 1, -+ },{ -+ .name = "mod1", /* Green Led */ -+ .gpio = 111, -+ .active_low = 1, -+ },{ -+ .name = "mod2", /* Red Led */ -+ .gpio = 112, -+ .active_low = 1, -+ }, -+}; -+ -+static struct gpio_led_platform_data laguna_gpio_leds_data = { -+ .num_leds = 22, -+ .leds = laguna_gpio_leds, -+}; -+ -+static struct platform_device laguna_gpio_leds_device = { -+ .name = "leds-gpio", -+ .id = -1, -+ .dev.platform_data = &laguna_gpio_leds_data, -+}; -+ -+/* -+ * Ethernet -+ */ -+static struct cns3xxx_plat_info laguna_net_data = { -+ .ports = 0, -+ .phy = { -+ 0, -+ 1, -+ 2, -+ }, -+}; -+ -+static struct platform_device laguna_net_device = { -+ .name = "cns3xxx_eth", -+ .id = 0, -+ .dev.platform_data = &laguna_net_data, -+}; -+ -+/* -+ * UART -+ */ -+static void __init laguna_early_serial_setup(void) -+{ -+#ifdef 
CONFIG_SERIAL_8250_CONSOLE -+ static struct uart_port laguna_serial_port = { -+ .membase = (void __iomem *)CNS3XXX_UART0_BASE_VIRT, -+ .mapbase = CNS3XXX_UART0_BASE, -+ .irq = IRQ_CNS3XXX_UART0, -+ .iotype = UPIO_MEM, -+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE, -+ .regshift = 2, -+ .uartclk = 24000000, -+ .line = 0, -+ .type = PORT_16550A, -+ .fifosize = 16, -+ }; -+ -+ early_serial_setup(&laguna_serial_port); -+#endif -+} -+ -+static struct resource laguna_uart_resources[] = { -+ { -+ .start = CNS3XXX_UART0_BASE, -+ .end = CNS3XXX_UART0_BASE + SZ_4K - 1, -+ .flags = IORESOURCE_MEM -+ },{ -+ .start = CNS3XXX_UART2_BASE, -+ .end = CNS3XXX_UART2_BASE + SZ_4K - 1, -+ .flags = IORESOURCE_MEM -+ },{ -+ .start = CNS3XXX_UART2_BASE, -+ .end = CNS3XXX_UART2_BASE + SZ_4K - 1, -+ .flags = IORESOURCE_MEM -+ }, -+}; -+ -+static struct plat_serial8250_port laguna_uart_data[] = { -+ { -+ .membase = (char*) (CNS3XXX_UART0_BASE_VIRT), -+ .mapbase = (CNS3XXX_UART0_BASE), -+ .irq = IRQ_CNS3XXX_UART0, -+ .iotype = UPIO_MEM, -+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, -+ .regshift = 2, -+ .uartclk = 24000000, -+ .type = PORT_16550A, -+ },{ -+ .membase = (char*) (CNS3XXX_UART1_BASE_VIRT), -+ .mapbase = (CNS3XXX_UART1_BASE), -+ .irq = IRQ_CNS3XXX_UART1, -+ .iotype = UPIO_MEM, -+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, -+ .regshift = 2, -+ .uartclk = 24000000, -+ .type = PORT_16550A, -+ },{ -+ .membase = (char*) (CNS3XXX_UART2_BASE_VIRT), -+ .mapbase = (CNS3XXX_UART2_BASE), -+ .irq = IRQ_CNS3XXX_UART2, -+ .iotype = UPIO_MEM, -+ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, -+ .regshift = 2, -+ .uartclk = 24000000, -+ .type = PORT_16550A, -+ }, -+}; -+ -+static struct platform_device laguna_uart = { -+ .name = "serial8250", -+ .id = PLAT8250_DEV_PLATFORM, -+ .dev.platform_data = laguna_uart_data, -+ .num_resources = 3, -+ .resource = laguna_uart_resources -+}; -+ -+/* -+ * USB -+ */ -+static struct resource cns3xxx_usb_ehci_resources[] = { -+ [0] = { -+ .start = CNS3XXX_USB_BASE, -+ .end = CNS3XXX_USB_BASE + SZ_16M - 1, -+ .flags = IORESOURCE_MEM, -+ }, -+ [1] = { -+ .start = IRQ_CNS3XXX_USB_EHCI, -+ .flags = IORESOURCE_IRQ, -+ }, -+}; -+ -+static u64 cns3xxx_usb_ehci_dma_mask = DMA_BIT_MASK(32); -+ -+static struct platform_device cns3xxx_usb_ehci_device = { -+ .name = "cns3xxx-ehci", -+ .num_resources = ARRAY_SIZE(cns3xxx_usb_ehci_resources), -+ .resource = cns3xxx_usb_ehci_resources, -+ .dev = { -+ .dma_mask = &cns3xxx_usb_ehci_dma_mask, -+ .coherent_dma_mask = DMA_BIT_MASK(32), -+ }, -+}; -+ -+static struct resource cns3xxx_usb_ohci_resources[] = { -+ [0] = { -+ .start = CNS3XXX_USB_OHCI_BASE, -+ .end = CNS3XXX_USB_OHCI_BASE + SZ_16M - 1, -+ .flags = IORESOURCE_MEM, -+ }, -+ [1] = { -+ .start = IRQ_CNS3XXX_USB_OHCI, -+ .flags = IORESOURCE_IRQ, -+ }, -+}; -+ -+static u64 cns3xxx_usb_ohci_dma_mask = DMA_BIT_MASK(32); -+ -+static struct platform_device cns3xxx_usb_ohci_device = { -+ .name = "cns3xxx-ohci", -+ .num_resources = ARRAY_SIZE(cns3xxx_usb_ohci_resources), -+ .resource = cns3xxx_usb_ohci_resources, -+ .dev = { -+ .dma_mask = &cns3xxx_usb_ohci_dma_mask, -+ .coherent_dma_mask = DMA_BIT_MASK(32), -+ }, -+}; -+ -+static struct resource cns3xxx_usb_otg_resources[] = { -+ [0] = { -+ .start = CNS3XXX_USBOTG_BASE, -+ .end = CNS3XXX_USBOTG_BASE + SZ_16M - 1, -+ .flags = IORESOURCE_MEM, -+ }, -+ [1] = { -+ .start = IRQ_CNS3XXX_USB_OTG, -+ .flags = IORESOURCE_IRQ, -+ }, -+}; -+ -+static u64 cns3xxx_usb_otg_dma_mask = DMA_BIT_MASK(32); -+ -+static 
struct platform_device cns3xxx_usb_otg_device = { -+ .name = "dwc_otg", -+ .num_resources = ARRAY_SIZE(cns3xxx_usb_otg_resources), -+ .resource = cns3xxx_usb_otg_resources, -+ .dev = { -+ .dma_mask = &cns3xxx_usb_otg_dma_mask, -+ .coherent_dma_mask = DMA_BIT_MASK(32), -+ }, -+}; -+ -+/* -+ * I2C -+ */ -+static struct resource laguna_i2c_resource[] = { -+ { -+ .start = CNS3XXX_SSP_BASE + 0x20, -+ .end = 0x7100003f, -+ .flags = IORESOURCE_MEM, -+ },{ -+ .start = IRQ_CNS3XXX_I2C, -+ .flags = IORESOURCE_IRQ, -+ }, -+}; -+ -+static struct platform_device laguna_i2c_controller = { -+ .name = "cns3xxx-i2c", -+ .num_resources = 2, -+ .resource = laguna_i2c_resource, -+}; -+ -+static struct memory_accessor *at24_mem_acc; -+ -+static void at24_setup(struct memory_accessor *mem_acc, void *context) -+{ -+ char buf[8]; -+ -+ at24_mem_acc = mem_acc; -+ -+ /* Read MAC addresses */ -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x100, 6) == 6) -+ memcpy(&laguna_net_data.hwaddr[0], buf, ETH_ALEN); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x106, 6) == 6) -+ memcpy(&laguna_net_data.hwaddr[1], buf, ETH_ALEN); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x10C, 6) == 6) -+ memcpy(&laguna_net_data.hwaddr[2], buf, ETH_ALEN); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x112, 6) == 6) -+ memcpy(&laguna_net_data.hwaddr[3], buf, ETH_ALEN); -+ -+ /* Read out Model Information */ -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x130, 16) == 16) -+ memcpy(&laguna_info.model, buf, 16); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x140, 1) == 1) -+ memcpy(&laguna_info.nor_flash_size, buf, 1); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x141, 1) == 1) -+ memcpy(&laguna_info.spi_flash_size, buf, 1); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x142, 4) == 4) -+ memcpy(&laguna_info.config_bitmap, buf, 4); -+ if (at24_mem_acc->read(at24_mem_acc, buf, 0x146, 4) == 4) -+ memcpy(&laguna_info.config2_bitmap, buf, 4); -+}; -+ -+static struct at24_platform_data laguna_eeprom_info = { -+ .byte_len = 1024, -+ .page_size = 16, -+ .flags = AT24_FLAG_READONLY, -+ .setup = at24_setup, -+}; -+ -+static struct pca953x_platform_data laguna_pca_data = { -+ .gpio_base = 100, -+ .irq_base = -1, -+}; -+ -+static struct pca953x_platform_data laguna_pca2_data = { -+ .gpio_base = 116, -+ .irq_base = -1, -+}; -+ -+static struct i2c_board_info __initdata laguna_i2c_devices[] = { -+ { -+ I2C_BOARD_INFO("pca9555", 0x23), -+ .platform_data = &laguna_pca_data, -+ },{ -+ I2C_BOARD_INFO("pca9555", 0x27), -+ .platform_data = &laguna_pca2_data, -+ },{ -+ I2C_BOARD_INFO("gsp", 0x29), -+ },{ -+ I2C_BOARD_INFO ("24c08",0x50), -+ .platform_data = &laguna_eeprom_info, -+ },{ -+ I2C_BOARD_INFO("ds1672", 0x68), -+ }, -+}; -+ -+/* -+ * Watchdog -+ */ -+ -+static struct resource laguna_watchdog_resources[] = { -+ [0] = { -+ .start = CNS3XXX_TC11MP_TWD_BASE, -+ .end = CNS3XXX_TC11MP_TWD_BASE + SZ_4K - 1, -+ .flags = IORESOURCE_MEM, -+ }, -+ [1] = { -+ .start = IRQ_LOCALWDOG, -+ .end = IRQ_LOCALWDOG, -+ .flags = IORESOURCE_IRQ, -+ } -+}; -+ -+static struct platform_device laguna_watchdog = { -+ .name = "mpcore_wdt", -+ .id = -1, -+ .num_resources = ARRAY_SIZE(laguna_watchdog_resources), -+ .resource = laguna_watchdog_resources, -+}; -+ -+/* -+ * Initialization -+ */ -+ -+static void __init laguna_init(void) -+{ -+ cns3xxx_l2x0_init(); -+ -+ platform_device_register(&laguna_watchdog); -+ -+ platform_device_register(&laguna_i2c_controller); -+ -+ i2c_register_board_info(0, laguna_i2c_devices, -+ ARRAY_SIZE(laguna_i2c_devices)); -+ -+ pm_power_off = 
cns3xxx_power_off; -+} -+ -+static struct map_desc laguna_io_desc[] __initdata = { -+ { -+ .virtual = CNS3XXX_UART0_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_UART0_BASE), -+ .length = SZ_4K, -+ .type = MT_DEVICE, -+ },{ -+ .virtual = CNS3XXX_UART1_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_UART1_BASE), -+ .length = SZ_4K, -+ .type = MT_DEVICE, -+ },{ -+ .virtual = CNS3XXX_UART2_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_UART2_BASE), -+ .length = SZ_4K, -+ .type = MT_DEVICE, -+ }, -+}; -+ -+static void __init laguna_map_io(void) -+{ -+ cns3xxx_map_io(); -+ iotable_init(laguna_io_desc, ARRAY_SIZE(laguna_io_desc)); -+ laguna_early_serial_setup(); -+} -+ -+static int __init laguna_model_setup(void) -+{ -+ u32 __iomem *mem; -+ u32 reg; -+ u8 pcie_bitmap = 0; -+ -+ printk("Running on Gateworks Laguna %s\n", laguna_info.model); -+ -+ if (strncmp(laguna_info.model, "GW", 2) == 0) { -+ if (laguna_info.config_bitmap & ETH0_LOAD) -+ laguna_net_data.ports |= BIT(0); -+ if (laguna_info.config_bitmap & ETH1_LOAD) -+ laguna_net_data.ports |= BIT(1); -+ if (laguna_info.config_bitmap & ETH2_LOAD) -+ laguna_net_data.ports |= BIT(2); -+ if (laguna_net_data.ports) -+ platform_device_register(&laguna_net_device); -+ -+ if ((laguna_info.config_bitmap & SATA0_LOAD) || -+ (laguna_info.config_bitmap & SATA1_LOAD)) -+ cns3xxx_ahci_init(); -+ -+ if (laguna_info.config_bitmap & (PCIE0_LOAD)) -+ pcie_bitmap |= 0x1; -+ -+ if (laguna_info.config_bitmap & (PCIE1_LOAD)) -+ pcie_bitmap |= 0x2; -+ -+ cns3xxx_pcie_init(pcie_bitmap); -+ -+ if (laguna_info.config_bitmap & (USB0_LOAD)) { -+ cns3xxx_pwr_power_up(1 << PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_USB); -+ -+ /* DRVVBUS pins share with GPIOA */ -+ mem = (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0014); -+ reg = __raw_readl(mem); -+ reg |= 0x8; -+ __raw_writel(reg, mem); -+ -+ /* Enable OTG */ -+ mem = (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0808); -+ reg = __raw_readl(mem); -+ reg &= ~(1 << 10); -+ __raw_writel(reg, mem); -+ -+ platform_device_register(&cns3xxx_usb_otg_device); -+ } -+ -+ if (laguna_info.config_bitmap & (USB1_LOAD)) { -+ platform_device_register(&cns3xxx_usb_ehci_device); -+ platform_device_register(&cns3xxx_usb_ohci_device); -+ } -+ -+ if (laguna_info.config_bitmap & (SD_LOAD)) -+ cns3xxx_sdhci_init(); -+ -+ if (laguna_info.config_bitmap & (UART0_LOAD)) -+ laguna_uart.num_resources = 1; -+ if (laguna_info.config_bitmap & (UART1_LOAD)) -+ laguna_uart.num_resources = 2; -+ if (laguna_info.config_bitmap & (UART2_LOAD)) -+ laguna_uart.num_resources = 3; -+ platform_device_register(&laguna_uart); -+ -+ if (laguna_info.config2_bitmap & (NOR_FLASH_LOAD)) { -+ switch (laguna_info.nor_flash_size) { -+ case 1: -+ laguna_nor_partitions[3].size = SZ_8M - SZ_256K - SZ_128K - SZ_2M; -+ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_8M - 1; -+ break; -+ case 2: -+ laguna_nor_partitions[3].size = SZ_16M - SZ_256K - SZ_128K - SZ_2M; -+ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_16M - 1; -+ break; -+ case 3: -+ laguna_nor_partitions[3].size = SZ_32M - SZ_256K - SZ_128K - SZ_2M; -+ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_32M - 1; -+ break; -+ case 4: -+ laguna_nor_partitions[3].size = SZ_64M - SZ_256K - SZ_128K - SZ_2M; -+ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_64M - 1; -+ break; -+ case 5: -+ laguna_nor_partitions[3].size = SZ_128M - SZ_256K - SZ_128K - SZ_2M; -+ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_128M - 1; -+ break; -+ } -+ platform_device_register(&laguna_nor_pdev); -+ } -+ -+ if (laguna_info.config2_bitmap & (SPI_FLASH_LOAD)) { -+ switch 
(laguna_info.spi_flash_size) { -+ case 1: -+ laguna_spi_partitions[3].size = SZ_4M - SZ_2M; -+ break; -+ case 2: -+ laguna_spi_partitions[3].size = SZ_8M - SZ_2M; -+ break; -+ case 3: -+ laguna_spi_partitions[3].size = SZ_16M - SZ_2M; -+ break; -+ case 4: -+ laguna_spi_partitions[3].size = SZ_32M - SZ_2M; -+ break; -+ case 5: -+ laguna_spi_partitions[3].size = SZ_64M - SZ_2M; -+ break; -+ } -+ spi_register_board_info(laguna_spi_devices, ARRAY_SIZE(laguna_spi_devices)); -+ } -+ -+ if ((laguna_info.config_bitmap & SPI0_LOAD) || -+ (laguna_info.config_bitmap & SPI1_LOAD)) -+ platform_device_register(&laguna_spi_controller); -+ -+ /* -+ * Do any model specific setup not known by the bitmap by matching -+ * the first 6 characters of the model name -+ */ -+ -+ if (strncmp(laguna_info.model, "GW2388", 6) == 0) { -+ laguna_gpio_leds_data.num_leds = 2; -+ } else if (strncmp(laguna_info.model, "GW2380", 6) == 0) { -+ laguna_gpio_leds[0].gpio = 107; -+ laguna_gpio_leds[1].gpio = 106; -+ laguna_gpio_leds_data.num_leds = 2; -+ } -+ platform_device_register(&laguna_gpio_leds_device); -+ } else { -+ // Do some defaults here, not sure what yet -+ } -+ return 0; -+} -+ -+late_initcall(laguna_model_setup); -+ -+MACHINE_START(GW2388, "Gateworks Corporation Laguna Platform") -+ .boot_params = 0x00000100, -+ .map_io = laguna_map_io, -+ .init_irq = cns3xxx_init_irq, -+ .timer = &cns3xxx_timer, -+ .init_machine = laguna_init, -+MACHINE_END ---- a/arch/arm/mach-cns3xxx/Kconfig -+++ b/arch/arm/mach-cns3xxx/Kconfig -@@ -11,4 +11,14 @@ config MACH_CNS3420VB - This is a platform with an on-board ARM11 MPCore and has support - for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc. - -+config MACH_GW2388 -+ bool "Support for Gateworks Laguna Platform" -+ select HAVE_ARM_SCU if SMP -+ select MIGHT_HAVE_PCI -+ help -+ Include support for the Gateworks Laguna Platform -+ -+ This is a platform with an on-board ARM11 MPCore and has support -+ for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, I2C, GIG, etc. 
-+ - endmenu ---- a/arch/arm/mach-cns3xxx/core.c -+++ b/arch/arm/mach-cns3xxx/core.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - #include - #include - #include "core.h" -@@ -61,11 +62,24 @@ static struct map_desc cns3xxx_io_desc[] - .pfn = __phys_to_pfn(CNS3XXX_PM_BASE), - .length = SZ_4K, - .type = MT_DEVICE, -+ }, { -+ .virtual = CNS3XXX_SWITCH_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_SWITCH_BASE), -+ .length = SZ_4K, -+ .type = MT_DEVICE, -+ }, { -+ .virtual = CNS3XXX_SSP_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_SSP_BASE), -+ .length = SZ_4K, -+ .type = MT_DEVICE, - }, - }; - - void __init cns3xxx_map_io(void) - { -+#ifdef CONFIG_LOCAL_TIMERS -+ twd_base = (void __iomem *) CNS3XXX_TC11MP_TWD_BASE_VIRT; -+#endif - iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc)); - } - ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -329,6 +329,7 @@ config ARCH_CLPS711X - config ARCH_CNS3XXX - bool "Cavium Networks CNS3XXX family" - select CPU_V6K -+ select ARCH_WANT_OPTIONAL_GPIOLIB - select GENERIC_CLOCKEVENTS - select ARM_GIC - select MIGHT_HAVE_PCI ---- /dev/null -+++ b/arch/arm/mach-cns3xxx/include/mach/gpio.h -@@ -0,0 +1,98 @@ -+/* -+ * arch/arm/mach-cns3xxx/include/mach/gpio.h -+ * -+ * CNS3xxx GPIO wrappers for arch-neutral GPIO calls -+ * -+ * Copyright 2011 Gateworks Corporation -+ * Chris Lang -+ * -+ * Based on IXP implementation by Milan Svoboda -+ * Based on PXA implementation by Philipp Zabel -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -+ * -+ */ -+ -+#ifndef __ASM_ARCH_CNS3XXX_GPIO_H -+#define __ASM_ARCH_CNS3XXX_GPIO_H -+ -+#include -+#include -+#include -+#include /* cansleep wrappers */ -+ -+#define NR_BUILTIN_GPIO 64 -+ -+#define CNS3XXX_GPIO_IN 0x0 -+#define CNS3XXX_GPIO_OUT 0x1 -+ -+#define CNS3XXX_GPIO_LO 0 -+#define CNS3XXX_GPIO_HI 1 -+ -+#define CNS3XXX_GPIO_OUTPUT 0x00 -+#define CNS3XXX_GPIO_INPUT 0x04 -+#define CNS3XXX_GPIO_DIR 0x08 -+#define CNS3XXX_GPIO_SET 0x10 -+#define CNS3XXX_GPIO_CLEAR 0x14 -+ -+static inline void gpio_line_get(u8 line, int *value) -+{ -+ if (line < 32) -+ *value = ((__raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_INPUT) >> line) & 0x1); -+ else -+ *value = ((__raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_INPUT) >> (line - 32)) & 0x1); -+} -+ -+static inline void gpio_line_set(u8 line, int value) -+{ -+ if (line < 32) { -+ if (value) -+ __raw_writel((1 << line), CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_SET); -+ else -+ __raw_writel((1 << line), CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_CLEAR); -+ } else { -+ if (value) -+ __raw_writel((1 << line), CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_SET); -+ else -+ __raw_writel((1 << line), CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_CLEAR); -+ } -+} -+ -+static inline int gpio_get_value(unsigned gpio) -+{ -+ if (gpio < NR_BUILTIN_GPIO) -+ { -+ int value; -+ gpio_line_get(gpio, &value); -+ return value; -+ } -+ else -+ return __gpio_get_value(gpio); -+} -+ -+static inline void gpio_set_value(unsigned gpio, int value) -+{ -+ if (gpio < NR_BUILTIN_GPIO) -+ gpio_line_set(gpio, value); -+ else -+ __gpio_set_value(gpio, value); -+} -+ -+#define gpio_cansleep __gpio_cansleep -+ -+extern int gpio_to_irq(int gpio); -+extern int irq_to_gpio(int gpio); -+ -+#endif ---- a/arch/arm/mach-cns3xxx/Makefile -+++ b/arch/arm/mach-cns3xxx/Makefile -@@ -1,6 +1,7 @@ - obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o devices.o - obj-$(CONFIG_PCI) += pcie.o - obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o -+obj-$(CONFIG_MACH_GW2388) += laguna.o - obj-$(CONFIG_SMP) += platsmp.o headsmp.o - obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o - obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o ---- a/arch/arm/mach-cns3xxx/pcie.c -+++ b/arch/arm/mach-cns3xxx/pcie.c -@@ -365,7 +365,7 @@ static int cns3xxx_pcie_abort_handler(un - return 0; - } - --static int __init cns3xxx_pcie_init(void) -+int cns3xxx_pcie_init(u8 bitmap) - { - int i; - -@@ -376,6 +376,9 @@ static int __init cns3xxx_pcie_init(void - "imprecise external abort"); - - for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) { -+ if (!(bitmap & (1 << i))) -+ continue; -+ - iotable_init(cns3xxx_pcie[i].cfg_bases, - ARRAY_SIZE(cns3xxx_pcie[i].cfg_bases)); - cns3xxx_pcie_check_link(&cns3xxx_pcie[i]); -@@ -387,4 +390,3 @@ static int __init cns3xxx_pcie_init(void - - return 0; - } --device_initcall(cns3xxx_pcie_init); ---- a/arch/arm/mach-cns3xxx/cns3420vb.c -+++ b/arch/arm/mach-cns3xxx/cns3420vb.c -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include "core.h" - #include "devices.h" - -@@ -198,6 +199,8 @@ static void __init cns3420_init(void) - cns3xxx_ahci_init(); - cns3xxx_sdhci_init(); - -+ cns3xxx_pcie_init(0x3); -+ - pm_power_off = cns3xxx_power_off; - } - ---- a/arch/arm/mach-cns3xxx/core.h -+++ b/arch/arm/mach-cns3xxx/core.h -@@ -12,6 +12,7 @@ - #define __CNS3XXX_CORE_H - - extern struct sys_timer cns3xxx_timer; -+extern 
int cns3xxx_pcie_init(u8 bitmap); - - #ifdef CONFIG_CACHE_L2X0 - void __init cns3xxx_l2x0_init(void); diff --git a/target/linux/cns3xxx/patches-3.1/101-laguna_sdhci_card_detect.patch b/target/linux/cns3xxx/patches-3.1/101-laguna_sdhci_card_detect.patch deleted file mode 100644 index 14802706e4..0000000000 --- a/target/linux/cns3xxx/patches-3.1/101-laguna_sdhci_card_detect.patch +++ /dev/null @@ -1,16 +0,0 @@ ---- a/drivers/mmc/host/sdhci-cns3xxx.c -+++ b/drivers/mmc/host/sdhci-cns3xxx.c -@@ -88,10 +88,11 @@ static struct sdhci_pltfm_data sdhci_cns - .ops = &sdhci_cns3xxx_ops, - .quirks = SDHCI_QUIRK_BROKEN_DMA | - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | -- SDHCI_QUIRK_INVERTED_WRITE_PROTECT | -+ //SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | -- SDHCI_QUIRK_NONSTANDARD_CLOCK, -+ SDHCI_QUIRK_NONSTANDARD_CLOCK | -+ SDHCI_QUIRK_BROKEN_CARD_DETECTION, - }; - - static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev) diff --git a/target/linux/cns3xxx/patches-3.1/102-cns3xxx_timers.patch b/target/linux/cns3xxx/patches-3.1/102-cns3xxx_timers.patch deleted file mode 100644 index dddb85a4db..0000000000 --- a/target/linux/cns3xxx/patches-3.1/102-cns3xxx_timers.patch +++ /dev/null @@ -1,109 +0,0 @@ ---- a/arch/arm/mach-cns3xxx/core.c -+++ b/arch/arm/mach-cns3xxx/core.c -@@ -118,12 +118,13 @@ static void cns3xxx_timer_set_mode(enum - - switch (mode) { - case CLOCK_EVT_MODE_PERIODIC: -- reload = pclk * 20 / (3 * HZ) * 0x25000; -+ reload = pclk * 1000000 / HZ; - writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); - ctrl |= (1 << 0) | (1 << 2) | (1 << 9); - break; - case CLOCK_EVT_MODE_ONESHOT: - /* period set, and timer enabled in 'next_event' hook */ -+ writel(0, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); - ctrl |= (1 << 2) | (1 << 9); - break; - case CLOCK_EVT_MODE_UNUSED: -@@ -148,11 +149,11 @@ static int cns3xxx_timer_set_next_event( - - static struct clock_event_device cns3xxx_tmr1_clockevent = { - .name = "cns3xxx timer1", -- .shift = 8, -+ .shift = 32, - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, - .set_mode = cns3xxx_timer_set_mode, - .set_next_event = cns3xxx_timer_set_next_event, -- .rating = 350, -+ .rating = 300, - .cpumask = cpu_all_mask, - }; - -@@ -194,6 +195,35 @@ static struct irqaction cns3xxx_timer_ir - .handler = cns3xxx_timer_interrupt, - }; - -+static cycle_t cns3xxx_get_cycles(struct clocksource *cs) -+{ -+ u64 val; -+ -+ val = readl(cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET); -+ val &= 0xffff; -+ -+ return ((val << 32) | readl(cns3xxx_tmr1 + TIMER_FREERUN_OFFSET)); -+} -+ -+static struct clocksource clocksource_cns3xxx = { -+ .name = "freerun", -+ .rating = 200, -+ .read = cns3xxx_get_cycles, -+ .mask = CLOCKSOURCE_MASK(48), -+ .shift = 16, -+ .flags = CLOCK_SOURCE_IS_CONTINUOUS, -+}; -+ -+static void __init cns3xxx_clocksource_init(void) -+{ -+ /* Reset the FreeRunning counter */ -+ writel((1 << 16), cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET); -+ -+ clocksource_cns3xxx.mult = -+ clocksource_khz2mult(100, clocksource_cns3xxx.shift); -+ clocksource_register(&clocksource_cns3xxx); -+} -+ - /* - * Set up the clock source and clock events devices - */ -@@ -211,13 +241,12 @@ static void __init __cns3xxx_timer_init( - /* stop free running timer3 */ - writel(0, cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET); - -- /* timer1 */ -- writel(0x5C800, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET); -- writel(0x5C800, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); -- - writel(0, cns3xxx_tmr1 + 
TIMER1_MATCH_V1_OFFSET); - writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V2_OFFSET); - -+ val = (cns3xxx_cpu_clock() >> 3) * 1000000 / HZ; -+ writel(val, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET); -+ - /* mask irq, non-mask timer1 overflow */ - irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET); - irq_mask &= ~(1 << 2); -@@ -229,23 +258,9 @@ static void __init __cns3xxx_timer_init( - val |= (1 << 9); - writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); - -- /* timer2 */ -- writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V1_OFFSET); -- writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V2_OFFSET); -- -- /* mask irq */ -- irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET); -- irq_mask |= ((1 << 3) | (1 << 4) | (1 << 5)); -- writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET); -- -- /* down counter */ -- val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); -- val |= (1 << 10); -- writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); -- -- /* Make irqs happen for the system timer */ - setup_irq(timer_irq, &cns3xxx_timer_irq); - -+ cns3xxx_clocksource_init(); - cns3xxx_clockevents_init(timer_irq); - } - diff --git a/target/linux/cns3xxx/patches-3.1/104-cns3xxx_gpio.patch b/target/linux/cns3xxx/patches-3.1/104-cns3xxx_gpio.patch deleted file mode 100644 index 6dff5b07d7..0000000000 --- a/target/linux/cns3xxx/patches-3.1/104-cns3xxx_gpio.patch +++ /dev/null @@ -1,141 +0,0 @@ ---- a/arch/arm/mach-cns3xxx/cns3420vb.c -+++ b/arch/arm/mach-cns3xxx/cns3420vb.c -@@ -215,7 +215,7 @@ static struct map_desc cns3420_io_desc[] - - static void __init cns3420_map_io(void) - { -- cns3xxx_map_io(); -+ cns3xxx_common_init(); - iotable_init(cns3420_io_desc, ARRAY_SIZE(cns3420_io_desc)); - - cns3420_early_serial_setup(); ---- a/arch/arm/mach-cns3xxx/core.c -+++ b/arch/arm/mach-cns3xxx/core.c -@@ -18,6 +18,7 @@ - #include - #include - #include -+#include - #include - #include "core.h" - -@@ -75,12 +76,96 @@ static struct map_desc cns3xxx_io_desc[] - }, - }; - --void __init cns3xxx_map_io(void) -+int gpio_to_irq(int gpio) -+{ -+ if (gpio > 63) -+ return -EINVAL; -+ -+ if (gpio < 32) -+ return IRQ_CNS3XXX_GPIOA; -+ else -+ return IRQ_CNS3XXX_GPIOB; -+} -+EXPORT_SYMBOL(gpio_to_irq); -+ -+int irq2gpio(int irq) -+{ -+ if (irq == IRQ_CNS3XXX_GPIOA) -+ return 0; -+ else if (irq == IRQ_CNS3XXX_GPIOB) -+ return 32; -+ else -+ return -EINVAL; -+} -+EXPORT_SYMBOL(irq2gpio); -+ -+static inline void gpio_line_config(u8 line, u32 direction) -+{ -+ u32 reg; -+ if (direction) { -+ if (line < 32) { -+ reg = __raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ reg |= (1 << line); -+ __raw_writel(reg, CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ } else { -+ reg = __raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ reg |= (1 << (line - 32)); -+ __raw_writel(reg, CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ } -+ } else { -+ if (line < 32) { -+ reg = __raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ reg &= ~(1 << line); -+ __raw_writel(reg, CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ } else { -+ reg = __raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ reg &= ~(1 << (line - 32)); -+ __raw_writel(reg, CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); -+ } -+ } -+} -+ -+static int cns3xxx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) -+{ -+ gpio_line_config(gpio, CNS3XXX_GPIO_IN); -+ return 0; -+} -+ -+static int cns3xxx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level) -+{ -+ gpio_line_set(gpio, level); -+ gpio_line_config(gpio, CNS3XXX_GPIO_OUT); -+ return 0; 
-+} -+ -+static int cns3xxx_gpio_get_value(struct gpio_chip *chip, unsigned gpio) -+{ -+ return gpio_get_value(gpio); -+} -+ -+static void cns3xxx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) -+{ -+ gpio_set_value(gpio, value); -+} -+ -+static struct gpio_chip cns3xxx_gpio_chip = { -+ .label = "CNS3XXX_GPIO_CHIP", -+ .direction_input = cns3xxx_gpio_direction_input, -+ .direction_output = cns3xxx_gpio_direction_output, -+ .get = cns3xxx_gpio_get_value, -+ .set = cns3xxx_gpio_set_value, -+ .base = 0, -+ .ngpio = 64, -+}; -+ -+void __init cns3xxx_common_init(void) - { - #ifdef CONFIG_LOCAL_TIMERS - twd_base = (void __iomem *) CNS3XXX_TC11MP_TWD_BASE_VIRT; - #endif - iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc)); -+ -+ gpiochip_add(&cns3xxx_gpio_chip); - } - - /* used by entry-macro.S */ ---- a/arch/arm/mach-cns3xxx/core.h -+++ b/arch/arm/mach-cns3xxx/core.h -@@ -20,7 +20,7 @@ void __init cns3xxx_l2x0_init(void); - static inline void cns3xxx_l2x0_init(void) {} - #endif /* CONFIG_CACHE_L2X0 */ - --void __init cns3xxx_map_io(void); -+void __init cns3xxx_common_init(void); - void __init cns3xxx_init_irq(void); - void cns3xxx_power_off(void); - ---- a/arch/arm/mach-cns3xxx/laguna.c -+++ b/arch/arm/mach-cns3xxx/laguna.c -@@ -611,7 +611,7 @@ static struct map_desc laguna_io_desc[] - - static void __init laguna_map_io(void) - { -- cns3xxx_map_io(); -+ cns3xxx_common_init(); - iotable_init(laguna_io_desc, ARRAY_SIZE(laguna_io_desc)); - laguna_early_serial_setup(); - } diff --git a/target/linux/cns3xxx/patches-3.1/105-cns3xxx_pcie_io.patch b/target/linux/cns3xxx/patches-3.1/105-cns3xxx_pcie_io.patch deleted file mode 100644 index 6c6d268a96..0000000000 --- a/target/linux/cns3xxx/patches-3.1/105-cns3xxx_pcie_io.patch +++ /dev/null @@ -1,88 +0,0 @@ ---- a/arch/arm/mach-cns3xxx/core.c -+++ b/arch/arm/mach-cns3xxx/core.c -@@ -73,6 +73,16 @@ static struct map_desc cns3xxx_io_desc[] - .pfn = __phys_to_pfn(CNS3XXX_SSP_BASE), - .length = SZ_4K, - .type = MT_DEVICE, -+ }, { -+ .virtual = CNS3XXX_PCIE0_IO_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_IO_BASE), -+ .length = SZ_16M, -+ .type = MT_DEVICE, -+ }, { -+ .virtual = CNS3XXX_PCIE1_IO_BASE_VIRT, -+ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_IO_BASE), -+ .length = SZ_16M, -+ .type = MT_DEVICE, - }, - }; - -@@ -171,13 +181,13 @@ void __init cns3xxx_common_init(void) - /* used by entry-macro.S */ - void __init cns3xxx_init_irq(void) - { -- gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), -- __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT)); -+ gic_init(0, 29, (void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, -+ (void __iomem *) CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT); - } - - void cns3xxx_power_off(void) - { -- u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT); -+ u32 __iomem *pm_base = (void __iomem *) CNS3XXX_PM_BASE_VIRT; - u32 clkctrl; - - printk(KERN_INFO "powering system down...\n"); -@@ -351,7 +361,7 @@ static void __init __cns3xxx_timer_init( - - static void __init cns3xxx_timer_init(void) - { -- cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT); -+ cns3xxx_tmr1 = (void __iomem *) CNS3XXX_TIMER1_2_3_BASE_VIRT; - - __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0); - } ---- a/arch/arm/mach-cns3xxx/devices.c -+++ b/arch/arm/mach-cns3xxx/devices.c -@@ -98,7 +98,7 @@ static struct platform_device cns3xxx_sd - - void __init cns3xxx_sdhci_init(void) - { -- u32 __iomem *gpioa = __io(CNS3XXX_MISC_BASE_VIRT + 0x0014); -+ u32 __iomem *gpioa = (void __iomem *) (CNS3XXX_MISC_BASE_VIRT + 0x0014); - u32 gpioa_pins = __raw_readl(gpioa); - - /* MMC/SD 
pins share with GPIOA */ ---- a/arch/arm/mach-cns3xxx/include/mach/io.h -+++ b/arch/arm/mach-cns3xxx/include/mach/io.h -@@ -9,9 +9,17 @@ - #ifndef __MACH_IO_H - #define __MACH_IO_H - -+#include "cns3xxx.h" -+ - #define IO_SPACE_LIMIT 0xffffffff - --#define __io(a) __typesafe_io(a) -+static inline void __iomem *__io(unsigned long addr) -+{ -+ return (void __iomem *)((addr - CNS3XXX_PCIE0_IO_BASE) -+ + CNS3XXX_PCIE0_IO_BASE_VIRT); -+} -+ -+#define __io(a) __io(a) - #define __mem_pci(a) (a) - - #endif ---- a/drivers/spi/spi_cns3xxx.c -+++ b/drivers/spi/spi_cns3xxx.c -@@ -273,7 +273,7 @@ done: - - static void __init cns3xxx_spi_initial(void) - { -- u32 __iomem *gpiob = __io(CNS3XXX_MISC_BASE_VIRT + 0x0018); -+ u32 __iomem *gpiob = (void __iomem *) (CNS3XXX_MISC_BASE_VIRT + 0x0018); - u32 gpiob_pins = __raw_readl(gpiob); - - /* MMC/SD pins share with GPIOA */ diff --git a/target/linux/cns3xxx/patches-3.1/106-cns3xxx_sata_support.patch b/target/linux/cns3xxx/patches-3.1/106-cns3xxx_sata_support.patch deleted file mode 100644 index ce96f0ea4c..0000000000 --- a/target/linux/cns3xxx/patches-3.1/106-cns3xxx_sata_support.patch +++ /dev/null @@ -1,190 +0,0 @@ ---- a/arch/arm/mach-cns3xxx/devices.c -+++ b/arch/arm/mach-cns3xxx/devices.c -@@ -41,7 +41,7 @@ static struct resource cns3xxx_ahci_reso - static u64 cns3xxx_ahci_dmamask = DMA_BIT_MASK(32); - - static struct platform_device cns3xxx_ahci_pdev = { -- .name = "ahci", -+ .name = "ahci-cns3xxx", - .id = 0, - .resource = cns3xxx_ahci_resource, - .num_resources = ARRAY_SIZE(cns3xxx_ahci_resource), ---- a/drivers/ata/Kconfig -+++ b/drivers/ata/Kconfig -@@ -83,6 +83,17 @@ config SATA_AHCI_PLATFORM - - If unsure, say N. - -+config SATA_AHCI_CNS3XXX -+ bool "AHCI Support on the Cavium Networks CNS3xxx SOC" -+ depends on ARCH_CNS3XXX -+ depends on SATA_AHCI_PLATFORM -+ help -+ This option enables AHCI platform driver to support CNS3xxx -+ System-on-Chip devices. This is only needed when using CNS3xxx AHCI -+ controller. -+ -+ If unsure, say N. -+ - config SATA_FSL - tristate "Freescale 3.0Gbps SATA support" - depends on FSL_SOC ---- a/drivers/ata/Makefile -+++ b/drivers/ata/Makefile -@@ -4,7 +4,10 @@ obj-$(CONFIG_ATA) += libata.o - # non-SFF interface - obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o - obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o --obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o -+obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platforms.o libahci.o -+ahci_platforms-y += ahci_platform.o -+ahci_platforms-$(CONFIG_SATA_AHCI_CNS3XXX) += ahci_cns3xxx.o -+ - obj-$(CONFIG_SATA_FSL) += sata_fsl.o - obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o - obj-$(CONFIG_SATA_SIL24) += sata_sil24.o ---- /dev/null -+++ b/drivers/ata/ahci_cns3xxx.c -@@ -0,0 +1,52 @@ -+/* -+ * AHCI support for CNS3xxx SoC -+ * -+ * Copyright 2010 MontaVista Software, LLC. -+ * Copyright 2010 Cavium Networks -+ * -+ * Authors: Anton Vorontsov -+ * Mac Lin -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include "ahci.h" -+ -+/* -+ * TODO: move cns3xxx_ahci_init to here after cns3xxx_pwr*() calls are -+ * thread-safe -+ */ -+ -+static int cns3xxx_ahci_softreset(struct ata_link *link, unsigned int *class, -+ unsigned long deadline) -+{ -+ int pmp = sata_srst_pmp(link); -+ int ret; -+ -+ ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); -+ if (pmp && ret) -+ return ahci_do_softreset(link, class, 0, deadline, -+ ahci_check_ready); -+ return ret; -+} -+ -+static struct ata_port_operations cns3xxx_ahci_ops = { -+ .inherits = &ahci_ops, -+ .softreset = cns3xxx_ahci_softreset, -+}; -+ -+static const struct ata_port_info cns3xxx_ata_port_info = { -+ .flags = AHCI_FLAG_COMMON, -+ .pio_mask = ATA_PIO4, -+ .udma_mask = ATA_UDMA6, -+ .port_ops = &cns3xxx_ahci_ops, -+}; -+ -+struct ahci_platform_data cns3xxx_ahci_platform_data = { -+ .ata_port_info = &cns3xxx_ata_port_info, -+}; -+ ---- a/drivers/ata/ahci_platform.c -+++ b/drivers/ata/ahci_platform.c -@@ -19,9 +19,11 @@ - #include - #include - #include -+#include - #include - #include - #include "ahci.h" -+#include "ahci_platform.h" - - static struct scsi_host_template ahci_platform_sht = { - AHCI_SHT("ahci_platform"), -@@ -29,6 +31,7 @@ static struct scsi_host_template ahci_pl - - static int __init ahci_probe(struct platform_device *pdev) - { -+ const struct platform_device_id *platid = platform_get_device_id(pdev); - struct device *dev = &pdev->dev; - struct ahci_platform_data *pdata = dev->platform_data; - struct ata_port_info pi = { -@@ -46,6 +49,9 @@ static int __init ahci_probe(struct plat - int i; - int rc; - -+ if (!pdata && platid && platid->driver_data) -+ pdata = (void *)platid->driver_data; -+ - mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem) { - dev_err(dev, "no mmio space\n"); -@@ -171,17 +177,28 @@ static int __devexit ahci_remove(struct - return 0; - } - -+static const struct platform_device_id ahci_platform_ids[] = { -+ { "ahci", }, -+#ifdef CONFIG_SATA_AHCI_CNS3XXX -+ { "ahci-cns3xxx", (kernel_ulong_t)&cns3xxx_ahci_platform_data}, -+#endif -+ { }, -+}; -+MODULE_DEVICE_TABLE(platform, ahci_platform_ids); -+ - static struct platform_driver ahci_driver = { -- .remove = __devexit_p(ahci_remove), - .driver = { - .name = "ahci", - .owner = THIS_MODULE, - }, -+ .probe = ahci_probe, -+ .remove = __devexit_p(ahci_remove), -+ .id_table = ahci_platform_ids, - }; - - static int __init ahci_init(void) - { -- return platform_driver_probe(&ahci_driver, ahci_probe); -+ return platform_driver_register(&ahci_driver); - } - module_init(ahci_init); - -@@ -194,4 +211,3 @@ module_exit(ahci_exit); - MODULE_DESCRIPTION("AHCI SATA platform driver"); - MODULE_AUTHOR("Anton Vorontsov "); - MODULE_LICENSE("GPL"); --MODULE_ALIAS("platform:ahci"); ---- /dev/null -+++ b/drivers/ata/ahci_platform.h -@@ -0,0 +1,19 @@ -+/* -+ * Copyright 2010 MontaVista Software, LLC. -+ * Copyright 2010 Cavium Networks -+ * -+ * Authors: Anton Vorontsov -+ * Mac Lin -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#ifndef _DRIVERS_SATA_AHCI_PLATFORMS_H -+#define _DRIVERS_SATA_AHCI_PLATFORMS_H -+ -+extern struct ahci_platform_data cns3xxx_ahci_platform_data; -+ -+#endif /*_DRIVERS_SATA_AHCI_PLATFORMS_H*/ -+ diff --git a/target/linux/cns3xxx/patches-3.1/110-gateworks_gsp_support.patch b/target/linux/cns3xxx/patches-3.1/110-gateworks_gsp_support.patch deleted file mode 100644 index 3886ee94eb..0000000000 --- a/target/linux/cns3xxx/patches-3.1/110-gateworks_gsp_support.patch +++ /dev/null @@ -1,339 +0,0 @@ ---- a/drivers/hwmon/Kconfig -+++ b/drivers/hwmon/Kconfig -@@ -401,6 +401,15 @@ config SENSORS_GL520SM - This driver can also be built as a module. If so, the module - will be called gl520sm. - -+config SENSORS_GSP -+ tristate "Gateworks System Peripheral" -+ depends on I2C && EXPERIMENTAL -+ help -+ If you say yes here you get support for the Gateworks System Peripherals. -+ -+ This driver can also be built as a module. If so, the module -+ will be called gsp. -+ - config SENSORS_GPIO_FAN - tristate "GPIO fan" - depends on GENERIC_GPIO ---- a/drivers/hwmon/Makefile -+++ b/drivers/hwmon/Makefile -@@ -123,6 +123,7 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l7 - obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o - obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o - obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o -+obj-$(CONFIG_SENSORS_GSP) += gsp.o - - obj-$(CONFIG_PMBUS) += pmbus/ - ---- /dev/null -+++ b/drivers/hwmon/gsp.c -@@ -0,0 +1,308 @@ -+/* -+ * A hwmon driver for the Gateworks System Peripheral -+ * Copyright (C) 2009 Gateworks Corporation -+ * -+ * Author: Chris Lang -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License, -+ * as published by the Free Software Foundation - version 2. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define DRV_VERSION "0.2" -+ -+enum chips { gsp }; -+ -+/* AD7418 registers */ -+#define GSP_REG_TEMP_IN 0x00 -+#define GSP_REG_VIN 0x02 -+#define GSP_REG_3P3 0x05 -+#define GSP_REG_BAT 0x08 -+#define GSP_REG_5P0 0x0b -+#define GSP_REG_CORE 0x0e -+#define GSP_REG_CPU1 0x11 -+#define GSP_REG_CPU2 0x14 -+#define GSP_REG_DRAM 0x17 -+#define GSP_REG_EXT_BAT 0x1a -+#define GSP_REG_IO1 0x1d -+#define GSP_REG_IO2 0x20 -+#define GSP_REG_PCIE 0x23 -+#define GSP_REG_CURRENT 0x26 -+#define GSP_FAN_0 0x2C -+#define GSP_FAN_1 0x2E -+#define GSP_FAN_2 0x30 -+#define GSP_FAN_3 0x32 -+#define GSP_FAN_4 0x34 -+#define GSP_FAN_5 0x36 -+ -+struct gsp_sensor_info { -+ const char* name; -+ int reg; -+}; -+ -+static const struct gsp_sensor_info gsp_sensors[] = { -+ {"temp", GSP_REG_TEMP_IN}, -+ {"vin", GSP_REG_VIN}, -+ {"3p3", GSP_REG_3P3}, -+ {"bat", GSP_REG_BAT}, -+ {"5p0", GSP_REG_5P0}, -+ {"core", GSP_REG_CORE}, -+ {"cpu1", GSP_REG_CPU1}, -+ {"cpu2", GSP_REG_CPU2}, -+ {"dram", GSP_REG_DRAM}, -+ {"ext_bat", GSP_REG_EXT_BAT}, -+ {"io1", GSP_REG_IO1}, -+ {"io2", GSP_REG_IO2}, -+ {"pci2", GSP_REG_PCIE}, -+ {"current", GSP_REG_CURRENT}, -+ {"fan_point0", GSP_FAN_0}, -+ {"fan_point1", GSP_FAN_1}, -+ {"fan_point2", GSP_FAN_2}, -+ {"fan_point3", GSP_FAN_3}, -+ {"fan_point4", GSP_FAN_4}, -+ {"fan_point5", GSP_FAN_5}, -+}; -+ -+struct gsp_data { -+ struct device *hwmon_dev; -+ struct attribute_group attrs; -+ enum chips type; -+}; -+ -+static int gsp_probe(struct i2c_client *client, -+ const struct i2c_device_id *id); -+static int gsp_remove(struct i2c_client *client); -+ -+static const struct i2c_device_id gsp_id[] = { -+ { "gsp", 0 }, -+ { } -+}; -+MODULE_DEVICE_TABLE(i2c, gsp_id); -+ -+static struct i2c_driver gsp_driver = { -+ .driver = { -+ .name = "gsp", -+ }, -+ .probe = gsp_probe, -+ .remove = gsp_remove, -+ .id_table = gsp_id, -+}; -+ -+/* All registers are word-sized, except for the configuration registers. -+ * AD7418 uses a high-byte first convention. Do NOT use those functions to -+ * access the configuration registers CONF and CONF2, as they are byte-sized. 
-+ */ -+static inline int gsp_read(struct i2c_client *client, u8 reg) -+{ -+ unsigned int adc = 0; -+ if (reg == GSP_REG_TEMP_IN || reg > GSP_REG_CURRENT) -+ { -+ adc |= i2c_smbus_read_byte_data(client, reg); -+ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; -+ return adc; -+ } -+ else -+ { -+ adc |= i2c_smbus_read_byte_data(client, reg); -+ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; -+ adc |= i2c_smbus_read_byte_data(client, reg + 2) << 16; -+ return adc; -+ } -+} -+ -+static inline int gsp_write(struct i2c_client *client, u8 reg, u16 value) -+{ -+ i2c_smbus_write_byte_data(client, reg, value & 0xff); -+ i2c_smbus_write_byte_data(client, reg + 1, ((value >> 8) & 0xff)); -+ return 1; -+} -+ -+static ssize_t show_adc(struct device *dev, struct device_attribute *devattr, -+ char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct i2c_client *client = to_i2c_client(dev); -+ return sprintf(buf, "%d\n", gsp_read(client, gsp_sensors[attr->index].reg)); -+} -+ -+static ssize_t show_label(struct device *dev, -+ struct device_attribute *devattr, char *buf) -+{ -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ -+ return sprintf(buf, "%s\n", gsp_sensors[attr->index].name); -+} -+ -+static ssize_t store_fan(struct device *dev, -+ struct device_attribute *devattr, const char *buf, size_t count) -+{ -+ u16 val; -+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); -+ struct i2c_client *client = to_i2c_client(dev); -+ val = simple_strtoul(buf, NULL, 10); -+ gsp_write(client, gsp_sensors[attr->index].reg, val); -+ return count; -+} -+ -+static SENSOR_DEVICE_ATTR(temp0_input, S_IRUGO, show_adc, NULL, 0); -+static SENSOR_DEVICE_ATTR(temp0_label, S_IRUGO, show_label, NULL, 0); -+ -+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_adc, NULL, 1); -+static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL, 1); -+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_adc, NULL, 2); -+static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_label, NULL, 2); -+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 3); -+static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_label, NULL, 3); -+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 4); -+static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 4); -+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 5); -+static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL, 5); -+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_adc, NULL, 6); -+static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL, 6); -+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_adc, NULL, 7); -+static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL, 7); -+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_adc, NULL, 8); -+static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 8); -+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_adc, NULL, 9); -+static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 9); -+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_adc, NULL, 10); -+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 10); -+static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_adc, NULL, 11); -+static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_label, NULL, 11); -+static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_adc, NULL, 12); -+static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_label, NULL, 12); -+static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_adc, NULL, 13); -+static 
SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_label, NULL, 13); -+ -+static SENSOR_DEVICE_ATTR(fan0_point0, S_IRUGO | S_IWUSR, show_adc, store_fan, 14); -+static SENSOR_DEVICE_ATTR(fan0_point1, S_IRUGO | S_IWUSR, show_adc, store_fan, 15); -+static SENSOR_DEVICE_ATTR(fan0_point2, S_IRUGO | S_IWUSR, show_adc, store_fan, 16); -+static SENSOR_DEVICE_ATTR(fan0_point3, S_IRUGO | S_IWUSR, show_adc, store_fan, 17); -+static SENSOR_DEVICE_ATTR(fan0_point4, S_IRUGO | S_IWUSR, show_adc, store_fan, 18); -+static SENSOR_DEVICE_ATTR(fan0_point5, S_IRUGO | S_IWUSR, show_adc, store_fan, 19); -+ -+static struct attribute *gsp_attributes[] = { -+ &sensor_dev_attr_temp0_input.dev_attr.attr, -+ &sensor_dev_attr_in0_input.dev_attr.attr, -+ &sensor_dev_attr_in1_input.dev_attr.attr, -+ &sensor_dev_attr_in2_input.dev_attr.attr, -+ &sensor_dev_attr_in3_input.dev_attr.attr, -+ &sensor_dev_attr_in4_input.dev_attr.attr, -+ &sensor_dev_attr_in5_input.dev_attr.attr, -+ &sensor_dev_attr_in6_input.dev_attr.attr, -+ &sensor_dev_attr_in7_input.dev_attr.attr, -+ &sensor_dev_attr_in8_input.dev_attr.attr, -+ &sensor_dev_attr_in9_input.dev_attr.attr, -+ &sensor_dev_attr_in10_input.dev_attr.attr, -+ &sensor_dev_attr_in11_input.dev_attr.attr, -+ &sensor_dev_attr_in12_input.dev_attr.attr, -+ -+ &sensor_dev_attr_temp0_label.dev_attr.attr, -+ &sensor_dev_attr_in0_label.dev_attr.attr, -+ &sensor_dev_attr_in1_label.dev_attr.attr, -+ &sensor_dev_attr_in2_label.dev_attr.attr, -+ &sensor_dev_attr_in3_label.dev_attr.attr, -+ &sensor_dev_attr_in4_label.dev_attr.attr, -+ &sensor_dev_attr_in5_label.dev_attr.attr, -+ &sensor_dev_attr_in6_label.dev_attr.attr, -+ &sensor_dev_attr_in7_label.dev_attr.attr, -+ &sensor_dev_attr_in8_label.dev_attr.attr, -+ &sensor_dev_attr_in9_label.dev_attr.attr, -+ &sensor_dev_attr_in10_label.dev_attr.attr, -+ &sensor_dev_attr_in11_label.dev_attr.attr, -+ &sensor_dev_attr_in12_label.dev_attr.attr, -+ -+ &sensor_dev_attr_fan0_point0.dev_attr.attr, -+ &sensor_dev_attr_fan0_point1.dev_attr.attr, -+ &sensor_dev_attr_fan0_point2.dev_attr.attr, -+ &sensor_dev_attr_fan0_point3.dev_attr.attr, -+ &sensor_dev_attr_fan0_point4.dev_attr.attr, -+ &sensor_dev_attr_fan0_point5.dev_attr.attr, -+ NULL -+}; -+ -+ -+static int gsp_probe(struct i2c_client *client, -+ const struct i2c_device_id *id) -+{ -+ struct i2c_adapter *adapter = client->adapter; -+ struct gsp_data *data; -+ int err; -+ -+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | -+ I2C_FUNC_SMBUS_WORD_DATA)) { -+ err = -EOPNOTSUPP; -+ goto exit; -+ } -+ -+ if (!(data = kzalloc(sizeof(struct gsp_data), GFP_KERNEL))) { -+ err = -ENOMEM; -+ goto exit; -+ } -+ -+ i2c_set_clientdata(client, data); -+ -+ data->type = id->driver_data; -+ -+ switch (data->type) { -+ case 0: -+ data->attrs.attrs = gsp_attributes; -+ break; -+ } -+ -+ dev_info(&client->dev, "%s chip found\n", client->name); -+ -+ /* Register sysfs hooks */ -+ if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) -+ goto exit_free; -+ -+ data->hwmon_dev = hwmon_device_register(&client->dev); -+ if (IS_ERR(data->hwmon_dev)) { -+ err = PTR_ERR(data->hwmon_dev); -+ goto exit_remove; -+ } -+ -+ return 0; -+ -+exit_remove: -+ sysfs_remove_group(&client->dev.kobj, &data->attrs); -+exit_free: -+ kfree(data); -+exit: -+ return err; -+} -+ -+static int gsp_remove(struct i2c_client *client) -+{ -+ struct gsp_data *data = i2c_get_clientdata(client); -+ hwmon_device_unregister(data->hwmon_dev); -+ sysfs_remove_group(&client->dev.kobj, &data->attrs); -+ kfree(data); -+ return 0; -+} -+ -+static int 
__init gsp_init(void) -+{ -+ return i2c_add_driver(&gsp_driver); -+} -+ -+static void __exit gsp_exit(void) -+{ -+ i2c_del_driver(&gsp_driver); -+} -+ -+module_init(gsp_init); -+module_exit(gsp_exit); -+ -+MODULE_AUTHOR("Chris Lang "); -+MODULE_DESCRIPTION("GSP HWMON driver"); -+MODULE_LICENSE("GPL"); -+MODULE_VERSION(DRV_VERSION); -+ diff --git a/target/linux/cns3xxx/patches-3.1/200-dwc_otg.patch b/target/linux/cns3xxx/patches-3.1/200-dwc_otg.patch deleted file mode 100644 index 27a97126d0..0000000000 --- a/target/linux/cns3xxx/patches-3.1/200-dwc_otg.patch +++ /dev/null @@ -1,22680 +0,0 @@ ---- a/drivers/Makefile -+++ b/drivers/Makefile -@@ -68,6 +68,7 @@ obj-$(CONFIG_PARIDE) += block/paride/ - obj-$(CONFIG_TC) += tc/ - obj-$(CONFIG_UWB) += uwb/ - obj-$(CONFIG_USB_OTG_UTILS) += usb/ -+obj-$(CONFIG_USB_DWC_OTG) += usb/dwc/ - obj-$(CONFIG_USB) += usb/ - obj-$(CONFIG_PCI) += usb/ - obj-$(CONFIG_USB_GADGET) += usb/ ---- a/drivers/usb/Kconfig -+++ b/drivers/usb/Kconfig -@@ -120,6 +120,8 @@ source "drivers/usb/musb/Kconfig" - - source "drivers/usb/renesas_usbhs/Kconfig" - -+source "drivers/usb/dwc/Kconfig" -+ - source "drivers/usb/class/Kconfig" - - source "drivers/usb/storage/Kconfig" ---- /dev/null -+++ b/drivers/usb/dwc/Kconfig -@@ -0,0 +1,44 @@ -+# -+# USB Dual Role (OTG-ready) Controller Drivers -+# for silicon based on Synopsys DesignWare IP -+# -+ -+comment "Enable Host or Gadget support for DesignWare OTG controller" -+depends on !USB && USB_GADGET=n -+ -+config USB_DWC_OTG -+ tristate "Synopsys DWC OTG Controller" -+ depends on USB -+ help -+ This driver provides USB Device Controller support for the -+ Synopsys DesignWare USB OTG Core used on the Cavium CNS34xx SOC. -+ -+config DWC_DEBUG -+ bool "Enable DWC Debugging" -+ depends on USB_DWC_OTG -+ default n -+ help -+ Enable DWC driver debugging -+ -+choice -+ prompt "DWC Mode Selection" -+ depends on USB_DWC_OTG -+ default DWC_HOST_ONLY -+ help -+ Select the DWC Core in OTG, Host only, or Device only mode. -+ -+config DWC_HOST_ONLY -+ bool "DWC Host Only Mode" -+ -+config DWC_OTG_MODE -+ bool "DWC OTG Mode" -+ select USB_GADGET -+ select USB_GADGET_SELECTED -+ -+config DWC_DEVICE_ONLY -+ bool "DWC Device Only Mode" -+ select USB_GADGET -+ select USB_GADGET_SELECTED -+ -+endchoice -+ ---- /dev/null -+++ b/drivers/usb/dwc/Makefile -@@ -0,0 +1,26 @@ -+# -+# Makefile for DWC_otg Highspeed USB controller driver -+# -+ -+EXTRA_CFLAGS += -DDWC_HS_ELECT_TST -+#EXTRA_CFLAGS += -Dlinux -DDWC_HS_ELECT_TST -+#EXTRA_CFLAGS += -DDWC_EN_ISOC -+ -+ifneq ($(CONFIG_DWC_HOST_ONLY),) -+EXTRA_CFLAGS += -DDWC_HOST_ONLY -+endif -+ -+ifneq ($(CONFIG_DWC_DEVICE_ONLY),) -+EXTRA_CFLAGS += -DDWC_DEVICE_ONLY -+endif -+ -+ifneq ($(CONFIG_DWC_DEBUG),) -+EXTRA_CFLAGS += -DDEBUG -+endif -+ -+obj-$(CONFIG_USB_DWC_OTG) := dwc_otg.o -+ -+dwc_otg-objs := otg_driver.o otg_attr.o -+dwc_otg-objs += otg_cil.o otg_cil_intr.o -+dwc_otg-objs += otg_pcd.o otg_pcd_intr.o -+dwc_otg-objs += otg_hcd.o otg_hcd_intr.o otg_hcd_queue.o ---- /dev/null -+++ b/drivers/usb/dwc/otg_attr.c -@@ -0,0 +1,886 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.c $ -+ * $Revision: #31 $ -+ * $Date: 2008/07/15 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. 
-+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * -+ * The diagnostic interface will provide access to the controller for -+ * bringing up the hardware and testing. The Linux driver attributes -+ * feature will be used to provide the Linux Diagnostic -+ * Interface. These attributes are accessed through sysfs. -+ */ -+ -+/** @page "Linux Module Attributes" -+ * -+ * The Linux module attributes feature is used to provide the Linux -+ * Diagnostic Interface. These attributes are accessed through sysfs. -+ * The diagnostic interface will provide access to the controller for -+ * bringing up the hardware and testing. -+ -+ -+ The following table shows the attributes. -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+
 Name          | Description                                                                 | Access
 --------------|-----------------------------------------------------------------------------|-----------
 mode          | Returns the current mode: 0 for device mode, 1 for host mode               | Read
 hnpcapable    | Gets or sets the "HNP-capable" bit in the Core USB Configuration Register. Read returns the current value. | Read/Write
 srpcapable    | Gets or sets the "SRP-capable" bit in the Core USB Configuration Register. Read returns the current value. | Read/Write
 hnp           | Initiates the Host Negotiation Protocol. Read returns the status.          | Read/Write
 srp           | Initiates the Session Request Protocol. Read returns the status.           | Read/Write
 buspower      | Gets or sets the Power State of the bus (0 - Off or 1 - On)                | Read/Write
 bussuspend    | Suspends the USB bus.                                                       | Read/Write
 busconnected  | Gets the connection status of the bus                                       | Read
 gotgctl       | Gets or sets the Core Control Status Register.                              | Read/Write
 gusbcfg       | Gets or sets the Core USB Configuration Register                            | Read/Write
 grxfsiz       | Gets or sets the Receive FIFO Size Register                                 | Read/Write
 gnptxfsiz     | Gets or sets the non-periodic Transmit Size Register                        | Read/Write
 gpvndctl      | Gets or sets the PHY Vendor Control Register                                | Read/Write
 ggpio         | Gets the value in the lower 16 bits of the General Purpose IO Register or sets the upper 16 bits. | Read/Write
 guid          | Gets or sets the value of the User ID Register                              | Read/Write
 gsnpsid       | Gets the value of the Synopsys ID Register                                  | Read
 devspeed      | Gets or sets the device speed setting in the DCFG register                  | Read/Write
 enumspeed     | Gets the device enumeration Speed.                                          | Read
 hptxfsiz      | Gets the value of the Host Periodic Transmit FIFO                           | Read
 hprt0         | Gets or sets the value in the Host Port Control and Status Register         | Read/Write
 regoffset     | Sets the register offset for the next Register Access                       | Read/Write
 regvalue      | Gets or sets the value of the register at the offset in the regoffset attribute. | Read/Write
 remote_wakeup | On read, shows the status of Remote Wakeup. On write, initiates a remote wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote Wakeup signalling bit in the Device Control Register is set for 1 millisecond. | Read/Write
 regdump       | Dumps the contents of core registers.                                       | Read
 spramdump     | Dumps the contents of the core's SPRAM.                                     | Read
 hcddump       | Dumps the current HCD state.                                                | Read
 hcd_frrem     | Shows the average value of the Frame Remaining field in the Host Frame Number/Frame Remaining register when an SOF interrupt occurs. This can be used to determine the average interrupt latency. Also shows the average Frame Remaining value for start_transfer and the "a" and "b" sample points. The "a" and "b" sample points may be used during debugging to determine how long it takes to execute a section of the HCD code. | Read
 rd_reg_test   | Displays the time required to read the GNPTXFSIZ register many times (the output shows the number of times the register is read). | Read
 wr_reg_test   | Displays the time required to write the GNPTXFSIZ register many times (the output shows the number of times the register is written). | Read
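 In addition to the mode and buspower examples shown below, the regoffset/regvalue pair can be
 exercised from the shell. A minimal sketch, assuming the controller is registered under
 /sys/devices/lm0 as in those examples (the actual path depends on how the platform device is
 named on a given board) and using a hypothetical register offset of 0x14:

   echo 0x14 > /sys/devices/lm0/regoffset    # select the register offset (hex, must be below 256K)
   cat /sys/devices/lm0/regvalue             # read the 32-bit register at that offset
   echo 0x1 > /sys/devices/lm0/regvalue      # write the register at that offset (value in hex)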
-+ -+ Example usage: -+ To get the current mode: -+ cat /sys/devices/lm0/mode -+ -+ To power down the USB: -+ echo 0 > /sys/devices/lm0/buspower -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include /* permission constants */ -+#include -+ -+#include -+#include -+#include -+ -+#include "otg_plat.h" -+#include "otg_attr.h" -+#include "otg_driver.h" -+#include "otg_pcd.h" -+#include "otg_hcd.h" -+ -+/* -+ * MACROs for defining sysfs attribute -+ */ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ -+{ \ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t val; \ -+ val = dwc_read_reg32 (_addr_); \ -+ val = (val & (_mask_)) >> _shift_; \ -+ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ -+} -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+{ \ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t set = simple_strtoul(buf, NULL, 16); \ -+ uint32_t clear = set; \ -+ clear = ((~clear) << _shift_) & _mask_; \ -+ set = (set << _shift_) & _mask_; \ -+ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ -+ dwc_modify_reg32(_addr_, clear, set); \ -+ return count; \ -+} -+ -+/* -+ * MACROs for defining sysfs attribute for 32-bit registers -+ */ -+#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ -+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ -+{ \ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t val; \ -+ val = dwc_read_reg32 (_addr_); \ -+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ -+} -+#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ -+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+{ \ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t val = simple_strtoul(buf, NULL, 16); \ -+ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ -+ dwc_write_reg32(_addr_, val); \ -+ return count; \ -+} -+ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); -+ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); -+ -+#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \ -+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ 
-+DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); -+ -+#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \ -+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); -+ -+ -+/** @name Functions for Show/Store of Attributes */ -+/**@{*/ -+ -+/** -+ * Show the register offset of the Register Access. -+ */ -+static ssize_t regoffset_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset); -+} -+ -+/** -+ * Set the register offset for the next Register Access Read/Write -+ */ -+static ssize_t regoffset_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t offset = simple_strtoul(buf, NULL, 16); -+ //dev_dbg(_dev, "Offset=0x%08x\n", offset); -+ if (offset < SZ_256K ) { -+ otg_dev->reg_offset = offset; -+ } -+ else { -+ dev_err( _dev, "invalid offset\n" ); -+ } -+ -+ return count; -+} -+DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, (void *)regoffset_show, regoffset_store); -+ -+ -+/** -+ * Show the value of the register at the offset in the reg_offset -+ * attribute. -+ */ -+static ssize_t regvalue_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t val; -+ volatile uint32_t *addr; -+ -+ if (otg_dev->reg_offset != 0xFFFFFFFF && -+ 0 != otg_dev->base) { -+ /* Calculate the address */ -+ addr = (uint32_t*)(otg_dev->reg_offset + -+ (uint8_t*)otg_dev->base); -+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); -+ val = dwc_read_reg32( addr ); -+ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1, -+ "Reg@0x%06x = 0x%08x\n", -+ otg_dev->reg_offset, val); -+ } -+ else { -+ dev_err(_dev, "Invalid offset (0x%0x)\n", -+ otg_dev->reg_offset); -+ return sprintf(buf, "invalid offset\n" ); -+ } -+} -+ -+/** -+ * Store the value in the register at the offset in the reg_offset -+ * attribute. 
-+ * -+ */ -+static ssize_t regvalue_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ volatile uint32_t * addr; -+ uint32_t val = simple_strtoul(buf, NULL, 16); -+ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val); -+ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { -+ /* Calculate the address */ -+ addr = (uint32_t*)(otg_dev->reg_offset + -+ (uint8_t*)otg_dev->base); -+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); -+ dwc_write_reg32( addr, val ); -+ } -+ else { -+ dev_err(_dev, "Invalid Register Offset (0x%08x)\n", -+ otg_dev->reg_offset); -+ } -+ return count; -+} -+DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store); -+ -+/* -+ * Attributes -+ */ -+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode"); -+ -+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); -+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected"); -+ -+DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID"); -+DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed"); -+ -+DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0"); -+ -+ -+/** -+ * @todo Add code to initiate the HNP. 
-+ */ -+/** -+ * Show the HNP status bit -+ */ -+static ssize_t hnp_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ gotgctl_data_t val; -+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); -+ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs); -+} -+ -+/** -+ * Set the HNP Request bit -+ */ -+static ssize_t hnp_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t in = simple_strtoul(buf, NULL, 16); -+ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl); -+ gotgctl_data_t mem; -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.hnpreq = in; -+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); -+ dwc_write_reg32(addr, mem.d32); -+ return count; -+} -+DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store); -+ -+/** -+ * @todo Add code to initiate the SRP. -+ */ -+/** -+ * Show the SRP status bit -+ */ -+static ssize_t srp_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+#ifndef DWC_HOST_ONLY -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ gotgctl_data_t val; -+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); -+ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs); -+#else -+ return sprintf(buf, "Host Only Mode!\n"); -+#endif -+} -+ -+ -+ -+/** -+ * Set the SRP Request bit -+ */ -+static ssize_t srp_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+#ifndef DWC_HOST_ONLY -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ dwc_otg_pcd_initiate_srp(otg_dev->pcd); -+#endif -+ return count; -+} -+DEVICE_ATTR(srp, 0644, srp_show, srp_store); -+ -+/** -+ * @todo Need to do more for power on/off? -+ */ -+/** -+ * Show the Bus Power status -+ */ -+static ssize_t buspower_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ hprt0_data_t val; -+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); -+ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr); -+} -+ -+ -+/** -+ * Set the Bus Power status -+ */ -+static ssize_t buspower_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t on = simple_strtoul(buf, NULL, 16); -+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; -+ hprt0_data_t mem; -+ -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.prtpwr = on; -+ -+ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); -+ dwc_write_reg32(addr, mem.d32); -+ -+ return count; -+} -+DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store); -+ -+/** -+ * @todo Need to do more for suspend? 
-+ */ -+/** -+ * Show the Bus Suspend status -+ */ -+static ssize_t bussuspend_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ hprt0_data_t val; -+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); -+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp); -+} -+ -+/** -+ * Set the Bus Suspend status -+ */ -+static ssize_t bussuspend_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t in = simple_strtoul(buf, NULL, 16); -+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; -+ hprt0_data_t mem; -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.prtsusp = in; -+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); -+ dwc_write_reg32(addr, mem.d32); -+ return count; -+} -+DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store); -+ -+/** -+ * Show the status of Remote Wakeup. -+ */ -+static ssize_t remote_wakeup_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+#ifndef DWC_HOST_ONLY -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ dctl_data_t val; -+ val.d32 = -+ dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl); -+ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n", -+ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable); -+#else -+ return sprintf(buf, "Host Only Mode!\n"); -+#endif -+} -+/** -+ * Initiate a remote wakeup of the host. The Device control register -+ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable -+ * flag is set. -+ * -+ */ -+static ssize_t remote_wakeup_store( struct device *_dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count ) -+{ -+#ifndef DWC_HOST_ONLY -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t val = simple_strtoul(buf, NULL, 16); -+ if (val&1) { -+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1); -+ } -+ else { -+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0); -+ } -+#endif -+ return count; -+} -+DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show, -+ remote_wakeup_store); -+ -+/** -+ * Dump global registers and either host or device registers (depending on the -+ * current mode of the core). -+ */ -+static ssize_t regdump_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ dwc_otg_dump_global_registers( otg_dev->core_if); -+ if (dwc_otg_is_host_mode(otg_dev->core_if)) { -+ dwc_otg_dump_host_registers( otg_dev->core_if); -+ } else { -+ dwc_otg_dump_dev_registers( otg_dev->core_if); -+ -+ } -+ return sprintf( buf, "Register Dump\n" ); -+} -+ -+DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0); -+ -+/** -+ * Dump global registers and either host or device registers (depending on the -+ * current mode of the core). 
-+ */ -+static ssize_t spramdump_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ dwc_otg_dump_spram( otg_dev->core_if); -+ -+ return sprintf( buf, "SPRAM Dump\n" ); -+} -+ -+DEVICE_ATTR(spramdump, S_IRUGO|S_IWUSR, spramdump_show, 0); -+ -+/** -+ * Dump the current hcd state. -+ */ -+static ssize_t hcddump_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+#ifndef DWC_DEVICE_ONLY -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ dwc_otg_hcd_dump_state(otg_dev->hcd); -+#endif -+ return sprintf( buf, "HCD Dump\n" ); -+} -+ -+DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0); -+ -+/** -+ * Dump the average frame remaining at SOF. This can be used to -+ * determine average interrupt latency. Frame remaining is also shown for -+ * start transfer and two additional sample points. -+ */ -+static ssize_t hcd_frrem_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+#ifndef DWC_DEVICE_ONLY -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ dwc_otg_hcd_dump_frrem(otg_dev->hcd); -+#endif -+ return sprintf( buf, "HCD Dump Frame Remaining\n" ); -+} -+ -+DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0); -+ -+/** -+ * Displays the time required to read the GNPTXFSIZ register many times (the -+ * output shows the number of times the register is read). -+ */ -+#define RW_REG_COUNT 10000000 -+#define MSEC_PER_JIFFIE 1000/HZ -+static ssize_t rd_reg_test_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ int i; -+ int time; -+ int start_jiffies; -+ -+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", -+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); -+ start_jiffies = jiffies; -+ for (i = 0; i < RW_REG_COUNT; i++) { -+ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); -+ } -+ time = jiffies - start_jiffies; -+ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", -+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time ); -+} -+ -+DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0); -+ -+/** -+ * Displays the time required to write the GNPTXFSIZ register many times (the -+ * output shows the number of times the register is written). 
-+ */ -+static ssize_t wr_reg_test_show( struct device *_dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ -+ uint32_t reg_val; -+ int i; -+ int time; -+ int start_jiffies; -+ -+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", -+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); -+ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); -+ start_jiffies = jiffies; -+ for (i = 0; i < RW_REG_COUNT; i++) { -+ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val); -+ } -+ time = jiffies - start_jiffies; -+ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", -+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time); -+} -+ -+DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0); -+/**@}*/ -+ -+/** -+ * Create the device files -+ */ -+void dwc_otg_attr_create (struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ int error; -+ -+ error = device_create_file(dev, &dev_attr_regoffset); -+ error = device_create_file(dev, &dev_attr_regvalue); -+ error = device_create_file(dev, &dev_attr_mode); -+ error = device_create_file(dev, &dev_attr_hnpcapable); -+ error = device_create_file(dev, &dev_attr_srpcapable); -+ error = device_create_file(dev, &dev_attr_hnp); -+ error = device_create_file(dev, &dev_attr_srp); -+ error = device_create_file(dev, &dev_attr_buspower); -+ error = device_create_file(dev, &dev_attr_bussuspend); -+ error = device_create_file(dev, &dev_attr_busconnected); -+ error = device_create_file(dev, &dev_attr_gotgctl); -+ error = device_create_file(dev, &dev_attr_gusbcfg); -+ error = device_create_file(dev, &dev_attr_grxfsiz); -+ error = device_create_file(dev, &dev_attr_gnptxfsiz); -+ error = device_create_file(dev, &dev_attr_gpvndctl); -+ error = device_create_file(dev, &dev_attr_ggpio); -+ error = device_create_file(dev, &dev_attr_guid); -+ error = device_create_file(dev, &dev_attr_gsnpsid); -+ error = device_create_file(dev, &dev_attr_devspeed); -+ error = device_create_file(dev, &dev_attr_enumspeed); -+ error = device_create_file(dev, &dev_attr_hptxfsiz); -+ error = device_create_file(dev, &dev_attr_hprt0); -+ error = device_create_file(dev, &dev_attr_remote_wakeup); -+ error = device_create_file(dev, &dev_attr_regdump); -+ error = device_create_file(dev, &dev_attr_spramdump); -+ error = device_create_file(dev, &dev_attr_hcddump); -+ error = device_create_file(dev, &dev_attr_hcd_frrem); -+ error = device_create_file(dev, &dev_attr_rd_reg_test); -+ error = device_create_file(dev, &dev_attr_wr_reg_test); -+} -+ -+/** -+ * Remove the device files -+ */ -+void dwc_otg_attr_remove (struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ -+ device_remove_file(dev, &dev_attr_regoffset); -+ device_remove_file(dev, &dev_attr_regvalue); -+ device_remove_file(dev, &dev_attr_mode); -+ device_remove_file(dev, &dev_attr_hnpcapable); -+ device_remove_file(dev, &dev_attr_srpcapable); -+ device_remove_file(dev, &dev_attr_hnp); -+ device_remove_file(dev, &dev_attr_srp); -+ device_remove_file(dev, &dev_attr_buspower); -+ device_remove_file(dev, &dev_attr_bussuspend); -+ device_remove_file(dev, &dev_attr_busconnected); -+ device_remove_file(dev, &dev_attr_gotgctl); -+ device_remove_file(dev, &dev_attr_gusbcfg); -+ device_remove_file(dev, &dev_attr_grxfsiz); -+ device_remove_file(dev, &dev_attr_gnptxfsiz); -+ device_remove_file(dev, 
&dev_attr_gpvndctl); -+ device_remove_file(dev, &dev_attr_ggpio); -+ device_remove_file(dev, &dev_attr_guid); -+ device_remove_file(dev, &dev_attr_gsnpsid); -+ device_remove_file(dev, &dev_attr_devspeed); -+ device_remove_file(dev, &dev_attr_enumspeed); -+ device_remove_file(dev, &dev_attr_hptxfsiz); -+ device_remove_file(dev, &dev_attr_hprt0); -+ device_remove_file(dev, &dev_attr_remote_wakeup); -+ device_remove_file(dev, &dev_attr_regdump); -+ device_remove_file(dev, &dev_attr_spramdump); -+ device_remove_file(dev, &dev_attr_hcddump); -+ device_remove_file(dev, &dev_attr_hcd_frrem); -+ device_remove_file(dev, &dev_attr_rd_reg_test); -+ device_remove_file(dev, &dev_attr_wr_reg_test); -+} ---- /dev/null -+++ b/drivers/usb/dwc/otg_attr.h -@@ -0,0 +1,67 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.h $ -+ * $Revision: #7 $ -+ * $Date: 2005/03/28 $ -+ * $Change: 477051 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#if !defined(__DWC_OTG_ATTR_H__) -+#define __DWC_OTG_ATTR_H__ -+ -+/** @file -+ * This file contains the interface to the Linux device attributes. 
-+ */ -+extern struct device_attribute dev_attr_regoffset; -+extern struct device_attribute dev_attr_regvalue; -+ -+extern struct device_attribute dev_attr_mode; -+extern struct device_attribute dev_attr_hnpcapable; -+extern struct device_attribute dev_attr_srpcapable; -+extern struct device_attribute dev_attr_hnp; -+extern struct device_attribute dev_attr_srp; -+extern struct device_attribute dev_attr_buspower; -+extern struct device_attribute dev_attr_bussuspend; -+extern struct device_attribute dev_attr_busconnected; -+extern struct device_attribute dev_attr_gotgctl; -+extern struct device_attribute dev_attr_gusbcfg; -+extern struct device_attribute dev_attr_grxfsiz; -+extern struct device_attribute dev_attr_gnptxfsiz; -+extern struct device_attribute dev_attr_gpvndctl; -+extern struct device_attribute dev_attr_ggpio; -+extern struct device_attribute dev_attr_guid; -+extern struct device_attribute dev_attr_gsnpsid; -+extern struct device_attribute dev_attr_devspeed; -+extern struct device_attribute dev_attr_enumspeed; -+extern struct device_attribute dev_attr_hptxfsiz; -+extern struct device_attribute dev_attr_hprt0; -+ -+void dwc_otg_attr_create (struct platform_device *pdev); -+void dwc_otg_attr_remove (struct platform_device *pdev); -+ -+#endif ---- /dev/null -+++ b/drivers/usb/dwc/otg_cil.c -@@ -0,0 +1,3831 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $ -+ * $Revision: #147 $ -+ * $Date: 2008/10/16 $ -+ * $Change: 1117667 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * -+ * The Core Interface Layer provides basic services for accessing and -+ * managing the DWC_otg hardware. 
These services are used by both the -+ * Host Controller Driver and the Peripheral Controller Driver. -+ * -+ * The CIL manages the memory map for the core so that the HCD and PCD -+ * don't have to do this separately. It also handles basic tasks like -+ * reading/writing the registers and data FIFOs in the controller. -+ * Some of the data access functions provide encapsulation of several -+ * operations required to perform a task, such as writing multiple -+ * registers to start a transfer. Finally, the CIL performs basic -+ * services that are not specific to either the host or device modes -+ * of operation. These services include management of the OTG Host -+ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A -+ * Diagnostic API is also provided to allow testing of the controller -+ * hardware. -+ * -+ * The Core Interface Layer has the following requirements: -+ * - Provides basic controller operations. -+ * - Minimal use of OS services. -+ * - The OS services used will be abstracted by using inline functions -+ * or macros. -+ * -+ */ -+#include -+#include -+#ifdef DEBUG -+#include -+#endif -+ -+#include "otg_plat.h" -+#include "otg_regs.h" -+#include "otg_cil.h" -+#include "otg_pcd.h" -+ -+ -+/** -+ * This function is called to initialize the DWC_otg CSR data -+ * structures. The register addresses in the device and host -+ * structures are initialized from the base address supplied by the -+ * caller. The calling function must make the OS calls to get the -+ * base address of the DWC_otg controller registers. The core_params -+ * argument holds the parameters that specify how the core should be -+ * configured. -+ * -+ * @param[in] reg_base_addr Base address of DWC_otg core registers -+ * @param[in] core_params Pointer to the core configuration parameters -+ * -+ */ -+dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr, -+ dwc_otg_core_params_t *core_params) -+{ -+ dwc_otg_core_if_t *core_if = 0; -+ dwc_otg_dev_if_t *dev_if = 0; -+ dwc_otg_host_if_t *host_if = 0; -+ uint8_t *reg_base = (uint8_t *)reg_base_addr; -+ int i = 0; -+ -+ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params); -+ -+ core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL); -+ -+ if (core_if == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n"); -+ return 0; -+ } -+ -+ memset(core_if, 0, sizeof(dwc_otg_core_if_t)); -+ -+ core_if->core_params = core_params; -+ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base; -+ -+ /* -+ * Allocate the Device Mode structures. -+ */ -+ dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL); -+ -+ if (dev_if == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n"); -+ kfree(core_if); -+ return 0; -+ } -+ -+ dev_if->dev_global_regs = -+ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET); -+ -+ for (i=0; iin_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *) -+ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + -+ (i * DWC_EP_REG_OFFSET)); -+ -+ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *) -+ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + -+ (i * DWC_EP_REG_OFFSET)); -+ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", -+ i, &dev_if->in_ep_regs[i]->diepctl); -+ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", -+ i, &dev_if->out_ep_regs[i]->doepctl); -+ } -+ -+ dev_if->speed = 0; // unknown -+ -+ core_if->dev_if = dev_if; -+ -+ /* -+ * Allocate the Host Mode structures. 
-+ */ -+ host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL); -+ -+ if (host_if == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n"); -+ kfree(dev_if); -+ kfree(core_if); -+ return 0; -+ } -+ -+ host_if->host_global_regs = (dwc_otg_host_global_regs_t *) -+ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET); -+ -+ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET); -+ -+ for (i=0; ihc_regs[i] = (dwc_otg_hc_regs_t *) -+ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + -+ (i * DWC_OTG_CHAN_REGS_OFFSET)); -+ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", -+ i, &host_if->hc_regs[i]->hcchar); -+ } -+ -+ host_if->num_host_channels = MAX_EPS_CHANNELS; -+ core_if->host_if = host_if; -+ -+ for (i=0; idata_fifo[i] = -+ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + -+ (i * DWC_OTG_DATA_FIFO_SIZE)); -+ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", -+ i, (unsigned)core_if->data_fifo[i]); -+ } -+ -+ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET); -+ -+ /* -+ * Store the contents of the hardware configuration registers here for -+ * easy access later. -+ */ -+ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); -+ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); -+ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); -+ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); -+ -+ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); -+ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); -+ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); -+ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); -+ -+ core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); -+ core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); -+ -+ DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32); -+ DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32); -+ -+ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); -+ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); -+ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); -+ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); -+ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); -+ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); -+ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); -+ -+ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); -+ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); -+ -+ /* -+ * Set the SRP sucess bit for FS-I2c -+ */ -+ core_if->srp_success = 0; -+ core_if->srp_timer_started = 0; -+ -+ -+ /* -+ * Create new workqueue and init works -+ */ -+ core_if->wq_otg = create_singlethread_workqueue("dwc_otg"); -+ if(core_if->wq_otg == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n"); -+ kfree(host_if); -+ kfree(dev_if); -+ kfree(core_if); -+ return 0 * HZ; -+ } -+ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change); -+ INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected); -+ -+ return core_if; -+} -+ -+/** -+ * This function frees the structures allocated by dwc_otg_cil_init(). -+ * -+ * @param[in] core_if The core interface pointer returned from -+ * dwc_otg_cil_init(). 
-+ * -+ */ -+void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if) -+{ -+ /* Disable all interrupts */ -+ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0); -+ dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0); -+ -+ if (core_if->wq_otg) { -+ destroy_workqueue(core_if->wq_otg); -+ } -+ if (core_if->dev_if) { -+ kfree(core_if->dev_if); -+ } -+ if (core_if->host_if) { -+ kfree(core_if->host_if); -+ } -+ kfree(core_if); -+} -+ -+/** -+ * This function enables the controller's Global Interrupt in the AHB Config -+ * register. -+ * -+ * @param[in] core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ gahbcfg_data_t ahbcfg = { .d32 = 0}; -+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ -+ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); -+} -+ -+/** -+ * This function disables the controller's Global Interrupt in the AHB Config -+ * register. -+ * -+ * @param[in] core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ gahbcfg_data_t ahbcfg = { .d32 = 0}; -+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ -+ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); -+} -+ -+/** -+ * This function initializes the commmon interrupts, used in both -+ * device and host modes. -+ * -+ * @param[in] core_if Programming view of the DWC_otg controller -+ * -+ */ -+static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ /* Clear any pending OTG Interrupts */ -+ dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF); -+ -+ /* Clear any pending interrupts */ -+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); -+ -+ /* -+ * Enable the interrupts in the GINTMSK. -+ */ -+ intr_mask.b.modemismatch = 1; -+ intr_mask.b.otgintr = 1; -+ -+ if (!core_if->dma_enable) { -+ intr_mask.b.rxstsqlvl = 1; -+ } -+ -+ intr_mask.b.conidstschng = 1; -+ intr_mask.b.wkupintr = 1; -+ intr_mask.b.disconnect = 1; -+ intr_mask.b.usbsuspend = 1; -+ intr_mask.b.sessreqintr = 1; -+ dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32); -+} -+ -+/** -+ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY -+ * type. -+ */ -+static void init_fslspclksel(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t val; -+ hcfg_data_t hcfg; -+ -+ if (((core_if->hwcfg2.b.hs_phy_type == 2) && -+ (core_if->hwcfg2.b.fs_phy_type == 1) && -+ (core_if->core_params->ulpi_fs_ls)) || -+ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { -+ /* Full speed PHY */ -+ val = DWC_HCFG_48_MHZ; -+ } -+ else { -+ /* High speed PHY running at full speed or high speed */ -+ val = DWC_HCFG_30_60_MHZ; -+ } -+ -+ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); -+ hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); -+ hcfg.b.fslspclksel = val; -+ dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32); -+} -+ -+/** -+ * Initializes the DevSpd field of the DCFG register depending on the PHY type -+ * and the enumeration speed of the device. 
-+ */ -+static void init_devspd(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t val; -+ dcfg_data_t dcfg; -+ -+ if (((core_if->hwcfg2.b.hs_phy_type == 2) && -+ (core_if->hwcfg2.b.fs_phy_type == 1) && -+ (core_if->core_params->ulpi_fs_ls)) || -+ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { -+ /* Full speed PHY */ -+ val = 0x3; -+ } -+ else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { -+ /* High speed PHY running at full speed */ -+ val = 0x1; -+ } -+ else { -+ /* High speed PHY running at high speed */ -+ val = 0x0; -+ } -+ -+ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); -+ -+ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); -+ dcfg.b.devspd = val; -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); -+} -+ -+/** -+ * This function calculates the number of IN EPS -+ * using GHWCFG1 and GHWCFG2 registers values -+ * -+ * @param core_if Programming view of the DWC_otg controller -+ */ -+static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t num_in_eps = 0; -+ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; -+ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3; -+ uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps; -+ int i; -+ -+ -+ for(i = 0; i < num_eps; ++i) -+ { -+ if(!(hwcfg1 & 0x1)) -+ num_in_eps++; -+ -+ hwcfg1 >>= 2; -+ } -+ -+ if(core_if->hwcfg4.b.ded_fifo_en) { -+ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps; -+ } -+ -+ return num_in_eps; -+} -+ -+ -+/** -+ * This function calculates the number of OUT EPS -+ * using GHWCFG1 and GHWCFG2 registers values -+ * -+ * @param core_if Programming view of the DWC_otg controller -+ */ -+static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t num_out_eps = 0; -+ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; -+ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2; -+ int i; -+ -+ for(i = 0; i < num_eps; ++i) -+ { -+ if(!(hwcfg1 & 0x2)) -+ num_out_eps++; -+ -+ hwcfg1 >>= 2; -+ } -+ return num_out_eps; -+} -+/** -+ * This function initializes the DWC_otg controller registers and -+ * prepares the core for device mode or host mode operation. -+ * -+ * @param core_if Programming view of the DWC_otg controller -+ * -+ */ -+void dwc_otg_core_init(dwc_otg_core_if_t *core_if) -+{ -+ int i = 0; -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ gahbcfg_data_t ahbcfg = { .d32 = 0 }; -+ gusbcfg_data_t usbcfg = { .d32 = 0 }; -+ gi2cctl_data_t i2cctl = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if); -+ -+ /* Common Initialization */ -+ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ -+// usbcfg.b.tx_end_delay = 1; -+ /* Program the ULPI External VBUS bit if needed */ -+ usbcfg.b.ulpi_ext_vbus_drv = -+ (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0; -+ -+ /* Set external TS Dline pulsing */ -+ usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0; -+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); -+ -+ -+ /* Reset the Controller */ -+ dwc_otg_core_reset(core_if); -+ -+ /* Initialize parameters from Hardware configuration registers. 
*/ -+ dev_if->num_in_eps = calc_num_in_eps(core_if); -+ dev_if->num_out_eps = calc_num_out_eps(core_if); -+ -+ -+ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep); -+ -+ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) -+ { -+ dev_if->perio_tx_fifo_size[i] = -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; -+ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", -+ i, dev_if->perio_tx_fifo_size[i]); -+ } -+ -+ for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++) -+ { -+ dev_if->tx_fifo_size[i] = -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; -+ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", -+ i, dev_if->perio_tx_fifo_size[i]); -+ } -+ -+ core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth; -+ core_if->rx_fifo_size = -+ dwc_read_reg32(&global_regs->grxfsiz); -+ core_if->nperio_tx_fifo_size = -+ dwc_read_reg32(&global_regs->gnptxfsiz) >> 16; -+ -+ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size); -+ -+ /* This programming sequence needs to happen in FS mode before any other -+ * programming occurs */ -+ if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) && -+ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { -+ /* If FS mode with FS PHY */ -+ -+ /* core_init() is now called on every switch so only call the -+ * following for the first time through. */ -+ if (!core_if->phy_init_done) { -+ core_if->phy_init_done = 1; -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n"); -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.physel = 1; -+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Reset after a PHY select */ -+ dwc_otg_core_reset(core_if); -+ } -+ -+ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also -+ * do this on HNP Dev/Host mode switches (done in dev_init and -+ * host_init). */ -+ if (dwc_otg_is_host_mode(core_if)) { -+ init_fslspclksel(core_if); -+ } -+ else { -+ init_devspd(core_if); -+ } -+ -+ if (core_if->core_params->i2c_enable) { -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n"); -+ /* Program GUSBCFG.OtgUtmifsSel to I2C */ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.otgutmifssel = 1; -+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Program GI2CCTL.I2CEn */ -+ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl); -+ i2cctl.b.i2cdevaddr = 1; -+ i2cctl.b.i2cen = 0; -+ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); -+ i2cctl.b.i2cen = 1; -+ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); -+ } -+ -+ } /* endif speed == DWC_SPEED_PARAM_FULL */ -+ -+ else { -+ /* High speed PHY. */ -+ if (!core_if->phy_init_done) { -+ core_if->phy_init_done = 1; -+ /* HS PHY parameters. These parameters are preserved -+ * during soft reset so only program the first time. Do -+ * a soft reset immediately after setting phyif. 
*/ -+ usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type; -+ if (usbcfg.b.ulpi_utmi_sel == 1) { -+ /* ULPI interface */ -+ usbcfg.b.phyif = 0; -+ usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr; -+ } -+ else { -+ /* UTMI+ interface */ -+ if (core_if->core_params->phy_utmi_width == 16) { -+ usbcfg.b.phyif = 1; -+ } -+ else { -+ usbcfg.b.phyif = 0; -+ } -+ } -+ -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Reset after setting the PHY parameters */ -+ dwc_otg_core_reset(core_if); -+ } -+ } -+ -+ if ((core_if->hwcfg2.b.hs_phy_type == 2) && -+ (core_if->hwcfg2.b.fs_phy_type == 1) && -+ (core_if->core_params->ulpi_fs_ls)) { -+ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n"); -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.ulpi_fsls = 1; -+ usbcfg.b.ulpi_clk_sus_m = 1; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ } -+ else { -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.ulpi_fsls = 0; -+ usbcfg.b.ulpi_clk_sus_m = 0; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ } -+ -+ /* Program the GAHBCFG Register.*/ -+ switch (core_if->hwcfg2.b.architecture) { -+ -+ case DWC_SLAVE_ONLY_ARCH: -+ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n"); -+ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; -+ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; -+ core_if->dma_enable = 0; -+ core_if->dma_desc_enable = 0; -+ break; -+ -+ case DWC_EXT_DMA_ARCH: -+ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n"); -+ ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size; -+ core_if->dma_enable = (core_if->core_params->dma_enable != 0); -+ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); -+ break; -+ -+ case DWC_INT_DMA_ARCH: -+ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n"); -+ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; -+ core_if->dma_enable = (core_if->core_params->dma_enable != 0); -+ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); -+ break; -+ -+ } -+ ahbcfg.b.dmaenable = core_if->dma_enable; -+ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); -+ -+ core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en; -+ -+ core_if->pti_enh_enable = core_if->core_params->pti_enable != 0; -+ core_if->multiproc_int_enable = core_if->core_params->mpi_enable; -+ DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled")); -+ DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled")); -+ -+ /* -+ * Program the GUSBCFG register. 
-+ */ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ -+ switch (core_if->hwcfg2.b.op_mode) { -+ case DWC_MODE_HNP_SRP_CAPABLE: -+ usbcfg.b.hnpcap = (core_if->core_params->otg_cap == -+ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE); -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_SRP_ONLY_CAPABLE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_NO_HNP_SRP_CAPABLE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = 0; -+ break; -+ -+ case DWC_MODE_SRP_CAPABLE_DEVICE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_NO_SRP_CAPABLE_DEVICE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = 0; -+ break; -+ -+ case DWC_MODE_SRP_CAPABLE_HOST: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_NO_SRP_CAPABLE_HOST: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = 0; -+ break; -+ } -+ -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Enable common interrupts */ -+ dwc_otg_enable_common_interrupts(core_if); -+ -+ /* Do device or host intialization based on mode during PCD -+ * and HCD initialization */ -+ if (dwc_otg_is_host_mode(core_if)) { -+ DWC_DEBUGPL(DBG_ANY, "Host Mode\n"); -+ core_if->op_state = A_HOST; -+ } -+ else { -+ DWC_DEBUGPL(DBG_ANY, "Device Mode\n"); -+ core_if->op_state = B_PERIPHERAL; -+#ifdef DWC_DEVICE_ONLY -+ dwc_otg_core_dev_init(core_if); -+#endif -+ } -+} -+ -+ -+/** -+ * This function enables the Device mode interrupts. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ */ -+void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ -+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); -+ -+ /* Disable all interrupts. */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* Clear any pending interrupts */ -+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); -+ -+ /* Enable the common interrupts */ -+ dwc_otg_enable_common_interrupts(core_if); -+ -+ /* Enable interrupts */ -+ intr_mask.b.usbreset = 1; -+ intr_mask.b.enumdone = 1; -+ -+ if(!core_if->multiproc_int_enable) { -+ intr_mask.b.inepintr = 1; -+ intr_mask.b.outepintr = 1; -+ } -+ -+ intr_mask.b.erlysuspend = 1; -+ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.epmismatch = 1; -+ } -+ -+ -+#ifdef DWC_EN_ISOC -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable == 0) { -+ if(core_if->pti_enh_enable) { -+ dctl_data_t dctl = { .d32 = 0 }; -+ dctl.b.ifrmnum = 1; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32); -+ } else { -+ intr_mask.b.incomplisoin = 1; -+ intr_mask.b.incomplisoout = 1; -+ } -+ } -+ } else { -+ intr_mask.b.incomplisoin = 1; -+ intr_mask.b.incomplisoout = 1; -+ } -+#endif // DWC_EN_ISOC -+ -+/** @todo NGS: Should this be a module parameter? 
*/ -+#ifdef USE_PERIODIC_EP -+ intr_mask.b.isooutdrop = 1; -+ intr_mask.b.eopframe = 1; -+ intr_mask.b.incomplisoin = 1; -+ intr_mask.b.incomplisoout = 1; -+#endif -+ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, -+ dwc_read_reg32(&global_regs->gintmsk)); -+} -+ -+/** -+ * This function initializes the DWC_otg controller registers for -+ * device mode. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ * -+ */ -+void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if) -+{ -+ int i,size; -+ u_int32_t *default_value_array; -+ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dwc_otg_core_params_t *params = core_if->core_params; -+ dcfg_data_t dcfg = { .d32 = 0}; -+ grstctl_t resetctl = { .d32 = 0 }; -+ uint32_t rx_fifo_size; -+ fifosize_data_t nptxfifosize; -+ fifosize_data_t txfifosize; -+ dthrctl_data_t dthrctl; -+ -+ /* Restart the Phy Clock */ -+ dwc_write_reg32(core_if->pcgcctl, 0); -+ -+ /* Device configuration register */ -+ init_devspd(core_if); -+ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); -+ dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0; -+ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); -+ -+ /* Configure data FIFO sizes */ -+ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { -+ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); -+ -+ /* Rx FIFO */ -+ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->grxfsiz)); -+ -+ rx_fifo_size = params->dev_rx_fifo_size; -+ dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size); -+ -+ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->grxfsiz)); -+ -+ /** Set Periodic Tx FIFO Mask all bits 0 */ -+ core_if->p_tx_msk = 0; -+ -+ /** Set Tx FIFO Mask all bits 0 */ -+ core_if->tx_msk = 0; -+ -+ /* Non-periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; -+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; -+ -+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; -+ if(core_if->en_multiple_tx_fifo == 0) { -+ //core_if->hwcfg4.b.ded_fifo_en==0 -+ -+ /**@todo NGS: Fix Periodic FIFO Sizing! */ -+ /* -+ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. -+ * Indexes of the FIFO size module parameters in the -+ * dev_perio_tx_fifo_size array and the FIFO size registers in -+ * the dptxfsiz array run from 0 to 14. -+ */ -+ /** @todo Finish debug of this */ -+ size=core_if->hwcfg4.b.num_dev_perio_in_ep; -+ default_value_array=params->dev_perio_tx_fifo_size; -+ -+ } -+ else { -+ //core_if->hwcfg4.b.ded_fifo_en==1 -+ /* -+ * Tx FIFOs These FIFOs are numbered from 1 to 15. -+ * Indexes of the FIFO size module parameters in the -+ * dev_tx_fifo_size array and the FIFO size registers in -+ * the dptxfsiz_dieptxf array run from 0 to 14. 
-+ */ -+ -+ size=core_if->hwcfg4.b.num_in_eps; -+ default_value_array=params->dev_tx_fifo_size; -+ -+ } -+ for (i=0; i < size; i++) -+ { -+ -+ txfifosize.b.depth = default_value_array[i]; -+ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); -+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i], -+ txfifosize.d32); -+ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); -+ txfifosize.b.startaddr += txfifosize.b.depth; -+ } -+ } -+ /* Flush the FIFOs */ -+ dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */ -+ dwc_otg_flush_rx_fifo(core_if); -+ -+ /* Flush the Learning Queue. */ -+ resetctl.b.intknqflsh = 1; -+ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); -+ -+ /* Clear all pending Device Interrupts */ -+ -+ if(core_if->multiproc_int_enable) { -+ } -+ -+ /** @todo - if the condition needed to be checked -+ * or in any case all pending interrutps should be cleared? -+ */ -+ if(core_if->multiproc_int_enable) { -+ for(i = 0; i < core_if->dev_if->num_in_eps; ++i) { -+ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0); -+ } -+ -+ for(i = 0; i < core_if->dev_if->num_out_eps; ++i) { -+ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0); -+ } -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF); -+ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0); -+ } else { -+ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0); -+ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0); -+ dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF); -+ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0); -+ } -+ -+ for (i=0; i <= dev_if->num_in_eps; i++) -+ { -+ depctl_data_t depctl; -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); -+ if (depctl.b.epena) { -+ depctl.d32 = 0; -+ depctl.b.epdis = 1; -+ depctl.b.snak = 1; -+ } -+ else { -+ depctl.d32 = 0; -+ } -+ -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32); -+ -+ -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0); -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0); -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF); -+ } -+ -+ for (i=0; i <= dev_if->num_out_eps; i++) -+ { -+ depctl_data_t depctl; -+ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); -+ if (depctl.b.epena) { -+ depctl.d32 = 0; -+ depctl.b.epdis = 1; -+ depctl.b.snak = 1; -+ } -+ else { -+ depctl.d32 = 0; -+ } -+ -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32); -+ -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0); -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0); -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF); -+ } -+ -+ if(core_if->en_multiple_tx_fifo && core_if->dma_enable) { -+ dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1; -+ dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1; -+ dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1; -+ -+ dev_if->rx_thr_length = params->rx_thr_length; -+ dev_if->tx_thr_length = params->tx_thr_length; -+ -+ dev_if->setup_desc_index = 0; -+ -+ dthrctl.d32 = 0; -+ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en; -+ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en; -+ dthrctl.b.tx_thr_len = dev_if->tx_thr_length; -+ dthrctl.b.rx_thr_en = dev_if->rx_thr_en; -+ dthrctl.b.rx_thr_len = dev_if->rx_thr_length; -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx 
Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n", -+ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len); -+ -+ } -+ -+ dwc_otg_enable_device_interrupts(core_if); -+ -+ { -+ diepmsk_data_t msk = { .d32 = 0 }; -+ msk.b.txfifoundrn = 1; -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32); -+ } else { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32); -+ } -+ } -+ -+ -+ if(core_if->multiproc_int_enable) { -+ /* Set NAK on Babble */ -+ dctl_data_t dctl = { .d32 = 0}; -+ dctl.b.nakonbble = 1; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32); -+ } -+} -+ -+/** -+ * This function enables the Host mode interrupts. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ */ -+void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ gintmsk_data_t intr_mask = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); -+ -+ /* Disable all interrupts. */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* Clear any pending interrupts. */ -+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); -+ -+ /* Enable the common interrupts */ -+ dwc_otg_enable_common_interrupts(core_if); -+ -+ /* -+ * Enable host mode interrupts without disturbing common -+ * interrupts. -+ */ -+ intr_mask.b.sofintr = 1; -+ intr_mask.b.portintr = 1; -+ intr_mask.b.hcintr = 1; -+ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); -+} -+ -+/** -+ * This function disables the Host Mode interrupts. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ */ -+void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ gintmsk_data_t intr_mask = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); -+ -+ /* -+ * Disable host mode interrupts without disturbing common -+ * interrupts. -+ */ -+ intr_mask.b.sofintr = 1; -+ intr_mask.b.portintr = 1; -+ intr_mask.b.hcintr = 1; -+ intr_mask.b.ptxfempty = 1; -+ intr_mask.b.nptxfempty = 1; -+ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); -+} -+ -+/** -+ * This function initializes the DWC_otg controller registers for -+ * host mode. -+ * -+ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the -+ * request queues. Host channels are reset to ensure that they are ready for -+ * performing transfers. 
-+ * -+ * @param core_if Programming view of DWC_otg controller -+ * -+ */ -+void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ dwc_otg_host_if_t *host_if = core_if->host_if; -+ dwc_otg_core_params_t *params = core_if->core_params; -+ hprt0_data_t hprt0 = { .d32 = 0 }; -+ fifosize_data_t nptxfifosize; -+ fifosize_data_t ptxfifosize; -+ int i; -+ hcchar_data_t hcchar; -+ hcfg_data_t hcfg; -+ dwc_otg_hc_regs_t *hc_regs; -+ int num_channels; -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if); -+ -+ /* Restart the Phy Clock */ -+ dwc_write_reg32(core_if->pcgcctl, 0); -+ -+ /* Initialize Host Configuration Register */ -+ init_fslspclksel(core_if); -+ if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) -+ { -+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); -+ hcfg.b.fslssupp = 1; -+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); -+ } -+ -+ /* Configure data FIFO sizes */ -+ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { -+ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); -+ -+ /* Rx FIFO */ -+ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); -+ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); -+ -+ /* Non-periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); -+ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; -+ nptxfifosize.b.startaddr = params->host_rx_fifo_size; -+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); -+ DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ /* Periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); -+ ptxfifosize.b.depth = params->host_perio_tx_fifo_size; -+ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; -+ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); -+ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); -+ } -+ -+ /* Clear Host Set HNP Enable in the OTG Control Register */ -+ gotgctl.b.hstsethnpen = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0); -+ -+ /* Make sure the FIFOs are flushed. */ -+ dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */); -+ dwc_otg_flush_rx_fifo(core_if); -+ -+ /* Flush out any leftover queued requests. */ -+ num_channels = core_if->core_params->host_channels; -+ for (i = 0; i < num_channels; i++) -+ { -+ hc_regs = core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 0; -+ hcchar.b.chdis = 1; -+ hcchar.b.epdir = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ } -+ -+ /* Halt all channels to put them into a known state. 
*/ -+ for (i = 0; i < num_channels; i++) -+ { -+ int count = 0; -+ hc_regs = core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 1; -+ hcchar.b.epdir = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); -+ do { -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (++count > 1000) -+ { -+ DWC_ERROR("%s: Unable to clear halt on channel %d\n", -+ __func__, i); -+ break; -+ } -+ } -+ while (hcchar.b.chen); -+ } -+ -+ /* Turn on the vbus power. */ -+ DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state); -+ if (core_if->op_state == A_HOST) { -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); -+ if (hprt0.b.prtpwr == 0) { -+ hprt0.b.prtpwr = 1; -+ dwc_write_reg32(host_if->hprt0, hprt0.d32); -+ } -+ } -+ -+ dwc_otg_enable_host_interrupts(core_if); -+} -+ -+/** -+ * Prepares a host channel for transferring packets to/from a specific -+ * endpoint. The HCCHARn register is set up with the characteristics specified -+ * in _hc. Host channel interrupts that may need to be serviced while this -+ * transfer is in progress are enabled. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ * @param hc Information needed to initialize the host channel -+ */ -+void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ uint32_t intr_enable; -+ hcintmsk_data_t hc_intr_mask; -+ gintmsk_data_t gintmsk = { .d32 = 0 }; -+ hcchar_data_t hcchar; -+ hcsplt_data_t hcsplt; -+ -+ uint8_t hc_num = hc->hc_num; -+ dwc_otg_host_if_t *host_if = core_if->host_if; -+ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; -+ -+ /* Clear old interrupt conditions for this host channel. */ -+ hc_intr_mask.d32 = 0xFFFFFFFF; -+ hc_intr_mask.b.reserved = 0; -+ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); -+ -+ /* Enable channel interrupts required for this transfer. 
*/ -+ hc_intr_mask.d32 = 0; -+ hc_intr_mask.b.chhltd = 1; -+ if (core_if->dma_enable) { -+ hc_intr_mask.b.ahberr = 1; -+ if (hc->error_state && !hc->do_split && -+ hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { -+ hc_intr_mask.b.ack = 1; -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.datatglerr = 1; -+ if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) { -+ hc_intr_mask.b.nak = 1; -+ } -+ } -+ } -+ } -+ else { -+ switch (hc->ep_type) { -+ case DWC_OTG_EP_TYPE_CONTROL: -+ case DWC_OTG_EP_TYPE_BULK: -+ hc_intr_mask.b.xfercompl = 1; -+ hc_intr_mask.b.stall = 1; -+ hc_intr_mask.b.xacterr = 1; -+ hc_intr_mask.b.datatglerr = 1; -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.bblerr = 1; -+ } -+ else { -+ hc_intr_mask.b.nak = 1; -+ hc_intr_mask.b.nyet = 1; -+ if (hc->do_ping) { -+ hc_intr_mask.b.ack = 1; -+ } -+ } -+ -+ if (hc->do_split) { -+ hc_intr_mask.b.nak = 1; -+ if (hc->complete_split) { -+ hc_intr_mask.b.nyet = 1; -+ } -+ else { -+ hc_intr_mask.b.ack = 1; -+ } -+ } -+ -+ if (hc->error_state) { -+ hc_intr_mask.b.ack = 1; -+ } -+ break; -+ case DWC_OTG_EP_TYPE_INTR: -+ hc_intr_mask.b.xfercompl = 1; -+ hc_intr_mask.b.nak = 1; -+ hc_intr_mask.b.stall = 1; -+ hc_intr_mask.b.xacterr = 1; -+ hc_intr_mask.b.datatglerr = 1; -+ hc_intr_mask.b.frmovrun = 1; -+ -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.bblerr = 1; -+ } -+ if (hc->error_state) { -+ hc_intr_mask.b.ack = 1; -+ } -+ if (hc->do_split) { -+ if (hc->complete_split) { -+ hc_intr_mask.b.nyet = 1; -+ } -+ else { -+ hc_intr_mask.b.ack = 1; -+ } -+ } -+ break; -+ case DWC_OTG_EP_TYPE_ISOC: -+ hc_intr_mask.b.xfercompl = 1; -+ hc_intr_mask.b.frmovrun = 1; -+ hc_intr_mask.b.ack = 1; -+ -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.xacterr = 1; -+ hc_intr_mask.b.bblerr = 1; -+ } -+ break; -+ } -+ } -+ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); -+ -+// if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in) -+// hc->max_packet = 512; -+ /* Enable the top level host channel interrupt. */ -+ intr_enable = (1 << hc_num); -+ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); -+ -+ /* Make sure host channel interrupts are enabled. */ -+ gintmsk.b.hcintr = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32); -+ -+ /* -+ * Program the HCCHARn register with the endpoint characteristics for -+ * the current transfer. -+ */ -+ hcchar.d32 = 0; -+ hcchar.b.devaddr = hc->dev_addr; -+ hcchar.b.epnum = hc->ep_num; -+ hcchar.b.epdir = hc->ep_is_in; -+ hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW); -+ hcchar.b.eptype = hc->ep_type; -+ hcchar.b.mps = hc->max_packet; -+ -+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); -+ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); -+ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); -+ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); -+ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); -+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); -+ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); -+ -+ /* -+ * Program the HCSPLIT register for SPLITs -+ */ -+ hcsplt.d32 = 0; -+ if (hc->do_split) { -+ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num, -+ hc->complete_split ? 
"CSPLIT" : "SSPLIT"); -+ hcsplt.b.compsplt = hc->complete_split; -+ hcsplt.b.xactpos = hc->xact_pos; -+ hcsplt.b.hubaddr = hc->hub_addr; -+ hcsplt.b.prtaddr = hc->port_addr; -+ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split); -+ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos); -+ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr); -+ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr); -+ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in); -+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); -+ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len); -+ } -+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); -+ -+} -+ -+/** -+ * Attempts to halt a host channel. This function should only be called in -+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under -+ * normal circumstances in DMA mode, the controller halts the channel when the -+ * transfer is complete or a condition occurs that requires application -+ * intervention. -+ * -+ * In slave mode, checks for a free request queue entry, then sets the Channel -+ * Enable and Channel Disable bits of the Host Channel Characteristics -+ * register of the specified channel to intiate the halt. If there is no free -+ * request queue entry, sets only the Channel Disable bit of the HCCHARn -+ * register to flush requests for this channel. In the latter case, sets a -+ * flag to indicate that the host channel needs to be halted when a request -+ * queue slot is open. -+ * -+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the -+ * HCCHARn register. The controller ensures there is space in the request -+ * queue before submitting the halt request. -+ * -+ * Some time may elapse before the core flushes any posted requests for this -+ * host channel and halts. The Channel Halted interrupt handler completes the -+ * deactivation of the host channel. -+ * -+ * @param core_if Controller register interface. -+ * @param hc Host channel to halt. -+ * @param halt_status Reason for halting the channel. -+ */ -+void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if, -+ dwc_hc_t *hc, -+ dwc_otg_halt_status_e halt_status) -+{ -+ gnptxsts_data_t nptxsts; -+ hptxsts_data_t hptxsts; -+ hcchar_data_t hcchar; -+ dwc_otg_hc_regs_t *hc_regs; -+ dwc_otg_core_global_regs_t *global_regs; -+ dwc_otg_host_global_regs_t *host_global_regs; -+ -+ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ global_regs = core_if->core_global_regs; -+ host_global_regs = core_if->host_if->host_global_regs; -+ -+ WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS); -+ -+ if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || -+ halt_status == DWC_OTG_HC_XFER_AHB_ERR) { -+ /* -+ * Disable all channel interrupts except Ch Halted. The QTD -+ * and QH state associated with this transfer has been cleared -+ * (in the case of URB_DEQUEUE), so the channel needs to be -+ * shut down carefully to prevent crashes. -+ */ -+ hcintmsk_data_t hcintmsk; -+ hcintmsk.d32 = 0; -+ hcintmsk.b.chhltd = 1; -+ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32); -+ -+ /* -+ * Make sure no other interrupts besides halt are currently -+ * pending. Handling another interrupt could cause a crash due -+ * to the QTD and QH state. -+ */ -+ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32); -+ -+ /* -+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR -+ * even if the channel was already halted for some other -+ * reason. 
-+ */ -+ hc->halt_status = halt_status; -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen == 0) { -+ /* -+ * The channel is either already halted or it hasn't -+ * started yet. In DMA mode, the transfer may halt if -+ * it finishes normally or a condition occurs that -+ * requires driver intervention. Don't want to halt -+ * the channel again. In either Slave or DMA mode, -+ * it's possible that the transfer has been assigned -+ * to a channel, but not started yet when an URB is -+ * dequeued. Don't want to halt a channel that hasn't -+ * started yet. -+ */ -+ return; -+ } -+ } -+ -+ if (hc->halt_pending) { -+ /* -+ * A halt has already been issued for this channel. This might -+ * happen when a transfer is aborted by a higher level in -+ * the stack. -+ */ -+#ifdef DEBUG -+ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n", -+ __func__, hc->hc_num); -+ -+/* dwc_otg_dump_global_registers(core_if); */ -+/* dwc_otg_dump_host_registers(core_if); */ -+#endif -+ return; -+ } -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 1; -+ -+ if (!core_if->dma_enable) { -+ /* Check for space in the request queue to issue the halt. */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || -+ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { -+ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ if (nptxsts.b.nptxqspcavail == 0) { -+ hcchar.b.chen = 0; -+ } -+ } -+ else { -+ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); -+ if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) { -+ hcchar.b.chen = 0; -+ } -+ } -+ } -+ -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ hc->halt_status = halt_status; -+ -+ if (hcchar.b.chen) { -+ hc->halt_pending = 1; -+ hc->halt_on_queue = 0; -+ } -+ else { -+ hc->halt_on_queue = 1; -+ } -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); -+ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending); -+ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue); -+ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status); -+ -+ return; -+} -+ -+/** -+ * Clears the transfer state for a host channel. This function is normally -+ * called after a transfer is done and the host channel is being released. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param hc Identifies the host channel to clean up. -+ */ -+void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ dwc_otg_hc_regs_t *hc_regs; -+ -+ hc->xfer_started = 0; -+ -+ /* -+ * Clear channel interrupt enables and any unhandled channel interrupt -+ * conditions. -+ */ -+ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ dwc_write_reg32(&hc_regs->hcintmsk, 0); -+ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); -+ -+#ifdef DEBUG -+ del_timer(&core_if->hc_xfer_timer[hc->hc_num]); -+ { -+ hcchar_data_t hcchar; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chdis) { -+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", -+ __func__, hc->hc_num, hcchar.d32); -+ } -+ } -+#endif -+} -+ -+/** -+ * Sets the channel property that indicates in which frame a periodic transfer -+ * should occur. This is always set to the _next_ frame. This function has no -+ * effect on non-periodic transfers. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param hc Identifies the host channel to set up and its properties. 
-+ * @param hcchar Current value of the HCCHAR register for the specified host -+ * channel. -+ */ -+static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if, -+ dwc_hc_t *hc, -+ hcchar_data_t *hcchar) -+{ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ hfnum_data_t hfnum; -+ hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum); -+ -+ /* 1 if _next_ frame is odd, 0 if it's even */ -+ hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1; -+#ifdef DEBUG -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) { -+ switch (hfnum.b.frnum & 0x7) { -+ case 7: -+ core_if->hfnum_7_samples++; -+ core_if->hfnum_7_frrem_accum += hfnum.b.frrem; -+ break; -+ case 0: -+ core_if->hfnum_0_samples++; -+ core_if->hfnum_0_frrem_accum += hfnum.b.frrem; -+ break; -+ default: -+ core_if->hfnum_other_samples++; -+ core_if->hfnum_other_frrem_accum += hfnum.b.frrem; -+ break; -+ } -+ } -+#endif -+ } -+} -+ -+#ifdef DEBUG -+static void hc_xfer_timeout(unsigned long ptr) -+{ -+ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr; -+ int hc_num = xfer_info->hc->hc_num; -+ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); -+ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); -+} -+#endif -+ -+/* -+ * This function does the setup for a data transfer for a host channel and -+ * starts the transfer. May be called in either Slave mode or DMA mode. In -+ * Slave mode, the caller must ensure that there is sufficient space in the -+ * request queue and Tx Data FIFO. -+ * -+ * For an OUT transfer in Slave mode, it loads a data packet into the -+ * appropriate FIFO. If necessary, additional data packets will be loaded in -+ * the Host ISR. -+ * -+ * For an IN transfer in Slave mode, a data packet is requested. The data -+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, -+ * additional data packets are requested in the Host ISR. -+ * -+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ -+ * register along with a packet count of 1 and the channel is enabled. This -+ * causes a single PING transaction to occur. Other fields in HCTSIZ are -+ * simply set to 0 since no data transfer occurs in this case. -+ * -+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with -+ * all the information required to perform the subsequent data transfer. In -+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the -+ * controller performs the entire PING protocol, then starts the data -+ * transfer. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param hc Information needed to initialize the host channel. The xfer_len -+ * value may be reduced to accommodate the max widths of the XferSize and -+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed -+ * to reflect the final xfer_len value. 
-+ */ -+void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ uint16_t num_packets; -+ uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size; -+ uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count; -+ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ -+ hctsiz.d32 = 0; -+ -+ if (hc->do_ping) { -+ if (!core_if->dma_enable) { -+ dwc_otg_hc_do_ping(core_if, hc); -+ hc->xfer_started = 1; -+ return; -+ } -+ else { -+ hctsiz.b.dopng = 1; -+ } -+ } -+ -+ if (hc->do_split) { -+ num_packets = 1; -+ -+ if (hc->complete_split && !hc->ep_is_in) { -+ /* For CSPLIT OUT Transfer, set the size to 0 so the -+ * core doesn't expect any data written to the FIFO */ -+ hc->xfer_len = 0; -+ } -+ else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) { -+ hc->xfer_len = hc->max_packet; -+ } -+ else if (!hc->ep_is_in && (hc->xfer_len > 188)) { -+ hc->xfer_len = 188; -+ } -+ -+ hctsiz.b.xfersize = hc->xfer_len; -+ } -+ else { -+ /* -+ * Ensure that the transfer length and packet count will fit -+ * in the widths allocated for them in the HCTSIZn register. -+ */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * Make sure the transfer size is no larger than one -+ * (micro)frame's worth of data. (A check was done -+ * when the periodic transfer was accepted to ensure -+ * that a (micro)frame's worth of data can be -+ * programmed into a channel.) -+ */ -+ uint32_t max_periodic_len = hc->multi_count * hc->max_packet; -+ if (hc->xfer_len > max_periodic_len) { -+ hc->xfer_len = max_periodic_len; -+ } -+ else { -+ } -+ } -+ else if (hc->xfer_len > max_hc_xfer_size) { -+ /* Make sure that xfer_len is a multiple of max packet size. */ -+ hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1; -+ } -+ -+ if (hc->xfer_len > 0) { -+ num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet; -+ if (num_packets > max_hc_pkt_count) { -+ num_packets = max_hc_pkt_count; -+ hc->xfer_len = num_packets * hc->max_packet; -+ } -+ } -+ else { -+ /* Need 1 packet for transfer length of 0. */ -+ num_packets = 1; -+ } -+ -+#if 0 -+//host testusb item 10, would do series of Control transfer -+//with URB_SHORT_NOT_OK set in transfer_flags , -+//changing the xfer_len would cause the test fail -+ if (hc->ep_is_in) { -+ /* Always program an integral # of max packets for IN transfers. */ -+ hc->xfer_len = num_packets * hc->max_packet; -+ } -+#endif -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * Make sure that the multi_count field matches the -+ * actual transfer length. -+ */ -+ hc->multi_count = num_packets; -+ } -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* Set up the initial PID for the transfer. 
*/ -+ if (hc->speed == DWC_OTG_EP_SPEED_HIGH) { -+ if (hc->ep_is_in) { -+ if (hc->multi_count == 1) { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; -+ } -+ else if (hc->multi_count == 2) { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; -+ } -+ else { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA2; -+ } -+ } -+ else { -+ if (hc->multi_count == 1) { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; -+ } -+ else { -+ hc->data_pid_start = DWC_OTG_HC_PID_MDATA; -+ } -+ } -+ } -+ else { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; -+ } -+ } -+ -+ hctsiz.b.xfersize = hc->xfer_len; -+ } -+ -+ hc->start_pkt_count = num_packets; -+ hctsiz.b.pktcnt = num_packets; -+ hctsiz.b.pid = hc->data_pid_start; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); -+ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); -+ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); -+ -+ if (core_if->dma_enable) { -+ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff); -+ } -+ -+ /* Start the split */ -+ if (hc->do_split) { -+ hcsplt_data_t hcsplt; -+ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); -+ hcsplt.b.spltena = 1; -+ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); -+ } -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.multicnt = hc->multi_count; -+ hc_set_even_odd_frame(core_if, hc, &hcchar); -+#ifdef DEBUG -+ core_if->start_hcchar_val[hc->hc_num] = hcchar.d32; -+ if (hcchar.b.chdis) { -+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", -+ __func__, hc->hc_num, hcchar.d32); -+ } -+#endif -+ -+ /* Set host channel enable after all other setup is complete. */ -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ hc->xfer_started = 1; -+ hc->requests++; -+ -+ if (!core_if->dma_enable && -+ !hc->ep_is_in && hc->xfer_len > 0) { -+ /* Load OUT packet into the appropriate Tx FIFO. */ -+ dwc_otg_hc_write_packet(core_if, hc); -+ } -+ -+#ifdef DEBUG -+ /* Start a timer for this transfer. */ -+ core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout; -+ core_if->hc_xfer_info[hc->hc_num].core_if = core_if; -+ core_if->hc_xfer_info[hc->hc_num].hc = hc; -+ core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]); -+ core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10); -+ add_timer(&core_if->hc_xfer_timer[hc->hc_num]); -+#endif -+} -+ -+/** -+ * This function continues a data transfer that was started by previous call -+ * to dwc_otg_hc_start_transfer. The caller must ensure there is -+ * sufficient space in the request queue and Tx Data FIFO. This function -+ * should only be called in Slave mode. In DMA mode, the controller acts -+ * autonomously to complete transfers programmed to a host channel. -+ * -+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO -+ * if there is any data remaining to be queued. For an IN transfer, another -+ * data packet is always requested. For the SETUP phase of a control transfer, -+ * this function does nothing. -+ * -+ * @return 1 if a new request is queued, 0 if no more requests are required -+ * for this transfer. 
-+ */ -+int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ -+ if (hc->do_split) { -+ /* SPLITs always queue just once per channel */ -+ return 0; -+ } -+ else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { -+ /* SETUPs are queued only once since they can't be NAKed. */ -+ return 0; -+ } -+ else if (hc->ep_is_in) { -+ /* -+ * Always queue another request for other IN transfers. If -+ * back-to-back INs are issued and NAKs are received for both, -+ * the driver may still be processing the first NAK when the -+ * second NAK is received. When the interrupt handler clears -+ * the NAK interrupt for the first NAK, the second NAK will -+ * not be seen. So we can't depend on the NAK interrupt -+ * handler to requeue a NAKed request. Instead, IN requests -+ * are issued each time this function is called. When the -+ * transfer completes, the extra requests for the channel will -+ * be flushed. -+ */ -+ hcchar_data_t hcchar; -+ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hc_set_even_odd_frame(core_if, hc, &hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 0; -+ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ hc->requests++; -+ return 1; -+ } -+ else { -+ /* OUT transfers. */ -+ if (hc->xfer_count < hc->xfer_len) { -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ hcchar_data_t hcchar; -+ dwc_otg_hc_regs_t *hc_regs; -+ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hc_set_even_odd_frame(core_if, hc, &hcchar); -+ } -+ -+ /* Load OUT packet into the appropriate Tx FIFO. */ -+ dwc_otg_hc_write_packet(core_if, hc); -+ hc->requests++; -+ return 1; -+ } -+ else { -+ return 0; -+ } -+ } -+} -+ -+/** -+ * Starts a PING transfer. This function should only be called in Slave mode. -+ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. -+ */ -+void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ -+ hctsiz.d32 = 0; -+ hctsiz.b.dopng = 1; -+ hctsiz.b.pktcnt = 1; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+} -+ -+/* -+ * This function writes a packet into the Tx FIFO associated with the Host -+ * Channel. For a channel associated with a non-periodic EP, the non-periodic -+ * Tx FIFO is written. For a channel associated with a periodic EP, the -+ * periodic Tx FIFO is written. This function should only be called in Slave -+ * mode. -+ * -+ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by -+ * then number of bytes written to the Tx FIFO. 
-+ */ -+void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ uint32_t i; -+ uint32_t remaining_count; -+ uint32_t byte_count; -+ uint32_t dword_count; -+ -+ uint32_t *data_buff = (uint32_t *)(hc->xfer_buff); -+ uint32_t *data_fifo = core_if->data_fifo[hc->hc_num]; -+ -+ remaining_count = hc->xfer_len - hc->xfer_count; -+ if (remaining_count > hc->max_packet) { -+ byte_count = hc->max_packet; -+ } -+ else { -+ byte_count = remaining_count; -+ } -+ -+ dword_count = (byte_count + 3) / 4; -+ -+ if ((((unsigned long)data_buff) & 0x3) == 0) { -+ /* xfer_buff is DWORD aligned. */ -+ for (i = 0; i < dword_count; i++, data_buff++) -+ { -+ dwc_write_reg32(data_fifo, *data_buff); -+ } -+ } -+ else { -+ /* xfer_buff is not DWORD aligned. */ -+ for (i = 0; i < dword_count; i++, data_buff++) -+ { -+ dwc_write_reg32(data_fifo, get_unaligned(data_buff)); -+ } -+ } -+ -+ hc->xfer_count += byte_count; -+ hc->xfer_buff += byte_count; -+} -+ -+/** -+ * Gets the current USB frame number. This is the frame number from the last -+ * SOF packet. -+ */ -+uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if) -+{ -+ dsts_data_t dsts; -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ /* read current frame/microframe number from DSTS register */ -+ return dsts.b.soffn; -+} -+ -+/** -+ * This function reads a setup packet from the Rx FIFO into the destination -+ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) -+ * Interrupt routine when a SETUP packet has been received in Slave mode. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dest Destination buffer for packet data. -+ */ -+void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest) -+{ -+ /* Get the 8 bytes of a setup transaction data */ -+ -+ /* Pop 2 DWORDS off the receive data FIFO into memory */ -+ dest[0] = dwc_read_reg32(core_if->data_fifo[0]); -+ dest[1] = dwc_read_reg32(core_if->data_fifo[0]); -+} -+ -+ -+/** -+ * This function enables EP0 OUT to receive SETUP packets and configures EP0 -+ * IN for transmitting packets. It is normally called when the -+ * "Enumeration Done" interrupt occurs. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP0 data. 
-+ */ -+void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dsts_data_t dsts; -+ depctl_data_t diepctl; -+ depctl_data_t doepctl; -+ dctl_data_t dctl = { .d32 = 0 }; -+ -+ /* Read the Device Status and Endpoint 0 Control registers */ -+ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts); -+ diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl); -+ doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl); -+ -+ /* Set the MPS of the IN EP based on the enumeration speed */ -+ switch (dsts.b.enumspd) { -+ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: -+ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: -+ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: -+ diepctl.b.mps = DWC_DEP0CTL_MPS_64; -+ break; -+ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: -+ diepctl.b.mps = DWC_DEP0CTL_MPS_8; -+ break; -+ } -+ -+ dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32); -+ -+ /* Enable OUT EP for receive */ -+ doepctl.b.epena = 1; -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); -+ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); -+#endif -+ dctl.b.cgnpinnak = 1; -+ -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); -+ DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n", -+ dwc_read_reg32(&dev_if->dev_global_regs->dctl)); -+} -+ -+/** -+ * This function activates an EP. The Device EP control register for -+ * the EP is configured as defined in the ep structure. Note: This -+ * function is not used for EP0. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to activate. -+ */ -+void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ depctl_data_t depctl; -+ volatile uint32_t *addr; -+ daint_data_t daintmsk = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num, -+ (ep->is_in?"IN":"OUT")); -+ -+ /* Read DEPCTLn register */ -+ if (ep->is_in == 1) { -+ addr = &dev_if->in_ep_regs[ep->num]->diepctl; -+ daintmsk.ep.in = 1<<ep->num; -+ } -+ else { -+ addr = &dev_if->out_ep_regs[ep->num]->doepctl; -+ daintmsk.ep.out = 1<<ep->num; -+ } -+ -+ /* If the EP is already active don't change the EP Control -+ * register. */ -+ depctl.d32 = dwc_read_reg32(addr); -+ if (!depctl.b.usbactep) { -+ depctl.b.mps = ep->maxpacket; -+ depctl.b.eptype = ep->type; -+ depctl.b.txfnum = ep->tx_fifo_num; -+ -+ if (ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ depctl.b.setd0pid = 1; // ??? -+ } -+ else { -+ depctl.b.setd0pid = 1; -+ } -+ depctl.b.usbactep = 1; -+ -+ dwc_write_reg32(addr, depctl.d32); -+ DWC_DEBUGPL(DBG_PCDV,"DEPCTL(%.8x)=%08x\n",(u32)addr, dwc_read_reg32(addr)); -+ } -+ -+ /* Enable the Interrupt for this EP */ -+ if(core_if->multiproc_int_enable) { -+ if (ep->is_in == 1) { -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ diepmsk.b.xfercompl = 1; -+ diepmsk.b.timeout = 1; -+ diepmsk.b.epdisabled = 1; -+ diepmsk.b.ahberr = 1; -+ diepmsk.b.intknepmis = 1; -+ diepmsk.b.txfifoundrn = 1; //?????
-+ -+ -+ if(core_if->dma_desc_enable) { -+ diepmsk.b.bna = 1; -+ } -+/* -+ if(core_if->dma_enable) { -+ doepmsk.b.nak = 1; -+ } -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32); -+ -+ } else { -+ doepmsk_data_t doepmsk = { .d32 = 0}; -+ doepmsk.b.xfercompl = 1; -+ doepmsk.b.ahberr = 1; -+ doepmsk.b.epdisabled = 1; -+ -+ -+ if(core_if->dma_desc_enable) { -+ doepmsk.b.bna = 1; -+ } -+/* -+ doepmsk.b.babble = 1; -+ doepmsk.b.nyet = 1; -+ doepmsk.b.nak = 1; -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32); -+ } -+ dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk, -+ 0, daintmsk.d32); -+ } else { -+ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, -+ 0, daintmsk.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n", -+ dwc_read_reg32(&dev_if->dev_global_regs->daintmsk)); -+ -+ ep->stall_clear_flag = 0; -+ return; -+} -+ -+/** -+ * This function deactivates an EP. This is done by clearing the USB Active -+ * EP bit in the Device EP control register. Note: This function is not used -+ * for EP0. EP0 cannot be deactivated. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to deactivate. -+ */ -+void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ daint_data_t daintmsk = { .d32 = 0}; -+ -+ /* Read DEPCTLn register */ -+ if (ep->is_in == 1) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ daintmsk.ep.in = 1<<ep->num; -+ } -+ else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ daintmsk.ep.out = 1<<ep->num; -+ } -+ -+ //disabled ep only when ep is enabled -+ //or got halt in the loop in test in cv9 -+ depctl.d32=dwc_read_reg32(addr); -+ if(depctl.b.epena){ -+ if (ep->is_in == 1) { -+ diepint_data_t diepint; -+ dwc_otg_dev_in_ep_regs_t *in_reg=core_if->dev_if->in_ep_regs[ep->num]; -+ -+ //Set ep nak -+ depctl.d32=dwc_read_reg32(&in_reg->diepctl); -+ depctl.b.snak=1; -+ dwc_write_reg32(&in_reg->diepctl,depctl.d32); -+ -+ //wait for diepint.b.inepnakeff -+ diepint.d32=dwc_read_reg32(&in_reg->diepint); -+ while(!diepint.b.inepnakeff){ -+ udelay(1); -+ diepint.d32=dwc_read_reg32(&in_reg->diepint); -+ } -+ diepint.d32=0; -+ diepint.b.inepnakeff=1; -+ dwc_write_reg32(&in_reg->diepint,diepint.d32); -+ -+ //set ep disable and snak -+ depctl.d32=dwc_read_reg32(&in_reg->diepctl); -+ depctl.b.snak=1; -+ depctl.b.epdis=1; -+ dwc_write_reg32(&in_reg->diepctl,depctl.d32); -+ -+ //wait for diepint.b.epdisabled -+ diepint.d32=dwc_read_reg32(&in_reg->diepint); -+ while(!diepint.b.epdisabled){ -+ udelay(1); -+ diepint.d32=dwc_read_reg32(&in_reg->diepint); -+ } -+ diepint.d32=0; -+ diepint.b.epdisabled=1; -+ dwc_write_reg32(&in_reg->diepint,diepint.d32); -+ -+ //clear ep enable and disable bit -+ depctl.d32=dwc_read_reg32(&in_reg->diepctl); -+ depctl.b.epena=0; -+ depctl.b.epdis=0; -+ dwc_write_reg32(&in_reg->diepctl,depctl.d32); -+ -+ } -+#if 0 -+//following DWC OTG DataBook v2.72a, 6.4.2.1.3 Disabling an OUT Endpoint, -+//but this doesn't work, the old code do.
-+ else { -+ doepint_data_t doepint; -+ dwc_otg_dev_out_ep_regs_t *out_reg=core_if->dev_if->out_ep_regs[ep->num]; -+ dctl_data_t dctl; -+ gintsts_data_t gintsts; -+ -+ //set dctl global out nak -+ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); -+ dctl.b.sgoutnak=1; -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl,dctl.d32); -+ -+ //wait for gintsts.goutnakeff -+ gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts); -+ while(!gintsts.b.goutnakeff){ -+ udelay(1); -+ gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts); -+ } -+ gintsts.d32=0; -+ gintsts.b.goutnakeff=1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ //set ep disable and snak -+ depctl.d32=dwc_read_reg32(&out_reg->doepctl); -+ depctl.b.snak=1; -+ depctl.b.epdis=1; -+ dwc_write_reg32(&out_reg->doepctl,depctl.d32); -+ -+ //wait for diepint.b.epdisabled -+ doepint.d32=dwc_read_reg32(&out_reg->doepint); -+ while(!doepint.b.epdisabled){ -+ udelay(1); -+ doepint.d32=dwc_read_reg32(&out_reg->doepint); -+ } -+ doepint.d32=0; -+ doepint.b.epdisabled=1; -+ dwc_write_reg32(&out_reg->doepint,doepint.d32); -+ -+ //clear ep enable and disable bit -+ depctl.d32=dwc_read_reg32(&out_reg->doepctl); -+ depctl.b.epena=0; -+ depctl.b.epdis=0; -+ dwc_write_reg32(&out_reg->doepctl,depctl.d32); -+ } -+#endif -+ -+ depctl.d32=0; -+ depctl.b.usbactep = 0; -+ -+ if (ep->is_in == 0) { -+ if(core_if->dma_enable||core_if->dma_desc_enable) -+ depctl.b.epdis = 1; -+ } -+ -+ dwc_write_reg32(addr, depctl.d32); -+ } -+ -+ /* Disable the Interrupt for this EP */ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk, -+ daintmsk.d32, 0); -+ -+ if (ep->is_in == 1) { -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0); -+ } else { -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0); -+ } -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk, -+ daintmsk.d32, 0); -+ } -+ -+ if (ep->is_in == 1) { -+ DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n", -+ (u32)&core_if->dev_if->in_ep_regs[ep->num]->diepctl, -+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepctl), -+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz), -+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepint), -+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepdma), -+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts)); -+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), -+ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); -+ } -+ else { -+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n", -+ (u32)&core_if->dev_if->out_ep_regs[ep->num]->doepctl, -+ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl), -+ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz), -+ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepint), -+ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepdma)); -+ -+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), -+ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); -+ } -+ -+} -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. 
For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ */ -+static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dma_desc_t* dma_desc; -+ uint32_t offset; -+ uint32_t xfer_est; -+ int i; -+ -+ ep->desc_cnt = ( ep->total_len / ep->maxxfer) + -+ ((ep->total_len % ep->maxxfer) ? 1 : 0); -+ if(!ep->desc_cnt) -+ ep->desc_cnt = 1; -+ -+ dma_desc = ep->desc_addr; -+ xfer_est = ep->total_len; -+ offset = 0; -+ for( i = 0; i < ep->desc_cnt; ++i) { -+ /** DMA Descriptor Setup */ -+ if(xfer_est > ep->maxxfer) { -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 0; -+ dma_desc->status.b.ioc = 0; -+ dma_desc->status.b.sp = 0; -+ dma_desc->status.b.bytes = ep->maxxfer; -+ dma_desc->buf = ep->dma_addr + offset; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ xfer_est -= ep->maxxfer; -+ offset += ep->maxxfer; -+ } else { -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ if(ep->is_in) { -+ dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ? -+ 1 : ((ep->sent_zlp) ? 1 : 0); -+ dma_desc->status.b.bytes = xfer_est; -+ } else { -+ dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ; -+ } -+ -+ dma_desc->buf = ep->dma_addr + offset; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ } -+ dma_desc ++; -+ } -+} -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ */ -+ -+void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ deptsiz_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); -+ -+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " -+ "xfer_buff=%p start_xfer_buff=%p\n", -+ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, -+ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); -+ -+ /* IN endpoint */ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[ep->num]; -+ -+ gnptxsts_data_t gtxstatus; -+ -+ gtxstatus.d32 = -+ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); -+ -+ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { -+#ifdef DEBUG -+ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32); -+#endif -+ return; -+ } -+ -+ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); -+ -+ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? -+ ep->maxxfer : (ep->total_len - ep->xfer_len); -+ -+ /* Zero Length Packet? */ -+ if ((ep->xfer_len - ep->xfer_count) == 0) { -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 1; -+ } -+ else { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ } -+ -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { -+ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); -+ } -+ DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ep->num, ep->dma_addr); -+ -+ if (core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(in_regs->diepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ init_dma_desc_chain(core_if, ep); -+ /** DIEPDMAn Register write */ -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr); -+ dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr); -+ } -+ } -+ else -+ { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ if(ep->type != DWC_OTG_EP_TYPE_ISOC) { -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, -+ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, -+ * the data will be written into the fifo by the ISR. -+ */ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk = 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ -+ } -+ } -+ } -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); -+ depctl.b.nextep = ep->num; -+ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); -+ -+ DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n", -+ (u32)&in_regs->diepctl, -+ dwc_read_reg32(&in_regs->diepctl), -+ dwc_read_reg32(&in_regs->dieptsiz), -+ dwc_read_reg32(&in_regs->diepint), -+ dwc_read_reg32(&in_regs->diepdma), -+ dwc_read_reg32(&in_regs->dtxfsts)); -+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), -+ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); -+ -+ } -+ else { -+ /* OUT endpoint */ -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[ep->num]; -+ -+ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); -+ -+ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? 
-+ ep->maxxfer : (ep->total_len - ep->xfer_len); -+ -+ /* Program the transfer size and packet count as follows: -+ * -+ * pktcnt = N -+ * xfersize = N * maxpacket -+ */ -+ if ((ep->xfer_len - ep->xfer_count) == 0) { -+ /* Zero Length Packet */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ } -+ else { -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) / -+ ep->maxpacket; -+ ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count; -+ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n", -+ ep->num, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ -+ if (core_if->dma_enable) { -+ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { -+ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); -+ } -+ DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", -+ ep->num, -+ ep->dma_addr); -+ if (!core_if->dma_desc_enable) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(out_regs->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ init_dma_desc_chain(core_if, ep); -+ -+ /** DOEPDMAn Register write */ -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr); -+ dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ -+ dwc_write_reg32(&out_regs->doepctl, depctl.d32); -+ -+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n", -+ (u32)&out_regs->doepctl, -+ dwc_read_reg32(&out_regs->doepctl), -+ dwc_read_reg32(&out_regs->doeptsiz), -+ dwc_read_reg32(&out_regs->doepint), -+ dwc_read_reg32(&out_regs->doepdma)); -+ -+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), -+ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); -+ } -+} -+ -+/** -+ * This function setup a zero length transfer in Buffer DMA and -+ * Slave modes for usb requests with zero field set -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ -+ depctl_data_t depctl; -+ deptsiz_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); -+ -+ /* IN endpoint */ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[ep->num]; -+ -+ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); -+ -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 1; -+ -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { -+ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); -+ } -+ if (core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(in_regs->diepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, -+ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, -+ * the data will be written into the fifo by the ISR. 
-+ */ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk = 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ } -+ } -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); -+ depctl.b.nextep = ep->num; -+ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); -+ -+ } -+ else { -+ /* OUT endpoint */ -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[ep->num]; -+ -+ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); -+ -+ /* Zero Length Packet */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ -+ if (core_if->dma_enable) { -+ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { -+ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); -+ } -+ if (!core_if->dma_desc_enable) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(out_regs->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ -+ dwc_write_reg32(&out_regs->doepctl, depctl.d32); -+ -+ } -+} -+ -+/** -+ * This function does the setup for a data transfer for EP0 and starts -+ * the transfer. For an IN transfer, the packets will be loaded into -+ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are -+ * unloaded from the Rx FIFO in the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP0 data. -+ */ -+void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ deptsiz0_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ dwc_otg_dma_desc_t* dma_desc; -+ -+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " -+ "xfer_buff=%p start_xfer_buff=%p, dma_addr=%.8x\n", -+ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, -+ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,ep->dma_addr); -+ -+ ep->total_len = ep->xfer_len; -+ -+ /* IN endpoint */ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[0]; -+ -+ gnptxsts_data_t gtxstatus; -+ -+ gtxstatus.d32 = -+ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); -+ -+ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { -+#ifdef DEBUG -+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); -+ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n", -+ dwc_read_reg32(&in_regs->diepctl)); -+ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n", -+ deptsiz.d32, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", -+ gtxstatus.d32); -+#endif -+ return; -+ } -+ -+ -+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); -+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); -+ -+ /* Zero Length Packet? 
*/ -+ if (ep->xfer_len == 0) { -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 1; -+ } -+ else { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 1 : 0) -+ */ -+ if (ep->xfer_len > ep->maxpacket) { -+ ep->xfer_len = ep->maxpacket; -+ deptsiz.b.xfersize = ep->maxpacket; -+ } -+ else { -+ deptsiz.b.xfersize = ep->xfer_len; -+ } -+ deptsiz.b.pktcnt = 1; -+ -+ } -+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { -+ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); -+ } -+ if(core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(in_regs->diepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ dma_desc = core_if->dev_if->in_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; -+ dma_desc->status.b.bytes = ep->xfer_len; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DIEPDMA0 Register write */ -+ -+ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr); -+ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, the -+ * data will be written into the fifo by the ISR. 
-+ */ -+ if (!core_if->dma_enable) { -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk |= 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ } -+ } -+ } -+ } -+ else { -+ /* OUT endpoint */ -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[0]; -+ -+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); -+ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); -+ -+ /* Program the transfer size and packet count as follows: -+ * xfersize = N * (maxpacket + 4 - (maxpacket % 4)) -+ * pktcnt = N */ -+ /* Zero Length Packet */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ -+ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ -+ if (core_if->dma_enable) { -+ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { -+ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); -+ } -+ if(!core_if->dma_desc_enable) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(out_regs->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ dma_desc = core_if->dev_if->out_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.bytes = ep->maxpacket; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DOEPDMA0 Register write */ -+ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr); -+ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32); -+ } -+} -+ -+/** -+ * This function continues control IN transfers started by -+ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a -+ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one -+ * bit for the packet count. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP0 data. -+ */ -+void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ deptsiz0_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ dwc_otg_dma_desc_t* dma_desc; -+ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[0]; -+ gnptxsts_data_t tx_status = { .d32 = 0 }; -+ -+ tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts); -+ /** @todo Should there be check for room in the Tx -+ * Status Queue. If not remove the code above this comment. */ -+ -+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); -+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); -+ -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 1 : 0) -+ */ -+ -+ -+ if(core_if->dma_desc_enable == 0) { -+ deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? 
ep->maxpacket : -+ (ep->total_len - ep->xfer_count); -+ deptsiz.b.pktcnt = 1; -+ if(core_if->dma_enable == 0) { -+ ep->xfer_len += deptsiz.b.xfersize; -+ } else { -+ ep->xfer_len = deptsiz.b.xfersize; -+ } -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ } -+ else { -+ ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : -+ (ep->total_len - ep->xfer_count); -+ -+ dma_desc = core_if->dev_if->in_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; -+ dma_desc->status.b.bytes = ep->xfer_len; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ -+ /** DIEPDMA0 Register write */ -+ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr); -+ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); -+ } -+ -+ -+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { -+ if(core_if->dma_desc_enable == 0){ -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr); -+ } -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, the -+ * data will be written into the fifo by the ISR. -+ */ -+ if (!core_if->dma_enable) { -+ if(core_if->en_multiple_tx_fifo == 0) { -+ /* First clear it from GINTSTS */ -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk |= 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ } -+ } -+ } -+ } -+ else { -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[0]; -+ -+ -+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); -+ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); -+ -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ -+ -+ if(core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ else { -+ dma_desc = core_if->dev_if->out_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.bytes = ep->maxpacket; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DOEPDMA0 Register write */ -+ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr); -+ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); -+ } -+ -+ -+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { -+ if(core_if->dma_desc_enable == 0){ -+ -+ VERIFY_PCD_DMA_ADDR(ep->dma_addr); -+ dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr); -+ } -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&out_regs->doepctl, depctl.d32); -+ -+ } -+} -+ -+#ifdef DEBUG -+void dump_msg(const u8 *buf, unsigned int length) -+{ -+ unsigned int start, num, i; -+ char line[52], *p; -+ -+ if (length >= 512) -+ return; -+ start = 0; -+ while (length > 0) { -+ num = min(length, 16u); -+ p = line; -+ for (i = 0; i < num; ++i) -+ { -+ if (i == 8) -+ *p++ = ' '; -+ sprintf(p, " %02x", buf[i]); -+ p += 3; -+ } -+ *p = 0; -+ DWC_PRINT("%6x: %s\n", start, line); -+ buf += num; -+ start += num; -+ length -= num; -+ } -+} -+#else -+static inline void dump_msg(const u8 *buf, unsigned int length) -+{ -+} -+#endif -+ -+/** -+ * This function writes a packet into the Tx FIFO associated with the -+ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For -+ * periodic EPs the periodic Tx FIFO associated with the EP is written -+ * with all packets for the next micro-frame. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to write packet for. -+ * @param dma Indicates if DMA is being used. -+ */ -+void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma) -+{ -+ /** -+ * The buffer is padded to DWORD on a per packet basis in -+ * slave/dma mode if the MPS is not DWORD aligned. The last -+ * packet, if short, is also padded to a multiple of DWORD. 
-+ *
-+ * ep->xfer_buff always starts DWORD aligned in memory and is a
-+ * multiple of DWORD in length
-+ *
-+ * ep->xfer_len can be any number of bytes
-+ *
-+ * ep->xfer_count is a multiple of ep->maxpacket until the last
-+ * packet
-+ *
-+ * FIFO access is DWORD */
-+
-+ uint32_t i;
-+ uint32_t byte_count;
-+ uint32_t dword_count;
-+ uint32_t *fifo;
-+ uint32_t *data_buff = (uint32_t *)ep->xfer_buff;
-+
-+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep);
-+ if (ep->xfer_count >= ep->xfer_len) {
-+ DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num);
-+ return;
-+ }
-+
-+ /* Find the byte length of the packet either short packet or MPS */
-+ if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) {
-+ byte_count = ep->xfer_len - ep->xfer_count;
-+ }
-+ else {
-+ byte_count = ep->maxpacket;
-+ }
-+
-+ /* Find the DWORD length, padded by extra bytes as neccessary if MPS
-+ * is not a multiple of DWORD */
-+ dword_count = (byte_count + 3) / 4;
-+
-+#ifdef VERBOSE
-+ dump_msg(ep->xfer_buff, byte_count);
-+#endif
-+
-+ /**@todo NGS Where are the Periodic Tx FIFO addresses
-+ * intialized? What should this be? */
-+
-+ fifo = core_if->data_fifo[ep->num];
-+
-+
-+ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count);
-+
-+ if (!dma) {
-+ for (i=0; i<dword_count; i++, data_buff++) {
-+ dwc_write_reg32(fifo, *data_buff);
-+ }
-+ }
-+
-+ ep->xfer_count += byte_count;
-+ ep->xfer_buff += byte_count;
-+ ep->dma_addr += byte_count;
-+}
-+
-+/**
-+ * Set the EP STALL.
-+ *
-+ * @param core_if Programming view of DWC_otg controller.
-+ * @param ep The EP to set the stall on.
-+ */
-+void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
-+{
-+ depctl_data_t depctl;
-+ volatile uint32_t *depctl_addr;
-+
-+ DWC_DEBUGPL(DBG_PCDV, "%s ep%d-%s1\n", __func__, ep->num,
-+ (ep->is_in?"IN":"OUT"));
-+
-+ DWC_PRINT("%s ep%d-%s\n", __func__, ep->num,
-+ (ep->is_in?"in":"out"));
-+
-+ if (ep->is_in == 1) {
-+ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl);
-+ depctl.d32 = dwc_read_reg32(depctl_addr);
-+
-+ /* set the disable and stall bits */
-+#if 0
-+//epdis is set here but not cleared at latter dwc_otg_ep_clear_stall,
-+//which cause the testusb item 13 failed(Host:pc, device: otg device)
-+ if (depctl.b.epena) {
-+ depctl.b.epdis = 1;
-+ }
-+#endif
-+ depctl.b.stall = 1;
-+ dwc_write_reg32(depctl_addr, depctl.d32);
-+ }
-+ else {
-+ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl);
-+ depctl.d32 = dwc_read_reg32(depctl_addr);
-+
-+ /* set the stall bit */
-+ depctl.b.stall = 1;
-+ dwc_write_reg32(depctl_addr, depctl.d32);
-+ }
-+
-+ DWC_DEBUGPL(DBG_PCDV,"%s: DEPCTL(%.8x)=%0x\n",__func__,(u32)depctl_addr,dwc_read_reg32(depctl_addr));
-+
-+ return;
-+}
-+
-+/**
-+ * Clear the EP STALL.
-+ *
-+ * @param core_if Programming view of DWC_otg controller.
-+ * @param ep The EP to clear stall from.
-+ */ -+void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ volatile uint32_t *depctl_addr; -+ -+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, -+ (ep->is_in?"IN":"OUT")); -+ -+ if (ep->is_in == 1) { -+ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); -+ } -+ else { -+ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); -+ } -+ -+ depctl.d32 = dwc_read_reg32(depctl_addr); -+ -+ /* clear the stall bits */ -+ depctl.b.stall = 0; -+ -+ /* -+ * USB Spec 9.4.5: For endpoints using data toggle, regardless -+ * of whether an endpoint has the Halt feature set, a -+ * ClearFeature(ENDPOINT_HALT) request always results in the -+ * data toggle being reinitialized to DATA0. -+ */ -+ if (ep->type == DWC_OTG_EP_TYPE_INTR || -+ ep->type == DWC_OTG_EP_TYPE_BULK) { -+ depctl.b.setd0pid = 1; /* DATA0 */ -+ } -+ -+ dwc_write_reg32(depctl_addr, depctl.d32); -+ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); -+ return; -+} -+ -+/** -+ * This function reads a packet from the Rx FIFO into the destination -+ * buffer. To read SETUP data use dwc_otg_read_setup_packet. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dest Destination buffer for the packet. -+ * @param bytes Number of bytes to copy to the destination. -+ */ -+void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, -+ uint8_t *dest, -+ uint16_t bytes) -+{ -+ int i; -+ int word_count = (bytes + 3) / 4; -+ -+ volatile uint32_t *fifo = core_if->data_fifo[0]; -+ uint32_t *data_buff = (uint32_t *)dest; -+ -+ /** -+ * @todo Account for the case where _dest is not dword aligned. This -+ * requires reading data from the FIFO into a uint32_t temp buffer, -+ * then moving it into the data buffer. 
-+ */ -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__, -+ core_if, dest, bytes); -+ -+ for (i=0; idev_if->dev_global_regs->dcfg; -+ DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->dctl; -+ DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->dsts; -+ DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->diepmsk; -+ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->doepmsk; -+ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->daint; -+ DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->daintmsk; -+ DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->dtknqr1; -+ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ if (core_if->hwcfg2.b.dev_token_q_depth > 6) { -+ addr=&core_if->dev_if->dev_global_regs->dtknqr2; -+ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", -+ (uint32_t)addr,dwc_read_reg32(addr)); -+ } -+ -+ addr=&core_if->dev_if->dev_global_regs->dvbusdis; -+ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ addr=&core_if->dev_if->dev_global_regs->dvbuspulse; -+ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", -+ (uint32_t)addr,dwc_read_reg32(addr)); -+ -+ if (core_if->hwcfg2.b.dev_token_q_depth > 14) { -+ addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl; -+ DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n", -+ (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+/* -+ if (core_if->hwcfg2.b.dev_token_q_depth > 22) { -+ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; -+ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", -+ (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+*/ -+ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; -+ DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr)); -+ -+ addr=&core_if->dev_if->dev_global_regs->deachint; -+ DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->deachintmsk; -+ DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { -+ addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i]; -+ DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+ -+ -+ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { -+ addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i]; -+ DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+ -+ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { -+ DWC_PRINT("Device IN EP %d Registers\n", i); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepctl; -+ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepint; -+ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz; -+ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepdma; -+ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ 
addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts; -+ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ //reading depdmab in non desc dma mode would halt the ahb bus... -+ if(core_if->dma_desc_enable){ -+ addr=&core_if->dev_if->in_ep_regs[i]->diepdmab; -+ DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ } -+ } -+ -+ -+ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { -+ DWC_PRINT("Device OUT EP %d Registers\n", i); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepctl; -+ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepfn; -+ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepint; -+ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz; -+ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepdma; -+ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ //reading depdmab in non desc dma mode would halt the ahb bus... -+ if(core_if->dma_desc_enable){ -+ addr=&core_if->dev_if->out_ep_regs[i]->doepdmab; -+ DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ } -+ -+ } -+ -+ -+ -+ return; -+} -+ -+/** -+ * This functions reads the SPRAM and prints its content -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if) -+{ -+ volatile uint8_t *addr, *start_addr, *end_addr; -+ -+ DWC_PRINT("SPRAM Data:\n"); -+ start_addr = (void*)core_if->core_global_regs; -+ DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr); -+ start_addr += 0x00028000; -+ end_addr=(void*)core_if->core_global_regs; -+ end_addr += 0x000280e0; -+ -+ for(addr = start_addr; addr < end_addr; addr+=16) -+ { -+ DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr, -+ addr[0], -+ addr[1], -+ addr[2], -+ addr[3], -+ addr[4], -+ addr[5], -+ addr[6], -+ addr[7], -+ addr[8], -+ addr[9], -+ addr[10], -+ addr[11], -+ addr[12], -+ addr[13], -+ addr[14], -+ addr[15] -+ ); -+ } -+ -+ return; -+} -+/** -+ * This function reads the host registers and prints them -+ * -+ * @param core_if Programming view of DWC_otg controller. 
-+ */
-+void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if)
-+{
-+ int i;
-+ volatile uint32_t *addr;
-+
-+ DWC_PRINT("Host Global Registers\n");
-+ addr=&core_if->host_if->host_global_regs->hcfg;
-+ DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->host_global_regs->hfir;
-+ DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->host_global_regs->hfnum;
-+ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->host_global_regs->hptxsts;
-+ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->host_global_regs->haint;
-+ DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->host_global_regs->haintmsk;
-+ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=core_if->host_if->hprt0;
-+ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+
-+ for (i=0; i<core_if->core_params->host_channels; i++)
-+ {
-+ DWC_PRINT("Host Channel %d Specific Registers\n", i);
-+ addr=&core_if->host_if->hc_regs[i]->hcchar;
-+ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->hc_regs[i]->hcsplt;
-+ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->hc_regs[i]->hcint;
-+ DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->hc_regs[i]->hcintmsk;
-+ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->hc_regs[i]->hctsiz;
-+ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->host_if->hc_regs[i]->hcdma;
-+ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ }
-+ return;
-+}
-+
-+/**
-+ * This function reads the core global registers and prints them
-+ *
-+ * @param core_if Programming view of DWC_otg controller.
-+ */
-+void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if)
-+{
-+ int i,size;
-+ char* str;
-+ volatile uint32_t *addr;
-+
-+ DWC_PRINT("Core Global Registers\n");
-+ addr=&core_if->core_global_regs->gotgctl;
-+ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gotgint;
-+ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gahbcfg;
-+ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gusbcfg;
-+ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->grstctl;
-+ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gintsts;
-+ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gintmsk;
-+ DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->grxstsr;
-+ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ //addr=&core_if->core_global_regs->grxstsp;
-+ //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->grxfsiz;
-+ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gnptxfsiz;
-+ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gnptxsts;
-+ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gi2cctl;
-+ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gpvndctl;
-+ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->ggpio;
-+ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->guid;
-+ DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->gsnpsid;
-+ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->ghwcfg1;
-+ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->ghwcfg2;
-+ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->ghwcfg3;
-+ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->ghwcfg4;
-+ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+ addr=&core_if->core_global_regs->hptxfsiz;
-+ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
-+
-+ size=(core_if->hwcfg4.b.ded_fifo_en)?
-+ core_if->hwcfg4.b.num_in_eps:core_if->hwcfg4.b.num_dev_perio_in_ep;
-+ str=(core_if->hwcfg4.b.ded_fifo_en)?"DIEPTXF":"DPTXFSIZ";
-+ for (i=0; i<size; i++)
-+ {
-+ addr=&core_if->core_global_regs->dptxfsiz_dieptxf[i];
-+ DWC_PRINT("%s[%d] @0x%08X : 0x%08X\n",str,i,(uint32_t)addr,dwc_read_reg32(addr));
-+ }
-+}
-+
-+/**
-+ * Flush a Tx FIFO.
-+ *
-+ * @param core_if Programming view of DWC_otg controller.
-+ * @param num Tx FIFO to flush.
-+ */ -+void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if, -+ const int num) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ volatile grstctl_t greset = { .d32 = 0}; -+ int count = 0; -+ -+ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num); -+ -+ greset.b.txfflsh = 1; -+ greset.b.txfnum = num; -+ dwc_write_reg32(&global_regs->grstctl, greset.d32); -+ -+ do { -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 10000) { -+ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", -+ __func__, greset.d32, -+ dwc_read_reg32(&global_regs->gnptxsts)); -+ break; -+ } -+ } -+ while (greset.b.txfflsh == 1); -+ -+ /* Wait for 3 PHY Clocks*/ -+ UDELAY(1); -+} -+ -+/** -+ * Flush Rx FIFO. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ volatile grstctl_t greset = { .d32 = 0}; -+ int count = 0; -+ -+ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); -+ /* -+ * -+ */ -+ greset.b.rxfflsh = 1; -+ dwc_write_reg32(&global_regs->grstctl, greset.d32); -+ -+ do { -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 10000) { -+ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, -+ greset.d32); -+ break; -+ } -+ } -+ while (greset.b.rxfflsh == 1); -+ -+ /* Wait for 3 PHY Clocks*/ -+ UDELAY(1); -+} -+ -+/** -+ * Do core a soft reset of the core. Be careful with this because it -+ * resets all the internal state machines of the core. -+ */ -+void dwc_otg_core_reset(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ volatile grstctl_t greset = { .d32 = 0}; -+ int count = 0; -+ -+ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); -+ /* Wait for AHB master IDLE state. */ -+ do { -+ UDELAY(10); -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 100000) { -+ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, -+ greset.d32); -+ return; -+ } -+ } -+ while (greset.b.ahbidle == 0); -+ -+ /* Core Soft Reset */ -+ count = 0; -+ greset.b.csftrst = 1; -+ dwc_write_reg32(&global_regs->grstctl, greset.d32); -+ do { -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 10000) { -+ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, -+ greset.d32); -+ break; -+ } -+ } -+ while (greset.b.csftrst == 1); -+ -+ /* Wait for 3 PHY Clocks*/ -+ MDELAY(100); -+ -+ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); -+ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); -+ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); -+ -+} -+ -+ -+ -+/** -+ * Register HCD callbacks. The callbacks are used to start and stop -+ * the HCD for interrupt processing. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param cb the HCD callback structure. -+ * @param p pointer to be passed to callback function (usb_hcd*). -+ */ -+void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if, -+ dwc_otg_cil_callbacks_t *cb, -+ void *p) -+{ -+ core_if->hcd_cb = cb; -+ cb->p = p; -+} -+ -+/** -+ * Register PCD callbacks. The callbacks are used to start and stop -+ * the PCD for interrupt processing. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param cb the PCD callback structure. -+ * @param p pointer to be passed to callback function (pcd*). 
-+ */ -+void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if, -+ dwc_otg_cil_callbacks_t *cb, -+ void *p) -+{ -+ core_if->pcd_cb = cb; -+ cb->p = p; -+} -+ -+#ifdef DWC_EN_ISOC -+ -+/** -+ * This function writes isoc data per 1 (micro)frame into tx fifo -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dev_in_ep_regs_t *ep_regs; -+ dtxfsts_data_t txstatus = {.d32 = 0}; -+ uint32_t len = 0; -+ uint32_t dwords; -+ -+ ep->xfer_len = ep->data_per_frame; -+ ep->xfer_count = 0; -+ -+ ep_regs = core_if->dev_if->in_ep_regs[ep->num]; -+ -+ len = ep->xfer_len - ep->xfer_count; -+ -+ if (len > ep->maxpacket) { -+ len = ep->maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ -+ /* While there is space in the queue and space in the FIFO and -+ * More data to tranfer, Write packets to the Tx FIFO */ -+ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32); -+ -+ while (txstatus.b.txfspcavail > dwords && -+ ep->xfer_count < ep->xfer_len && -+ ep->xfer_len != 0) { -+ /* Write the FIFO */ -+ dwc_otg_ep_write_packet(core_if, ep, 0); -+ -+ len = ep->xfer_len - ep->xfer_count; -+ if (len > ep->maxpacket) { -+ len = ep->maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32); -+ } -+} -+ -+ -+/** -+ * This function initializes a descriptor chain for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ depctl_data_t depctl = { .d32 = 0 }; -+ dsts_data_t dsts = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ -+ if(ep->is_in) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ } else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ } -+ -+ ep->xfer_len = ep->data_per_frame; -+ ep->xfer_count = 0; -+ ep->xfer_buff = ep->cur_pkt_addr; -+ ep->dma_addr = ep->cur_pkt_dma_addr; -+ -+ if(ep->is_in) { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.xfersize = ep->xfer_len; -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ deptsiz.b.mc = deptsiz.b.pktcnt; -+ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); -+ } -+ } else { -+ deptsiz.b.pktcnt = -+ (ep->xfer_len + (ep->maxpacket - 1)) / -+ ep->maxpacket; -+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); -+ -+ if (core_if->dma_enable) { -+ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ } -+ -+ -+ /** Enable endpoint, clear nak */ -+ -+ depctl.d32 = 0; -+ if(ep->bInterval == 1) { -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ ep->next_frame = dsts.b.soffn + ep->bInterval; -+ -+ if(ep->next_frame & 0x1) { -+ depctl.b.setd1pid = 1; -+ } else { -+ depctl.b.setd0pid = 1; -+ } -+ } else { -+ ep->next_frame += ep->bInterval; -+ -+ if(ep->next_frame & 0x1) { -+ depctl.b.setd1pid = 1; -+ } else { -+ depctl.b.setd0pid = 1; -+ } -+ } -+ depctl.b.epena = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(addr, 0, depctl.d32); -+ depctl.d32 = dwc_read_reg32(addr); -+ -+ if(ep->is_in && core_if->dma_enable == 0) { -+ write_isoc_frame_data(core_if, ep); -+ } -+ -+} -+ -+#endif //DWC_EN_ISOC ---- /dev/null -+++ b/drivers/usb/dwc/otg_cil.h -@@ -0,0 +1,1106 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.h $ -+ * $Revision: #91 $ -+ * $Date: 2008/09/19 $ -+ * $Change: 1099526 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. 
-+ * ========================================================================== */ -+ -+#if !defined(__DWC_CIL_H__) -+#define __DWC_CIL_H__ -+ -+#include -+#include -+#include -+//#include -+ -+#include "otg_plat.h" -+#include "otg_regs.h" -+#ifdef DEBUG -+#include "linux/timer.h" -+#endif -+ -+/** -+ * @file -+ * This file contains the interface to the Core Interface Layer. -+ */ -+ -+ -+/** Macros defined for DWC OTG HW Release verison */ -+#define OTG_CORE_REV_2_00 0x4F542000 -+#define OTG_CORE_REV_2_60a 0x4F54260A -+#define OTG_CORE_REV_2_71a 0x4F54271A -+#define OTG_CORE_REV_2_72a 0x4F54272A -+ -+/** -+*/ -+typedef struct iso_pkt_info -+{ -+ uint32_t offset; -+ uint32_t length; -+ int32_t status; -+} iso_pkt_info_t; -+/** -+ * The dwc_ep structure represents the state of a single -+ * endpoint when acting in device mode. It contains the data items -+ * needed for an endpoint to be activated and transfer packets. -+ */ -+typedef struct dwc_ep -+{ -+ /** EP number used for register address lookup */ -+ uint8_t num; -+ /** EP direction 0 = OUT */ -+ unsigned is_in : 1; -+ /** EP active. */ -+ unsigned active : 1; -+ -+ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO -+ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/ -+ unsigned tx_fifo_num : 4; -+ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */ -+ unsigned type : 2; -+#define DWC_OTG_EP_TYPE_CONTROL 0 -+#define DWC_OTG_EP_TYPE_ISOC 1 -+#define DWC_OTG_EP_TYPE_BULK 2 -+#define DWC_OTG_EP_TYPE_INTR 3 -+ -+ /** DATA start PID for INTR and BULK EP */ -+ unsigned data_pid_start : 1; -+ /** Frame (even/odd) for ISOC EP */ -+ unsigned even_odd_frame : 1; -+ /** Max Packet bytes */ -+ unsigned maxpacket : 11; -+ -+ /** Max Transfer size */ -+ unsigned maxxfer : 16; -+ -+ /** @name Transfer state */ -+ /** @{ */ -+ -+ /** -+ * Pointer to the beginning of the transfer buffer -- do not modify -+ * during transfer. -+ */ -+ -+ uint32_t dma_addr; -+ -+ uint32_t dma_desc_addr; -+ dwc_otg_dma_desc_t* desc_addr; -+ -+ -+ uint8_t *start_xfer_buff; -+ /** pointer to the transfer buffer */ -+ uint8_t *xfer_buff; -+ /** Number of bytes to transfer */ -+ unsigned xfer_len : 19; -+ /** Number of bytes transferred. 
*/ -+ unsigned xfer_count : 19; -+ /** Sent ZLP */ -+ unsigned sent_zlp : 1; -+ /** Total len for control transfer */ -+ unsigned total_len : 19; -+ -+ /** stall clear flag */ -+ unsigned stall_clear_flag : 1; -+ -+ /** Allocated DMA Desc count */ -+ uint32_t desc_cnt; -+ -+ uint32_t aligned_dma_addr; -+ uint32_t aligned_buf_size; -+ uint8_t *aligned_buf; -+ -+ -+#ifdef DWC_EN_ISOC -+ /** -+ * Variables specific for ISOC EPs -+ * -+ */ -+ /** DMA addresses of ISOC buffers */ -+ uint32_t dma_addr0; -+ uint32_t dma_addr1; -+ -+ uint32_t iso_dma_desc_addr; -+ dwc_otg_dma_desc_t* iso_desc_addr; -+ -+ /** pointer to the transfer buffers */ -+ uint8_t *xfer_buff0; -+ uint8_t *xfer_buff1; -+ -+ /** number of ISOC Buffer is processing */ -+ uint32_t proc_buf_num; -+ /** Interval of ISOC Buffer processing */ -+ uint32_t buf_proc_intrvl; -+ /** Data size for regular frame */ -+ uint32_t data_per_frame; -+ -+ /* todo - pattern data support is to be implemented in the future */ -+ /** Data size for pattern frame */ -+ uint32_t data_pattern_frame; -+ /** Frame number of pattern data */ -+ uint32_t sync_frame; -+ -+ /** bInterval */ -+ uint32_t bInterval; -+ /** ISO Packet number per frame */ -+ uint32_t pkt_per_frm; -+ /** Next frame num for which will be setup DMA Desc */ -+ uint32_t next_frame; -+ /** Number of packets per buffer processing */ -+ uint32_t pkt_cnt; -+ /** Info for all isoc packets */ -+ iso_pkt_info_t *pkt_info; -+ /** current pkt number */ -+ uint32_t cur_pkt; -+ /** current pkt number */ -+ uint8_t *cur_pkt_addr; -+ /** current pkt number */ -+ uint32_t cur_pkt_dma_addr; -+#endif //DWC_EN_ISOC -+/** @} */ -+} dwc_ep_t; -+ -+/* -+ * Reasons for halting a host channel. -+ */ -+typedef enum dwc_otg_halt_status -+{ -+ DWC_OTG_HC_XFER_NO_HALT_STATUS, -+ DWC_OTG_HC_XFER_COMPLETE, -+ DWC_OTG_HC_XFER_URB_COMPLETE, -+ DWC_OTG_HC_XFER_ACK, -+ DWC_OTG_HC_XFER_NAK, -+ DWC_OTG_HC_XFER_NYET, -+ DWC_OTG_HC_XFER_STALL, -+ DWC_OTG_HC_XFER_XACT_ERR, -+ DWC_OTG_HC_XFER_FRAME_OVERRUN, -+ DWC_OTG_HC_XFER_BABBLE_ERR, -+ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR, -+ DWC_OTG_HC_XFER_AHB_ERR, -+ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, -+ DWC_OTG_HC_XFER_URB_DEQUEUE -+} dwc_otg_halt_status_e; -+ -+/** -+ * Host channel descriptor. This structure represents the state of a single -+ * host channel when acting in host mode. It contains the data items needed to -+ * transfer packets to an endpoint via a host channel. -+ */ -+typedef struct dwc_hc -+{ -+ /** Host channel number used for register address lookup */ -+ uint8_t hc_num; -+ -+ /** Device to access */ -+ unsigned dev_addr : 7; -+ -+ /** EP to access */ -+ unsigned ep_num : 4; -+ -+ /** EP direction. 0: OUT, 1: IN */ -+ unsigned ep_is_in : 1; -+ -+ /** -+ * EP speed. -+ * One of the following values: -+ * - DWC_OTG_EP_SPEED_LOW -+ * - DWC_OTG_EP_SPEED_FULL -+ * - DWC_OTG_EP_SPEED_HIGH -+ */ -+ unsigned speed : 2; -+#define DWC_OTG_EP_SPEED_LOW 0 -+#define DWC_OTG_EP_SPEED_FULL 1 -+#define DWC_OTG_EP_SPEED_HIGH 2 -+ -+ /** -+ * Endpoint type. -+ * One of the following values: -+ * - DWC_OTG_EP_TYPE_CONTROL: 0 -+ * - DWC_OTG_EP_TYPE_ISOC: 1 -+ * - DWC_OTG_EP_TYPE_BULK: 2 -+ * - DWC_OTG_EP_TYPE_INTR: 3 -+ */ -+ unsigned ep_type : 2; -+ -+ /** Max packet size in bytes */ -+ unsigned max_packet : 11; -+ -+ /** -+ * PID for initial transaction. -+ * 0: DATA0,
-+ * 1: DATA2,
-+ * 2: DATA1,
-+ * 3: MDATA (non-Control EP), -+ * SETUP (Control EP) -+ */ -+ unsigned data_pid_start : 2; -+#define DWC_OTG_HC_PID_DATA0 0 -+#define DWC_OTG_HC_PID_DATA2 1 -+#define DWC_OTG_HC_PID_DATA1 2 -+#define DWC_OTG_HC_PID_MDATA 3 -+#define DWC_OTG_HC_PID_SETUP 3 -+ -+ /** Number of periodic transactions per (micro)frame */ -+ unsigned multi_count: 2; -+ -+ /** @name Transfer State */ -+ /** @{ */ -+ -+ /** Pointer to the current transfer buffer position. */ -+ uint8_t *xfer_buff; -+ /** Total number of bytes to transfer. */ -+ uint32_t xfer_len; -+ /** Number of bytes transferred so far. */ -+ uint32_t xfer_count; -+ /** Packet count at start of transfer.*/ -+ uint16_t start_pkt_count; -+ -+ /** -+ * Flag to indicate whether the transfer has been started. Set to 1 if -+ * it has been started, 0 otherwise. -+ */ -+ uint8_t xfer_started; -+ -+ /** -+ * Set to 1 to indicate that a PING request should be issued on this -+ * channel. If 0, process normally. -+ */ -+ uint8_t do_ping; -+ -+ /** -+ * Set to 1 to indicate that the error count for this transaction is -+ * non-zero. Set to 0 if the error count is 0. -+ */ -+ uint8_t error_state; -+ -+ /** -+ * Set to 1 to indicate that this channel should be halted the next -+ * time a request is queued for the channel. This is necessary in -+ * slave mode if no request queue space is available when an attempt -+ * is made to halt the channel. -+ */ -+ uint8_t halt_on_queue; -+ -+ /** -+ * Set to 1 if the host channel has been halted, but the core is not -+ * finished flushing queued requests. Otherwise 0. -+ */ -+ uint8_t halt_pending; -+ -+ /** -+ * Reason for halting the host channel. -+ */ -+ dwc_otg_halt_status_e halt_status; -+ -+ /* -+ * Split settings for the host channel -+ */ -+ uint8_t do_split; /**< Enable split for the channel */ -+ uint8_t complete_split; /**< Enable complete split */ -+ uint8_t hub_addr; /**< Address of high speed hub */ -+ -+ uint8_t port_addr; /**< Port of the low/full speed device */ -+ /** Split transaction position -+ * One of the following values: -+ * - DWC_HCSPLIT_XACTPOS_MID -+ * - DWC_HCSPLIT_XACTPOS_BEGIN -+ * - DWC_HCSPLIT_XACTPOS_END -+ * - DWC_HCSPLIT_XACTPOS_ALL */ -+ uint8_t xact_pos; -+ -+ /** Set when the host channel does a short read. */ -+ uint8_t short_read; -+ -+ /** -+ * Number of requests issued for this channel since it was assigned to -+ * the current transfer (not counting PINGs). -+ */ -+ uint8_t requests; -+ -+ /** -+ * Queue Head for the transfer being processed by this channel. -+ */ -+ struct dwc_otg_qh *qh; -+ -+ /** @} */ -+ -+ /** Entry in list of host channels. */ -+ struct list_head hc_list_entry; -+} dwc_hc_t; -+ -+/** -+ * The following parameters may be specified when starting the module. These -+ * parameters define how the DWC_otg controller should be configured. -+ * Parameter values are passed to the CIL initialization function -+ * dwc_otg_cil_init. -+ */ -+typedef struct dwc_otg_core_params -+{ -+ int32_t opt; -+#define dwc_param_opt_default 1 -+ -+ /** -+ * Specifies the OTG capabilities. The driver will automatically -+ * detect the value for this parameter if none is specified. 
-+ * 0 - HNP and SRP capable (default) -+ * 1 - SRP Only capable -+ * 2 - No HNP/SRP capable -+ */ -+ int32_t otg_cap; -+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 -+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 -+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 -+//#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE -+#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE -+ -+ /** -+ * Specifies whether to use slave or DMA mode for accessing the data -+ * FIFOs. The driver will automatically detect the value for this -+ * parameter if none is specified. -+ * 0 - Slave -+ * 1 - DMA (default, if available) -+ */ -+ int32_t dma_enable; -+#define dwc_param_dma_enable_default 1 -+ -+ /** -+ * When DMA mode is enabled specifies whether to use address DMA or DMA Descritor mode for accessing the data -+ * FIFOs in device mode. The driver will automatically detect the value for this -+ * parameter if none is specified. -+ * 0 - address DMA -+ * 1 - DMA Descriptor(default, if available) -+ */ -+ int32_t dma_desc_enable; -+#define dwc_param_dma_desc_enable_default 0 -+ /** The DMA Burst size (applicable only for External DMA -+ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32) -+ */ -+ int32_t dma_burst_size; /* Translate this to GAHBCFG values */ -+//#define dwc_param_dma_burst_size_default 32 -+#define dwc_param_dma_burst_size_default 1 -+ -+ /** -+ * Specifies the maximum speed of operation in host and device mode. -+ * The actual speed depends on the speed of the attached device and -+ * the value of phy_type. The actual speed depends on the speed of the -+ * attached device. -+ * 0 - High Speed (default) -+ * 1 - Full Speed -+ */ -+ int32_t speed; -+#define dwc_param_speed_default 0 -+#define DWC_SPEED_PARAM_HIGH 0 -+#define DWC_SPEED_PARAM_FULL 1 -+ -+ /** Specifies whether low power mode is supported when attached -+ * to a Full Speed or Low Speed device in host mode. -+ * 0 - Don't support low power mode (default) -+ * 1 - Support low power mode -+ */ -+ int32_t host_support_fs_ls_low_power; -+#define dwc_param_host_support_fs_ls_low_power_default 0 -+ -+ /** Specifies the PHY clock rate in low power mode when connected to a -+ * Low Speed device in host mode. This parameter is applicable only if -+ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS -+ * then defaults to 6 MHZ otherwise 48 MHZ. -+ * -+ * 0 - 48 MHz -+ * 1 - 6 MHz -+ */ -+ int32_t host_ls_low_power_phy_clk; -+#define dwc_param_host_ls_low_power_phy_clk_default 0 -+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 -+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 -+ -+ /** -+ * 0 - Use cC FIFO size parameters -+ * 1 - Allow dynamic FIFO sizing (default) -+ */ -+ int32_t enable_dynamic_fifo; -+#define dwc_param_enable_dynamic_fifo_default 1 -+ -+ /** Total number of 4-byte words in the data FIFO memory. This -+ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic -+ * Tx FIFOs. -+ * 32 to 32768 (default 8192) -+ * Note: The total FIFO memory depth in the FPGA configuration is 8192. -+ */ -+ int32_t data_fifo_size; -+#define dwc_param_data_fifo_size_default 8192 -+ -+ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic -+ * FIFO sizing is enabled. 
-+ * 16 to 32768 (default 1064) -+ */ -+ int32_t dev_rx_fifo_size; -+//#define dwc_param_dev_rx_fifo_size_default 1064 -+#define dwc_param_dev_rx_fifo_size_default 0x100 -+ -+ /** -+ * Specifies whether dedicated transmit FIFOs are -+ * enabled for non periodic IN endpoints in device mode -+ * 0 - No -+ * 1 - Yes -+ */ -+ int32_t en_multiple_tx_fifo; -+#define dwc_param_en_multiple_tx_fifo_default 1 -+ -+ /** Number of 4-byte words in each of the Tx FIFOs in device -+ * mode when dynamic FIFO sizing is enabled. -+ * 4 to 768 (default 256) -+ */ -+ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS]; -+//#define dwc_param_dev_tx_fifo_size_default 256 -+#define dwc_param_dev_tx_fifo_size_default 0x80 -+ -+ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode -+ * when dynamic FIFO sizing is enabled. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t dev_nperio_tx_fifo_size; -+//#define dwc_param_dev_nperio_tx_fifo_size_default 1024 -+#define dwc_param_dev_nperio_tx_fifo_size_default 0x80 -+ -+ /** Number of 4-byte words in each of the periodic Tx FIFOs in device -+ * mode when dynamic FIFO sizing is enabled. -+ * 4 to 768 (default 256) -+ */ -+ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS]; -+//#define dwc_param_dev_perio_tx_fifo_size_default 256 -+#define dwc_param_dev_perio_tx_fifo_size_default 0x80 -+ -+ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic -+ * FIFO sizing is enabled. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t host_rx_fifo_size; -+//#define dwc_param_host_rx_fifo_size_default 1024 -+#define dwc_param_host_rx_fifo_size_default 0x292 -+ -+ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode -+ * when Dynamic FIFO sizing is enabled in the core. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t host_nperio_tx_fifo_size; -+//#define dwc_param_host_nperio_tx_fifo_size_default 1024 -+//#define dwc_param_host_nperio_tx_fifo_size_default 0x292 -+#define dwc_param_host_nperio_tx_fifo_size_default 0x80 -+ -+ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic -+ * FIFO sizing is enabled. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t host_perio_tx_fifo_size; -+//#define dwc_param_host_perio_tx_fifo_size_default 1024 -+#define dwc_param_host_perio_tx_fifo_size_default 0x292 -+ -+ /** The maximum transfer size supported in bytes. -+ * 2047 to 65,535 (default 65,535) -+ */ -+ int32_t max_transfer_size; -+#define dwc_param_max_transfer_size_default 65535 -+ -+ /** The maximum number of packets in a transfer. -+ * 15 to 511 (default 511) -+ */ -+ int32_t max_packet_count; -+#define dwc_param_max_packet_count_default 511 -+ -+ /** The number of host channel registers to use. -+ * 1 to 16 (default 12) -+ * Note: The FPGA configuration supports a maximum of 12 host channels. -+ */ -+ int32_t host_channels; -+//#define dwc_param_host_channels_default 12 -+#define dwc_param_host_channels_default 16 -+ -+ /** The number of endpoints in addition to EP0 available for device -+ * mode operations. -+ * 1 to 15 (default 6 IN and OUT) -+ * Note: The FPGA configuration supports a maximum of 6 IN and OUT -+ * endpoints in addition to EP0. -+ */ -+ int32_t dev_endpoints; -+//#define dwc_param_dev_endpoints_default 6 -+#define dwc_param_dev_endpoints_default 8 -+ -+ /** -+ * Specifies the type of PHY interface to use. By default, the driver -+ * will automatically detect the phy_type. 
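/* Quick budget check for the FIFO defaults chosen above (a back-of-the-
 * envelope sketch only; it assumes MAX_TX_FIFOS allows the full 15
 * dedicated IN FIFOs):
 *   device mode: 0x100 Rx + 0x80 non-periodic Tx + 15 * 0x80 dedicated Tx
 *              = 256 + 128 + 1920 = 2304 words
 *   host mode:   0x292 Rx + 0x80 non-periodic Tx + 0x292 periodic Tx
 *              = 658 + 128 + 658 = 1444 words
 * Both fit comfortably inside the 8192-word data_fifo_size default. */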
-+ * -+ * 0 - Full Speed PHY -+ * 1 - UTMI+ (default) -+ * 2 - ULPI -+ */ -+ int32_t phy_type; -+#define DWC_PHY_TYPE_PARAM_FS 0 -+#define DWC_PHY_TYPE_PARAM_UTMI 1 -+#define DWC_PHY_TYPE_PARAM_ULPI 2 -+#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI -+ -+ /** -+ * Specifies the UTMI+ Data Width. This parameter is -+ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI -+ * PHY_TYPE, this parameter indicates the data width between -+ * the MAC and the ULPI Wrapper.) Also, this parameter is -+ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set -+ * to "8 and 16 bits", meaning that the core has been -+ * configured to work at either data path width. -+ * -+ * 8 or 16 bits (default 16) -+ */ -+ int32_t phy_utmi_width; -+#define dwc_param_phy_utmi_width_default 16 -+ -+ /** -+ * Specifies whether the ULPI operates at double or single -+ * data rate. This parameter is only applicable if PHY_TYPE is -+ * ULPI. -+ * -+ * 0 - single data rate ULPI interface with 8 bit wide data -+ * bus (default) -+ * 1 - double data rate ULPI interface with 4 bit wide data -+ * bus -+ */ -+ int32_t phy_ulpi_ddr; -+#define dwc_param_phy_ulpi_ddr_default 0 -+ -+ /** -+ * Specifies whether to use the internal or external supply to -+ * drive the vbus with a ULPI phy. -+ */ -+ int32_t phy_ulpi_ext_vbus; -+#define DWC_PHY_ULPI_INTERNAL_VBUS 0 -+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 -+#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS -+ -+ /** -+ * Specifies whether to use the I2Cinterface for full speed PHY. This -+ * parameter is only applicable if PHY_TYPE is FS. -+ * 0 - No (default) -+ * 1 - Yes -+ */ -+ int32_t i2c_enable; -+#define dwc_param_i2c_enable_default 0 -+ -+ int32_t ulpi_fs_ls; -+#define dwc_param_ulpi_fs_ls_default 0 -+ -+ int32_t ts_dline; -+#define dwc_param_ts_dline_default 0 -+ -+ /** Thresholding enable flag- -+ * bit 0 - enable non-ISO Tx thresholding -+ * bit 1 - enable ISO Tx thresholding -+ * bit 2 - enable Rx thresholding -+ */ -+ uint32_t thr_ctl; -+#define dwc_param_thr_ctl_default 0 -+ -+ /** Thresholding length for Tx -+ * FIFOs in 32 bit DWORDs -+ */ -+ uint32_t tx_thr_length; -+#define dwc_param_tx_thr_length_default 64 -+ -+ /** Thresholding length for Rx -+ * FIFOs in 32 bit DWORDs -+ */ -+ uint32_t rx_thr_length; -+#define dwc_param_rx_thr_length_default 64 -+ -+ /** Per Transfer Interrupt -+ * mode enable flag -+ * 1 - Enabled -+ * 0 - Disabled -+ */ -+ uint32_t pti_enable; -+#define dwc_param_pti_enable_default 0 -+ -+ /** Molti Processor Interrupt -+ * mode enable flag -+ * 1 - Enabled -+ * 0 - Disabled -+ */ -+ uint32_t mpi_enable; -+#define dwc_param_mpi_enable_default 0 -+ -+} dwc_otg_core_params_t; -+ -+#ifdef DEBUG -+struct dwc_otg_core_if; -+typedef struct hc_xfer_info -+{ -+ struct dwc_otg_core_if *core_if; -+ dwc_hc_t *hc; -+} hc_xfer_info_t; -+#endif -+ -+/** -+ * The dwc_otg_core_if structure contains information needed to manage -+ * the DWC_otg controller acting in either host or device mode. It -+ * represents the programming view of the controller as a whole. -+ */ -+typedef struct dwc_otg_core_if -+{ -+ /** Parameters that define how the core should be configured.*/ -+ dwc_otg_core_params_t *core_params; -+ -+ /** Core Global registers starting at offset 000h. 
*/ -+ dwc_otg_core_global_regs_t *core_global_regs; -+ -+ /** Device-specific information */ -+ dwc_otg_dev_if_t *dev_if; -+ /** Host-specific information */ -+ dwc_otg_host_if_t *host_if; -+ -+ /** Value from SNPSID register */ -+ uint32_t snpsid; -+ -+ /* -+ * Set to 1 if the core PHY interface bits in USBCFG have been -+ * initialized. -+ */ -+ uint8_t phy_init_done; -+ -+ /* -+ * SRP Success flag, set by srp success interrupt in FS I2C mode -+ */ -+ uint8_t srp_success; -+ uint8_t srp_timer_started; -+ -+ /* Common configuration information */ -+ /** Power and Clock Gating Control Register */ -+ volatile uint32_t *pcgcctl; -+#define DWC_OTG_PCGCCTL_OFFSET 0xE00 -+ -+ /** Push/pop addresses for endpoints or host channels.*/ -+ uint32_t *data_fifo[MAX_EPS_CHANNELS]; -+#define DWC_OTG_DATA_FIFO_OFFSET 0x1000 -+#define DWC_OTG_DATA_FIFO_SIZE 0x1000 -+ -+ /** Total RAM for FIFOs (Bytes) */ -+ uint16_t total_fifo_size; -+ /** Size of Rx FIFO (Bytes) */ -+ uint16_t rx_fifo_size; -+ /** Size of Non-periodic Tx FIFO (Bytes) */ -+ uint16_t nperio_tx_fifo_size; -+ -+ -+ /** 1 if DMA is enabled, 0 otherwise. */ -+ uint8_t dma_enable; -+ -+ /** 1 if Descriptor DMA mode is enabled, 0 otherwise. */ -+ uint8_t dma_desc_enable; -+ -+ /** 1 if PTI Enhancement mode is enabled, 0 otherwise. */ -+ uint8_t pti_enh_enable; -+ -+ /** 1 if MPI Enhancement mode is enabled, 0 otherwise. */ -+ uint8_t multiproc_int_enable; -+ -+ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */ -+ uint8_t en_multiple_tx_fifo; -+ -+ /** Set to 1 if multiple packets of a high-bandwidth transfer is in -+ * process of being queued */ -+ uint8_t queuing_high_bandwidth; -+ -+ /** Hardware Configuration -- stored here for convenience.*/ -+ hwcfg1_data_t hwcfg1; -+ hwcfg2_data_t hwcfg2; -+ hwcfg3_data_t hwcfg3; -+ hwcfg4_data_t hwcfg4; -+ -+ /** Host and Device Configuration -- stored here for convenience.*/ -+ hcfg_data_t hcfg; -+ dcfg_data_t dcfg; -+ -+ /** The operational State, during transations -+ * (a_host>>a_peripherial and b_device=>b_host) this may not -+ * match the core but allows the software to determine -+ * transitions. -+ */ -+ uint8_t op_state; -+ -+ /** -+ * Set to 1 if the HCD needs to be restarted on a session request -+ * interrupt. This is required if no connector ID status change has -+ * occurred since the HCD was last disconnected. -+ */ -+ uint8_t restart_hcd_on_session_req; -+ -+ /** HCD callbacks */ -+ /** A-Device is a_host */ -+#define A_HOST (1) -+ /** A-Device is a_suspend */ -+#define A_SUSPEND (2) -+ /** A-Device is a_peripherial */ -+#define A_PERIPHERAL (3) -+ /** B-Device is operating as a Peripheral. */ -+#define B_PERIPHERAL (4) -+ /** B-Device is operating as a Host. 
*/ -+#define B_HOST (5) -+ -+ /** HCD callbacks */ -+ struct dwc_otg_cil_callbacks *hcd_cb; -+ /** PCD callbacks */ -+ struct dwc_otg_cil_callbacks *pcd_cb; -+ -+ /** Device mode Periodic Tx FIFO Mask */ -+ uint32_t p_tx_msk; -+ /** Device mode Periodic Tx FIFO Mask */ -+ uint32_t tx_msk; -+ -+ /** Workqueue object used for handling several interrupts */ -+ struct workqueue_struct *wq_otg; -+ -+ /** Work object used for handling "Connector ID Status Change" Interrupt */ -+ struct work_struct w_conn_id; -+ -+ /** Work object used for handling "Wakeup Detected" Interrupt */ -+ struct delayed_work w_wkp; -+ -+#ifdef DEBUG -+ uint32_t start_hcchar_val[MAX_EPS_CHANNELS]; -+ -+ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS]; -+ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS]; -+ -+ uint32_t hfnum_7_samples; -+ uint64_t hfnum_7_frrem_accum; -+ uint32_t hfnum_0_samples; -+ uint64_t hfnum_0_frrem_accum; -+ uint32_t hfnum_other_samples; -+ uint64_t hfnum_other_frrem_accum; -+#endif -+ -+ -+} dwc_otg_core_if_t; -+ -+/*We must clear S3C24XX_EINTPEND external interrupt register -+ * because after clearing in this register trigerred IRQ from -+ * H/W core in kernel interrupt can be occured again before OTG -+ * handlers clear all IRQ sources of Core registers because of -+ * timing latencies and Low Level IRQ Type. -+ */ -+ -+#ifdef CONFIG_MACH_IPMATE -+#define S3C2410X_CLEAR_EINTPEND() \ -+do { \ -+ if (!dwc_otg_read_core_intr(core_if)) { \ -+ __raw_writel(1UL << 11,S3C24XX_EINTPEND); \ -+ } \ -+} while (0) -+#else -+#define S3C2410X_CLEAR_EINTPEND() do { } while (0) -+#endif -+ -+/* -+ * The following functions are functions for works -+ * using during handling some interrupts -+ */ -+extern void w_conn_id_status_change(struct work_struct *p); -+extern void w_wakeup_detected(struct work_struct *p); -+ -+ -+/* -+ * The following functions support initialization of the CIL driver component -+ * and the DWC_otg controller. -+ */ -+extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, -+ dwc_otg_core_params_t *_core_params); -+extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ); -+extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ); -+ -+/** @name Device CIL Functions -+ * The following functions support managing the DWC_otg controller in device -+ * mode. 
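/* A minimal sketch, with a made-up helper name, of how a device-mode
 * transfer might be handed to the CIL using the dwc_ep_t fields defined
 * earlier and the entry points declared just below; the driver's real
 * request handling lives in its PCD code and also deals with DMA
 * descriptors, ZLPs and aligned buffers. */
static void sketch_start_in_transfer(dwc_otg_core_if_t *core_if,
                                     dwc_ep_t *ep, uint8_t *buf,
                                     uint32_t dma, uint32_t len)
{
        ep->is_in = 1;              /* IN endpoint: device to host */
        ep->start_xfer_buff = buf;  /* keep the original buffer pointer */
        ep->xfer_buff = buf;        /* current position in the buffer */
        ep->dma_addr = dma;         /* bus address, used when dma_enable=1 */
        ep->xfer_len = len;         /* total bytes to send (must fit 19 bits) */
        ep->xfer_count = 0;         /* nothing transferred yet */
        ep->sent_zlp = 0;
        dwc_otg_ep_start_transfer(core_if, ep);
}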
-+ */ -+/**@{*/ -+extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest); -+extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma); -+extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_dump_spram(dwc_otg_core_if_t *_core_if); -+#ifdef DWC_EN_ISOC -+extern void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); -+extern void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); -+#endif //DWC_EN_ISOC -+/**@}*/ -+ -+/** @name Host CIL Functions -+ * The following functions support managing the DWC_otg controller in host -+ * mode. -+ */ -+/**@{*/ -+extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, -+ dwc_hc_t *_hc, -+ dwc_otg_halt_status_e _halt_status); -+extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if); -+ -+/** -+ * This function Reads HPRT0 in preparation to modify. It keeps the -+ * WC bits 0 so that if they are read as 1, they won't clear when you -+ * write it back -+ */ -+static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if) -+{ -+ hprt0_data_t hprt0; -+ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0); -+ hprt0.b.prtena = 0; -+ hprt0.b.prtconndet = 0; -+ hprt0.b.prtenchng = 0; -+ hprt0.b.prtovrcurrchng = 0; -+ return hprt0.d32; -+} -+ -+extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if); -+/**@}*/ -+ -+/** @name Common CIL Functions -+ * The following functions support managing the DWC_otg controller in either -+ * device or host mode. 
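/* Typical use of dwc_otg_read_hprt0() above, sketched after the pattern the
 * interrupt handlers later in this patch use: read-modify-write HPRT0 so
 * the write-1-to-clear status bits are not cleared by accident. The helper
 * itself is illustrative, not part of the driver. */
static void sketch_port_power_on(dwc_otg_core_if_t *core_if)
{
        hprt0_data_t hprt0;

        hprt0.d32 = dwc_otg_read_hprt0(core_if);
        hprt0.b.prtpwr = 1;     /* drive VBUS on the root port */
        dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
}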
-+ */ -+/**@{*/ -+ -+extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, -+ uint8_t *dest, -+ uint16_t bytes); -+ -+extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if); -+ -+extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, -+ const int _num ); -+extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ); -+extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if ); -+ -+extern dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count); -+extern void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count); -+ -+/** -+ * This function returns the Core Interrupt register. -+ */ -+static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) & -+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); -+} -+ -+/** -+ * This function returns the OTG Interrupt register. -+ */ -+static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint)); -+} -+ -+/** -+ * This function reads the Device All Endpoints Interrupt register and -+ * returns the IN endpoint interrupt bits. -+ */ -+static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t v; -+ -+ if(core_if->multiproc_int_enable) { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); -+ } else { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); -+ } -+ return (v & 0xffff); -+ -+} -+ -+/** -+ * This function reads the Device All Endpoints Interrupt register and -+ * returns the OUT endpoint interrupt bits. 
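/* Sketch of how the per-endpoint interrupt bits returned by the helpers
 * above are usually consumed; this illustrative walker is not part of the
 * driver, whose real loop lives in its PCD interrupt code. */
static void sketch_walk_in_ep_intrs(dwc_otg_core_if_t *core_if)
{
        uint32_t ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
        uint32_t epnum = 0;

        while (ep_intr) {
                if (ep_intr & 0x1) {
                        /* service IN endpoint 'epnum' here, e.g. by
                         * consulting its DIEPINT bits via
                         * dwc_otg_read_dev_in_ep_intr() */
                }
                epnum++;
                ep_intr >>= 1;
        }
}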
-+ */ -+static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t v; -+ -+ if(core_if->multiproc_int_enable) { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); -+ } else { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); -+ } -+ -+ return ((v & 0xffff0000) >> 16); -+} -+ -+/** -+ * This function returns the Device IN EP Interrupt register -+ */ -+static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *core_if, -+ dwc_ep_t *ep) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ uint32_t v, msk, emp; -+ -+ if(core_if->multiproc_int_enable) { -+ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num]); -+ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); -+ msk |= ((emp >> ep->num) & 0x1) << 7; -+ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; -+ } else { -+ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); -+ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); -+ msk |= ((emp >> ep->num) & 0x1) << 7; -+ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; -+ } -+ -+ -+ return v; -+} -+/** -+ * This function returns the Device OUT EP Interrupt register -+ */ -+static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if, -+ dwc_ep_t *_ep) -+{ -+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; -+ uint32_t v; -+ doepmsk_data_t msk = { .d32 = 0 }; -+ -+ if(_core_if->multiproc_int_enable) { -+ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepeachintmsk[_ep->num]); -+ if(_core_if->pti_enh_enable) { -+ msk.b.pktdrpsts = 1; -+ } -+ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; -+ } else { -+ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk); -+ if(_core_if->pti_enh_enable) { -+ msk.b.pktdrpsts = 1; -+ } -+ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; -+ } -+ return v; -+} -+ -+/** -+ * This function returns the Host All Channel Interrupt register -+ */ -+static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint)); -+} -+ -+static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) -+{ -+ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint)); -+} -+ -+ -+/** -+ * This function returns the mode of the operation, host or device. -+ * -+ * @return 0 - Device Mode, 1 - Host Mode -+ */ -+static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1); -+} -+ -+static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE); -+} -+static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE); -+} -+ -+extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ); -+ -+ -+/**@}*/ -+ -+/** -+ * DWC_otg CIL callback structure. This structure allows the HCD and -+ * PCD to register functions used for starting and stopping the PCD -+ * and HCD for role change on for a DRD. 
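/* Sketch of how the host side might populate and register the callback
 * structure defined just below; the function and variable names here are
 * made up for illustration, and the real wiring happens in the driver's
 * HCD/PCD sources. */
static int sketch_hcd_start_cb(void *p)
{
        /* bring the host controller out of halt */
        return 0;
}

static int sketch_hcd_stop_cb(void *p)
{
        /* quiesce the host controller */
        return 0;
}

static dwc_otg_cil_callbacks_t sketch_hcd_cil_callbacks = {
        .start = sketch_hcd_start_cb,
        .stop = sketch_hcd_stop_cb,
        /* .disconnect, .resume_wakeup, .suspend, .session_start as needed */
};

/* ... at probe time, with a hypothetical 'hcd' pointer passed back via .p:
 *      dwc_otg_cil_register_hcd_callbacks(core_if, &sketch_hcd_cil_callbacks, hcd);
 */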
-+ */ -+typedef struct dwc_otg_cil_callbacks -+{ -+ /** Start function for role change */ -+ int (*start) (void *_p); -+ /** Stop Function for role change */ -+ int (*stop) (void *_p); -+ /** Disconnect Function for role change */ -+ int (*disconnect) (void *_p); -+ /** Resume/Remote wakeup Function */ -+ int (*resume_wakeup) (void *_p); -+ /** Suspend function */ -+ int (*suspend) (void *_p); -+ /** Session Start (SRP) */ -+ int (*session_start) (void *_p); -+ /** Pointer passed to start() and stop() */ -+ void *p; -+} dwc_otg_cil_callbacks_t; -+ -+extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, -+ dwc_otg_cil_callbacks_t *_cb, -+ void *_p); -+extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, -+ dwc_otg_cil_callbacks_t *_cb, -+ void *_p); -+#ifndef warn -+#define warn printk -+#endif -+ -+#endif -+ ---- /dev/null -+++ b/drivers/usb/dwc/otg_cil_intr.c -@@ -0,0 +1,852 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil_intr.c $ -+ * $Revision: #10 $ -+ * $Date: 2008/07/16 $ -+ * $Change: 1065567 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * -+ * The Core Interface Layer provides basic services for accessing and -+ * managing the DWC_otg hardware. These services are used by both the -+ * Host Controller Driver and the Peripheral Controller Driver. -+ * -+ * This file contains the Common Interrupt handlers. 
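/* The handlers in this file are normally reached from a single shared
 * interrupt line; the dispatcher below is only a sketch of that shape (the
 * driver is expected to register its own handler from otg_driver.c, not
 * this one). */
#include <linux/interrupt.h>

static irqreturn_t sketch_dwc_otg_common_irq(int irq, void *dev)
{
        dwc_otg_core_if_t *core_if = dev;
        int32_t handled = dwc_otg_handle_common_intr(core_if);

        return IRQ_RETVAL(handled);
}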
-+ */ -+#include "otg_plat.h" -+#include "otg_regs.h" -+#include "otg_cil.h" -+#include "otg_pcd.h" -+ -+#ifdef DEBUG -+inline const char *op_state_str(dwc_otg_core_if_t *core_if) -+{ -+ return (core_if->op_state==A_HOST?"a_host": -+ (core_if->op_state==A_SUSPEND?"a_suspend": -+ (core_if->op_state==A_PERIPHERAL?"a_peripheral": -+ (core_if->op_state==B_PERIPHERAL?"b_peripheral": -+ (core_if->op_state==B_HOST?"b_host": -+ "unknown"))))); -+} -+#endif -+ -+/** This function will log a debug message -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n", -+ dwc_otg_mode(core_if) ? "Host" : "Device"); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.modemismatch = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ return 1; -+} -+ -+/** Start the HCD. Helper function for using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_start(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->start) { -+ core_if->hcd_cb->start(core_if->hcd_cb->p); -+ } -+} -+/** Stop the HCD. Helper function for using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_stop(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->stop) { -+ core_if->hcd_cb->stop(core_if->hcd_cb->p); -+ } -+} -+/** Disconnect the HCD. Helper function for using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_disconnect(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->disconnect) { -+ core_if->hcd_cb->disconnect(core_if->hcd_cb->p); -+ } -+} -+/** Inform the HCD the a New Session has begun. Helper function for -+ * using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_session_start(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->session_start) { -+ core_if->hcd_cb->session_start(core_if->hcd_cb->p); -+ } -+} -+ -+/** Start the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void pcd_start(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->start) { -+ core_if->pcd_cb->start(core_if->pcd_cb->p); -+ } -+} -+/** Stop the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void pcd_stop(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->stop) { -+ core_if->pcd_cb->stop(core_if->pcd_cb->p); -+ } -+} -+/** Suspend the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void pcd_suspend(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->suspend) { -+ core_if->pcd_cb->suspend(core_if->pcd_cb->p); -+ } -+} -+/** Resume the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. 
-+ */ -+static inline void pcd_resume(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { -+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); -+ } -+} -+ -+/** -+ * This function handles the OTG Interrupts. It reads the OTG -+ * Interrupt Register (GOTGINT) to determine what interrupt has -+ * occurred. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ gotgint_data_t gotgint; -+ gotgctl_data_t gotgctl; -+ gintmsk_data_t gintmsk; -+ gotgint.d32 = dwc_read_reg32(&global_regs->gotgint); -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32, -+ op_state_str(core_if)); -+ //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32); -+ -+ if (gotgint.b.sesenddet) { -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "Session End Detected++ (%s)\n", -+ op_state_str(core_if)); -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ -+ if (core_if->op_state == B_HOST) { -+ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_start(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ core_if->op_state = B_PERIPHERAL; -+ } else { -+ dwc_otg_pcd_t *pcd; -+ -+ /* If not B_HOST and Device HNP still set. HNP -+ * Did not succeed!*/ -+ if (gotgctl.b.devhnpen) { -+ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n"); -+ DWC_ERROR("Device Not Connected/Responding!\n"); -+ } -+ -+ /* If Session End Detected the B-Cable has -+ * been disconnected. */ -+ /* Reset PCD and Gadget driver to a -+ * clean state. */ -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_stop(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ } -+ gotgctl.d32 = 0; -+ gotgctl.b.devhnpen = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, -+ gotgctl.d32, 0); -+ } -+ if (gotgint.b.sesreqsucstschng) { -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "Session Reqeust Success Status Change++\n"); -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ if (gotgctl.b.sesreqscs) { -+ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && -+ (core_if->core_params->i2c_enable)) { -+ core_if->srp_success = 1; -+ } -+ else { -+ dwc_otg_pcd_t *pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_resume(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ /* Clear Session Request */ -+ gotgctl.d32 = 0; -+ gotgctl.b.sesreq = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, -+ gotgctl.d32, 0); -+ } -+ } -+ } -+ if (gotgint.b.hstnegsucstschng) { -+ /* Print statements during the HNP interrupt handling -+ * can cause it to fail.*/ -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ if (gotgctl.b.hstnegscs) { -+ if (dwc_otg_is_host_mode(core_if)) { -+ dwc_otg_pcd_t *pcd; -+ -+ core_if->op_state = B_HOST; -+ /* -+ * Need to disable SOF interrupt immediately. -+ * When switching from device to host, the PCD -+ * interrupt handler won't handle the -+ * interrupt if host mode is already set. 
The -+ * HCD interrupt handler won't get called if -+ * the HCD state is HALT. This means that the -+ * interrupt does not get handled and Linux -+ * complains loudly. -+ */ -+ gintmsk.d32 = 0; -+ gintmsk.b.sofintr = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, -+ gintmsk.d32, 0); -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_stop(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ /* -+ * Initialize the Core for Host mode. -+ */ -+ hcd_start(core_if); -+ core_if->op_state = B_HOST; -+ } -+ } else { -+ gotgctl.d32 = 0; -+ gotgctl.b.hnpreq = 1; -+ gotgctl.b.devhnpen = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, -+ gotgctl.d32, 0); -+ DWC_DEBUGPL(DBG_ANY, "HNP Failed\n"); -+ DWC_ERROR("Device Not Connected/Responding\n"); -+ } -+ } -+ if (gotgint.b.hstnegdet) { -+ /* The disconnect interrupt is set at the same time as -+ * Host Negotiation Detected. During the mode -+ * switch all interrupts are cleared so the disconnect -+ * interrupt handler will not get executed. -+ */ -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "Host Negotiation Detected++ (%s)\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); -+ if (dwc_otg_is_device_mode(core_if)){ -+ dwc_otg_pcd_t *pcd; -+ -+ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n", core_if->op_state); -+ hcd_disconnect(core_if); -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_start(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ core_if->op_state = A_PERIPHERAL; -+ } else { -+ dwc_otg_pcd_t *pcd; -+ -+ /* -+ * Need to disable SOF interrupt immediately. When -+ * switching from device to host, the PCD interrupt -+ * handler won't handle the interrupt if host mode is -+ * already set. The HCD interrupt handler won't get -+ * called if the HCD state is HALT. This means that -+ * the interrupt does not get handled and Linux -+ * complains loudly. -+ */ -+ gintmsk.d32 = 0; -+ gintmsk.b.sofintr = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, -+ gintmsk.d32, 0); -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_stop(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ hcd_start(core_if); -+ core_if->op_state = A_HOST; -+ } -+ } -+ if (gotgint.b.adevtoutchng) { -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "A-Device Timeout Change++\n"); -+ } -+ if (gotgint.b.debdone) { -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "Debounce Done++\n"); -+ } -+ -+ /* Clear GOTGINT */ -+ dwc_write_reg32 (&core_if->core_global_regs->gotgint, gotgint.d32); -+ -+ return 1; -+} -+ -+ -+void w_conn_id_status_change(struct work_struct *p) -+{ -+ dwc_otg_core_if_t *core_if = container_of(p, dwc_otg_core_if_t, w_conn_id); -+ -+ uint32_t count = 0; -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ -+ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); -+ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32); -+ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts); -+ -+ /* B-Device connector (Device Mode) */ -+ if (gotgctl.b.conidsts) { -+ dwc_otg_pcd_t *pcd; -+ -+ /* Wait for switch to device mode. 
*/ -+ while (!dwc_otg_is_device_mode(core_if)){ -+ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); -+ MDELAY(100); -+ if (++count > 10000) *(uint32_t*)NULL=0; -+ } -+ core_if->op_state = B_PERIPHERAL; -+ dwc_otg_core_init(core_if); -+ dwc_otg_enable_global_interrupts(core_if); -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_start(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ } else { -+ /* A-Device connector (Host Mode) */ -+ while (!dwc_otg_is_host_mode(core_if)) { -+ DWC_PRINT("Waiting for Host Mode, Mode=%s\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); -+ MDELAY(100); -+ if (++count > 10000) *(uint32_t*)NULL=0; -+ } -+ core_if->op_state = A_HOST; -+ /* -+ * Initialize the Core for Host mode. -+ */ -+ dwc_otg_core_init(core_if); -+ dwc_otg_enable_global_interrupts(core_if); -+ hcd_start(core_if); -+ } -+} -+ -+ -+/** -+ * This function handles the Connector ID Status Change Interrupt. It -+ * reads the OTG Interrupt Register (GOTCTL) to determine whether this -+ * is a Device to Host Mode transition or a Host Mode to Device -+ * Transition. -+ * -+ * This only occurs when the cable is connected/removed from the PHY -+ * connector. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *core_if) -+{ -+ -+ /* -+ * Need to disable SOF interrupt immediately. If switching from device -+ * to host, the PCD interrupt handler won't handle the interrupt if -+ * host mode is already set. The HCD interrupt handler won't get -+ * called if the HCD state is HALT. This means that the interrupt does -+ * not get handled and Linux complains loudly. -+ */ -+ gintmsk_data_t gintmsk = { .d32 = 0 }; -+ gintsts_data_t gintsts = { .d32 = 0 }; -+ -+ gintmsk.b.sofintr = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0); -+ -+ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); -+ -+ /* -+ * Need to schedule a work, as there are possible DELAY function calls -+ */ -+ queue_work(core_if->wq_otg, &core_if->w_conn_id); -+ -+ /* Set flag and clear interrupt */ -+ gintsts.b.conidstschng = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that a device is initiating the Session -+ * Request Protocol to request the host to turn on bus power so a new -+ * session can begin. The handler responds by turning on bus power. If -+ * the DWC_otg controller is in low power mode, the handler brings the -+ * controller out of low power mode before turning on bus power. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t *core_if) -+{ -+ hprt0_data_t hprt0; -+ gintsts_data_t gintsts; -+ -+#ifndef DWC_HOST_ONLY -+ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n"); -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ DWC_PRINT("SRP: Device mode\n"); -+ } else { -+ DWC_PRINT("SRP: Host mode\n"); -+ -+ /* Turn on the port power bit. */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtpwr = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ -+ /* Start the Connection timer. 
So a message can be displayed -+ * if connect does not occur within 10 seconds. */ -+ hcd_session_start(core_if); -+ } -+#endif -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.sessreqintr = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+ -+void w_wakeup_detected(struct work_struct *p) -+{ -+ struct delayed_work *dw = container_of(p, struct delayed_work, work); -+ dwc_otg_core_if_t *core_if = container_of(dw, dwc_otg_core_if_t, w_wkp); -+ -+ /* -+ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms -+ * so that OPT tests pass with all PHYs). -+ */ -+ hprt0_data_t hprt0 = {.d32=0}; -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32); -+// MDELAY(70); -+ hprt0.b.prtres = 0; /* Resume */ -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(core_if->host_if->hprt0)); -+} -+/** -+ * This interrupt indicates that the DWC_otg controller has detected a -+ * resume or remote wakeup sequence. If the DWC_otg controller is in -+ * low power mode, the handler must brings the controller out of low -+ * power mode. The controller automatically begins resume -+ * signaling. The handler schedules a time to stop resume signaling. -+ */ -+int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n"); -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ dctl_data_t dctl = {.d32=0}; -+ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts)); -+#ifdef PARTIAL_POWER_DOWN -+ if (core_if->hwcfg4.b.power_optimiz) { -+ pcgcctl_data_t power = {.d32=0}; -+ -+ power.d32 = dwc_read_reg32(core_if->pcgcctl); -+ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32); -+ -+ power.b.stoppclk = 0; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ -+ power.b.pwrclmp = 0; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ -+ power.b.rstpdwnmodule = 0; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ } -+#endif -+ /* Clear the Remote Wakeup Signalling */ -+ dctl.b.rmtwkupsig = 1; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, -+ dctl.d32, 0); -+ -+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { -+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); -+ } -+ -+ } else { -+ pcgcctl_data_t pcgcctl = {.d32=0}; -+ -+ /* Restart the Phy Clock */ -+ pcgcctl.b.stoppclk = 1; -+ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); -+ -+ queue_delayed_work(core_if->wq_otg, &core_if->w_wkp, ((70 * HZ / 1000) + 1)); -+ } -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.wkupintr = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that a device has been disconnected from -+ * the root port. -+ */ -+int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Device"), -+ op_state_str(core_if)); -+ -+/** @todo Consolidate this if statement. */ -+#ifndef DWC_HOST_ONLY -+ if (core_if->op_state == B_HOST) { -+ dwc_otg_pcd_t *pcd; -+ -+ /* If in device mode Disconnect and stop the HCD, then -+ * start the PCD. 
*/ -+ hcd_disconnect(core_if); -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_start(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ core_if->op_state = B_PERIPHERAL; -+ } else if (dwc_otg_is_device_mode(core_if)) { -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); -+ if (gotgctl.b.hstsethnpen==1) { -+ /* Do nothing, if HNP in process the OTG -+ * interrupt "Host Negotiation Detected" -+ * interrupt will do the mode switch. -+ */ -+ } else if (gotgctl.b.devhnpen == 0) { -+ dwc_otg_pcd_t *pcd; -+ -+ /* If in device mode Disconnect and stop the HCD, then -+ * start the PCD. */ -+ hcd_disconnect(core_if); -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_start(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ -+ core_if->op_state = B_PERIPHERAL; -+ } else { -+ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n"); -+ } -+ } else { -+ if (core_if->op_state == A_HOST) { -+ /* A-Cable still connected but device disconnected. */ -+ hcd_disconnect(core_if); -+ } -+ } -+#endif -+ -+ gintsts.d32 = 0; -+ gintsts.b.disconnect = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ return 1; -+} -+/** -+ * This interrupt indicates that SUSPEND state has been detected on -+ * the USB. -+ * -+ * For HNP the USB Suspend interrupt signals the change from -+ * "a_peripheral" to "a_host". -+ * -+ * When power management is enabled the core will be put in low power -+ * mode. -+ */ -+int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *core_if) -+{ -+ dsts_data_t dsts; -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n"); -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ dwc_otg_pcd_t *pcd; -+ -+ /* Check the Device status register to determine if the Suspend -+ * state is active. */ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32); -+ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d " -+ "HWCFG4.power Optimize=%d\n", -+ dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz); -+ -+ -+#ifdef PARTIAL_POWER_DOWN -+/** @todo Add a module parameter for power management. */ -+ if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) { -+ pcgcctl_data_t power = {.d32=0}; -+ DWC_DEBUGPL(DBG_CIL, "suspend\n"); -+ -+ power.b.pwrclmp = 1; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ -+ power.b.rstpdwnmodule = 1; -+ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); -+ -+ power.b.stoppclk = 1; -+ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); -+ } else { -+ DWC_DEBUGPL(DBG_ANY,"disconnect?\n"); -+ } -+#endif -+ /* PCD callback for suspend. */ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_suspend(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ } else { -+ if (core_if->op_state == A_PERIPHERAL) { -+ dwc_otg_pcd_t *pcd; -+ -+ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n"); -+ /* Clear the a_peripheral flag, back to a_host. 
*/ -+ -+ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; -+ if(unlikely(!pcd)) { -+ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); -+ BUG(); -+ } -+ SPIN_LOCK(&pcd->lock); -+ -+ pcd_stop(core_if); -+ -+ SPIN_UNLOCK(&pcd->lock); -+ -+ hcd_start(core_if); -+ core_if->op_state = A_HOST; -+ } -+ } -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.usbsuspend = 1; -+ dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+ -+/** -+ * This function returns the Core Interrupt register. -+ */ -+static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ gintmsk_data_t gintmsk; -+ gintmsk_data_t gintmsk_common = {.d32=0}; -+ gintmsk_common.b.wkupintr = 1; -+ gintmsk_common.b.sessreqintr = 1; -+ gintmsk_common.b.conidstschng = 1; -+ gintmsk_common.b.otgintr = 1; -+ gintmsk_common.b.modemismatch = 1; -+ gintmsk_common.b.disconnect = 1; -+ gintmsk_common.b.usbsuspend = 1; -+ /** @todo: The port interrupt occurs while in device -+ * mode. Added code to CIL to clear the interrupt for now! -+ */ -+ gintmsk_common.b.portintr = 1; -+ -+ gintsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gintsts); -+ gintmsk.d32 = dwc_read_reg32(&core_if->core_global_regs->gintmsk); -+#ifdef DEBUG -+ /* if any common interrupts set */ -+ if (gintsts.d32 & gintmsk_common.d32) { -+ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n", -+ gintsts.d32, gintmsk.d32); -+ } -+#endif -+ -+ return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32); -+ -+} -+ -+/** -+ * Common interrupt handler. -+ * -+ * The common interrupts are those that occur in both Host and Device mode. -+ * This handler handles the following interrupts: -+ * - Mode Mismatch Interrupt -+ * - Disconnect Interrupt -+ * - OTG Interrupt -+ * - Connector ID Status Change Interrupt -+ * - Session Request Interrupt. -+ * - Resume / Remote Wakeup Detected Interrupt. -+ * -+ */ -+int32_t dwc_otg_handle_common_intr(dwc_otg_core_if_t *core_if) -+{ -+ int retval = 0; -+ gintsts_data_t gintsts; -+ -+ gintsts.d32 = dwc_otg_read_common_intr(core_if); -+ -+ if (gintsts.b.modemismatch) { -+ retval |= dwc_otg_handle_mode_mismatch_intr(core_if); -+ } -+ if (gintsts.b.otgintr) { -+ retval |= dwc_otg_handle_otg_intr(core_if); -+ } -+ if (gintsts.b.conidstschng) { -+ retval |= dwc_otg_handle_conn_id_status_change_intr(core_if); -+ } -+ if (gintsts.b.disconnect) { -+ retval |= dwc_otg_handle_disconnect_intr(core_if); -+ } -+ if (gintsts.b.sessreqintr) { -+ retval |= dwc_otg_handle_session_req_intr(core_if); -+ } -+ if (gintsts.b.wkupintr) { -+ retval |= dwc_otg_handle_wakeup_detected_intr(core_if); -+ } -+ if (gintsts.b.usbsuspend) { -+ retval |= dwc_otg_handle_usb_suspend_intr(core_if); -+ } -+ if (gintsts.b.portintr && dwc_otg_is_device_mode(core_if)) { -+ /* The port interrupt occurs while in device mode with HPRT0 -+ * Port Enable/Disable. 
-+ */ -+ gintsts.d32 = 0; -+ gintsts.b.portintr = 1; -+ dwc_write_reg32(&core_if->core_global_regs->gintsts, -+ gintsts.d32); -+ retval |= 1; -+ -+ } -+ -+ S3C2410X_CLEAR_EINTPEND(); -+ -+ return retval; -+} ---- /dev/null -+++ b/drivers/usb/dwc/otg_driver.c -@@ -0,0 +1,960 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.c $ -+ * $Revision: #63 $ -+ * $Date: 2008/09/24 $ -+ * $Change: 1101777 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * The dwc_otg_driver module provides the initialization and cleanup entry -+ * points for the DWC_otg driver. This module will be dynamically installed -+ * after Linux is booted using the insmod command. When the module is -+ * installed, the dwc_otg_driver_init function is called. When the module is -+ * removed (using rmmod), the dwc_otg_driver_cleanup function is called. -+ * -+ * This module also defines a data structure for the dwc_otg_driver, which is -+ * used in conjunction with the standard ARM lm_device structure. These -+ * structures allow the OTG driver to comply with the standard Linux driver -+ * model in which devices and drivers are registered with a bus driver. This -+ * has the benefit that Linux can expose attributes of the driver and device -+ * in its special sysfs file system. Users can then read or write files in -+ * this file system to perform diagnostics on the driver components or the -+ * device. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include /* permission constants */ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "otg_plat.h" -+#include "otg_attr.h" -+#include "otg_driver.h" -+#include "otg_cil.h" -+#include "otg_pcd.h" -+#include "otg_hcd.h" -+ -+#define DWC_DRIVER_VERSION "2.72a 24-JUN-2008" -+#define DWC_DRIVER_DESC "HS OTG USB Controller driver" -+ -+static const char dwc_driver_name[] = "dwc_otg"; -+ -+/*-------------------------------------------------------------------------*/ -+/* Encapsulate the module parameter settings */ -+ -+static dwc_otg_core_params_t dwc_otg_module_params = { -+ .opt = -1, -+ .otg_cap = -1, -+ .dma_enable = -1, -+ .dma_desc_enable = -1, -+ .dma_burst_size = -1, -+ .speed = -1, -+ .host_support_fs_ls_low_power = -1, -+ .host_ls_low_power_phy_clk = -1, -+ .enable_dynamic_fifo = -1, -+ .data_fifo_size = -1, -+ .dev_rx_fifo_size = -1, -+ .dev_nperio_tx_fifo_size = -1, -+ .dev_perio_tx_fifo_size = { -+ /* dev_perio_tx_fifo_size_1 */ -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1 -+ /* 15 */ -+ }, -+ .host_rx_fifo_size = -1, -+ .host_nperio_tx_fifo_size = -1, -+ .host_perio_tx_fifo_size = -1, -+ .max_transfer_size = -1, -+ .max_packet_count = -1, -+ .host_channels = -1, -+ .dev_endpoints = -1, -+ .phy_type = -1, -+ .phy_utmi_width = -1, -+ .phy_ulpi_ddr = -1, -+ .phy_ulpi_ext_vbus = -1, -+ .i2c_enable = -1, -+ .ulpi_fs_ls = -1, -+ .ts_dline = -1, -+ .en_multiple_tx_fifo = -1, -+ .dev_tx_fifo_size = { -+ /* dev_tx_fifo_size */ -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1 -+ /* 15 */ -+ }, -+ .thr_ctl = -1, -+ .tx_thr_length = -1, -+ .rx_thr_length = -1, -+ .pti_enable = -1, -+ .mpi_enable = -1, -+}; -+ -+/** -+ * Global Debug Level Mask. -+ */ -+uint32_t g_dbg_lvl = 0; /* OFF */ -+ -+/** -+ * This function is called during module intialization to verify that -+ * the module parameters are in a valid state. -+ */ -+static int check_parameters(dwc_otg_core_if_t *core_if) -+{ -+ int i; -+ int retval = 0; -+ -+/* Checks if the parameter is outside of its valid range of values */ -+#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \ -+ ((dwc_otg_module_params._param_ < (_low_)) || \ -+ (dwc_otg_module_params._param_ > (_high_))) -+ -+/* If the parameter has been set by the user, check that the parameter value is -+ * within the value range of values. If not, report a module error. 
*/ -+#define DWC_OTG_PARAM_ERR(_param_, _low_, _high_, _string_) \ -+ do { \ -+ if (dwc_otg_module_params._param_ != -1) { \ -+ if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \ -+ DWC_ERROR("`%d' invalid for parameter `%s'\n", \ -+ dwc_otg_module_params._param_, _string_); \ -+ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ -+ retval++; \ -+ } \ -+ } \ -+ } while (0) -+ -+ DWC_OTG_PARAM_ERR(opt,0,1,"opt"); -+ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap"); -+ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable"); -+ DWC_OTG_PARAM_ERR(dma_desc_enable,0,1,"dma_desc_enable"); -+ DWC_OTG_PARAM_ERR(speed,0,1,"speed"); -+ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power"); -+ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk"); -+ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo"); -+ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size"); -+ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size"); -+ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size"); -+ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size"); -+ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size"); -+ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size"); -+ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size"); -+ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count"); -+ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels"); -+ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints"); -+ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type"); -+ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr"); -+ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus"); -+ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable"); -+ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls"); -+ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline"); -+ -+ if (dwc_otg_module_params.dma_burst_size != -1) { -+ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,4,4) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,256,256)) { -+ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n", -+ dwc_otg_module_params.dma_burst_size); -+ dwc_otg_module_params.dma_burst_size = 32; -+ retval++; -+ } -+ -+ { -+ uint8_t brst_sz = 0; -+ while(dwc_otg_module_params.dma_burst_size > 1) { -+ brst_sz ++; -+ dwc_otg_module_params.dma_burst_size >>= 1; -+ } -+ dwc_otg_module_params.dma_burst_size = brst_sz; -+ } -+ } -+ -+ if (dwc_otg_module_params.phy_utmi_width != -1) { -+ if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8) && -+ DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) { -+ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n", -+ dwc_otg_module_params.phy_utmi_width); -+ dwc_otg_module_params.phy_utmi_width = 16; -+ retval++; -+ } -+ } -+ -+ for (i = 0; i < 15; i++) { -+ /** @todo should be like above */ -+ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i], 4, 768, "dev_perio_tx_fifo_size"); -+ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) { -+ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i], 4, 768)) { -+ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i); -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; -+ 
retval++; -+ } -+ } -+ } -+ -+ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo"); -+ -+ for (i = 0; i < 15; i++) { -+ /** @todo should be like above */ -+ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i], 4, 768, "dev_tx_fifo_size"); -+ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) { -+ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) { -+ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", -+ dwc_otg_module_params.dev_tx_fifo_size[i], "dev_tx_fifo_size", i); -+ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; -+ retval++; -+ } -+ } -+ } -+ -+ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl"); -+ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length"); -+ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length"); -+ -+ DWC_OTG_PARAM_ERR(pti_enable,0,1,"pti_enable"); -+ DWC_OTG_PARAM_ERR(mpi_enable,0,1,"mpi_enable"); -+ -+ /* At this point, all module parameters that have been set by the user -+ * are valid, and those that have not are left unset. Now set their -+ * default values and/or check the parameters against the hardware -+ * configurations of the OTG core. */ -+ -+/* This sets the parameter to the default value if it has not been set by the -+ * user */ -+#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \ -+ ({ \ -+ int changed = 1; \ -+ if (dwc_otg_module_params._param_ == -1) { \ -+ changed = 0; \ -+ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ -+ } \ -+ changed; \ -+ }) -+ -+/* This checks the macro agains the hardware configuration to see if it is -+ * valid. It is possible that the default value could be invalid. In this -+ * case, it will report a module error if the user touched the parameter. -+ * Otherwise it will adjust the value without any error. */ -+#define DWC_OTG_PARAM_CHECK_VALID(_param_, _str_, _is_valid_, _set_valid_) \ -+ ({ \ -+ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \ -+ int error = 0; \ -+ if (!(_is_valid_)) { \ -+ if (changed) { \ -+ DWC_ERROR("`%d' invalid for parameter `%s'. Check HW configuration.\n", dwc_otg_module_params._param_, _str_); \ -+ error = 1; \ -+ } \ -+ dwc_otg_module_params._param_ = (_set_valid_); \ -+ } \ -+ error; \ -+ }) -+ -+ /* OTG Cap */ -+ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap", -+ ({ -+ int valid; -+ valid = 1; -+ switch (dwc_otg_module_params.otg_cap) { -+ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE: -+ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) -+ valid = 0; -+ break; -+ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE: -+ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) && -+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) && -+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) && -+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { -+ valid = 0; -+ } -+ break; -+ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE: -+ /* always valid */ -+ break; -+ } -+ valid; -+ }), -+ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) || -+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) || -+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || -+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ? -+ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE : -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable", -+ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 
0 : 1, -+ 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dma_desc_enable, "dma_desc_enable", -+ ((dwc_otg_module_params.dma_desc_enable == 1) && -+ ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.desc_dma == 0))) ? 0 : 1, -+ 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0); -+ -+ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power, -+ "host_support_fs_ls_low_power", -+ 1, 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo, -+ "enable_dynamic_fifo", -+ ((dwc_otg_module_params.enable_dynamic_fifo == 0) || -+ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, -+ "data_fifo_size", -+ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth), -+ core_if->hwcfg3.b.dfifo_depth); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, -+ "dev_rx_fifo_size", -+ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), -+ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size, -+ "dev_nperio_tx_fifo_size", -+ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), -+ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, -+ "host_rx_fifo_size", -+ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), -+ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size, -+ "host_nperio_tx_fifo_size", -+ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), -+ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size, -+ "host_perio_tx_fifo_size", -+ (dwc_otg_module_params.host_perio_tx_fifo_size <= ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))), -+ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, -+ "max_transfer_size", -+ (dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))), -+ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, -+ "max_packet_count", -+ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))), -+ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, -+ "host_channels", -+ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)), -+ (core_if->hwcfg2.b.num_host_chan + 1)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, -+ "dev_endpoints", -+ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)), -+ core_if->hwcfg2.b.num_dev_ep); -+ -+/* -+ * Define the following to disable the FS PHY Hardware checking. This is for -+ * internal testing only. 
-+ * -+ * #define NO_FS_PHY_HW_CHECKS -+ */ -+ -+#ifdef NO_FS_PHY_HW_CHECKS -+ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, -+ "phy_type", 1, 0); -+#else -+ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, -+ "phy_type", -+ ({ -+ int valid = 0; -+ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) && -+ ((core_if->hwcfg2.b.hs_phy_type == 1) || -+ (core_if->hwcfg2.b.hs_phy_type == 3))) { -+ valid = 1; -+ } -+ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) && -+ ((core_if->hwcfg2.b.hs_phy_type == 2) || -+ (core_if->hwcfg2.b.hs_phy_type == 3))) { -+ valid = 1; -+ } -+ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) && -+ (core_if->hwcfg2.b.fs_phy_type == 1)) { -+ valid = 1; -+ } -+ valid; -+ }), -+ ({ -+ int set = DWC_PHY_TYPE_PARAM_FS; -+ if (core_if->hwcfg2.b.hs_phy_type) { -+ if ((core_if->hwcfg2.b.hs_phy_type == 3) || -+ (core_if->hwcfg2.b.hs_phy_type == 1)) { -+ set = DWC_PHY_TYPE_PARAM_UTMI; -+ } -+ else { -+ set = DWC_PHY_TYPE_PARAM_ULPI; -+ } -+ } -+ set; -+ })); -+#endif -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed", -+ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1, -+ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk, -+ "host_ls_low_power_phy_clk", -+ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1), -+ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)); -+ -+ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr); -+ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus); -+ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width); -+ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls); -+ DWC_OTG_PARAM_SET_DEFAULT(ts_dline); -+ -+#ifdef NO_FS_PHY_HW_CHECKS -+ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0); -+#else -+ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, -+ "i2c_enable", -+ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 0 : 1, -+ 0); -+#endif -+ -+ for (i = 0; i < 15; i++) { -+ int changed = 1; -+ int error = 0; -+ -+ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) { -+ changed = 0; -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; -+ } -+ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { -+ if (changed) { -+ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i], i); -+ error = 1; -+ } -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); -+ } -+ retval += error; -+ } -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo, "en_multiple_tx_fifo", -+ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) && (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 
0 : 1,
-+ 0);
-+
-+ for (i = 0; i < 15; i++) {
-+ int changed = 1;
-+ int error = 0;
-+
-+ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) {
-+ changed = 0;
-+ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default;
-+ }
-+ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
-+ if (changed) {
-+ DWC_ERROR("`%d' invalid for parameter `dev_tx_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_tx_fifo_size[i], i);
-+ error = 1;
-+ }
-+ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
-+ }
-+ retval += error;
-+ }
-+
-+ retval += DWC_OTG_PARAM_CHECK_VALID(thr_ctl, "thr_ctl",
-+ ((dwc_otg_module_params.thr_ctl != 0) && ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.ded_fifo_en == 0))) ? 0 : 1,
-+ 0);
-+
-+ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length);
-+ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length);
-+
-+ retval += DWC_OTG_PARAM_CHECK_VALID(pti_enable, "pti_enable",
-+ ((dwc_otg_module_params.pti_enable == 0) || ((dwc_otg_module_params.pti_enable == 1) && (core_if->snpsid >= 0x4F54272A))) ? 1 : 0,
-+ 0);
-+
-+ retval += DWC_OTG_PARAM_CHECK_VALID(mpi_enable, "mpi_enable",
-+ ((dwc_otg_module_params.mpi_enable == 0) || ((dwc_otg_module_params.mpi_enable == 1) && (core_if->hwcfg2.b.multi_proc_int == 1))) ? 1 : 0,
-+ 0);
-+ return retval;
-+}
-+
-+/**
-+ * This function is the top level interrupt handler for the Common
-+ * (Device and host modes) interrupts.
-+ */
-+static irqreturn_t dwc_otg_common_irq(int irq, void *dev)
-+{
-+ dwc_otg_device_t *otg_dev = dev;
-+ int32_t retval = IRQ_NONE;
-+
-+ retval = dwc_otg_handle_common_intr(otg_dev->core_if);
-+ return IRQ_RETVAL(retval);
-+}
-+
-+/**
-+ * This function is called when an lm_device is unregistered with the
-+ * dwc_otg_driver. This happens, for example, when the rmmod command is
-+ * executed. The device may or may not be electrically present. If it is
-+ * present, the driver stops device processing. Any resources used on behalf
-+ * of this device are freed.
-+ *
-+ * @param[in] lmdev
-+ */
-+static int __devexit dwc_otg_driver_remove(struct platform_device *pdev)
-+{
-+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev);
-+ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, pdev);
-+
-+ if (!otg_dev) {
-+ /* Memory allocation for the dwc_otg_device failed. */
-+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__);
-+ return 0;
-+ }
-+
-+ /*
-+ * Free the IRQ
-+ */
-+ if (otg_dev->common_irq_installed) {
-+ free_irq(otg_dev->irq, otg_dev);
-+ }
-+
-+#ifndef DWC_DEVICE_ONLY
-+ if (otg_dev->hcd) {
-+ dwc_otg_hcd_remove(pdev);
-+ } else {
-+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__);
-+ return 0;
-+ }
-+#endif
-+
-+#ifndef DWC_HOST_ONLY
-+ if (otg_dev->pcd) {
-+ dwc_otg_pcd_remove(pdev);
-+ }
-+#endif
-+ if (otg_dev->core_if) {
-+ dwc_otg_cil_remove(otg_dev->core_if);
-+ }
-+
-+ /*
-+ * Remove the device attributes
-+ */
-+ dwc_otg_attr_remove(pdev);
-+
-+ /*
-+ * Return the memory.
-+ */
-+ if (otg_dev->base) {
-+ iounmap(otg_dev->base);
-+ }
-+ kfree(otg_dev);
-+
-+ /*
-+ * Clear the drvdata pointer.
-+ */
-+ platform_set_drvdata(pdev, 0);
-+
-+ return 0;
-+}
-+
-+/**
-+ * This function is called when an lm_device is bound to a
-+ * dwc_otg_driver. It creates the driver components required to
-+ * control the device (CIL, HCD, and PCD) and it initializes the
-+ * device. The driver components are stored in a dwc_otg_device
-+ * structure. A reference to the dwc_otg_device is saved in the
-+ * lm_device. This allows the driver to access the dwc_otg_device
-+ * structure on subsequent calls to driver methods for this device.
-+ *
-+ * @param[in] lmdev lm_device definition
-+ */
-+static int __devinit dwc_otg_driver_probe(struct platform_device *pdev)
-+{
-+ struct device *dev = &pdev->dev;
-+ int retval = 0;
-+ uint32_t snpsid;
-+ dwc_otg_device_t *dwc_otg_device;
-+ struct resource *res;
-+
-+ dev_dbg(dev, "dwc_otg_driver_probe(%p)\n", pdev);
-+
-+ dwc_otg_device = kmalloc(sizeof(dwc_otg_device_t), GFP_KERNEL);
-+
-+ if (!dwc_otg_device) {
-+ dev_err(dev, "kmalloc of dwc_otg_device failed\n");
-+ retval = -ENOMEM;
-+ goto fail;
-+ }
-+
-+ memset(dwc_otg_device, 0, sizeof(*dwc_otg_device));
-+ dwc_otg_device->reg_offset = 0xFFFFFFFF;
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ if (!res) {
-+ dev_err(dev, "Found OTG with no register addr.\n");
-+ retval = -ENODEV;
-+ goto fail;
-+ }
-+ dwc_otg_device->rsrc_start = res->start;
-+ dwc_otg_device->rsrc_len = res->end - res->start + 1;
-+
-+ dwc_otg_device->base = ioremap(dwc_otg_device->rsrc_start, dwc_otg_device->rsrc_len);
-+
-+ if (!dwc_otg_device->base) {
-+ dev_err(dev, "ioremap() failed\n");
-+ retval = -ENOMEM;
-+ goto fail;
-+ }
-+ dev_dbg(dev, "base=0x%08x\n", (unsigned)dwc_otg_device->base);
-+
-+ /*
-+ * Attempt to ensure this device is really a DWC_otg Controller.
-+ * Read and verify the SNPSID register contents. The value should be
-+ * 0x45F42XXX, which corresponds to "OT2", as in "OTG version 2.XX".
-+ */
-+ snpsid = dwc_read_reg32((uint32_t *)((uint8_t *)dwc_otg_device->base + 0x40));
-+
-+ if ((snpsid & 0xFFFFF000) != OTG_CORE_REV_2_00) {
-+ dev_err(dev, "Bad value for SNPSID: 0x%08x\n", snpsid);
-+ retval = -EINVAL;
-+ goto fail;
-+ }
-+
-+ DWC_PRINT("Core Release: %x.%x%x%x\n",
-+ (snpsid >> 12 & 0xF),
-+ (snpsid >> 8 & 0xF),
-+ (snpsid >> 4 & 0xF),
-+ (snpsid & 0xF));
-+
-+ /*
-+ * Initialize driver data to point to the global DWC_otg
-+ * Device structure.
-+ */
-+ platform_set_drvdata(pdev, dwc_otg_device);
-+
-+ dev_dbg(dev, "dwc_otg_device=0x%p\n", dwc_otg_device);
-+
-+ dwc_otg_device->core_if = dwc_otg_cil_init(dwc_otg_device->base,
-+ &dwc_otg_module_params);
-+
-+ if (!dwc_otg_device->core_if) {
-+ dev_err(dev, "CIL initialization failed!\n");
-+ retval = -ENOMEM;
-+ goto fail;
-+ }
-+
-+ dwc_otg_device->core_if->snpsid = snpsid;
-+
-+ /*
-+ * Validate parameter values.
-+ */
-+ if (check_parameters(dwc_otg_device->core_if)) {
-+ retval = -EINVAL;
-+ goto fail;
-+ }
-+
-+ /*
-+ * Create Device Attributes in sysfs
-+ */
-+ dwc_otg_attr_create(pdev);
-+
-+ /*
-+ * Disable the global interrupt until all the interrupt
-+ * handlers are installed.
-+ */
-+ dwc_otg_disable_global_interrupts(dwc_otg_device->core_if);
-+
-+ /*
-+ * Install the interrupt handler for the common interrupts before
-+ * enabling common interrupts in core_init below.
-+ */
-+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-+ if (!res) {
-+ dev_err(dev, "Found OTG with no IRQ.\n");
-+ retval = -ENODEV;
-+ goto fail;
-+ }
-+ dwc_otg_device->irq = res->start;
-+
-+ retval = request_irq(res->start, dwc_otg_common_irq,
-+ IRQF_SHARED, "dwc_otg", dwc_otg_device);
-+ if (retval) {
-+ DWC_ERROR("request of irq%d failed\n", res->start);
-+ retval = -EBUSY;
-+ goto fail;
-+ } else {
-+ dwc_otg_device->common_irq_installed = 1;
-+ }
-+
-+ /*
-+ * Initialize the DWC_otg core.
-+ */ -+ dwc_otg_core_init(dwc_otg_device->core_if); -+ -+#ifndef DWC_HOST_ONLY -+ /* -+ * Initialize the PCD -+ */ -+ retval = dwc_otg_pcd_init(pdev); -+ if (retval != 0) { -+ DWC_ERROR("dwc_otg_pcd_init failed\n"); -+ dwc_otg_device->pcd = NULL; -+ goto fail; -+ } -+#endif -+#ifndef DWC_DEVICE_ONLY -+ /* -+ * Initialize the HCD -+ */ -+ retval = dwc_otg_hcd_init(pdev); -+ if (retval != 0) { -+ DWC_ERROR("dwc_otg_hcd_init failed\n"); -+ dwc_otg_device->hcd = NULL; -+ goto fail; -+ } -+#endif -+ -+ /* -+ * Enable the global interrupt after all the interrupt -+ * handlers are installed. -+ */ -+ dwc_otg_enable_global_interrupts(dwc_otg_device->core_if); -+ -+ return 0; -+ -+ fail: -+ dwc_otg_driver_remove(pdev); -+ return retval; -+} -+ -+static struct platform_driver dwc_otg_platform_driver = { -+ .driver.name = "dwc_otg", -+ .probe = dwc_otg_driver_probe, -+ .remove = dwc_otg_driver_remove, -+}; -+ -+static int __init dwc_otg_init_module(void) -+{ -+ return platform_driver_register(&dwc_otg_platform_driver); -+} -+ -+static void __exit dwc_otg_cleanup_module(void) -+{ -+ platform_driver_unregister(&dwc_otg_platform_driver); -+} -+ -+module_init(dwc_otg_init_module); -+module_exit(dwc_otg_cleanup_module); -+ -+/** -+ * This function is called when the driver is removed from the kernel -+ * with the rmmod command. The driver unregisters itself with its bus -+ * driver. -+ * -+ */ -+ -+MODULE_DESCRIPTION(DWC_DRIVER_DESC); -+MODULE_AUTHOR("Synopsys Inc."); -+MODULE_LICENSE("GPL"); -+ -+module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444); -+MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None"); -+module_param_named(opt, dwc_otg_module_params.opt, int, 0444); -+MODULE_PARM_DESC(opt, "OPT Mode"); -+module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444); -+MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled"); -+ -+module_param_named(dma_desc_enable, dwc_otg_module_params.dma_desc_enable, int, 0444); -+MODULE_PARM_DESC(dma_desc_enable, "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled"); -+ -+module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444); -+MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256"); -+module_param_named(speed, dwc_otg_module_params.speed, int, 0444); -+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed"); -+module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444); -+MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Support 1=Don't Support"); -+module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444); -+MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48Mhz 1=6Mhz"); -+module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444); -+MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing"); -+module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444); -+MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768"); -+module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444); -+MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); -+module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444); -+MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); 
-+module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_13, dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444); -+MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); -+module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444); -+MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); -+module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444); 
-+MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768"); -+module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444); -+/** @todo Set the max to 512K, modify checks */ -+MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535"); -+module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444); -+MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511"); -+module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444); -+MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16"); -+module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444); -+MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15"); -+module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444); -+MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI"); -+module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444); -+MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits"); -+module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444); -+MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double"); -+module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444); -+MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal"); -+module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444); -+MODULE_PARM_DESC(i2c_enable, "FS PHY Interface"); -+module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444); -+MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only"); -+module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444); -+MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs"); -+module_param_named(debug, g_dbg_lvl, int, 0444); -+MODULE_PARM_DESC(debug, ""); -+ -+module_param_named(en_multiple_tx_fifo, dwc_otg_module_params.en_multiple_tx_fifo, int, 0444); -+MODULE_PARM_DESC(en_multiple_tx_fifo, "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled"); -+module_param_named(dev_tx_fifo_size_1, dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_2, dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_3, dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_4, dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_5, dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_6, dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_7, dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_8, 
dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_9, dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_10, dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_11, dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_12, dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_13, dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_14, dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_15, dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768"); -+ -+module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444); -+MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled"); -+module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444); -+MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs"); -+module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444); -+MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs"); -+ -+module_param_named(pti_enable, dwc_otg_module_params.pti_enable, int, 0444); -+MODULE_PARM_DESC(pti_enable, "Per Transfer Interrupt mode 0=disabled 1=enabled"); -+ -+module_param_named(mpi_enable, dwc_otg_module_params.mpi_enable, int, 0444); -+MODULE_PARM_DESC(mpi_enable, "Multiprocessor Interrupt mode 0=disabled 1=enabled"); ---- /dev/null -+++ b/drivers/usb/dwc/otg_driver.h -@@ -0,0 +1,62 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.h $ -+ * $Revision: #12 $ -+ * $Date: 2008/07/15 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. 
-+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#ifndef __DWC_OTG_DRIVER_H__ -+#define __DWC_OTG_DRIVER_H__ -+ -+/** @file -+ * This file contains the interface to the Linux driver. -+ */ -+#include "otg_cil.h" -+ -+/* Type declarations */ -+struct dwc_otg_pcd; -+struct dwc_otg_hcd; -+ -+/** -+ * This structure is a wrapper that encapsulates the driver components used to -+ * manage a single DWC_otg controller. -+ */ -+typedef struct dwc_otg_device { -+ void *base; -+ dwc_otg_core_if_t *core_if; -+ uint32_t reg_offset; -+ struct dwc_otg_pcd *pcd; -+ struct dwc_otg_hcd *hcd; -+ uint8_t common_irq_installed; -+ int irq; -+ uint32_t rsrc_start; -+ uint32_t rsrc_len; -+} dwc_otg_device_t; -+ -+#endif ---- /dev/null -+++ b/drivers/usb/dwc/otg_hcd.c -@@ -0,0 +1,2735 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $ -+ * $Revision: #75 $ -+ * $Date: 2008/07/15 $ -+ * $Change: 1064940 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. 
-+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+ -+/** -+ * @file -+ * -+ * This file contains the implementation of the HCD. In Linux, the HCD -+ * implements the hc_driver API. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "otg_driver.h" -+#include "otg_hcd.h" -+#include "otg_regs.h" -+ -+static const char dwc_otg_hcd_name[] = "dwc_otg_hcd"; -+ -+static const struct hc_driver dwc_otg_hc_driver = { -+ -+ .description = dwc_otg_hcd_name, -+ .product_desc = "DWC OTG Controller", -+ .hcd_priv_size = sizeof(dwc_otg_hcd_t), -+ .irq = dwc_otg_hcd_irq, -+ .flags = HCD_MEMORY | HCD_USB2, -+ .start = dwc_otg_hcd_start, -+ .stop = dwc_otg_hcd_stop, -+ .urb_enqueue = dwc_otg_hcd_urb_enqueue, -+ .urb_dequeue = dwc_otg_hcd_urb_dequeue, -+ .endpoint_disable = dwc_otg_hcd_endpoint_disable, -+ .get_frame_number = dwc_otg_hcd_get_frame_number, -+ .hub_status_data = dwc_otg_hcd_hub_status_data, -+ .hub_control = dwc_otg_hcd_hub_control, -+}; -+ -+/** -+ * Work queue function for starting the HCD when A-Cable is connected. -+ * The dwc_otg_hcd_start() must be called in a process context. -+ */ -+static void hcd_start_func(struct work_struct *_work) -+{ -+ struct delayed_work *dw = container_of(_work, struct delayed_work, work); -+ struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work); -+ struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv); -+ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd); -+ if (usb_hcd) { -+ dwc_otg_hcd_start(usb_hcd); -+ } -+} -+ -+/** -+ * HCD Callback function for starting the HCD when A-Cable is -+ * connected. -+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_start_cb(void *p) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ hprt0_data_t hprt0; -+ -+ if (core_if->op_state == B_HOST) { -+ /* -+ * Reset the port. During a HNP mode switch the reset -+ * needs to occur within 1ms and have a duration of at -+ * least 50ms. -+ */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtrst = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ ((struct usb_hcd *)p)->self.is_b_host = 1; -+ } else { -+ ((struct usb_hcd *)p)->self.is_b_host = 0; -+ } -+ -+ /* Need to start the HCD in a non-interrupt context. */ -+// INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); -+ INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func); -+// schedule_work(&dwc_otg_hcd->start_work); -+ queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000); -+ -+ return 1; -+} -+ -+/** -+ * HCD Callback function for stopping the HCD. -+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_stop_cb(void *p) -+{ -+ struct usb_hcd *usb_hcd = (struct usb_hcd *)p; -+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); -+ dwc_otg_hcd_stop(usb_hcd); -+ return 1; -+} -+ -+static void del_xfer_timers(dwc_otg_hcd_t *hcd) -+{ -+#ifdef DEBUG -+ int i; -+ int num_channels = hcd->core_if->core_params->host_channels; -+ for (i = 0; i < num_channels; i++) { -+ del_timer(&hcd->core_if->hc_xfer_timer[i]); -+ } -+#endif -+} -+ -+static void del_timers(dwc_otg_hcd_t *hcd) -+{ -+ del_xfer_timers(hcd); -+ del_timer(&hcd->conn_timer); -+} -+ -+/** -+ * Processes all the URBs in a single list of QHs. 
Completes them with -+ * -ETIMEDOUT and frees the QTD. -+ */ -+static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list) -+{ -+ struct list_head *qh_item; -+ dwc_otg_qh_t *qh; -+ struct list_head *qtd_item; -+ dwc_otg_qtd_t *qtd; -+ -+ list_for_each(qh_item, qh_list) { -+ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry); -+ for (qtd_item = qh->qtd_list.next; -+ qtd_item != &qh->qtd_list; -+ qtd_item = qh->qtd_list.next) { -+ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry); -+ if (qtd->urb != NULL) { -+ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -+ -ETIMEDOUT); -+ } -+ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); -+ } -+ } -+} -+ -+/** -+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic -+ * and periodic schedules. The QTD associated with each URB is removed from -+ * the schedule and freed. This function may be called when a disconnect is -+ * detected or when the HCD is being stopped. -+ */ -+static void kill_all_urbs(dwc_otg_hcd_t *hcd) -+{ -+ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive); -+ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued); -+} -+ -+/** -+ * HCD Callback function for disconnect of the HCD. -+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_disconnect_cb(void *p) -+{ -+ gintsts_data_t intr; -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); -+ -+ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); -+ -+ /* -+ * Set status flags for the hub driver. -+ */ -+ dwc_otg_hcd->flags.b.port_connect_status_change = 1; -+ dwc_otg_hcd->flags.b.port_connect_status = 0; -+ -+ /* -+ * Shutdown any transfers in process by clearing the Tx FIFO Empty -+ * interrupt mask and status bits and disabling subsequent host -+ * channel interrupts. -+ */ -+ intr.d32 = 0; -+ intr.b.nptxfempty = 1; -+ intr.b.ptxfempty = 1; -+ intr.b.hcintr = 1; -+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0); -+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0); -+ -+ del_timers(dwc_otg_hcd); -+ -+ /* -+ * Turn off the vbus power only if the core has transitioned to device -+ * mode. If still in host mode, need to keep power on to detect a -+ * reconnection. -+ */ -+ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { -+ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) { -+ hprt0_data_t hprt0 = { .d32=0 }; -+ DWC_PRINT("Disconnect: PortPower off\n"); -+ hprt0.b.prtpwr = 0; -+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); -+ } -+ -+ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); -+ } -+ -+ /* Respond with an error status to all URBs in the schedule. */ -+ kill_all_urbs(dwc_otg_hcd); -+ -+ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { -+ /* Clean up any host channels that were in use. */ -+ int num_channels; -+ int i; -+ dwc_hc_t *channel; -+ dwc_otg_hc_regs_t *hc_regs; -+ hcchar_data_t hcchar; -+ -+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; -+ -+ if (!dwc_otg_hcd->core_if->dma_enable) { -+ /* Flush out any channel requests in slave mode. 
*/ -+ for (i = 0; i < num_channels; i++) { -+ channel = dwc_otg_hcd->hc_ptr_array[i]; -+ if (list_empty(&channel->hc_list_entry)) { -+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ hcchar.b.chen = 0; -+ hcchar.b.chdis = 1; -+ hcchar.b.epdir = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ } -+ } -+ } -+ } -+ -+ for (i = 0; i < num_channels; i++) { -+ channel = dwc_otg_hcd->hc_ptr_array[i]; -+ if (list_empty(&channel->hc_list_entry)) { -+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ /* Halt the channel. */ -+ hcchar.b.chdis = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ } -+ -+ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel); -+ list_add_tail(&channel->hc_list_entry, -+ &dwc_otg_hcd->free_hc_list); -+ } -+ } -+ } -+ -+ /* A disconnect will end the session so the B-Device is no -+ * longer a B-host. */ -+ ((struct usb_hcd *)p)->self.is_b_host = 0; -+ return 1; -+} -+ -+/** -+ * Connection timeout function. An OTG host is required to display a -+ * message if the device does not connect within 10 seconds. -+ */ -+void dwc_otg_hcd_connect_timeout(unsigned long ptr) -+{ -+ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr); -+ DWC_PRINT("Connect Timeout\n"); -+ DWC_ERROR("Device Not Connected/Responding\n"); -+} -+ -+/** -+ * Start the connection timer. An OTG host is required to display a -+ * message if the device does not connect within 10 seconds. The -+ * timer is deleted if a port connect interrupt occurs before the -+ * timer expires. -+ */ -+static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd) -+{ -+ init_timer(&hcd->conn_timer); -+ hcd->conn_timer.function = dwc_otg_hcd_connect_timeout; -+ hcd->conn_timer.data = 0; -+ hcd->conn_timer.expires = jiffies + (HZ * 10); -+ add_timer(&hcd->conn_timer); -+} -+ -+/** -+ * HCD Callback function for disconnect of the HCD. -+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_session_start_cb(void *p) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); -+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); -+ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd); -+ return 1; -+} -+ -+/** -+ * HCD Callback structure for handling mode switching. -+ */ -+static dwc_otg_cil_callbacks_t hcd_cil_callbacks = { -+ .start = dwc_otg_hcd_start_cb, -+ .stop = dwc_otg_hcd_stop_cb, -+ .disconnect = dwc_otg_hcd_disconnect_cb, -+ .session_start = dwc_otg_hcd_session_start_cb, -+ .p = 0, -+}; -+ -+/** -+ * Reset tasklet function -+ */ -+static void reset_tasklet_func(unsigned long data) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data; -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ hprt0_data_t hprt0; -+ -+ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n"); -+ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtrst = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ mdelay(60); -+ -+ hprt0.b.prtrst = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ dwc_otg_hcd->flags.b.port_reset_change = 1; -+} -+ -+static struct tasklet_struct reset_tasklet = { -+ .next = NULL, -+ .state = 0, -+ .count = ATOMIC_INIT(0), -+ .func = reset_tasklet_func, -+ .data = 0, -+}; -+ -+/** -+ * Initializes the HCD. This function allocates memory for and initializes the -+ * static parts of the usb_hcd and dwc_otg_hcd structures. 
It also registers the -+ * USB bus with the core and calls the hc_driver->start() function. It returns -+ * a negative error on failure. -+ */ -+int dwc_otg_hcd_init(struct platform_device *pdev) -+{ -+ struct usb_hcd *hcd = NULL; -+ dwc_otg_hcd_t *dwc_otg_hcd = NULL; -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); -+ -+ int num_channels; -+ int i; -+ dwc_hc_t *channel; -+ -+ int retval = 0; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); -+ -+ /* Set device flags indicating whether the HCD supports DMA. */ -+ if (otg_dev->core_if->dma_enable) { -+ DWC_PRINT("Using DMA mode\n"); -+ -+ if (otg_dev->core_if->dma_desc_enable) { -+ DWC_PRINT("Device using Descriptor DMA mode\n"); -+ } else { -+ DWC_PRINT("Device using Buffer DMA mode\n"); -+ } -+ } -+ /* -+ * Allocate memory for the base HCD plus the DWC OTG HCD. -+ * Initialize the base HCD. -+ */ -+ -+ hcd = usb_create_hcd(&dwc_otg_hc_driver, &pdev->dev, "gadget"); -+ if (!hcd) { -+ retval = -ENOMEM; -+ goto error1; -+ } -+ -+ hcd->regs = otg_dev->base; -+ hcd->self.otg_port = 1; -+ -+ /* Initialize the DWC OTG HCD. */ -+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_hcd->core_if = otg_dev->core_if; -+ otg_dev->hcd = dwc_otg_hcd; -+ -+ /* */ -+ spin_lock_init(&dwc_otg_hcd->lock); -+ -+ /* Register the HCD CIL Callbacks */ -+ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, -+ &hcd_cil_callbacks, hcd); -+ -+ /* Initialize the non-periodic schedule. */ -+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive); -+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active); -+ -+ /* Initialize the periodic schedule. */ -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned); -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); -+ -+ /* -+ * Create a host channel descriptor for each host channel implemented -+ * in the controller. Initialize the channel descriptor array. -+ */ -+ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); -+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; -+ memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array)); -+ for (i = 0; i < num_channels; i++) { -+ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL); -+ if (channel == NULL) { -+ retval = -ENOMEM; -+ DWC_ERROR("%s: host channel allocation failed\n", __func__); -+ goto error2; -+ } -+ memset(channel, 0, sizeof(dwc_hc_t)); -+ channel->hc_num = i; -+ dwc_otg_hcd->hc_ptr_array[i] = channel; -+#ifdef DEBUG -+ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]); -+#endif -+ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel); -+ } -+ -+ /* Initialize the Connection timeout timer. */ -+ init_timer(&dwc_otg_hcd->conn_timer); -+ -+ /* Initialize reset tasklet. */ -+ reset_tasklet.data = (unsigned long) dwc_otg_hcd; -+ dwc_otg_hcd->reset_tasklet = &reset_tasklet; -+ -+ /* -+ * Finish generic HCD initialization and start the HCD. This function -+ * allocates the DMA buffer pool, registers the USB bus, requests the -+ * IRQ line, and calls dwc_otg_hcd_start method. -+ */ -+ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED); -+ if (retval < 0) { -+ goto error2; -+ } -+ -+ /* -+ * Allocate space for storing data on status transactions. Normally no -+ * data is sent, but this space acts as a bit bucket. This must be -+ * done after usb_add_hcd since that function allocates the DMA buffer -+ * pool. 
-+ */ -+ if (otg_dev->core_if->dma_enable) { -+ dwc_otg_hcd->status_buf = -+ dma_alloc_coherent(&pdev->dev, -+ DWC_OTG_HCD_STATUS_BUF_SIZE, -+ &dwc_otg_hcd->status_buf_dma, -+ GFP_KERNEL | GFP_DMA); -+ } else { -+ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, -+ GFP_KERNEL); -+ } -+ if (!dwc_otg_hcd->status_buf) { -+ retval = -ENOMEM; -+ DWC_ERROR("%s: status_buf allocation failed\n", __func__); -+ goto error3; -+ } -+ -+ dwc_otg_hcd->otg_dev = otg_dev; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, usbbus=%d\n", -+ hcd->self.busnum); -+ return 0; -+ -+ /* Error conditions */ -+ error3: -+ usb_remove_hcd(hcd); -+ error2: -+ dwc_otg_hcd_free(hcd); -+ usb_put_hcd(hcd); -+ error1: -+ return retval; -+} -+ -+/** -+ * Removes the HCD. -+ * Frees memory and resources associated with the HCD and deregisters the bus. -+ */ -+void dwc_otg_hcd_remove(struct platform_device *pdev) -+{ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); -+ dwc_otg_hcd_t *dwc_otg_hcd; -+ struct usb_hcd *hcd; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); -+ -+ if (!otg_dev) { -+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); -+ return; -+ } -+ -+ dwc_otg_hcd = otg_dev->hcd; -+ -+ if (!dwc_otg_hcd) { -+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); -+ return; -+ } -+ -+ hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); -+ -+ if (!hcd) { -+ DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__); -+ return; -+ } -+ -+ /* Turn off all interrupts */ -+ dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0); -+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0); -+ -+ usb_remove_hcd(hcd); -+ dwc_otg_hcd_free(hcd); -+ usb_put_hcd(hcd); -+} -+ -+/* ========================================================================= -+ * Linux HC Driver Functions -+ * ========================================================================= */ -+ -+/** -+ * Initializes dynamic portions of the DWC_otg HCD state. -+ */ -+static void hcd_reinit(dwc_otg_hcd_t *hcd) -+{ -+ struct list_head *item; -+ int num_channels; -+ int i; -+ dwc_hc_t *channel; -+ -+ hcd->flags.d32 = 0; -+ -+ hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active; -+ hcd->non_periodic_channels = 0; -+ hcd->periodic_channels = 0; -+ -+ /* -+ * Put all channels in the free channel list and clean up channel -+ * states. -+ */ -+ item = hcd->free_hc_list.next; -+ while (item != &hcd->free_hc_list) { -+ list_del(item); -+ item = hcd->free_hc_list.next; -+ } -+ num_channels = hcd->core_if->core_params->host_channels; -+ for (i = 0; i < num_channels; i++) { -+ channel = hcd->hc_ptr_array[i]; -+ list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list); -+ dwc_otg_hc_cleanup(hcd->core_if, channel); -+ } -+ -+ /* Initialize the DWC core for host mode operation. */ -+ dwc_otg_core_host_init(hcd->core_if); -+} -+ -+/** Initializes the DWC_otg controller and its root hub and prepares it for host -+ * mode operation. Activates the root port. Returns 0 on success and a negative -+ * error code on failure. */ -+int dwc_otg_hcd_start(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ struct usb_bus *bus; -+ -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); -+ -+ bus = hcd_to_bus(hcd); -+ -+ /* Initialize the bus state. If the core is in Device Mode -+ * HALT the USB bus and return. 
*/ -+ if (dwc_otg_is_device_mode(core_if)) { -+ hcd->state = HC_STATE_RUNNING; -+ return 0; -+ } -+ hcd->state = HC_STATE_RUNNING; -+ -+ /* Initialize and connect root hub if one is not already attached */ -+ if (bus->root_hub) { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n"); -+ /* Inform the HUB driver to resume. */ -+ usb_hcd_resume_root_hub(hcd); -+ } -+ else { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n"); -+ } -+ -+ hcd_reinit(dwc_otg_hcd); -+ -+ return 0; -+} -+ -+static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list) -+{ -+ struct list_head *item; -+ dwc_otg_qh_t *qh; -+ -+ if (!qh_list->next) { -+ /* The list hasn't been initialized yet. */ -+ return; -+ } -+ -+ /* Ensure there are no QTDs or URBs left. */ -+ kill_urbs_in_qh_list(hcd, qh_list); -+ -+ for (item = qh_list->next; item != qh_list; item = qh_list->next) { -+ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry); -+ dwc_otg_hcd_qh_remove_and_free(hcd, qh); -+ } -+} -+ -+/** -+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are -+ * stopped. -+ */ -+void dwc_otg_hcd_stop(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ hprt0_data_t hprt0 = { .d32=0 }; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); -+ -+ /* Turn off all host-specific interrupts. */ -+ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); -+ -+ /* -+ * The root hub should be disconnected before this function is called. -+ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) -+ * and the QH lists (via ..._hcd_endpoint_disable). -+ */ -+ -+ /* Turn off the vbus power */ -+ DWC_PRINT("PortPower off\n"); -+ hprt0.b.prtpwr = 0; -+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); -+} -+ -+/** Returns the current frame number. */ -+int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ hfnum_data_t hfnum; -+ -+ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if-> -+ host_if->host_global_regs->hfnum); -+ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum); -+#endif -+ return hfnum.b.frnum; -+} -+ -+/** -+ * Frees secondary storage associated with the dwc_otg_hcd structure contained -+ * in the struct usb_hcd field. -+ */ -+void dwc_otg_hcd_free(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ int i; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); -+ -+ del_timers(dwc_otg_hcd); -+ -+ /* Free memory for QH/QTD lists */ -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued); -+ -+ /* Free memory for the host channels. 
*/ -+ for (i = 0; i < MAX_EPS_CHANNELS; i++) { -+ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i]; -+ if (hc != NULL) { -+ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc); -+ kfree(hc); -+ } -+ } -+ -+ if (dwc_otg_hcd->core_if->dma_enable) { -+ if (dwc_otg_hcd->status_buf_dma) { -+ dma_free_coherent(hcd->self.controller, -+ DWC_OTG_HCD_STATUS_BUF_SIZE, -+ dwc_otg_hcd->status_buf, -+ dwc_otg_hcd->status_buf_dma); -+ } -+ } else if (dwc_otg_hcd->status_buf != NULL) { -+ kfree(dwc_otg_hcd->status_buf); -+ } -+} -+ -+#ifdef DEBUG -+static void dump_urb_info(struct urb *urb, char* fn_name) -+{ -+ DWC_PRINT("%s, urb %p\n", fn_name, urb); -+ DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe)); -+ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), -+ (usb_pipein(urb->pipe) ? "IN" : "OUT")); -+ DWC_PRINT(" Endpoint type: %s\n", -+ ({char *pipetype; -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: pipetype = "CONTROL"; break; -+ case PIPE_BULK: pipetype = "BULK"; break; -+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; -+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; -+ default: pipetype = "UNKNOWN"; break; -+ }; pipetype;})); -+ DWC_PRINT(" Speed: %s\n", -+ ({char *speed; -+ switch (urb->dev->speed) { -+ case USB_SPEED_HIGH: speed = "HIGH"; break; -+ case USB_SPEED_FULL: speed = "FULL"; break; -+ case USB_SPEED_LOW: speed = "LOW"; break; -+ default: speed = "UNKNOWN"; break; -+ }; speed;})); -+ DWC_PRINT(" Max packet size: %d\n", -+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); -+ DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length); -+ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n", -+ urb->transfer_buffer, (void *)urb->transfer_dma); -+ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", -+ urb->setup_packet, (void *)urb->setup_dma); -+ DWC_PRINT(" Interval: %d\n", urb->interval); -+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { -+ int i; -+ for (i = 0; i < urb->number_of_packets; i++) { -+ DWC_PRINT(" ISO Desc %d:\n", i); -+ DWC_PRINT(" offset: %d, length %d\n", -+ urb->iso_frame_desc[i].offset, -+ urb->iso_frame_desc[i].length); -+ } -+ } -+} -+ -+static void dump_channel_info(dwc_otg_hcd_t *hcd, -+ dwc_otg_qh_t *qh) -+{ -+ if (qh->channel != NULL) { -+ dwc_hc_t *hc = qh->channel; -+ struct list_head *item; -+ dwc_otg_qh_t *qh_item; -+ int num_channels = hcd->core_if->core_params->host_channels; -+ int i; -+ -+ dwc_otg_hc_regs_t *hc_regs; -+ hcchar_data_t hcchar; -+ hcsplt_data_t hcsplt; -+ hctsiz_data_t hctsiz; -+ uint32_t hcdma; -+ -+ hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ hcdma = dwc_read_reg32(&hc_regs->hcdma); -+ -+ DWC_PRINT(" Assigned to channel %p:\n", hc); -+ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); -+ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); -+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", -+ hc->dev_addr, hc->ep_num, hc->ep_is_in); -+ DWC_PRINT(" ep_type: %d\n", hc->ep_type); -+ DWC_PRINT(" max_packet: %d\n", hc->max_packet); -+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); -+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); -+ DWC_PRINT(" halt_status: %d\n", hc->halt_status); -+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); -+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); -+ DWC_PRINT(" qh: %p\n", hc->qh); -+ DWC_PRINT(" NP inactive sched:\n"); -+ 
list_for_each(item, &hcd->non_periodic_sched_inactive) {
-+ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
-+ DWC_PRINT(" %p\n", qh_item);
-+ }
-+ DWC_PRINT(" NP active sched:\n");
-+ list_for_each(item, &hcd->non_periodic_sched_active) {
-+ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
-+ DWC_PRINT(" %p\n", qh_item);
-+ }
-+ DWC_PRINT(" Channels: \n");
-+ for (i = 0; i < num_channels; i++) {
-+ dwc_hc_t *hc = hcd->hc_ptr_array[i];
-+ DWC_PRINT(" %2d: %p\n", i, hc);
-+ }
-+ }
-+}
-+#endif
-+
-+
-+//OTG host require the DMA addr is DWORD-aligned,
-+//patch it if the buffer is not DWORD-aligned
-+inline
-+void hcd_check_and_patch_dma_addr(struct urb *urb){
-+
-+ if((!urb->transfer_buffer)||!urb->transfer_dma||urb->transfer_dma==0xffffffff)
-+ return;
-+
-+ if(((u32)urb->transfer_buffer)& 0x3){
-+ /*
-+ printk("%s: "
-+ "urb(%.8x) "
-+ "transfer_buffer=%.8x, "
-+ "transfer_dma=%.8x, "
-+ "transfer_buffer_length=%d, "
-+ "actual_length=%d(%x), "
-+ "\n",
-+ ((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT)?"OUT":"IN",
-+ urb,
-+ urb->transfer_buffer,
-+ urb->transfer_dma,
-+ urb->transfer_buffer_length,
-+ urb->actual_length,urb->actual_length
-+ );
-+ */
-+ if(!urb->aligned_transfer_buffer||urb->aligned_transfer_buffer_length<urb->transfer_buffer_length){
-+ urb->aligned_transfer_buffer_length=urb->transfer_buffer_length;
-+ if(urb->aligned_transfer_buffer) {
-+ kfree(urb->aligned_transfer_buffer);
-+ }
-+ urb->aligned_transfer_buffer=kmalloc(urb->aligned_transfer_buffer_length,GFP_KERNEL|GFP_DMA|GFP_ATOMIC);
-+ urb->aligned_transfer_dma=dma_map_single(NULL,(void *)(urb->aligned_transfer_buffer),(urb->aligned_transfer_buffer_length),DMA_FROM_DEVICE);
-+ if(!urb->aligned_transfer_buffer){
-+ DWC_ERROR("Cannot alloc required buffer!!\n");
-+ BUG();
-+ }
-+ //printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length);
-+ }
-+ urb->transfer_dma=urb->aligned_transfer_dma;
-+ if((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT) {
-+ memcpy(urb->aligned_transfer_buffer,urb->transfer_buffer,urb->transfer_buffer_length);
-+ dma_sync_single_for_device(NULL,urb->transfer_dma,urb->transfer_buffer_length,DMA_TO_DEVICE);
-+ }
-+ }
-+}
-+
-+
-+
-+/** Starts processing a USB transfer request specified by a USB Request Block
-+ * (URB). mem_flags indicates the type of memory allocation to use while
-+ * processing this URB. */
-+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
-+// struct usb_host_endpoint *ep,
-+ struct urb *urb,
-+ gfp_t mem_flags
-+ )
-+{
-+ int retval = 0;
-+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
-+ dwc_otg_qtd_t *qtd;
-+
-+#ifdef DEBUG
-+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
-+ dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue");
-+ }
-+#endif
-+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
-+ /* No longer connected. */
-+ return -ENODEV;
-+ }
-+
-+ hcd_check_and_patch_dma_addr(urb);
-+ qtd = dwc_otg_hcd_qtd_create(urb);
-+ if (qtd == NULL) {
-+ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
-+ return -ENOMEM;
-+ }
-+
-+ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
-+ if (retval < 0) {
-+ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
-+ "Error status %d\n", retval);
-+ dwc_otg_hcd_qtd_free(qtd);
-+ }
-+
-+ return retval;
-+}
-+
-+/** Aborts/cancels a USB transfer request. Always returns 0 to indicate
-+ * success.
*/ -+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, -+ struct urb *urb, int status) -+{ -+ unsigned long flags; -+ dwc_otg_hcd_t *dwc_otg_hcd; -+ dwc_otg_qtd_t *urb_qtd; -+ dwc_otg_qh_t *qh; -+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n"); -+ -+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ -+ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); -+ -+ urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv; -+ qh = (dwc_otg_qh_t *)ep->hcpriv; -+ -+#ifdef DEBUG -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue"); -+ if (urb_qtd == qh->qtd_in_process) { -+ dump_channel_info(dwc_otg_hcd, qh); -+ } -+ } -+#endif -+ -+ if (urb_qtd == qh->qtd_in_process) { -+ /* The QTD is in process (it has been assigned to a channel). */ -+ -+ if (dwc_otg_hcd->flags.b.port_connect_status) { -+ /* -+ * If still connected (i.e. in host mode), halt the -+ * channel so it can be used for other transfers. If -+ * no longer connected, the host registers can't be -+ * written to halt the channel since the core is in -+ * device mode. -+ */ -+ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel, -+ DWC_OTG_HC_XFER_URB_DEQUEUE); -+ } -+ } -+ -+ /* -+ * Free the QTD and clean up the associated QH. Leave the QH in the -+ * schedule if it has any remaining QTDs. -+ */ -+ dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd); -+ if (urb_qtd == qh->qtd_in_process) { -+ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0); -+ qh->channel = NULL; -+ qh->qtd_in_process = NULL; -+ } else if (list_empty(&qh->qtd_list)) { -+ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ -+ urb->hcpriv = NULL; -+ -+ /* Higher layer software sets URB status. */ -+ usb_hcd_giveback_urb(hcd, urb, status); -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); -+ DWC_PRINT(" urb->status = %d\n", urb->status); -+ } -+ -+ return 0; -+} -+ -+/** Frees resources in the DWC_otg controller related to a given endpoint. Also -+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint -+ * must already be dequeued. */ -+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, -+ struct usb_host_endpoint *ep) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_qh_t *qh; -+ -+ unsigned long flags; -+ int retry = 0; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, " -+ "endpoint=%d\n", ep->desc.bEndpointAddress, -+ dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress)); -+ -+rescan: -+ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); -+ qh = (dwc_otg_qh_t *)(ep->hcpriv); -+ if (!qh) -+ goto done; -+ -+ /** Check that the QTD list is really empty */ -+ if (!list_empty(&qh->qtd_list)) { -+ if (retry++ < 250) { -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ schedule_timeout_uninterruptible(1); -+ goto rescan; -+ } -+ -+ DWC_WARN("DWC OTG HCD EP DISABLE:" -+ " QTD List for this endpoint is not empty\n"); -+ } -+ -+ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); -+ ep->hcpriv = NULL; -+done: -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ -+} -+ -+/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if -+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid -+ * interrupt. 
-+ * -+ * This function is called by the USB core when an interrupt occurs */ -+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd) -+{ -+ int retVal = 0; -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ retVal = dwc_otg_hcd_handle_intr(dwc_otg_hcd); -+ if (dwc_otg_hcd->flags.b.port_connect_status_change == 1) -+ usb_hcd_poll_rh_status(hcd); -+ return IRQ_RETVAL(retVal); -+} -+ -+/** Creates Status Change bitmap for the root hub and root port. The bitmap is -+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 -+ * is the status change indicator for the single root port. Returns 1 if either -+ * change indicator is 1, otherwise returns 0. */ -+int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ -+ buf[0] = 0; -+ buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change || -+ dwc_otg_hcd->flags.b.port_reset_change || -+ dwc_otg_hcd->flags.b.port_enable_change || -+ dwc_otg_hcd->flags.b.port_suspend_change || -+ dwc_otg_hcd->flags.b.port_over_current_change) << 1; -+ -+#ifdef DEBUG -+ if (buf[0]) { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:" -+ " Root port status changed\n"); -+ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n", -+ dwc_otg_hcd->flags.b.port_connect_status_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n", -+ dwc_otg_hcd->flags.b.port_reset_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n", -+ dwc_otg_hcd->flags.b.port_enable_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n", -+ dwc_otg_hcd->flags.b.port_suspend_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n", -+ dwc_otg_hcd->flags.b.port_over_current_change); -+ } -+#endif -+ return (buf[0] != 0); -+} -+ -+#ifdef DWC_HS_ELECT_TST -+/* -+ * Quick and dirty hack to implement the HS Electrical Test -+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. -+ * -+ * This code was copied from our userspace app "hset". It sends a -+ * Get Device Descriptor control sequence in two parts, first the -+ * Setup packet by itself, followed some time later by the In and -+ * Ack packets. Rather than trying to figure out how to add this -+ * functionality to the normal driver code, we just hijack the -+ * hardware, using these two function to drive the hardware -+ * directly. 
-+ */ -+ -+dwc_otg_core_global_regs_t *global_regs; -+dwc_otg_host_global_regs_t *hc_global_regs; -+dwc_otg_hc_regs_t *hc_regs; -+uint32_t *data_fifo; -+ -+static void do_setup(void) -+{ -+ gintsts_data_t gintsts; -+ hctsiz_data_t hctsiz; -+ hcchar_data_t hcchar; -+ haint_data_t haint; -+ hcint_data_t hcint; -+ -+ /* Enable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); -+ -+ /* Enable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* -+ * Send Setup packet (Get Device Descriptor) -+ */ -+ -+ /* Make sure channel is disabled */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32); -+ hcchar.b.chdis = 1; -+// hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ //sleep(1); -+ mdelay(1000); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //if (hcchar.b.chen) { -+ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32); -+ //} -+ } -+ -+ /* Set HCTSIZ */ -+ hctsiz.d32 = 0; -+ hctsiz.b.xfersize = 8; -+ hctsiz.b.pktcnt = 1; -+ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ /* Set HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; -+ hcchar.b.epdir = 0; -+ hcchar.b.epnum = 0; -+ hcchar.b.mps = 8; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ /* Fill FIFO with Setup data for Get Device Descriptor */ -+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); -+ dwc_write_reg32(data_fifo++, 0x01000680); -+ dwc_write_reg32(data_fifo++, 0x00080000); -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for host channel interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); 
-+ } while (gintsts.b.hcintr == 0); -+ -+ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Disable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); -+ -+ /* Disable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+} -+ -+static void do_in_ack(void) -+{ -+ gintsts_data_t gintsts; -+ hctsiz_data_t hctsiz; -+ hcchar_data_t hcchar; -+ haint_data_t haint; -+ hcint_data_t hcint; -+ host_grxsts_data_t grxsts; -+ -+ /* Enable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); -+ -+ /* Enable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* -+ * Receive Control In packet -+ */ -+ -+ /* Make sure channel is disabled */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32); -+ hcchar.b.chdis = 1; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ //sleep(1); -+ mdelay(1000); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //if (hcchar.b.chen) { -+ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32); -+ //} -+ } -+ -+ /* Set HCTSIZ */ 
-+ hctsiz.d32 = 0; -+ hctsiz.b.xfersize = 8; -+ hctsiz.b.pktcnt = 1; -+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ /* Set HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; -+ hcchar.b.epdir = 1; -+ hcchar.b.epnum = 0; -+ hcchar.b.mps = 8; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for receive status queue interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.rxstsqlvl == 0); -+ -+ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Read RXSTS */ -+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); -+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); -+ -+ /* Clear RXSTSQLVL in GINTSTS */ -+ gintsts.d32 = 0; -+ gintsts.b.rxstsqlvl = 1; -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ switch (grxsts.b.pktsts) { -+ case DWC_GRXSTS_PKTSTS_IN: -+ /* Read the data into the host buffer */ -+ if (grxsts.b.bcnt > 0) { -+ int i; -+ int word_count = (grxsts.b.bcnt + 3) / 4; -+ -+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); -+ -+ for (i = 0; i < word_count; i++) { -+ (void)dwc_read_reg32(data_fifo++); -+ } -+ } -+ -+ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt); -+ break; -+ -+ default: -+ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n"); -+ break; -+ } -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for receive status queue interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.rxstsqlvl == 0); -+ -+ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Read RXSTS */ -+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); -+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); -+ -+ /* Clear RXSTSQLVL in GINTSTS */ -+ gintsts.d32 = 0; -+ gintsts.b.rxstsqlvl = 1; -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ switch (grxsts.b.pktsts) { -+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: -+ break; -+ -+ default: -+ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n"); -+ break; -+ } -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for host channel interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.hcintr == 0); -+ -+ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+// usleep(100000); 
-+// mdelay(100); -+ mdelay(1); -+ -+ /* -+ * Send handshake packet -+ */ -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Make sure channel is disabled */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32); -+ hcchar.b.chdis = 1; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ //sleep(1); -+ mdelay(1000); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //if (hcchar.b.chen) { -+ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32); -+ //} -+ } -+ -+ /* Set HCTSIZ */ -+ hctsiz.d32 = 0; -+ hctsiz.b.xfersize = 0; -+ hctsiz.b.pktcnt = 1; -+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ /* Set HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; -+ hcchar.b.epdir = 0; -+ hcchar.b.epnum = 0; -+ hcchar.b.mps = 8; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for host channel interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.hcintr == 0); -+ -+ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Disable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); -+ -+ /* Disable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* 
Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+} -+#endif /* DWC_HS_ELECT_TST */ -+ -+/** Handles hub class-specific requests. */ -+int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, -+ u16 typeReq, -+ u16 wValue, -+ u16 wIndex, -+ char *buf, -+ u16 wLength) -+{ -+ int retval = 0; -+ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if; -+ struct usb_hub_descriptor *desc; -+ hprt0_data_t hprt0 = {.d32 = 0}; -+ -+ uint32_t port_status; -+ -+ switch (typeReq) { -+ case ClearHubFeature: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearHubFeature 0x%x\n", wValue); -+ switch (wValue) { -+ case C_HUB_LOCAL_POWER: -+ case C_HUB_OVER_CURRENT: -+ /* Nothing required here */ -+ break; -+ default: -+ retval = -EINVAL; -+ DWC_ERROR("DWC OTG HCD - " -+ "ClearHubFeature request %xh unknown\n", wValue); -+ } -+ break; -+ case ClearPortFeature: -+ if (!wIndex || wIndex > 1) -+ goto error; -+ -+ switch (wValue) { -+ case USB_PORT_FEAT_ENABLE: -+ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtena = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_SUSPEND: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtres = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ /* Clear Resume bit */ -+ mdelay(100); -+ hprt0.b.prtres = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_POWER: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_POWER\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtpwr = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_INDICATOR: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); -+ /* Port inidicator not supported */ -+ break; -+ case USB_PORT_FEAT_C_CONNECTION: -+ /* Clears drivers internal connect status change -+ * flag */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); -+ dwc_otg_hcd->flags.b.port_connect_status_change = 0; -+ break; -+ case USB_PORT_FEAT_C_RESET: -+ /* Clears the driver's internal Port Reset Change -+ * flag */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); -+ dwc_otg_hcd->flags.b.port_reset_change = 0; -+ break; -+ case USB_PORT_FEAT_C_ENABLE: -+ /* Clears the driver's internal Port -+ * Enable/Disable Change flag */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); -+ dwc_otg_hcd->flags.b.port_enable_change = 0; -+ break; -+ case USB_PORT_FEAT_C_SUSPEND: -+ /* Clears the driver's internal Port Suspend -+ * Change flag, which is set when resume signaling on -+ * the host port is complete */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); -+ dwc_otg_hcd->flags.b.port_suspend_change = 0; -+ break; -+ case USB_PORT_FEAT_C_OVER_CURRENT: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); -+ 
dwc_otg_hcd->flags.b.port_over_current_change = 0; -+ break; -+ default: -+ retval = -EINVAL; -+ DWC_ERROR("DWC OTG HCD - " -+ "ClearPortFeature request %xh " -+ "unknown or unsupported\n", wValue); -+ } -+ break; -+ case GetHubDescriptor: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "GetHubDescriptor\n"); -+ desc = (struct usb_hub_descriptor *)buf; -+ desc->bDescLength = 9; -+ desc->bDescriptorType = 0x29; -+ desc->bNbrPorts = 1; -+ desc->wHubCharacteristics = 0x08; -+ desc->bPwrOn2PwrGood = 1; -+ desc->bHubContrCurrent = 0; -+ desc->u.hs.DeviceRemovable[0] = 0; -+ desc->u.hs.DeviceRemovable[1] = 0xff; -+ break; -+ case GetHubStatus: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "GetHubStatus\n"); -+ memset(buf, 0, 4); -+ break; -+ case GetPortStatus: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "GetPortStatus\n"); -+ -+ if (!wIndex || wIndex > 1) -+ goto error; -+ -+ port_status = 0; -+ -+ if (dwc_otg_hcd->flags.b.port_connect_status_change) -+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); -+ -+ if (dwc_otg_hcd->flags.b.port_enable_change) -+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE); -+ -+ if (dwc_otg_hcd->flags.b.port_suspend_change) -+ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND); -+ -+ if (dwc_otg_hcd->flags.b.port_reset_change) -+ port_status |= (1 << USB_PORT_FEAT_C_RESET); -+ -+ if (dwc_otg_hcd->flags.b.port_over_current_change) { -+ DWC_ERROR("Device Not Supported\n"); -+ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT); -+ } -+ -+ if (!dwc_otg_hcd->flags.b.port_connect_status) { -+ /* -+ * The port is disconnected, which means the core is -+ * either in device mode or it soon will be. Just -+ * return 0's for the remainder of the port status -+ * since the port register can't be read if the core -+ * is in device mode. -+ */ -+ *((__le32 *) buf) = cpu_to_le32(port_status); -+ break; -+ } -+ -+ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0); -+ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32); -+ -+ if (hprt0.b.prtconnsts) -+ port_status |= (1 << USB_PORT_FEAT_CONNECTION); -+ -+ if (hprt0.b.prtena) -+ port_status |= (1 << USB_PORT_FEAT_ENABLE); -+ -+ if (hprt0.b.prtsusp) -+ port_status |= (1 << USB_PORT_FEAT_SUSPEND); -+ -+ if (hprt0.b.prtovrcurract) -+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); -+ -+ if (hprt0.b.prtrst) -+ port_status |= (1 << USB_PORT_FEAT_RESET); -+ -+ if (hprt0.b.prtpwr) -+ port_status |= (1 << USB_PORT_FEAT_POWER); -+ -+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) -+ port_status |= (USB_PORT_STAT_HIGH_SPEED); -+ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) -+ port_status |= (USB_PORT_STAT_LOW_SPEED); -+ -+ if (hprt0.b.prttstctl) -+ port_status |= (1 << USB_PORT_FEAT_TEST); -+ -+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ -+ -+ *((__le32 *) buf) = cpu_to_le32(port_status); -+ -+ break; -+ case SetHubFeature: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetHubFeature\n"); -+ /* No HUB features supported */ -+ break; -+ case SetPortFeature: -+ if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1)) -+ goto error; -+ -+ if (!dwc_otg_hcd->flags.b.port_connect_status) { -+ /* -+ * The port is disconnected, which means the core is -+ * either in device mode or it soon will be. Just -+ * return without doing anything since the port -+ * register can't be written if the core is in device -+ * mode. 
-+ */ -+ break; -+ } -+ -+ switch (wValue) { -+ case USB_PORT_FEAT_SUSPEND: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); -+ if (hcd->self.otg_port == wIndex && -+ hcd->self.b_hnp_enable) { -+ gotgctl_data_t gotgctl = {.d32=0}; -+ gotgctl.b.hstsethnpen = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, -+ 0, gotgctl.d32); -+ core_if->op_state = A_SUSPEND; -+ } -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtsusp = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32); -+ /* Suspend the Phy Clock */ -+ { -+ pcgcctl_data_t pcgcctl = {.d32=0}; -+ pcgcctl.b.stoppclk = 1; -+ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32); -+ } -+ -+ /* For HNP the bus must be suspended for at least 200ms. */ -+ if (hcd->self.b_hnp_enable) { -+ mdelay(200); -+ //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state); -+ } -+ break; -+ case USB_PORT_FEAT_POWER: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_POWER\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtpwr = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_RESET: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_RESET\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ /* When B-Host the Port reset bit is set in -+ * the Start HCD Callback function, so that -+ * the reset is started within 1ms of the HNP -+ * success interrupt. */ -+ if (!hcd->self.is_b_host) { -+ hprt0.b.prtrst = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ } -+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ -+ MDELAY(60); -+ hprt0.b.prtrst = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ -+#ifdef DWC_HS_ELECT_TST -+ case USB_PORT_FEAT_TEST: -+ { -+ uint32_t t; -+ gintmsk_data_t gintmsk; -+ -+ t = (wIndex >> 8); /* MSB wIndex USB */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t); -+ warn("USB_PORT_FEAT_TEST %d\n", t); -+ if (t < 6) { -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prttstctl = t; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ } else { -+ /* Setup global vars with reg addresses (quick and -+ * dirty hack, should be cleaned up) -+ */ -+ global_regs = core_if->core_global_regs; -+ hc_global_regs = core_if->host_if->host_global_regs; -+ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500); -+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); -+ -+ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */ -+ /* Save current interrupt mask */ -+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); -+ -+ /* Disable all interrupts while we muck with -+ * the hardware directly -+ */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* 15 second delay per the test spec */ -+ mdelay(15000); -+ -+ /* Drive suspend on the root port */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtsusp = 1; -+ hprt0.b.prtres = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ -+ /* 15 second delay per the test spec */ -+ mdelay(15000); -+ -+ /* Drive resume on the root port */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtsusp = 0; -+ hprt0.b.prtres = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ mdelay(100); -+ -+ /* Clear the resume bit */ -+ hprt0.b.prtres = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ -+ /* Restore interrupts */ -+ 
dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); -+ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */ -+ /* Save current interrupt mask */ -+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); -+ -+ /* Disable all interrupts while we muck with -+ * the hardware directly -+ */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* 15 second delay per the test spec */ -+ mdelay(15000); -+ -+ /* Send the Setup packet */ -+ do_setup(); -+ -+ /* 15 second delay so nothing else happens for awhile */ -+ mdelay(15000); -+ -+ /* Restore interrupts */ -+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); -+ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */ -+ /* Save current interrupt mask */ -+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); -+ -+ /* Disable all interrupts while we muck with -+ * the hardware directly -+ */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* Send the Setup packet */ -+ do_setup(); -+ -+ /* 15 second delay so nothing else happens for awhile */ -+ mdelay(15000); -+ -+ /* Send the In and Ack packets */ -+ do_in_ack(); -+ -+ /* 15 second delay so nothing else happens for awhile */ -+ mdelay(15000); -+ -+ /* Restore interrupts */ -+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); -+ } -+ } -+ break; -+ } -+#endif /* DWC_HS_ELECT_TST */ -+ -+ case USB_PORT_FEAT_INDICATOR: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); -+ /* Not supported */ -+ break; -+ default: -+ retval = -EINVAL; -+ DWC_ERROR("DWC OTG HCD - " -+ "SetPortFeature request %xh " -+ "unknown or unsupported\n", wValue); -+ break; -+ } -+ break; -+ default: -+ error: -+ retval = -EINVAL; -+ DWC_WARN("DWC OTG HCD - " -+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n", -+ typeReq, wIndex, wValue); -+ break; -+ } -+ -+ return retval; -+} -+ -+/** -+ * Assigns transactions from a QTD to a free host channel and initializes the -+ * host channel to perform the transactions. The host channel is removed from -+ * the free list. -+ * -+ * @param hcd The HCD state structure. -+ * @param qh Transactions from the first QTD for this QH are selected and -+ * assigned to a free host channel. -+ */ -+static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ dwc_hc_t *hc; -+ dwc_otg_qtd_t *qtd; -+ struct urb *urb; -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh); -+ -+ hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry); -+ -+ /* Remove the host channel from the free list. */ -+ list_del_init(&hc->hc_list_entry); -+ -+ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); -+ urb = qtd->urb; -+ qh->channel = hc; -+ qh->qtd_in_process = qtd; -+ -+ /* -+ * Use usb_pipedevice to determine device address. This address is -+ * 0 before the SET_ADDRESS command and the correct address afterward. 
-+ */ -+ hc->dev_addr = usb_pipedevice(urb->pipe); -+ hc->ep_num = usb_pipeendpoint(urb->pipe); -+ -+ if (urb->dev->speed == USB_SPEED_LOW) { -+ hc->speed = DWC_OTG_EP_SPEED_LOW; -+ } else if (urb->dev->speed == USB_SPEED_FULL) { -+ hc->speed = DWC_OTG_EP_SPEED_FULL; -+ } else { -+ hc->speed = DWC_OTG_EP_SPEED_HIGH; -+ } -+ -+ hc->max_packet = dwc_max_packet(qh->maxp); -+ -+ hc->xfer_started = 0; -+ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; -+ hc->error_state = (qtd->error_count > 0); -+ hc->halt_on_queue = 0; -+ hc->halt_pending = 0; -+ hc->requests = 0; -+ -+ /* -+ * The following values may be modified in the transfer type section -+ * below. The xfer_len value may be reduced when the transfer is -+ * started to accommodate the max widths of the XferSize and PktCnt -+ * fields in the HCTSIZn register. -+ */ -+ hc->do_ping = qh->ping_state; -+ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); -+ hc->data_pid_start = qh->data_toggle; -+ hc->multi_count = 1; -+ -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length; -+ } else { -+ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length; -+ } -+ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length; -+ hc->xfer_count = 0; -+ -+ /* -+ * Set the split attributes -+ */ -+ hc->do_split = 0; -+ if (qh->do_split) { -+ hc->do_split = 1; -+ hc->xact_pos = qtd->isoc_split_pos; -+ hc->complete_split = qtd->complete_split; -+ hc->hub_addr = urb->dev->tt->hub->devnum; -+ hc->port_addr = urb->dev->ttport; -+ } -+ -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: -+ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; -+ switch (qtd->control_phase) { -+ case DWC_OTG_CONTROL_SETUP: -+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n"); -+ hc->do_ping = 0; -+ hc->ep_is_in = 0; -+ hc->data_pid_start = DWC_OTG_HC_PID_SETUP; -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)urb->setup_dma; -+ } else { -+ hc->xfer_buff = (uint8_t *)urb->setup_packet; -+ } -+ hc->xfer_len = 8; -+ break; -+ case DWC_OTG_CONTROL_DATA: -+ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n"); -+ hc->data_pid_start = qtd->data_toggle; -+ break; -+ case DWC_OTG_CONTROL_STATUS: -+ /* -+ * Direction is opposite of data direction or IN if no -+ * data. 
-+ */ -+ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n"); -+ if (urb->transfer_buffer_length == 0) { -+ hc->ep_is_in = 1; -+ } else { -+ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN); -+ } -+ if (hc->ep_is_in) { -+ hc->do_ping = 0; -+ } -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; -+ hc->xfer_len = 0; -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)hcd->status_buf_dma; -+ } else { -+ hc->xfer_buff = (uint8_t *)hcd->status_buf; -+ } -+ break; -+ } -+ break; -+ case PIPE_BULK: -+ hc->ep_type = DWC_OTG_EP_TYPE_BULK; -+ break; -+ case PIPE_INTERRUPT: -+ hc->ep_type = DWC_OTG_EP_TYPE_INTR; -+ break; -+ case PIPE_ISOCHRONOUS: -+ { -+ struct usb_iso_packet_descriptor *frame_desc; -+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; -+ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)urb->transfer_dma; -+ } else { -+ hc->xfer_buff = (uint8_t *)urb->transfer_buffer; -+ } -+ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset; -+ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset; -+ -+ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) { -+ if (hc->xfer_len <= 188) { -+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL; -+ } -+ else { -+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN; -+ } -+ } -+ } -+ break; -+ } -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * This value may be modified when the transfer is started to -+ * reflect the actual transfer length. -+ */ -+ hc->multi_count = dwc_hb_mult(qh->maxp); -+ } -+ -+ dwc_otg_hc_init(hcd->core_if, hc); -+ hc->qh = qh; -+} -+ -+/** -+ * This function selects transactions from the HCD transfer schedule and -+ * assigns them to available host channels. It is called from HCD interrupt -+ * handler functions. -+ * -+ * @param hcd The HCD state structure. -+ * -+ * @return The types of new transactions that were assigned to host channels. -+ */ -+dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd) -+{ -+ struct list_head *qh_ptr; -+ dwc_otg_qh_t *qh; -+ int num_channels; -+ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE; -+ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); -+#endif -+ -+ /* Process entries in the periodic ready list. */ -+ qh_ptr = hcd->periodic_sched_ready.next; -+ while (qh_ptr != &hcd->periodic_sched_ready && -+ !list_empty(&hcd->free_hc_list)) { -+ -+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ assign_and_init_hc(hcd, qh); -+ -+ /* -+ * Move the QH from the periodic ready schedule to the -+ * periodic assigned schedule. -+ */ -+ qh_ptr = qh_ptr->next; -+ list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned); -+ -+ ret_val = DWC_OTG_TRANSACTION_PERIODIC; -+ } -+ -+ /* -+ * Process entries in the inactive portion of the non-periodic -+ * schedule. Some free host channels may not be used if they are -+ * reserved for periodic transfers. -+ */ -+ qh_ptr = hcd->non_periodic_sched_inactive.next; -+ num_channels = hcd->core_if->core_params->host_channels; -+ while (qh_ptr != &hcd->non_periodic_sched_inactive && -+ (hcd->non_periodic_channels < -+ num_channels - hcd->periodic_channels) && -+ !list_empty(&hcd->free_hc_list)) { -+ -+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ assign_and_init_hc(hcd, qh); -+ -+ /* -+ * Move the QH from the non-periodic inactive schedule to the -+ * non-periodic active schedule. 
-+ */ -+ qh_ptr = qh_ptr->next; -+ list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active); -+ -+ if (ret_val == DWC_OTG_TRANSACTION_NONE) { -+ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC; -+ } else { -+ ret_val = DWC_OTG_TRANSACTION_ALL; -+ } -+ -+ hcd->non_periodic_channels++; -+ } -+ -+ return ret_val; -+} -+ -+/** -+ * Attempts to queue a single transaction request for a host channel -+ * associated with either a periodic or non-periodic transfer. This function -+ * assumes that there is space available in the appropriate request queue. For -+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space -+ * is available in the appropriate Tx FIFO. -+ * -+ * @param hcd The HCD state structure. -+ * @param hc Host channel descriptor associated with either a periodic or -+ * non-periodic transfer. -+ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx -+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic -+ * transfers. -+ * -+ * @return 1 if a request is queued and more requests may be needed to -+ * complete the transfer, 0 if no more requests are required for this -+ * transfer, -1 if there is insufficient space in the Tx FIFO. -+ */ -+static int queue_transaction(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ uint16_t fifo_dwords_avail) -+{ -+ int retval; -+ -+ if (hcd->core_if->dma_enable) { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ hc->qh->ping_state = 0; -+ } -+ retval = 0; -+ } else if (hc->halt_pending) { -+ /* Don't queue a request if the channel has been halted. */ -+ retval = 0; -+ } else if (hc->halt_on_queue) { -+ dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status); -+ retval = 0; -+ } else if (hc->do_ping) { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ } -+ retval = 0; -+ } else if (!hc->ep_is_in || -+ hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { -+ if ((fifo_dwords_avail * 4) >= hc->max_packet) { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ retval = 1; -+ } else { -+ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); -+ } -+ } else { -+ retval = -1; -+ } -+ } else { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ retval = 1; -+ } else { -+ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); -+ } -+ } -+ -+ return retval; -+} -+ -+/** -+ * Processes active non-periodic channels and queues transactions for these -+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx -+ * FIFO Empty interrupt is enabled if there are more transactions to queue as -+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx -+ * FIFO Empty interrupt is disabled. -+ */ -+static void process_non_periodic_channels(dwc_otg_hcd_t *hcd) -+{ -+ gnptxsts_data_t tx_status; -+ struct list_head *orig_qh_ptr; -+ dwc_otg_qh_t *qh; -+ int status; -+ int no_queue_space = 0; -+ int no_fifo_space = 0; -+ int more_to_do = 0; -+ -+ dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs; -+ -+ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n"); -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n", -+ tx_status.b.nptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n", -+ tx_status.b.nptxfspcavail); -+#endif -+ /* -+ * Keep track of the starting point. Skip over the start-of-list -+ * entry. 
-+ */ -+ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ } -+ orig_qh_ptr = hcd->non_periodic_qh_ptr; -+ -+ /* -+ * Process once through the active list or until no more space is -+ * available in the request queue or the Tx FIFO. -+ */ -+ do { -+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) { -+ no_queue_space = 1; -+ break; -+ } -+ -+ qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail); -+ -+ if (status > 0) { -+ more_to_do = 1; -+ } else if (status < 0) { -+ no_fifo_space = 1; -+ break; -+ } -+ -+ /* Advance to next QH, skipping start-of-list entry. */ -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ } -+ -+ } while (hcd->non_periodic_qh_ptr != orig_qh_ptr); -+ -+ if (!hcd->core_if->dma_enable) { -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ intr_mask.b.nptxfempty = 1; -+ -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n", -+ tx_status.b.nptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n", -+ tx_status.b.nptxfspcavail); -+#endif -+ if (more_to_do || no_queue_space || no_fifo_space) { -+ /* -+ * May need to queue more transactions as the request -+ * queue or Tx FIFO empties. Enable the non-periodic -+ * Tx FIFO empty interrupt. (Always use the half-empty -+ * level to ensure that new requests are loaded as -+ * soon as possible.) -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); -+ } else { -+ /* -+ * Disable the Tx FIFO empty interrupt since there are -+ * no more transactions that need to be queued right -+ * now. This function is called from interrupt -+ * handlers to queue more transactions as transfer -+ * states change. -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ } -+} -+ -+/** -+ * Processes periodic channels for the next frame and queues transactions for -+ * these channels to the DWC_otg controller. After queueing transactions, the -+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions -+ * to queue as Periodic Tx FIFO or request queue space becomes available. -+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. 
-+ */ -+static void process_periodic_channels(dwc_otg_hcd_t *hcd) -+{ -+ hptxsts_data_t tx_status; -+ struct list_head *qh_ptr; -+ dwc_otg_qh_t *qh; -+ int status; -+ int no_queue_space = 0; -+ int no_fifo_space = 0; -+ -+ dwc_otg_host_global_regs_t *host_regs; -+ host_regs = hcd->core_if->host_if->host_global_regs; -+ -+ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"); -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n", -+ tx_status.b.ptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n", -+ tx_status.b.ptxfspcavail); -+#endif -+ -+ qh_ptr = hcd->periodic_sched_assigned.next; -+ while (qh_ptr != &hcd->periodic_sched_assigned) { -+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); -+ if (tx_status.b.ptxqspcavail == 0) { -+ no_queue_space = 1; -+ break; -+ } -+ -+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ -+ /* -+ * Set a flag if we're queuing high-bandwidth in slave mode. -+ * The flag prevents any halts to get into the request queue in -+ * the middle of multiple high-bandwidth packets getting queued. -+ */ -+ if (!hcd->core_if->dma_enable && -+ qh->channel->multi_count > 1) -+ { -+ hcd->core_if->queuing_high_bandwidth = 1; -+ } -+ -+ status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail); -+ if (status < 0) { -+ no_fifo_space = 1; -+ break; -+ } -+ -+ /* -+ * In Slave mode, stay on the current transfer until there is -+ * nothing more to do or the high-bandwidth request count is -+ * reached. In DMA mode, only need to queue one request. The -+ * controller automatically handles multiple packets for -+ * high-bandwidth transfers. -+ */ -+ if (hcd->core_if->dma_enable || status == 0 || -+ qh->channel->requests == qh->channel->multi_count) { -+ qh_ptr = qh_ptr->next; -+ /* -+ * Move the QH from the periodic assigned schedule to -+ * the periodic queued schedule. -+ */ -+ list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued); -+ -+ /* done queuing high bandwidth */ -+ hcd->core_if->queuing_high_bandwidth = 0; -+ } -+ } -+ -+ if (!hcd->core_if->dma_enable) { -+ dwc_otg_core_global_regs_t *global_regs; -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ global_regs = hcd->core_if->core_global_regs; -+ intr_mask.b.ptxfempty = 1; -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n", -+ tx_status.b.ptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n", -+ tx_status.b.ptxfspcavail); -+#endif -+ if (!list_empty(&hcd->periodic_sched_assigned) || -+ no_queue_space || no_fifo_space) { -+ /* -+ * May need to queue more transactions as the request -+ * queue or Tx FIFO empties. Enable the periodic Tx -+ * FIFO empty interrupt. (Always use the half-empty -+ * level to ensure that new requests are loaded as -+ * soon as possible.) -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); -+ } else { -+ /* -+ * Disable the Tx FIFO empty interrupt since there are -+ * no more transactions that need to be queued right -+ * now. This function is called from interrupt -+ * handlers to queue more transactions as transfer -+ * states change. -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ } -+} -+ -+/** -+ * This function processes the currently active host channels and queues -+ * transactions for these channels to the DWC_otg controller. 
It is called -+ * from HCD interrupt handler functions. -+ * -+ * @param hcd The HCD state structure. -+ * @param tr_type The type(s) of transactions to queue (non-periodic, -+ * periodic, or both). -+ */ -+void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, -+ dwc_otg_transaction_type_e tr_type) -+{ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); -+#endif -+ /* Process host channels associated with periodic transfers. */ -+ if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC || -+ tr_type == DWC_OTG_TRANSACTION_ALL) && -+ !list_empty(&hcd->periodic_sched_assigned)) { -+ -+ process_periodic_channels(hcd); -+ } -+ -+ /* Process host channels associated with non-periodic transfers. */ -+ if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC || -+ tr_type == DWC_OTG_TRANSACTION_ALL) { -+ if (!list_empty(&hcd->non_periodic_sched_active)) { -+ process_non_periodic_channels(hcd); -+ } else { -+ /* -+ * Ensure NP Tx FIFO empty interrupt is disabled when -+ * there are no non-periodic transfers to process. -+ */ -+ gintmsk_data_t gintmsk = {.d32 = 0}; -+ gintmsk.b.nptxfempty = 1; -+ dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk, -+ gintmsk.d32, 0); -+ } -+ } -+} -+ -+/** -+ * Sets the final status of an URB and returns it to the device driver. Any -+ * required cleanup of the URB is performed. -+ */ -+void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status) -+{ -+#ifdef DEBUG -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n", -+ __func__, urb, usb_pipedevice(urb->pipe), -+ usb_pipeendpoint(urb->pipe), -+ usb_pipein(urb->pipe) ? "IN" : "OUT", status); -+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { -+ int i; -+ for (i = 0; i < urb->number_of_packets; i++) { -+ DWC_PRINT(" ISO Desc %d status: %d\n", -+ i, urb->iso_frame_desc[i].status); -+ } -+ } -+ } -+#endif -+ -+ //if we use the aligned buffer instead of the original unaligned buffer, -+ //for IN data, we have to move the data to the original buffer -+ if((urb->transfer_dma==urb->aligned_transfer_dma)&&((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_IN)){ -+ dma_sync_single_for_device(NULL,urb->transfer_dma,urb->actual_length,DMA_FROM_DEVICE); -+ memcpy(urb->transfer_buffer,urb->aligned_transfer_buffer,urb->actual_length); -+ } -+ -+ -+ urb->status = status; -+ urb->hcpriv = NULL; -+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status); -+} -+ -+/* -+ * Returns the Queue Head for an URB. -+ */ -+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb) -+{ -+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); -+ return (dwc_otg_qh_t *)ep->hcpriv; -+} -+ -+#ifdef DEBUG -+void dwc_print_setup_data(uint8_t *setup) -+{ -+ int i; -+ if (CHK_DEBUG_LEVEL(DBG_HCD)){ -+ DWC_PRINT("Setup Data = MSB "); -+ for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]); -+ DWC_PRINT("\n"); -+ DWC_PRINT(" bmRequestType Transfer = %s\n", (setup[0] & 0x80) ?
"Device-to-Host" : "Host-to-Device"); -+ DWC_PRINT(" bmRequestType Type = "); -+ switch ((setup[0] & 0x60) >> 5) { -+ case 0: DWC_PRINT("Standard\n"); break; -+ case 1: DWC_PRINT("Class\n"); break; -+ case 2: DWC_PRINT("Vendor\n"); break; -+ case 3: DWC_PRINT("Reserved\n"); break; -+ } -+ DWC_PRINT(" bmRequestType Recipient = "); -+ switch (setup[0] & 0x1f) { -+ case 0: DWC_PRINT("Device\n"); break; -+ case 1: DWC_PRINT("Interface\n"); break; -+ case 2: DWC_PRINT("Endpoint\n"); break; -+ case 3: DWC_PRINT("Other\n"); break; -+ default: DWC_PRINT("Reserved\n"); break; -+ } -+ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); -+ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2])); -+ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4])); -+ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6])); -+ } -+} -+#endif -+ -+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) { -+} -+ -+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd) -+{ -+#ifdef DEBUG -+ int num_channels; -+ int i; -+ gnptxsts_data_t np_tx_status; -+ hptxsts_data_t p_tx_status; -+ -+ num_channels = hcd->core_if->core_params->host_channels; -+ DWC_PRINT("\n"); -+ DWC_PRINT("************************************************************\n"); -+ DWC_PRINT("HCD State:\n"); -+ DWC_PRINT(" Num channels: %d\n", num_channels); -+ for (i = 0; i < num_channels; i++) { -+ dwc_hc_t *hc = hcd->hc_ptr_array[i]; -+ DWC_PRINT(" Channel %d:\n", i); -+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", -+ hc->dev_addr, hc->ep_num, hc->ep_is_in); -+ DWC_PRINT(" speed: %d\n", hc->speed); -+ DWC_PRINT(" ep_type: %d\n", hc->ep_type); -+ DWC_PRINT(" max_packet: %d\n", hc->max_packet); -+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); -+ DWC_PRINT(" multi_count: %d\n", hc->multi_count); -+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); -+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); -+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); -+ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count); -+ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue); -+ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending); -+ DWC_PRINT(" halt_status: %d\n", hc->halt_status); -+ DWC_PRINT(" do_split: %d\n", hc->do_split); -+ DWC_PRINT(" complete_split: %d\n", hc->complete_split); -+ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr); -+ DWC_PRINT(" port_addr: %d\n", hc->port_addr); -+ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos); -+ DWC_PRINT(" requests: %d\n", hc->requests); -+ DWC_PRINT(" qh: %p\n", hc->qh); -+ if (hc->xfer_started) { -+ hfnum_data_t hfnum; -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); -+ hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar); -+ hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz); -+ hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk); -+ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32); -+ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32); -+ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32); -+ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32); -+ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32); -+ } -+ if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) { -+ dwc_otg_qtd_t *qtd; -+ struct urb *urb; -+ qtd = hc->qh->qtd_in_process; -+ urb = qtd->urb; -+ DWC_PRINT(" URB Info:\n"); -+ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb); -+ if (urb) { -+ DWC_PRINT(" Dev: %d, EP: %d %s\n", -+ 
usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), -+ usb_pipein(urb->pipe) ? "IN" : "OUT"); -+ DWC_PRINT(" Max packet size: %d\n", -+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); -+ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer); -+ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma); -+ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length); -+ DWC_PRINT(" actual_length: %d\n", urb->actual_length); -+ } -+ } -+ } -+ DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels); -+ DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels); -+ DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs); -+ np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts); -+ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail); -+ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail); -+ p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts); -+ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail); -+ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail); -+ dwc_otg_hcd_dump_frrem(hcd); -+ dwc_otg_dump_global_registers(hcd->core_if); -+ dwc_otg_dump_host_registers(hcd->core_if); -+ DWC_PRINT("************************************************************\n"); -+ DWC_PRINT("\n"); -+#endif -+} -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_hcd.h -@@ -0,0 +1,647 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.h $ -+ * $Revision: #45 $ -+ * $Date: 2008/07/15 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. 
-+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+#ifndef __DWC_HCD_H__ -+#define __DWC_HCD_H__ -+ -+#include -+#include -+#include -+ -+struct dwc_otg_device; -+ -+#include "otg_cil.h" -+ -+/** -+ * @file -+ * -+ * This file contains the structures, constants, and interfaces for -+ * the Host Contoller Driver (HCD). -+ * -+ * The Host Controller Driver (HCD) is responsible for translating requests -+ * from the USB Driver into the appropriate actions on the DWC_otg controller. -+ * It isolates the USBD from the specifics of the controller by providing an -+ * API to the USBD. -+ */ -+ -+/** -+ * Phases for control transfers. -+ */ -+typedef enum dwc_otg_control_phase { -+ DWC_OTG_CONTROL_SETUP, -+ DWC_OTG_CONTROL_DATA, -+ DWC_OTG_CONTROL_STATUS -+} dwc_otg_control_phase_e; -+ -+/** Transaction types. */ -+typedef enum dwc_otg_transaction_type { -+ DWC_OTG_TRANSACTION_NONE, -+ DWC_OTG_TRANSACTION_PERIODIC, -+ DWC_OTG_TRANSACTION_NON_PERIODIC, -+ DWC_OTG_TRANSACTION_ALL -+} dwc_otg_transaction_type_e; -+ -+/** -+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, -+ * interrupt, or isochronous transfer. A single QTD is created for each URB -+ * (of one of these types) submitted to the HCD. The transfer associated with -+ * a QTD may require one or multiple transactions. -+ * -+ * A QTD is linked to a Queue Head, which is entered in either the -+ * non-periodic or periodic schedule for execution. When a QTD is chosen for -+ * execution, some or all of its transactions may be executed. After -+ * execution, the state of the QTD is updated. The QTD may be retired if all -+ * its transactions are complete or if an error occurred. Otherwise, it -+ * remains in the schedule so more transactions can be executed later. -+ */ -+typedef struct dwc_otg_qtd { -+ /** -+ * Determines the PID of the next data packet for the data phase of -+ * control transfers. Ignored for other transfer types.
-+ * One of the following values: -+ * - DWC_OTG_HC_PID_DATA0 -+ * - DWC_OTG_HC_PID_DATA1 -+ */ -+ uint8_t data_toggle; -+ -+ /** Current phase for control transfers (Setup, Data, or Status). */ -+ dwc_otg_control_phase_e control_phase; -+ -+ /** Keep track of the current split type -+ * for FS/LS endpoints on a HS Hub */ -+ uint8_t complete_split; -+ -+ /** How many bytes transferred during SSPLIT OUT */ -+ uint32_t ssplit_out_xfer_count; -+ -+ /** -+ * Holds the number of bus errors that have occurred for a transaction -+ * within this transfer. -+ */ -+ uint8_t error_count; -+ -+ /** -+ * Index of the next frame descriptor for an isochronous transfer. A -+ * frame descriptor describes the buffer position and length of the -+ * data to be transferred in the next scheduled (micro)frame of an -+ * isochronous transfer. It also holds status for that transaction. -+ * The frame index starts at 0. -+ */ -+ int isoc_frame_index; -+ -+ /** Position of the ISOC split on full/low speed */ -+ uint8_t isoc_split_pos; -+ -+ /** Position of the ISOC split in the buffer for the current frame */ -+ uint16_t isoc_split_offset; -+ -+ /** URB for this transfer */ -+ struct urb *urb; -+ -+ /** This list of QTDs */ -+ struct list_head qtd_list_entry; -+ -+} dwc_otg_qtd_t; -+ -+/** -+ * A Queue Head (QH) holds the static characteristics of an endpoint and -+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may -+ * be entered in either the non-periodic or periodic schedule. -+ */ -+typedef struct dwc_otg_qh { -+ /** -+ * Endpoint type. -+ * One of the following values: -+ * - USB_ENDPOINT_XFER_CONTROL -+ * - USB_ENDPOINT_XFER_ISOC -+ * - USB_ENDPOINT_XFER_BULK -+ * - USB_ENDPOINT_XFER_INT -+ */ -+ uint8_t ep_type; -+ uint8_t ep_is_in; -+ -+ /** wMaxPacketSize Field of Endpoint Descriptor. */ -+ uint16_t maxp; -+ -+ /** -+ * Determines the PID of the next data packet for non-control -+ * transfers. Ignored for control transfers.
-+ * One of the following values: -+ * - DWC_OTG_HC_PID_DATA0 -+ * - DWC_OTG_HC_PID_DATA1 -+ */ -+ uint8_t data_toggle; -+ -+ /** Ping state if 1. */ -+ uint8_t ping_state; -+ -+ /** -+ * List of QTDs for this QH. -+ */ -+ struct list_head qtd_list; -+ -+ /** Host channel currently processing transfers for this QH. */ -+ dwc_hc_t *channel; -+ -+ /** QTD currently assigned to a host channel for this QH. */ -+ dwc_otg_qtd_t *qtd_in_process; -+ -+ /** Full/low speed endpoint on high-speed hub requires split. */ -+ uint8_t do_split; -+ -+ /** @name Periodic schedule information */ -+ /** @{ */ -+ -+ /** Bandwidth in microseconds per (micro)frame. */ -+ uint8_t usecs; -+ -+ /** Interval between transfers in (micro)frames. */ -+ uint16_t interval; -+ -+ /** -+ * (micro)frame to initialize a periodic transfer. The transfer -+ * executes in the following (micro)frame. -+ */ -+ uint16_t sched_frame; -+ -+ /** (micro)frame at which last start split was initialized. */ -+ uint16_t start_split_frame; -+ -+ /** @} */ -+ -+ /** Entry for QH in either the periodic or non-periodic schedule. */ -+ struct list_head qh_list_entry; -+} dwc_otg_qh_t; -+ -+/** -+ * This structure holds the state of the HCD, including the non-periodic and -+ * periodic schedules. -+ */ -+typedef struct dwc_otg_hcd { -+ /** The DWC otg device pointer */ -+ struct dwc_otg_device *otg_dev; -+ -+ /** DWC OTG Core Interface Layer */ -+ dwc_otg_core_if_t *core_if; -+ -+ /** Internal DWC HCD Flags */ -+ volatile union dwc_otg_hcd_internal_flags { -+ uint32_t d32; -+ struct { -+ unsigned port_connect_status_change : 1; -+ unsigned port_connect_status : 1; -+ unsigned port_reset_change : 1; -+ unsigned port_enable_change : 1; -+ unsigned port_suspend_change : 1; -+ unsigned port_over_current_change : 1; -+ unsigned reserved : 27; -+ } b; -+ } flags; -+ -+ /** -+ * Inactive items in the non-periodic schedule. This is a list of -+ * Queue Heads. Transfers associated with these Queue Heads are not -+ * currently assigned to a host channel. -+ */ -+ struct list_head non_periodic_sched_inactive; -+ -+ /** -+ * Active items in the non-periodic schedule. This is a list of -+ * Queue Heads. Transfers associated with these Queue Heads are -+ * currently assigned to a host channel. -+ */ -+ struct list_head non_periodic_sched_active; -+ -+ /** -+ * Pointer to the next Queue Head to process in the active -+ * non-periodic schedule. -+ */ -+ struct list_head *non_periodic_qh_ptr; -+ -+ /** -+ * Inactive items in the periodic schedule. This is a list of QHs for -+ * periodic transfers that are _not_ scheduled for the next frame. -+ * Each QH in the list has an interval counter that determines when it -+ * needs to be scheduled for execution. This scheduling mechanism -+ * allows only a simple calculation for periodic bandwidth used (i.e. -+ * must assume that all periodic transfers may need to execute in the -+ * same frame). However, it greatly simplifies scheduling and should -+ * be sufficient for the vast majority of OTG hosts, which need to -+ * connect to a small number of peripherals at one time. -+ * -+ * Items move from this list to periodic_sched_ready when the QH -+ * interval counter is 0 at SOF. -+ */ -+ struct list_head periodic_sched_inactive; -+ -+ /** -+ * List of periodic QHs that are ready for execution in the next -+ * frame, but have not yet been assigned to host channels. -+ * -+ * Items move from this list to periodic_sched_assigned as host -+ * channels become available during the current frame. 
-+ */ -+ struct list_head periodic_sched_ready; -+ -+ /** -+ * List of periodic QHs to be executed in the next frame that are -+ * assigned to host channels. -+ * -+ * Items move from this list to periodic_sched_queued as the -+ * transactions for the QH are queued to the DWC_otg controller. -+ */ -+ struct list_head periodic_sched_assigned; -+ -+ /** -+ * List of periodic QHs that have been queued for execution. -+ * -+ * Items move from this list to either periodic_sched_inactive or -+ * periodic_sched_ready when the channel associated with the transfer -+ * is released. If the interval for the QH is 1, the item moves to -+ * periodic_sched_ready because it must be rescheduled for the next -+ * frame. Otherwise, the item moves to periodic_sched_inactive. -+ */ -+ struct list_head periodic_sched_queued; -+ -+ /** -+ * Total bandwidth claimed so far for periodic transfers. This value -+ * is in microseconds per (micro)frame. The assumption is that all -+ * periodic transfers may occur in the same (micro)frame. -+ */ -+ uint16_t periodic_usecs; -+ -+ /** -+ * Frame number read from the core at SOF. The value ranges from 0 to -+ * DWC_HFNUM_MAX_FRNUM. -+ */ -+ uint16_t frame_number; -+ -+ /** -+ * Free host channels in the controller. This is a list of -+ * dwc_hc_t items. -+ */ -+ struct list_head free_hc_list; -+ -+ /** -+ * Number of host channels assigned to periodic transfers. Currently -+ * assuming that there is a dedicated host channel for each periodic -+ * transaction and at least one host channel available for -+ * non-periodic transactions. -+ */ -+ int periodic_channels; -+ -+ /** -+ * Number of host channels assigned to non-periodic transfers. -+ */ -+ int non_periodic_channels; -+ -+ /** -+ * Array of pointers to the host channel descriptors. Allows accessing -+ * a host channel descriptor given the host channel number. This is -+ * useful in interrupt handlers. -+ */ -+ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS]; -+ -+ /** -+ * Buffer to use for any data received during the status phase of a -+ * control transfer. Normally no data is transferred during the status -+ * phase. This buffer is used as a bit bucket. -+ */ -+ uint8_t *status_buf; -+ -+ /** -+ * DMA address for status_buf. -+ */ -+ dma_addr_t status_buf_dma; -+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 -+ -+ /** -+ * Structure to allow starting the HCD in a non-interrupt context -+ * during an OTG role change. -+ */ -+ struct delayed_work start_work; -+ -+ /** -+ * Connection timer. An OTG host must display a message if the device -+ * does not connect. Started when the VBus power is turned on via -+ * sysfs attribute "buspower". -+ */ -+ struct timer_list conn_timer; -+ -+ /* Tasklet to do a reset */ -+ struct tasklet_struct *reset_tasklet; -+ -+ /* */ -+ spinlock_t lock; -+ -+#ifdef DEBUG -+ uint32_t frrem_samples; -+ uint64_t frrem_accum; -+ -+ uint32_t hfnum_7_samples_a; -+ uint64_t hfnum_7_frrem_accum_a; -+ uint32_t hfnum_0_samples_a; -+ uint64_t hfnum_0_frrem_accum_a; -+ uint32_t hfnum_other_samples_a; -+ uint64_t hfnum_other_frrem_accum_a; -+ -+ uint32_t hfnum_7_samples_b; -+ uint64_t hfnum_7_frrem_accum_b; -+ uint32_t hfnum_0_samples_b; -+ uint64_t hfnum_0_frrem_accum_b; -+ uint32_t hfnum_other_samples_b; -+ uint64_t hfnum_other_frrem_accum_b; -+#endif -+} dwc_otg_hcd_t; -+ -+/** Gets the dwc_otg_hcd from a struct usb_hcd */ -+static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd) -+{ -+ return (dwc_otg_hcd_t *)(hcd->hcd_priv); -+} -+ -+/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. 
*/ -+static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv); -+} -+ -+/** @name HCD Create/Destroy Functions */ -+/** @{ */ -+extern int dwc_otg_hcd_init(struct platform_device *pdev); -+extern void dwc_otg_hcd_remove(struct platform_device *pdev); -+/** @} */ -+ -+/** @name Linux HC Driver API Functions */ -+/** @{ */ -+ -+extern int dwc_otg_hcd_start(struct usb_hcd *hcd); -+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); -+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); -+extern void dwc_otg_hcd_free(struct usb_hcd *hcd); -+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, -+ // struct usb_host_endpoint *ep, -+ struct urb *urb, -+ gfp_t mem_flags -+ ); -+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, -+ struct urb *urb, int status); -+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, -+ struct usb_host_endpoint *ep); -+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd); -+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, -+ char *buf); -+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, -+ u16 typeReq, -+ u16 wValue, -+ u16 wIndex, -+ char *buf, -+ u16 wLength); -+ -+/** @} */ -+ -+/** @name Transaction Execution Functions */ -+/** @{ */ -+extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd); -+extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, -+ dwc_otg_transaction_type_e tr_type); -+extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *urb, -+ int status); -+/** @} */ -+ -+/** @name Interrupt Handler Functions */ -+/** @{ */ -+extern int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_disconnect_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num); -+extern int32_t dwc_otg_hcd_handle_session_req_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+/** @} */ -+ -+ -+/** @name Schedule Queue Functions */ -+/** @{ */ -+ -+/* Implemented in dwc_otg_hcd_queue.c */ -+extern dwc_otg_qh_t *dwc_otg_hcd_qh_create(dwc_otg_hcd_t *hcd, struct urb *urb); -+extern void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb); -+extern void dwc_otg_hcd_qh_free(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); -+extern int dwc_otg_hcd_qh_add(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); -+extern void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); -+extern void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_csplit); -+ -+/** Remove and free a QH */ -+static inline void dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd_t *hcd, -+ dwc_otg_qh_t *qh) -+{ -+ dwc_otg_hcd_qh_remove(hcd, qh); -+ dwc_otg_hcd_qh_free(hcd, 
qh); -+} -+ -+/** Allocates memory for a QH structure. -+ * @return Returns the memory allocated or NULL on error. */ -+static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc(void) -+{ -+ return (dwc_otg_qh_t *) kmalloc(sizeof(dwc_otg_qh_t), GFP_KERNEL); -+} -+ -+extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create(struct urb *urb); -+extern void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t *qtd, struct urb *urb); -+extern int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd); -+ -+/** Allocates memory for a QTD structure. -+ * @return Returns the memory allocated or NULL on error. */ -+static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc(void) -+{ -+ return (dwc_otg_qtd_t *) kmalloc(sizeof(dwc_otg_qtd_t), GFP_KERNEL); -+} -+ -+/** Frees the memory for a QTD structure. QTD should already be removed from -+ * list. -+ * @param[in] qtd QTD to free.*/ -+static inline void dwc_otg_hcd_qtd_free(dwc_otg_qtd_t *qtd) -+{ -+ kfree(qtd); -+} -+ -+/** Removes a QTD from list. -+ * @param[in] hcd HCD instance. -+ * @param[in] qtd QTD to remove from list. */ -+static inline void dwc_otg_hcd_qtd_remove(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) -+{ -+ unsigned long flags; -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); -+ list_del(&qtd->qtd_list_entry); -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); -+} -+ -+/** Remove and free a QTD */ -+static inline void dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) -+{ -+ dwc_otg_hcd_qtd_remove(hcd, qtd); -+ dwc_otg_hcd_qtd_free(qtd); -+} -+ -+/** @} */ -+ -+ -+/** @name Internal Functions */ -+/** @{ */ -+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb); -+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd); -+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd); -+/** @} */ -+ -+/** Gets the usb_host_endpoint associated with an URB. */ -+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *urb) -+{ -+ struct usb_device *dev = urb->dev; -+ int ep_num = usb_pipeendpoint(urb->pipe); -+ -+ if (usb_pipein(urb->pipe)) -+ return dev->ep_in[ep_num]; -+ else -+ return dev->ep_out[ep_num]; -+} -+ -+/** -+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is -+ * qualified with its direction (possible 32 endpoints per device). -+ */ -+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ -+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) -+ -+/** Gets the QH that contains the list_head */ -+#define dwc_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qh_t, qh_list_entry) -+ -+/** Gets the QTD that contains the list_head */ -+#define dwc_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qtd_t, qtd_list_entry) -+ -+/** Check if QH is non-periodic */ -+#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \ -+ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL)) -+ -+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */ -+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) -+ -+/** Packet size for any kind of endpoint descriptor */ -+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) -+ -+/** -+ * Returns true if _frame1 is less than or equal to _frame2. The comparison is -+ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the -+ * frame number when the max frame number is reached. 
-+ */ -+static inline int dwc_frame_num_le(uint16_t frame1, uint16_t frame2) -+{ -+ return ((frame2 - frame1) & DWC_HFNUM_MAX_FRNUM) <= -+ (DWC_HFNUM_MAX_FRNUM >> 1); -+} -+ -+/** -+ * Returns true if _frame1 is greater than _frame2. The comparison is done -+ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame -+ * number when the max frame number is reached. -+ */ -+static inline int dwc_frame_num_gt(uint16_t frame1, uint16_t frame2) -+{ -+ return (frame1 != frame2) && -+ (((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) < -+ (DWC_HFNUM_MAX_FRNUM >> 1)); -+} -+ -+/** -+ * Increments _frame by the amount specified by _inc. The addition is done -+ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. -+ */ -+static inline uint16_t dwc_frame_num_inc(uint16_t frame, uint16_t inc) -+{ -+ return (frame + inc) & DWC_HFNUM_MAX_FRNUM; -+} -+ -+static inline uint16_t dwc_full_frame_num(uint16_t frame) -+{ -+ return (frame & DWC_HFNUM_MAX_FRNUM) >> 3; -+} -+ -+static inline uint16_t dwc_micro_frame_num(uint16_t frame) -+{ -+ return frame & 0x7; -+} -+ -+#ifdef DEBUG -+/** -+ * Macro to sample the remaining PHY clocks left in the current frame. This -+ * may be used during debugging to determine the average time it takes to -+ * execute sections of code. There are two possible sample points, "a" and -+ * "b", so the _letter argument must be one of these values. -+ * -+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For -+ * example, "cat /sys/devices/lm0/hcd_frrem". -+ */ -+#define dwc_sample_frrem(_hcd, _qh, _letter) \ -+{ \ -+ hfnum_data_t hfnum; \ -+ dwc_otg_qtd_t *qtd; \ -+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \ -+ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \ -+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \ -+ switch (hfnum.b.frnum & 0x7) { \ -+ case 7: \ -+ _hcd->hfnum_7_samples_##_letter++; \ -+ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \ -+ break; \ -+ case 0: \ -+ _hcd->hfnum_0_samples_##_letter++; \ -+ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \ -+ break; \ -+ default: \ -+ _hcd->hfnum_other_samples_##_letter++; \ -+ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \ -+ break; \ -+ } \ -+ } \ -+} -+#else -+#define dwc_sample_frrem(_hcd, _qh, _letter) -+#endif -+#endif -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_hcd_intr.c -@@ -0,0 +1,1826 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $ -+ * $Revision: #70 $ -+ * $Date: 2008/10/16 $ -+ * $Change: 1117667 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. 
If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+ -+#include -+ -+#include "otg_driver.h" -+#include "otg_hcd.h" -+#include "otg_regs.h" -+ -+/** @file -+ * This file contains the implementation of the HCD Interrupt handlers. -+ */ -+ -+/** This function handles interrupts for the HCD. */ -+int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ int retval = 0; -+ -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ gintsts_data_t gintsts; -+#ifdef DEBUG -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+#endif -+ -+ /* Check if HOST Mode */ -+ if (dwc_otg_is_host_mode(core_if)) { -+ gintsts.d32 = dwc_otg_read_core_intr(core_if); -+ if (!gintsts.d32) { -+ return 0; -+ } -+ -+#ifdef DEBUG -+ /* Don't print debug message in the interrupt handler on SOF */ -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ DWC_DEBUGPL(DBG_HCD, "\n"); -+#endif -+ -+#ifdef DEBUG -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32); -+#endif -+ if (gintsts.b.usbreset) { -+ DWC_PRINT("Usb Reset In Host Mode\n"); -+ } -+ if (gintsts.b.sofintr) { -+ retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.rxstsqlvl) { -+ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.nptxfempty) { -+ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.i2cintr) { -+ /** @todo Implement i2cintr handler. */ -+ } -+ if (gintsts.b.portintr) { -+ retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.hcintr) { -+ retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.ptxfempty) { -+ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd); -+ } -+#ifdef DEBUG -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n"); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n", -+ dwc_read_reg32(&global_regs->gintsts)); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n", -+ dwc_read_reg32(&global_regs->gintmsk)); -+ } -+#endif -+ -+#ifdef DEBUG -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ DWC_DEBUGPL(DBG_HCD, "\n"); -+#endif -+ -+ } -+ S3C2410X_CLEAR_EINTPEND(); -+ -+ return retval; -+} -+ -+#ifdef DWC_TRACK_MISSED_SOFS -+#warning Compiling code to track missed SOFs -+#define FRAME_NUM_ARRAY_SIZE 1000 -+/** -+ * This function is for debug only. 
-+ */ -+static inline void track_missed_sofs(uint16_t curr_frame_number) -+{ -+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]; -+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE]; -+ static int frame_num_idx = 0; -+ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; -+ static int dumped_frame_num_array = 0; -+ -+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { -+ if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) { -+ frame_num_array[frame_num_idx] = curr_frame_number; -+ last_frame_num_array[frame_num_idx++] = last_frame_num; -+ } -+ } else if (!dumped_frame_num_array) { -+ int i; -+ printk(KERN_EMERG USB_DWC "Frame Last Frame\n"); -+ printk(KERN_EMERG USB_DWC "----- ----------\n"); -+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { -+ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n", -+ frame_num_array[i], last_frame_num_array[i]); -+ } -+ dumped_frame_num_array = 1; -+ } -+ last_frame_num = curr_frame_number; -+} -+#endif -+ -+/** -+ * Handles the start-of-frame interrupt in host mode. Non-periodic -+ * transactions may be queued to the DWC_otg controller for the current -+ * (micro)frame. Periodic transactions may be queued to the controller for the -+ * next (micro)frame. -+ */ -+int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd) -+{ -+ hfnum_data_t hfnum; -+ struct list_head *qh_entry; -+ dwc_otg_qh_t *qh; -+ dwc_otg_transaction_type_e tr_type; -+ gintsts_data_t gintsts = {.d32 = 0}; -+ -+ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); -+ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); -+#endif -+ hcd->frame_number = hfnum.b.frnum; -+ -+#ifdef DEBUG -+ hcd->frrem_accum += hfnum.b.frrem; -+ hcd->frrem_samples++; -+#endif -+ -+#ifdef DWC_TRACK_MISSED_SOFS -+ track_missed_sofs(hcd->frame_number); -+#endif -+ -+ /* Determine whether any periodic QHs should be executed. */ -+ qh_entry = hcd->periodic_sched_inactive.next; -+ while (qh_entry != &hcd->periodic_sched_inactive) { -+ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry); -+ qh_entry = qh_entry->next; -+ if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) { -+ /* -+ * Move QH to the ready list to be executed next -+ * (micro)frame. -+ */ -+ list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready); -+ } -+ } -+ -+ tr_type = dwc_otg_hcd_select_transactions(hcd); -+ if (tr_type != DWC_OTG_TRANSACTION_NONE) { -+ dwc_otg_hcd_queue_transactions(hcd, tr_type); -+ } -+ -+ /* Clear interrupt */ -+ gintsts.b.sofintr = 1; -+ dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at -+ * least one packet in the Rx FIFO. The packets are moved from the FIFO to -+ * memory if the DWC_otg controller is operating in Slave mode. 
*/ -+int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ host_grxsts_data_t grxsts; -+ dwc_hc_t *hc = NULL; -+ -+ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); -+ -+ grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp); -+ -+ hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; -+ -+ /* Packet Status */ -+ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum); -+ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt); -+ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start); -+ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts); -+ -+ switch (grxsts.b.pktsts) { -+ case DWC_GRXSTS_PKTSTS_IN: -+ /* Read the data into the host buffer. */ -+ if (grxsts.b.bcnt > 0) { -+ dwc_otg_read_packet(dwc_otg_hcd->core_if, -+ hc->xfer_buff, -+ grxsts.b.bcnt); -+ -+ /* Update the HC fields for the next packet received. */ -+ hc->xfer_count += grxsts.b.bcnt; -+ hc->xfer_buff += grxsts.b.bcnt; -+ } -+ -+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: -+ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: -+ case DWC_GRXSTS_PKTSTS_CH_HALTED: -+ /* Handled in interrupt, just ignore data */ -+ break; -+ default: -+ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts); -+ break; -+ } -+ -+ return 1; -+} -+ -+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More -+ * data packets may be written to the FIFO for OUT transfers. More requests -+ * may be written to the non-periodic request queue for IN transfers. This -+ * interrupt is enabled only in Slave mode. */ -+int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n"); -+ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, -+ DWC_OTG_TRANSACTION_NON_PERIODIC); -+ return 1; -+} -+ -+/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data -+ * packets may be written to the FIFO for OUT transfers. More requests may be -+ * written to the periodic request queue for IN transfers. This interrupt is -+ * enabled only in Slave mode. */ -+int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n"); -+ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, -+ DWC_OTG_TRANSACTION_PERIODIC); -+ return 1; -+} -+ -+/** There are multiple conditions that can cause a port interrupt. This function -+ * determines which interrupt conditions have occurred and handles them -+ * appropriately. */ -+int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ int retval = 0; -+ hprt0_data_t hprt0; -+ hprt0_data_t hprt0_modify; -+ -+ hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); -+ hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); -+ -+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in -+ * GINTSTS */ -+ -+ hprt0_modify.b.prtena = 0; -+ hprt0_modify.b.prtconndet = 0; -+ hprt0_modify.b.prtenchng = 0; -+ hprt0_modify.b.prtovrcurrchng = 0; -+ -+ /* Port Connect Detected -+ * Set flag and clear if detected */ -+ if (hprt0.b.prtconndet) { -+ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x " -+ "Port Connect Detected--\n", hprt0.d32); -+ dwc_otg_hcd->flags.b.port_connect_status_change = 1; -+ dwc_otg_hcd->flags.b.port_connect_status = 1; -+ hprt0_modify.b.prtconndet = 1; -+ -+ /* B-Device has connected, Delete the connection timer. 
*/ -+ del_timer( &dwc_otg_hcd->conn_timer ); -+ -+ /* The Hub driver asserts a reset when it sees port connect -+ * status change flag */ -+ retval |= 1; -+ } -+ -+ /* Port Enable Changed -+ * Clear if detected - Set internal flag if disabled */ -+ if (hprt0.b.prtenchng) { -+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " -+ "Port Enable Changed--\n", hprt0.d32); -+ hprt0_modify.b.prtenchng = 1; -+ if (hprt0.b.prtena == 1) { -+ int do_reset = 0; -+ dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params; -+ dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs; -+ dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if; -+ -+ /* Check if we need to adjust the PHY clock speed for -+ * low power and adjust it */ -+ if (params->host_support_fs_ls_low_power) { -+ gusbcfg_data_t usbcfg; -+ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ -+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED || -+ hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) { -+ /* -+ * Low power -+ */ -+ hcfg_data_t hcfg; -+ if (usbcfg.b.phylpwrclksel == 0) { -+ /* Set PHY low power clock select for FS/LS devices */ -+ usbcfg.b.phylpwrclksel = 1; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ do_reset = 1; -+ } -+ -+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); -+ -+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED && -+ params->host_ls_low_power_phy_clk == -+ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { -+ /* 6 MHZ */ -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n"); -+ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) { -+ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ; -+ dwc_write_reg32(&host_if->host_global_regs->hcfg, -+ hcfg.d32); -+ do_reset = 1; -+ } -+ } else { -+ /* 48 MHZ */ -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n"); -+ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) { -+ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ; -+ dwc_write_reg32(&host_if->host_global_regs->hcfg, -+ hcfg.d32); -+ do_reset = 1; -+ } -+ } -+ } else { -+ /* -+ * Not low power -+ */ -+ if (usbcfg.b.phylpwrclksel == 1) { -+ usbcfg.b.phylpwrclksel = 0; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ do_reset = 1; -+ } -+ } -+ -+ if (do_reset) { -+ tasklet_schedule(dwc_otg_hcd->reset_tasklet); -+ } -+ } -+ -+ if (!do_reset) { -+ /* Port has been enabled set the reset change flag */ -+ dwc_otg_hcd->flags.b.port_reset_change = 1; -+ } -+ } else { -+ dwc_otg_hcd->flags.b.port_enable_change = 1; -+ } -+ retval |= 1; -+ } -+ -+ /** Overcurrent Change Interrupt */ -+ if (hprt0.b.prtovrcurrchng) { -+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " -+ "Port Overcurrent Changed--\n", hprt0.d32); -+ dwc_otg_hcd->flags.b.port_over_current_change = 1; -+ hprt0_modify.b.prtovrcurrchng = 1; -+ retval |= 1; -+ } -+ -+ /* Clear Port Interrupts */ -+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32); -+ -+ return retval; -+} -+ -+/** This interrupt indicates that one or more host channels has a pending -+ * interrupt. There are multiple conditions that can cause each host channel -+ * interrupt. This function determines which conditions have occurred for each -+ * host channel interrupt and handles them appropriately. 
*/ -+int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ int i; -+ int retval = 0; -+ haint_data_t haint; -+ -+ /* Clear appropriate bits in HCINTn to clear the interrupt bit in -+ * GINTSTS */ -+ -+ haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if); -+ -+ for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) { -+ if (haint.b2.chint & (1 << i)) { -+ retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i); -+ } -+ } -+ -+ return retval; -+} -+ -+/* Macro used to clear one channel interrupt */ -+#define clear_hc_int(_hc_regs_, _intr_) \ -+do { \ -+ hcint_data_t hcint_clear = {.d32 = 0}; \ -+ hcint_clear.b._intr_ = 1; \ -+ dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \ -+} while (0) -+ -+/* -+ * Macro used to disable one channel interrupt. Channel interrupts are -+ * disabled when the channel is halted or released by the interrupt handler. -+ * There is no need to handle further interrupts of that type until the -+ * channel is re-assigned. In fact, subsequent handling may cause crashes -+ * because the channel structures are cleaned up when the channel is released. -+ */ -+#define disable_hc_int(_hc_regs_, _intr_) \ -+do { \ -+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ -+ hcintmsk.b._intr_ = 1; \ -+ dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \ -+} while (0) -+ -+/** -+ * Gets the actual length of a transfer after the transfer halts. _halt_status -+ * holds the reason for the halt. -+ * -+ * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE, -+ * *short_read is set to 1 upon return if less than the requested -+ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon -+ * return. short_read may also be NULL on entry, in which case it remains -+ * unchanged. -+ */ -+static uint32_t get_actual_xfer_length(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status, -+ int *short_read) -+{ -+ hctsiz_data_t hctsiz; -+ uint32_t length; -+ -+ if (short_read != NULL) { -+ *short_read = 0; -+ } -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ -+ if (halt_status == DWC_OTG_HC_XFER_COMPLETE) { -+ if (hc->ep_is_in) { -+ length = hc->xfer_len - hctsiz.b.xfersize; -+ if (short_read != NULL) { -+ *short_read = (hctsiz.b.xfersize != 0); -+ } -+ } else if (hc->qh->do_split) { -+ length = qtd->ssplit_out_xfer_count; -+ } else { -+ length = hc->xfer_len; -+ } -+ } else { -+ /* -+ * Must use the hctsiz.pktcnt field to determine how much data -+ * has been transferred. This field reflects the number of -+ * packets that have been transferred via the USB. This is -+ * always an integral number of packets if the transfer was -+ * halted before its normal completion. (Can't use the -+ * hctsiz.xfersize field because that reflects the number of -+ * bytes transferred via the AHB, not the USB). -+ */ -+ length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet; -+ } -+ -+ return length; -+} -+ -+/** -+ * Updates the state of the URB after a Transfer Complete interrupt on the -+ * host channel. Updates the actual_length field of the URB based on the -+ * number of bytes transferred via the host channel. Sets the URB status -+ * if the data transfer is finished. -+ * -+ * @return 1 if the data transfer specified by the URB is completely finished, -+ * 0 otherwise. 
-+ */ -+static int update_urb_state_xfer_comp(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ struct urb *urb, -+ dwc_otg_qtd_t *qtd) -+{ -+ int xfer_done = 0; -+ int short_read = 0; -+ -+ urb->actual_length += get_actual_xfer_length(hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_COMPLETE, -+ &short_read); -+ -+ if (short_read || urb->actual_length == urb->transfer_buffer_length) { -+ xfer_done = 1; -+ if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) { -+ urb->status = -EREMOTEIO; -+ } else { -+ urb->status = 0; -+ } -+ } -+ -+#ifdef DEBUG -+ { -+ hctsiz_data_t hctsiz; -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", -+ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len); -+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize); -+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", -+ urb->transfer_buffer_length); -+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); -+ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n", -+ short_read, xfer_done); -+ } -+#endif -+ -+ return xfer_done; -+} -+ -+/* -+ * Save the starting data toggle for the next transfer. The data toggle is -+ * saved in the QH for non-control transfers and it's saved in the QTD for -+ * control transfers. -+ */ -+static void save_data_toggle(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hctsiz_data_t hctsiz; -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ -+ if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { -+ dwc_otg_qh_t *qh = hc->qh; -+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { -+ qh->data_toggle = DWC_OTG_HC_PID_DATA0; -+ } else { -+ qh->data_toggle = DWC_OTG_HC_PID_DATA1; -+ } -+ } else { -+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { -+ qtd->data_toggle = DWC_OTG_HC_PID_DATA0; -+ } else { -+ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; -+ } -+ } -+} -+ -+/** -+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic -+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are -+ * still linked to the QH, the QH is added to the end of the inactive -+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic -+ * schedule if no more QTDs are linked to the QH. -+ */ -+static void deactivate_qh(dwc_otg_hcd_t *hcd, -+ dwc_otg_qh_t *qh, -+ int free_qtd) -+{ -+ int continue_split = 0; -+ dwc_otg_qtd_t *qtd; -+ -+ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd); -+ -+ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); -+ -+ if (qtd->complete_split) { -+ continue_split = 1; -+ } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID || -+ qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) { -+ continue_split = 1; -+ } -+ -+ if (free_qtd) { -+ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); -+ continue_split = 0; -+ } -+ -+ qh->channel = NULL; -+ qh->qtd_in_process = NULL; -+ dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split); -+} -+ -+/** -+ * Updates the state of an Isochronous URB when the transfer is stopped for -+ * any reason. The fields of the current entry in the frame descriptor array -+ * are set based on the transfer state and the input _halt_status. Completes -+ * the Isochronous URB if all the URB frames have been completed. -+ * -+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be -+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE. 
-+ */ -+static dwc_otg_halt_status_e -+update_isoc_urb_state(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ struct urb *urb = qtd->urb; -+ dwc_otg_halt_status_e ret_val = halt_status; -+ struct usb_iso_packet_descriptor *frame_desc; -+ -+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; -+ switch (halt_status) { -+ case DWC_OTG_HC_XFER_COMPLETE: -+ frame_desc->status = 0; -+ frame_desc->actual_length = -+ get_actual_xfer_length(hc, hc_regs, qtd, -+ halt_status, NULL); -+ break; -+ case DWC_OTG_HC_XFER_FRAME_OVERRUN: -+ urb->error_count++; -+ if (hc->ep_is_in) { -+ frame_desc->status = -ENOSR; -+ } else { -+ frame_desc->status = -ECOMM; -+ } -+ frame_desc->actual_length = 0; -+ break; -+ case DWC_OTG_HC_XFER_BABBLE_ERR: -+ urb->error_count++; -+ frame_desc->status = -EOVERFLOW; -+ /* Don't need to update actual_length in this case. */ -+ break; -+ case DWC_OTG_HC_XFER_XACT_ERR: -+ urb->error_count++; -+ frame_desc->status = -EPROTO; -+ frame_desc->actual_length = -+ get_actual_xfer_length(hc, hc_regs, qtd, -+ halt_status, NULL); -+ default: -+ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, -+ halt_status); -+ BUG(); -+ break; -+ } -+ -+ if (++qtd->isoc_frame_index == urb->number_of_packets) { -+ /* -+ * urb->status is not used for isoc transfers. -+ * The individual frame_desc statuses are used instead. -+ */ -+ dwc_otg_hcd_complete_urb(hcd, urb, 0); -+ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; -+ } else { -+ ret_val = DWC_OTG_HC_XFER_COMPLETE; -+ } -+ -+ return ret_val; -+} -+ -+/** -+ * Releases a host channel for use by other transfers. Attempts to select and -+ * queue more transactions since at least one host channel is available. -+ * -+ * @param hcd The HCD state structure. -+ * @param hc The host channel to release. -+ * @param qtd The QTD associated with the host channel. This QTD may be freed -+ * if the transfer is complete or an error has occurred. -+ * @param halt_status Reason the channel is being released. This status -+ * determines the actions taken by this function. -+ */ -+static void release_channel(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ dwc_otg_transaction_type_e tr_type; -+ int free_qtd; -+ -+ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", -+ __func__, hc->hc_num, halt_status); -+ -+ switch (halt_status) { -+ case DWC_OTG_HC_XFER_URB_COMPLETE: -+ free_qtd = 1; -+ break; -+ case DWC_OTG_HC_XFER_AHB_ERR: -+ case DWC_OTG_HC_XFER_STALL: -+ case DWC_OTG_HC_XFER_BABBLE_ERR: -+ free_qtd = 1; -+ break; -+ case DWC_OTG_HC_XFER_XACT_ERR: -+ if (qtd->error_count >= 3) { -+ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n"); -+ free_qtd = 1; -+ qtd->urb->status = -EPROTO; -+ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO); -+ } else { -+ free_qtd = 0; -+ } -+ break; -+ case DWC_OTG_HC_XFER_URB_DEQUEUE: -+ /* -+ * The QTD has already been removed and the QH has been -+ * deactivated. Don't want to do anything except release the -+ * host channel and try to queue more transfers. -+ */ -+ goto cleanup; -+ case DWC_OTG_HC_XFER_NO_HALT_STATUS: -+ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num); -+ free_qtd = 0; -+ break; -+ default: -+ free_qtd = 0; -+ break; -+ } -+ -+ deactivate_qh(hcd, hc->qh, free_qtd); -+ -+ cleanup: -+ /* -+ * Release the host channel for use by other transfers. 
The cleanup -+ * function clears the channel interrupt enables and conditions, so -+ * there's no need to clear the Channel Halted interrupt separately. -+ */ -+ dwc_otg_hc_cleanup(hcd->core_if, hc); -+ list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list); -+ -+ switch (hc->ep_type) { -+ case DWC_OTG_EP_TYPE_CONTROL: -+ case DWC_OTG_EP_TYPE_BULK: -+ hcd->non_periodic_channels--; -+ break; -+ -+ default: -+ /* -+ * Don't release reservations for periodic channels here. -+ * That's done when a periodic transfer is descheduled (i.e. -+ * when the QH is removed from the periodic schedule). -+ */ -+ break; -+ } -+ -+ /* Try to queue more transfers now that there's a free channel. */ -+ tr_type = dwc_otg_hcd_select_transactions(hcd); -+ if (tr_type != DWC_OTG_TRANSACTION_NONE) { -+ dwc_otg_hcd_queue_transactions(hcd, tr_type); -+ } -+} -+ -+/** -+ * Halts a host channel. If the channel cannot be halted immediately because -+ * the request queue is full, this function ensures that the FIFO empty -+ * interrupt for the appropriate queue is enabled so that the halt request can -+ * be queued when there is space in the request queue. -+ * -+ * This function may also be called in DMA mode. In that case, the channel is -+ * simply released since the core always halts the channel automatically in -+ * DMA mode. -+ */ -+static void halt_channel(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ if (hcd->core_if->dma_enable) { -+ release_channel(hcd, hc, qtd, halt_status); -+ return; -+ } -+ -+ /* Slave mode processing... */ -+ dwc_otg_hc_halt(hcd->core_if, hc, halt_status); -+ -+ if (hc->halt_on_queue) { -+ gintmsk_data_t gintmsk = {.d32 = 0}; -+ dwc_otg_core_global_regs_t *global_regs; -+ global_regs = hcd->core_if->core_global_regs; -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || -+ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { -+ /* -+ * Make sure the Non-periodic Tx FIFO empty interrupt -+ * is enabled so that the non-periodic schedule will -+ * be processed. -+ */ -+ gintmsk.b.nptxfempty = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); -+ } else { -+ /* -+ * Move the QH from the periodic queued schedule to -+ * the periodic assigned schedule. This allows the -+ * halt to be queued when the periodic schedule is -+ * processed. -+ */ -+ list_move(&hc->qh->qh_list_entry, -+ &hcd->periodic_sched_assigned); -+ -+ /* -+ * Make sure the Periodic Tx FIFO Empty interrupt is -+ * enabled so that the periodic schedule will be -+ * processed. -+ */ -+ gintmsk.b.ptxfempty = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); -+ } -+ } -+} -+ -+/** -+ * Performs common cleanup for non-periodic transfers after a Transfer -+ * Complete interrupt. This function should be called after any endpoint type -+ * specific handling is finished to release the host channel. -+ */ -+static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ hcint_data_t hcint; -+ -+ qtd->error_count = 0; -+ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ if (hcint.b.nyet) { -+ /* -+ * Got a NYET on the last transaction of the transfer. This -+ * means that the endpoint should be in the PING state at the -+ * beginning of the next transfer. -+ */ -+ hc->qh->ping_state = 1; -+ clear_hc_int(hc_regs, nyet); -+ } -+ -+ /* -+ * Always halt and release the host channel to make it available for -+ * more transfers. 
There may still be more phases for a control -+ * transfer or more data packets for a bulk transfer at this point, -+ * but the host channel is still halted. A channel will be reassigned -+ * to the transfer when the non-periodic schedule is processed after -+ * the channel is released. This allows transactions to be queued -+ * properly via dwc_otg_hcd_queue_transactions, which also enables the -+ * Tx FIFO Empty interrupt if necessary. -+ */ -+ if (hc->ep_is_in) { -+ /* -+ * IN transfers in Slave mode require an explicit disable to -+ * halt the channel. (In DMA mode, this call simply releases -+ * the channel.) -+ */ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } else { -+ /* -+ * The channel is automatically disabled by the core for OUT -+ * transfers in Slave mode. -+ */ -+ release_channel(hcd, hc, qtd, halt_status); -+ } -+} -+ -+/** -+ * Performs common cleanup for periodic transfers after a Transfer Complete -+ * interrupt. This function should be called after any endpoint type specific -+ * handling is finished to release the host channel. -+ */ -+static void complete_periodic_xfer(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ hctsiz_data_t hctsiz; -+ qtd->error_count = 0; -+ -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) { -+ /* Core halts channel in these cases. */ -+ release_channel(hcd, hc, qtd, halt_status); -+ } else { -+ /* Flush any outstanding requests from the Tx queue. */ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+} -+ -+/** -+ * Handles a host channel Transfer Complete interrupt. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ int urb_xfer_done; -+ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ struct urb *urb = qtd->urb; -+ int pipe_type = usb_pipetype(urb->pipe); -+ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Transfer Complete--\n", hc->hc_num); -+ -+ /* -+ * Handle xfer complete on CSPLIT. -+ */ -+ if (hc->qh->do_split) { -+ qtd->complete_split = 0; -+ } -+ -+ /* Update the QTD and URB states. 
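 *
 * For control transfers this advances the SETUP/DATA/STATUS phase; bulk
 * URBs are completed once all of their data has been transferred,
 * interrupt URBs on the first transfer-complete interrupt, and
 * isochronous URBs per-frame via update_isoc_urb_state().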
*/ -+ switch (pipe_type) { -+ case PIPE_CONTROL: -+ switch (qtd->control_phase) { -+ case DWC_OTG_CONTROL_SETUP: -+ if (urb->transfer_buffer_length > 0) { -+ qtd->control_phase = DWC_OTG_CONTROL_DATA; -+ } else { -+ qtd->control_phase = DWC_OTG_CONTROL_STATUS; -+ } -+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n"); -+ halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ break; -+ case DWC_OTG_CONTROL_DATA: { -+ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); -+ if (urb_xfer_done) { -+ qtd->control_phase = DWC_OTG_CONTROL_STATUS; -+ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n"); -+ } else { -+ save_data_toggle(hc, hc_regs, qtd); -+ } -+ halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ break; -+ } -+ case DWC_OTG_CONTROL_STATUS: -+ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n"); -+ if (urb->status == -EINPROGRESS) { -+ urb->status = 0; -+ } -+ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); -+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; -+ break; -+ } -+ -+ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); -+ break; -+ case PIPE_BULK: -+ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n"); -+ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); -+ if (urb_xfer_done) { -+ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); -+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; -+ } else { -+ halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ } -+ -+ save_data_toggle(hc, hc_regs, qtd); -+ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); -+ break; -+ case PIPE_INTERRUPT: -+ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n"); -+ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); -+ -+ /* -+ * Interrupt URB is done on the first transfer complete -+ * interrupt. -+ */ -+ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); -+ save_data_toggle(hc, hc_regs, qtd); -+ complete_periodic_xfer(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_URB_COMPLETE); -+ break; -+ case PIPE_ISOCHRONOUS: -+ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n"); -+ if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) { -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_COMPLETE); -+ } -+ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); -+ break; -+ } -+ -+ disable_hc_int(hc_regs, xfercompl); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel STALL interrupt. This handler may be called in -+ * either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ struct urb *urb = qtd->urb; -+ int pipe_type = usb_pipetype(urb->pipe); -+ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "STALL Received--\n", hc->hc_num); -+ -+ if (pipe_type == PIPE_CONTROL) { -+ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); -+ } -+ -+ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) { -+ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); -+ /* -+ * USB protocol requires resetting the data toggle for bulk -+ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT) -+ * setup command is issued to the endpoint. Anticipate the -+ * CLEAR_FEATURE command since a STALL has occurred and reset -+ * the data toggle now. -+ */ -+ hc->qh->data_toggle = 0; -+ } -+ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL); -+ -+ disable_hc_int(hc_regs, stall); -+ -+ return 1; -+} -+ -+/* -+ * Updates the state of the URB when a transfer has been stopped due to an -+ * abnormal condition before the transfer completes. 
Modifies the -+ * actual_length field of the URB to reflect the number of bytes that have -+ * actually been transferred via the host channel. -+ */ -+static void update_urb_state_xfer_intr(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ struct urb *urb, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd, -+ halt_status, NULL); -+ urb->actual_length += bytes_transferred; -+ -+#ifdef DEBUG -+ { -+ hctsiz_data_t hctsiz; -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", -+ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n", hc->start_pkt_count); -+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt); -+ DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet); -+ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred); -+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); -+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", -+ urb->transfer_buffer_length); -+ } -+#endif -+} -+ -+/** -+ * Handles a host channel NAK interrupt. This handler may be called in either -+ * DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "NAK Received--\n", hc->hc_num); -+ -+ /* -+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and -+ * interrupt. Re-start the SSPLIT transfer. -+ */ -+ if (hc->do_split) { -+ if (hc->complete_split) { -+ qtd->error_count = 0; -+ } -+ qtd->complete_split = 0; -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); -+ goto handle_nak_done; -+ } -+ -+ switch (usb_pipetype(qtd->urb->pipe)) { -+ case PIPE_CONTROL: -+ case PIPE_BULK: -+ if (hcd->core_if->dma_enable && hc->ep_is_in) { -+ /* -+ * NAK interrupts are enabled on bulk/control IN -+ * transfers in DMA mode for the sole purpose of -+ * resetting the error count after a transaction error -+ * occurs. The core will continue transferring data. -+ */ -+ qtd->error_count = 0; -+ goto handle_nak_done; -+ } -+ -+ /* -+ * NAK interrupts normally occur during OUT transfers in DMA -+ * or Slave mode. For IN transfers, more requests will be -+ * queued as request queue space is available. -+ */ -+ qtd->error_count = 0; -+ -+ if (!hc->qh->ping_state) { -+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, -+ qtd, DWC_OTG_HC_XFER_NAK); -+ save_data_toggle(hc, hc_regs, qtd); -+ if (qtd->urb->dev->speed == USB_SPEED_HIGH) { -+ hc->qh->ping_state = 1; -+ } -+ } -+ -+ /* -+ * Halt the channel so the transfer can be re-started from -+ * the appropriate point or the PING protocol will -+ * start/continue. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); -+ break; -+ case PIPE_INTERRUPT: -+ qtd->error_count = 0; -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); -+ break; -+ case PIPE_ISOCHRONOUS: -+ /* Should never get called for isochronous transfers. */ -+ BUG(); -+ break; -+ } -+ -+ handle_nak_done: -+ disable_hc_int(hc_regs, nak); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel ACK interrupt. This interrupt is enabled when -+ * performing the PING protocol in Slave mode, when errors occur during -+ * either Slave mode or DMA mode, and during Start Split transactions. 
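 *
 * For isochronous OUT start splits, the handler below advances the split
 * position in 188-byte steps (188 bytes being the most isochronous data
 * a single split transaction carries in one microframe) so that the next
 * SSPLIT can be issued as a MID or END transaction as appropriate.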
-+ */ -+static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "ACK Received--\n", hc->hc_num); -+ -+ if (hc->do_split) { -+ /* -+ * Handle ACK on SSPLIT. -+ * ACK should not occur in CSPLIT. -+ */ -+ if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) { -+ qtd->ssplit_out_xfer_count = hc->xfer_len; -+ } -+ if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) { -+ /* Don't need complete for isochronous out transfers. */ -+ qtd->complete_split = 1; -+ } -+ -+ /* ISOC OUT */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { -+ switch (hc->xact_pos) { -+ case DWC_HCSPLIT_XACTPOS_ALL: -+ break; -+ case DWC_HCSPLIT_XACTPOS_END: -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; -+ qtd->isoc_split_offset = 0; -+ break; -+ case DWC_HCSPLIT_XACTPOS_BEGIN: -+ case DWC_HCSPLIT_XACTPOS_MID: -+ /* -+ * For BEGIN or MID, calculate the length for -+ * the next microframe to determine the correct -+ * SSPLIT token, either MID or END. -+ */ -+ { -+ struct usb_iso_packet_descriptor *frame_desc; -+ -+ frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index]; -+ qtd->isoc_split_offset += 188; -+ -+ if ((frame_desc->length - qtd->isoc_split_offset) <= 188) { -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END; -+ } else { -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID; -+ } -+ -+ } -+ break; -+ } -+ } else { -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); -+ } -+ } else { -+ qtd->error_count = 0; -+ -+ if (hc->qh->ping_state) { -+ hc->qh->ping_state = 0; -+ /* -+ * Halt the channel so the transfer can be re-started -+ * from the appropriate point. This only happens in -+ * Slave mode. In DMA mode, the ping_state is cleared -+ * when the transfer is started because the core -+ * automatically executes the PING, then the transfer. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); -+ } -+ } -+ -+ /* -+ * If the ACK occurred when _not_ in the PING state, let the channel -+ * continue transferring data after clearing the error count. -+ */ -+ -+ disable_hc_int(hc_regs, ack); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel NYET interrupt. This interrupt should only occur on -+ * Bulk and Control OUT endpoints and for complete split transactions. If a -+ * NYET occurs at the same time as a Transfer Complete interrupt, it is -+ * handled in the xfercomp interrupt handler, not here. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "NYET Received--\n", hc->hc_num); -+ -+ /* -+ * NYET on CSPLIT -+ * re-do the CSPLIT immediately on non-periodic -+ */ -+ if (hc->do_split && hc->complete_split) { -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); -+ -+ if (dwc_full_frame_num(frnum) != -+ dwc_full_frame_num(hc->qh->sched_frame)) { -+ /* -+ * No longer in the same full speed frame. -+ * Treat this as a transaction error. -+ */ -+#if 0 -+ /** @todo Fix system performance so this can -+ * be treated as an error. Right now complete -+ * splits cannot be scheduled precisely enough -+ * due to other system activity, so this error -+ * occurs regularly in Slave mode. 
-+ */ -+ qtd->error_count++; -+#endif -+ qtd->complete_split = 0; -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ /** @todo add support for isoc release */ -+ goto handle_nyet_done; -+ } -+ } -+ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); -+ goto handle_nyet_done; -+ } -+ -+ hc->qh->ping_state = 1; -+ qtd->error_count = 0; -+ -+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd, -+ DWC_OTG_HC_XFER_NYET); -+ save_data_toggle(hc, hc_regs, qtd); -+ -+ /* -+ * Halt the channel and re-start the transfer so the PING -+ * protocol will start. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); -+ -+handle_nyet_done: -+ disable_hc_int(hc_regs, nyet); -+ return 1; -+} -+ -+/** -+ * Handles a host channel babble interrupt. This handler may be called in -+ * either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Babble Error--\n", hc->hc_num); -+ if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { -+ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW); -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR); -+ } else { -+ dwc_otg_halt_status_e halt_status; -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_BABBLE_ERR); -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+ disable_hc_int(hc_regs, bblerr); -+ return 1; -+} -+ -+/** -+ * Handles a host channel AHB error interrupt. This handler is only called in -+ * DMA mode. -+ */ -+static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hcchar_data_t hcchar; -+ hcsplt_data_t hcsplt; -+ hctsiz_data_t hctsiz; -+ uint32_t hcdma; -+ struct urb *urb = qtd->urb; -+ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "AHB Error--\n", hc->hc_num); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ hcdma = dwc_read_reg32(&hc_regs->hcdma); -+ -+ DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num); -+ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); -+ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); -+ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe)); -+ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), -+ (usb_pipein(urb->pipe) ? 
"IN" : "OUT")); -+ DWC_ERROR(" Endpoint type: %s\n", -+ ({char *pipetype; -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: pipetype = "CONTROL"; break; -+ case PIPE_BULK: pipetype = "BULK"; break; -+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; -+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; -+ default: pipetype = "UNKNOWN"; break; -+ }; pipetype;})); -+ DWC_ERROR(" Speed: %s\n", -+ ({char *speed; -+ switch (urb->dev->speed) { -+ case USB_SPEED_HIGH: speed = "HIGH"; break; -+ case USB_SPEED_FULL: speed = "FULL"; break; -+ case USB_SPEED_LOW: speed = "LOW"; break; -+ default: speed = "UNKNOWN"; break; -+ }; speed;})); -+ DWC_ERROR(" Max packet size: %d\n", -+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); -+ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length); -+ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n", -+ urb->transfer_buffer, (void *)urb->transfer_dma); -+ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", -+ urb->setup_packet, (void *)urb->setup_dma); -+ DWC_ERROR(" Interval: %d\n", urb->interval); -+ -+ dwc_otg_hcd_complete_urb(hcd, urb, -EIO); -+ -+ /* -+ * Force a channel halt. Don't call halt_channel because that won't -+ * write to the HCCHARn register in DMA mode to force the halt. -+ */ -+ dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR); -+ -+ disable_hc_int(hc_regs, ahberr); -+ return 1; -+} -+ -+/** -+ * Handles a host channel transaction error interrupt. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Transaction Error--\n", hc->hc_num); -+ -+ switch (usb_pipetype(qtd->urb->pipe)) { -+ case PIPE_CONTROL: -+ case PIPE_BULK: -+ qtd->error_count++; -+ if (!hc->qh->ping_state) { -+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, -+ qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ save_data_toggle(hc, hc_regs, qtd); -+ if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) { -+ hc->qh->ping_state = 1; -+ } -+ } -+ -+ /* -+ * Halt the channel so the transfer can be re-started from -+ * the appropriate point or the PING protocol will start. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ break; -+ case PIPE_INTERRUPT: -+ qtd->error_count++; -+ if (hc->do_split && hc->complete_split) { -+ qtd->complete_split = 0; -+ } -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ break; -+ case PIPE_ISOCHRONOUS: -+ { -+ dwc_otg_halt_status_e halt_status; -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_XACT_ERR); -+ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+ break; -+ } -+ -+ disable_hc_int(hc_regs, xacterr); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel frame overrun interrupt. This handler may be called -+ * in either DMA mode or Slave mode. 
-+ */ -+static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Frame Overrun--\n", hc->hc_num); -+ -+ switch (usb_pipetype(qtd->urb->pipe)) { -+ case PIPE_CONTROL: -+ case PIPE_BULK: -+ break; -+ case PIPE_INTERRUPT: -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN); -+ break; -+ case PIPE_ISOCHRONOUS: -+ { -+ dwc_otg_halt_status_e halt_status; -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_FRAME_OVERRUN); -+ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+ break; -+ } -+ -+ disable_hc_int(hc_regs, frmovrun); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel data toggle error interrupt. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Data Toggle Error--\n", hc->hc_num); -+ -+ if (hc->ep_is_in) { -+ qtd->error_count = 0; -+ } else { -+ DWC_ERROR("Data Toggle Error on OUT transfer," -+ "channel %d\n", hc->hc_num); -+ } -+ -+ disable_hc_int(hc_regs, datatglerr); -+ -+ return 1; -+} -+ -+#ifdef DEBUG -+/** -+ * This function is for debug only. It checks that a valid halt status is set -+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is -+ * taken and a warning is issued. -+ * @return 1 if halt status is ok, 0 otherwise. -+ */ -+static inline int halt_status_ok(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ hcsplt_data_t hcsplt; -+ -+ if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) { -+ /* -+ * This code is here only as a check. This condition should -+ * never happen. Ignore the halt if it does occur. -+ */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); -+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); -+ DWC_WARN("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, " -+ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, " -+ "hcint 0x%08x, hcintmsk 0x%08x, " -+ "hcsplt 0x%08x, qtd->complete_split %d\n", -+ __func__, hc->hc_num, hcchar.d32, hctsiz.d32, -+ hcint.d32, hcintmsk.d32, -+ hcsplt.d32, qtd->complete_split); -+ -+ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n", -+ __func__, hc->hc_num); -+ DWC_WARN("\n"); -+ clear_hc_int(hc_regs, chhltd); -+ return 0; -+ } -+ -+ /* -+ * This code is here only as a check. hcchar.chdis should -+ * never be set when the halt interrupt occurs. Halt the -+ * channel again if it does occur. -+ */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chdis) { -+ DWC_WARN("%s: hcchar.chdis set unexpectedly, " -+ "hcchar 0x%08x, trying to halt again\n", -+ __func__, hcchar.d32); -+ clear_hc_int(hc_regs, chhltd); -+ hc->halt_pending = 0; -+ halt_channel(hcd, hc, qtd, hc->halt_status); -+ return 0; -+ } -+ -+ return 1; -+} -+#endif -+ -+/** -+ * Handles a host Channel Halted interrupt in DMA mode. This handler -+ * determines the reason the channel halted and proceeds accordingly. 
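 *
 * In outline: a halt due to URB dequeue or an AHB error simply releases
 * the channel; otherwise HCINT is read and the pending conditions are
 * handled in priority order (transfer complete, STALL, transaction
 * error, then NYET/babble/frame overrun/NAK/ACK), since several status
 * bits can be set for the same halt.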
-+ */ -+static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ int out_nak_enh = 0; -+ -+ /* For core with OUT NAK enhancement, the flow for high- -+ * speed CONTROL/BULK OUT is handled a little differently. -+ */ -+ if (hcd->core_if->snpsid >= 0x4F54271A) { -+ if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in && -+ (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || -+ hc->ep_type == DWC_OTG_EP_TYPE_BULK)) { -+ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement enabled\n"); -+ out_nak_enh = 1; -+ } else { -+ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n"); -+ } -+ } else { -+ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, no core support\n"); -+ } -+ -+ if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || -+ hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) { -+ /* -+ * Just release the channel. A dequeue can happen on a -+ * transfer timeout. In the case of an AHB Error, the channel -+ * was forced to halt because there's no way to gracefully -+ * recover. -+ */ -+ release_channel(hcd, hc, qtd, hc->halt_status); -+ return; -+ } -+ -+ /* Read the HCINTn register to determine the cause for the halt. */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); -+ -+ if (hcint.b.xfercomp) { -+ /** @todo This is here because of a possible hardware bug. Spec -+ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT -+ * interrupt w/ACK bit set should occur, but I only see the -+ * XFERCOMP bit, even with it masked out. This is a workaround -+ * for that behavior. Should fix this when hardware is fixed. -+ */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { -+ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); -+ } -+ handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.stall) { -+ handle_hc_stall_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.xacterr) { -+ if (out_nak_enh) { -+ if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) { -+ printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n"); -+ qtd->error_count = 0; -+ } else { -+ printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n"); -+ } -+ } -+ -+ /* -+ * Must handle xacterr before nak or ack. Could get a xacterr -+ * at the same time as either of these on a BULK/CONTROL OUT -+ * that started with a PING. The xacterr takes precedence. -+ */ -+ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd); -+ } else if (!out_nak_enh) { -+ if (hcint.b.nyet) { -+ /* -+ * Must handle nyet before nak or ack. Could get a nyet at the -+ * same time as either of those on a BULK/CONTROL OUT that -+ * started with a PING. The nyet takes precedence. -+ */ -+ handle_hc_nyet_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.bblerr) { -+ handle_hc_babble_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.frmovrun) { -+ handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.nak && !hcintmsk.b.nak) { -+ /* -+ * If nak is not masked, it's because a non-split IN transfer -+ * is in an error state. In that case, the nak is handled by -+ * the nak interrupt handler, not here. Handle nak here for -+ * BULK/CONTROL OUT transfers, which halt on a NAK to allow -+ * rewinding the buffer pointer. -+ */ -+ handle_hc_nak_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.ack && !hcintmsk.b.ack) { -+ /* -+ * If ack is not masked, it's because a non-split IN transfer -+ * is in an error state. 
In that case, the ack is handled by -+ * the ack interrupt handler, not here. Handle ack here for -+ * split transfers. Start splits halt on ACK. -+ */ -+ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); -+ } else { -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * A periodic transfer halted with no other channel -+ * interrupts set. Assume it was halted by the core -+ * because it could not be completed in its scheduled -+ * (micro)frame. -+ */ -+#ifdef DEBUG -+ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n", -+ __func__, hc->hc_num); -+#endif -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE); -+ } else { -+ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason " -+ "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n", -+ __func__, hc->hc_num, hcint.d32, -+ dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts)); -+ } -+ } -+ } else { -+ printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32); -+ } -+} -+ -+/** -+ * Handles a host channel Channel Halted interrupt. -+ * -+ * In slave mode, this handler is called only when the driver specifically -+ * requests a halt. This occurs during handling other host channel interrupts -+ * (e.g. nak, xacterr, stall, nyet, etc.). -+ * -+ * In DMA mode, this is the interrupt that occurs when the core has finished -+ * processing a transfer on a channel. Other host channel interrupts (except -+ * ahberr) are disabled in DMA mode. -+ */ -+static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Channel Halted--\n", hc->hc_num); -+ -+ if (hcd->core_if->dma_enable) { -+ handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd); -+ } else { -+#ifdef DEBUG -+ if (!halt_status_ok(hcd, hc, hc_regs, qtd)) { -+ return 1; -+ } -+#endif -+ release_channel(hcd, hc, qtd, hc->halt_status); -+ } -+ -+ return 1; -+} -+ -+/** Handles interrupt for a specific Host Channel */ -+int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num) -+{ -+ int retval = 0; -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ dwc_hc_t *hc; -+ dwc_otg_hc_regs_t *hc_regs; -+ dwc_otg_qtd_t *qtd; -+ -+ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num); -+ -+ hc = dwc_otg_hcd->hc_ptr_array[num]; -+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num]; -+ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); -+ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); -+ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", -+ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32)); -+ hcint.d32 = hcint.d32 & hcintmsk.d32; -+ -+ if (!dwc_otg_hcd->core_if->dma_enable) { -+ if (hcint.b.chhltd && hcint.d32 != 0x2) { -+ hcint.b.chhltd = 0; -+ } -+ } -+ -+ if (hcint.b.xfercomp) { -+ retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ /* -+ * If NYET occurred at same time as Xfer Complete, the NYET is -+ * handled by the Xfer Complete interrupt handler. Don't want -+ * to call the NYET interrupt handler in this case. 
-+ */ -+ hcint.b.nyet = 0; -+ } -+ if (hcint.b.chhltd) { -+ retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.ahberr) { -+ retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.stall) { -+ retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.nak) { -+ retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.ack) { -+ retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.nyet) { -+ retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.xacterr) { -+ retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.bblerr) { -+ retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.frmovrun) { -+ retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.datatglerr) { -+ retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ -+ return retval; -+} -+ -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_hcd_queue.c -@@ -0,0 +1,713 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_queue.c $ -+ * $Revision: #33 $ -+ * $Date: 2008/07/15 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+ -+/** -+ * @file -+ * -+ * This file contains the functions to manage Queue Heads and Queue -+ * Transfer Descriptors. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "otg_driver.h" -+#include "otg_hcd.h" -+#include "otg_regs.h" -+ -+/** -+ * This function allocates and initializes a QH. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param[in] urb Holds the information about the device/endpoint that we need -+ * to initialize the QH. -+ * -+ * @return Returns pointer to the newly allocated QH, or NULL on error. */ -+dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *hcd, struct urb *urb) -+{ -+ dwc_otg_qh_t *qh; -+ -+ /* Allocate memory */ -+ /** @todo add memflags argument */ -+ qh = dwc_otg_hcd_qh_alloc (); -+ if (qh == NULL) { -+ return NULL; -+ } -+ -+ dwc_otg_hcd_qh_init (hcd, qh, urb); -+ return qh; -+} -+ -+/** Free each QTD in the QH's QTD-list then free the QH. QH should already be -+ * removed from a list. QTD list should already be empty if called from URB -+ * Dequeue. -+ * -+ * @param[in] hcd HCD instance. -+ * @param[in] qh The QH to free. -+ */ -+void dwc_otg_hcd_qh_free (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ dwc_otg_qtd_t *qtd; -+ struct list_head *pos; -+ //unsigned long flags; -+ -+ /* Free each QTD in the QTD list */ -+ -+#ifdef CONFIG_SMP -+ //the spinlock is locked before this function get called, -+ //but in case the lock is needed, the check function is preserved -+ -+ //but in non-SMP mode, all spinlock is lockable. -+ //don't do the test in non-SMP mode -+ -+ if(spin_trylock(&hcd->lock)) { -+ printk("%s: It is not supposed to be lockable!!\n",__func__); -+ BUG(); -+ } -+#endif -+// SPIN_LOCK_IRQSAVE(&hcd->lock, flags) -+ for (pos = qh->qtd_list.next; -+ pos != &qh->qtd_list; -+ pos = qh->qtd_list.next) -+ { -+ list_del (pos); -+ qtd = dwc_list_to_qtd (pos); -+ dwc_otg_hcd_qtd_free (qtd); -+ } -+// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) -+ -+ kfree (qh); -+ return; -+} -+ -+/** Initializes a QH structure. -+ * -+ * @param[in] hcd The HCD state structure for the DWC OTG controller. -+ * @param[in] qh The QH to init. -+ * @param[in] urb Holds the information about the device/endpoint that we need -+ * to initialize the QH. */ -+#define SCHEDULE_SLOP 10 -+void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb) -+{ -+ char *speed, *type; -+ memset (qh, 0, sizeof (dwc_otg_qh_t)); -+ -+ /* Initialize QH */ -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: -+ qh->ep_type = USB_ENDPOINT_XFER_CONTROL; -+ break; -+ case PIPE_BULK: -+ qh->ep_type = USB_ENDPOINT_XFER_BULK; -+ break; -+ case PIPE_ISOCHRONOUS: -+ qh->ep_type = USB_ENDPOINT_XFER_ISOC; -+ break; -+ case PIPE_INTERRUPT: -+ qh->ep_type = USB_ENDPOINT_XFER_INT; -+ break; -+ } -+ -+ qh->ep_is_in = usb_pipein(urb->pipe) ? 
1 : 0; -+ -+ qh->data_toggle = DWC_OTG_HC_PID_DATA0; -+ qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe))); -+ INIT_LIST_HEAD(&qh->qtd_list); -+ INIT_LIST_HEAD(&qh->qh_list_entry); -+ qh->channel = NULL; -+ -+ /* FS/LS Enpoint on HS Hub -+ * NOT virtual root hub */ -+ qh->do_split = 0; -+ if (((urb->dev->speed == USB_SPEED_LOW) || -+ (urb->dev->speed == USB_SPEED_FULL)) && -+ (urb->dev->tt) && (urb->dev->tt->hub) && (urb->dev->tt->hub->devnum != 1)) -+ { -+ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", -+ usb_pipeendpoint(urb->pipe), urb->dev->tt->hub->devnum, -+ urb->dev->ttport); -+ qh->do_split = 1; -+ } -+ -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT || -+ qh->ep_type == USB_ENDPOINT_XFER_ISOC) { -+ /* Compute scheduling parameters once and save them. */ -+ hprt0_data_t hprt; -+ -+ /** @todo Account for split transfers in the bus time. */ -+ int bytecount = dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp); -+ qh->usecs = usb_calc_bus_time(urb->dev->speed, -+ usb_pipein(urb->pipe), -+ (qh->ep_type == USB_ENDPOINT_XFER_ISOC), -+ bytecount); -+ -+ /* Start in a slightly future (micro)frame. */ -+ qh->sched_frame = dwc_frame_num_inc(hcd->frame_number, -+ SCHEDULE_SLOP); -+ qh->interval = urb->interval; -+#if 0 -+ /* Increase interrupt polling rate for debugging. */ -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ qh->interval = 8; -+ } -+#endif -+ hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0); -+ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && -+ ((urb->dev->speed == USB_SPEED_LOW) || -+ (urb->dev->speed == USB_SPEED_FULL))) { -+ qh->interval *= 8; -+ qh->sched_frame |= 0x7; -+ qh->start_split_frame = qh->sched_frame; -+ } -+ -+ } -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", qh); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", -+ urb->dev->devnum); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", -+ usb_pipeendpoint(urb->pipe), -+ usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT"); -+ -+ switch(urb->dev->speed) { -+ case USB_SPEED_LOW: -+ speed = "low"; -+ break; -+ case USB_SPEED_FULL: -+ speed = "full"; -+ break; -+ case USB_SPEED_HIGH: -+ speed = "high"; -+ break; -+ default: -+ speed = "?"; -+ break; -+ } -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", speed); -+ -+ switch (qh->ep_type) { -+ case USB_ENDPOINT_XFER_ISOC: -+ type = "isochronous"; -+ break; -+ case USB_ENDPOINT_XFER_INT: -+ type = "interrupt"; -+ break; -+ case USB_ENDPOINT_XFER_CONTROL: -+ type = "control"; -+ break; -+ case USB_ENDPOINT_XFER_BULK: -+ type = "bulk"; -+ break; -+ default: -+ type = "?"; -+ break; -+ } -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",type); -+ -+#ifdef DEBUG -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n", -+ qh->usecs); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n", -+ qh->interval); -+ } -+#endif -+ -+ return; -+} -+ -+/** -+ * Checks that a channel is available for a periodic transfer. -+ * -+ * @return 0 if successful, negative error code otherise. -+ */ -+static int periodic_channel_available(dwc_otg_hcd_t *hcd) -+{ -+ /* -+ * Currently assuming that there is a dedicated host channnel for each -+ * periodic transaction plus at least one host channel for -+ * non-periodic transactions. 
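 *
 * With 8 host channels, for example, a new periodic QH is admitted only
 * while fewer than 7 channels are reserved for periodic transfers and at
 * least one channel is still unreserved overall.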
-+ */ -+ int status; -+ int num_channels; -+ -+ num_channels = hcd->core_if->core_params->host_channels; -+ if ((hcd->periodic_channels + hcd->non_periodic_channels < num_channels) && -+ (hcd->periodic_channels < num_channels - 1)) { -+ status = 0; -+ } -+ else { -+ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n", -+ __func__, num_channels, hcd->periodic_channels, -+ hcd->non_periodic_channels); -+ status = -ENOSPC; -+ } -+ -+ return status; -+} -+ -+/** -+ * Checks that there is sufficient bandwidth for the specified QH in the -+ * periodic schedule. For simplicity, this calculation assumes that all the -+ * transfers in the periodic schedule may occur in the same (micro)frame. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH containing periodic bandwidth required. -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+static int check_periodic_bandwidth(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ int status; -+ uint16_t max_claimed_usecs; -+ -+ status = 0; -+ -+ if (hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) { -+ /* -+ * High speed mode. -+ * Max periodic usecs is 80% x 125 usec = 100 usec. -+ */ -+ max_claimed_usecs = 100 - qh->usecs; -+ } else { -+ /* -+ * Full speed mode. -+ * Max periodic usecs is 90% x 1000 usec = 900 usec. -+ */ -+ max_claimed_usecs = 900 - qh->usecs; -+ } -+ -+ if (hcd->periodic_usecs > max_claimed_usecs) { -+ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n", -+ __func__, hcd->periodic_usecs, qh->usecs); -+ status = -ENOSPC; -+ } -+ -+ return status; -+} -+ -+/** -+ * Checks that the max transfer size allowed in a host channel is large enough -+ * to handle the maximum data transfer in a single (micro)frame for a periodic -+ * transfer. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH for a periodic endpoint. -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+static int check_max_xfer_size(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ int status; -+ uint32_t max_xfer_size; -+ uint32_t max_channel_xfer_size; -+ -+ status = 0; -+ -+ max_xfer_size = dwc_max_packet(qh->maxp) * dwc_hb_mult(qh->maxp); -+ max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size; -+ -+ if (max_xfer_size > max_channel_xfer_size) { -+ DWC_NOTICE("%s: Periodic xfer length %d > " -+ "max xfer length for channel %d\n", -+ __func__, max_xfer_size, max_channel_xfer_size); -+ status = -ENOSPC; -+ } -+ -+ return status; -+} -+ -+/** -+ * Schedules an interrupt or isochronous transfer in the periodic schedule. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH for the periodic transfer. The QH should already contain the -+ * scheduling information. -+ * -+ * @return 0 if successful, negative error code otherwise. 
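 *
 * As a worked example of the bandwidth check (figures from
 * check_periodic_bandwidth() above): at high speed up to 80% of a 125 us
 * microframe, i.e. 100 us, may be claimed. If 60 us are already claimed,
 * a QH needing 50 us is rejected (60 > 100 - 50, so -ENOSPC), while a QH
 * needing 30 us is accepted.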
-+ */ -+static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ int status = 0; -+ -+ status = periodic_channel_available(hcd); -+ if (status) { -+ DWC_NOTICE("%s: No host channel available for periodic " -+ "transfer.\n", __func__); -+ return status; -+ } -+ -+ status = check_periodic_bandwidth(hcd, qh); -+ if (status) { -+ DWC_NOTICE("%s: Insufficient periodic bandwidth for " -+ "periodic transfer.\n", __func__); -+ return status; -+ } -+ -+ status = check_max_xfer_size(hcd, qh); -+ if (status) { -+ DWC_NOTICE("%s: Channel max transfer size too small " -+ "for periodic transfer.\n", __func__); -+ return status; -+ } -+ -+ /* Always start in the inactive schedule. */ -+ list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive); -+ -+ /* Reserve the periodic channel. */ -+ hcd->periodic_channels++; -+ -+ /* Update claimed usecs per (micro)frame. */ -+ hcd->periodic_usecs += qh->usecs; -+ -+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */ -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated += qh->usecs / qh->interval; -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs++; -+ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } else { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs++; -+ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } -+ -+ return status; -+} -+ -+/** -+ * This function adds a QH to either the non periodic or periodic schedule if -+ * it is not already in the schedule. If the QH is already in the schedule, no -+ * action is taken. -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ //unsigned long flags; -+ int status = 0; -+ -+#ifdef CONFIG_SMP -+ //the spinlock is locked before this function get called, -+ //but in case the lock is needed, the check function is preserved -+ //but in non-SMP mode, all spinlock is lockable. -+ //don't do the test in non-SMP mode -+ -+ if(spin_trylock(&hcd->lock)) { -+ printk("%s: It is not supposed to be lockable!!\n",__func__); -+ BUG(); -+ } -+#endif -+// SPIN_LOCK_IRQSAVE(&hcd->lock, flags) -+ -+ if (!list_empty(&qh->qh_list_entry)) { -+ /* QH already in a schedule. */ -+ goto done; -+ } -+ -+ /* Add the new QH to the appropriate schedule */ -+ if (dwc_qh_is_non_per(qh)) { -+ /* Always start in the inactive schedule. */ -+ list_add_tail(&qh->qh_list_entry, &hcd->non_periodic_sched_inactive); -+ } else { -+ status = schedule_periodic(hcd, qh); -+ } -+ -+ done: -+// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) -+ -+ return status; -+} -+ -+/** -+ * Removes an interrupt or isochronous transfer from the periodic schedule. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH for the periodic transfer. -+ */ -+static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ list_del_init(&qh->qh_list_entry); -+ -+ /* Release the periodic channel reservation. */ -+ hcd->periodic_channels--; -+ -+ /* Update claimed usecs per (micro)frame. */ -+ hcd->periodic_usecs -= qh->usecs; -+ -+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. 
*/ -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated -= qh->usecs / qh->interval; -+ -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs--; -+ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } else { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs--; -+ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } -+} -+ -+/** -+ * Removes a QH from either the non-periodic or periodic schedule. Memory is -+ * not freed. -+ * -+ * @param[in] hcd The HCD state structure. -+ * @param[in] qh QH to remove from schedule. */ -+void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ //unsigned long flags; -+ -+#ifdef CONFIG_SMP -+ //the spinlock is locked before this function get called, -+ //but in case the lock is needed, the check function is preserved -+ //but in non-SMP mode, all spinlock is lockable. -+ //don't do the test in non-SMP mode -+ -+ if(spin_trylock(&hcd->lock)) { -+ printk("%s: It is not supposed to be lockable!!\n",__func__); -+ BUG(); -+ } -+#endif -+// SPIN_LOCK_IRQSAVE(&hcd->lock, flags); -+ -+ if (list_empty(&qh->qh_list_entry)) { -+ /* QH is not in a schedule. */ -+ goto done; -+ } -+ -+ if (dwc_qh_is_non_per(qh)) { -+ if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) { -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ } -+ list_del_init(&qh->qh_list_entry); -+ } else { -+ deschedule_periodic(hcd, qh); -+ } -+ -+ done: -+// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); -+ return; -+} -+ -+/** -+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active -+ * non-periodic schedule. The QH is added to the inactive non-periodic -+ * schedule if any QTDs are still attached to the QH. -+ * -+ * For periodic QHs, the QH is removed from the periodic queued schedule. If -+ * there are any QTDs still attached to the QH, the QH is added to either the -+ * periodic inactive schedule or the periodic ready schedule and its next -+ * scheduled frame is calculated. The QH is placed in the ready schedule if -+ * the scheduled frame has been reached already. Otherwise it's placed in the -+ * inactive schedule. If there are no QTDs attached to the QH, the QH is -+ * completely removed from the periodic schedule. -+ */ -+void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split) -+{ -+ unsigned long flags; -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); -+ -+ if (dwc_qh_is_non_per(qh)) { -+ dwc_otg_hcd_qh_remove(hcd, qh); -+ if (!list_empty(&qh->qtd_list)) { -+ /* Add back to inactive non-periodic schedule. */ -+ dwc_otg_hcd_qh_add(hcd, qh); -+ } -+ } else { -+ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); -+ -+ if (qh->do_split) { -+ /* Schedule the next continuing periodic split transfer */ -+ if (sched_next_periodic_split) { -+ -+ qh->sched_frame = frame_number; -+ if (dwc_frame_num_le(frame_number, -+ dwc_frame_num_inc(qh->start_split_frame, 1))) { -+ /* -+ * Allow one frame to elapse after start -+ * split microframe before scheduling -+ * complete split, but DONT if we are -+ * doing the next start split in the -+ * same frame for an ISOC out. 
-+ */ -+ if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) { -+ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1); -+ } -+ } -+ } else { -+ qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame, -+ qh->interval); -+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { -+ qh->sched_frame = frame_number; -+ } -+ qh->sched_frame |= 0x7; -+ qh->start_split_frame = qh->sched_frame; -+ } -+ } else { -+ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval); -+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { -+ qh->sched_frame = frame_number; -+ } -+ } -+ -+ if (list_empty(&qh->qtd_list)) { -+ dwc_otg_hcd_qh_remove(hcd, qh); -+ } else { -+ /* -+ * Remove from periodic_sched_queued and move to -+ * appropriate queue. -+ */ -+ if (qh->sched_frame == frame_number) { -+ list_move(&qh->qh_list_entry, -+ &hcd->periodic_sched_ready); -+ } else { -+ list_move(&qh->qh_list_entry, -+ &hcd->periodic_sched_inactive); -+ } -+ } -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); -+} -+ -+/** -+ * This function allocates and initializes a QTD. -+ * -+ * @param[in] urb The URB to create a QTD from. Each URB-QTD pair will end up -+ * pointing to each other so each pair should have a unique correlation. -+ * -+ * @return Returns pointer to the newly allocated QTD, or NULL on error. */ -+dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb) -+{ -+ dwc_otg_qtd_t *qtd; -+ -+ qtd = dwc_otg_hcd_qtd_alloc (); -+ if (qtd == NULL) { -+ return NULL; -+ } -+ -+ dwc_otg_hcd_qtd_init (qtd, urb); -+ return qtd; -+} -+ -+/** -+ * Initializes a QTD structure. -+ * -+ * @param[in] qtd The QTD to initialize. -+ * @param[in] urb The URB to use for initialization. */ -+void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb) -+{ -+ memset (qtd, 0, sizeof (dwc_otg_qtd_t)); -+ qtd->urb = urb; -+ if (usb_pipecontrol(urb->pipe)) { -+ /* -+ * The only time the QTD data toggle is used is on the data -+ * phase of control transfers. This phase always starts with -+ * DATA1. -+ */ -+ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; -+ qtd->control_phase = DWC_OTG_CONTROL_SETUP; -+ } -+ -+ /* start split */ -+ qtd->complete_split = 0; -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; -+ qtd->isoc_split_offset = 0; -+ -+ /* Store the qtd ptr in the urb to reference what QTD. */ -+ urb->hcpriv = qtd; -+ return; -+} -+ -+/** -+ * This function adds a QTD to the QTD-list of a QH. It will find the correct -+ * QH to place the QTD into. If it does not find a QH, then it will create a -+ * new QH. If the QH to which the QTD is added is not currently scheduled, it -+ * is placed into the proper schedule based on its EP type. -+ * -+ * @param[in] qtd The QTD to add -+ * @param[in] dwc_otg_hcd The DWC HCD structure -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, -+ dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ struct usb_host_endpoint *ep; -+ dwc_otg_qh_t *qh; -+ unsigned long flags; -+ int retval = 0; -+ -+ struct urb *urb = qtd->urb; -+ -+ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); -+ -+ /* -+ * Get the QH which holds the QTD-list to insert to. Create QH if it -+ * doesn't exist. 
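 * The QH is cached in the endpoint's hcpriv field, so URBs queued to the
 * same endpoint share a single QH, while each URB carries its own QTD in
 * urb->hcpriv (set up in dwc_otg_hcd_qtd_init()).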
-+ */ -+ ep = dwc_urb_to_endpoint(urb); -+ qh = (dwc_otg_qh_t *)ep->hcpriv; -+ if (qh == NULL) { -+ qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb); -+ if (qh == NULL) { -+ goto done; -+ } -+ ep->hcpriv = qh; -+ } -+ -+ retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh); -+ if (retval == 0) { -+ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); -+ } -+ -+ done: -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ -+ return retval; -+} -+ -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_pcd.c -@@ -0,0 +1,2502 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $ -+ * $Revision: #70 $ -+ * $Date: 2008/10/14 $ -+ * $Change: 1115682 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_HOST_ONLY -+ -+/** @file -+ * This file implements the Peripheral Controller Driver. -+ * -+ * The Peripheral Controller Driver (PCD) is responsible for -+ * translating requests from the Function Driver into the appropriate -+ * actions on the DWC_otg controller. It isolates the Function Driver -+ * from the specifics of the controller by providing an API to the -+ * Function Driver. -+ * -+ * The Peripheral Controller Driver for Linux will implement the -+ * Gadget API, so that the existing Gadget drivers can be used. -+ * (Gadget Driver is the Linux terminology for a Function Driver.) -+ * -+ * The Linux Gadget API is defined in the header file -+ * . The USB EP operations API is -+ * defined in the structure usb_ep_ops and the USB -+ * Controller API is defined in the structure -+ * usb_gadget_ops. -+ * -+ * An important function of the PCD is managing interrupts generated -+ * by the DWC_otg controller. 
The implementation of the DWC_otg device -+ * mode interrupt service routines is in dwc_otg_pcd_intr.c. -+ * -+ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc). -+ * @todo Does it work when the request size is greater than DEPTSIZ -+ * transfer size -+ * -+ */ -+ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+//#include -+ -+#include "otg_driver.h" -+#include "otg_pcd.h" -+ -+ -+ -+/** -+ * Static PCD pointer for use in usb_gadget_register_driver and -+ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init. -+ */ -+static dwc_otg_pcd_t *s_pcd = 0; -+ -+ -+/* Display the contents of the buffer */ -+extern void dump_msg(const u8 *buf, unsigned int length); -+ -+ -+/** -+ * This function completes a request. It call's the request call back. -+ */ -+void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req, -+ int status) -+{ -+ unsigned stopped = ep->stopped; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep); -+ list_del_init(&req->queue); -+ -+ if (req->req.status == -EINPROGRESS) { -+ req->req.status = status; -+ } else { -+ status = req->req.status; -+ } -+ -+ /* don't modify queue heads during completion callback */ -+ ep->stopped = 1; -+ SPIN_UNLOCK(&ep->pcd->lock); -+ req->req.complete(&ep->ep, &req->req); -+ SPIN_LOCK(&ep->pcd->lock); -+ -+ if (ep->pcd->request_pending > 0) { -+ --ep->pcd->request_pending; -+ } -+ -+ ep->stopped = stopped; -+} -+ -+/** -+ * This function terminates all the requsts in the EP request queue. -+ */ -+void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_pcd_request_t *req; -+ -+ ep->stopped = 1; -+ -+ /* called with irqs blocked?? */ -+ while (!list_empty(&ep->queue)) { -+ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, -+ queue); -+ dwc_otg_request_done(ep, req, -ESHUTDOWN); -+ } -+} -+ -+/* USB Endpoint Operations */ -+/* -+ * The following sections briefly describe the behavior of the Gadget -+ * API endpoint operations implemented in the DWC_otg driver -+ * software. Detailed descriptions of the generic behavior of each of -+ * these functions can be found in the Linux header file -+ * include/linux/usb_gadget.h. -+ * -+ * The Gadget API provides wrapper functions for each of the function -+ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper -+ * function, which then calls the underlying PCD function. The -+ * following sections are named according to the wrapper -+ * functions. Within each section, the corresponding DWC_otg PCD -+ * function name is specified. 
-+ * -+ */ -+ -+/** -+ * This function assigns periodic Tx FIFO to an periodic EP -+ * in shared Tx FIFO mode -+ */ -+static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t PerTxMsk = 1; -+ int i; -+ for(i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) -+ { -+ if((PerTxMsk & core_if->p_tx_msk) == 0) { -+ core_if->p_tx_msk |= PerTxMsk; -+ return i + 1; -+ } -+ PerTxMsk <<= 1; -+ } -+ return 0; -+} -+/** -+ * This function releases periodic Tx FIFO -+ * in shared Tx FIFO mode -+ */ -+static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) -+{ -+ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk; -+} -+/** -+ * This function assigns periodic Tx FIFO to an periodic EP -+ * in shared Tx FIFO mode -+ */ -+static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t TxMsk = 1; -+ int i; -+ -+ for(i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) -+ { -+ if((TxMsk & core_if->tx_msk) == 0) { -+ core_if->tx_msk |= TxMsk; -+ return i + 1; -+ } -+ TxMsk <<= 1; -+ } -+ return 0; -+} -+/** -+ * This function releases periodic Tx FIFO -+ * in shared Tx FIFO mode -+ */ -+static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) -+{ -+ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk; -+} -+ -+/** -+ * This function is called by the Gadget Driver for each EP to be -+ * configured for the current configuration (SET_CONFIGURATION). -+ * -+ * This function initializes the dwc_otg_ep_t data structure, and then -+ * calls dwc_otg_ep_activate. -+ */ -+static int dwc_otg_pcd_ep_enable(struct usb_ep *usb_ep, -+ const struct usb_endpoint_descriptor *ep_desc) -+{ -+ dwc_otg_pcd_ep_t *ep = 0; -+ dwc_otg_pcd_t *pcd = 0; -+ unsigned long flags; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, ep_desc); -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || !ep_desc || ep->desc || -+ ep_desc->bDescriptorType != USB_DT_ENDPOINT) { -+ DWC_WARN("%s, bad ep or descriptor\n", __func__); -+ return -EINVAL; -+ } -+ if (ep == &ep->pcd->ep0) { -+ DWC_WARN("%s, bad ep(0)\n", __func__); -+ return -EINVAL; -+ } -+ -+ /* Check FIFO size? */ -+ if (!ep_desc->wMaxPacketSize) { -+ DWC_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name); -+ return -ERANGE; -+ } -+ -+ pcd = ep->pcd; -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ -+ ep->desc = ep_desc; -+ ep->ep.maxpacket = le16_to_cpu (ep_desc->wMaxPacketSize); -+ -+ /* -+ * Activate the EP -+ */ -+ ep->stopped = 0; -+ -+ ep->dwc_ep.is_in = (USB_DIR_IN & ep_desc->bEndpointAddress) != 0; -+ ep->dwc_ep.maxpacket = ep->ep.maxpacket; -+ -+ ep->dwc_ep.type = ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; -+ -+ if(ep->dwc_ep.is_in) { -+ if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) { -+ ep->dwc_ep.tx_fifo_num = 0; -+ -+ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_ISOC) { -+ /* -+ * if ISOC EP then assign a Periodic Tx FIFO. -+ */ -+ ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if); -+ } -+ } else { -+ /* -+ * if Dedicated FIFOs mode is on then assign a Tx FIFO. -+ */ -+ ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if); -+ -+ } -+ } -+ /* Set initial data PID. 
*/ -+ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_BULK) { -+ ep->dwc_ep.data_pid_start = 0; -+ } -+ -+ DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n", -+ ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"), -+ ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc); -+ -+ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC) { -+ ep->dwc_ep.desc_addr = dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); -+ } -+ -+ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ return 0; -+} -+ -+/** -+ * This function is called when an EP is disabled due to disconnect or -+ * change in configuration. Any pending requests will terminate with a -+ * status of -ESHUTDOWN. -+ * -+ * This function modifies the dwc_otg_ep_t data structure for this EP, -+ * and then calls dwc_otg_ep_deactivate. -+ */ -+static int dwc_otg_pcd_ep_disable(struct usb_ep *usb_ep) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd = 0; -+ unsigned long flags; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, usb_ep); -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || !ep->desc) { -+ DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__, -+ usb_ep ? ep->ep.name : NULL); -+ return -EINVAL; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ -+ dwc_otg_request_nuke(ep); -+ -+ dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep); -+ ep->desc = 0; -+ ep->stopped = 1; -+ -+ if(ep->dwc_ep.is_in) { -+ dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); -+ release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); -+ release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); -+ } -+ -+ /* Free DMA Descriptors */ -+ pcd = ep->pcd; -+ -+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); -+ -+ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC && ep->dwc_ep.desc_addr) { -+ dwc_otg_ep_free_desc_chain(ep->dwc_ep.desc_addr, ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); -+ } -+ -+ DWC_DEBUGPL(DBG_PCD, "%s disabled\n", usb_ep->name); -+ return 0; -+} -+ -+ -+/** -+ * This function allocates a request object to use with the specified -+ * endpoint. -+ * -+ * @param ep The endpoint to be used with with the request -+ * @param gfp_flags the GFP_* flags to use. -+ */ -+static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep, -+ gfp_t gfp_flags) -+{ -+ dwc_otg_pcd_request_t *req; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d)\n", __func__, ep, gfp_flags); -+ if (0 == ep) { -+ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n"); -+ return 0; -+ } -+ req = kmalloc(sizeof(dwc_otg_pcd_request_t), gfp_flags); -+ if (0 == req) { -+ DWC_WARN("%s() %s\n", __func__, -+ "request allocation failed!\n"); -+ return 0; -+ } -+ memset(req, 0, sizeof(dwc_otg_pcd_request_t)); -+ req->req.dma = DMA_ADDR_INVALID; -+ INIT_LIST_HEAD(&req->queue); -+ return &req->req; -+} -+ -+/** -+ * This function frees a request object. -+ * -+ * @param ep The endpoint associated with the request -+ * @param req The request being freed -+ */ -+static void dwc_otg_pcd_free_request(struct usb_ep *ep, -+ struct usb_request *req) -+{ -+ dwc_otg_pcd_request_t *request; -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, ep, req); -+ -+ if (0 == ep || 0 == req) { -+ DWC_WARN("%s() %s\n", __func__, -+ "Invalid ep or req argument!\n"); -+ return; -+ } -+ -+ request = container_of(req, dwc_otg_pcd_request_t, req); -+ kfree(request); -+} -+ -+#if 0 -+/** -+ * This function allocates an I/O buffer to be used for a transfer -+ * to/from the specified endpoint. 
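The assign_perio_tx_fifo()/assign_tx_fifo() helpers above hand out Tx FIFO numbers by scanning for the first clear bit in a mask, and the release_*() counterparts clear that bit again when the endpoint is disabled. A standalone sketch of that first-free-bit allocator follows; the fixed NUM_TX_FIFOS and the sketch_* names are assumptions for the example, since the real count comes from the hardware configuration registers.

/* Illustrative only: first-free-bit FIFO allocator, as in assign_tx_fifo(). */
#include <stdint.h>
#include <stdio.h>

#define NUM_TX_FIFOS 4   /* assumed for the sketch; hardware reports this   */

/* Returns a 1-based FIFO number, or 0 if every FIFO is already taken.      */
static uint32_t sketch_assign_tx_fifo(uint32_t *mask)
{
    uint32_t bit = 1;
    for (int i = 0; i < NUM_TX_FIFOS; ++i) {
        if ((*mask & bit) == 0) {
            *mask |= bit;            /* claim it                            */
            return i + 1;
        }
        bit <<= 1;
    }
    return 0;                        /* none free                           */
}

/* Clears the bit for fifo_num; equivalent to the XOR trick in release_*(). */
static void sketch_release_tx_fifo(uint32_t *mask, uint32_t fifo_num)
{
    if (fifo_num)
        *mask &= ~(1u << (fifo_num - 1));
}

int main(void)
{
    uint32_t mask = 0;
    uint32_t a = sketch_assign_tx_fifo(&mask);   /* 1                       */
    uint32_t b = sketch_assign_tx_fifo(&mask);   /* 2                       */
    sketch_release_tx_fifo(&mask, a);
    uint32_t c = sketch_assign_tx_fifo(&mask);   /* 1 again                 */
    printf("%u %u %u mask=0x%x\n", a, b, c, mask);
    return 0;
}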
-+ * -+ * @param usb_ep The endpoint to be used with with the request -+ * @param bytes The desired number of bytes for the buffer -+ * @param dma Pointer to the buffer's DMA address; must be valid -+ * @param gfp_flags the GFP_* flags to use. -+ * @return address of a new buffer or null is buffer could not be allocated. -+ */ -+static void *dwc_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes, -+ dma_addr_t *dma, -+ gfp_t gfp_flags) -+{ -+ void *buf; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd = 0; -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ pcd = ep->pcd; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes, -+ dma, gfp_flags); -+ -+ /* Check dword alignment */ -+ if ((bytes & 0x3UL) != 0) { -+ DWC_WARN("%s() Buffer size is not a multiple of" -+ "DWORD size (%d)",__func__, bytes); -+ } -+ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ buf = dma_alloc_coherent (NULL, bytes, dma, gfp_flags); -+ } -+ else { -+ buf = kmalloc(bytes, gfp_flags); -+ } -+ -+ /* Check dword alignment */ -+ if (((int)buf & 0x3UL) != 0) { -+ DWC_WARN("%s() Buffer is not DWORD aligned (%p)", -+ __func__, buf); -+ } -+ -+ return buf; -+} -+ -+/** -+ * This function frees an I/O buffer that was allocated by alloc_buffer. -+ * -+ * @param usb_ep the endpoint associated with the buffer -+ * @param buf address of the buffer -+ * @param dma The buffer's DMA address -+ * @param bytes The number of bytes of the buffer -+ */ -+static void dwc_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf, -+ dma_addr_t dma, unsigned bytes) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd = 0; -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ pcd = ep->pcd; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%0x,%d)\n", __func__, ep, buf, dma, bytes); -+ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ dma_free_coherent (NULL, bytes, buf, dma); -+ } -+ else { -+ kfree(buf); -+ } -+} -+#endif -+ -+/** -+ * This function is used to submit an I/O Request to an EP. -+ * -+ * - When the request completes the request's completion callback -+ * is called to return the request to the driver. -+ * - An EP, except control EPs, may have multiple requests -+ * pending. -+ * - Once submitted the request cannot be examined or modified. -+ * - Each request is turned into one or more packets. -+ * - A BULK EP can queue any amount of data; the transfer is -+ * packetized. -+ * - Zero length Packets are specified with the request 'zero' -+ * flag. 
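The queueing rules above note that zero-length packets are requested through the usb_request 'zero' flag; in the ep_queue path below the driver only arms a trailing ZLP when the transfer length is a non-zero multiple of maxpacket, because a short (or empty) final packet already terminates the transfer by itself. A tiny standalone restatement of that rule:

/* Illustrative only: when does a 'zero' request need an explicit trailing ZLP? */
#include <stdbool.h>
#include <stdio.h>

static bool needs_trailing_zlp(bool zero_flag, unsigned len, unsigned maxpacket)
{
    /* A short or empty final packet ends the transfer on its own; only an
     * exact multiple of maxpacket needs the extra zero-length packet.      */
    return zero_flag && len != 0 && (len % maxpacket) == 0;
}

int main(void)
{
    printf("%d\n", needs_trailing_zlp(true, 1024, 512));  /* 1: multiple    */
    printf("%d\n", needs_trailing_zlp(true, 1000, 512));  /* 0: ends short  */
    printf("%d\n", needs_trailing_zlp(false, 1024, 512)); /* 0: flag unset  */
    return 0;
}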
-+ */ -+static int dwc_otg_pcd_ep_queue(struct usb_ep *usb_ep, -+ struct usb_request *usb_req, -+ gfp_t gfp_flags) -+{ -+ int prevented = 0; -+ dwc_otg_pcd_request_t *req; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ unsigned long flags = 0; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n", -+ __func__, usb_ep, usb_req, gfp_flags); -+ -+ req = container_of(usb_req, dwc_otg_pcd_request_t, req); -+ if (!usb_req || !usb_req->complete || !usb_req->buf || -+ !list_empty(&req->queue)) { -+ DWC_WARN("%s, bad params\n", __func__); -+ return -EINVAL; -+ } -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || (!ep->desc && ep->dwc_ep.num != 0)/* || ep->stopped != 0*/) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ pcd = ep->pcd; -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ -+ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", -+ usb_ep->name, usb_req, usb_req->length, usb_req->buf); -+ -+ if (!GET_CORE_IF(pcd)->core_params->opt) { -+ if (ep->dwc_ep.num != 0) { -+ DWC_ERROR("%s queue req %p, len %d buf %p\n", -+ usb_ep->name, usb_req, usb_req->length, usb_req->buf); -+ } -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ -+#if defined(DEBUG) & defined(VERBOSE) -+ dump_msg(usb_req->buf, usb_req->length); -+#endif -+ -+ usb_req->status = -EINPROGRESS; -+ usb_req->actual = 0; -+ -+ /* -+ * For EP0 IN without premature status, zlp is required? -+ */ -+ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) { -+ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", usb_ep->name); -+ //_req->zero = 1; -+ } -+ -+ /* Start the transfer */ -+ if (list_empty(&ep->queue) && !ep->stopped) { -+ /* EP0 Transfer? 
*/ -+ if (ep->dwc_ep.num == 0) { -+ switch (pcd->ep0state) { -+ case EP0_IN_DATA_PHASE: -+ DWC_DEBUGPL(DBG_PCD, -+ "%s ep0: EP0_IN_DATA_PHASE\n", -+ __func__); -+ break; -+ -+ case EP0_OUT_DATA_PHASE: -+ DWC_DEBUGPL(DBG_PCD, -+ "%s ep0: EP0_OUT_DATA_PHASE\n", -+ __func__); -+ if (pcd->request_config) { -+ /* Complete STATUS PHASE */ -+ ep->dwc_ep.is_in = 1; -+ pcd->ep0state = EP0_IN_STATUS_PHASE; -+ } -+ break; -+ -+ case EP0_IN_STATUS_PHASE: -+ DWC_DEBUGPL(DBG_PCD, -+ "%s ep0: EP0_IN_STATUS_PHASE\n", -+ __func__); -+ break; -+ -+ default: -+ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n", -+ pcd->ep0state); -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return -EL2HLT; -+ } -+ ep->dwc_ep.dma_addr = usb_req->dma; -+ ep->dwc_ep.start_xfer_buff = usb_req->buf; -+ ep->dwc_ep.xfer_buff = usb_req->buf; -+ ep->dwc_ep.xfer_len = usb_req->length; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; -+ -+ if(usb_req->zero) { -+ if((ep->dwc_ep.xfer_len % ep->dwc_ep.maxpacket == 0) -+ && (ep->dwc_ep.xfer_len != 0)) { -+ ep->dwc_ep.sent_zlp = 1; -+ } -+ -+ } -+ -+ ep_check_and_patch_dma_addr(ep); -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); -+ } -+ else { -+ -+ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; -+ -+ /* Setup and start the Transfer */ -+ ep->dwc_ep.dma_addr = usb_req->dma; -+ ep->dwc_ep.start_xfer_buff = usb_req->buf; -+ ep->dwc_ep.xfer_buff = usb_req->buf; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = usb_req->length; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ -+ if(max_transfer > MAX_TRANSFER_SIZE) { -+ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); -+ } else { -+ ep->dwc_ep.maxxfer = max_transfer; -+ } -+ -+ if(usb_req->zero) { -+ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) -+ && (ep->dwc_ep.total_len != 0)) { -+ ep->dwc_ep.sent_zlp = 1; -+ } -+ -+ } -+ -+ ep_check_and_patch_dma_addr(ep); -+ dwc_otg_ep_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); -+ } -+ } -+ -+ if ((req != 0) || prevented) { -+ ++pcd->request_pending; -+ list_add_tail(&req->queue, &ep->queue); -+ if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) { -+ /** @todo NGS Create a function for this. */ -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ diepmsk.b.intktxfemp = 1; -+ if(&GET_CORE_IF(pcd)->multiproc_int_enable) { -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepeachintmsk[ep->dwc_ep.num], -+ 0, diepmsk.d32); -+ } else { -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32); -+ } -+ } -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return 0; -+} -+ -+/** -+ * This function cancels an I/O request from an EP. -+ */ -+static int dwc_otg_pcd_ep_dequeue(struct usb_ep *usb_ep, -+ struct usb_request *usb_req) -+{ -+ dwc_otg_pcd_request_t *req; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ unsigned long flags; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, usb_req); -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || !usb_req || (!ep->desc && ep->dwc_ep.num != 0)) { -+ DWC_WARN("%s, bad argument\n", __func__); -+ return -EINVAL; -+ } -+ pcd = ep->pcd; -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, usb_ep->name, -+ ep->dwc_ep.is_in ? 
"IN" : "OUT", -+ usb_req); -+ -+ /* make sure it's actually queued on this endpoint */ -+ list_for_each_entry(req, &ep->queue, queue) -+ { -+ if (&req->req == usb_req) { -+ break; -+ } -+ } -+ -+ if (&req->req != usb_req) { -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return -EINVAL; -+ } -+ -+ if (!list_empty(&req->queue)) { -+ dwc_otg_request_done(ep, req, -ECONNRESET); -+ } -+ else { -+ req = 0; -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ return req ? 0 : -EOPNOTSUPP; -+} -+ -+/** -+ * usb_ep_set_halt stalls an endpoint. -+ * -+ * usb_ep_clear_halt clears an endpoint halt and resets its data -+ * toggle. -+ * -+ * Both of these functions are implemented with the same underlying -+ * function. The behavior depends on the value argument. -+ * -+ * @param[in] usb_ep the Endpoint to halt or clear halt. -+ * @param[in] value -+ * - 0 means clear_halt. -+ * - 1 means set_halt, -+ * - 2 means clear stall lock flag. -+ * - 3 means set stall lock flag. -+ */ -+static int dwc_otg_pcd_ep_set_halt(struct usb_ep *usb_ep, int value) -+{ -+ int retval = 0; -+ unsigned long flags; -+ dwc_otg_pcd_ep_t *ep = 0; -+ -+ -+ DWC_DEBUGPL(DBG_PCD,"HALT %s %d\n", usb_ep->name, value); -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ -+ if (!usb_ep || (!ep->desc && ep != &ep->pcd->ep0) || -+ ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ if (!list_empty(&ep->queue)) { -+ DWC_WARN("%s() %s XFer In process\n", __func__, usb_ep->name); -+ retval = -EAGAIN; -+ } -+ else if (value == 0) { -+ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if, -+ &ep->dwc_ep); -+ } -+ else if(value == 1) { -+ if (ep->dwc_ep.is_in == 1 && ep->pcd->otg_dev->core_if->dma_desc_enable) { -+ dtxfsts_data_t txstatus; -+ fifosize_data_t txfifosize; -+ -+ txfifosize.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->core_global_regs->dptxfsiz_dieptxf[ep->dwc_ep.tx_fifo_num]); -+ txstatus.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->dtxfsts); -+ -+ if(txstatus.b.txfspcavail < txfifosize.b.depth) { -+ DWC_WARN("%s() %s Data In Tx Fifo\n", __func__, usb_ep->name); -+ retval = -EAGAIN; -+ } -+ else { -+ if (ep->dwc_ep.num == 0) { -+ ep->pcd->ep0state = EP0_STALL; -+ } -+ -+ ep->stopped = 1; -+ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, -+ &ep->dwc_ep); -+ } -+ } -+ else { -+ if (ep->dwc_ep.num == 0) { -+ ep->pcd->ep0state = EP0_STALL; -+ } -+ -+ ep->stopped = 1; -+ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, -+ &ep->dwc_ep); -+ } -+ } -+ else if (value == 2) { -+ ep->dwc_ep.stall_clear_flag = 0; -+ } -+ else if (value == 3) { -+ ep->dwc_ep.stall_clear_flag = 1; -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); -+ return retval; -+} -+ -+/** -+ * This function allocates a DMA Descriptor chain for the Endpoint -+ * buffer to be used for a transfer to/from the specified endpoint. 
-+ */ -+dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count) -+{ -+ -+ return dma_alloc_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr, GFP_KERNEL); -+} -+ -+LIST_HEAD(tofree_list); -+DEFINE_SPINLOCK(tofree_list_lock); -+ -+struct free_param { -+ struct list_head list; -+ -+ void* addr; -+ dma_addr_t dma_addr; -+ uint32_t size; -+}; -+void free_list_agent_fn(void *data){ -+ struct list_head free_list; -+ struct free_param *cur,*next; -+ -+ spin_lock(&tofree_list_lock); -+ list_add(&free_list,&tofree_list); -+ list_del_init(&tofree_list); -+ spin_unlock(&tofree_list_lock); -+ -+ list_for_each_entry_safe(cur,next,&free_list,list){ -+ if(cur==&free_list) break; -+ dma_free_coherent(NULL,cur->size,cur->addr,cur->dma_addr); -+ list_del(&cur->list); -+ kfree(cur); -+ } -+} -+DECLARE_WORK(free_list_agent,free_list_agent_fn); -+/** -+ * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc. -+ */ -+void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count) -+{ -+ if(irqs_disabled()){ -+ struct free_param* fp=kmalloc(sizeof(struct free_param),GFP_KERNEL); -+ fp->addr=desc_addr; -+ fp->dma_addr=dma_desc_addr; -+ fp->size=count*sizeof(dwc_otg_dma_desc_t); -+ -+ spin_lock(&tofree_list_lock); -+ list_add(&fp->list,&tofree_list); -+ spin_unlock(&tofree_list_lock); -+ -+ schedule_work(&free_list_agent); -+ return ; -+ } -+ dma_free_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), desc_addr, dma_desc_addr); -+} -+ -+#ifdef DWC_EN_ISOC -+ -+/** -+ * This function initializes a descriptor chain for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dwc_ep The EP to start the transfer on. -+ * -+ */ -+void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ -+ dsts_data_t dsts = { .d32 = 0}; -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ int i, j; -+ -+ if(dwc_ep->is_in) -+ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval; -+ else -+ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; -+ -+ -+ /** Allocate descriptors for double buffering */ -+ dwc_ep->iso_desc_addr = dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,dwc_ep->desc_cnt*2); -+ if(dwc_ep->desc_addr) { -+ DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__); -+ return; -+ } -+ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ /** ISO OUT EP */ -+ if(dwc_ep->is_in == 0) { -+ desc_sts_data_t sts = { .d32 =0 }; -+ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; -+ dma_addr_t dma_ad; -+ uint32_t data_per_desc; -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[dwc_ep->num]; -+ int offset; -+ -+ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; -+ dma_ad = (dma_addr_t)dwc_read_reg32(&(out_regs->doepdma)); -+ -+ /** Buffer 0 descriptors setup */ -+ dma_ad = dwc_ep->dma_addr0; -+ -+ sts.b_iso_out.bs = BS_HOST_READY; -+ sts.b_iso_out.rxsts = 0; -+ sts.b_iso_out.l = 0; -+ sts.b_iso_out.sp = 0; -+ sts.b_iso_out.ioc = 0; -+ sts.b_iso_out.pid = 0; -+ sts.b_iso_out.framenum = 0; -+ -+ offset = 0; -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
-+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ //(uint32_t)dma_ad += data_per_desc; -+ dma_ad = (uint32_t)dma_ad + data_per_desc; -+ } -+ } -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ //(uint32_t)dma_ad += data_per_desc; -+ dma_ad = (uint32_t)dma_ad + data_per_desc; -+ } -+ -+ sts.b_iso_out.ioc = 1; -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ dma_desc ++; -+ -+ /** Buffer 1 descriptors setup */ -+ sts.b_iso_out.ioc = 0; -+ dma_ad = dwc_ep->dma_addr1; -+ -+ offset = 0; -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ //(uint32_t)dma_ad += data_per_desc; -+ dma_ad = (uint32_t)dma_ad + data_per_desc; -+ } -+ } -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ //(uint32_t)dma_ad += data_per_desc; -+ dma_ad = (uint32_t)dma_ad + data_per_desc; -+ } -+ -+ sts.b_iso_out.ioc = 1; -+ sts.b_iso_out.l = 1; -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dwc_ep->next_frame = 0; -+ -+ /** Write dma_ad into DOEPDMA register */ -+ dwc_write_reg32(&(out_regs->doepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); -+ -+ } -+ /** ISO IN EP */ -+ else { -+ desc_sts_data_t sts = { .d32 =0 }; -+ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; -+ dma_addr_t dma_ad; -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[dwc_ep->num]; -+ unsigned int frmnumber; -+ fifosize_data_t txfifosize,rxfifosize; -+ -+ txfifosize.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->dtxfsts); -+ rxfifosize.d32 = dwc_read_reg32(&core_if->core_global_regs->grxfsiz); -+ -+ -+ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; -+ -+ dma_ad = dwc_ep->dma_addr0; -+ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ sts.b_iso_in.bs = BS_HOST_READY; -+ sts.b_iso_in.txsts = 0; -+ sts.b_iso_in.sp = (dwc_ep->data_per_frame % dwc_ep->maxpacket)? 1 : 0; -+ sts.b_iso_in.ioc = 0; -+ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; -+ -+ -+ frmnumber = dwc_ep->next_frame; -+ -+ sts.b_iso_in.framenum = frmnumber; -+ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; -+ sts.b_iso_in.l = 0; -+ -+ /** Buffer 0 descriptors setup */ -+ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) -+ { -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ dma_desc ++; -+ -+ //(uint32_t)dma_ad += dwc_ep->data_per_frame; -+ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; -+ sts.b_iso_in.framenum += dwc_ep->bInterval; -+ } -+ -+ sts.b_iso_in.ioc = 1; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ ++dma_desc; -+ -+ /** Buffer 1 descriptors setup */ -+ sts.b_iso_in.ioc = 0; -+ dma_ad = dwc_ep->dma_addr1; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ dma_desc ++; -+ -+ //(uint32_t)dma_ad += dwc_ep->data_per_frame; -+ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; -+ sts.b_iso_in.framenum += dwc_ep->bInterval; -+ -+ sts.b_iso_in.ioc = 0; -+ } -+ sts.b_iso_in.ioc = 1; -+ sts.b_iso_in.l = 1; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval; -+ -+ /** Write dma_ad into diepdma register */ -+ dwc_write_reg32(&(in_regs->diepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); -+ } -+ /** Enable endpoint, clear nak */ -+ depctl.d32 = 0; -+ depctl.b.epena = 1; -+ depctl.b.usbactep = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(addr, depctl.d32,depctl.d32); -+ depctl.d32 = dwc_read_reg32(addr); -+} -+ -+/** -+ * This function initializes a descriptor chain for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. 
-+ * -+ */ -+ -+void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ -+ -+ if(ep->is_in) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ } else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ } -+ -+ -+ if(core_if->dma_enable == 0 || core_if->dma_desc_enable!= 0) { -+ return; -+ } else { -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ -+ ep->xfer_len = ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval; -+ ep->pkt_cnt = (ep->xfer_len - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ ep->xfer_count = 0; -+ ep->xfer_buff = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; -+ ep->dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; -+ -+ if(ep->is_in) { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 1 : 0) -+ */ -+ deptsiz.b.mc = ep->pkt_per_frm; -+ deptsiz.b.xfersize = ep->xfer_len; -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); -+ -+ } else { -+ deptsiz.b.pktcnt = -+ (ep->xfer_len + (ep->maxpacket - 1)) / -+ ep->maxpacket; -+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), (uint32_t)ep->dma_addr); -+ -+ } -+ /** Enable endpoint, clear nak */ -+ depctl.d32 = 0; -+ dwc_modify_reg32(addr, depctl.d32,depctl.d32); -+ -+ depctl.b.epena = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(addr, depctl.d32,depctl.d32); -+ } -+} -+ -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ */ -+ -+void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable) { -+ if(ep->is_in) { -+ ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm; -+ } else { -+ ep->desc_cnt = ep->pkt_cnt; -+ } -+ dwc_otg_iso_ep_start_ddma_transfer(core_if, ep); -+ } else { -+ if(core_if->pti_enh_enable) { -+ dwc_otg_iso_ep_start_buf_transfer(core_if, ep); -+ } else { -+ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; -+ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; -+ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); -+ } -+ } -+ } else { -+ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; -+ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; -+ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); -+ } -+} -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. 
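The buffer-mode ISO start routine above programs DEPTSIZ from the buffering interval: the transfer length covers buf_proc_intrvl worth of frames, and the packet count is that length rounded up to whole max-packets (with the OUT size padded to full packets). The standalone arithmetic sketch below uses made-up example values, not figures from real hardware.

/* Illustrative only: the DEPTSIZ sizing used by the buffer-mode ISO start. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Example numbers, chosen for the illustration only.                   */
    uint32_t data_per_frame  = 768;   /* bytes moved per (micro)frame       */
    uint32_t buf_proc_intrvl = 8;     /* frames covered by one buffer       */
    uint32_t bInterval       = 1;     /* endpoint service interval          */
    uint32_t maxpacket       = 512;

    uint32_t xfer_len = data_per_frame * buf_proc_intrvl / bInterval;

    /* IN: packet count is the length rounded up to whole max-packets.      */
    uint32_t in_pktcnt    = (xfer_len - 1 + maxpacket) / maxpacket;

    /* OUT: same rounding, but the transfer size is padded to full packets. */
    uint32_t out_pktcnt   = (xfer_len + maxpacket - 1) / maxpacket;
    uint32_t out_xfersize = out_pktcnt * maxpacket;

    printf("len=%u in_pktcnt=%u out_pktcnt=%u out_xfersize=%u\n",
           xfer_len, in_pktcnt, out_pktcnt, out_xfersize);
    return 0;
}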
-+ * @param ep The EP to start the transfer on. -+ */ -+ -+void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ -+ if(ep->is_in == 1) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ } -+ else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ } -+ -+ /* disable the ep */ -+ depctl.d32 = dwc_read_reg32(addr); -+ -+ depctl.b.epdis = 1; -+ depctl.b.snak = 1; -+ -+ dwc_write_reg32(addr, depctl.d32); -+ -+ if(core_if->dma_desc_enable && -+ ep->iso_desc_addr && ep->iso_dma_desc_addr) { -+ dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,ep->iso_dma_desc_addr,ep->desc_cnt * 2); -+ } -+ -+ /* reset varibales */ -+ ep->dma_addr0 = 0; -+ ep->dma_addr1 = 0; -+ ep->xfer_buff0 = 0; -+ ep->xfer_buff1 = 0; -+ ep->data_per_frame = 0; -+ ep->data_pattern_frame = 0; -+ ep->sync_frame = 0; -+ ep->buf_proc_intrvl = 0; -+ ep->bInterval = 0; -+ ep->proc_buf_num = 0; -+ ep->pkt_per_frm = 0; -+ ep->pkt_per_frm = 0; -+ ep->desc_cnt = 0; -+ ep->iso_desc_addr = 0; -+ ep->iso_dma_desc_addr = 0; -+} -+ -+ -+/** -+ * This function is used to submit an ISOC Transfer Request to an EP. -+ * -+ * - Every time a sync period completes the request's completion callback -+ * is called to provide data to the gadget driver. -+ * - Once submitted the request cannot be modified. -+ * - Each request is turned into periodic data packets untill ISO -+ * Transfer is stopped.. -+ */ -+static int dwc_otg_pcd_iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req, -+ gfp_t gfp_flags) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ dwc_ep_t *dwc_ep; -+ unsigned long flags = 0; -+ int32_t frm_data; -+ dwc_otg_core_if_t *core_if; -+ dcfg_data_t dcfg; -+ dsts_data_t dsts; -+ -+ -+ if (!req || !req->process_buffer || !req->buf0 || !req->buf1) { -+ DWC_WARN("%s, bad params\n", __func__); -+ return -EINVAL; -+ } -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ -+ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ pcd = ep->pcd; -+ core_if = GET_CORE_IF(pcd); -+ -+ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); -+ -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ -+ dwc_ep = &ep->dwc_ep; -+ -+ if(ep->iso_req) { -+ DWC_WARN("%s, iso request in progress\n", __func__); -+ } -+ req->status = -EINPROGRESS; -+ -+ dwc_ep->dma_addr0 = req->dma0; -+ dwc_ep->dma_addr1 = req->dma1; -+ -+ dwc_ep->xfer_buff0 = req->buf0; -+ dwc_ep->xfer_buff1 = req->buf1; -+ -+ ep->iso_req = req; -+ -+ dwc_ep->data_per_frame = req->data_per_frame; -+ -+ /** @todo - pattern data support is to be implemented in the future */ -+ dwc_ep->data_pattern_frame = req->data_pattern_frame; -+ dwc_ep->sync_frame = req->sync_frame; -+ -+ dwc_ep->buf_proc_intrvl = req->buf_proc_intrvl; -+ -+ dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1); -+ -+ dwc_ep->proc_buf_num = 0; -+ -+ dwc_ep->pkt_per_frm = 0; -+ frm_data = ep->dwc_ep.data_per_frame; -+ while(frm_data > 0) { -+ dwc_ep->pkt_per_frm++; -+ frm_data -= ep->dwc_ep.maxpacket; -+ } -+ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ if(req->flags & USB_REQ_ISO_ASAP) { -+ dwc_ep->next_frame = dsts.b.soffn + 1; -+ if(dwc_ep->bInterval != 1){ -+ dwc_ep->next_frame = 
dwc_ep->next_frame + (dwc_ep->bInterval - 1 - dwc_ep->next_frame % dwc_ep->bInterval); -+ } -+ } else { -+ dwc_ep->next_frame = req->start_frame; -+ } -+ -+ -+ if(!core_if->pti_enh_enable) { -+ dwc_ep->pkt_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; -+ } else { -+ dwc_ep->pkt_cnt = -+ (dwc_ep->data_per_frame * (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval) -+ - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket; -+ } -+ -+ if(core_if->dma_desc_enable) { -+ dwc_ep->desc_cnt = -+ dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; -+ } -+ -+ dwc_ep->pkt_info = kmalloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt, GFP_KERNEL); -+ if(!dwc_ep->pkt_info) { -+ return -ENOMEM; -+ } -+ if(core_if->pti_enh_enable) { -+ memset(dwc_ep->pkt_info, 0, sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt); -+ } -+ -+ dwc_ep->cur_pkt = 0; -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ dwc_otg_iso_ep_start_transfer(core_if, dwc_ep); -+ -+ return 0; -+} -+ -+/** -+ * This function stops ISO EP Periodic Data Transfer. -+ */ -+static int dwc_otg_pcd_iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ dwc_ep_t *dwc_ep; -+ unsigned long flags; -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ -+ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ pcd = ep->pcd; -+ -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ dwc_ep = &ep->dwc_ep; -+ -+ dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep); -+ -+ kfree(dwc_ep->pkt_info); -+ -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ -+ if(ep->iso_req != req) { -+ return -EINVAL; -+ } -+ -+ req->status = -ECONNRESET; -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ -+ ep->iso_req = 0; -+ -+ return 0; -+} -+ -+/** -+ * This function is used for perodical data exchnage between PCD and gadget drivers. 
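The ISO start path above works out pkt_per_frm by repeatedly subtracting maxpacket from data_per_frame until it is exhausted; that loop is simply a ceiling division. The standalone sketch below checks the equivalence over a few example sizes (the 512-byte maxpacket is an arbitrary example value).

/* Illustrative only: pkt_per_frm as computed by the loop vs. ceiling division. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pkt_per_frm_loop(int32_t data_per_frame, uint32_t maxpacket)
{
    uint32_t pkts = 0;
    int32_t left = data_per_frame;
    while (left > 0) {               /* the subtract-until-exhausted loop   */
        pkts++;
        left -= (int32_t)maxpacket;
    }
    return pkts;
}

static uint32_t pkt_per_frm_div(uint32_t data_per_frame, uint32_t maxpacket)
{
    return (data_per_frame + maxpacket - 1) / maxpacket;   /* same result   */
}

int main(void)
{
    unsigned sizes[] = { 0, 1, 512, 513, 1023, 1024, 3000 };
    for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
        printf("%u: loop=%u div=%u\n", sizes[i],
               pkt_per_frm_loop((int32_t)sizes[i], 512),
               pkt_per_frm_div(sizes[i], 512));
    return 0;
}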
-+ * for Isochronous EPs -+ * -+ * - Every time a sync period completes this function is called to -+ * perform data exchange between PCD and gadget -+ */ -+void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req) -+{ -+ int i; -+ struct usb_gadget_iso_packet_descriptor *iso_packet; -+ dwc_ep_t *dwc_ep; -+ -+ dwc_ep = &ep->dwc_ep; -+ -+ if(ep->iso_req->status == -ECONNRESET) { -+ DWC_PRINT("Device has already disconnected\n"); -+ /*Device has been disconnected*/ -+ return; -+ } -+ -+ if(dwc_ep->proc_buf_num != 0) { -+ iso_packet = ep->iso_req->iso_packet_desc0; -+ } -+ -+ else { -+ iso_packet = ep->iso_req->iso_packet_desc1; -+ } -+ -+ /* Fill in ISOC packets descriptors & pass to gadget driver*/ -+ -+ for(i = 0; i < dwc_ep->pkt_cnt; ++i) { -+ iso_packet[i].status = dwc_ep->pkt_info[i].status; -+ iso_packet[i].offset = dwc_ep->pkt_info[i].offset; -+ iso_packet[i].actual_length = dwc_ep->pkt_info[i].length; -+ dwc_ep->pkt_info[i].status = 0; -+ dwc_ep->pkt_info[i].offset = 0; -+ dwc_ep->pkt_info[i].length = 0; -+ } -+ -+ /* Call callback function to process data buffer */ -+ ep->iso_req->status = 0;/* success */ -+ -+ SPIN_UNLOCK(&ep->pcd->lock); -+ ep->iso_req->process_buffer(&ep->ep, ep->iso_req); -+ SPIN_LOCK(&ep->pcd->lock); -+} -+ -+ -+static struct usb_iso_request *dwc_otg_pcd_alloc_iso_request(struct usb_ep *ep,int packets, -+ gfp_t gfp_flags) -+{ -+ struct usb_iso_request *pReq = NULL; -+ uint32_t req_size; -+ -+ -+ req_size = sizeof(struct usb_iso_request); -+ req_size += (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor))); -+ -+ -+ pReq = kmalloc(req_size, gfp_flags); -+ if (!pReq) { -+ DWC_WARN("%s, can't allocate Iso Request\n", __func__); -+ return 0; -+ } -+ pReq->iso_packet_desc0 = (void*) (pReq + 1); -+ -+ pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets; -+ -+ return pReq; -+} -+ -+static void dwc_otg_pcd_free_iso_request(struct usb_ep *ep, struct usb_iso_request *req) -+{ -+ kfree(req); -+} -+ -+static struct usb_isoc_ep_ops dwc_otg_pcd_ep_ops = -+{ -+ .ep_ops = -+ { -+ .enable = dwc_otg_pcd_ep_enable, -+ .disable = dwc_otg_pcd_ep_disable, -+ -+ .alloc_request = dwc_otg_pcd_alloc_request, -+ .free_request = dwc_otg_pcd_free_request, -+ -+ //.alloc_buffer = dwc_otg_pcd_alloc_buffer, -+ //.free_buffer = dwc_otg_pcd_free_buffer, -+ -+ .queue = dwc_otg_pcd_ep_queue, -+ .dequeue = dwc_otg_pcd_ep_dequeue, -+ -+ .set_halt = dwc_otg_pcd_ep_set_halt, -+ .fifo_status = 0, -+ .fifo_flush = 0, -+ }, -+ .iso_ep_start = dwc_otg_pcd_iso_ep_start, -+ .iso_ep_stop = dwc_otg_pcd_iso_ep_stop, -+ .alloc_iso_request = dwc_otg_pcd_alloc_iso_request, -+ .free_iso_request = dwc_otg_pcd_free_iso_request, -+}; -+ -+#else -+ -+ -+static struct usb_ep_ops dwc_otg_pcd_ep_ops = -+{ -+ .enable = dwc_otg_pcd_ep_enable, -+ .disable = dwc_otg_pcd_ep_disable, -+ -+ .alloc_request = dwc_otg_pcd_alloc_request, -+ .free_request = dwc_otg_pcd_free_request, -+ -+// .alloc_buffer = dwc_otg_pcd_alloc_buffer, -+// .free_buffer = dwc_otg_pcd_free_buffer, -+ -+ .queue = dwc_otg_pcd_ep_queue, -+ .dequeue = dwc_otg_pcd_ep_dequeue, -+ -+ .set_halt = dwc_otg_pcd_ep_set_halt, -+ .fifo_status = 0, -+ .fifo_flush = 0, -+ -+ -+}; -+ -+#endif /* DWC_EN_ISOC */ -+/* Gadget Operations */ -+/** -+ * The following gadget operations will be implemented in the DWC_otg -+ * PCD. Functions in the API that are not described below are not -+ * implemented. -+ * -+ * The Gadget API provides wrapper functions for each of the function -+ * pointers defined in usb_gadget_ops. 
The Gadget Driver calls the -+ * wrapper function, which then calls the underlying PCD function. The -+ * following sections are named according to the wrapper functions -+ * (except for ioctl, which doesn't have a wrapper function). Within -+ * each section, the corresponding DWC_otg PCD function name is -+ * specified. -+ * -+ */ -+ -+/** -+ *Gets the USB Frame number of the last SOF. -+ */ -+static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget) -+{ -+ dwc_otg_pcd_t *pcd; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); -+ -+ if (gadget == 0) { -+ return -ENODEV; -+ } -+ else { -+ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); -+ dwc_otg_get_frame_number(GET_CORE_IF(pcd)); -+ } -+ -+ return 0; -+} -+ -+void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd) -+{ -+ uint32_t *addr = (uint32_t *)&(GET_CORE_IF(pcd)->core_global_regs->gotgctl); -+ gotgctl_data_t mem; -+ gotgctl_data_t val; -+ -+ val.d32 = dwc_read_reg32(addr); -+ if (val.b.sesreq) { -+ DWC_ERROR("Session Request Already active!\n"); -+ return; -+ } -+ -+ DWC_NOTICE("Session Request Initated\n"); -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.sesreq = 1; -+ dwc_write_reg32(addr, mem.d32); -+ -+ /* Start the SRP timer */ -+ dwc_otg_pcd_start_srp_timer(pcd); -+ return; -+} -+ -+void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set) -+{ -+ dctl_data_t dctl = {.d32=0}; -+ volatile uint32_t *addr = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl); -+ -+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { -+ if (pcd->remote_wakeup_enable) { -+ if (set) { -+ dctl.b.rmtwkupsig = 1; -+ dwc_modify_reg32(addr, 0, dctl.d32); -+ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n"); -+ mdelay(1); -+ dwc_modify_reg32(addr, dctl.d32, 0); -+ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n"); -+ } -+ else { -+ } -+ } -+ else { -+ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n"); -+ } -+ } -+ return; -+} -+ -+/** -+ * Initiates Session Request Protocol (SRP) to wakeup the host if no -+ * session is in progress. If a session is already in progress, but -+ * the device is suspended, remote wakeup signaling is started. -+ * -+ */ -+static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget) -+{ -+ unsigned long flags; -+ dwc_otg_pcd_t *pcd; -+ dsts_data_t dsts; -+ gotgctl_data_t gotgctl; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); -+ -+ if (gadget == 0) { -+ return -ENODEV; -+ } -+ else { -+ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); -+ } -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ -+ /* -+ * This function starts the Protocol if no session is in progress. If -+ * a session is already in progress, but the device is suspended, -+ * remote wakeup signaling is started. -+ */ -+ -+ /* Check if valid session */ -+ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl)); -+ if (gotgctl.b.bsesvld) { -+ /* Check if suspend state */ -+ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts)); -+ if (dsts.b.suspsts) { -+ dwc_otg_pcd_remote_wakeup(pcd, 1); -+ } -+ } -+ else { -+ dwc_otg_pcd_initiate_srp(pcd); -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return 0; -+} -+ -+static const struct usb_gadget_ops dwc_otg_pcd_ops = -+{ -+ .get_frame = dwc_otg_pcd_get_frame, -+ .wakeup = dwc_otg_pcd_wakeup, -+ // current versions must always be self-powered -+}; -+ -+/** -+ * This function updates the otg values in the gadget structure. 
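dwc_otg_pcd_wakeup() above chooses between two mechanisms: with a valid B-session and the bus suspended it signals remote wakeup, with no session it raises a Session Request (SRP), and with a valid but non-suspended session it does nothing. A minimal standalone restatement of that decision, with invented enum names for the three outcomes:

/* Illustrative only: the wakeup decision from dwc_otg_pcd_wakeup(). */
#include <stdbool.h>
#include <stdio.h>

enum wake_action { WAKE_REMOTE_WAKEUP, WAKE_START_SRP, WAKE_NOTHING };

static enum wake_action pick_wakeup(bool b_session_valid, bool bus_suspended)
{
    if (b_session_valid)
        /* Session already up: signaling only helps if the bus is suspended. */
        return bus_suspended ? WAKE_REMOTE_WAKEUP : WAKE_NOTHING;

    return WAKE_START_SRP;           /* no session: ask the host for one    */
}

int main(void)
{
    printf("%d\n", pick_wakeup(true, true));    /* remote wakeup            */
    printf("%d\n", pick_wakeup(true, false));   /* nothing to do            */
    printf("%d\n", pick_wakeup(false, false));  /* initiate SRP             */
    return 0;
}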
-+ */ -+void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset) -+{ -+ -+ if (!pcd->gadget.is_otg) -+ return; -+ -+ if (reset) { -+ pcd->b_hnp_enable = 0; -+ pcd->a_hnp_support = 0; -+ pcd->a_alt_hnp_support = 0; -+ } -+ -+ pcd->gadget.b_hnp_enable = pcd->b_hnp_enable; -+ pcd->gadget.a_hnp_support = pcd->a_hnp_support; -+ pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support; -+} -+ -+/** -+ * This function is the top level PCD interrupt handler. -+ */ -+static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev) -+{ -+ dwc_otg_pcd_t *pcd = dev; -+ int32_t retval = IRQ_NONE; -+ -+ retval = dwc_otg_pcd_handle_intr(pcd); -+ return IRQ_RETVAL(retval); -+} -+ -+/** -+ * PCD Callback function for initializing the PCD when switching to -+ * device mode. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_start_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ -+ /* -+ * Initialized the Core for Device mode. -+ */ -+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { -+ dwc_otg_core_dev_init(GET_CORE_IF(pcd)); -+ } -+ return 1; -+} -+ -+/** -+ * PCD Callback function for stopping the PCD when switching to Host -+ * mode. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_stop_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd); -+ -+ dwc_otg_pcd_stop(pcd); -+ return 1; -+} -+ -+ -+/** -+ * PCD Callback function for notifying the PCD when resuming from -+ * suspend. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_suspend_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ -+ if (pcd->driver && pcd->driver->resume) { -+ SPIN_UNLOCK(&pcd->lock); -+ pcd->driver->suspend(&pcd->gadget); -+ SPIN_LOCK(&pcd->lock); -+ } -+ -+ return 1; -+} -+ -+ -+/** -+ * PCD Callback function for notifying the PCD when resuming from -+ * suspend. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_resume_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ -+ if (pcd->driver && pcd->driver->resume) { -+ SPIN_UNLOCK(&pcd->lock); -+ pcd->driver->resume(&pcd->gadget); -+ SPIN_LOCK(&pcd->lock); -+ } -+ -+ /* Stop the SRP timeout timer. */ -+ if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS) || -+ (!GET_CORE_IF(pcd)->core_params->i2c_enable)) { -+ if (GET_CORE_IF(pcd)->srp_timer_started) { -+ GET_CORE_IF(pcd)->srp_timer_started = 0; -+ del_timer(&pcd->srp_timer); -+ } -+ } -+ return 1; -+} -+ -+ -+/** -+ * PCD Callback structure for handling mode switching. -+ */ -+static dwc_otg_cil_callbacks_t pcd_callbacks = -+{ -+ .start = dwc_otg_pcd_start_cb, -+ .stop = dwc_otg_pcd_stop_cb, -+ .suspend = dwc_otg_pcd_suspend_cb, -+ .resume_wakeup = dwc_otg_pcd_resume_cb, -+ .p = 0, /* Set at registration */ -+}; -+ -+/** -+ * This function is called when the SRP timer expires. The SRP should -+ * complete within 6 seconds. 
-+ */ -+static void srp_timeout(unsigned long ptr) -+{ -+ gotgctl_data_t gotgctl; -+ dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *)ptr; -+ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl; -+ -+ gotgctl.d32 = dwc_read_reg32(addr); -+ -+ core_if->srp_timer_started = 0; -+ -+ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && -+ (core_if->core_params->i2c_enable)) { -+ DWC_PRINT("SRP Timeout\n"); -+ -+ if ((core_if->srp_success) && -+ (gotgctl.b.bsesvld)) { -+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { -+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); -+ } -+ -+ /* Clear Session Request */ -+ gotgctl.d32 = 0; -+ gotgctl.b.sesreq = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, -+ gotgctl.d32, 0); -+ -+ core_if->srp_success = 0; -+ } -+ else { -+ DWC_ERROR("Device not connected/responding\n"); -+ gotgctl.b.sesreq = 0; -+ dwc_write_reg32(addr, gotgctl.d32); -+ } -+ } -+ else if (gotgctl.b.sesreq) { -+ DWC_PRINT("SRP Timeout\n"); -+ -+ DWC_ERROR("Device not connected/responding\n"); -+ gotgctl.b.sesreq = 0; -+ dwc_write_reg32(addr, gotgctl.d32); -+ } -+ else { -+ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32); -+ } -+} -+ -+/** -+ * Start the SRP timer to detect when the SRP does not complete within -+ * 6 seconds. -+ * -+ * @param pcd the pcd structure. -+ */ -+void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd) -+{ -+ struct timer_list *srp_timer = &pcd->srp_timer; -+ GET_CORE_IF(pcd)->srp_timer_started = 1; -+ init_timer(srp_timer); -+ srp_timer->function = srp_timeout; -+ srp_timer->data = (unsigned long)GET_CORE_IF(pcd); -+ srp_timer->expires = jiffies + (HZ*6); -+ add_timer(srp_timer); -+} -+ -+/** -+ * Tasklet -+ * -+ */ -+extern void start_next_request(dwc_otg_pcd_ep_t *ep); -+ -+static void start_xfer_tasklet_func (unsigned long data) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t*)data; -+ dwc_otg_core_if_t *core_if = pcd->otg_dev->core_if; -+ -+ int i; -+ depctl_data_t diepctl; -+ -+ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n"); -+ -+ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl); -+ -+ if (pcd->ep0.queue_sof) { -+ pcd->ep0.queue_sof = 0; -+ start_next_request (&pcd->ep0); -+ // break; -+ } -+ -+ for (i=0; idev_if->num_in_eps; i++) -+ { -+ depctl_data_t diepctl; -+ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl); -+ -+ if (pcd->in_ep[i].queue_sof) { -+ pcd->in_ep[i].queue_sof = 0; -+ start_next_request (&pcd->in_ep[i]); -+ // break; -+ } -+ } -+ -+ return; -+} -+ -+ -+ -+ -+ -+ -+ -+static struct tasklet_struct start_xfer_tasklet = { -+ .next = NULL, -+ .state = 0, -+ .count = ATOMIC_INIT(0), -+ .func = start_xfer_tasklet_func, -+ .data = 0, -+}; -+/** -+ * This function initialized the pcd Dp structures to there default -+ * state. -+ * -+ * @param pcd the pcd structure. 
-+ */ -+void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd) -+{ -+ static const char * names[] = -+ { -+ -+ "ep0", -+ "ep1in", -+ "ep2in", -+ "ep3in", -+ "ep4in", -+ "ep5in", -+ "ep6in", -+ "ep7in", -+ "ep8in", -+ "ep9in", -+ "ep10in", -+ "ep11in", -+ "ep12in", -+ "ep13in", -+ "ep14in", -+ "ep15in", -+ "ep1out", -+ "ep2out", -+ "ep3out", -+ "ep4out", -+ "ep5out", -+ "ep6out", -+ "ep7out", -+ "ep8out", -+ "ep9out", -+ "ep10out", -+ "ep11out", -+ "ep12out", -+ "ep13out", -+ "ep14out", -+ "ep15out" -+ -+ }; -+ -+ int i; -+ int in_ep_cntr, out_ep_cntr; -+ uint32_t hwcfg1; -+ uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps; -+ uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps; -+ dwc_otg_pcd_ep_t *ep; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); -+ -+ INIT_LIST_HEAD (&pcd->gadget.ep_list); -+ pcd->gadget.ep0 = &pcd->ep0.ep; -+ pcd->gadget.speed = USB_SPEED_UNKNOWN; -+ -+ INIT_LIST_HEAD (&pcd->gadget.ep0->ep_list); -+ -+ /** -+ * Initialize the EP0 structure. -+ */ -+ ep = &pcd->ep0; -+ -+ /* Init EP structure */ -+ ep->desc = 0; -+ ep->pcd = pcd; -+ ep->stopped = 1; -+ -+ /* Init DWC ep structure */ -+ ep->dwc_ep.num = 0; -+ ep->dwc_ep.active = 0; -+ ep->dwc_ep.tx_fifo_num = 0; -+ /* Control until ep is actvated */ -+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; -+ ep->dwc_ep.dma_addr = 0; -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = 0; -+ ep->queue_sof = 0; -+ ep->dwc_ep.desc_addr = 0; -+ ep->dwc_ep.dma_desc_addr = 0; -+ -+ ep->dwc_ep.aligned_buf=NULL; -+ ep->dwc_ep.aligned_buf_size=0; -+ ep->dwc_ep.aligned_dma_addr=0; -+ -+ -+ /* Init the usb_ep structure. */ -+ ep->ep.name = names[0]; -+ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; -+ -+ /** -+ * @todo NGS: What should the max packet size be set to -+ * here? Before EP type is set? -+ */ -+ ep->ep.maxpacket = MAX_PACKET_SIZE; -+ -+ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); -+ -+ INIT_LIST_HEAD (&ep->queue); -+ /** -+ * Initialize the EP structures. -+ */ -+ in_ep_cntr = 0; -+ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3; -+ -+ for (i = 1; in_ep_cntr < num_in_eps; i++) -+ { -+ if((hwcfg1 & 0x1) == 0) { -+ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr]; -+ in_ep_cntr ++; -+ -+ /* Init EP structure */ -+ ep->desc = 0; -+ ep->pcd = pcd; -+ ep->stopped = 1; -+ -+ /* Init DWC ep structure */ -+ ep->dwc_ep.is_in = 1; -+ ep->dwc_ep.num = i; -+ ep->dwc_ep.active = 0; -+ ep->dwc_ep.tx_fifo_num = 0; -+ -+ /* Control until ep is actvated */ -+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; -+ ep->dwc_ep.dma_addr = 0; -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = 0; -+ ep->queue_sof = 0; -+ ep->dwc_ep.desc_addr = 0; -+ ep->dwc_ep.dma_desc_addr = 0; -+ -+ /* Init the usb_ep structure. */ -+ ep->ep.name = names[i]; -+ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; -+ -+ /** -+ * @todo NGS: What should the max packet size be set to -+ * here? Before EP type is set? 
-+ */ -+ ep->ep.maxpacket = MAX_PACKET_SIZE; -+ -+ //add only even number ep as in -+ if((i%2)==1) -+ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); -+ -+ INIT_LIST_HEAD (&ep->queue); -+ } -+ hwcfg1 >>= 2; -+ } -+ -+ out_ep_cntr = 0; -+ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2; -+ -+ for (i = 1; out_ep_cntr < num_out_eps; i++) -+ { -+ if((hwcfg1 & 0x1) == 0) { -+ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr]; -+ out_ep_cntr++; -+ -+ /* Init EP structure */ -+ ep->desc = 0; -+ ep->pcd = pcd; -+ ep->stopped = 1; -+ -+ /* Init DWC ep structure */ -+ ep->dwc_ep.is_in = 0; -+ ep->dwc_ep.num = i; -+ ep->dwc_ep.active = 0; -+ ep->dwc_ep.tx_fifo_num = 0; -+ /* Control until ep is actvated */ -+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; -+ ep->dwc_ep.dma_addr = 0; -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = 0; -+ ep->queue_sof = 0; -+ -+ /* Init the usb_ep structure. */ -+ ep->ep.name = names[15 + i]; -+ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; -+ /** -+ * @todo NGS: What should the max packet size be set to -+ * here? Before EP type is set? -+ */ -+ ep->ep.maxpacket = MAX_PACKET_SIZE; -+ -+ //add only odd number ep as out -+ if((i%2)==0) -+ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); -+ -+ INIT_LIST_HEAD (&ep->queue); -+ } -+ hwcfg1 >>= 2; -+ } -+ -+ /* remove ep0 from the list. There is a ep0 pointer.*/ -+ list_del_init (&pcd->ep0.ep.ep_list); -+ -+ pcd->ep0state = EP0_DISCONNECT; -+ pcd->ep0.ep.maxpacket = MAX_EP0_SIZE; -+ pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE; -+ pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+} -+ -+/** -+ * This function releases the Gadget device. -+ * required by device_unregister(). -+ * -+ * @todo Should this do something? Should it free the PCD? -+ */ -+static void dwc_otg_pcd_gadget_release(struct device *dev) -+{ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, dev); -+} -+ -+ -+ -+/** -+ * This function initialized the PCD portion of the driver. -+ * -+ */ -+u8 dev_id[]="gadget"; -+int dwc_otg_pcd_init(struct platform_device *pdev) -+{ -+ static char pcd_name[] = "dwc_otg_pcd"; -+ dwc_otg_pcd_t *pcd; -+ dwc_otg_core_if_t* core_if; -+ dwc_otg_dev_if_t* dev_if; -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); -+ int retval = 0; -+ -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n",__func__, pdev); -+ /* -+ * Allocate PCD structure -+ */ -+ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL); -+ -+ if (pcd == 0) { -+ return -ENOMEM; -+ } -+ -+ memset(pcd, 0, sizeof(dwc_otg_pcd_t)); -+ spin_lock_init(&pcd->lock); -+ -+ otg_dev->pcd = pcd; -+ s_pcd = pcd; -+ pcd->gadget.name = pcd_name; -+ -+ pcd->gadget.dev.init_name = dev_id; -+ pcd->otg_dev = platform_get_drvdata(pdev); -+ -+ pcd->gadget.dev.parent = &pdev->dev; -+ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release; -+ pcd->gadget.ops = &dwc_otg_pcd_ops; -+ -+ core_if = GET_CORE_IF(pcd); -+ dev_if = core_if->dev_if; -+ -+ if(core_if->hwcfg4.b.ded_fifo_en) { -+ DWC_PRINT("Dedicated Tx FIFOs mode\n"); -+ } -+ else { -+ DWC_PRINT("Shared Tx FIFO mode\n"); -+ } -+ -+ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget -+ * should not report as dual-speed capable. replace the following line -+ * with the block of code below it once the software is debugged for -+ * this. If is_dualspeed = 0 then the gadget driver should not report -+ * a device qualifier descriptor when queried. 
*/ -+ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL) || -+ ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2) && -+ (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1) && -+ (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) { -+ pcd->gadget.is_dualspeed = 0; -+ } -+ else { -+ pcd->gadget.is_dualspeed = 1; -+ } -+ -+ if ((otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE) || -+ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST) || -+ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || -+ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { -+ pcd->gadget.is_otg = 0; -+ } -+ else { -+ pcd->gadget.is_otg = 1; -+ } -+ -+ -+ pcd->driver = 0; -+ /* Register the gadget device */ -+printk("%s: 1\n",__func__); -+ retval = device_register(&pcd->gadget.dev); -+ if (retval != 0) { -+ kfree (pcd); -+printk("%s: 2\n",__func__); -+ return retval; -+ } -+ -+ -+ /* -+ * Initialized the Core for Device mode. -+ */ -+ if (dwc_otg_is_device_mode(core_if)) { -+ dwc_otg_core_dev_init(core_if); -+ } -+ -+ /* -+ * Initialize EP structures -+ */ -+ dwc_otg_pcd_reinit(pcd); -+ -+ /* -+ * Register the PCD Callbacks. -+ */ -+ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks, -+ pcd); -+ /* -+ * Setup interupt handler -+ */ -+ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", otg_dev->irq); -+ retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, -+ IRQF_SHARED, pcd->gadget.name, pcd); -+ if (retval != 0) { -+ DWC_ERROR("request of irq%d failed\n", otg_dev->irq); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -EBUSY; -+ } -+ -+ /* -+ * Initialize the DMA buffer for SETUP packets -+ */ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ pcd->setup_pkt = dma_alloc_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, &pcd->setup_pkt_dma_handle, 0); -+ if (pcd->setup_pkt == 0) { -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ -+ pcd->status_buf = dma_alloc_coherent (NULL, sizeof (uint16_t), &pcd->status_buf_dma_handle, 0); -+ if (pcd->status_buf == 0) { -+ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ -+ if (GET_CORE_IF(pcd)->dma_desc_enable) { -+ dev_if->setup_desc_addr[0] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[0], 1); -+ dev_if->setup_desc_addr[1] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[1], 1); -+ dev_if->in_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_in_desc_addr, 1); -+ dev_if->out_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_out_desc_addr, 1); -+ -+ if(dev_if->setup_desc_addr[0] == 0 -+ || dev_if->setup_desc_addr[1] == 0 -+ || dev_if->in_desc_addr == 0 -+ || dev_if->out_desc_addr == 0 ) { -+ -+ if(dev_if->out_desc_addr) -+ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); -+ if(dev_if->in_desc_addr) -+ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); -+ if(dev_if->setup_desc_addr[1]) -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); -+ if(dev_if->setup_desc_addr[0]) -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); -+ -+ -+ dma_free_coherent(NULL, sizeof(*pcd->status_buf), pcd->status_buf, pcd->setup_pkt_dma_handle); -+ dma_free_coherent(NULL, 
sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); -+ -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ -+ return -ENOMEM; -+ } -+ } -+ } -+ else { -+ pcd->setup_pkt = kmalloc (sizeof (*pcd->setup_pkt) * 5, GFP_KERNEL); -+ if (pcd->setup_pkt == 0) { -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ -+ pcd->status_buf = kmalloc (sizeof (uint16_t), GFP_KERNEL); -+ if (pcd->status_buf == 0) { -+ kfree(pcd->setup_pkt); -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ } -+ -+ -+ /* Initialize tasklet */ -+ start_xfer_tasklet.data = (unsigned long)pcd; -+ pcd->start_xfer_tasklet = &start_xfer_tasklet; -+ -+ return 0; -+} -+ -+/** -+ * Cleanup the PCD. -+ */ -+void dwc_otg_pcd_remove(struct platform_device *pdev) -+{ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); -+ dwc_otg_pcd_t *pcd = otg_dev->pcd; -+ dwc_otg_dev_if_t* dev_if = GET_CORE_IF(pcd)->dev_if; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pdev); -+ -+ /* -+ * Free the IRQ -+ */ -+ free_irq(otg_dev->irq, pcd); -+ -+ /* start with the driver above us */ -+ if (pcd->driver) { -+ /* should have been done already by driver model core */ -+ DWC_WARN("driver '%s' is still registered\n", -+ pcd->driver->driver.name); -+ usb_gadget_unregister_driver(pcd->driver); -+ } -+ device_unregister(&pcd->gadget.dev); -+ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ dma_free_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle); -+ dma_free_coherent (NULL, sizeof (uint16_t), pcd->status_buf, pcd->status_buf_dma_handle); -+ if (GET_CORE_IF(pcd)->dma_desc_enable) { -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); -+ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); -+ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); -+ } -+ } -+ else { -+ kfree (pcd->setup_pkt); -+ kfree (pcd->status_buf); -+ } -+ -+ kfree(pcd); -+ otg_dev->pcd = 0; -+} -+ -+/** -+ * This function registers a gadget driver with the PCD. -+ * -+ * When a driver is successfully registered, it will receive control -+ * requests including set_configuration(), which enables non-control -+ * requests. then usb traffic follows until a disconnect is reported. -+ * then a host may connect again, or the driver might get unbound. 
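usb_gadget_probe_driver() below hooks the single gadget driver up to the PCD through a static pointer: it validates the driver's callbacks, refuses a second registration with -EBUSY, and undoes the hookup if bind() fails. The standalone sketch below models only that registration pattern with trimmed-down types; the real check also requires disconnect(), setup(), and a known speed.

/* Illustrative only: single-slot driver registration, as in the PCD below. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct sketch_driver {
    const char *name;
    int  (*bind)(void);     /* required callbacks, as in the real check     */
    void (*unbind)(void);
};

static struct sketch_driver *bound_driver;   /* stands in for s_pcd->driver */

static int sketch_register(struct sketch_driver *drv)
{
    if (!drv || !drv->bind || !drv->unbind)
        return -EINVAL;              /* incomplete driver                   */
    if (bound_driver)
        return -EBUSY;               /* only one gadget driver at a time    */

    bound_driver = drv;              /* hook up first, as the PCD does      */
    int ret = drv->bind();
    if (ret) {
        bound_driver = NULL;         /* bind failed: undo the hookup        */
        return ret;
    }
    return 0;
}

static int ok_bind(void) { return 0; }
static void ok_unbind(void) { }

int main(void)
{
    struct sketch_driver d = { "demo", ok_bind, ok_unbind };
    printf("%d\n", sketch_register(&d));   /* 0                             */
    printf("%d\n", sketch_register(&d));   /* -EBUSY: already registered    */
    return 0;
}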
-+ * -+ * @param driver The driver being registered -+ */ -+int usb_gadget_probe_driver(struct usb_gadget_driver *driver, -+ int (*bind)(struct usb_gadget *)) -+{ -+ int retval; -+ -+ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n", driver->driver.name); -+ -+ if (!driver || driver->speed == USB_SPEED_UNKNOWN || -+ !bind || -+ !driver->unbind || -+ !driver->disconnect || -+ !driver->setup) { -+ DWC_DEBUGPL(DBG_PCDV,"EINVAL\n"); -+ return -EINVAL; -+ } -+ if (s_pcd == 0) { -+ DWC_DEBUGPL(DBG_PCDV,"ENODEV\n"); -+ return -ENODEV; -+ } -+ if (s_pcd->driver != 0) { -+ DWC_DEBUGPL(DBG_PCDV,"EBUSY (%p)\n", s_pcd->driver); -+ return -EBUSY; -+ } -+ -+ /* hook up the driver */ -+ s_pcd->driver = driver; -+ s_pcd->gadget.dev.driver = &driver->driver; -+ -+ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name); -+ retval = bind(&s_pcd->gadget); -+ if (retval) { -+ DWC_ERROR("bind to driver %s --> error %d\n", -+ driver->driver.name, retval); -+ s_pcd->driver = 0; -+ s_pcd->gadget.dev.driver = 0; -+ return retval; -+ } -+ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n", -+ driver->driver.name); -+ return 0; -+} -+ -+EXPORT_SYMBOL(usb_gadget_probe_driver); -+ -+/** -+ * This function unregisters a gadget driver -+ * -+ * @param driver The driver being unregistered -+ */ -+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) -+{ -+ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver); -+ -+ if (s_pcd == 0) { -+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__, -+ -ENODEV); -+ return -ENODEV; -+ } -+ if (driver == 0 || driver != s_pcd->driver) { -+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__, -+ -EINVAL); -+ return -EINVAL; -+ } -+ -+ driver->unbind(&s_pcd->gadget); -+ s_pcd->driver = 0; -+ -+ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", -+ driver->driver.name); -+ return 0; -+} -+EXPORT_SYMBOL(usb_gadget_unregister_driver); -+ -+#endif /* DWC_HOST_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_pcd.h -@@ -0,0 +1,292 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.h $ -+ * $Revision: #36 $ -+ * $Date: 2008/09/26 $ -+ * $Change: 1103515 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_HOST_ONLY -+#if !defined(__DWC_PCD_H__) -+#define __DWC_PCD_H__ -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+ -+struct dwc_otg_device; -+ -+#include "otg_cil.h" -+ -+/** -+ * @file -+ * -+ * This file contains the structures, constants, and interfaces for -+ * the Perpherial Contoller Driver (PCD). -+ * -+ * The Peripheral Controller Driver (PCD) for Linux will implement the -+ * Gadget API, so that the existing Gadget drivers can be used. For -+ * the Mass Storage Function driver the File-backed USB Storage Gadget -+ * (FBS) driver will be used. The FBS driver supports the -+ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only -+ * transports. -+ * -+ */ -+ -+/** Invalid DMA Address */ -+#define DMA_ADDR_INVALID (~(dma_addr_t)0) -+/** Maxpacket size for EP0 */ -+#define MAX_EP0_SIZE 64 -+/** Maxpacket size for any EP */ -+#define MAX_PACKET_SIZE 1024 -+ -+/** Max Transfer size for any EP */ -+#define MAX_TRANSFER_SIZE 65535 -+ -+/** Max DMA Descriptor count for any EP */ -+#define MAX_DMA_DESC_CNT 64 -+ -+/** -+ * Get the pointer to the core_if from the pcd pointer. -+ */ -+#define GET_CORE_IF( _pcd ) (_pcd->otg_dev->core_if) -+ -+/** -+ * States of EP0. -+ */ -+typedef enum ep0_state -+{ -+ EP0_DISCONNECT, /* no host */ -+ EP0_IDLE, -+ EP0_IN_DATA_PHASE, -+ EP0_OUT_DATA_PHASE, -+ EP0_IN_STATUS_PHASE, -+ EP0_OUT_STATUS_PHASE, -+ EP0_STALL, -+} ep0state_e; -+ -+/** Fordward declaration.*/ -+struct dwc_otg_pcd; -+ -+/** DWC_otg iso request structure. -+ * -+ */ -+typedef struct usb_iso_request dwc_otg_pcd_iso_request_t; -+ -+/** PCD EP structure. -+ * This structure describes an EP, there is an array of EPs in the PCD -+ * structure. -+ */ -+typedef struct dwc_otg_pcd_ep -+{ -+ /** USB EP data */ -+ struct usb_ep ep; -+ /** USB EP Descriptor */ -+ const struct usb_endpoint_descriptor *desc; -+ -+ /** queue of dwc_otg_pcd_requests. */ -+ struct list_head queue; -+ unsigned stopped : 1; -+ unsigned disabling : 1; -+ unsigned dma : 1; -+ unsigned queue_sof : 1; -+ -+#ifdef DWC_EN_ISOC -+ /** DWC_otg Isochronous Transfer */ -+ struct usb_iso_request* iso_req; -+#endif //DWC_EN_ISOC -+ -+ /** DWC_otg ep data. */ -+ dwc_ep_t dwc_ep; -+ -+ /** Pointer to PCD */ -+ struct dwc_otg_pcd *pcd; -+}dwc_otg_pcd_ep_t; -+ -+ -+ -+/** DWC_otg PCD Structure. -+ * This structure encapsulates the data for the dwc_otg PCD. -+ */ -+typedef struct dwc_otg_pcd -+{ -+ /** USB gadget */ -+ struct usb_gadget gadget; -+ /** USB gadget driver pointer*/ -+ struct usb_gadget_driver *driver; -+ /** The DWC otg device pointer. */ -+ struct dwc_otg_device *otg_dev; -+ -+ /** State of EP0 */ -+ ep0state_e ep0state; -+ /** EP0 Request is pending */ -+ unsigned ep0_pending : 1; -+ /** Indicates when SET CONFIGURATION Request is in process */ -+ unsigned request_config : 1; -+ /** The state of the Remote Wakeup Enable. 
*/ -+ unsigned remote_wakeup_enable : 1; -+ /** The state of the B-Device HNP Enable. */ -+ unsigned b_hnp_enable : 1; -+ /** The state of A-Device HNP Support. */ -+ unsigned a_hnp_support : 1; -+ /** The state of the A-Device Alt HNP support. */ -+ unsigned a_alt_hnp_support : 1; -+ /** Count of pending Requests */ -+ unsigned request_pending; -+ -+ /** SETUP packet for EP0 -+ * This structure is allocated as a DMA buffer on PCD initialization -+ * with enough space for up to 3 setup packets. -+ */ -+ union -+ { -+ struct usb_ctrlrequest req; -+ uint32_t d32[2]; -+ } *setup_pkt; -+ -+ dma_addr_t setup_pkt_dma_handle; -+ -+ /** 2-byte dma buffer used to return status from GET_STATUS */ -+ uint16_t *status_buf; -+ dma_addr_t status_buf_dma_handle; -+ -+ /** EP0 */ -+ dwc_otg_pcd_ep_t ep0; -+ -+ /** Array of IN EPs. */ -+ dwc_otg_pcd_ep_t in_ep[ MAX_EPS_CHANNELS - 1]; -+ /** Array of OUT EPs. */ -+ dwc_otg_pcd_ep_t out_ep[ MAX_EPS_CHANNELS - 1]; -+ /** number of valid EPs in the above array. */ -+// unsigned num_eps : 4; -+ spinlock_t lock; -+ /** Timer for SRP. If it expires before SRP is successful -+ * clear the SRP. */ -+ struct timer_list srp_timer; -+ -+ /** Tasklet to defer starting of TEST mode transmissions until -+ * Status Phase has been completed. -+ */ -+ struct tasklet_struct test_mode_tasklet; -+ -+ /** Tasklet to delay starting of xfer in DMA mode */ -+ struct tasklet_struct *start_xfer_tasklet; -+ -+ /** The test mode to enter when the tasklet is executed. */ -+ unsigned test_mode; -+ -+} dwc_otg_pcd_t; -+ -+ -+/** DWC_otg request structure. -+ * This structure is a list of requests. -+ */ -+typedef struct -+{ -+ struct usb_request req; /**< USB Request. */ -+ struct list_head queue; /**< queue of these requests. */ -+} dwc_otg_pcd_request_t; -+ -+ -+extern int dwc_otg_pcd_init(struct platform_device *pdev); -+ -+//extern void dwc_otg_pcd_remove( struct dwc_otg_device *_otg_dev ); -+extern void dwc_otg_pcd_remove( struct platform_device *pdev ); -+extern int32_t dwc_otg_pcd_handle_intr( dwc_otg_pcd_t *pcd ); -+extern void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd ); -+ -+extern void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd); -+extern void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set); -+ -+extern void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req); -+extern void dwc_otg_request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *req, -+ int status); -+extern void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *_ep); -+extern void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *_pcd, -+ const unsigned reset); -+#ifndef VERBOSE -+#define VERIFY_PCD_DMA_ADDR(_addr_) BUG_ON(((_addr_)==DMA_ADDR_INVALID)||\ -+ ((_addr_)==0)||\ -+ ((_addr_)&0x3)) -+#else -+#define VERIFY_PCD_DMA_ADDR(_addr_) {\ -+ if(((_addr_)==DMA_ADDR_INVALID)||\ -+ ((_addr_)==0)||\ -+ ((_addr_)&0x3)) {\ -+ printk("%s: Invalid DMA address "#_addr_"(%.8x)\n",__func__,_addr_);\ -+ BUG();\ -+ }\ -+ } -+#endif -+ -+ -+static inline void ep_check_and_patch_dma_addr(dwc_otg_pcd_ep_t *ep){ -+//void ep_check_and_patch_dma_addr(dwc_otg_pcd_ep_t *ep){ -+ dwc_ep_t *dwc_ep=&ep->dwc_ep; -+ -+DWC_DEBUGPL(DBG_PCDV,"%s: dwc_ep xfer_buf=%.8x, total_len=%d, dma_addr=%.8x\n",__func__,(u32)dwc_ep->xfer_buff,(dwc_ep->total_len),dwc_ep->dma_addr); -+ if (/*(core_if->dma_enable)&&*/(dwc_ep->dma_addr==DMA_ADDR_INVALID)) { -+ if((((u32)dwc_ep->xfer_buff)&0x3)==0){ -+ dwc_ep->dma_addr=dma_map_single(NULL,(void *)(dwc_ep->start_xfer_buff),(dwc_ep->total_len), DMA_TO_DEVICE); -+DWC_DEBUGPL(DBG_PCDV," got 
dma_addr=%.8x\n",dwc_ep->dma_addr); -+ }else{ -+DWC_DEBUGPL(DBG_PCDV," buf not aligned, use aligned_buf instead. xfer_buf=%.8x, total_len=%d, aligned_buf_size=%d\n",(u32)dwc_ep->xfer_buff,(dwc_ep->total_len),dwc_ep->aligned_buf_size); -+ if(dwc_ep->aligned_buf_sizetotal_len){ -+ if(dwc_ep->aligned_buf){ -+//printk(" free buff dwc_ep aligned_buf_size=%d, aligned_buf(%.8x), aligned_dma_addr(%.8x));\n",dwc_ep->aligned_buf_size,dwc_ep->aligned_buf,dwc_ep->aligned_dma_addr); -+ //dma_free_coherent(NULL,dwc_ep->aligned_buf_size,dwc_ep->aligned_buf,dwc_ep->aligned_dma_addr); -+ kfree(dwc_ep->aligned_buf); -+ } -+ dwc_ep->aligned_buf_size=((1<<20)>(dwc_ep->total_len<<1))?(dwc_ep->total_len<<1):(1<<20); -+ //dwc_ep->aligned_buf = dma_alloc_coherent (NULL, dwc_ep->aligned_buf_size, &dwc_ep->aligned_dma_addr, GFP_KERNEL|GFP_DMA); -+ dwc_ep->aligned_buf=kmalloc(dwc_ep->aligned_buf_size,GFP_KERNEL|GFP_DMA|GFP_ATOMIC); -+ dwc_ep->aligned_dma_addr=dma_map_single(NULL,(void *)(dwc_ep->aligned_buf),(dwc_ep->aligned_buf_size),DMA_FROM_DEVICE); -+ if(!dwc_ep->aligned_buf){ -+ DWC_ERROR("Cannot alloc required buffer!!\n"); -+ BUG(); -+ } -+DWC_DEBUGPL(DBG_PCDV," dwc_ep allocated aligned buf=%.8x, dma_addr=%.8x, size=%d(0x%x)\n", (u32)dwc_ep->aligned_buf, dwc_ep->aligned_dma_addr, dwc_ep->aligned_buf_size, dwc_ep->aligned_buf_size); -+ } -+ dwc_ep->dma_addr=dwc_ep->aligned_dma_addr; -+ if(dwc_ep->is_in) { -+ memcpy(dwc_ep->aligned_buf,dwc_ep->xfer_buff,dwc_ep->total_len); -+ dma_sync_single_for_device(NULL,dwc_ep->dma_addr,dwc_ep->total_len,DMA_TO_DEVICE); -+ } -+ } -+ } -+} -+ -+#endif -+#endif /* DWC_HOST_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_pcd_intr.c -@@ -0,0 +1,3682 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $ -+ * $Revision: #83 $ -+ * $Date: 2008/10/14 $ -+ * $Change: 1115682 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_HOST_ONLY -+#include -+#include -+#include -+#include -+ -+#include "otg_driver.h" -+#include "otg_pcd.h" -+ -+ -+#define DEBUG_EP0 -+ -+ -+/* request functions defined in "dwc_otg_pcd.c" */ -+ -+/** @file -+ * This file contains the implementation of the PCD Interrupt handlers. -+ * -+ * The PCD handles the device interrupts. Many conditions can cause a -+ * device interrupt. When an interrupt occurs, the device interrupt -+ * service routine determines the cause of the interrupt and -+ * dispatches handling to the appropriate function. These interrupt -+ * handling functions are described below. -+ * All interrupt registers are processed from LSB to MSB. -+ */ -+ -+ -+/** -+ * This function prints the ep0 state for debug purposes. -+ */ -+static inline void print_ep0_state(dwc_otg_pcd_t *pcd) -+{ -+#ifdef DEBUG -+ char str[40]; -+ -+ switch (pcd->ep0state) { -+ case EP0_DISCONNECT: -+ strcpy(str, "EP0_DISCONNECT"); -+ break; -+ case EP0_IDLE: -+ strcpy(str, "EP0_IDLE"); -+ break; -+ case EP0_IN_DATA_PHASE: -+ strcpy(str, "EP0_IN_DATA_PHASE"); -+ break; -+ case EP0_OUT_DATA_PHASE: -+ strcpy(str, "EP0_OUT_DATA_PHASE"); -+ break; -+ case EP0_IN_STATUS_PHASE: -+ strcpy(str,"EP0_IN_STATUS_PHASE"); -+ break; -+ case EP0_OUT_STATUS_PHASE: -+ strcpy(str,"EP0_OUT_STATUS_PHASE"); -+ break; -+ case EP0_STALL: -+ strcpy(str,"EP0_STALL"); -+ break; -+ default: -+ strcpy(str,"EP0_INVALID"); -+ } -+ -+ DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state); -+#endif -+} -+ -+/** -+ * This function returns pointer to in ep struct with number ep_num -+ */ -+static inline dwc_otg_pcd_ep_t* get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) -+{ -+ int i; -+ int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; -+ if(ep_num == 0) { -+ return &pcd->ep0; -+ } -+ else { -+ for(i = 0; i < num_in_eps; ++i) -+ { -+ if(pcd->in_ep[i].dwc_ep.num == ep_num) -+ return &pcd->in_ep[i]; -+ } -+ return 0; -+ } -+} -+/** -+ * This function returns pointer to out ep struct with number ep_num -+ */ -+static inline dwc_otg_pcd_ep_t* get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) -+{ -+ int i; -+ int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; -+ if(ep_num == 0) { -+ return &pcd->ep0; -+ } -+ else { -+ for(i = 0; i < num_out_eps; ++i) -+ { -+ if(pcd->out_ep[i].dwc_ep.num == ep_num) -+ return &pcd->out_ep[i]; -+ } -+ return 0; -+ } -+} -+/** -+ * This functions gets a pointer to an EP from the wIndex address -+ * value of the control request. 
-+ */ -+static dwc_otg_pcd_ep_t *get_ep_by_addr (dwc_otg_pcd_t *pcd, u16 wIndex) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ -+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) -+ return &pcd->ep0; -+ list_for_each_entry(ep, &pcd->gadget.ep_list, ep.ep_list) -+ { -+ u8 bEndpointAddress; -+ -+ if (!ep->desc) -+ continue; -+ -+ bEndpointAddress = ep->desc->bEndpointAddress; -+ if((wIndex & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK)) -+ == (bEndpointAddress & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK))) -+ return ep; -+ } -+ return NULL; -+} -+ -+/** -+ * This function checks the EP request queue, if the queue is not -+ * empty the next request is started. -+ */ -+void start_next_request(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_pcd_request_t *req = 0; -+ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; -+ if (!list_empty(&ep->queue)) { -+ req = list_entry(ep->queue.next, -+ dwc_otg_pcd_request_t, queue); -+ -+ /* Setup and start the Transfer */ -+ ep->dwc_ep.dma_addr = req->req.dma; -+ ep->dwc_ep.start_xfer_buff = req->req.buf; -+ ep->dwc_ep.xfer_buff = req->req.buf; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = req->req.length; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ -+ if(max_transfer > MAX_TRANSFER_SIZE) { -+ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); -+ } else { -+ ep->dwc_ep.maxxfer = max_transfer; -+ } -+ -+ if(req->req.zero) { -+ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) -+ && (ep->dwc_ep.total_len != 0)) { -+ ep->dwc_ep.sent_zlp = 1; -+ } -+ -+ } -+ ep_check_and_patch_dma_addr(ep); -+ dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep); -+ } -+} -+ -+/** -+ * This function handles the SOF Interrupts. At this time the SOF -+ * Interrupt is disabled. -+ */ -+int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_PCD, "SOF\n"); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.sofintr = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+ -+/** -+ * This function handles the Rx Status Queue Level Interrupt, which -+ * indicates that there is a least one packet in the Rx FIFO. The -+ * packets are moved from the FIFO to memory, where they will be -+ * processed when the Endpoint Interrupt Register indicates Transfer -+ * Complete or SETUP Phase Done. 
-+ * -+ * Repeat the following until the Rx Status Queue is empty: -+ * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet -+ * info -+ * -# If Receive FIFO is empty then skip to step Clear the interrupt -+ * and exit -+ * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the -+ * SETUP data to the buffer -+ * -# If OUT Data Packet call dwc_otg_read_packet to copy the data -+ * to the destination buffer -+ */ -+int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ gintmsk_data_t gintmask = {.d32=0}; -+ device_grxsts_data_t status; -+ dwc_otg_pcd_ep_t *ep; -+ gintsts_data_t gintsts; -+#ifdef DEBUG -+ static char *dpid_str[] ={ "D0", "D2", "D1", "MDATA" }; -+#endif -+ -+ //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); -+ /* Disable the Rx Status Queue Level interrupt */ -+ gintmask.b.rxstsqlvl= 1; -+ dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0); -+ -+ /* Get the Status from the top of the FIFO */ -+ status.d32 = dwc_read_reg32(&global_regs->grxstsp); -+ -+ DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s " -+ "pktsts:%x Frame:%d(0x%0x)\n", -+ status.b.epnum, status.b.bcnt, -+ dpid_str[status.b.dpid], -+ status.b.pktsts, status.b.fn, status.b.fn); -+ /* Get pointer to EP structure */ -+ ep = get_out_ep(pcd, status.b.epnum); -+ -+ switch (status.b.pktsts) { -+ case DWC_DSTS_GOUT_NAK: -+ DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n"); -+ break; -+ case DWC_STS_DATA_UPDT: -+ DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n"); -+ if (status.b.bcnt && ep->dwc_ep.xfer_buff) { -+ /** @todo NGS Check for buffer overflow? */ -+ dwc_otg_read_packet(core_if, -+ ep->dwc_ep.xfer_buff, -+ status.b.bcnt); -+ ep->dwc_ep.xfer_count += status.b.bcnt; -+ ep->dwc_ep.xfer_buff += status.b.bcnt; -+ } -+ break; -+ case DWC_STS_XFER_COMP: -+ DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n"); -+ break; -+ case DWC_DSTS_SETUP_COMP: -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n"); -+#endif -+ break; -+case DWC_DSTS_SETUP_UPDT: -+ dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32); -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, -+ "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n", -+ pcd->setup_pkt->req.bRequestType, -+ pcd->setup_pkt->req.bRequest, -+ pcd->setup_pkt->req.wValue, -+ pcd->setup_pkt->req.wIndex, -+ pcd->setup_pkt->req.wLength); -+#endif -+ ep->dwc_ep.xfer_count += status.b.bcnt; -+ break; -+ default: -+ DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n", -+ status.b.pktsts); -+ break; -+ } -+ -+ /* Enable the Rx Status Queue Level interrupt */ -+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32); -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.rxstsqlvl = 1; -+ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); -+ -+ //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); -+ return 1; -+} -+/** -+ * This function examines the Device IN Token Learning Queue to -+ * determine the EP number of the last IN token received. This -+ * implementation is for the Mass Storage device where there are only -+ * 2 IN EPs (Control-IN and BULK-IN). -+ * -+ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there -+ * are 8 EP Numbers in each of the other possible DTKNQ Registers. -+ * -+ * @param core_if Programming view of DWC_otg controller. 
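A short worked example of the lookup get_ep_of_last_in_token() performs below (illustrative only; the register snapshot is hypothetical): if DTKNQR1.INTknWPtr reads 3, the most recent IN token is entry 2, so its endpoint number is the third 4-bit nibble of the epnums0_5 field:

	dtknq1_data_t dtknqr1;
	int ndx, epnum;

	dtknqr1.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dtknqr1);
	ndx   = dtknqr1.b.intknwptr - 1;                  /* 3 - 1 = 2              */
	epnum = (dtknqr1.b.epnums0_5 >> (ndx * 4)) & 0xF; /* nibble 2 = bits [11:8] */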
-+ * -+ */ -+static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_device_global_regs_t *dev_global_regs = -+ core_if->dev_if->dev_global_regs; -+ const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth; -+ /* Number of Token Queue Registers */ -+ const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8; -+ dtknq1_data_t dtknqr1; -+ uint32_t in_tkn_epnums[4]; -+ int ndx = 0; -+ int i = 0; -+ volatile uint32_t *addr = &dev_global_regs->dtknqr1; -+ int epnum = 0; -+ -+ //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); -+ -+ /* Read the DTKNQ Registers */ -+ for (i = 0; i < DTKNQ_REG_CNT; i++) -+ { -+ in_tkn_epnums[ i ] = dwc_read_reg32(addr); -+ DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i+1, -+ in_tkn_epnums[i]); -+ if (addr == &dev_global_regs->dvbusdis) { -+ addr = &dev_global_regs->dtknqr3_dthrctl; -+ } -+ else { -+ ++addr; -+ } -+ } -+ -+ /* Copy the DTKNQR1 data to the bit field. */ -+ dtknqr1.d32 = in_tkn_epnums[0]; -+ /* Get the EP numbers */ -+ in_tkn_epnums[0] = dtknqr1.b.epnums0_5; -+ ndx = dtknqr1.b.intknwptr - 1; -+ -+ //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); -+ if (ndx == -1) { -+ /** @todo Find a simpler way to calculate the max -+ * queue position.*/ -+ int cnt = TOKEN_Q_DEPTH; -+ if (TOKEN_Q_DEPTH <= 6) { -+ cnt = TOKEN_Q_DEPTH - 1; -+ } -+ else if (TOKEN_Q_DEPTH <= 14) { -+ cnt = TOKEN_Q_DEPTH - 7; -+ } -+ else if (TOKEN_Q_DEPTH <= 22) { -+ cnt = TOKEN_Q_DEPTH - 15; -+ } -+ else { -+ cnt = TOKEN_Q_DEPTH - 23; -+ } -+ epnum = (in_tkn_epnums[ DTKNQ_REG_CNT - 1 ] >> (cnt * 4)) & 0xF; -+ } -+ else { -+ if (ndx <= 5) { -+ epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF; -+ } -+ else if (ndx <= 13) { -+ ndx -= 6; -+ epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF; -+ } -+ else if (ndx <= 21) { -+ ndx -= 14; -+ epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF; -+ } -+ else if (ndx <= 29) { -+ ndx -= 22; -+ epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF; -+ } -+ } -+ //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); -+ return epnum; -+} -+ -+/** -+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. -+ * The active request is checked for the next packet to be loaded into -+ * the non-periodic Tx FIFO. -+ */ -+int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ dwc_otg_dev_in_ep_regs_t *ep_regs; -+ gnptxsts_data_t txstatus = {.d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ int epnum = 0; -+ dwc_otg_pcd_ep_t *ep = 0; -+ uint32_t len = 0; -+ int dwords; -+ -+ /* Get the epnum from the IN Token Learning Queue. 
*/ -+ epnum = get_ep_of_last_in_token(core_if); -+ ep = get_in_ep(pcd, epnum); -+ -+ DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); -+ ep_regs = core_if->dev_if->in_ep_regs[epnum]; -+ -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ dwords = (len + 3)/4; -+ -+ /* While there is space in the queue and space in the FIFO and -+ * More data to tranfer, Write packets to the Tx FIFO */ -+ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n",txstatus.d32); -+ -+ while (txstatus.b.nptxqspcavail > 0 && -+ txstatus.b.nptxfspcavail > dwords && -+ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) { -+ /* Write the FIFO */ -+ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_PCDV,"GNPTXSTS=0x%08x\n",txstatus.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", -+ dwc_read_reg32(&global_regs->gnptxsts)); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.nptxfempty = 1; -+ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This function is called when dedicated Tx FIFO Empty interrupt occurs. -+ * The active request is checked for the next packet to be loaded into -+ * apropriate Tx FIFO. -+ */ -+static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t* dev_if = core_if->dev_if; -+ dwc_otg_dev_in_ep_regs_t *ep_regs; -+ dtxfsts_data_t txstatus = {.d32 = 0}; -+ dwc_otg_pcd_ep_t *ep = 0; -+ uint32_t len = 0; -+ int dwords; -+ -+ ep = get_in_ep(pcd, epnum); -+ -+ DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); -+ -+ ep_regs = core_if->dev_if->in_ep_regs[epnum]; -+ -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ -+ /* While there is space in the queue and space in the FIFO and -+ * More data to tranfer, Write packets to the Tx FIFO */ -+ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,txstatus.d32); -+ -+ while (txstatus.b.txfspcavail > dwords && -+ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len && -+ ep->dwc_ep.xfer_len != 0) { -+ /* Write the FIFO */ -+ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); -+ -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts)); -+ -+ return 1; -+} -+ -+/** -+ * This function is called when the Device is disconnected. It stops -+ * any active requests and informs the Gadget driver of the -+ * disconnect. 
-+ */ -+void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd) -+{ -+ int i, num_in_eps, num_out_eps; -+ dwc_otg_pcd_ep_t *ep; -+ -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; -+ num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__); -+ /* don't disconnect drivers more than once */ -+ if (pcd->ep0state == EP0_DISCONNECT) { -+ DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__); -+ return; -+ } -+ pcd->ep0state = EP0_DISCONNECT; -+ -+ /* Reset the OTG state. */ -+ dwc_otg_pcd_update_otg(pcd, 1); -+ -+ /* Disable the NP Tx Fifo Empty Interrupt. */ -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Flush the FIFOs */ -+ /**@todo NGS Flush Periodic FIFOs */ -+ dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10); -+ dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd)); -+ -+ /* prevent new request submissions, kill any outstanding requests */ -+ ep = &pcd->ep0; -+ dwc_otg_request_nuke(ep); -+ /* prevent new request submissions, kill any outstanding requests */ -+ for (i = 0; i < num_in_eps; i++) -+ { -+ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i]; -+ dwc_otg_request_nuke(ep); -+ } -+ /* prevent new request submissions, kill any outstanding requests */ -+ for (i = 0; i < num_out_eps; i++) -+ { -+ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i]; -+ dwc_otg_request_nuke(ep); -+ } -+ -+ /* report disconnect; the driver is already quiesced */ -+ if (pcd->driver && pcd->driver->disconnect) { -+ SPIN_UNLOCK(&pcd->lock); -+ pcd->driver->disconnect(&pcd->gadget); -+ SPIN_LOCK(&pcd->lock); -+ } -+} -+ -+/** -+ * This interrupt indicates that ... -+ */ -+int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr"); -+ intr_mask.b.i2cintr = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.i2cintr = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ return 1; -+} -+ -+ -+/** -+ * This interrupt indicates that ... -+ */ -+int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintsts_data_t gintsts; -+#if defined(VERBOSE) -+ DWC_PRINT("Early Suspend Detected\n"); -+#endif -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.erlysuspend = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ return 1; -+} -+ -+/** -+ * This function configures EPO to receive SETUP packets. -+ * -+ * @todo NGS: Update the comments from the HW FS. -+ * -+ * -# Program the following fields in the endpoint specific registers -+ * for Control OUT EP 0, in order to receive a setup packet -+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back -+ * setup packets) -+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back -+ * to back setup packets) -+ * - In DMA mode, DOEPDMA0 Register with a memory address to -+ * store any setup packets received -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param pcd Programming view of the PCD. 
-+ */ -+static inline void ep0_out_start(dwc_otg_core_if_t *core_if, dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ deptsiz0_data_t doeptsize0 = { .d32 = 0}; -+ dwc_otg_dma_desc_t* dma_desc; -+ depctl_data_t doepctl = { .d32 = 0 }; -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV,"%s() doepctl0=%0x\n", __func__, -+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); -+#endif -+ -+ doeptsize0.b.supcnt = 3; -+ doeptsize0.b.pktcnt = 1; -+ doeptsize0.b.xfersize = 8*3; -+ -+ if (core_if->dma_enable) { -+ if (!core_if->dma_desc_enable) { -+ /** put here as for Hermes mode deptisz register should not be written */ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, -+ doeptsize0.d32); -+ -+ /** @todo dma needs to handle multiple setup packets (up to 3) */ -+ VERIFY_PCD_DMA_ADDR(pcd->setup_pkt_dma_handle); -+ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, -+ pcd->setup_pkt_dma_handle); -+ } else { -+ dev_if->setup_desc_index = (dev_if->setup_desc_index + 1) & 1; -+ dma_desc = dev_if->setup_desc_addr[dev_if->setup_desc_index]; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket; -+ dma_desc->buf = pcd->setup_pkt_dma_handle; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DOEPDMA0 Register write */ -+ VERIFY_PCD_DMA_ADDR(dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); -+ } -+ -+ } else { -+ /** put here as for Hermes mode deptisz register should not be written */ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, -+ doeptsize0.d32); -+ } -+ -+ /** DOEPCTL0 Register write */ -+ doepctl.b.epena = 1; -+ doepctl.b.cnak = 1; -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); -+ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); -+#endif -+} -+ -+/** -+ * This interrupt occurs when a USB Reset is detected. When the USB -+ * Reset Interrupt occurs the device state is set to DEFAULT and the -+ * EP0 state is set to IDLE. -+ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1) -+ * -# Unmask the following interrupt bits -+ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint) -+ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint) -+ * - DOEPMSK.SETUP = 1 -+ * - DOEPMSK.XferCompl = 1 -+ * - DIEPMSK.XferCompl = 1 -+ * - DIEPMSK.TimeOut = 1 -+ * -# Program the following fields in the endpoint specific registers -+ * for Control OUT EP 0, in order to receive a setup packet -+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back -+ * setup packets) -+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back -+ * to back setup packets) -+ * - In DMA mode, DOEPDMA0 Register with a memory address to -+ * store any setup packets received -+ * At this point, all the required initialization, except for enabling -+ * the control 0 OUT endpoint is done, for receiving SETUP packets. 
-+ */ -+int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ depctl_data_t doepctl = { .d32 = 0}; -+ -+ daint_data_t daintmsk = { .d32 = 0}; -+ doepmsk_data_t doepmsk = { .d32 = 0}; -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ -+ dcfg_data_t dcfg = { .d32=0 }; -+ grstctl_t resetctl = { .d32=0 }; -+ dctl_data_t dctl = {.d32=0}; -+ int i = 0; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("USB RESET\n"); -+#ifdef DWC_EN_ISOC -+ for(i = 1;i < 16; ++i) -+ { -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ ep = get_in_ep(pcd,i); -+ if(ep != 0){ -+ dwc_ep = &ep->dwc_ep; -+ dwc_ep->next_frame = 0xffffffff; -+ } -+ } -+#endif /* DWC_EN_ISOC */ -+ -+ /* reset the HNP settings */ -+ dwc_otg_pcd_update_otg(pcd, 1); -+ -+ /* Clear the Remote Wakeup Signalling */ -+ dctl.b.rmtwkupsig = 1; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, -+ dctl.d32, 0); -+ -+ /* Set NAK for all OUT EPs */ -+ doepctl.b.snak = 1; -+ for (i=0; i <= dev_if->num_out_eps; i++) -+ { -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, -+ doepctl.d32); -+ } -+ -+ /* Flush the NP Tx FIFO */ -+ dwc_otg_flush_tx_fifo(core_if, 0x10); -+ /* Flush the Learning Queue */ -+ resetctl.b.intknqflsh = 1; -+ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); -+ -+ if(core_if->multiproc_int_enable) { -+ daintmsk.b.inep0 = 1; -+ daintmsk.b.outep0 = 1; -+ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, daintmsk.d32); -+ -+ doepmsk.b.setup = 1; -+ doepmsk.b.xfercompl = 1; -+ doepmsk.b.ahberr = 1; -+ doepmsk.b.epdisabled = 1; -+ -+ if(core_if->dma_desc_enable) { -+ doepmsk.b.stsphsercvd = 1; -+ doepmsk.b.bna = 1; -+ } -+/* -+ doepmsk.b.babble = 1; -+ doepmsk.b.nyet = 1; -+ -+ if(core_if->dma_enable) { -+ doepmsk.b.nak = 1; -+ } -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], doepmsk.d32); -+ -+ diepmsk.b.xfercompl = 1; -+ diepmsk.b.timeout = 1; -+ diepmsk.b.epdisabled = 1; -+ diepmsk.b.ahberr = 1; -+ diepmsk.b.intknepmis = 1; -+ -+ if(core_if->dma_desc_enable) { -+ diepmsk.b.bna = 1; -+ } -+/* -+ if(core_if->dma_enable) { -+ diepmsk.b.nak = 1; -+ } -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], diepmsk.d32); -+ } else{ -+ daintmsk.b.inep0 = 1; -+ daintmsk.b.outep0 = 1; -+ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32); -+ -+ doepmsk.b.setup = 1; -+ doepmsk.b.xfercompl = 1; -+ doepmsk.b.ahberr = 1; -+ doepmsk.b.epdisabled = 1; -+ -+ if(core_if->dma_desc_enable) { -+ doepmsk.b.stsphsercvd = 1; -+ doepmsk.b.bna = 1; -+ } -+/* -+ doepmsk.b.babble = 1; -+ doepmsk.b.nyet = 1; -+ doepmsk.b.nak = 1; -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32); -+ -+ diepmsk.b.xfercompl = 1; -+ diepmsk.b.timeout = 1; -+ diepmsk.b.epdisabled = 1; -+ diepmsk.b.ahberr = 1; -+ diepmsk.b.intknepmis = 1; -+ -+ if(core_if->dma_desc_enable) { -+ diepmsk.b.bna = 1; -+ } -+ -+// diepmsk.b.nak = 1; -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32); -+ } -+ -+ /* Reset Device Address */ -+ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); -+ dcfg.b.devaddr = 0; -+ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); -+ -+ /* setup EP0 to receive SETUP packets */ -+ ep0_out_start(core_if, pcd); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.usbreset = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * Get the device speed from the device status 
register and convert it -+ * to USB speed constant. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static int get_device_speed(dwc_otg_core_if_t *core_if) -+{ -+ dsts_data_t dsts; -+ enum usb_device_speed speed = USB_SPEED_UNKNOWN; -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ switch (dsts.b.enumspd) { -+ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: -+ speed = USB_SPEED_HIGH; -+ break; -+ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: -+ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: -+ speed = USB_SPEED_FULL; -+ break; -+ -+ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: -+ speed = USB_SPEED_LOW; -+ break; -+ } -+ -+ return speed; -+} -+ -+/** -+ * Read the device status register and set the device speed in the -+ * data structure. -+ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate. -+ */ -+int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ gintsts_data_t gintsts; -+ gusbcfg_data_t gusbcfg; -+ dwc_otg_core_global_regs_t *global_regs = -+ GET_CORE_IF(pcd)->core_global_regs; -+ uint8_t utmi16b, utmi8b; -+// DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n"); -+ DWC_PRINT("SPEED ENUM\n"); -+ -+ if (GET_CORE_IF(pcd)->snpsid >= 0x4F54260A) { -+ utmi16b = 6; -+ utmi8b = 9; -+ } else { -+ utmi16b = 4; -+ utmi8b = 8; -+ } -+ dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep); -+ -+#ifdef DEBUG_EP0 -+ print_ep0_state(pcd); -+#endif -+ -+ if (pcd->ep0state == EP0_DISCONNECT) { -+ pcd->ep0state = EP0_IDLE; -+ } -+ else if (pcd->ep0state == EP0_STALL) { -+ pcd->ep0state = EP0_IDLE; -+ } -+ -+ pcd->ep0state = EP0_IDLE; -+ -+ ep0->stopped = 0; -+ -+ pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd)); -+ -+ /* Set USB turnaround time based on device speed and PHY interface. */ -+ gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ if (pcd->gadget.speed == USB_SPEED_HIGH) { -+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_ULPI) { -+ /* ULPI interface */ -+ gusbcfg.b.usbtrdtim = 9; -+ } -+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI) { -+ /* UTMI+ interface */ -+ if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) { -+ gusbcfg.b.usbtrdtim = utmi8b; -+ } -+ else if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 1) { -+ gusbcfg.b.usbtrdtim = utmi16b; -+ } -+ else if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 8) { -+ gusbcfg.b.usbtrdtim = utmi8b; -+ } -+ else { -+ gusbcfg.b.usbtrdtim = utmi16b; -+ } -+ } -+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) { -+ /* UTMI+ OR ULPI interface */ -+ if (gusbcfg.b.ulpi_utmi_sel == 1) { -+ /* ULPI interface */ -+ gusbcfg.b.usbtrdtim = 9; -+ } -+ else { -+ /* UTMI+ interface */ -+ if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 16) { -+ gusbcfg.b.usbtrdtim = utmi16b; -+ } -+ else { -+ gusbcfg.b.usbtrdtim = utmi8b; -+ } -+ } -+ } -+ } -+ else { -+ /* Full or low speed */ -+ gusbcfg.b.usbtrdtim = 9; -+ } -+ dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.enumdone = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that the ISO OUT Packet was dropped due to -+ * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs -+ * read all the data from the Rx FIFO. 
-+ */ -+int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "ISOC Out Dropped"); -+ -+ intr_mask.b.isooutdrop = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ -+ gintsts.d32 = 0; -+ gintsts.b.isooutdrop = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates the end of the portion of the micro-frame -+ * for periodic transactions. If there is a periodic transaction for -+ * the next frame, load the packets into the EP periodic Tx FIFO. -+ */ -+int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "EOP"); -+ -+ intr_mask.b.eopframe = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.eopframe = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that EP of the packet on the top of the -+ * non-periodic Tx FIFO does not match EP of the IN Token received. -+ * -+ * The "Device IN Token Queue" Registers are read to determine the -+ * order the IN Tokens have been received. The non-periodic Tx FIFO -+ * is flushed, so it can be reloaded in the order seen in the IN Token -+ * Queue. -+ */ -+int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.epmismatch = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This funcion stalls EP0. -+ */ -+static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req; -+ DWC_WARN("req %02x.%02x protocol STALL; err %d\n", -+ ctrl->bRequestType, ctrl->bRequest, err_val); -+ -+ ep0->dwc_ep.is_in = 1; -+ dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep); -+ pcd->ep0.stopped = 1; -+ pcd->ep0state = EP0_IDLE; -+ ep0_out_start(GET_CORE_IF(pcd), pcd); -+} -+ -+/** -+ * This functions delegates the setup command to the gadget driver. -+ */ -+static inline void do_gadget_setup(dwc_otg_pcd_t *pcd, -+ struct usb_ctrlrequest * ctrl) -+{ -+ int ret = 0; -+ if (pcd->driver && pcd->driver->setup) { -+ SPIN_UNLOCK(&pcd->lock); -+ ret = pcd->driver->setup(&pcd->gadget, ctrl); -+ SPIN_LOCK(&pcd->lock); -+ if (ret < 0) { -+ ep0_do_stall(pcd, ret); -+ } -+ -+ /** @todo This is a g_file_storage gadget driver specific -+ * workaround: a DELAYED_STATUS result from the fsg_setup -+ * routine will result in the gadget queueing a EP0 IN status -+ * phase for a two-stage control transfer. Exactly the same as -+ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class -+ * specific request. Need a generic way to know when the gadget -+ * driver will queue the status phase. Can we assume when we -+ * call the gadget driver setup() function that it will always -+ * queue and require the following flag? Need to look into -+ * this. 
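For context on the magic value tested just below (an observation, not part of the patch): the file-backed storage gadget of this era returns DELAYED_STATUS from fsg_setup(), and that constant is defined relative to its 256-byte EP0 buffer, so 256 + 999 is DELAYED_STATUS spelled out:

	/* drivers/usb/gadget/file_storage.c (g_file_storage) */
	#define EP0_BUFSIZE	256
	#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* "impossibly large" value */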
-+ */ -+ -+ if (ret == 256 + 999) { -+ pcd->request_config = 1; -+ } -+ } -+} -+ -+/** -+ * This function starts the Zero-Length Packet for the IN status phase -+ * of a 2 stage control transfer. -+ */ -+static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ if (pcd->ep0state == EP0_STALL) { -+ return; -+ } -+ -+ pcd->ep0state = EP0_IN_STATUS_PHASE; -+ -+ /* Prepare for more SETUP Packets */ -+ DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n"); -+ ep0->dwc_ep.xfer_len = 0; -+ ep0->dwc_ep.xfer_count = 0; -+ ep0->dwc_ep.is_in = 1; -+ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); -+ -+ /* Prepare for more SETUP Packets */ -+// if(GET_CORE_IF(pcd)->dma_enable == 0) ep0_out_start(GET_CORE_IF(pcd), pcd); -+} -+ -+/** -+ * This function starts the Zero-Length Packet for the OUT status phase -+ * of a 2 stage control transfer. -+ */ -+static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ if (pcd->ep0state == EP0_STALL) { -+ DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n"); -+ return; -+ } -+ pcd->ep0state = EP0_OUT_STATUS_PHASE; -+ -+ DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n"); -+ ep0->dwc_ep.xfer_len = 0; -+ ep0->dwc_ep.xfer_count = 0; -+ ep0->dwc_ep.is_in = 0; -+ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); -+ -+ /* Prepare for more SETUP Packets */ -+ if(GET_CORE_IF(pcd)->dma_enable == 0) { -+ ep0_out_start(GET_CORE_IF(pcd), pcd); -+ } -+} -+ -+/** -+ * Clear the EP halt (STALL) and if pending requests start the -+ * transfer. -+ */ -+static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep) -+{ -+ if(ep->dwc_ep.stall_clear_flag == 0) -+ dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep); -+ -+ /* Reactive the EP */ -+ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); -+ if (ep->stopped) { -+ ep->stopped = 0; -+ /* If there is a request in the EP queue start it */ -+ -+ /** @todo FIXME: this causes an EP mismatch in DMA mode. -+ * epmismatch not yet implemented. */ -+ -+ /* -+ * Above fixme is solved by implmenting a tasklet to call the -+ * start_next_request(), outside of interrupt context at some -+ * time after the current time, after a clear-halt setup packet. -+ * Still need to implement ep mismatch in the future if a gadget -+ * ever uses more than one endpoint at once -+ */ -+ ep->queue_sof = 1; -+ tasklet_schedule (pcd->start_xfer_tasklet); -+ } -+ /* Start Control Status Phase */ -+ do_setup_in_status_phase(pcd); -+} -+ -+/** -+ * This function is called when the SET_FEATURE TEST_MODE Setup packet -+ * is sent from the host. The Device Control register is written with -+ * the Test Mode bits set to the specified Test Mode. This is done as -+ * a tasklet so that the "Status" phase of the control transfer -+ * completes before transmitting the TEST packets. -+ * -+ * @todo This has not been tested since the tasklet struct was put -+ * into the PCD struct! 
-+ * -+ */ -+static void do_test_mode(unsigned long data) -+{ -+ dctl_data_t dctl; -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)data; -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ int test_mode = pcd->test_mode; -+ -+ -+// DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); -+ -+ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); -+ switch (test_mode) { -+ case 1: // TEST_J -+ dctl.b.tstctl = 1; -+ break; -+ -+ case 2: // TEST_K -+ dctl.b.tstctl = 2; -+ break; -+ -+ case 3: // TEST_SE0_NAK -+ dctl.b.tstctl = 3; -+ break; -+ -+ case 4: // TEST_PACKET -+ dctl.b.tstctl = 4; -+ break; -+ -+ case 5: // TEST_FORCE_ENABLE -+ dctl.b.tstctl = 5; -+ break; -+ } -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32); -+} -+ -+/** -+ * This function process the GET_STATUS Setup Commands. -+ */ -+static inline void do_get_status(dwc_otg_pcd_t *pcd) -+{ -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ uint16_t *status = pcd->status_buf; -+ -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, -+ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+#endif -+ -+ switch (ctrl.bRequestType & USB_RECIP_MASK) { -+ case USB_RECIP_DEVICE: -+ *status = 0x1; /* Self powered */ -+ *status |= pcd->remote_wakeup_enable << 1; -+ break; -+ -+ case USB_RECIP_INTERFACE: -+ *status = 0; -+ break; -+ -+ case USB_RECIP_ENDPOINT: -+ ep = get_ep_by_addr(pcd, ctrl.wIndex); -+ if (ep == 0 || ctrl.wLength > 2) { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ return; -+ } -+ /** @todo check for EP stall */ -+ *status = ep->stopped; -+ break; -+ } -+ pcd->ep0_pending = 1; -+ ep0->dwc_ep.start_xfer_buff = (uint8_t *)status; -+ ep0->dwc_ep.xfer_buff = (uint8_t *)status; -+ ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle; -+ ep0->dwc_ep.xfer_len = 2; -+ ep0->dwc_ep.xfer_count = 0; -+ ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len; -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); -+} -+/** -+ * This function process the SET_FEATURE Setup Commands. -+ */ -+static inline void do_set_feature(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep = 0; -+ int32_t otg_cap_param = core_if->core_params->otg_cap; -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+ DWC_DEBUGPL(DBG_PCD,"otg_cap=%d\n", otg_cap_param); -+ -+ -+ switch (ctrl.bRequestType & USB_RECIP_MASK) { -+ case USB_RECIP_DEVICE: -+ switch (ctrl.wValue) { -+ case USB_DEVICE_REMOTE_WAKEUP: -+ pcd->remote_wakeup_enable = 1; -+ break; -+ -+ case USB_DEVICE_TEST_MODE: -+ /* Setup the Test Mode tasklet to do the Test -+ * Packet generation after the SETUP Status -+ * phase has completed. */ -+ -+ /** @todo This has not been tested since the -+ * tasklet struct was put into the PCD -+ * struct! 
*/ -+ pcd->test_mode_tasklet.next = 0; -+ pcd->test_mode_tasklet.state = 0; -+ atomic_set(&pcd->test_mode_tasklet.count, 0); -+ pcd->test_mode_tasklet.func = do_test_mode; -+ pcd->test_mode_tasklet.data = (unsigned long)pcd; -+ pcd->test_mode = ctrl.wIndex >> 8; -+ tasklet_schedule(&pcd->test_mode_tasklet); -+ break; -+ -+ case USB_DEVICE_B_HNP_ENABLE: -+ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); -+ -+ /* dev may initiate HNP */ -+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { -+ pcd->b_hnp_enable = 1; -+ dwc_otg_pcd_update_otg(pcd, 0); -+ DWC_DEBUGPL(DBG_PCD, "Request B HNP\n"); -+ /**@todo Is the gotgctl.devhnpen cleared -+ * by a USB Reset? */ -+ gotgctl.b.devhnpen = 1; -+ gotgctl.b.hnpreq = 1; -+ dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32); -+ } -+ else { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ } -+ break; -+ -+ case USB_DEVICE_A_HNP_SUPPORT: -+ /* RH port supports HNP */ -+ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n"); -+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { -+ pcd->a_hnp_support = 1; -+ dwc_otg_pcd_update_otg(pcd, 0); -+ } -+ else { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ } -+ break; -+ -+ case USB_DEVICE_A_ALT_HNP_SUPPORT: -+ /* other RH port does */ -+ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); -+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { -+ pcd->a_alt_hnp_support = 1; -+ dwc_otg_pcd_update_otg(pcd, 0); -+ } -+ else { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ } -+ break; -+ } -+ do_setup_in_status_phase(pcd); -+ break; -+ -+ case USB_RECIP_INTERFACE: -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ -+ case USB_RECIP_ENDPOINT: -+ if (ctrl.wValue == USB_ENDPOINT_HALT) { -+ ep = get_ep_by_addr(pcd, ctrl.wIndex); -+ if (ep == 0) { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ return; -+ } -+ ep->stopped = 1; -+ dwc_otg_ep_set_stall(core_if, &ep->dwc_ep); -+ } -+ do_setup_in_status_phase(pcd); -+ break; -+ } -+} -+ -+/** -+ * This function process the CLEAR_FEATURE Setup Commands. -+ */ -+static inline void do_clear_feature(dwc_otg_pcd_t *pcd) -+{ -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep = 0; -+ -+ DWC_DEBUGPL(DBG_PCD, -+ "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+ -+ switch (ctrl.bRequestType & USB_RECIP_MASK) { -+ case USB_RECIP_DEVICE: -+ switch (ctrl.wValue) { -+ case USB_DEVICE_REMOTE_WAKEUP: -+ pcd->remote_wakeup_enable = 0; -+ break; -+ -+ case USB_DEVICE_TEST_MODE: -+ /** @todo Add CLEAR_FEATURE for TEST modes. */ -+ break; -+ } -+ do_setup_in_status_phase(pcd); -+ break; -+ -+ case USB_RECIP_ENDPOINT: -+ ep = get_ep_by_addr(pcd, ctrl.wIndex); -+ if (ep == 0) { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ return; -+ } -+ -+ pcd_clear_halt(pcd, ep); -+ -+ break; -+ } -+} -+ -+/** -+ * This function process the SET_ADDRESS Setup Commands. -+ */ -+static inline void do_set_address(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ -+ if (ctrl.bRequestType == USB_RECIP_DEVICE) { -+ dcfg_data_t dcfg = {.d32=0}; -+ -+#ifdef DEBUG_EP0 -+// DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); -+#endif -+ dcfg.b.devaddr = ctrl.wValue; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32); -+ do_setup_in_status_phase(pcd); -+ } -+} -+ -+/** -+ * This function processes SETUP commands. 
In Linux, the USB Command -+ * processing is done in two places - the first being the PCD and the -+ * second in the Gadget Driver (for example, the File-Backed Storage -+ * Gadget Driver). -+ *
-+ * Command            Driver         Description
-+ * GET_STATUS         PCD            Command is processed as defined in chapter 9 of the USB 2.0 Specification.
-+ * CLEAR_FEATURE      PCD            For Device and Endpoint requests the ENDPOINT_HALT feature is processed; all other requests, including Interface requests, are ignored.
-+ * SET_FEATURE        PCD            Device and Endpoint requests are processed by the PCD. Interface requests are passed to the Gadget Driver.
-+ * SET_ADDRESS        PCD            Program the DCFG register with the device address received.
-+ * GET_DESCRIPTOR     Gadget Driver  Return the requested descriptor.
-+ * SET_DESCRIPTOR     Gadget Driver  Optional - not implemented by any of the existing Gadget Drivers.
-+ * SET_CONFIGURATION  Gadget Driver  Disable all EPs and enable EPs for the new configuration.
-+ * GET_CONFIGURATION  Gadget Driver  Return the current configuration.
-+ * SET_INTERFACE      Gadget Driver  Disable all EPs and enable EPs for the new configuration.
-+ * GET_INTERFACE      Gadget Driver  Return the current interface.
-+ * SYNC_FRAME         PCD            Display debug message.
-+ * -+ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are -+ * processed by pcd_setup. Calling the Function Driver's setup function from -+ * pcd_setup processes the gadget SETUP commands. -+ */ -+static inline void pcd_setup(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ -+ deptsiz0_data_t doeptsize0 = { .d32 = 0}; -+ -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+#endif -+ -+ doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz); -+ -+ /** @todo handle > 1 setup packet , assert error for now */ -+ -+ if (core_if->dma_enable && core_if->dma_desc_enable == 0 && (doeptsize0.b.supcnt < 2)) { -+ DWC_ERROR ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n"); -+ } -+ -+ /* Clean up the request queue */ -+ dwc_otg_request_nuke(ep0); -+ ep0->stopped = 0; -+ -+ if (ctrl.bRequestType & USB_DIR_IN) { -+ ep0->dwc_ep.is_in = 1; -+ pcd->ep0state = EP0_IN_DATA_PHASE; -+ } -+ else { -+ ep0->dwc_ep.is_in = 0; -+ pcd->ep0state = EP0_OUT_DATA_PHASE; -+ } -+ -+ if(ctrl.wLength == 0) { -+ ep0->dwc_ep.is_in = 1; -+ pcd->ep0state = EP0_IN_STATUS_PHASE; -+ } -+ -+ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) { -+ /* handle non-standard (class/vendor) requests in the gadget driver */ -+ do_gadget_setup(pcd, &ctrl); -+ return; -+ } -+ -+ /** @todo NGS: Handle bad setup packet? */ -+ -+/////////////////////////////////////////// -+//// --- Standard Request handling --- //// -+ -+ switch (ctrl.bRequest) { -+ case USB_REQ_GET_STATUS: -+ do_get_status(pcd); -+ break; -+ -+ case USB_REQ_CLEAR_FEATURE: -+ do_clear_feature(pcd); -+ break; -+ -+ case USB_REQ_SET_FEATURE: -+ do_set_feature(pcd); -+ break; -+ -+ case USB_REQ_SET_ADDRESS: -+ do_set_address(pcd); -+ break; -+ -+ case USB_REQ_SET_INTERFACE: -+ case USB_REQ_SET_CONFIGURATION: -+// _pcd->request_config = 1; /* Configuration changed */ -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ -+ case USB_REQ_SYNCH_FRAME: -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ -+ default: -+ /* Call the Gadget Driver's setup functions */ -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ } -+} -+ -+/** -+ * This function completes the ep0 control transfer. 
-+ */ -+static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dwc_otg_dev_in_ep_regs_t *in_ep_regs = -+ dev_if->in_ep_regs[ep->dwc_ep.num]; -+#ifdef DEBUG_EP0 -+ dwc_otg_dev_out_ep_regs_t *out_ep_regs = -+ dev_if->out_ep_regs[ep->dwc_ep.num]; -+#endif -+ deptsiz0_data_t deptsiz; -+ desc_sts_data_t desc_sts; -+ dwc_otg_pcd_request_t *req; -+ int is_last = 0; -+ dwc_otg_pcd_t *pcd = ep->pcd; -+ -+ //DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name); -+ -+ if (pcd->ep0_pending && list_empty(&ep->queue)) { -+ if (ep->dwc_ep.is_in) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n"); -+#endif -+ do_setup_out_status_phase(pcd); -+ } -+ else { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n"); -+#endif -+ do_setup_in_status_phase(pcd); -+ } -+ pcd->ep0_pending = 0; -+ return 1; -+ } -+ -+ if (list_empty(&ep->queue)) { -+ return 0; -+ } -+ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, queue); -+ -+ -+ if (pcd->ep0state == EP0_OUT_STATUS_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) { -+ is_last = 1; -+ } -+ else if (ep->dwc_ep.is_in) { -+ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); -+ if(core_if->dma_desc_enable != 0) -+ desc_sts.d32 = readl(dev_if->in_desc_addr); -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+#endif -+ -+ if (((core_if->dma_desc_enable == 0) && (deptsiz.b.xfersize == 0)) || -+ ((core_if->dma_desc_enable != 0) && (desc_sts.b.bytes == 0))) { -+ req->req.actual = ep->dwc_ep.xfer_count; -+ /* Is a Zero Len Packet needed? */ -+ if (req->req.zero) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n"); -+#endif -+ req->req.zero = 0; -+ } -+ do_setup_out_status_phase(pcd); -+ } -+ } -+ else { -+ /* ep0-OUT */ -+#ifdef DEBUG_EP0 -+ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, -+ deptsiz.b.pktcnt); -+#endif -+ req->req.actual = ep->dwc_ep.xfer_count; -+ /* Is a Zero Len Packet needed? */ -+ if (req->req.zero) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n"); -+#endif -+ req->req.zero = 0; -+ } -+ if(core_if->dma_desc_enable == 0) -+ do_setup_in_status_phase(pcd); -+ } -+ -+ /* Complete the request */ -+ if (is_last) { -+ dwc_otg_request_done(ep, req, 0); -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ return 1; -+ } -+ return 0; -+} -+ -+inline void aligned_buf_patch_on_buf_dma_oep_completion(dwc_otg_pcd_ep_t *ep, uint32_t byte_count) -+{ -+ dwc_ep_t *dwc_ep = &ep->dwc_ep; -+ if(byte_count && dwc_ep->aligned_buf && -+ dwc_ep->dma_addr>=dwc_ep->aligned_dma_addr && -+ dwc_ep->dma_addr<=(dwc_ep->aligned_dma_addr+dwc_ep->aligned_buf_size))\ -+ { -+ //aligned buf used, apply complete patch -+ u32 offset=(dwc_ep->dma_addr-dwc_ep->aligned_dma_addr); -+ memcpy(dwc_ep->start_xfer_buff+offset, dwc_ep->aligned_buf+offset, byte_count); -+ } -+} -+ -+/** -+ * This function completes the request for the EP. If there are -+ * additional requests for the EP in the queue they will be started. 
-+ */ -+static void complete_ep(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dwc_otg_dev_in_ep_regs_t *in_ep_regs = -+ dev_if->in_ep_regs[ep->dwc_ep.num]; -+ deptsiz_data_t deptsiz; -+ desc_sts_data_t desc_sts; -+ dwc_otg_pcd_request_t *req = 0; -+ dwc_otg_dma_desc_t* dma_desc; -+ uint32_t byte_count = 0; -+ int is_last = 0; -+ int i; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s() %s-%s\n", __func__, ep->ep.name, -+ (ep->dwc_ep.is_in?"IN":"OUT")); -+ -+ /* Get any pending requests */ -+ if (!list_empty(&ep->queue)) { -+ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, -+ queue); -+ if (!req) { -+ printk("complete_ep 0x%p, req = NULL!\n", ep); -+ return; -+ } -+ } -+ else { -+ printk("complete_ep 0x%p, ep->queue empty!\n", ep); -+ return; -+ } -+ DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending); -+ -+ if (ep->dwc_ep.is_in) { -+ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); -+ -+ if (core_if->dma_enable) { -+ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE); -+ if(core_if->dma_desc_enable == 0) { -+ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE); -+ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { -+ byte_count = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+DWC_DEBUGPL(DBG_PCDV,"byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x)\n", byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count ); -+ -+ ep->dwc_ep.xfer_buff += byte_count; -+ ep->dwc_ep.dma_addr += byte_count; -+ ep->dwc_ep.xfer_count += byte_count; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); -+ } else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. 
-+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } else { -+ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", -+ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ } -+ } else { -+ -+ dma_desc = ep->dwc_ep.desc_addr; -+ byte_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ -+ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { -+ desc_sts.d32 = readl(dma_desc); -+ byte_count += desc_sts.b.bytes; -+ dma_desc++; -+ } -+ -+ if(byte_count == 0) { -+ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len; -+ is_last = 1; -+ } else { -+ DWC_WARN("Incomplete transfer\n"); -+ } -+ } -+ } else { -+ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { -+ /* Check if the whole transfer was completed, -+ * if no, setup transfer for next portion of data -+ */ -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len ); -+ } else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. 
-+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } -+ else { -+ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", -+ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ } -+ } -+ } else { -+ dwc_otg_dev_out_ep_regs_t *out_ep_regs = -+ dev_if->out_ep_regs[ep->dwc_ep.num]; -+ desc_sts.d32 = 0; -+ if(core_if->dma_enable) { -+ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_FROM_DEVICE); -+ if(core_if->dma_desc_enable) { -+ DWC_WARN("\n\n%s: we need a cache invalidation here!!\n\n",__func__); -+ dma_desc = ep->dwc_ep.desc_addr; -+ byte_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { -+ desc_sts.d32 = readl(dma_desc); -+ byte_count += desc_sts.b.bytes; -+ dma_desc++; -+ } -+ -+ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len -+ - byte_count + ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3); -+ -+ //todo: invalidate cache & aligned buf patch on completion -+ // -+ -+ is_last = 1; -+ } else { -+ deptsiz.d32 = 0; -+ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); -+ -+ byte_count = (ep->dwc_ep.xfer_len - -+ ep->dwc_ep.xfer_count - deptsiz.b.xfersize); -+ -+// dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); -+ -+DWC_DEBUGPL(DBG_PCDV,"ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); -+ //todo: invalidate cache & aligned buf patch on completion -+ dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); -+ aligned_buf_patch_on_buf_dma_oep_completion(ep,byte_count); -+ -+ ep->dwc_ep.xfer_buff += byte_count; -+ ep->dwc_ep.dma_addr += byte_count; -+ ep->dwc_ep.xfer_count += byte_count; -+ -+ /* Check if the whole transfer was completed, -+ * if no, setup transfer for next portion of data -+ */ -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); -+ } -+ else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. 
-+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } -+ } else { -+ /* Check if the whole transfer was completed, -+ * if no, setup transfer for next portion of data -+ */ -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len ); -+ } -+ else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. -+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } -+ -+#ifdef DEBUG -+ -+ DWC_DEBUGPL(DBG_PCDV, "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n", -+ &out_ep_regs->doeptsiz, ep->ep.name, ep->dwc_ep.xfer_len, -+ ep->dwc_ep.xfer_count, -+ deptsiz.b.xfersize, -+ deptsiz.b.pktcnt); -+#endif -+ } -+ -+ /* Complete the request */ -+ if (is_last) { -+ req->req.actual = ep->dwc_ep.xfer_count; -+ -+ dwc_otg_request_done(ep, req, 0); -+ -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ -+ /* If there is a request in the queue start it.*/ -+ start_next_request(ep); -+ } -+} -+ -+ -+#ifdef DWC_EN_ISOC -+ -+/** -+ * This function BNA interrupt for Isochronous EPs -+ * -+ */ -+static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_ep_t *dwc_ep = &ep->dwc_ep; -+ volatile uint32_t *addr; -+ depctl_data_t depctl = {.d32 = 0}; -+ dwc_otg_pcd_t *pcd = ep->pcd; -+ dwc_otg_dma_desc_t *dma_desc; -+ int i; -+ -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num); -+ -+ if(dwc_ep->is_in) { -+ desc_sts_data_t sts = {.d32 = 0}; -+ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) -+ { -+ sts.d32 = readl(&dma_desc->status); -+ sts.b_iso_in.bs = BS_HOST_READY; -+ writel(sts.d32,&dma_desc->status); -+ } -+ } -+ else { -+ desc_sts_data_t sts = {.d32 = 0}; -+ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) -+ { -+ sts.d32 = readl(&dma_desc->status); -+ sts.b_iso_out.bs = BS_HOST_READY; -+ writel(sts.d32,&dma_desc->status); -+ } -+ } -+ -+ if(dwc_ep->is_in == 0){ -+ addr = &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->doepctl; -+ } -+ else{ -+ addr = &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl; -+ } -+ depctl.b.epena = 1; -+ dwc_modify_reg32(addr,depctl.d32,depctl.d32); -+} -+ -+/** -+ * This function sets latest iso packet information(non-PTI mode) -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. 
-+ * -+ */ -+void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ dma_addr_t dma_addr; -+ uint32_t offset; -+ -+ if(ep->proc_buf_num) -+ dma_addr = ep->dma_addr1; -+ else -+ dma_addr = ep->dma_addr0; -+ -+ if(ep->is_in) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); -+ offset = ep->data_per_frame; -+ } else { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); -+ offset = ep->data_per_frame + (0x4 & (0x4 - (ep->data_per_frame & 0x3))); -+ } -+ -+ if(!deptsiz.b.xfersize) { -+ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; -+ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; -+ ep->pkt_info[ep->cur_pkt].status = 0; -+ } else { -+ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; -+ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; -+ ep->pkt_info[ep->cur_pkt].status = -ENODATA; -+ } -+ ep->cur_pkt_addr += offset; -+ ep->cur_pkt_dma_addr += offset; -+ ep->cur_pkt++; -+} -+ -+/** -+ * This function sets latest iso packet information(DDMA mode) -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dwc_ep The EP to start the transfer on. -+ * -+ */ -+static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ dwc_otg_dma_desc_t* dma_desc; -+ desc_sts_data_t sts = {.d32 = 0}; -+ iso_pkt_info_t *iso_packet; -+ uint32_t data_per_desc; -+ uint32_t offset; -+ int i, j; -+ -+ iso_packet = dwc_ep->pkt_info; -+ -+ /** Reinit closed DMA Descriptors*/ -+ /** ISO OUT EP */ -+ if(dwc_ep->is_in == 0) { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ offset = 0; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso_packet_decsriptor */ -+ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); -+ if(iso_packet->status) { -+ iso_packet->status = -ENODATA; -+ } -+ -+ /* Received data length */ -+ if(!sts.b_iso_out.rxbytes){ -+ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes; -+ } else { -+ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes + -+ (4 - dwc_ep->data_per_frame % 4); -+ } -+ -+ iso_packet->offset = offset; -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ iso_packet ++; -+ } -+ } -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; -+ -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso_packet_decsriptor */ -+ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); -+ if(iso_packet->status) { -+ iso_packet->status = -ENODATA; -+ } -+ -+ /* Received data length */ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; -+ -+ iso_packet->offset = offset; -+ -+ offset += data_per_desc; -+ iso_packet++; -+ dma_desc++; -+ } -+ -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso_packet_decsriptor */ -+ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); -+ if(iso_packet->status) { -+ iso_packet->status = -ENODATA; -+ } -+ /* Received data length */ -+ if(!sts.b_iso_out.rxbytes){ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; -+ } else { -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes + -+ (4 - dwc_ep->data_per_frame % 4); -+ } -+ -+ iso_packet->offset = offset; -+ } -+ else /** ISO IN EP */ -+ { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) -+ { -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso packet descriptor */ -+ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); -+ if(iso_packet->status != 0) { -+ iso_packet->status = -ENODATA; -+ -+ } -+ /* Bytes has been transfered */ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; -+ -+ dma_desc ++; -+ iso_packet++; -+ } -+ -+ sts.d32 = readl(&dma_desc->status); -+ while(sts.b_iso_in.bs == BS_DMA_BUSY) { -+ sts.d32 = readl(&dma_desc->status); -+ } -+ -+ /* Write status in iso packet descriptor ??? do be done with ERROR codes*/ -+ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); -+ if(iso_packet->status != 0) { -+ iso_packet->status = -ENODATA; -+ } -+ -+ /* Bytes has been transfered */ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; -+ } -+} -+ -+/** -+ * This function reinitialize DMA Descriptors for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dwc_ep The EP to start the transfer on. -+ * -+ */ -+static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ int i, j; -+ dwc_otg_dma_desc_t* dma_desc; -+ dma_addr_t dma_ad; -+ volatile uint32_t *addr; -+ desc_sts_data_t sts = { .d32 =0 }; -+ uint32_t data_per_desc; -+ -+ if(dwc_ep->is_in == 0) { -+ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; -+ } -+ else { -+ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; -+ } -+ -+ -+ if(dwc_ep->proc_buf_num == 0) { -+ /** Buffer 0 descriptors setup */ -+ dma_ad = dwc_ep->dma_addr0; -+ } -+ else { -+ /** Buffer 1 descriptors setup */ -+ dma_ad = dwc_ep->dma_addr1; -+ } -+ -+ /** Reinit closed DMA Descriptors*/ -+ /** ISO OUT EP */ -+ if(dwc_ep->is_in == 0) { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ -+ sts.b_iso_out.bs = BS_HOST_READY; -+ sts.b_iso_out.rxsts = 0; -+ sts.b_iso_out.l = 0; -+ sts.b_iso_out.sp = 0; -+ sts.b_iso_out.ioc = 0; -+ sts.b_iso_out.pid = 0; -+ sts.b_iso_out.framenum = 0; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
-+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ //(uint32_t)dma_ad += data_per_desc; -+ dma_ad = (uint32_t)dma_ad + data_per_desc; -+ dma_desc ++; -+ } -+ } -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dma_desc++; -+ //(uint32_t)dma_ad += data_per_desc; -+ dma_ad = (uint32_t)dma_ad + data_per_desc; -+ } -+ -+ sts.b_iso_out.ioc = 1; -+ sts.b_iso_out.l = dwc_ep->proc_buf_num; -+ -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ } -+ else /** ISO IN EP */ -+ { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ -+ sts.b_iso_in.bs = BS_HOST_READY; -+ sts.b_iso_in.txsts = 0; -+ sts.b_iso_in.sp = 0; -+ sts.b_iso_in.ioc = 0; -+ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; -+ sts.b_iso_in.framenum = dwc_ep->next_frame; -+ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; -+ sts.b_iso_in.l = 0; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) -+ { -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ sts.b_iso_in.framenum += dwc_ep->bInterval; -+ //(uint32_t)dma_ad += dwc_ep->data_per_frame; -+ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; -+ dma_desc ++; -+ } -+ -+ sts.b_iso_in.ioc = 1; -+ sts.b_iso_in.l = dwc_ep->proc_buf_num; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval * 1; -+ } -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+} -+ -+ -+/** -+ * This function is to handle Iso EP transfer complete interrupt -+ * in case Iso out packet was dropped -+ * -+ * @param core_if Programming view of DWC_otg controller. 
-+ * @param dwc_ep The EP for wihich transfer complete was asserted -+ * -+ */ -+static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ uint32_t dma_addr; -+ uint32_t drp_pkt; -+ uint32_t drp_pkt_cnt; -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ depctl_data_t depctl = { .d32 = 0 }; -+ int i; -+ -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz); -+ -+ drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt; -+ drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm); -+ -+ /* Setting dropped packets status */ -+ for(i = 0; i < drp_pkt_cnt; ++i) { -+ dwc_ep->pkt_info[drp_pkt].status = -ENODATA; -+ drp_pkt ++; -+ deptsiz.b.pktcnt--; -+ } -+ -+ -+ if(deptsiz.b.pktcnt > 0) { -+ deptsiz.b.xfersize = dwc_ep->xfer_len - (dwc_ep->pkt_cnt - deptsiz.b.pktcnt) * dwc_ep->maxpacket; -+ } else { -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 0; -+ } -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz, deptsiz.d32); -+ -+ if(deptsiz.b.pktcnt > 0) { -+ if(dwc_ep->proc_buf_num) { -+ dma_addr = dwc_ep->dma_addr1 + dwc_ep->xfer_len - deptsiz.b.xfersize; -+ } else { -+ dma_addr = dwc_ep->dma_addr0 + dwc_ep->xfer_len - deptsiz.b.xfersize;; -+ } -+ -+ VERIFY_PCD_DMA_ADDR(dma_addr); -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepdma, dma_addr); -+ -+ /** Re-enable endpoint, clear nak */ -+ depctl.d32 = 0; -+ depctl.b.epena = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl, -+ depctl.d32,depctl.d32); -+ return 0; -+ } else { -+ return 1; -+ } -+} -+ -+/** -+ * This function sets iso packets information(PTI mode) -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ int i, j; -+ dma_addr_t dma_ad; -+ iso_pkt_info_t *packet_info = ep->pkt_info; -+ uint32_t offset; -+ uint32_t frame_data; -+ deptsiz_data_t deptsiz; -+ -+ if(ep->proc_buf_num == 0) { -+ /** Buffer 0 descriptors setup */ -+ dma_ad = ep->dma_addr0; -+ } -+ else { -+ /** Buffer 1 descriptors setup */ -+ dma_ad = ep->dma_addr1; -+ } -+ -+ if(ep->is_in) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); -+ } else { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); -+ } -+ -+ if(!deptsiz.b.xfersize) { -+ offset = 0; -+ for(i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) -+ { -+ frame_data = ep->data_per_frame; -+ for(j = 0; j < ep->pkt_per_frm; ++j) { -+ -+ /* Packet status - is not set as initially -+ * it is set to 0 and if packet was sent -+ successfully, status field will remain 0*/ -+ -+ /* Bytes has been transfered */ -+ packet_info->length = (ep->maxpacket < frame_data) ? -+ ep->maxpacket : frame_data; -+ -+ /* Received packet offset */ -+ packet_info->offset = offset; -+ offset += packet_info->length; -+ frame_data -= packet_info->length; -+ -+ packet_info ++; -+ } -+ } -+ return 1; -+ } else { -+ /* This is a workaround for in case of Transfer Complete with -+ * PktDrpSts interrupts merging - in this case Transfer complete -+ * interrupt for Isoc Out Endpoint is asserted without PktDrpSts -+ * set and with DOEPTSIZ register non zero. Investigations showed, -+ * that this happens when Out packet is dropped, but because of -+ * interrupts merging during first interrupt handling PktDrpSts -+ * bit is cleared and for next merged interrupts it is not reset. 
-+ * In this case SW hadles the interrupt as if PktDrpSts bit is set. -+ */ -+ if(ep->is_in) { -+ return 1; -+ } else { -+ return handle_iso_out_pkt_dropped(core_if, ep); -+ } -+ } -+} -+ -+/** -+ * This function is to handle Iso EP transfer complete interrupt -+ * -+ * @param ep The EP for which transfer complete was asserted -+ * -+ */ -+static void complete_iso_ep(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); -+ dwc_ep_t *dwc_ep = &ep->dwc_ep; -+ uint8_t is_last = 0; -+ -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable) { -+ set_ddma_iso_pkts_info(core_if, dwc_ep); -+ reinit_ddma_iso_xfer(core_if, dwc_ep); -+ is_last = 1; -+ } else { -+ if(core_if->pti_enh_enable) { -+ if(set_iso_pkts_info(core_if, dwc_ep)) { -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ dwc_otg_iso_ep_start_buf_transfer(core_if, dwc_ep); -+ is_last = 1; -+ } -+ } else { -+ set_current_pkt_info(core_if, dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ is_last = 1; -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ } -+ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); -+ } -+ } -+ } else { -+ set_current_pkt_info(core_if, dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ is_last = 1; -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ } -+ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); -+ } -+ if(is_last) -+ dwc_otg_iso_buffer_done(ep, ep->iso_req); -+} -+ -+#endif //DWC_EN_ISOC -+ -+ -+/** -+ * This function handles EP0 Control transfers. -+ * -+ * The state of the control tranfers are tracked in -+ * ep0state. -+ */ -+static void handle_ep0(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ desc_sts_data_t desc_sts; -+ deptsiz0_data_t deptsiz; -+ uint32_t byte_count; -+ -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); -+ print_ep0_state(pcd); -+#endif -+ -+ switch (pcd->ep0state) { -+ case EP0_DISCONNECT: -+ break; -+ -+ case EP0_IDLE: -+ pcd->request_config = 0; -+ -+ pcd_setup(pcd); -+ break; -+ -+ case EP0_IN_DATA_PHASE: -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n", -+ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), -+ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); -+#endif -+ -+ if (core_if->dma_enable != 0) { -+ /* -+ * For EP0 we can only program 1 packet at a time so we -+ * need to do the make calculations after each complete. -+ * Call write_packet to make the calculations, as in -+ * slave mode, and use those values to determine if we -+ * can complete. 
-+ */ -+ if(core_if->dma_desc_enable == 0) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->dieptsiz); -+ byte_count = ep0->dwc_ep.xfer_len - deptsiz.b.xfersize; -+ } -+ else { -+ desc_sts.d32 = readl(core_if->dev_if->in_desc_addr); -+ byte_count = ep0->dwc_ep.xfer_len - desc_sts.b.bytes; -+ } -+ -+ ep0->dwc_ep.xfer_count += byte_count; -+ ep0->dwc_ep.xfer_buff += byte_count; -+ ep0->dwc_ep.dma_addr += byte_count; -+ } -+ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else if(ep0->dwc_ep.sent_zlp) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ ep0->dwc_ep.sent_zlp = 0; -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else { -+ ep0_complete_request(ep0); -+ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); -+ } -+ break; -+ case EP0_OUT_DATA_PHASE: -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n", -+ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), -+ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); -+#endif -+ if (core_if->dma_enable != 0) { -+ if(core_if->dma_desc_enable == 0) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[0]->doeptsiz); -+ byte_count = ep0->dwc_ep.maxpacket - deptsiz.b.xfersize; -+ -+ //todo: invalidate cache & aligned buf patch on completion -+ dma_sync_single_for_device(NULL,ep0->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); -+ aligned_buf_patch_on_buf_dma_oep_completion(ep0,byte_count); -+ } -+ else { -+ desc_sts.d32 = readl(core_if->dev_if->out_desc_addr); -+ byte_count = ep0->dwc_ep.maxpacket - desc_sts.b.bytes; -+ -+ //todo: invalidate cache & aligned buf patch on completion -+ // -+ -+ } -+ ep0->dwc_ep.xfer_count += byte_count; -+ ep0->dwc_ep.xfer_buff += byte_count; -+ ep0->dwc_ep.dma_addr += byte_count; -+ } -+ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else if(ep0->dwc_ep.sent_zlp) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ ep0->dwc_ep.sent_zlp = 0; -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else { -+ ep0_complete_request(ep0); -+ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); -+ } -+ break; -+ -+ case EP0_IN_STATUS_PHASE: -+ case EP0_OUT_STATUS_PHASE: -+ DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n"); -+ ep0_complete_request(ep0); -+ pcd->ep0state = EP0_IDLE; -+ ep0->stopped = 1; -+ ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */ -+ -+ /* Prepare for more SETUP Packets */ -+ if(core_if->dma_enable) { -+ ep0_out_start(core_if, pcd); -+ } -+ break; -+ -+ case EP0_STALL: -+ DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n"); -+ break; -+ } -+#ifdef DEBUG_EP0 -+ print_ep0_state(pcd); -+#endif -+} -+ -+ -+/** -+ * Restart transfer -+ */ -+static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if; -+ dwc_otg_dev_if_t *dev_if; -+ deptsiz_data_t dieptsiz = {.d32=0}; -+ dwc_otg_pcd_ep_t *ep; -+ -+ ep = get_in_ep(pcd, epnum); -+ -+#ifdef DWC_EN_ISOC -+ if(ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) { -+ return; -+ } -+#endif /* DWC_EN_ISOC */ -+ -+ core_if = GET_CORE_IF(pcd); -+ dev_if = core_if->dev_if; -+ -+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); -+ -+ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x xfer_len=%0x" -+ " stopped=%d\n", ep->dwc_ep.xfer_buff, -+ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , -+ 
ep->stopped); -+ /* -+ * If xfersize is 0 and pktcnt in not 0, resend the last packet. -+ */ -+ if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 && -+ ep->dwc_ep.start_xfer_buff != 0) { -+ if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) { -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff; -+ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; -+ } -+ else { -+ ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket; -+ /* convert packet size to dwords. */ -+ ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket; -+ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; -+ } -+ ep->stopped = 0; -+ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x " -+ "xfer_len=%0x stopped=%d\n", -+ ep->dwc_ep.xfer_buff, -+ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , -+ ep->stopped -+ ); -+ if (epnum == 0) { -+ dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep); -+ } -+ else { -+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+ } -+ } -+} -+ -+ -+/** -+ * handle the IN EP disable interrupt. -+ */ -+static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ deptsiz_data_t dieptsiz = {.d32=0}; -+ dctl_data_t dctl = {.d32=0}; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ -+ ep = get_in_ep(pcd, epnum); -+ dwc_ep = &ep->dwc_ep; -+ -+ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); -+ return; -+ } -+ -+ DWC_DEBUGPL(DBG_PCD,"diepctl%d=%0x\n", epnum, -+ dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl)); -+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); -+ -+ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", -+ dieptsiz.b.pktcnt, -+ dieptsiz.b.xfersize); -+ -+ if (ep->stopped) { -+ /* Flush the Tx FIFO */ -+ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); -+ /* Clear the Global IN NP NAK */ -+ dctl.d32 = 0; -+ dctl.b.cgnpinnak = 1; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, -+ dctl.d32, 0); -+ /* Restart the transaction */ -+ if (dieptsiz.b.pktcnt != 0 || -+ dieptsiz.b.xfersize != 0) { -+ restart_transfer(pcd, epnum); -+ } -+ } -+ else { -+ /* Restart the transaction */ -+ if (dieptsiz.b.pktcnt != 0 || -+ dieptsiz.b.xfersize != 0) { -+ restart_transfer(pcd, epnum); -+ } -+ DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n"); -+ } -+} -+ -+/** -+ * Handler for the IN EP timeout handshake interrupt. -+ */ -+static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ -+#ifdef DEBUG -+ deptsiz_data_t dieptsiz = {.d32=0}; -+ uint32_t num = 0; -+#endif -+ dctl_data_t dctl = {.d32=0}; -+ dwc_otg_pcd_ep_t *ep; -+ -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ ep = get_in_ep(pcd, epnum); -+ -+ /* Disable the NP Tx Fifo Empty Interrrupt */ -+ if (!core_if->dma_enable) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ /** @todo NGS Check EP type. 
-+ * Implement for Periodic EPs */ -+ /* -+ * Non-periodic EP -+ */ -+ /* Enable the Global IN NAK Effective Interrupt */ -+ intr_mask.b.ginnakeff = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ 0, intr_mask.d32); -+ -+ /* Set Global IN NAK */ -+ dctl.b.sgnpinnak = 1; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, -+ dctl.d32, dctl.d32); -+ -+ ep->stopped = 1; -+ -+#ifdef DEBUG -+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz); -+ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", -+ dieptsiz.b.pktcnt, -+ dieptsiz.b.xfersize); -+#endif -+ -+#ifdef DISABLE_PERIODIC_EP -+ /* -+ * Set the NAK bit for this EP to -+ * start the disable process. -+ */ -+ diepctl.d32 = 0; -+ diepctl.b.snak = 1; -+ dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32, diepctl.d32); -+ ep->disabling = 1; -+ ep->stopped = 1; -+#endif -+} -+ -+/** -+ * Handler for the IN EP NAK interrupt. -+ */ -+static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ diepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.nak = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * Handler for the OUT EP Babble interrupt. -+ */ -+static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ doepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP Babble"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.babble = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * Handler for the OUT EP NAK interrupt. -+ */ -+static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ doepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.nak = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * Handler for the OUT EP NYET interrupt. 
-+ */ -+static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ doepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.nyet = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that an IN EP has a pending Interrupt. -+ * The sequence for handling the IN EP interrupt is shown below: -+ * -# Read the Device All Endpoint Interrupt register -+ * -# Repeat the following for each IN EP interrupt bit set (from -+ * LSB to MSB). -+ * -# Read the Device Endpoint Interrupt (DIEPINTn) register -+ * -# If "Transfer Complete" call the request complete function -+ * -# If "Endpoint Disabled" complete the EP disable procedure. -+ * -# If "AHB Error Interrupt" log error -+ * -# If "Time-out Handshake" log error -+ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx -+ * FIFO. -+ * -# If "IN Token EP Mismatch" (disable, this is handled by EP -+ * Mismatch Interrupt) -+ */ -+static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd) -+{ -+#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \ -+do { \ -+ diepint_data_t diepint = {.d32=0}; \ -+ diepint.b.__intr = 1; \ -+ dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \ -+ diepint.d32); \ -+} while (0) -+ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ diepint_data_t diepint = {.d32=0}; -+ dctl_data_t dctl = {.d32=0}; -+ depctl_data_t depctl = {.d32=0}; -+ uint32_t ep_intr; -+ uint32_t epnum = 0; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); -+ -+ /* Read in the device interrupt bits */ -+ ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if); -+ -+ /* Service the Device IN interrupts for each endpoint */ -+ while(ep_intr) { -+ if (ep_intr&0x1) { -+ uint32_t empty_msk; -+ /* Get EP pointer */ -+ ep = get_in_ep(pcd, epnum); -+ dwc_ep = &ep->dwc_ep; -+ -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); -+ empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); -+ -+ DWC_DEBUGPL(DBG_PCDV, -+ "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n", -+ epnum, -+ empty_msk, -+ depctl.d32); -+ -+ DWC_DEBUGPL(DBG_PCD, -+ "EP%d-%s: type=%d, mps=%d\n", -+ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), -+ dwc_ep->type, dwc_ep->maxpacket); -+ -+ diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep); -+ -+ DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n", epnum, diepint.d32); -+ /* Transfer complete */ -+ if (diepint.b.xfercompl) { -+ /* Disable the NP Tx FIFO Empty -+ * Interrrupt */ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ else { -+ /* Disable the Tx FIFO Empty Interrupt for this EP */ -+ uint32_t fifoemptymsk = 0x1 << dwc_ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ fifoemptymsk, 0); -+ } -+ /* Clear the bit in DIEPINTn for this interrupt */ -+ CLEAR_IN_EP_INTR(core_if,epnum,xfercompl); -+ -+ /* Complete the transfer */ -+ if 
(epnum == 0) { -+ handle_ep0(pcd); -+ } -+#ifdef DWC_EN_ISOC -+ else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ if(!ep->stopped) -+ complete_iso_ep(ep); -+ } -+#endif //DWC_EN_ISOC -+ else { -+ -+ complete_ep(ep); -+ } -+ } -+ /* Endpoint disable */ -+ if (diepint.b.epdisabled) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN disabled\n", epnum); -+ handle_in_ep_disable_intr(pcd, epnum); -+ -+ /* Clear the bit in DIEPINTn for this interrupt */ -+ CLEAR_IN_EP_INTR(core_if,epnum,epdisabled); -+ } -+ /* AHB Error */ -+ if (diepint.b.ahberr) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN AHB Error\n", epnum); -+ /* Clear the bit in DIEPINTn for this interrupt */ -+ CLEAR_IN_EP_INTR(core_if,epnum,ahberr); -+ } -+ /* TimeOUT Handshake (non-ISOC IN EPs) */ -+ if (diepint.b.timeout) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN Time-out\n", epnum); -+ handle_in_ep_timeout_intr(pcd, epnum); -+ -+ CLEAR_IN_EP_INTR(core_if,epnum,timeout); -+ } -+ /** IN Token received with TxF Empty */ -+ if (diepint.b.intktxfemp) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n", -+ epnum); -+ if (!ep->stopped && epnum != 0) { -+ -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ diepmsk.b.intktxfemp = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[epnum], -+ diepmsk.d32, 0); -+ } else { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32, 0); -+ } -+ start_next_request(ep); -+ } -+ else if(core_if->dma_desc_enable && epnum == 0 && -+ pcd->ep0state == EP0_OUT_STATUS_PHASE) { -+ // EP0 IN set STALL -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); -+ -+ /* set the disable and stall bits */ -+ if (depctl.b.epena) { -+ depctl.b.epdis = 1; -+ } -+ depctl.b.stall = 1; -+ dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32); -+ } -+ CLEAR_IN_EP_INTR(core_if,epnum,intktxfemp); -+ } -+ /** IN Token Received with EP mismatch */ -+ if (diepint.b.intknepmis) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n", epnum); -+ CLEAR_IN_EP_INTR(core_if,epnum,intknepmis); -+ } -+ /** IN Endpoint NAK Effective */ -+ if (diepint.b.inepnakeff) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN EP NAK Effective\n", epnum); -+ /* Periodic EP */ -+ if (ep->disabling) { -+ depctl.d32 = 0; -+ depctl.b.snak = 1; -+ depctl.b.epdis = 1; -+ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); -+ } -+ CLEAR_IN_EP_INTR(core_if,epnum,inepnakeff); -+ -+ } -+ -+ /** IN EP Tx FIFO Empty Intr */ -+ if (diepint.b.emptyintr) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n", epnum); -+ write_empty_tx_fifo(pcd, epnum); -+ -+ CLEAR_IN_EP_INTR(core_if,epnum,emptyintr); -+ } -+ -+ /** IN EP BNA Intr */ -+ if (diepint.b.bna) { -+ CLEAR_IN_EP_INTR(core_if,epnum,bna); -+ if(core_if->dma_desc_enable) { -+#ifdef DWC_EN_ISOC -+ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * This checking is performed to prevent first "false" BNA -+ * handling occuring right after reconnect -+ */ -+ if(dwc_ep->next_frame != 0xffffffff) -+ dwc_otg_pcd_handle_iso_bna(ep); -+ } -+ else -+#endif //DWC_EN_ISOC -+ { -+ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); -+ -+ /* If Global Continue on BNA is disabled - disable EP */ -+ if(!dctl.b.gcontbna) { -+ depctl.d32 = 0; -+ depctl.b.snak = 1; -+ depctl.b.epdis = 1; -+ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); -+ } else { -+ start_next_request(ep); -+ } -+ } -+ } -+ } -+ /* NAK Interrutp */ -+ if (diepint.b.nak) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN NAK Interrupt\n", epnum); -+ handle_in_ep_nak_intr(pcd, 
epnum); -+ -+ CLEAR_IN_EP_INTR(core_if,epnum,nak); -+ } -+ } -+ epnum++; -+ ep_intr >>=1; -+ } -+ -+ return 1; -+#undef CLEAR_IN_EP_INTR -+} -+ -+/** -+ * This interrupt indicates that an OUT EP has a pending Interrupt. -+ * The sequence for handling the OUT EP interrupt is shown below: -+ * -# Read the Device All Endpoint Interrupt register -+ * -# Repeat the following for each OUT EP interrupt bit set (from -+ * LSB to MSB). -+ * -# Read the Device Endpoint Interrupt (DOEPINTn) register -+ * -# If "Transfer Complete" call the request complete function -+ * -# If "Endpoint Disabled" complete the EP disable procedure. -+ * -# If "AHB Error Interrupt" log error -+ * -# If "Setup Phase Done" process Setup Packet (See Standard USB -+ * Command Processing) -+ */ -+static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd) -+{ -+#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \ -+do { \ -+ doepint_data_t doepint = {.d32=0}; \ -+ doepint.b.__intr = 1; \ -+ dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \ -+ doepint.d32); \ -+} while (0) -+ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ uint32_t ep_intr; -+ doepint_data_t doepint = {.d32=0}; -+ dctl_data_t dctl = {.d32=0}; -+ depctl_data_t doepctl = {.d32=0}; -+ uint32_t epnum = 0; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); -+ -+ /* Read in the device interrupt bits */ -+ ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if); -+ -+ while(ep_intr) { -+ if (ep_intr&0x1) { -+ /* Get EP pointer */ -+ ep = get_out_ep(pcd, epnum); -+ dwc_ep = &ep->dwc_ep; -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV, -+ "EP%d-%s: type=%d, mps=%d\n", -+ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), -+ dwc_ep->type, dwc_ep->maxpacket); -+#endif -+ doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep); -+ -+ /* Transfer complete */ -+ if (doepint.b.xfercompl) { -+ if (epnum == 0) { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); -+ if(core_if->dma_desc_enable == 0 || pcd->ep0state != EP0_IDLE) -+ handle_ep0(pcd); -+#ifdef DWC_EN_ISOC -+ } else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ if (doepint.b.pktdrpsts == 0) { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); -+ complete_iso_ep(ep); -+ } else { -+ doepint_data_t doepint = {.d32=0}; -+ doepint.b.xfercompl = 1; -+ doepint.b.pktdrpsts = 1; -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint, -+ doepint.d32); -+ if(handle_iso_out_pkt_dropped(core_if,dwc_ep)) { -+ complete_iso_ep(ep); -+ } -+ } -+#endif //DWC_EN_ISOC -+ } else { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); -+ complete_ep(ep); -+ } -+ -+ } -+ -+ /* Endpoint disable */ -+ if (doepint.b.epdisabled) { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,epdisabled); -+ } -+ /* AHB Error */ -+ if (doepint.b.ahberr) { -+ DWC_DEBUGPL(DBG_PCD,"EP%d OUT AHB Error\n", epnum); -+ DWC_DEBUGPL(DBG_PCD,"EP DMA REG %d \n", core_if->dev_if->out_ep_regs[epnum]->doepdma); -+ CLEAR_OUT_EP_INTR(core_if,epnum,ahberr); -+ } -+ /* Setup Phase Done (contorl EPs) */ -+ if (doepint.b.setup) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD,"EP%d SETUP Done\n", -+ epnum); -+#endif -+ CLEAR_OUT_EP_INTR(core_if,epnum,setup); -+ handle_ep0(pcd); -+ } -+ -+ /** OUT EP BNA Intr */ -+ if (doepint.b.bna) { -+ CLEAR_OUT_EP_INTR(core_if,epnum,bna); -+ 
if(core_if->dma_desc_enable) { -+#ifdef DWC_EN_ISOC -+ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * This checking is performed to prevent first "false" BNA -+ * handling occuring right after reconnect -+ */ -+ if(dwc_ep->next_frame != 0xffffffff) -+ dwc_otg_pcd_handle_iso_bna(ep); -+ } -+ else -+#endif //DWC_EN_ISOC -+ { -+ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); -+ -+ /* If Global Continue on BNA is disabled - disable EP*/ -+ if(!dctl.b.gcontbna) { -+ doepctl.d32 = 0; -+ doepctl.b.snak = 1; -+ doepctl.b.epdis = 1; -+ dwc_modify_reg32(&dev_if->out_ep_regs[epnum]->doepctl, doepctl.d32, doepctl.d32); -+ } else { -+ start_next_request(ep); -+ } -+ } -+ } -+ } -+ if (doepint.b.stsphsercvd) { -+ CLEAR_OUT_EP_INTR(core_if,epnum,stsphsercvd); -+ if(core_if->dma_desc_enable) { -+ do_setup_in_status_phase(pcd); -+ } -+ } -+ /* Babble Interrutp */ -+ if (doepint.b.babble) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d OUT Babble\n", epnum); -+ handle_out_ep_babble_intr(pcd, epnum); -+ -+ CLEAR_OUT_EP_INTR(core_if,epnum,babble); -+ } -+ /* NAK Interrutp */ -+ if (doepint.b.nak) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NAK\n", epnum); -+ handle_out_ep_nak_intr(pcd, epnum); -+ -+ CLEAR_OUT_EP_INTR(core_if,epnum,nak); -+ } -+ /* NYET Interrutp */ -+ if (doepint.b.nyet) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NYET\n", epnum); -+ handle_out_ep_nyet_intr(pcd, epnum); -+ -+ CLEAR_OUT_EP_INTR(core_if,epnum,nyet); -+ } -+ } -+ -+ epnum++; -+ ep_intr >>=1; -+ } -+ -+ return 1; -+ -+#undef CLEAR_OUT_EP_INTR -+} -+ -+ -+/** -+ * Incomplete ISO IN Transfer Interrupt. -+ * This interrupt indicates one of the following conditions occurred -+ * while transmitting an ISOC transaction. -+ * - Corrupted IN Token for ISOC EP. -+ * - Packet not complete in FIFO. -+ * The follow actions will be taken: -+ * -# Determine the EP -+ * -# Set incomplete flag in dwc_ep structure -+ * -# Disable EP; when "Endpoint Disabled" interrupt is received -+ * Flush FIFO -+ */ -+int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintsts_data_t gintsts; -+ -+ -+#ifdef DWC_EN_ISOC -+ dwc_otg_dev_if_t *dev_if; -+ deptsiz_data_t deptsiz = { .d32 = 0}; -+ depctl_data_t depctl = { .d32 = 0}; -+ dsts_data_t dsts = { .d32 = 0}; -+ dwc_ep_t *dwc_ep; -+ int i; -+ -+ dev_if = GET_CORE_IF(pcd)->dev_if; -+ -+ for(i = 1; i <= dev_if->num_in_eps; ++i) { -+ dwc_ep = &pcd->in_ep[i].dwc_ep; -+ if(dwc_ep->active && -+ dwc_ep->type == USB_ENDPOINT_XFER_ISOC) -+ { -+ deptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz); -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); -+ -+ if(depctl.b.epdis && deptsiz.d32) { -+ set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ } -+ -+ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); -+ dwc_ep->next_frame = dsts.b.soffn; -+ -+ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); -+ } -+ } -+ } -+ -+#else -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "IN ISOC Incomplete"); -+ -+ intr_mask.b.incomplisoin = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+#endif //DWC_EN_ISOC 
-+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.incomplisoin = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * Incomplete ISO OUT Transfer Interrupt. -+ * -+ * This interrupt indicates that the core has dropped an ISO OUT -+ * packet. The following conditions can be the cause: -+ * - FIFO Full, the entire packet would not fit in the FIFO. -+ * - CRC Error -+ * - Corrupted Token -+ * The follow actions will be taken: -+ * -# Determine the EP -+ * -# Set incomplete flag in dwc_ep structure -+ * -# Read any data from the FIFO -+ * -# Disable EP. when "Endpoint Disabled" interrupt is received -+ * re-enable EP. -+ */ -+int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd) -+{ -+ /* @todo implement ISR */ -+ gintsts_data_t gintsts; -+ -+#ifdef DWC_EN_ISOC -+ dwc_otg_dev_if_t *dev_if; -+ deptsiz_data_t deptsiz = { .d32 = 0}; -+ depctl_data_t depctl = { .d32 = 0}; -+ dsts_data_t dsts = { .d32 = 0}; -+ dwc_ep_t *dwc_ep; -+ int i; -+ -+ dev_if = GET_CORE_IF(pcd)->dev_if; -+ -+ for(i = 1; i <= dev_if->num_out_eps; ++i) { -+ dwc_ep = &pcd->in_ep[i].dwc_ep; -+ if(pcd->out_ep[i].dwc_ep.active && -+ pcd->out_ep[i].dwc_ep.type == USB_ENDPOINT_XFER_ISOC) -+ { -+ deptsiz.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz); -+ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); -+ -+ if(depctl.b.epdis && deptsiz.d32) { -+ set_current_pkt_info(GET_CORE_IF(pcd), &pcd->out_ep[i].dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ } -+ -+ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); -+ dwc_ep->next_frame = dsts.b.soffn; -+ -+ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); -+ } -+ } -+ } -+#else -+ /** @todo implement ISR */ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "OUT ISOC Incomplete"); -+ -+ intr_mask.b.incomplisoout = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+#endif // DWC_EN_ISOC -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.incomplisoout = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This function handles the Global IN NAK Effective interrupt. 
-+ *
-+ */
-+int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd)
-+{
-+	dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
-+	depctl_data_t diepctl = { .d32 = 0};
-+	depctl_data_t diepctl_rd = { .d32 = 0};
-+	gintmsk_data_t intr_mask = { .d32 = 0};
-+	gintsts_data_t gintsts;
-+	int i;
-+
-+	DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
-+
-+	/* Disable all active IN EPs */
-+	diepctl.b.epdis = 1;
-+	diepctl.b.snak = 1;
-+
-+	for (i=0; i <= dev_if->num_in_eps; i++)
-+	{
-+		diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
-+		if (diepctl_rd.b.epena) {
-+			dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl,
-+					diepctl.d32);
-+		}
-+	}
-+	/* Disable the Global IN NAK Effective Interrupt */
-+	intr_mask.b.ginnakeff = 1;
-+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
-+			 intr_mask.d32, 0);
-+
-+	/* Clear interrupt */
-+	gintsts.d32 = 0;
-+	gintsts.b.ginnakeff = 1;
-+	dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts,
-+			gintsts.d32);
-+
-+	return 1;
-+}
-+
-+/**
-+ * This function handles the Global OUT NAK Effective interrupt.
-+ *
-+ */
-+int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd)
-+{
-+	gintmsk_data_t intr_mask = { .d32 = 0};
-+	gintsts_data_t gintsts;
-+
-+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
-+		  "Global OUT NAK Effective");
-+	/* Disable the Global OUT NAK Effective Interrupt */
-+	intr_mask.b.goutnakeff = 1;
-+	dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk,
-+			 intr_mask.d32, 0);
-+
-+	/* Clear interrupt */
-+	gintsts.d32 = 0;
-+	gintsts.b.goutnakeff = 1;
-+	dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts,
-+			 gintsts.d32);
-+
-+	return 1;
-+}
-+
-+
-+/**
-+ * PCD interrupt handler.
-+ *
-+ * The PCD handles the device interrupts. Many conditions can cause a
-+ * device interrupt. When an interrupt occurs, the device interrupt
-+ * service routine determines the cause of the interrupt and
-+ * dispatches handling to the appropriate function. These interrupt
-+ * handling functions are described below.
-+ *
-+ * All interrupt registers are processed from LSB to MSB.
-+ * -+ */ -+int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+#ifdef VERBOSE -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+#endif -+ gintsts_data_t gintr_status; -+ int32_t retval = 0; -+ -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n", -+ __func__, -+ dwc_read_reg32(&global_regs->gintsts), -+ dwc_read_reg32(&global_regs->gintmsk)); -+#endif -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ SPIN_LOCK(&pcd->lock); -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n", -+ __func__, -+ dwc_read_reg32(&global_regs->gintsts), -+ dwc_read_reg32(&global_regs->gintmsk)); -+#endif -+ -+ gintr_status.d32 = dwc_otg_read_core_intr(core_if); -+/* -+ if (!gintr_status.d32) { -+ SPIN_UNLOCK(&pcd->lock); -+ return 0; -+ } -+*/ -+ DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", -+ __func__, gintr_status.d32); -+ -+ if (gintr_status.b.sofintr) { -+ retval |= dwc_otg_pcd_handle_sof_intr(pcd); -+ } -+ if (gintr_status.b.rxstsqlvl) { -+ retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(pcd); -+ } -+ if (gintr_status.b.nptxfempty) { -+ retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd); -+ } -+ if (gintr_status.b.ginnakeff) { -+ retval |= dwc_otg_pcd_handle_in_nak_effective(pcd); -+ } -+ if (gintr_status.b.goutnakeff) { -+ retval |= dwc_otg_pcd_handle_out_nak_effective(pcd); -+ } -+ if (gintr_status.b.i2cintr) { -+ retval |= dwc_otg_pcd_handle_i2c_intr(pcd); -+ } -+ if (gintr_status.b.erlysuspend) { -+ retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd); -+ } -+ if (gintr_status.b.usbreset) { -+ retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd); -+ } -+ if (gintr_status.b.enumdone) { -+ retval |= dwc_otg_pcd_handle_enum_done_intr(pcd); -+ } -+ if (gintr_status.b.isooutdrop) { -+ retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd); -+ } -+ if (gintr_status.b.eopframe) { -+ retval |= dwc_otg_pcd_handle_end_periodic_frame_intr(pcd); -+ } -+ if (gintr_status.b.epmismatch) { -+ retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if); -+ } -+ if (gintr_status.b.inepint) { -+ if(!core_if->multiproc_int_enable) { -+ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); -+ } -+ } -+ if (gintr_status.b.outepintr) { -+ if(!core_if->multiproc_int_enable) { -+ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); -+ } -+ } -+ if (gintr_status.b.incomplisoin) { -+ retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd); -+ } -+ if (gintr_status.b.incomplisoout) { -+ retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd); -+ } -+ -+ /* In MPI mode De vice Endpoints intterrupts are asserted -+ * without setting outepintr and inepint bits set, so these -+ * Interrupt handlers are called without checking these bit-fields -+ */ -+ if(core_if->multiproc_int_enable) { -+ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); -+ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); -+ } -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__, -+ dwc_read_reg32(&global_regs->gintsts)); -+#endif -+ SPIN_UNLOCK(&pcd->lock); -+ } -+ S3C2410X_CLEAR_EINTPEND(); -+ -+ return retval; -+} -+ -+#endif /* DWC_HOST_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc/otg_plat.h -@@ -0,0 +1,266 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/platform/dwc_otg_plat.h $ -+ * $Revision: #23 $ -+ * $Date: 2008/07/15 $ -+ * $Change: 1064915 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ 
* "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#if !defined(__DWC_OTG_PLAT_H__) -+#define __DWC_OTG_PLAT_H__ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/* Changed all readl and writel to __raw_readl, __raw_writel */ -+ -+/** -+ * @file -+ * -+ * This file contains the Platform Specific constants, interfaces -+ * (functions and macros) for Linux. -+ * -+ */ -+//#if !defined(__LINUX_ARM_ARCH__) -+//#error "The contents of this file is Linux specific!!!" -+//#endif -+ -+/** -+ * Reads the content of a register. -+ * -+ * @param reg address of register to read. -+ * @return contents of the register. -+ * -+ -+ * Usage:
-+ * uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl);
-+ */
-+static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *reg)
-+{
-+	return __raw_readl(reg);
-+	// return readl(reg);
-+};
-+
-+/**
-+ * Writes a register with a 32 bit value.
-+ *
-+ * @param reg address of the register to write.
-+ * @param value value to be written to the register.
-+ *
-+ * Usage:
-+ * dwc_write_reg32(&dev_regs->dctl, 0);
-+ */
-+static __inline__ void dwc_write_reg32( volatile uint32_t *reg, const uint32_t value)
-+{
-+	// writel( value, reg );
-+	__raw_writel(value, reg);
-+
-+};
-+
-+/**
-+ * This function modifies bit values in a register, using the
-+ * algorithm: (reg_contents & ~clear_mask) | set_mask.
-+ *
-+ * @param reg address of the register to modify.
-+ * @param clear_mask bit mask to be cleared.
-+ * @param set_mask bit mask to be set.
-+ *
-+ * Usage:
-+ * // Clear the SOF Interrupt Mask bit and
-+ * // set the OTG Interrupt mask bit, leaving all others as they were. -+ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);
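For comparison only, and not part of the patch: the dwc_modify_reg32() call above is equivalent to an open-coded read-modify-write built from the two raw accessors defined earlier in this header. The register pointer and mask names below are taken from the comment's own example.

    uint32_t msk = dwc_read_reg32(&dev_regs->gintmsk);   /* read the current mask */
    msk &= ~DWC_SOF_INT;                                 /* clear the SOF interrupt mask bit */
    msk |= DWC_OTG_INT;                                  /* set the OTG interrupt mask bit */
    dwc_write_reg32(&dev_regs->gintmsk, msk);            /* write the new mask back */

Keeping the read-modify-write inside one helper avoids repeating this pattern at every call site, which is why the driver routes such updates through dwc_modify_reg32().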
-+ */ -+static __inline__ -+ void dwc_modify_reg32( volatile uint32_t *reg, const uint32_t clear_mask, const uint32_t set_mask) -+{ -+ // writel( (readl(reg) & ~clear_mask) | set_mask, reg ); -+ __raw_writel( (__raw_readl(reg) & ~clear_mask) | set_mask, reg ); -+}; -+ -+ -+/** -+ * Wrapper for the OS micro-second delay function. -+ * @param[in] usecs Microseconds of delay -+ */ -+static __inline__ void UDELAY( const uint32_t usecs ) -+{ -+ udelay( usecs ); -+} -+ -+/** -+ * Wrapper for the OS milli-second delay function. -+ * @param[in] msecs milliseconds of delay -+ */ -+static __inline__ void MDELAY( const uint32_t msecs ) -+{ -+ mdelay( msecs ); -+} -+ -+/** -+ * Wrapper for the Linux spin_lock. On the ARM (Integrator) -+ * spin_lock() is a nop. -+ * -+ * @param lock Pointer to the spinlock. -+ */ -+static __inline__ void SPIN_LOCK( spinlock_t *lock ) -+{ -+ spin_lock(lock); -+} -+ -+/** -+ * Wrapper for the Linux spin_unlock. On the ARM (Integrator) -+ * spin_lock() is a nop. -+ * -+ * @param lock Pointer to the spinlock. -+ */ -+static __inline__ void SPIN_UNLOCK( spinlock_t *lock ) -+{ -+ spin_unlock(lock); -+} -+ -+/** -+ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM -+ * (Integrator) spin_lock() is a nop. -+ * -+ * @param l Pointer to the spinlock. -+ * @param f unsigned long for irq flags storage. -+ */ -+#define SPIN_LOCK_IRQSAVE( l, f ) spin_lock_irqsave(l,f); -+ -+/** -+ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM -+ * (Integrator) spin_lock() is a nop. -+ * -+ * @param l Pointer to the spinlock. -+ * @param f unsigned long for irq flags storage. -+ */ -+#define SPIN_UNLOCK_IRQRESTORE( l,f ) spin_unlock_irqrestore(l,f); -+ -+/* -+ * Debugging support vanishes in non-debug builds. -+ */ -+ -+ -+/** -+ * The Debug Level bit-mask variable. -+ */ -+extern uint32_t g_dbg_lvl; -+/** -+ * Set the Debug Level variable. -+ */ -+static inline uint32_t SET_DEBUG_LEVEL( const uint32_t new ) -+{ -+ uint32_t old = g_dbg_lvl; -+ g_dbg_lvl = new; -+ return old; -+} -+ -+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */ -+#define DBG_CIL (0x2) -+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug -+ * messages */ -+#define DBG_CILV (0x20) -+/** When debug level has the DBG_PCD bit set, display PCD (Device) debug -+ * messages */ -+#define DBG_PCD (0x4) -+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug -+ * messages */ -+#define DBG_PCDV (0x40) -+/** When debug level has the DBG_HCD bit set, display Host debug messages */ -+#define DBG_HCD (0x8) -+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug -+ * messages */ -+#define DBG_HCDV (0x80) -+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host -+ * mode. */ -+#define DBG_HCD_URB (0x800) -+ -+/** When debug level has any bit set, display debug messages */ -+#define DBG_ANY (0xFF) -+ -+/** All debug messages off */ -+#define DBG_OFF 0 -+ -+/** Prefix string for DWC_DEBUG print macros. */ -+#define USB_DWC "DWC_otg: " -+ -+/** -+ * Print a debug message when the Global debug level variable contains -+ * the bit defined in lvl. -+ * -+ * @param[in] lvl - Debug level, use one of the DBG_ constants above. -+ * @param[in] x - like printf -+ * -+ * Example:

-+ * -+ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr); -+ * -+ *
-+ * results in:
-+ * -+ * usb-DWC_otg: dwc_otg_cil_init(ca867000) -+ * -+ */ -+#ifdef DEBUG -+ -+# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0) -+# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x ) -+ -+# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl) -+ -+#else -+ -+# define DWC_DEBUGPL(lvl, x...) do{}while(0) -+# define DWC_DEBUGP(x...) -+ -+# define CHK_DEBUG_LEVEL(level) (0) -+ -+#endif /*DEBUG*/ -+ -+/** -+ * Print an Error message. -+ */ -+#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x ) -+/** -+ * Print a Warning message. -+ */ -+#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x ) -+/** -+ * Print a notice (normal but significant message). -+ */ -+#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x ) -+/** -+ * Basic message printing. -+ */ -+#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x ) -+ -+#endif -+ ---- /dev/null -+++ b/drivers/usb/dwc/otg_regs.h -@@ -0,0 +1,2059 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_regs.h $ -+ * $Revision: #72 $ -+ * $Date: 2008/09/19 $ -+ * $Change: 1099526 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#ifndef __DWC_OTG_REGS_H__ -+#define __DWC_OTG_REGS_H__ -+ -+/** -+ * @file -+ * -+ * This file contains the data structures for accessing the DWC_otg core registers. -+ * -+ * The application interfaces with the HS OTG core by reading from and -+ * writing to the Control and Status Register (CSR) space through the -+ * AHB Slave interface. These registers are 32 bits wide, and the -+ * addresses are 32-bit-block aligned. 
-+ * CSRs are classified as follows: -+ * - Core Global Registers -+ * - Device Mode Registers -+ * - Device Global Registers -+ * - Device Endpoint Specific Registers -+ * - Host Mode Registers -+ * - Host Global Registers -+ * - Host Port CSRs -+ * - Host Channel Specific Registers -+ * -+ * Only the Core Global registers can be accessed in both Device and -+ * Host modes. When the HS OTG core is operating in one mode, either -+ * Device or Host, the application must not access registers from the -+ * other mode. When the core switches from one mode to another, the -+ * registers in the new mode of operation must be reprogrammed as they -+ * would be after a power-on reset. -+ */ -+ -+/** Maximum number of Periodic FIFOs */ -+#define MAX_PERIO_FIFOS 15 -+/** Maximum number of Transmit FIFOs */ -+#define MAX_TX_FIFOS 15 -+ -+/** Maximum number of Endpoints/HostChannels */ -+#define MAX_EPS_CHANNELS 16 -+ -+/****************************************************************************/ -+/** DWC_otg Core registers . -+ * The dwc_otg_core_global_regs structure defines the size -+ * and relative field offsets for the Core Global registers. -+ */ -+typedef struct dwc_otg_core_global_regs -+{ -+ /** OTG Control and Status Register. Offset: 000h */ -+ volatile uint32_t gotgctl; -+ /** OTG Interrupt Register. Offset: 004h */ -+ volatile uint32_t gotgint; -+ /**Core AHB Configuration Register. Offset: 008h */ -+ volatile uint32_t gahbcfg; -+ -+#define DWC_GLBINTRMASK 0x0001 -+#define DWC_DMAENABLE 0x0020 -+#define DWC_NPTXEMPTYLVL_EMPTY 0x0080 -+#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000 -+#define DWC_PTXEMPTYLVL_EMPTY 0x0100 -+#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000 -+ -+ /**Core USB Configuration Register. Offset: 00Ch */ -+ volatile uint32_t gusbcfg; -+ /**Core Reset Register. Offset: 010h */ -+ volatile uint32_t grstctl; -+ /**Core Interrupt Register. Offset: 014h */ -+ volatile uint32_t gintsts; -+ /**Core Interrupt Mask Register. Offset: 018h */ -+ volatile uint32_t gintmsk; -+ /**Receive Status Queue Read Register (Read Only). Offset: 01Ch */ -+ volatile uint32_t grxstsr; -+ /**Receive Status Queue Read & POP Register (Read Only). Offset: 020h*/ -+ volatile uint32_t grxstsp; -+ /**Receive FIFO Size Register. Offset: 024h */ -+ volatile uint32_t grxfsiz; -+ /**Non Periodic Transmit FIFO Size Register. Offset: 028h */ -+ volatile uint32_t gnptxfsiz; -+ /**Non Periodic Transmit FIFO/Queue Status Register (Read -+ * Only). Offset: 02Ch */ -+ volatile uint32_t gnptxsts; -+ /**I2C Access Register. Offset: 030h */ -+ volatile uint32_t gi2cctl; -+ /**PHY Vendor Control Register. Offset: 034h */ -+ volatile uint32_t gpvndctl; -+ /**General Purpose Input/Output Register. Offset: 038h */ -+ volatile uint32_t ggpio; -+ /**User ID Register. Offset: 03Ch */ -+ volatile uint32_t guid; -+ /**Synopsys ID Register (Read Only). Offset: 040h */ -+ volatile uint32_t gsnpsid; -+ /**User HW Config1 Register (Read Only). Offset: 044h */ -+ volatile uint32_t ghwcfg1; -+ /**User HW Config2 Register (Read Only). Offset: 048h */ -+ volatile uint32_t ghwcfg2; -+#define DWC_SLAVE_ONLY_ARCH 0 -+#define DWC_EXT_DMA_ARCH 1 -+#define DWC_INT_DMA_ARCH 2 -+ -+#define DWC_MODE_HNP_SRP_CAPABLE 0 -+#define DWC_MODE_SRP_ONLY_CAPABLE 1 -+#define DWC_MODE_NO_HNP_SRP_CAPABLE 2 -+#define DWC_MODE_SRP_CAPABLE_DEVICE 3 -+#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4 -+#define DWC_MODE_SRP_CAPABLE_HOST 5 -+#define DWC_MODE_NO_SRP_CAPABLE_HOST 6 -+ -+ /**User HW Config3 Register (Read Only). 
Offset: 04Ch */ -+ volatile uint32_t ghwcfg3; -+ /**User HW Config4 Register (Read Only). Offset: 050h*/ -+ volatile uint32_t ghwcfg4; -+ /** Reserved Offset: 054h-0FFh */ -+ volatile uint32_t reserved[43]; -+ /** Host Periodic Transmit FIFO Size Register. Offset: 100h */ -+ volatile uint32_t hptxfsiz; -+ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled, -+ otherwise Device Transmit FIFO#n Register. -+ * Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15). */ -+ volatile uint32_t dptxfsiz_dieptxf[15]; -+} dwc_otg_core_global_regs_t; -+ -+/** -+ * This union represents the bit fields of the Core OTG Control -+ * and Status Register (GOTGCTL). Set the bits using the bit -+ * fields then write the d32 value to the register. -+ */ -+typedef union gotgctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned sesreqscs : 1; -+ unsigned sesreq : 1; -+ unsigned reserved2_7 : 6; -+ unsigned hstnegscs : 1; -+ unsigned hnpreq : 1; -+ unsigned hstsethnpen : 1; -+ unsigned devhnpen : 1; -+ unsigned reserved12_15 : 4; -+ unsigned conidsts : 1; -+ unsigned reserved17 : 1; -+ unsigned asesvld : 1; -+ unsigned bsesvld : 1; -+ unsigned currmod : 1; -+ unsigned reserved21_31 : 11; -+ } b; -+} gotgctl_data_t; -+ -+/** -+ * This union represents the bit fields of the Core OTG Interrupt Register -+ * (GOTGINT). Set/clear the bits using the bit fields then write the d32 -+ * value to the register. -+ */ -+typedef union gotgint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Current Mode */ -+ unsigned reserved0_1 : 2; -+ -+ /** Session End Detected */ -+ unsigned sesenddet : 1; -+ -+ unsigned reserved3_7 : 5; -+ -+ /** Session Request Success Status Change */ -+ unsigned sesreqsucstschng : 1; -+ /** Host Negotiation Success Status Change */ -+ unsigned hstnegsucstschng : 1; -+ -+ unsigned reserver10_16 : 7; -+ -+ /** Host Negotiation Detected */ -+ unsigned hstnegdet : 1; -+ /** A-Device Timeout Change */ -+ unsigned adevtoutchng : 1; -+ /** Debounce Done */ -+ unsigned debdone : 1; -+ -+ unsigned reserved31_20 : 12; -+ -+ } b; -+} gotgint_data_t; -+ -+ -+/** -+ * This union represents the bit fields of the Core AHB Configuration -+ * Register (GAHBCFG). Set/clear the bits using the bit fields then -+ * write the d32 value to the register. -+ */ -+typedef union gahbcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned glblintrmsk : 1; -+#define DWC_GAHBCFG_GLBINT_ENABLE 1 -+ -+ unsigned hburstlen : 4; -+#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7 -+ -+ unsigned dmaenable : 1; -+#define DWC_GAHBCFG_DMAENABLE 1 -+ unsigned reserved : 1; -+ unsigned nptxfemplvl_txfemplvl : 1; -+ unsigned ptxfemplvl : 1; -+#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1 -+#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0 -+ unsigned reserved9_31 : 23; -+ } b; -+} gahbcfg_data_t; -+ -+/** -+ * This union represents the bit fields of the Core USB Configuration -+ * Register (GUSBCFG). Set the bits using the bit fields then write -+ * the d32 value to the register. 
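As a sketch of that bit-field/d32 convention (illustrative only, not from the patch): enabling the global interrupt line and the internal DMA engine through GAHBCFG could look like the following, where core_global_regs is an assumed pointer to the dwc_otg_core_global_regs_t block defined above.

    gahbcfg_data_t ahbcfg = { .d32 = 0 };                      /* build the value from zero */
    ahbcfg.b.glblintrmsk = DWC_GAHBCFG_GLBINT_ENABLE;          /* unmask the global interrupt */
    ahbcfg.b.dmaenable = DWC_GAHBCFG_DMAENABLE;                /* use the internal DMA engine */
    ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR4;      /* one possible AHB burst setting */
    dwc_write_reg32(&core_global_regs->gahbcfg, ahbcfg.d32);   /* commit the whole register */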
-+ */ -+typedef union gusbcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned toutcal : 3; -+ unsigned phyif : 1; -+ unsigned ulpi_utmi_sel : 1; -+ unsigned fsintf : 1; -+ unsigned physel : 1; -+ unsigned ddrsel : 1; -+ unsigned srpcap : 1; -+ unsigned hnpcap : 1; -+ unsigned usbtrdtim : 4; -+ unsigned nptxfrwnden : 1; -+ unsigned phylpwrclksel : 1; -+ unsigned otgutmifssel : 1; -+ unsigned ulpi_fsls : 1; -+ unsigned ulpi_auto_res : 1; -+ unsigned ulpi_clk_sus_m : 1; -+ unsigned ulpi_ext_vbus_drv : 1; -+ unsigned ulpi_int_vbus_indicator : 1; -+ unsigned term_sel_dl_pulse : 1; -+ unsigned reserved23_27 : 5; -+ unsigned tx_end_delay : 1; -+ unsigned reserved29_31 : 3; -+ } b; -+} gusbcfg_data_t; -+ -+/** -+ * This union represents the bit fields of the Core Reset Register -+ * (GRSTCTL). Set/clear the bits using the bit fields then write the -+ * d32 value to the register. -+ */ -+typedef union grstctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Core Soft Reset (CSftRst) (Device and Host) -+ * -+ * The application can flush the control logic in the -+ * entire core using this bit. This bit resets the -+ * pipelines in the AHB Clock domain as well as the -+ * PHY Clock domain. -+ * -+ * The state machines are reset to an IDLE state, the -+ * control bits in the CSRs are cleared, all the -+ * transmit FIFOs and the receive FIFO are flushed. -+ * -+ * The status mask bits that control the generation of -+ * the interrupt, are cleared, to clear the -+ * interrupt. The interrupt status bits are not -+ * cleared, so the application can get the status of -+ * any events that occurred in the core after it has -+ * set this bit. -+ * -+ * Any transactions on the AHB are terminated as soon -+ * as possible following the protocol. Any -+ * transactions on the USB are terminated immediately. -+ * -+ * The configuration settings in the CSRs are -+ * unchanged, so the software doesn't have to -+ * reprogram these registers (Device -+ * Configuration/Host Configuration/Core System -+ * Configuration/Core PHY Configuration). -+ * -+ * The application can write to this bit, any time it -+ * wants to reset the core. This is a self clearing -+ * bit and the core clears this bit after all the -+ * necessary logic is reset in the core, which may -+ * take several clocks, depending on the current state -+ * of the core. -+ */ -+ unsigned csftrst : 1; -+ /** Hclk Soft Reset -+ * -+ * The application uses this bit to reset the control logic in -+ * the AHB clock domain. Only AHB clock domain pipelines are -+ * reset. -+ */ -+ unsigned hsftrst : 1; -+ /** Host Frame Counter Reset (Host Only)
-+ * -+ * The application can reset the (micro)frame number -+ * counter inside the core, using this bit. When the -+ * (micro)frame counter is reset, the subsequent SOF -+ * sent out by the core, will have a (micro)frame -+ * number of 0. -+ */ -+ unsigned hstfrm : 1; -+ /** In Token Sequence Learning Queue Flush -+ * (INTknQFlsh) (Device Only) -+ */ -+ unsigned intknqflsh : 1; -+ /** RxFIFO Flush (RxFFlsh) (Device and Host) -+ * -+ * The application can flush the entire Receive FIFO -+ * using this bit.

The application must first -+ * ensure that the core is not in the middle of a -+ * transaction.

The application should write into -+ * this bit, only after making sure that neither the -+ * DMA engine is reading from the RxFIFO nor the MAC -+ * is writing the data in to the FIFO.

The -+ * application should wait until the bit is cleared -+ * before performing any other operations. This bit -+ * will takes 8 clocks (slowest of PHY or AHB clock) -+ * to clear. -+ */ -+ unsigned rxfflsh : 1; -+ /** TxFIFO Flush (TxFFlsh) (Device and Host). -+ * -+ * This bit is used to selectively flush a single or -+ * all transmit FIFOs. The application must first -+ * ensure that the core is not in the middle of a -+ * transaction.

The application should write into -+ * this bit, only after making sure that neither the -+ * DMA engine is writing into the TxFIFO nor the MAC -+ * is reading the data out of the FIFO.

The -+ * application should wait until the core clears this -+ * bit, before performing any operations. This bit -+ * will takes 8 clocks (slowest of PHY or AHB clock) -+ * to clear. -+ */ -+ unsigned txfflsh : 1; -+ /** TxFIFO Number (TxFNum) (Device and Host). -+ * -+ * This is the FIFO number which needs to be flushed, -+ * using the TxFIFO Flush bit. This field should not -+ * be changed until the TxFIFO Flush bit is cleared by -+ * the core. -+ * - 0x0 : Non Periodic TxFIFO Flush -+ * - 0x1 : Periodic TxFIFO #1 Flush in device mode -+ * or Periodic TxFIFO in host mode -+ * - 0x2 : Periodic TxFIFO #2 Flush in device mode. -+ * - ... -+ * - 0xF : Periodic TxFIFO #15 Flush in device mode -+ * - 0x10: Flush all the Transmit NonPeriodic and -+ * Transmit Periodic FIFOs in the core -+ */ -+ unsigned txfnum : 5; -+ /** Reserved */ -+ unsigned reserved11_29 : 19; -+ /** DMA Request Signal. Indicated DMA request is in -+ * probress. Used for debug purpose. */ -+ unsigned dmareq : 1; -+ /** AHB Master Idle. Indicates the AHB Master State -+ * Machine is in IDLE condition. */ -+ unsigned ahbidle : 1; -+ } b; -+} grstctl_t; -+ -+ -+/** -+ * This union represents the bit fields of the Core Interrupt Mask -+ * Register (GINTMSK). Set/clear the bits using the bit fields then -+ * write the d32 value to the register. -+ */ -+typedef union gintmsk_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned reserved0 : 1; -+ unsigned modemismatch : 1; -+ unsigned otgintr : 1; -+ unsigned sofintr : 1; -+ unsigned rxstsqlvl : 1; -+ unsigned nptxfempty : 1; -+ unsigned ginnakeff : 1; -+ unsigned goutnakeff : 1; -+ unsigned reserved8 : 1; -+ unsigned i2cintr : 1; -+ unsigned erlysuspend : 1; -+ unsigned usbsuspend : 1; -+ unsigned usbreset : 1; -+ unsigned enumdone : 1; -+ unsigned isooutdrop : 1; -+ unsigned eopframe : 1; -+ unsigned reserved16 : 1; -+ unsigned epmismatch : 1; -+ unsigned inepintr : 1; -+ unsigned outepintr : 1; -+ unsigned incomplisoin : 1; -+ unsigned incomplisoout : 1; -+ unsigned reserved22_23 : 2; -+ unsigned portintr : 1; -+ unsigned hcintr : 1; -+ unsigned ptxfempty : 1; -+ unsigned reserved27 : 1; -+ unsigned conidstschng : 1; -+ unsigned disconnect : 1; -+ unsigned sessreqintr : 1; -+ unsigned wkupintr : 1; -+ } b; -+} gintmsk_data_t; -+/** -+ * This union represents the bit fields of the Core Interrupt Register -+ * (GINTSTS). Set/clear the bits using the bit fields then write the -+ * d32 value to the register. 
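A minimal sketch of that set-then-write convention (not part of the patch), using the gintmsk_data_t union defined just above and the dwc_modify_reg32() wrapper from otg_plat.h; core_global_regs is an assumed pointer name. Here the USB reset and enumeration-done interrupts are unmasked without touching the other mask bits.

    gintmsk_data_t intr_mask = { .d32 = 0 };
    intr_mask.b.usbreset = 1;                                        /* take USB reset interrupts */
    intr_mask.b.enumdone = 1;                                        /* and enumeration-done interrupts */
    dwc_modify_reg32(&core_global_regs->gintmsk, 0, intr_mask.d32);  /* set these bits, clear none */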
-+ */ -+typedef union gintsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+#define DWC_SOF_INTR_MASK 0x0008 -+ /** register bits */ -+ struct -+ { -+#define DWC_HOST_MODE 1 -+ unsigned curmode : 1; -+ unsigned modemismatch : 1; -+ unsigned otgintr : 1; -+ unsigned sofintr : 1; -+ unsigned rxstsqlvl : 1; -+ unsigned nptxfempty : 1; -+ unsigned ginnakeff : 1; -+ unsigned goutnakeff : 1; -+ unsigned reserved8 : 1; -+ unsigned i2cintr : 1; -+ unsigned erlysuspend : 1; -+ unsigned usbsuspend : 1; -+ unsigned usbreset : 1; -+ unsigned enumdone : 1; -+ unsigned isooutdrop : 1; -+ unsigned eopframe : 1; -+ unsigned intokenrx : 1; -+ unsigned epmismatch : 1; -+ unsigned inepint: 1; -+ unsigned outepintr : 1; -+ unsigned incomplisoin : 1; -+ unsigned incomplisoout : 1; -+ unsigned reserved22_23 : 2; -+ unsigned portintr : 1; -+ unsigned hcintr : 1; -+ unsigned ptxfempty : 1; -+ unsigned reserved27 : 1; -+ unsigned conidstschng : 1; -+ unsigned disconnect : 1; -+ unsigned sessreqintr : 1; -+ unsigned wkupintr : 1; -+ } b; -+} gintsts_data_t; -+ -+ -+/** -+ * This union represents the bit fields in the Device Receive Status Read and -+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 -+ * element then read out the bits using the bit elements. -+ */ -+typedef union device_grxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned epnum : 4; -+ unsigned bcnt : 11; -+ unsigned dpid : 2; -+#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet -+#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete -+ -+#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK -+#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete -+#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet -+ unsigned pktsts : 4; -+ unsigned fn : 4; -+ unsigned reserved : 7; -+ } b; -+} device_grxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Receive Status Read and -+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 -+ * element then read out the bits using the bit elements. -+ */ -+typedef union host_grxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned chnum : 4; -+ unsigned bcnt : 11; -+ unsigned dpid : 2; -+ unsigned pktsts : 4; -+#define DWC_GRXSTS_PKTSTS_IN 0x2 -+#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3 -+#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5 -+#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7 -+ unsigned reserved : 11; -+ } b; -+} host_grxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ, -+ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the d32 element then -+ * read out the bits using the bit elements. -+ */ -+typedef union fifosize_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned startaddr : 16; -+ unsigned depth : 16; -+ } b; -+} fifosize_data_t; -+ -+/** -+ * This union represents the bit fields in the Non-Periodic Transmit -+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the -+ * d32 element then read out the bits using the bit -+ * elements. 
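Reaching back to the GRSTCTL union defined earlier, a hedged sketch of the TxFIFO flush sequence its comment describes: program TxFNum, set TxFFlsh, then poll until the core clears the bit. This is illustrative only, and core_global_regs is an assumed pointer name.

    grstctl_t greset = { .d32 = 0 };
    greset.b.txfnum = 0x10;                                    /* 0x10 flushes all transmit FIFOs */
    greset.b.txfflsh = 1;                                      /* request the flush */
    dwc_write_reg32(&core_global_regs->grstctl, greset.d32);
    do {                                                       /* the core clears TxFFlsh when done */
        greset.d32 = dwc_read_reg32(&core_global_regs->grstctl);
    } while (greset.b.txfflsh);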
-+ */ -+typedef union gnptxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned nptxfspcavail : 16; -+ unsigned nptxqspcavail : 8; -+ /** Top of the Non-Periodic Transmit Request Queue -+ * - bit 24 - Terminate (Last entry for the selected -+ * channel/EP) -+ * - bits 26:25 - Token Type -+ * - 2'b00 - IN/OUT -+ * - 2'b01 - Zero Length OUT -+ * - 2'b10 - PING/Complete Split -+ * - 2'b11 - Channel Halt -+ * - bits 30:27 - Channel/EP Number -+ */ -+ unsigned nptxqtop_terminate : 1; -+ unsigned nptxqtop_token : 2; -+ unsigned nptxqtop_chnep : 4; -+ unsigned reserved : 1; -+ } b; -+} gnptxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the Transmit -+ * FIFO Status Register (DTXFSTS). Read the register into the -+ * d32 element then read out the bits using the bit -+ * elements. -+ */ -+typedef union dtxfsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned txfspcavail : 16; -+ unsigned reserved : 16; -+ } b; -+} dtxfsts_data_t; -+ -+/** -+ * This union represents the bit fields in the I2C Control Register -+ * (I2CCTL). Read the register into the d32 element then read out the -+ * bits using the bit elements. -+ */ -+typedef union gi2cctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned rwdata : 8; -+ unsigned regaddr : 8; -+ unsigned addr : 7; -+ unsigned i2cen : 1; -+ unsigned ack : 1; -+ unsigned i2csuspctl : 1; -+ unsigned i2cdevaddr : 2; -+ unsigned reserved : 2; -+ unsigned rw : 1; -+ unsigned bsydne : 1; -+ } b; -+} gi2cctl_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config1 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. -+ */ -+typedef union hwcfg1_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned ep_dir0 : 2; -+ unsigned ep_dir1 : 2; -+ unsigned ep_dir2 : 2; -+ unsigned ep_dir3 : 2; -+ unsigned ep_dir4 : 2; -+ unsigned ep_dir5 : 2; -+ unsigned ep_dir6 : 2; -+ unsigned ep_dir7 : 2; -+ unsigned ep_dir8 : 2; -+ unsigned ep_dir9 : 2; -+ unsigned ep_dir10 : 2; -+ unsigned ep_dir11 : 2; -+ unsigned ep_dir12 : 2; -+ unsigned ep_dir13 : 2; -+ unsigned ep_dir14 : 2; -+ unsigned ep_dir15 : 2; -+ } b; -+} hwcfg1_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config2 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. 
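As an assumed example of the read-then-extract pattern for GNPTXSTS (not part of the patch), a helper could check that the non-periodic TxFIFO and its request queue have room before a packet is loaded:

    /* Illustrative helper, assumed name: can the non-periodic TxFIFO take len_bytes right now? */
    static int np_txfifo_has_space(dwc_otg_core_global_regs_t *core_global_regs, uint32_t len_bytes)
    {
        gnptxsts_data_t txstatus;
        txstatus.d32 = dwc_read_reg32(&core_global_regs->gnptxsts);  /* snapshot FIFO/queue status */
        return txstatus.b.nptxfspcavail >= (len_bytes + 3) / 4 &&    /* FIFO space is counted in 32-bit words */
               txstatus.b.nptxqspcavail > 0;                         /* and a request queue slot is free */
    }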
-+ */ -+typedef union hwcfg2_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /* GHWCFG2 */ -+ unsigned op_mode : 3; -+#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0 -+#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1 -+#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2 -+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 -+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 -+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 -+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 -+ -+ unsigned architecture : 2; -+ unsigned point2point : 1; -+ unsigned hs_phy_type : 2; -+#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 -+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1 -+#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2 -+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 -+ -+ unsigned fs_phy_type : 2; -+ unsigned num_dev_ep : 4; -+ unsigned num_host_chan : 4; -+ unsigned perio_ep_supported : 1; -+ unsigned dynamic_fifo : 1; -+ unsigned multi_proc_int : 1; -+ unsigned reserved21 : 1; -+ unsigned nonperio_tx_q_depth : 2; -+ unsigned host_perio_tx_q_depth : 2; -+ unsigned dev_token_q_depth : 5; -+ unsigned reserved31 : 1; -+ } b; -+} hwcfg2_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config3 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. -+ */ -+typedef union hwcfg3_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /* GHWCFG3 */ -+ unsigned xfer_size_cntr_width : 4; -+ unsigned packet_size_cntr_width : 3; -+ unsigned otg_func : 1; -+ unsigned i2c : 1; -+ unsigned vendor_ctrl_if : 1; -+ unsigned optional_features : 1; -+ unsigned synch_reset_type : 1; -+ unsigned ahb_phy_clock_synch : 1; -+ unsigned reserved15_13 : 3; -+ unsigned dfifo_depth : 16; -+ } b; -+} hwcfg3_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config4 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. -+ */ -+typedef union hwcfg4_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned num_dev_perio_in_ep : 4; -+ unsigned power_optimiz : 1; -+ unsigned min_ahb_freq : 9; -+ unsigned utmi_phy_data_width : 2; -+ unsigned num_dev_mode_ctrl_ep : 4; -+ unsigned iddig_filt_en : 1; -+ unsigned vbus_valid_filt_en : 1; -+ unsigned a_valid_filt_en : 1; -+ unsigned b_valid_filt_en : 1; -+ unsigned session_end_filt_en : 1; -+ unsigned ded_fifo_en : 1; -+ unsigned num_in_eps : 4; -+ unsigned desc_dma : 1; -+ unsigned desc_dma_dyn : 1; -+ } b; -+} hwcfg4_data_t; -+ -+//////////////////////////////////////////// -+// Device Registers -+/** -+ * Device Global Registers. Offsets 800h-BFFh -+ * -+ * The following structures define the size and relative field offsets -+ * for the Device Mode Registers. -+ * -+ * These registers are visible only in Device mode and must not be -+ * accessed in Host mode, as the results are unknown. -+ */ -+typedef struct dwc_otg_dev_global_regs -+{ -+ /** Device Configuration Register. Offset 800h */ -+ volatile uint32_t dcfg; -+ /** Device Control Register. Offset: 804h */ -+ volatile uint32_t dctl; -+ /** Device Status Register (Read Only). Offset: 808h */ -+ volatile uint32_t dsts; -+ /** Reserved. Offset: 80Ch */ -+ uint32_t unused; -+ /** Device IN Endpoint Common Interrupt Mask -+ * Register. Offset: 810h */ -+ volatile uint32_t diepmsk; -+ /** Device OUT Endpoint Common Interrupt Mask -+ * Register. 
Offset: 814h */ -+ volatile uint32_t doepmsk; -+ /** Device All Endpoints Interrupt Register. Offset: 818h */ -+ volatile uint32_t daint; -+ /** Device All Endpoints Interrupt Mask Register. Offset: -+ * 81Ch */ -+ volatile uint32_t daintmsk; -+ /** Device IN Token Queue Read Register-1 (Read Only). -+ * Offset: 820h */ -+ volatile uint32_t dtknqr1; -+ /** Device IN Token Queue Read Register-2 (Read Only). -+ * Offset: 824h */ -+ volatile uint32_t dtknqr2; -+ /** Device VBUS discharge Register. Offset: 828h */ -+ volatile uint32_t dvbusdis; -+ /** Device VBUS Pulse Register. Offset: 82Ch */ -+ volatile uint32_t dvbuspulse; -+ /** Device IN Token Queue Read Register-3 (Read Only). / -+ * Device Thresholding control register (Read/Write) -+ * Offset: 830h */ -+ volatile uint32_t dtknqr3_dthrctl; -+ /** Device IN Token Queue Read Register-4 (Read Only). / -+ * Device IN EPs empty Inr. Mask Register (Read/Write) -+ * Offset: 834h */ -+ volatile uint32_t dtknqr4_fifoemptymsk; -+ /** Device Each Endpoint Interrupt Register (Read Only). / -+ * Offset: 838h */ -+ volatile uint32_t deachint; -+ /** Device Each Endpoint Interrupt mask Register (Read/Write). / -+ * Offset: 83Ch */ -+ volatile uint32_t deachintmsk; -+ /** Device Each In Endpoint Interrupt mask Register (Read/Write). / -+ * Offset: 840h */ -+ volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS]; -+ /** Device Each Out Endpoint Interrupt mask Register (Read/Write). / -+ * Offset: 880h */ -+ volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS]; -+} dwc_otg_device_global_regs_t; -+ -+/** -+ * This union represents the bit fields in the Device Configuration -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. Write the -+ * d32 member to the dcfg register. -+ */ -+typedef union dcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Device Speed */ -+ unsigned devspd : 2; -+ /** Non Zero Length Status OUT Handshake */ -+ unsigned nzstsouthshk : 1; -+#define DWC_DCFG_SEND_STALL 1 -+ -+ unsigned reserved3 : 1; -+ /** Device Addresses */ -+ unsigned devaddr : 7; -+ /** Periodic Frame Interval */ -+ unsigned perfrint : 2; -+#define DWC_DCFG_FRAME_INTERVAL_80 0 -+#define DWC_DCFG_FRAME_INTERVAL_85 1 -+#define DWC_DCFG_FRAME_INTERVAL_90 2 -+#define DWC_DCFG_FRAME_INTERVAL_95 3 -+ -+ unsigned reserved13_17 : 5; -+ /** In Endpoint Mis-match count */ -+ unsigned epmscnt : 5; -+ /** Enable Descriptor DMA in Device mode */ -+ unsigned descdma : 1; -+ } b; -+} dcfg_data_t; -+ -+/** -+ * This union represents the bit fields in the Device Control -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. 
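An assumed illustration of the dcfg_data_t union defined above (not taken from the patch): programming the address received in a SET_ADDRESS request into DCFG without disturbing the other configuration fields.

    /* Illustrative helper, assumed name: latch the address assigned by SET_ADDRESS. */
    static void set_device_address(dwc_otg_device_global_regs_t *dev_global_regs, uint8_t addr)
    {
        dcfg_data_t dcfg;
        dcfg.d32 = dwc_read_reg32(&dev_global_regs->dcfg);  /* keep the other DCFG fields intact */
        dcfg.b.devaddr = addr;                              /* 7-bit device address */
        dwc_write_reg32(&dev_global_regs->dcfg, dcfg.d32);
    }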
-+ */ -+typedef union dctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Remote Wakeup */ -+ unsigned rmtwkupsig : 1; -+ /** Soft Disconnect */ -+ unsigned sftdiscon : 1; -+ /** Global Non-Periodic IN NAK Status */ -+ unsigned gnpinnaksts : 1; -+ /** Global OUT NAK Status */ -+ unsigned goutnaksts : 1; -+ /** Test Control */ -+ unsigned tstctl : 3; -+ /** Set Global Non-Periodic IN NAK */ -+ unsigned sgnpinnak : 1; -+ /** Clear Global Non-Periodic IN NAK */ -+ unsigned cgnpinnak : 1; -+ /** Set Global OUT NAK */ -+ unsigned sgoutnak : 1; -+ /** Clear Global OUT NAK */ -+ unsigned cgoutnak : 1; -+ -+ /** Power-On Programming Done */ -+ unsigned pwronprgdone : 1; -+ /** Global Continue on BNA */ -+ unsigned gcontbna : 1; -+ /** Global Multi Count */ -+ unsigned gmc : 2; -+ /** Ignore Frame Number for ISOC EPs */ -+ unsigned ifrmnum : 1; -+ /** NAK on Babble */ -+ unsigned nakonbble : 1; -+ -+ unsigned reserved16_31 : 16; -+ } b; -+} dctl_data_t; -+ -+/** -+ * This union represents the bit fields in the Device Status -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. -+ */ -+typedef union dsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Suspend Status */ -+ unsigned suspsts : 1; -+ /** Enumerated Speed */ -+ unsigned enumspd : 2; -+#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0 -+#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1 -+#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2 -+#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3 -+ /** Erratic Error */ -+ unsigned errticerr : 1; -+ unsigned reserved4_7: 4; -+ /** Frame or Microframe Number of the received SOF */ -+ unsigned soffn : 14; -+ unsigned reserved22_31 : 10; -+ } b; -+} dsts_data_t; -+ -+ -+/** -+ * This union represents the bit fields in the Device IN EP Interrupt -+ * Register and the Device IN EP Common Mask Register. -+ * -+ * - Read the register into the d32 member then set/clear the -+ * bits using the bit elements. -+ */ -+typedef union diepint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Transfer complete mask */ -+ unsigned xfercompl : 1; -+ /** Endpoint disable mask */ -+ unsigned epdisabled : 1; -+ /** AHB Error mask */ -+ unsigned ahberr : 1; -+ /** TimeOUT Handshake mask (non-ISOC EPs) */ -+ unsigned timeout : 1; -+ /** IN Token received with TxF Empty mask */ -+ unsigned intktxfemp : 1; -+ /** IN Token Received with EP mismatch mask */ -+ unsigned intknepmis : 1; -+ /** IN Endpoint HAK Effective mask */ -+ unsigned inepnakeff : 1; -+ /** IN Endpoint HAK Effective mask */ -+ unsigned emptyintr : 1; -+ unsigned txfifoundrn : 1; -+ -+ /** BNA Interrupt mask */ -+ unsigned bna : 1; -+ unsigned reserved10_12 : 3; -+ /** BNA Interrupt mask */ -+ unsigned nak : 1; -+ unsigned reserved14_31 : 18; -+ } b; -+} diepint_data_t; -+ -+/** -+ * This union represents the bit fields in the Device IN EP -+ * Common/Dedicated Interrupt Mask Register. -+ */ -+typedef union diepint_data diepmsk_data_t; -+ -+/** -+ * This union represents the bit fields in the Device OUT EP Interrupt -+ * Registerand Device OUT EP Common Interrupt Mask Register. -+ * -+ * - Read the register into the d32 member then set/clear the -+ * bits using the bit elements. 
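A hedged sketch built on the dctl_data_t union above: toggling the soft-disconnect bit is the usual way for a device-mode driver to drop off the bus and force re-enumeration. The helper name and the delay value are assumptions, not part of the patch.

    /* Illustrative helper, assumed name: detach from the bus, wait, then reconnect. */
    static void soft_reconnect(dwc_otg_device_global_regs_t *dev_global_regs)
    {
        dctl_data_t dctl = { .d32 = 0 };
        dctl.b.sftdiscon = 1;
        dwc_modify_reg32(&dev_global_regs->dctl, 0, dctl.d32);   /* set SftDiscon: device disconnects */
        MDELAY(100);                                             /* give the host time to notice */
        dwc_modify_reg32(&dev_global_regs->dctl, dctl.d32, 0);   /* clear it: device reconnects */
    }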
-+ */ -+typedef union doepint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Transfer complete */ -+ unsigned xfercompl : 1; -+ /** Endpoint disable */ -+ unsigned epdisabled : 1; -+ /** AHB Error */ -+ unsigned ahberr : 1; -+ /** Setup Phase Done (contorl EPs) */ -+ unsigned setup : 1; -+ /** OUT Token Received when Endpoint Disabled */ -+ unsigned outtknepdis : 1; -+ unsigned stsphsercvd : 1; -+ /** Back-to-Back SETUP Packets Received */ -+ unsigned back2backsetup : 1; -+ unsigned reserved7 : 1; -+ /** OUT packet Error */ -+ unsigned outpkterr : 1; -+ /** BNA Interrupt */ -+ unsigned bna : 1; -+ unsigned reserved10 : 1; -+ /** Packet Drop Status */ -+ unsigned pktdrpsts : 1; -+ /** Babble Interrupt */ -+ unsigned babble : 1; -+ /** NAK Interrupt */ -+ unsigned nak : 1; -+ /** NYET Interrupt */ -+ unsigned nyet : 1; -+ -+ unsigned reserved15_31 : 17; -+ } b; -+} doepint_data_t; -+ -+/** -+ * This union represents the bit fields in the Device OUT EP -+ * Common/Dedicated Interrupt Mask Register. -+ */ -+typedef union doepint_data doepmsk_data_t; -+ -+/** -+ * This union represents the bit fields in the Device All EP Interrupt -+ * and Mask Registers. -+ * - Read the register into the d32 member then set/clear the -+ * bits using the bit elements. -+ */ -+typedef union daint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** IN Endpoint bits */ -+ unsigned in : 16; -+ /** OUT Endpoint bits */ -+ unsigned out : 16; -+ } ep; -+ struct -+ { -+ /** IN Endpoint bits */ -+ unsigned inep0 : 1; -+ unsigned inep1 : 1; -+ unsigned inep2 : 1; -+ unsigned inep3 : 1; -+ unsigned inep4 : 1; -+ unsigned inep5 : 1; -+ unsigned inep6 : 1; -+ unsigned inep7 : 1; -+ unsigned inep8 : 1; -+ unsigned inep9 : 1; -+ unsigned inep10 : 1; -+ unsigned inep11 : 1; -+ unsigned inep12 : 1; -+ unsigned inep13 : 1; -+ unsigned inep14 : 1; -+ unsigned inep15 : 1; -+ /** OUT Endpoint bits */ -+ unsigned outep0 : 1; -+ unsigned outep1 : 1; -+ unsigned outep2 : 1; -+ unsigned outep3 : 1; -+ unsigned outep4 : 1; -+ unsigned outep5 : 1; -+ unsigned outep6 : 1; -+ unsigned outep7 : 1; -+ unsigned outep8 : 1; -+ unsigned outep9 : 1; -+ unsigned outep10 : 1; -+ unsigned outep11 : 1; -+ unsigned outep12 : 1; -+ unsigned outep13 : 1; -+ unsigned outep14 : 1; -+ unsigned outep15 : 1; -+ } b; -+} daint_data_t; -+ -+/** -+ * This union represents the bit fields in the Device IN Token Queue -+ * Read Registers. -+ * - Read the register into the d32 member. -+ * - READ-ONLY Register -+ */ -+typedef union dtknq1_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** In Token Queue Write Pointer */ -+ unsigned intknwptr : 5; -+ /** Reserved */ -+ unsigned reserved05_06 : 2; -+ /** write pointer has wrapped. */ -+ unsigned wrap_bit : 1; -+ /** EP Numbers of IN Tokens 0 ... 4 */ -+ unsigned epnums0_5 : 24; -+ }b; -+} dtknq1_data_t; -+ -+/** -+ * This union represents Threshold control Register -+ * - Read and write the register into the d32 member. -+ * - READ-WRITABLE Register -+ */ -+typedef union dthrctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** non ISO Tx Thr. Enable */ -+ unsigned non_iso_thr_en : 1; -+ /** ISO Tx Thr. Enable */ -+ unsigned iso_thr_en : 1; -+ /** Tx Thr. Length */ -+ unsigned tx_thr_len : 9; -+ /** Reserved */ -+ unsigned reserved11_15 : 5; -+ /** Rx Thr. Enable */ -+ unsigned rx_thr_en : 1; -+ /** Rx Thr. 
Length */ -+ unsigned rx_thr_len : 9; -+ /** Reserved */ -+ unsigned reserved26_31 : 6; -+ }b; -+} dthrctl_data_t; -+ -+ -+/** -+ * Device Logical IN Endpoint-Specific Registers. Offsets -+ * 900h-AFCh -+ * -+ * There will be one set of endpoint registers per logical endpoint -+ * implemented. -+ * -+ * These registers are visible only in Device mode and must not be -+ * accessed in Host mode, as the results are unknown. -+ */ -+typedef struct dwc_otg_dev_in_ep_regs -+{ -+ /** Device IN Endpoint Control Register. Offset:900h + -+ * (ep_num * 20h) + 00h */ -+ volatile uint32_t diepctl; -+ /** Reserved. Offset:900h + (ep_num * 20h) + 04h */ -+ uint32_t reserved04; -+ /** Device IN Endpoint Interrupt Register. Offset:900h + -+ * (ep_num * 20h) + 08h */ -+ volatile uint32_t diepint; -+ /** Reserved. Offset:900h + (ep_num * 20h) + 0Ch */ -+ uint32_t reserved0C; -+ /** Device IN Endpoint Transfer Size -+ * Register. Offset:900h + (ep_num * 20h) + 10h */ -+ volatile uint32_t dieptsiz; -+ /** Device IN Endpoint DMA Address Register. Offset:900h + -+ * (ep_num * 20h) + 14h */ -+ volatile uint32_t diepdma; -+ /** Device IN Endpoint Transmit FIFO Status Register. Offset:900h + -+ * (ep_num * 20h) + 18h */ -+ volatile uint32_t dtxfsts; -+ /** Device IN Endpoint DMA Buffer Register. Offset:900h + -+ * (ep_num * 20h) + 1Ch */ -+ volatile uint32_t diepdmab; -+} dwc_otg_dev_in_ep_regs_t; -+ -+/** -+ * Device Logical OUT Endpoint-Specific Registers. Offsets: -+ * B00h-CFCh -+ * -+ * There will be one set of endpoint registers per logical endpoint -+ * implemented. -+ * -+ * These registers are visible only in Device mode and must not be -+ * accessed in Host mode, as the results are unknown. -+ */ -+typedef struct dwc_otg_dev_out_ep_regs -+{ -+ /** Device OUT Endpoint Control Register. Offset:B00h + -+ * (ep_num * 20h) + 00h */ -+ volatile uint32_t doepctl; -+ /** Device OUT Endpoint Frame number Register. Offset: -+ * B00h + (ep_num * 20h) + 04h */ -+ volatile uint32_t doepfn; -+ /** Device OUT Endpoint Interrupt Register. Offset:B00h + -+ * (ep_num * 20h) + 08h */ -+ volatile uint32_t doepint; -+ /** Reserved. Offset:B00h + (ep_num * 20h) + 0Ch */ -+ uint32_t reserved0C; -+ /** Device OUT Endpoint Transfer Size Register. Offset: -+ * B00h + (ep_num * 20h) + 10h */ -+ volatile uint32_t doeptsiz; -+ /** Device OUT Endpoint DMA Address Register. Offset:B00h -+ * + (ep_num * 20h) + 14h */ -+ volatile uint32_t doepdma; -+ /** Reserved. Offset:B00h + * (ep_num * 20h) + 1Ch */ -+ uint32_t unused; -+ /** Device OUT Endpoint DMA Buffer Register. Offset:B00h -+ * + (ep_num * 20h) + 1Ch */ -+ uint32_t doepdmab; -+} dwc_otg_dev_out_ep_regs_t; -+ -+/** -+ * This union represents the bit fields in the Device EP Control -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. 
-+ */ -+typedef union depctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Maximum Packet Size -+ * IN/OUT EPn -+ * IN/OUT EP0 - 2 bits -+ * 2'b00: 64 Bytes -+ * 2'b01: 32 -+ * 2'b10: 16 -+ * 2'b11: 8 */ -+ unsigned mps : 11; -+#define DWC_DEP0CTL_MPS_64 0 -+#define DWC_DEP0CTL_MPS_32 1 -+#define DWC_DEP0CTL_MPS_16 2 -+#define DWC_DEP0CTL_MPS_8 3 -+ -+ /** Next Endpoint -+ * IN EPn/IN EP0 -+ * OUT EPn/OUT EP0 - reserved */ -+ unsigned nextep : 4; -+ -+ /** USB Active Endpoint */ -+ unsigned usbactep : 1; -+ -+ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints) -+ * This field contains the PID of the packet going to -+ * be received or transmitted on this endpoint. The -+ * application should program the PID of the first -+ * packet going to be received or transmitted on this -+ * endpoint , after the endpoint is -+ * activated. Application use the SetD1PID and -+ * SetD0PID fields of this register to program either -+ * D0 or D1 PID. -+ * -+ * The encoding for this field is -+ * - 0: D0 -+ * - 1: D1 -+ */ -+ unsigned dpid : 1; -+ -+ /** NAK Status */ -+ unsigned naksts : 1; -+ -+ /** Endpoint Type -+ * 2'b00: Control -+ * 2'b01: Isochronous -+ * 2'b10: Bulk -+ * 2'b11: Interrupt */ -+ unsigned eptype : 2; -+ -+ /** Snoop Mode -+ * OUT EPn/OUT EP0 -+ * IN EPn/IN EP0 - reserved */ -+ unsigned snp : 1; -+ -+ /** Stall Handshake */ -+ unsigned stall : 1; -+ -+ /** Tx Fifo Number -+ * IN EPn/IN EP0 -+ * OUT EPn/OUT EP0 - reserved */ -+ unsigned txfnum : 4; -+ -+ /** Clear NAK */ -+ unsigned cnak : 1; -+ /** Set NAK */ -+ unsigned snak : 1; -+ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints) -+ * Writing to this field sets the Endpoint DPID (DPID) -+ * field in this register to DATA0. Set Even -+ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints) -+ * Writing to this field sets the Even/Odd -+ * (micro)frame (EO_FrNum) field to even (micro) -+ * frame. -+ */ -+ unsigned setd0pid : 1; -+ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints) -+ * Writing to this field sets the Endpoint DPID (DPID) -+ * field in this register to DATA1 Set Odd -+ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints) -+ * Writing to this field sets the Even/Odd -+ * (micro)frame (EO_FrNum) field to odd (micro) frame. -+ */ -+ unsigned setd1pid : 1; -+ /** Endpoint Disable */ -+ unsigned epdis : 1; -+ /** Endpoint Enable */ -+ unsigned epena : 1; -+ } b; -+} depctl_data_t; -+ -+/** -+ * This union represents the bit fields in the Device EP Transfer -+ * Size Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. -+ */ -+typedef union deptsiz_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct { -+ /** Transfer size */ -+ unsigned xfersize : 19; -+ /** Packet Count */ -+ unsigned pktcnt : 10; -+ /** Multi Count - Periodic IN endpoints */ -+ unsigned mc : 2; -+ unsigned reserved : 1; -+ } b; -+} deptsiz_data_t; -+ -+/** -+ * This union represents the bit fields in the Device EP 0 Transfer -+ * Size Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. 
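An assumed sketch combining the depctl_data_t and deptsiz_data_t unions above: arming an IN endpoint for a single-packet transfer by programming DIEPTSIZn and then enabling the endpoint through DIEPCTLn. DMA address programming and error handling are omitted, and none of this is lifted from the patch.

    /* Illustrative helper, assumed name: start one packet of xfer_len bytes on an IN endpoint. */
    static void start_in_transfer(dwc_otg_dev_in_ep_regs_t *in_ep_regs, uint32_t xfer_len)
    {
        deptsiz_data_t deptsiz = { .d32 = 0 };
        depctl_data_t depctl;

        deptsiz.b.xfersize = xfer_len;                       /* total bytes for this transfer */
        deptsiz.b.pktcnt = 1;                                /* a single packet in this sketch */
        dwc_write_reg32(&in_ep_regs->dieptsiz, deptsiz.d32);

        depctl.d32 = dwc_read_reg32(&in_ep_regs->diepctl);   /* keep MPS, type, FIFO number, ... */
        depctl.b.cnak = 1;                                   /* clear NAK so IN tokens are answered */
        depctl.b.epena = 1;                                  /* enable the endpoint */
        dwc_write_reg32(&in_ep_regs->diepctl, depctl.d32);
    }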
-+ */ -+typedef union deptsiz0_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct { -+ /** Transfer size */ -+ unsigned xfersize : 7; -+ /** Reserved */ -+ unsigned reserved7_18 : 12; -+ /** Packet Count */ -+ unsigned pktcnt : 1; -+ /** Reserved */ -+ unsigned reserved20_28 : 9; -+ /**Setup Packet Count (DOEPTSIZ0 Only) */ -+ unsigned supcnt : 2; -+ unsigned reserved31; -+ } b; -+} deptsiz0_data_t; -+ -+ -+///////////////////////////////////////////////// -+// DMA Descriptor Specific Structures -+// -+ -+/** Buffer status definitions */ -+ -+#define BS_HOST_READY 0x0 -+#define BS_DMA_BUSY 0x1 -+#define BS_DMA_DONE 0x2 -+#define BS_HOST_BUSY 0x3 -+ -+/** Receive/Transmit status definitions */ -+ -+#define RTS_SUCCESS 0x0 -+#define RTS_BUFFLUSH 0x1 -+#define RTS_RESERVED 0x2 -+#define RTS_BUFERR 0x3 -+ -+ -+/** -+ * This union represents the bit fields in the DMA Descriptor -+ * status quadlet. Read the quadlet into the d32 member then -+ * set/clear the bits using the bit, b_iso_out and -+ * b_iso_in elements. -+ */ -+typedef union desc_sts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** quadlet bits */ -+ struct { -+ /** Received number of bytes */ -+ unsigned bytes : 16; -+ -+ unsigned reserved16_22 : 7; -+ /** Multiple Transfer - only for OUT EPs */ -+ unsigned mtrf : 1; -+ /** Setup Packet received - only for OUT EPs */ -+ unsigned sr : 1; -+ /** Interrupt On Complete */ -+ unsigned ioc : 1; -+ /** Short Packet */ -+ unsigned sp : 1; -+ /** Last */ -+ unsigned l : 1; -+ /** Receive Status */ -+ unsigned sts : 2; -+ /** Buffer Status */ -+ unsigned bs : 2; -+ } b; -+ -+#ifdef DWC_EN_ISOC -+ /** iso out quadlet bits */ -+ struct { -+ /** Received number of bytes */ -+ unsigned rxbytes : 11; -+ -+ unsigned reserved11 : 1; -+ /** Frame Number */ -+ unsigned framenum : 11; -+ /** Received ISO Data PID */ -+ unsigned pid : 2; -+ /** Interrupt On Complete */ -+ unsigned ioc : 1; -+ /** Short Packet */ -+ unsigned sp : 1; -+ /** Last */ -+ unsigned l : 1; -+ /** Receive Status */ -+ unsigned rxsts : 2; -+ /** Buffer Status */ -+ unsigned bs : 2; -+ } b_iso_out; -+ -+ /** iso in quadlet bits */ -+ struct { -+ /** Transmited number of bytes */ -+ unsigned txbytes : 12; -+ /** Frame Number */ -+ unsigned framenum : 11; -+ /** Transmited ISO Data PID */ -+ unsigned pid : 2; -+ /** Interrupt On Complete */ -+ unsigned ioc : 1; -+ /** Short Packet */ -+ unsigned sp : 1; -+ /** Last */ -+ unsigned l : 1; -+ /** Transmit Status */ -+ unsigned txsts : 2; -+ /** Buffer Status */ -+ unsigned bs : 2; -+ } b_iso_in; -+#endif //DWC_EN_ISOC -+} desc_sts_data_t; -+ -+/** -+ * DMA Descriptor structure -+ * -+ * DMA Descriptor structure contains two quadlets: -+ * Status quadlet and Data buffer pointer. -+ */ -+typedef struct dwc_otg_dma_desc -+{ -+ /** DMA Descriptor status quadlet */ -+ desc_sts_data_t status; -+ /** DMA Descriptor data buffer pointer */ -+ dma_addr_t buf; -+} dwc_otg_dma_desc_t; -+ -+/** -+ * The dwc_otg_dev_if structure contains information needed to manage -+ * the DWC_otg controller acting in device mode. It represents the -+ * programming view of the device-specific aspects of the controller. -+ */ -+typedef struct dwc_otg_dev_if -+{ -+ /** Pointer to device Global registers. 
-+ * Device Global Registers starting at offset 800h -+ */ -+ dwc_otg_device_global_regs_t *dev_global_regs; -+#define DWC_DEV_GLOBAL_REG_OFFSET 0x800 -+ -+ /** -+ * Device Logical IN Endpoint-Specific Registers 900h-AFCh -+ */ -+ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; -+#define DWC_DEV_IN_EP_REG_OFFSET 0x900 -+#define DWC_EP_REG_OFFSET 0x20 -+ -+ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */ -+ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS]; -+#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00 -+ -+ /* Device configuration information*/ -+ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */ -+ uint8_t num_in_eps; /**< Number # of Tx EP range: 0-15 exept ep0 */ -+ uint8_t num_out_eps; /**< Number # of Rx EP range: 0-15 exept ep 0*/ -+ -+ /** Size of periodic FIFOs (Bytes) */ -+ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS]; -+ -+ /** Size of Tx FIFOs (Bytes) */ -+ uint16_t tx_fifo_size[MAX_TX_FIFOS]; -+ -+ /** Thresholding enable flags and length varaiables **/ -+ uint16_t rx_thr_en; -+ uint16_t iso_tx_thr_en; -+ uint16_t non_iso_tx_thr_en; -+ -+ uint16_t rx_thr_length; -+ uint16_t tx_thr_length; -+ -+ /** -+ * Pointers to the DMA Descriptors for EP0 Control -+ * transfers (virtual and physical) -+ */ -+ /** 2 descriptors for SETUP packets */ -+ uint32_t dma_setup_desc_addr[2]; -+ dwc_otg_dma_desc_t* setup_desc_addr[2]; -+ -+ /** Pointer to Descriptor with latest SETUP packet */ -+ dwc_otg_dma_desc_t* psetup; -+ -+ /** Index of current SETUP handler descriptor */ -+ uint32_t setup_desc_index; -+ -+ /** Descriptor for Data In or Status In phases */ -+ uint32_t dma_in_desc_addr; -+ dwc_otg_dma_desc_t* in_desc_addr;; -+ -+ /** Descriptor for Data Out or Status Out phases */ -+ uint32_t dma_out_desc_addr; -+ dwc_otg_dma_desc_t* out_desc_addr; -+} dwc_otg_dev_if_t; -+ -+ -+ -+ -+///////////////////////////////////////////////// -+// Host Mode Register Structures -+// -+/** -+ * The Host Global Registers structure defines the size and relative -+ * field offsets for the Host Mode Global Registers. Host Global -+ * Registers offsets 400h-7FFh. -+*/ -+typedef struct dwc_otg_host_global_regs -+{ -+ /** Host Configuration Register. Offset: 400h */ -+ volatile uint32_t hcfg; -+ /** Host Frame Interval Register. Offset: 404h */ -+ volatile uint32_t hfir; -+ /** Host Frame Number / Frame Remaining Register. Offset: 408h */ -+ volatile uint32_t hfnum; -+ /** Reserved. Offset: 40Ch */ -+ uint32_t reserved40C; -+ /** Host Periodic Transmit FIFO/ Queue Status Register. Offset: 410h */ -+ volatile uint32_t hptxsts; -+ /** Host All Channels Interrupt Register. Offset: 414h */ -+ volatile uint32_t haint; -+ /** Host All Channels Interrupt Mask Register. Offset: 418h */ -+ volatile uint32_t haintmsk; -+} dwc_otg_host_global_regs_t; -+ -+/** -+ * This union represents the bit fields in the Host Configuration Register. -+ * Read the register into the d32 member then set/clear the bits using -+ * the bit elements. Write the d32 member to the hcfg register. -+ */ -+typedef union hcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** FS/LS Phy Clock Select */ -+ unsigned fslspclksel : 2; -+#define DWC_HCFG_30_60_MHZ 0 -+#define DWC_HCFG_48_MHZ 1 -+#define DWC_HCFG_6_MHZ 2 -+ -+ /** FS/LS Only Support */ -+ unsigned fslssupp : 1; -+ } b; -+} hcfg_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Frame Remaing/Number -+ * Register. 
-+ */ -+typedef union hfir_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned frint : 16; -+ unsigned reserved : 16; -+ } b; -+} hfir_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Frame Remaing/Number -+ * Register. -+ */ -+typedef union hfnum_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned frnum : 16; -+#define DWC_HFNUM_MAX_FRNUM 0x3FFF -+ unsigned frrem : 16; -+ } b; -+} hfnum_data_t; -+ -+typedef union hptxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned ptxfspcavail : 16; -+ unsigned ptxqspcavail : 8; -+ /** Top of the Periodic Transmit Request Queue -+ * - bit 24 - Terminate (last entry for the selected channel) -+ * - bits 26:25 - Token Type -+ * - 2'b00 - Zero length -+ * - 2'b01 - Ping -+ * - 2'b10 - Disable -+ * - bits 30:27 - Channel Number -+ * - bit 31 - Odd/even microframe -+ */ -+ unsigned ptxqtop_terminate : 1; -+ unsigned ptxqtop_token : 2; -+ unsigned ptxqtop_chnum : 4; -+ unsigned ptxqtop_odd : 1; -+ } b; -+} hptxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Port Control and Status -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hprt0 register. -+ */ -+typedef union hprt0_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned prtconnsts : 1; -+ unsigned prtconndet : 1; -+ unsigned prtena : 1; -+ unsigned prtenchng : 1; -+ unsigned prtovrcurract : 1; -+ unsigned prtovrcurrchng : 1; -+ unsigned prtres : 1; -+ unsigned prtsusp : 1; -+ unsigned prtrst : 1; -+ unsigned reserved9 : 1; -+ unsigned prtlnsts : 2; -+ unsigned prtpwr : 1; -+ unsigned prttstctl : 4; -+ unsigned prtspd : 2; -+#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0 -+#define DWC_HPRT0_PRTSPD_FULL_SPEED 1 -+#define DWC_HPRT0_PRTSPD_LOW_SPEED 2 -+ unsigned reserved19_31 : 13; -+ } b; -+} hprt0_data_t; -+ -+/** -+ * This union represents the bit fields in the Host All Interrupt -+ * Register. -+ */ -+typedef union haint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned ch0 : 1; -+ unsigned ch1 : 1; -+ unsigned ch2 : 1; -+ unsigned ch3 : 1; -+ unsigned ch4 : 1; -+ unsigned ch5 : 1; -+ unsigned ch6 : 1; -+ unsigned ch7 : 1; -+ unsigned ch8 : 1; -+ unsigned ch9 : 1; -+ unsigned ch10 : 1; -+ unsigned ch11 : 1; -+ unsigned ch12 : 1; -+ unsigned ch13 : 1; -+ unsigned ch14 : 1; -+ unsigned ch15 : 1; -+ unsigned reserved : 16; -+ } b; -+ -+ struct -+ { -+ unsigned chint : 16; -+ unsigned reserved : 16; -+ } b2; -+} haint_data_t; -+ -+/** -+ * This union represents the bit fields in the Host All Interrupt -+ * Register. -+ */ -+typedef union haintmsk_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned ch0 : 1; -+ unsigned ch1 : 1; -+ unsigned ch2 : 1; -+ unsigned ch3 : 1; -+ unsigned ch4 : 1; -+ unsigned ch5 : 1; -+ unsigned ch6 : 1; -+ unsigned ch7 : 1; -+ unsigned ch8 : 1; -+ unsigned ch9 : 1; -+ unsigned ch10 : 1; -+ unsigned ch11 : 1; -+ unsigned ch12 : 1; -+ unsigned ch13 : 1; -+ unsigned ch14 : 1; -+ unsigned ch15 : 1; -+ unsigned reserved : 16; -+ } b; -+ -+ struct -+ { -+ unsigned chint : 16; -+ unsigned reserved : 16; -+ } b2; -+} haintmsk_data_t; -+ -+/** -+ * Host Channel Specific Registers. 
500h-5FCh -+ */ -+typedef struct dwc_otg_hc_regs -+{ -+ /** Host Channel 0 Characteristic Register. Offset: 500h + (chan_num * 20h) + 00h */ -+ volatile uint32_t hcchar; -+ /** Host Channel 0 Split Control Register. Offset: 500h + (chan_num * 20h) + 04h */ -+ volatile uint32_t hcsplt; -+ /** Host Channel 0 Interrupt Register. Offset: 500h + (chan_num * 20h) + 08h */ -+ volatile uint32_t hcint; -+ /** Host Channel 0 Interrupt Mask Register. Offset: 500h + (chan_num * 20h) + 0Ch */ -+ volatile uint32_t hcintmsk; -+ /** Host Channel 0 Transfer Size Register. Offset: 500h + (chan_num * 20h) + 10h */ -+ volatile uint32_t hctsiz; -+ /** Host Channel 0 DMA Address Register. Offset: 500h + (chan_num * 20h) + 14h */ -+ volatile uint32_t hcdma; -+ /** Reserved. Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch */ -+ uint32_t reserved[2]; -+} dwc_otg_hc_regs_t; -+ -+/** -+ * This union represents the bit fields in the Host Channel Characteristics -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hcchar register. -+ */ -+typedef union hcchar_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Maximum packet size in bytes */ -+ unsigned mps : 11; -+ -+ /** Endpoint number */ -+ unsigned epnum : 4; -+ -+ /** 0: OUT, 1: IN */ -+ unsigned epdir : 1; -+ -+ unsigned reserved : 1; -+ -+ /** 0: Full/high speed device, 1: Low speed device */ -+ unsigned lspddev : 1; -+ -+ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */ -+ unsigned eptype : 2; -+ -+ /** Packets per frame for periodic transfers. 0 is reserved. */ -+ unsigned multicnt : 2; -+ -+ /** Device address */ -+ unsigned devaddr : 7; -+ -+ /** -+ * Frame to transmit periodic transaction. -+ * 0: even, 1: odd -+ */ -+ unsigned oddfrm : 1; -+ -+ /** Channel disable */ -+ unsigned chdis : 1; -+ -+ /** Channel enable */ -+ unsigned chen : 1; -+ } b; -+} hcchar_data_t; -+ -+typedef union hcsplt_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Port Address */ -+ unsigned prtaddr : 7; -+ -+ /** Hub Address */ -+ unsigned hubaddr : 7; -+ -+ /** Transaction Position */ -+ unsigned xactpos : 2; -+#define DWC_HCSPLIT_XACTPOS_MID 0 -+#define DWC_HCSPLIT_XACTPOS_END 1 -+#define DWC_HCSPLIT_XACTPOS_BEGIN 2 -+#define DWC_HCSPLIT_XACTPOS_ALL 3 -+ -+ /** Do Complete Split */ -+ unsigned compsplt : 1; -+ -+ /** Reserved */ -+ unsigned reserved : 14; -+ -+ /** Split Enble */ -+ unsigned spltena : 1; -+ } b; -+} hcsplt_data_t; -+ -+ -+/** -+ * This union represents the bit fields in the Host All Interrupt -+ * Register. -+ */ -+typedef union hcint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Transfer Complete */ -+ unsigned xfercomp : 1; -+ /** Channel Halted */ -+ unsigned chhltd : 1; -+ /** AHB Error */ -+ unsigned ahberr : 1; -+ /** STALL Response Received */ -+ unsigned stall : 1; -+ /** NAK Response Received */ -+ unsigned nak : 1; -+ /** ACK Response Received */ -+ unsigned ack : 1; -+ /** NYET Response Received */ -+ unsigned nyet : 1; -+ /** Transaction Err */ -+ unsigned xacterr : 1; -+ /** Babble Error */ -+ unsigned bblerr : 1; -+ /** Frame Overrun */ -+ unsigned frmovrun : 1; -+ /** Data Toggle Error */ -+ unsigned datatglerr : 1; -+ /** Reserved */ -+ unsigned reserved : 21; -+ } b; -+} hcint_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Channel Transfer Size -+ * Register. 
Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hcchar register. -+ */ -+typedef union hctsiz_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Total transfer size in bytes */ -+ unsigned xfersize : 19; -+ -+ /** Data packets to transfer */ -+ unsigned pktcnt : 10; -+ -+ /** -+ * Packet ID for next data packet -+ * 0: DATA0 -+ * 1: DATA2 -+ * 2: DATA1 -+ * 3: MDATA (non-Control), SETUP (Control) -+ */ -+ unsigned pid : 2; -+#define DWC_HCTSIZ_DATA0 0 -+#define DWC_HCTSIZ_DATA1 2 -+#define DWC_HCTSIZ_DATA2 1 -+#define DWC_HCTSIZ_MDATA 3 -+#define DWC_HCTSIZ_SETUP 3 -+ -+ /** Do PING protocol when 1 */ -+ unsigned dopng : 1; -+ } b; -+} hctsiz_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Channel Interrupt Mask -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hcintmsk register. -+ */ -+typedef union hcintmsk_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned xfercompl : 1; -+ unsigned chhltd : 1; -+ unsigned ahberr : 1; -+ unsigned stall : 1; -+ unsigned nak : 1; -+ unsigned ack : 1; -+ unsigned nyet : 1; -+ unsigned xacterr : 1; -+ unsigned bblerr : 1; -+ unsigned frmovrun : 1; -+ unsigned datatglerr : 1; -+ unsigned reserved : 21; -+ } b; -+} hcintmsk_data_t; -+ -+/** OTG Host Interface Structure. -+ * -+ * The OTG Host Interface Structure structure contains information -+ * needed to manage the DWC_otg controller acting in host mode. It -+ * represents the programming view of the host-specific aspects of the -+ * controller. -+ */ -+typedef struct dwc_otg_host_if -+{ -+ /** Host Global Registers starting at offset 400h.*/ -+ dwc_otg_host_global_regs_t *host_global_regs; -+#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400 -+ -+ /** Host Port 0 Control and Status Register */ -+ volatile uint32_t *hprt0; -+#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440 -+ -+ /** Host Channel Specific Registers at offsets 500h-5FCh. */ -+ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS]; -+#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500 -+#define DWC_OTG_CHAN_REGS_OFFSET 0x20 -+ -+ -+ /* Host configuration information */ -+ /** Number of Host Channels (range: 1-16) */ -+ uint8_t num_host_channels; -+ /** Periodic EPs supported (0: no, 1: yes) */ -+ uint8_t perio_eps_supported; -+ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */ -+ uint16_t perio_tx_fifo_size; -+} dwc_otg_host_if_t; -+ -+ -+/** -+ * This union represents the bit fields in the Power and Clock Gating Control -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. 
-+ */ -+typedef union pcgcctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Stop Pclk */ -+ unsigned stoppclk : 1; -+ /** Gate Hclk */ -+ unsigned gatehclk : 1; -+ /** Power Clamp */ -+ unsigned pwrclmp : 1; -+ /** Reset Power Down Modules */ -+ unsigned rstpdwnmodule : 1; -+ /** PHY Suspended */ -+ unsigned physuspended : 1; -+ unsigned reserved : 27; -+ } b; -+} pcgcctl_data_t; -+ -+ -+#endif ---- a/drivers/usb/core/urb.c -+++ b/drivers/usb/core/urb.c -@@ -17,7 +17,11 @@ static void urb_destroy(struct kref *kre - - if (urb->transfer_flags & URB_FREE_BUFFER) - kfree(urb->transfer_buffer); -- -+ if (urb->aligned_transfer_buffer) { -+ kfree(urb->aligned_transfer_buffer); -+ urb->aligned_transfer_buffer = 0; -+ urb->aligned_transfer_dma = 0; -+ } - kfree(urb); - } - ---- a/include/linux/usb.h -+++ b/include/linux/usb.h -@@ -1202,6 +1202,9 @@ struct urb { - unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ - void *transfer_buffer; /* (in) associated data buffer */ - dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ -+ void *aligned_transfer_buffer; /* (in) associeated data buffer */ -+ dma_addr_t aligned_transfer_dma;/* (in) dma addr for transfer_buffer */ -+ u32 aligned_transfer_buffer_length; /* (in) data buffer length */ - struct scatterlist *sg; /* (in) scatter gather buffer list */ - int num_mapped_sgs; /* (internal) mapped sg entries */ - int num_sgs; /* (in) number of entries in the sg list */ ---- a/drivers/usb/gadget/Kconfig -+++ b/drivers/usb/gadget/Kconfig -@@ -108,7 +108,7 @@ config USB_GADGET_VBUS_DRAW - # - choice - prompt "USB Peripheral Controller" -- depends on USB_GADGET -+ depends on USB_GADGET && !USB_DWC_OTG - help - A USB device uses a controller to talk to its host. - Systems should have only one such upstream link. ---- a/drivers/usb/gadget/Makefile -+++ b/drivers/usb/gadget/Makefile -@@ -3,7 +3,7 @@ - # - ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG - --obj-$(CONFIG_USB_GADGET) += udc-core.o -+#obj-$(CONFIG_USB_GADGET) += udc-core.o - obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o - obj-$(CONFIG_USB_NET2272) += net2272.o - obj-$(CONFIG_USB_NET2280) += net2280.o diff --git a/target/linux/cns3xxx/patches/002-cns3xxx_wdt.patch b/target/linux/cns3xxx/patches/002-cns3xxx_wdt.patch new file mode 100644 index 0000000000..0f9004dc6d --- /dev/null +++ b/target/linux/cns3xxx/patches/002-cns3xxx_wdt.patch @@ -0,0 +1,69 @@ +1. Made the connection between CNS3xxx SOCs(ARCH_CNS3xxx) and MPcore watchdog + since the CNS3xxx SOCs have ARM11 MPcore CPU. +2. Enable mpcore_watchdog option as module to default configuration at + arch/arm/configs/cns3420vb_defconfig. + +Signed-off-by: Tommy Lin + +--- +arch/arm/Kconfig | 1 + + arch/arm/configs/cns3420vb_defconfig | 2 ++ + arch/arm/mach-cns3xxx/cns3420vb.c | 22 ++++++++++++++++++++++ + 3 files changed, 25 insertions(+), 0 deletions(-) + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -333,6 +333,7 @@ config ARCH_CNS3XXX + select ARM_GIC + select MIGHT_HAVE_PCI + select PCI_DOMAINS if PCI ++ select HAVE_ARM_TWD + help + Support for Cavium Networks CNS3XXX platform. 
+ +--- a/arch/arm/configs/cns3420vb_defconfig ++++ b/arch/arm/configs/cns3420vb_defconfig +@@ -53,6 +53,8 @@ CONFIG_LEGACY_PTY_COUNT=16 + # CONFIG_HW_RANDOM is not set + # CONFIG_HWMON is not set + # CONFIG_VGA_CONSOLE is not set ++CONFIG_WATCHDOG=y ++CONFIG_MPCORE_WATCHDOG=m + # CONFIG_HID_SUPPORT is not set + # CONFIG_USB_SUPPORT is not set + CONFIG_MMC=y +--- a/arch/arm/mach-cns3xxx/cns3420vb.c ++++ b/arch/arm/mach-cns3xxx/cns3420vb.c +@@ -158,10 +158,32 @@ static struct platform_device cns3xxx_us + }, + }; + ++/* Watchdog */ ++static struct resource cns3xxx_watchdog_resources[] = { ++ [0] = { ++ .start = CNS3XXX_TC11MP_TWD_BASE, ++ .end = CNS3XXX_TC11MP_TWD_BASE + PAGE_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_LOCALWDOG, ++ .end = IRQ_LOCALWDOG, ++ .flags = IORESOURCE_IRQ, ++ } ++}; ++ ++static struct platform_device cns3xxx_watchdog_device = { ++ .name = "mpcore_wdt", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(cns3xxx_watchdog_resources), ++ .resource = cns3xxx_watchdog_resources, ++}; ++ + /* + * Initialization + */ + static struct platform_device *cns3420_pdevs[] __initdata = { ++ &cns3xxx_watchdog_device, + &cns3420_nor_pdev, + &cns3xxx_usb_ehci_device, + &cns3xxx_usb_ohci_device, diff --git a/target/linux/cns3xxx/patches/049-cns3xxx_smp_support.patch b/target/linux/cns3xxx/patches/049-cns3xxx_smp_support.patch new file mode 100644 index 0000000000..e34134055e --- /dev/null +++ b/target/linux/cns3xxx/patches/049-cns3xxx_smp_support.patch @@ -0,0 +1,415 @@ +--- a/arch/arm/mach-cns3xxx/Makefile ++++ b/arch/arm/mach-cns3xxx/Makefile +@@ -1,3 +1,6 @@ + obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o devices.o + obj-$(CONFIG_PCI) += pcie.o + obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o ++obj-$(CONFIG_SMP) += platsmp.o headsmp.o ++obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o ++obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/headsmp.S +@@ -0,0 +1,42 @@ ++/* ++ * linux/arch/arm/mach-cns3xxx/headsmp.S ++ * ++ * Cloned from linux/arch/arm/plat-versatile/headsmp.S ++ * ++ * Copyright (c) 2003 ARM Limited ++ * All Rights Reserved ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++#include ++#include ++ ++ __INIT ++ ++/* ++ * CNS3XXX specific entry point for secondary CPUs. This provides ++ * a "holding pen" into which all secondary cores are held until we're ++ * ready for them to initialise. ++ */ ++ENTRY(cns3xxx_secondary_startup) ++ mrc p15, 0, r0, c0, c0, 5 ++ and r0, r0, #15 ++ adr r4, 1f ++ ldmia r4, {r5, r6} ++ sub r4, r4, r5 ++ add r6, r6, r4 ++pen: ldr r7, [r6] ++ cmp r7, r0 ++ bne pen ++ ++ /* ++ * we've been released from the holding pen: secondary_stack ++ * should now contain the SVC stack for this core ++ */ ++ b secondary_startup ++ ++ .align ++1: .long . ++ .long pen_release +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/hotplug.c +@@ -0,0 +1,130 @@ ++/* linux arch/arm/mach-cns3xxx/hotplug.c ++ * ++ * Cloned from linux/arch/arm/mach-realview/hotplug.c ++ * ++ * Copyright (C) 2002 ARM Ltd. ++ * All Rights Reserved ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++*/ ++ ++#include ++#include ++#include ++ ++#include ++ ++extern volatile int pen_release; ++ ++static inline void cpu_enter_lowpower(void) ++{ ++ unsigned int v; ++ ++ flush_cache_all(); ++ asm volatile( ++ " mcr p15, 0, %1, c7, c5, 0\n" ++ " mcr p15, 0, %1, c7, c10, 4\n" ++ /* ++ * Turn off coherency ++ */ ++ " mrc p15, 0, %0, c1, c0, 1\n" ++ " bic %0, %0, %3\n" ++ " mcr p15, 0, %0, c1, c0, 1\n" ++ " mrc p15, 0, %0, c1, c0, 0\n" ++ " bic %0, %0, %2\n" ++ " mcr p15, 0, %0, c1, c0, 0\n" ++ : "=&r" (v) ++ : "r" (0), "Ir" (CR_C), "Ir" (0x40) ++ : "cc"); ++} ++ ++static inline void cpu_leave_lowpower(void) ++{ ++ unsigned int v; ++ ++ asm volatile( ++ "mrc p15, 0, %0, c1, c0, 0\n" ++ " orr %0, %0, %1\n" ++ " mcr p15, 0, %0, c1, c0, 0\n" ++ " mrc p15, 0, %0, c1, c0, 1\n" ++ " orr %0, %0, %2\n" ++ " mcr p15, 0, %0, c1, c0, 1\n" ++ : "=&r" (v) ++ : "Ir" (CR_C), "Ir" (0x40) ++ : "cc"); ++} ++ ++static inline void platform_do_lowpower(unsigned int cpu, int *spurious) ++{ ++ /* ++ * there is no power-control hardware on this platform, so all ++ * we can do is put the core into WFI; this is safe as the calling ++ * code will have already disabled interrupts ++ */ ++ for (;;) { ++ /* ++ * here's the WFI ++ */ ++ asm(".word 0xe320f003\n" ++ : ++ : ++ : "memory", "cc"); ++ ++ if (pen_release == cpu) { ++ /* ++ * OK, proper wakeup, we're done ++ */ ++ break; ++ } ++ ++ /* ++ * Getting here, means that we have come out of WFI without ++ * having been woken up - this shouldn't happen ++ * ++ * Just note it happening - when we're woken, we can report ++ * its occurrence. ++ */ ++ (*spurious)++; ++ } ++} ++ ++int platform_cpu_kill(unsigned int cpu) ++{ ++ return 1; ++} ++ ++/* ++ * platform-specific code to shutdown a CPU ++ * ++ * Called with IRQs disabled ++ */ ++void platform_cpu_die(unsigned int cpu) ++{ ++ int spurious = 0; ++ ++ /* ++ * we're ready for shutdown now, so do it ++ */ ++ cpu_enter_lowpower(); ++ platform_do_lowpower(cpu, &spurious); ++ ++ /* ++ * bring this CPU back into the world of cache ++ * coherency, and then restore interrupts ++ */ ++ cpu_leave_lowpower(); ++ ++ if (spurious) ++ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); ++} ++ ++int platform_cpu_disable(unsigned int cpu) ++{ ++ /* ++ * we don't allow CPU 0 to be shutdown (it is still too special ++ * e.g. clock tick interrupts) ++ */ ++ return cpu == 0 ? -EPERM : 0; ++} +--- a/arch/arm/mach-cns3xxx/Kconfig ++++ b/arch/arm/mach-cns3xxx/Kconfig +@@ -3,6 +3,7 @@ menu "CNS3XXX platform type" + + config MACH_CNS3420VB + bool "Support for CNS3420 Validation Board" ++ select HAVE_ARM_SCU if SMP + select MIGHT_HAVE_PCI + help + Include support for the Cavium Networks CNS3420 MPCore Platform +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/localtimer.c +@@ -0,0 +1,26 @@ ++/* linux/arch/arm/mach-cns3xxx/localtimer.c ++ * ++ * Cloned from linux/arch/arm/mach-realview/localtimer.c ++ * ++ * Copyright (C) 2002 ARM Ltd. ++ * All Rights Reserved ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++*/ ++ ++#include ++ ++#include ++#include ++ ++/* ++ * Setup the local clock events for a CPU. 
++ */ ++int __cpuinit local_timer_setup(struct clock_event_device *evt) ++{ ++ evt->irq = IRQ_LOCALTIMER; ++ twd_timer_setup(evt); ++ return 0; ++} +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/platsmp.c +@@ -0,0 +1,175 @@ ++/* linux/arch/arm/mach-cns3xxx/platsmp.c ++ * ++ * Copyright 2011 Gateworks Corporation ++ * Chris Lang ++ * ++ * Cloned from linux/arch/arm/mach-vexpress/platsmp.c ++ * ++ * Copyright (C) 2002 ARM Ltd. ++ * All Rights Reserved ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++*/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++extern void cns3xxx_secondary_startup(void); ++ ++/* ++ * control for which core is the next to come out of the secondary ++ * boot "holding pen" ++ */ ++ ++volatile int __cpuinitdata pen_release = -1; ++ ++/* ++ * Write pen_release in a way that is guaranteed to be visible to all ++ * observers, irrespective of whether they're taking part in coherency ++ * or not. This is necessary for the hotplug code to work reliably. ++ */ ++static void write_pen_release(int val) ++{ ++ pen_release = val; ++ smp_wmb(); ++ __cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release)); ++ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1)); ++} ++ ++static void __iomem *scu_base_addr(void) ++{ ++ return (void __iomem *)(CNS3XXX_TC11MP_SCU_BASE_VIRT); ++} ++ ++static DEFINE_SPINLOCK(boot_lock); ++ ++void __cpuinit platform_secondary_init(unsigned int cpu) ++{ ++ /* ++ * if any interrupts are already enabled for the primary ++ * core (e.g. timer irq), then they will not have been enabled ++ * for us: do so ++ */ ++ gic_secondary_init(0); ++ ++ /* ++ * let the primary processor know we're out of the ++ * pen, then head off into the C entry point ++ */ ++ write_pen_release(-1); ++ ++ /* ++ * Synchronise with the boot thread. ++ */ ++ spin_lock(&boot_lock); ++ spin_unlock(&boot_lock); ++} ++ ++int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) ++{ ++ unsigned long timeout; ++ ++ /* ++ * Set synchronisation state between this boot processor ++ * and the secondary one ++ */ ++ spin_lock(&boot_lock); ++ ++ /* ++ * The secondary processor is waiting to be released from ++ * the holding pen - release it, then wait for it to flag ++ * that it has been released by resetting pen_release. ++ * ++ * Note that "pen_release" is the hardware CPU ID, whereas ++ * "cpu" is Linux's internal ID. ++ */ ++ write_pen_release(cpu); ++ ++ /* ++ * Send the secondary CPU a soft interrupt, thereby causing ++ * the boot monitor to read the system wide flags register, ++ * and branch to the address found there. ++ */ ++ gic_raise_softirq(cpumask_of(cpu), 1); ++ ++ timeout = jiffies + (1 * HZ); ++ while (time_before(jiffies, timeout)) { ++ smp_rmb(); ++ if (pen_release == -1) ++ break; ++ ++ udelay(10); ++ } ++ ++ /* ++ * now the secondary core is starting up let it run its ++ * calibrations, then wait for it to finish ++ */ ++ spin_unlock(&boot_lock); ++ ++ return pen_release != -1 ? -ENOSYS : 0; ++} ++ ++/* ++ * Initialise the CPU possible map early - this describes the CPUs ++ * which may be present or become present in the system. ++ */ ++ ++void __init smp_init_cpus(void) ++{ ++ void __iomem *scu_base = scu_base_addr(); ++ unsigned int i, ncores; ++ ++ ncores = scu_base ? 
scu_get_core_count(scu_base) : 1; ++ ++ /* sanity check */ ++ if (ncores > NR_CPUS) { ++ printk(KERN_WARNING ++ "cns3xxx: no. of cores (%d) greater than configured " ++ "maximum of %d - clipping\n", ++ ncores, NR_CPUS); ++ ncores = NR_CPUS; ++ } ++ ++ for (i = 0; i < ncores; i++) ++ set_cpu_possible(i, true); ++ ++ set_smp_cross_call(gic_raise_softirq); ++} ++ ++void __init platform_smp_prepare_cpus(unsigned int max_cpus) ++{ ++ int i; ++ ++ /* ++ * Initialise the present map, which describes the set of CPUs ++ * actually populated at the present time. ++ */ ++ for (i = 0; i < max_cpus; i++) ++ set_cpu_present(i, true); ++ ++ scu_enable(scu_base_addr()); ++ ++ /* ++ * Write the address of secondary startup into the ++ * system-wide flags register. The boot monitor waits ++ * until it receives a soft interrupt, and then the ++ * secondary CPU branches to this address. ++ */ ++ __raw_writel(virt_to_phys(cns3xxx_secondary_startup), ++ (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0600)); ++} +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -1388,7 +1388,7 @@ config SMP + depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \ + MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \ + ARCH_EXYNOS4 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \ +- ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE ++ ARCH_MSM_SCORPIONMP || ARCH_SHMOBILE || ARCH_CNS3XXX + select USE_GENERIC_SMP_HELPERS + select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP + help diff --git a/target/linux/cns3xxx/patches/050-cns3xxx_i2c_controller.patch b/target/linux/cns3xxx/patches/050-cns3xxx_i2c_controller.patch new file mode 100644 index 0000000000..febbc1ecba --- /dev/null +++ b/target/linux/cns3xxx/patches/050-cns3xxx_i2c_controller.patch @@ -0,0 +1,421 @@ +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -327,6 +327,18 @@ config I2C_BLACKFIN_TWI_CLK_KHZ + help + The unit of the TWI clock is kHz. + ++config I2C_CNS3XXX ++ tristate "Cavium CNS3xxx I2C driver" ++ depends on ARCH_CNS3XXX ++ help ++ Support for Cavium CNS3xxx I2C controller driver. ++ ++ This driver can also be built as a module. If so, the module ++ will be called i2c-cns3xxx. ++ ++ Please note that this driver might be needed to bring up other ++ devices such as Cavium CNS3xxx Ethernet. ++ + config I2C_CPM + tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" + depends on (CPM1 || CPM2) && OF_I2C +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -80,6 +80,7 @@ obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor + obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o + obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o + obj-$(CONFIG_I2C_STUB) += i2c-stub.o ++obj-$(CONFIG_I2C_CNS3XXX) += i2c-cns3xxx.o + obj-$(CONFIG_SCx200_ACB) += scx200_acb.o + obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o + +--- /dev/null ++++ b/drivers/i2c/busses/i2c-cns3xxx.c +@@ -0,0 +1,387 @@ ++/* ++ * Cavium CNS3xxx I2C Host Controller ++ * ++ * Copyright 2010 Cavium Network ++ * Copyright 2011 Gateworks Corporation ++ * Chris Lang ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * We need the memory map ++ */ ++ ++ ++#define MISC_MEM_MAP_VALUE(reg_offset) (*((uint32_t volatile *)(CNS3XXX_MISC_BASE_VIRT + reg_offset))) ++#define MISC_IOCDB_CTRL MISC_MEM_MAP_VALUE(0x020) ++ ++#define I2C_MEM_MAP_ADDR(x) (CNS3XXX_SSP_BASE_VIRT + x) ++#define I2C_MEM_MAP_VALUE(x) (*((unsigned int volatile*)I2C_MEM_MAP_ADDR(x))) ++ ++#define I2C_CONTROLLER_REG I2C_MEM_MAP_VALUE(0x20) ++#define I2C_TIME_OUT_REG I2C_MEM_MAP_VALUE(0x24) ++#define I2C_SLAVE_ADDRESS_REG I2C_MEM_MAP_VALUE(0x28) ++#define I2C_WRITE_DATA_REG I2C_MEM_MAP_VALUE(0x2C) ++#define I2C_READ_DATA_REG I2C_MEM_MAP_VALUE(0x30) ++#define I2C_INTERRUPT_STATUS_REG I2C_MEM_MAP_VALUE(0x34) ++#define I2C_INTERRUPT_ENABLE_REG I2C_MEM_MAP_VALUE(0x38) ++#define I2C_TWI_OUT_DLY_REG I2C_MEM_MAP_VALUE(0x3C) ++ ++#define I2C_BUS_ERROR_FLAG (0x1) ++#define I2C_ACTION_DONE_FLAG (0x2) ++ ++#define CNS3xxx_I2C_ENABLE() (I2C_CONTROLLER_REG) |= ((unsigned int)0x1 << 31) ++#define CNS3xxx_I2C_DISABLE() (I2C_CONTROLLER_REG) &= ~((unsigned int)0x1 << 31) ++#define CNS3xxx_I2C_ENABLE_INTR() (I2C_INTERRUPT_ENABLE_REG) |= 0x03 ++#define CNS3xxx_I2C_DISABLE_INTR() (I2C_INTERRUPT_ENABLE_REG) &= 0xfc ++ ++#define TWI_TIMEOUT (10*HZ) ++#define I2C_100KHZ 100000 ++#define I2C_200KHZ 200000 ++#define I2C_300KHZ 300000 ++#define I2C_400KHZ 400000 ++ ++#define CNS3xxx_I2C_CLK I2C_100KHZ ++ ++#define STATE_DONE 1 ++#define STATE_ERROR 2 ++ ++struct cns3xxx_i2c { ++ void __iomem *base; ++ wait_queue_head_t wait; ++ struct i2c_adapter adap; ++ struct i2c_msg *msg; ++ int state; /* see STATE_ */ ++ int rd_wr_len; ++ u8 *buf; ++}; ++ ++static u32 cns3xxx_i2c_func(struct i2c_adapter *adap) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++} ++ ++static int ++cns3xxx_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg) ++{ ++ struct cns3xxx_i2c *i2c = i2c_get_adapdata(adap); ++ int i, j; ++ u8 buf[1] = { 0 }; ++ ++ if (msg->len == 0) { ++ /* ++ * We are probably doing a probe for a device here, ++ * so set the length to one, and data to 0 ++ */ ++ msg->len = 1; ++ i2c->buf = buf; ++ } else { ++ i2c->buf = msg->buf; ++ } ++ ++ if (msg->flags & I2C_M_TEN) { ++ printk ++ ("%s:%d: Presently the driver does not handle extended addressing\n", ++ __FUNCTION__, __LINE__); ++ return -EINVAL; ++ } ++ i2c->msg = msg; ++ ++ for (i = 0; i < msg->len; i++) { ++ if (msg->len - i >= 4) ++ i2c->rd_wr_len = 3; ++ else ++ i2c->rd_wr_len = msg->len - i - 1; ++ ++ // Set Data Width and TWI_EN ++ I2C_CONTROLLER_REG = 0x80000000 | (i2c->rd_wr_len << 2) | (i2c->rd_wr_len); ++ ++ // Clear Write Reg ++ I2C_WRITE_DATA_REG = 0; ++ ++ // Set the slave address ++ I2C_SLAVE_ADDRESS_REG = (msg->addr << 1); ++ ++ // Are we Writing ++ if (!(msg->flags & I2C_M_RD)) { ++ I2C_CONTROLLER_REG |= (1 << 4); ++ if (i != 0) { ++ /* ++ * We need to set the address in the first byte. ++ * The base address is going to be in buf[0] and then ++ * it needs to be incremented by i - 1. 
++ */ ++ i2c->buf--; ++ *i2c->buf = buf[0] + i - 1; ++ ++ if (i2c->rd_wr_len < 3) { ++ i += i2c->rd_wr_len; ++ i2c->rd_wr_len++; ++ I2C_CONTROLLER_REG = 0x80000000 | (1 << 4) | (i2c->rd_wr_len << 2) | (i2c->rd_wr_len); ++ } else { ++ i += i2c->rd_wr_len - 1; ++ } ++ } else { ++ i += i2c->rd_wr_len; ++ buf[0] = *i2c->buf; ++ } ++ for (j = 0; j <= i2c->rd_wr_len; j++) { ++ I2C_WRITE_DATA_REG |= ((*i2c->buf++) << (8 * j)); ++ } ++ } else { ++ i += i2c->rd_wr_len; ++ } ++ ++ // Start the Transfer ++ i2c->state = 0; // Clear out the State ++ I2C_CONTROLLER_REG |= (1 << 6); ++ ++ if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) || ++ (i2c->state == STATE_DONE), TWI_TIMEOUT)) { ++ if (i2c->state == STATE_ERROR) { ++ return -EIO; ++ } ++ } else { ++ return -ETIMEDOUT; ++ } ++ } ++ return 0; ++} ++ ++static int ++cns3xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) ++{ ++ int i; ++ int ret; ++ for (i = 0; i < num; i++) ++ { ++ ret = cns3xxx_i2c_xfer_msg(adap, &msgs[i]); ++ if (ret < 0) { ++ return ret; ++ } ++ } ++ return num; ++} ++ ++ ++static struct i2c_algorithm cns3xxx_i2c_algo = { ++ .master_xfer = cns3xxx_i2c_xfer, ++ .functionality = cns3xxx_i2c_func, ++}; ++ ++static struct i2c_adapter cns3xxx_i2c_adapter = { ++ .owner = THIS_MODULE, ++ .algo = &cns3xxx_i2c_algo, ++ .algo_data = NULL, ++ .nr = 0, ++ .name = "CNS3xxx I2C 0", ++ .retries = 5, ++}; ++ ++static void cns3xxx_i2c_adapter_init(struct cns3xxx_i2c *i2c) ++{ ++ cns3xxx_pwr_clk_en(1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); ++ cns3xxx_pwr_power_up(1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); ++ cns3xxx_pwr_soft_rst(1 << PM_CLK_GATE_REG_OFFSET_SPI_PCM_I2C); ++ ++ /* Disable the I2C */ ++ I2C_CONTROLLER_REG = 0; /* Disabled the I2C */ ++ ++ //enable SCL and SDA which share pin with GPIOB_PIN_EN(0x18) ++ //GPIOB[12]: SCL ++ //GPIOB[13]: SDA ++ (*(u32*)(CNS3XXX_MISC_BASE_VIRT+0x18)) |= ((1<<12)|(1<<13)); ++ ++ MISC_IOCDB_CTRL &= ~0x300; ++ MISC_IOCDB_CTRL |= 0x300; //21mA... 
++ ++ /* Check the Reg Dump when testing */ ++ I2C_TIME_OUT_REG = ++ ((((((cns3xxx_cpu_clock()*(1000000/8)) / (2 * CNS3xxx_I2C_CLK)) - ++ 1) & 0x3FF) << 8) | (1 << 7) | 0x7F); ++ I2C_TWI_OUT_DLY_REG |= 0x3; ++ ++ /* Enable The Interrupt */ ++ CNS3xxx_I2C_ENABLE_INTR(); ++ ++ /* Clear Interrupt Status (0x2 | 0x1) */ ++ I2C_INTERRUPT_STATUS_REG |= (I2C_ACTION_DONE_FLAG | I2C_BUS_ERROR_FLAG); ++ ++ /* Enable the I2C Controller */ ++ CNS3xxx_I2C_ENABLE(); ++} ++ ++static irqreturn_t cns3xxx_i2c_isr(int irq, void *dev_id) ++{ ++ struct cns3xxx_i2c *i2c = dev_id; ++ int i; ++ uint32_t stat = I2C_INTERRUPT_STATUS_REG; ++ ++ /* Clear Interrupt */ ++ I2C_INTERRUPT_STATUS_REG |= 0x1; ++ ++ if (stat & I2C_BUS_ERROR_FLAG) { ++ i2c->state = STATE_ERROR; ++ } else { ++ if (i2c->msg->flags & I2C_M_RD) { ++ for (i = 0; i <= i2c->rd_wr_len; i++) ++ { ++ *i2c->buf++ = ((I2C_READ_DATA_REG >> (8 * i)) & 0xff); ++ } ++ } ++ i2c->state = STATE_DONE; ++ } ++ wake_up(&i2c->wait); ++ return IRQ_HANDLED; ++} ++ ++static int __devinit cns3xxx_i2c_probe(struct platform_device *pdev) ++{ ++ struct cns3xxx_i2c *i2c; ++ struct resource *res, *res2; ++ int ret; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ printk("%s: IORESOURCE_MEM not defined \n", __FUNCTION__); ++ return -ENODEV; ++ } ++ ++ res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res2) { ++ printk("%s: IORESOURCE_IRQ not defined \n", __FUNCTION__); ++ return -ENODEV; ++ } ++ ++ i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); ++ if (!i2c) ++ return -ENOMEM; ++ ++ if (!request_mem_region(res->start, res->end - res->start + 1, ++ pdev->name)) { ++ dev_err(&pdev->dev, "Memory region busy\n"); ++ ret = -EBUSY; ++ goto request_mem_failed; ++ } ++ ++ i2c->base = ioremap(res->start, res->end - res->start + 1); ++ if (!i2c->base) { ++ dev_err(&pdev->dev, "Unable to map registers\n"); ++ ret = -EIO; ++ goto map_failed; ++ } ++ ++ cns3xxx_i2c_adapter_init(i2c); ++ ++ init_waitqueue_head(&i2c->wait); ++ ret = request_irq(res2->start, cns3xxx_i2c_isr, 0, pdev->name, i2c); ++ if (ret) { ++ dev_err(&pdev->dev, "Cannot claim IRQ\n"); ++ goto request_irq_failed; ++ } ++ ++ platform_set_drvdata(pdev, i2c); ++ i2c->adap = cns3xxx_i2c_adapter; ++ i2c_set_adapdata(&i2c->adap, i2c); ++ i2c->adap.dev.parent = &pdev->dev; ++ ++ /* add i2c adapter to i2c tree */ ++ ret = i2c_add_numbered_adapter(&i2c->adap); ++ if (ret) { ++ dev_err(&pdev->dev, "Failed to add adapter\n"); ++ goto add_adapter_failed; ++ } ++ ++ return 0; ++ ++ add_adapter_failed: ++ free_irq(res2->start, i2c); ++ request_irq_failed: ++ iounmap(i2c->base); ++ map_failed: ++ release_mem_region(res->start, res->end - res->start + 1); ++ request_mem_failed: ++ kfree(i2c); ++ ++ return ret; ++} ++ ++static int __devexit cns3xxx_i2c_remove(struct platform_device *pdev) ++{ ++ struct cns3xxx_i2c *i2c = platform_get_drvdata(pdev); ++ struct resource *res; ++ ++ /* disable i2c logic */ ++ CNS3xxx_I2C_DISABLE_INTR(); ++ CNS3xxx_I2C_DISABLE(); ++ /* remove adapter & data */ ++ i2c_del_adapter(&i2c->adap); ++ platform_set_drvdata(pdev, NULL); ++ ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (res) ++ free_irq(res->start, i2c); ++ ++ iounmap(i2c->base); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (res) ++ release_mem_region(res->start, res->end - res->start + 1); ++ ++ kfree(i2c); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++#warning "CONFIG_PM defined: suspend and resume not implemented" ++#define cns3xxx_i2c_suspend NULL ++#define cns3xxx_i2c_resume NULL 
++#else ++#define cns3xxx_i2c_suspend NULL ++#define cns3xxx_i2c_resume NULL ++#endif ++ ++static struct platform_driver cns3xxx_i2c_driver = { ++ .probe = cns3xxx_i2c_probe, ++ .remove = cns3xxx_i2c_remove, ++ .suspend = cns3xxx_i2c_suspend, ++ .resume = cns3xxx_i2c_resume, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "cns3xxx-i2c", ++ }, ++}; ++ ++static int __init cns3xxx_i2c_init(void) ++{ ++ return platform_driver_register(&cns3xxx_i2c_driver); ++} ++ ++static void __exit cns3xxx_i2c_exit(void) ++{ ++ platform_driver_unregister(&cns3xxx_i2c_driver); ++} ++ ++module_init(cns3xxx_i2c_init); ++module_exit(cns3xxx_i2c_exit); ++ ++MODULE_AUTHOR("Cavium Networks"); ++MODULE_DESCRIPTION("Cavium CNS3XXX I2C Controller"); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/cns3xxx/patches/051-cns3xxx_gigabit.patch b/target/linux/cns3xxx/patches/051-cns3xxx_gigabit.patch new file mode 100644 index 0000000000..cf67e92dd6 --- /dev/null +++ b/target/linux/cns3xxx/patches/051-cns3xxx_gigabit.patch @@ -0,0 +1,1328 @@ +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -2071,6 +2071,14 @@ config ACENIC_OMIT_TIGON_I + + The safe and default value for this is N. + ++config CNS3XXX_ETH ++ tristate "Cavium CNS3xxx Ethernet support" ++ depends on ARCH_CNS3XXX ++ select PHYLIB ++ help ++ Say Y here if you want to use built-in Ethernet ports ++ on CNS3XXX processor. ++ + config DL2K + tristate "DL2000/TC902x-based Gigabit Ethernet support" + depends on PCI +--- a/drivers/net/Makefile ++++ b/drivers/net/Makefile +@@ -240,6 +240,7 @@ obj-$(CONFIG_MAC89x0) += mac89x0.o + obj-$(CONFIG_TUN) += tun.o + obj-$(CONFIG_VETH) += veth.o + obj-$(CONFIG_NET_NETX) += netx-eth.o ++obj-$(CONFIG_CNS3XXX_ETH) += cns3xxx_eth.o + obj-$(CONFIG_DL2K) += dl2k.o + obj-$(CONFIG_R8169) += r8169.o + obj-$(CONFIG_AMD8111_ETH) += amd8111e.o +--- /dev/null ++++ b/drivers/net/cns3xxx_eth.c +@@ -0,0 +1,1269 @@ ++/* ++ * Cavium CNS3xxx Gigabit driver for Linux ++ * ++ * Copyright 2011 Gateworks Corporation ++ * Chris Lang ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of version 2 of the GNU General Public License ++ * as published by the Free Software Foundation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRV_NAME "cns3xxx_eth" ++ ++#define RX_DESCS 512 ++#define TX_DESCS 512 ++#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) ++ ++#define RX_POOL_ALLOC_SIZE (sizeof(struct rx_desc) * RX_DESCS) ++#define TX_POOL_ALLOC_SIZE (sizeof(struct tx_desc) * TX_DESCS) ++#define REGS_SIZE 336 ++#define MAX_MRU 9500 ++ ++#define NAPI_WEIGHT 64 ++ ++/* MDIO Defines */ ++#define MDIO_CMD_COMPLETE 0x00008000 ++#define MDIO_WRITE_COMMAND 0x00002000 ++#define MDIO_READ_COMMAND 0x00004000 ++#define MDIO_REG_OFFSET 8 ++#define MDIO_VALUE_OFFSET 16 ++ ++/* Descritor Defines */ ++#define END_OF_RING 0x40000000 ++#define FIRST_SEGMENT 0x20000000 ++#define LAST_SEGMENT 0x10000000 ++#define FORCE_ROUTE 0x04000000 ++#define IP_CHECKSUM 0x00040000 ++#define UDP_CHECKSUM 0x00020000 ++#define TCP_CHECKSUM 0x00010000 ++ ++/* Port Config Defines */ ++#define PORT_DISABLE 0x00040000 ++#define PROMISC_OFFSET 29 ++ ++/* Global Config Defines */ ++#define UNKNOWN_VLAN_TO_CPU 0x02000000 ++#define ACCEPT_CRC_PACKET 0x00200000 ++#define CRC_STRIPPING 0x00100000 ++ ++/* VLAN Config Defines */ ++#define NIC_MODE 0x00008000 ++#define VLAN_UNAWARE 0x00000001 ++ ++/* DMA AUTO Poll Defines */ ++#define TS_POLL_EN 0x00000020 ++#define TS_SUSPEND 0x00000010 ++#define FS_POLL_EN 0x00000002 ++#define FS_SUSPEND 0x00000001 ++ ++/* DMA Ring Control Defines */ ++#define QUEUE_THRESHOLD 0x000000f0 ++#define CLR_FS_STATE 0x80000000 ++ ++struct tx_desc ++{ ++ u32 sdp; /* segment data pointer */ ++ ++ union { ++ struct { ++ u32 sdl:16; /* segment data length */ ++ u32 tco:1; ++ u32 uco:1; ++ u32 ico:1; ++ u32 rsv_1:3; /* reserve */ ++ u32 pri:3; ++ u32 fp:1; /* force priority */ ++ u32 fr:1; ++ u32 interrupt:1; ++ u32 lsd:1; ++ u32 fsd:1; ++ u32 eor:1; ++ u32 cown:1; ++ }; ++ u32 config0; ++ }; ++ ++ union { ++ struct { ++ u32 ctv:1; ++ u32 stv:1; ++ u32 sid:4; ++ u32 inss:1; ++ u32 dels:1; ++ u32 rsv_2:9; ++ u32 pmap:5; ++ u32 mark:3; ++ u32 ewan:1; ++ u32 fewan:1; ++ u32 rsv_3:5; ++ }; ++ u32 config1; ++ }; ++ ++ union { ++ struct { ++ u32 c_vid:12; ++ u32 c_cfs:1; ++ u32 c_pri:3; ++ u32 s_vid:12; ++ u32 s_dei:1; ++ u32 s_pri:3; ++ }; ++ u32 config2; ++ }; ++ ++ u8 alignment[16]; /* for 32 byte */ ++}; ++ ++struct rx_desc ++{ ++ u32 sdp; /* segment data pointer */ ++ ++ union { ++ struct { ++ u32 sdl:16; /* segment data length */ ++ u32 l4f:1; ++ u32 ipf:1; ++ u32 prot:4; ++ u32 hr:6; ++ u32 lsd:1; ++ u32 fsd:1; ++ u32 eor:1; ++ u32 cown:1; ++ }; ++ u32 config0; ++ }; ++ ++ union { ++ struct { ++ u32 ctv:1; ++ u32 stv:1; ++ u32 unv:1; ++ u32 iwan:1; ++ u32 exdv:1; ++ u32 e_wan:1; ++ u32 rsv_1:2; ++ u32 sp:3; ++ u32 crc_err:1; ++ u32 un_eth:1; ++ u32 tc:2; ++ u32 rsv_2:1; ++ u32 ip_offset:5; ++ u32 rsv_3:11; ++ }; ++ u32 config1; ++ }; ++ ++ union { ++ struct { ++ u32 c_vid:12; ++ u32 c_cfs:1; ++ u32 c_pri:3; ++ u32 s_vid:12; ++ u32 s_dei:1; ++ u32 s_pri:3; ++ }; ++ u32 config2; ++ }; ++ ++ u8 alignment[16]; /* for 32 byte alignment */ ++}; ++ ++struct switch_regs { ++ u32 phy_control; ++ u32 phy_auto_addr; ++ u32 mac_glob_cfg; ++ u32 mac_cfg[4]; ++ u32 mac_pri_ctrl[5], __res; ++ u32 etype[2]; ++ u32 udp_range[4]; ++ u32 prio_etype_udp; ++ u32 prio_ipdscp[8]; ++ u32 tc_ctrl; ++ u32 rate_ctrl; ++ u32 fc_glob_thrs; ++ u32 fc_port_thrs; ++ u32 mc_fc_glob_thrs; ++ u32 dc_glob_thrs; ++ u32 arl_vlan_cmd; ++ u32 arl_ctrl[3]; ++ u32 vlan_cfg; ++ u32 pvid[2]; ++ u32 
vlan_ctrl[3]; ++ u32 session_id[8]; ++ u32 intr_stat; ++ u32 intr_mask; ++ u32 sram_test; ++ u32 mem_queue; ++ u32 farl_ctrl; ++ u32 fc_input_thrs, __res1[2]; ++ u32 clk_skew_ctrl; ++ u32 mac_glob_cfg_ext, __res2[2]; ++ u32 dma_ring_ctrl; ++ u32 dma_auto_poll_cfg; ++ u32 delay_intr_cfg, __res3; ++ u32 ts_dma_ctrl0; ++ u32 ts_desc_ptr0; ++ u32 ts_desc_base_addr0, __res4; ++ u32 fs_dma_ctrl0; ++ u32 fs_desc_ptr0; ++ u32 fs_desc_base_addr0, __res5; ++ u32 ts_dma_ctrl1; ++ u32 ts_desc_ptr1; ++ u32 ts_desc_base_addr1, __res6; ++ u32 fs_dma_ctrl1; ++ u32 fs_desc_ptr1; ++ u32 fs_desc_base_addr1; ++}; ++ ++struct _tx_ring { ++ struct tx_desc *desc; ++ dma_addr_t phys_addr; ++ struct tx_desc *cur_addr; ++ struct sk_buff *buff_tab[TX_DESCS]; ++ u32 free_index; ++ u32 count_index; ++ u32 cur_index; ++ int num_used; ++ int num_count; ++}; ++ ++struct _rx_ring { ++ struct rx_desc *desc; ++ dma_addr_t phys_addr; ++ struct rx_desc *cur_addr; ++ struct sk_buff *buff_tab[RX_DESCS]; ++ u32 cur_index; ++ u32 alloc_index; ++ int alloc_count; ++}; ++ ++struct sw { ++ struct resource *mem_res; ++ struct switch_regs __iomem *regs; ++ struct napi_struct napi; ++ struct cns3xxx_plat_info *plat; ++ struct _tx_ring *tx_ring; ++ struct _rx_ring *rx_ring; ++ u32 mtu; ++}; ++ ++struct port { ++ struct net_device *netdev; ++ struct phy_device *phydev; ++ struct sw *sw; ++ int id; /* logical port ID */ ++ int speed, duplex; ++ u32 mtu; ++}; ++ ++static spinlock_t mdio_lock; ++static spinlock_t tx_lock; ++static spinlock_t stat_lock; ++static struct switch_regs __iomem *mdio_regs; /* mdio command and status only */ ++struct mii_bus *mdio_bus; ++static int ports_open; ++static struct port *switch_port_tab[3]; ++static struct dma_pool *rx_dma_pool; ++static struct dma_pool *tx_dma_pool; ++struct net_device *napi_dev; ++ ++static int cns3xxx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, ++ int write, u16 cmd) ++{ ++ int cycles = 0; ++ u32 temp = 0; ++ ++ temp = __raw_readl(&mdio_regs->phy_control); ++ temp |= MDIO_CMD_COMPLETE; ++ __raw_writel(temp, &mdio_regs->phy_control); ++ udelay(10); ++ ++ if (write) { ++ temp = (cmd << MDIO_VALUE_OFFSET); ++ temp |= MDIO_WRITE_COMMAND; ++ } else { ++ temp = MDIO_READ_COMMAND; ++ } ++ temp |= ((location & 0x1f) << MDIO_REG_OFFSET); ++ temp |= (phy_id & 0x1f); ++ ++ __raw_writel(temp, &mdio_regs->phy_control); ++ ++ while (((__raw_readl(&mdio_regs->phy_control) & MDIO_CMD_COMPLETE) == 0) ++ && cycles < 5000) { ++ udelay(1); ++ cycles++; ++ } ++ ++ if (cycles == 5000) { ++ printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name, ++ phy_id); ++ return -1; ++ } ++ ++ temp = __raw_readl(&mdio_regs->phy_control); ++ temp |= MDIO_CMD_COMPLETE; ++ __raw_writel(temp, &mdio_regs->phy_control); ++ ++ if (write) ++ return 0; ++ ++ return ((temp >> MDIO_VALUE_OFFSET) & 0xFFFF); ++} ++ ++static int cns3xxx_mdio_read(struct mii_bus *bus, int phy_id, int location) ++{ ++ unsigned long flags; ++ int ret; ++ ++ spin_lock_irqsave(&mdio_lock, flags); ++ ret = cns3xxx_mdio_cmd(bus, phy_id, location, 0, 0); ++ spin_unlock_irqrestore(&mdio_lock, flags); ++ return ret; ++} ++ ++static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location, ++ u16 val) ++{ ++ unsigned long flags; ++ int ret; ++ ++ spin_lock_irqsave(&mdio_lock, flags); ++ ret = cns3xxx_mdio_cmd(bus, phy_id, location, 1, val); ++ spin_unlock_irqrestore(&mdio_lock, flags); ++ return ret; ++} ++ ++static int cns3xxx_mdio_register(void) ++{ ++ int err; ++ ++ if (!(mdio_bus = mdiobus_alloc())) ++ return -ENOMEM; ++ ++ 
mdio_regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT; ++ ++ spin_lock_init(&mdio_lock); ++ mdio_bus->name = "CNS3xxx MII Bus"; ++ mdio_bus->read = &cns3xxx_mdio_read; ++ mdio_bus->write = &cns3xxx_mdio_write; ++ strcpy(mdio_bus->id, "0"); ++ ++ if ((err = mdiobus_register(mdio_bus))) ++ mdiobus_free(mdio_bus); ++ return err; ++} ++ ++static void cns3xxx_mdio_remove(void) ++{ ++ mdiobus_unregister(mdio_bus); ++ mdiobus_free(mdio_bus); ++} ++ ++static void cns3xxx_adjust_link(struct net_device *dev) ++{ ++ struct port *port = netdev_priv(dev); ++ struct phy_device *phydev = port->phydev; ++ ++ if (!phydev->link) { ++ if (port->speed) { ++ port->speed = 0; ++ printk(KERN_INFO "%s: link down\n", dev->name); ++ } ++ return; ++ } ++ ++ if (port->speed == phydev->speed && port->duplex == phydev->duplex) ++ return; ++ ++ port->speed = phydev->speed; ++ port->duplex = phydev->duplex; ++ ++ printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n", ++ dev->name, port->speed, port->duplex ? "full" : "half"); ++} ++ ++irqreturn_t eth_rx_irq(int irq, void *pdev) ++{ ++ struct net_device *dev = pdev; ++ struct sw *sw = netdev_priv(dev); ++ if (likely(napi_schedule_prep(&sw->napi))) { ++ disable_irq_nosync(IRQ_CNS3XXX_SW_R0RXC); ++ __napi_schedule(&sw->napi); ++ } ++ return (IRQ_HANDLED); ++} ++ ++static void cns3xxx_alloc_rx_buf(struct sw *sw, int received) ++{ ++ struct _rx_ring *rx_ring = sw->rx_ring; ++ unsigned int i = rx_ring->alloc_index; ++ struct rx_desc *desc; ++ struct sk_buff *skb; ++ u32 mtu = sw->mtu; ++ ++ rx_ring->alloc_count += received; ++ ++ for (received = rx_ring->alloc_count; received > 0; received--) { ++ desc = &(rx_ring)->desc[i]; ++ ++ if ((skb = dev_alloc_skb(mtu))) { ++ if (SKB_DMA_REALIGN) ++ skb_reserve(skb, SKB_DMA_REALIGN); ++ skb_reserve(skb, NET_IP_ALIGN); ++ desc->sdp = dma_map_single(NULL, skb->data, ++ mtu, DMA_FROM_DEVICE); ++ if (dma_mapping_error(NULL, desc->sdp)) { ++ dev_kfree_skb(skb); ++ /* Failed to map, better luck next time */ ++ goto out;; ++ } ++ } else { ++ /* Failed to allocate skb, try again next time */ ++ goto out; ++ } ++ ++ /* put the new buffer on RX-free queue */ ++ rx_ring->buff_tab[i] = skb; ++ ++ if (++i == RX_DESCS) { ++ i = 0; ++ desc->config0 = END_OF_RING | FIRST_SEGMENT | ++ LAST_SEGMENT | mtu; ++ } else { ++ desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | mtu; ++ } ++ } ++out: ++ rx_ring->alloc_count = received; ++ rx_ring->alloc_index = i; ++} ++ ++static void update_tx_stats(struct sw *sw) ++{ ++ struct _tx_ring *tx_ring = sw->tx_ring; ++ struct tx_desc *desc; ++ struct tx_desc *next_desc; ++ struct sk_buff *skb; ++ int i; ++ int index; ++ int num_count; ++ ++ spin_lock_bh(&stat_lock); ++ ++ num_count = tx_ring->num_count; ++ ++ if (!num_count) { ++ spin_unlock_bh(&stat_lock); ++ return; ++ } ++ ++ index = tx_ring->count_index; ++ desc = &(tx_ring)->desc[index]; ++ for (i = 0; i < num_count; i++) { ++ skb = tx_ring->buff_tab[index]; ++ if (desc->cown) { ++ tx_ring->buff_tab[index] = 0; ++ if (unlikely(++index == TX_DESCS)) index = 0; ++ next_desc = &(tx_ring)->desc[index]; ++ prefetch(next_desc + 4); ++ if (likely(skb)) { ++ skb->dev->stats.tx_packets++; ++ skb->dev->stats.tx_bytes += skb->len; ++ dev_kfree_skb_any(skb); ++ } ++ desc = next_desc; ++ } else { ++ break; ++ } ++ } ++ tx_ring->num_count -= i; ++ tx_ring->count_index = index; ++ ++ spin_unlock_bh(&stat_lock); ++} ++ ++static void clear_tx_desc(struct sw *sw) ++{ ++ struct _tx_ring *tx_ring = sw->tx_ring; ++ struct tx_desc *desc; ++ struct tx_desc *next_desc; ++ 
int i; ++ int index; ++ int num_used = tx_ring->num_used - tx_ring->num_count; ++ ++ if (num_used < (TX_DESCS >> 1)) ++ return; ++ ++ index = tx_ring->free_index; ++ desc = &(tx_ring)->desc[index]; ++ for (i = 0; i < num_used; i++) { ++ if (desc->cown) { ++ if (unlikely(++index == TX_DESCS)) index = 0; ++ next_desc = &(tx_ring)->desc[index]; ++ prefetch(next_desc); ++ prefetch(next_desc + 4); ++ if (likely(desc->sdp)) ++ dma_unmap_single(NULL, desc->sdp, ++ desc->sdl, DMA_TO_DEVICE); ++ desc = next_desc; ++ } else { ++ break; ++ } ++ } ++ tx_ring->free_index = index; ++ tx_ring->num_used -= i; ++} ++ ++static int eth_poll(struct napi_struct *napi, int budget) ++{ ++ struct sw *sw = container_of(napi, struct sw, napi); ++ struct net_device *dev; ++ struct _rx_ring *rx_ring = sw->rx_ring; ++ int received = 0; ++ unsigned int length; ++ unsigned int i = rx_ring->cur_index; ++ struct rx_desc *next_desc; ++ struct rx_desc *desc = &(rx_ring)->desc[i]; ++ int port_id; ++ ++ while (desc->cown) { ++ struct sk_buff *skb; ++ ++ if (received >= budget) ++ break; ++ ++ skb = rx_ring->buff_tab[i]; ++ ++ if (++i == RX_DESCS) i = 0; ++ next_desc = &(rx_ring)->desc[i]; ++ prefetch(next_desc); ++ ++ port_id = desc->sp; ++ if (port_id == 4) ++ dev = switch_port_tab[2]->netdev; ++ else ++ dev = switch_port_tab[port_id]->netdev; ++ ++ length = desc->sdl; ++ /* process received frame */ ++ dma_unmap_single(&dev->dev, desc->sdp, ++ length, DMA_FROM_DEVICE); ++ ++ skb_put(skb, length); ++ ++ skb->dev = dev; ++ skb->protocol = eth_type_trans(skb, dev); ++ ++ dev->stats.rx_packets++; ++ dev->stats.rx_bytes += length; ++ ++ switch (desc->prot) { ++ case 1: ++ case 2: ++ case 5: ++ case 6: ++ case 13: ++ case 14: ++ if (desc->l4f) ++ skb->ip_summed = CHECKSUM_NONE; ++ else ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ break; ++ default: ++ skb->ip_summed = CHECKSUM_NONE; ++ break; ++ } ++ ++ napi_gro_receive(napi, skb); ++ ++ received++; ++ desc = next_desc; ++ } ++ ++ cns3xxx_alloc_rx_buf(sw, received); ++ rx_ring->cur_index = i; ++ ++ if (received != budget) { ++ napi_complete(napi); ++ enable_irq(IRQ_CNS3XXX_SW_R0RXC); ++ } ++ ++ return received; ++} ++ ++static int eth_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct port *port = netdev_priv(dev); ++ struct sw *sw = port->sw; ++ struct _tx_ring *tx_ring = sw->tx_ring; ++ struct tx_desc *tx_desc; ++ int index; ++ int len = skb->len; ++ char pmap = (1 << port->id); ++ ++ if (pmap == 8) ++ pmap = (1 << 4); ++ ++ if (unlikely(len > sw->mtu)) { ++ dev_kfree_skb(skb); ++ dev->stats.tx_errors++; ++ return NETDEV_TX_OK; ++ } ++ ++ update_tx_stats(sw); ++ ++ spin_lock_bh(&tx_lock); ++ ++ clear_tx_desc(sw); ++ ++ if (unlikely(tx_ring->num_used == TX_DESCS)) { ++ spin_unlock_bh(&tx_lock); ++ return NETDEV_TX_BUSY; ++ } ++ ++ index = tx_ring->cur_index; ++ ++ if (unlikely(++tx_ring->cur_index == TX_DESCS)) ++ tx_ring->cur_index = 0; ++ ++ tx_ring->num_used++; ++ tx_ring->num_count++; ++ ++ spin_unlock_bh(&tx_lock); ++ ++ tx_desc = &(tx_ring)->desc[index]; ++ ++ tx_desc->sdp = dma_map_single(NULL, skb->data, len, ++ DMA_TO_DEVICE); ++ ++ if (dma_mapping_error(NULL, tx_desc->sdp)) { ++ dev_kfree_skb(skb); ++ dev->stats.tx_errors++; ++ return NETDEV_TX_OK; ++ } ++ ++ tx_desc->pmap = pmap; ++ tx_ring->buff_tab[index] = skb; ++ ++ if (index == TX_DESCS - 1) { ++ tx_desc->config0 = END_OF_RING | FIRST_SEGMENT | LAST_SEGMENT | ++ FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM | ++ TCP_CHECKSUM | len; ++ } else { ++ tx_desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | ++ 
FORCE_ROUTE | IP_CHECKSUM | UDP_CHECKSUM | ++ TCP_CHECKSUM | len; ++ } ++ ++ return NETDEV_TX_OK; ++} ++ ++static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) ++{ ++ struct port *port = netdev_priv(dev); ++ ++ if (!netif_running(dev)) ++ return -EINVAL; ++ return phy_mii_ioctl(port->phydev, req, cmd); ++} ++ ++/* ethtool support */ ++ ++static void cns3xxx_get_drvinfo(struct net_device *dev, ++ struct ethtool_drvinfo *info) ++{ ++ strcpy(info->driver, DRV_NAME); ++ strcpy(info->bus_info, "internal"); ++} ++ ++static int cns3xxx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ++{ ++ struct port *port = netdev_priv(dev); ++ return phy_ethtool_gset(port->phydev, cmd); ++} ++ ++static int cns3xxx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ++{ ++ struct port *port = netdev_priv(dev); ++ return phy_ethtool_sset(port->phydev, cmd); ++} ++ ++static int cns3xxx_nway_reset(struct net_device *dev) ++{ ++ struct port *port = netdev_priv(dev); ++ return phy_start_aneg(port->phydev); ++} ++ ++static struct ethtool_ops cns3xxx_ethtool_ops = { ++ .get_drvinfo = cns3xxx_get_drvinfo, ++ .get_settings = cns3xxx_get_settings, ++ .set_settings = cns3xxx_set_settings, ++ .nway_reset = cns3xxx_nway_reset, ++ .get_link = ethtool_op_get_link, ++}; ++ ++ ++static int init_rings(struct sw *sw) ++{ ++ int i; ++ struct _rx_ring *rx_ring = sw->rx_ring; ++ struct _tx_ring *tx_ring = sw->tx_ring; ++ ++ __raw_writel(0, &sw->regs->fs_dma_ctrl0); ++ __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg); ++ __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); ++ __raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); ++ ++ __raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl); ++ ++ if (!(rx_dma_pool = dma_pool_create(DRV_NAME, NULL, ++ RX_POOL_ALLOC_SIZE, 32, 0))) ++ return -ENOMEM; ++ ++ if (!(rx_ring->desc = dma_pool_alloc(rx_dma_pool, GFP_KERNEL, ++ &rx_ring->phys_addr))) ++ return -ENOMEM; ++ memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE); ++ ++ /* Setup RX buffers */ ++ for (i = 0; i < RX_DESCS; i++) { ++ struct rx_desc *desc = &(rx_ring)->desc[i]; ++ struct sk_buff *skb; ++ if (!(skb = dev_alloc_skb(sw->mtu))) ++ return -ENOMEM; ++ if (SKB_DMA_REALIGN) ++ skb_reserve(skb, SKB_DMA_REALIGN); ++ skb_reserve(skb, NET_IP_ALIGN); ++ desc->sdl = sw->mtu; ++ if (i == (RX_DESCS - 1)) ++ desc->eor = 1; ++ desc->fsd = 1; ++ desc->lsd = 1; ++ ++ desc->sdp = dma_map_single(NULL, skb->data, ++ sw->mtu, DMA_FROM_DEVICE); ++ if (dma_mapping_error(NULL, desc->sdp)) { ++ return -EIO; ++ } ++ rx_ring->buff_tab[i] = skb; ++ desc->cown = 0; ++ } ++ __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_ptr0); ++ __raw_writel(rx_ring->phys_addr, &sw->regs->fs_desc_base_addr0); ++ ++ if (!(tx_dma_pool = dma_pool_create(DRV_NAME, NULL, ++ TX_POOL_ALLOC_SIZE, 32, 0))) ++ return -ENOMEM; ++ ++ if (!(tx_ring->desc = dma_pool_alloc(tx_dma_pool, GFP_KERNEL, ++ &tx_ring->phys_addr))) ++ return -ENOMEM; ++ memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE); ++ ++ /* Setup TX buffers */ ++ for (i = 0; i < TX_DESCS; i++) { ++ struct tx_desc *desc = &(tx_ring)->desc[i]; ++ tx_ring->buff_tab[i] = 0; ++ ++ if (i == (TX_DESCS - 1)) ++ desc->eor = 1; ++ desc->cown = 1; ++ } ++ __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0); ++ __raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_base_addr0); ++ ++ return 0; ++} ++ ++static void destroy_rings(struct sw *sw) ++{ ++ int i; ++ if (sw->rx_ring->desc) { ++ for (i = 0; i < RX_DESCS; i++) { ++ struct _rx_ring *rx_ring = 
sw->rx_ring; ++ struct rx_desc *desc = &(rx_ring)->desc[i]; ++ struct sk_buff *skb = sw->rx_ring->buff_tab[i]; ++ if (skb) { ++ dma_unmap_single(NULL, ++ desc->sdp, ++ sw->mtu, DMA_FROM_DEVICE); ++ dev_kfree_skb(skb); ++ } ++ } ++ dma_pool_free(rx_dma_pool, sw->rx_ring->desc, sw->rx_ring->phys_addr); ++ dma_pool_destroy(rx_dma_pool); ++ rx_dma_pool = 0; ++ sw->rx_ring->desc = 0; ++ } ++ if (sw->tx_ring->desc) { ++ for (i = 0; i < TX_DESCS; i++) { ++ struct _tx_ring *tx_ring = sw->tx_ring; ++ struct tx_desc *desc = &(tx_ring)->desc[i]; ++ struct sk_buff *skb = sw->tx_ring->buff_tab[i]; ++ if (skb) { ++ dma_unmap_single(NULL, desc->sdp, ++ skb->len, DMA_TO_DEVICE); ++ dev_kfree_skb(skb); ++ } ++ } ++ dma_pool_free(tx_dma_pool, sw->tx_ring->desc, sw->tx_ring->phys_addr); ++ dma_pool_destroy(tx_dma_pool); ++ tx_dma_pool = 0; ++ sw->tx_ring->desc = 0; ++ } ++} ++ ++static int eth_open(struct net_device *dev) ++{ ++ struct port *port = netdev_priv(dev); ++ struct sw *sw = port->sw; ++ u32 temp; ++ ++ port->speed = 0; /* force "link up" message */ ++ phy_start(port->phydev); ++ ++ netif_start_queue(dev); ++ ++ if (!ports_open) { ++ request_irq(IRQ_CNS3XXX_SW_R0RXC, eth_rx_irq, IRQF_SHARED, "gig_switch", napi_dev); ++ napi_enable(&sw->napi); ++ netif_start_queue(napi_dev); ++ //enable_irq(IRQ_CNS3XXX_SW_R0RXC); ++ ++ temp = __raw_readl(&sw->regs->mac_cfg[2]); ++ temp &= ~(PORT_DISABLE); ++ __raw_writel(temp, &sw->regs->mac_cfg[2]); ++ ++ temp = __raw_readl(&sw->regs->dma_auto_poll_cfg); ++ temp &= ~(TS_SUSPEND | FS_SUSPEND); ++ __raw_writel(temp, &sw->regs->dma_auto_poll_cfg); ++ ++ __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg); ++ } ++ temp = __raw_readl(&sw->regs->mac_cfg[port->id]); ++ temp &= ~(PORT_DISABLE); ++ __raw_writel(temp, &sw->regs->mac_cfg[port->id]); ++ ++ ports_open++; ++ netif_carrier_on(dev); ++ ++ return 0; ++} ++ ++static int eth_close(struct net_device *dev) ++{ ++ struct port *port = netdev_priv(dev); ++ struct sw *sw = port->sw; ++ u32 temp; ++ ++ ports_open--; ++ ++ temp = __raw_readl(&sw->regs->mac_cfg[port->id]); ++ temp |= (PORT_DISABLE); ++ __raw_writel(temp, &sw->regs->mac_cfg[port->id]); ++ ++ netif_stop_queue(dev); ++ ++ phy_stop(port->phydev); ++ ++ if (!ports_open) { ++ disable_irq(IRQ_CNS3XXX_SW_R0RXC); ++ free_irq(IRQ_CNS3XXX_SW_R0RXC, napi_dev); ++ napi_disable(&sw->napi); ++ netif_stop_queue(napi_dev); ++ temp = __raw_readl(&sw->regs->mac_cfg[2]); ++ temp |= (PORT_DISABLE); ++ __raw_writel(temp, &sw->regs->mac_cfg[2]); ++ ++ __raw_writel(TS_SUSPEND | FS_SUSPEND, ++ &sw->regs->dma_auto_poll_cfg); ++ } ++ ++ netif_carrier_off(dev); ++ return 0; ++} ++ ++static void eth_rx_mode(struct net_device *dev) ++{ ++ struct port *port = netdev_priv(dev); ++ struct sw *sw = port->sw; ++ u32 temp; ++ ++ temp = __raw_readl(&sw->regs->mac_glob_cfg); ++ ++ if (dev->flags & IFF_PROMISC) { ++ if (port->id == 3) ++ temp |= ((1 << 2) << PROMISC_OFFSET); ++ else ++ temp |= ((1 << port->id) << PROMISC_OFFSET); ++ } else { ++ if (port->id == 3) ++ temp &= ~((1 << 2) << PROMISC_OFFSET); ++ else ++ temp &= ~((1 << port->id) << PROMISC_OFFSET); ++ } ++ __raw_writel(temp, &sw->regs->mac_glob_cfg); ++} ++ ++static int eth_set_mac(struct net_device *netdev, void *p) ++{ ++ struct port *port = netdev_priv(netdev); ++ struct sw *sw = port->sw; ++ struct sockaddr *addr = p; ++ u32 cycles = 0; ++ ++ if (!is_valid_ether_addr(addr->sa_data)) ++ return -EADDRNOTAVAIL; ++ ++ /* Invalidate old ARL Entry */ ++ if (port->id == 3) ++ __raw_writel((port->id << 16) | (0x4 << 9), 
&sw->regs->arl_ctrl[0]); ++ else ++ __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); ++ __raw_writel( ((netdev->dev_addr[0] << 24) | (netdev->dev_addr[1] << 16) | ++ (netdev->dev_addr[2] << 8) | (netdev->dev_addr[3])), ++ &sw->regs->arl_ctrl[1]); ++ ++ __raw_writel( ((netdev->dev_addr[4] << 24) | (netdev->dev_addr[5] << 16) | ++ (1 << 1)), ++ &sw->regs->arl_ctrl[2]); ++ __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd); ++ ++ while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) ++ && cycles < 5000) { ++ udelay(1); ++ cycles++; ++ } ++ ++ cycles = 0; ++ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ++ ++ if (port->id == 3) ++ __raw_writel((port->id << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); ++ else ++ __raw_writel(((port->id + 1) << 16) | (0x4 << 9), &sw->regs->arl_ctrl[0]); ++ __raw_writel( ((addr->sa_data[0] << 24) | (addr->sa_data[1] << 16) | ++ (addr->sa_data[2] << 8) | (addr->sa_data[3])), ++ &sw->regs->arl_ctrl[1]); ++ ++ __raw_writel( ((addr->sa_data[4] << 24) | (addr->sa_data[5] << 16) | ++ (7 << 4) | (1 << 1)), &sw->regs->arl_ctrl[2]); ++ __raw_writel((1 << 19), &sw->regs->arl_vlan_cmd); ++ ++ while (((__raw_readl(&sw->regs->arl_vlan_cmd) & (1 << 21)) == 0) ++ && cycles < 5000) { ++ udelay(1); ++ cycles++; ++ } ++ return 0; ++} ++ ++static int cns3xxx_change_mtu(struct net_device *netdev, int new_mtu) ++{ ++ struct port *port = netdev_priv(netdev); ++ struct sw *sw = port->sw; ++ u32 temp; ++ int i; ++ struct _rx_ring *rx_ring = sw->rx_ring; ++ struct rx_desc *desc; ++ struct sk_buff *skb; ++ ++ if (new_mtu > MAX_MRU) ++ return -EINVAL; ++ ++ netdev->mtu = new_mtu; ++ ++ new_mtu += 36 + SKB_DMA_REALIGN; ++ port->mtu = new_mtu; ++ ++ new_mtu = 0; ++ for (i = 0; i < 3; i++) { ++ if (switch_port_tab[i]) { ++ if (switch_port_tab[i]->mtu > new_mtu) ++ new_mtu = switch_port_tab[i]->mtu; ++ } ++ } ++ ++ ++ if (new_mtu == sw->mtu) ++ return 0; ++ ++ disable_irq(IRQ_CNS3XXX_SW_R0RXC); ++ ++ sw->mtu = new_mtu; ++ ++ /* Disable DMA */ ++ __raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg); ++ ++ for (i = 0; i < RX_DESCS; i++) { ++ desc = &(rx_ring)->desc[i]; ++ /* Check if we own it, if we do, it will get set correctly ++ * when it is re-used */ ++ if (!desc->cown) { ++ skb = rx_ring->buff_tab[i]; ++ dma_unmap_single(NULL, desc->sdp, desc->sdl, ++ DMA_FROM_DEVICE); ++ dev_kfree_skb(skb); ++ ++ if ((skb = dev_alloc_skb(new_mtu))) { ++ if (SKB_DMA_REALIGN) ++ skb_reserve(skb, SKB_DMA_REALIGN); ++ skb_reserve(skb, NET_IP_ALIGN); ++ desc->sdp = dma_map_single(NULL, skb->data, ++ new_mtu, DMA_FROM_DEVICE); ++ if (dma_mapping_error(NULL, desc->sdp)) { ++ dev_kfree_skb(skb); ++ skb = NULL; ++ } ++ } ++ ++ /* put the new buffer on RX-free queue */ ++ rx_ring->buff_tab[i] = skb; ++ ++ if (i == RX_DESCS - 1) ++ desc->config0 = END_OF_RING | FIRST_SEGMENT | ++ LAST_SEGMENT | new_mtu; ++ else ++ desc->config0 = FIRST_SEGMENT | ++ LAST_SEGMENT | new_mtu; ++ } ++ } ++ ++ /* Re-ENABLE DMA */ ++ temp = __raw_readl(&sw->regs->dma_auto_poll_cfg); ++ temp &= ~(TS_SUSPEND | FS_SUSPEND); ++ __raw_writel(temp, &sw->regs->dma_auto_poll_cfg); ++ ++ __raw_writel((TS_POLL_EN | FS_POLL_EN), &sw->regs->dma_auto_poll_cfg); ++ ++ enable_irq(IRQ_CNS3XXX_SW_R0RXC); ++ ++ return 0; ++} ++ ++static const struct net_device_ops cns3xxx_netdev_ops = { ++ .ndo_open = eth_open, ++ .ndo_stop = eth_close, ++ .ndo_start_xmit = eth_xmit, ++ .ndo_set_rx_mode = eth_rx_mode, ++ .ndo_do_ioctl = eth_ioctl, ++ .ndo_change_mtu = cns3xxx_change_mtu, ++ .ndo_set_mac_address = 
eth_set_mac, ++ .ndo_validate_addr = eth_validate_addr, ++}; ++ ++static int __devinit eth_init_one(struct platform_device *pdev) ++{ ++ int i; ++ struct port *port; ++ struct sw *sw; ++ struct net_device *dev; ++ struct cns3xxx_plat_info *plat = pdev->dev.platform_data; ++ u32 regs_phys; ++ char phy_id[MII_BUS_ID_SIZE + 3]; ++ int err; ++ u32 temp; ++ ++ spin_lock_init(&tx_lock); ++ spin_lock_init(&stat_lock); ++ ++ if (!(napi_dev = alloc_etherdev(sizeof(struct sw)))) ++ return -ENOMEM; ++ strcpy(napi_dev->name, "switch%d"); ++ ++ SET_NETDEV_DEV(napi_dev, &pdev->dev); ++ sw = netdev_priv(napi_dev); ++ memset(sw, 0, sizeof(struct sw)); ++ sw->regs = (struct switch_regs __iomem *)CNS3XXX_SWITCH_BASE_VIRT; ++ regs_phys = CNS3XXX_SWITCH_BASE; ++ sw->mem_res = request_mem_region(regs_phys, REGS_SIZE, napi_dev->name); ++ if (!sw->mem_res) { ++ err = -EBUSY; ++ goto err_free; ++ } ++ ++ sw->mtu = 1536 + SKB_DMA_REALIGN; ++ ++ for (i = 0; i < 4; i++) { ++ temp = __raw_readl(&sw->regs->mac_cfg[i]); ++ temp |= (PORT_DISABLE) | 0x80000000; ++ __raw_writel(temp, &sw->regs->mac_cfg[i]); ++ } ++ ++ temp = PORT_DISABLE; ++ __raw_writel(temp, &sw->regs->mac_cfg[2]); ++ ++ temp = __raw_readl(&sw->regs->vlan_cfg); ++ temp |= NIC_MODE | VLAN_UNAWARE; ++ __raw_writel(temp, &sw->regs->vlan_cfg); ++ ++ __raw_writel(UNKNOWN_VLAN_TO_CPU | ACCEPT_CRC_PACKET | ++ CRC_STRIPPING, &sw->regs->mac_glob_cfg); ++ ++ if (!(sw->rx_ring = kmalloc(sizeof(struct _rx_ring), GFP_KERNEL))) { ++ err = -ENOMEM; ++ goto err_free; ++ } ++ memset(sw->rx_ring, 0, sizeof(struct _rx_ring)); ++ ++ if (!(sw->tx_ring = kmalloc(sizeof(struct _tx_ring), GFP_KERNEL))) { ++ err = -ENOMEM; ++ goto err_free_rx; ++ } ++ memset(sw->tx_ring, 0, sizeof(struct _tx_ring)); ++ ++ if ((err = init_rings(sw)) != 0) { ++ destroy_rings(sw); ++ err = -ENOMEM; ++ goto err_free_rings; ++ } ++ platform_set_drvdata(pdev, napi_dev); ++ ++ netif_napi_add(napi_dev, &sw->napi, eth_poll, NAPI_WEIGHT); ++ ++ for (i = 0; i < 3; i++) { ++ if (!(plat->ports & (1 << i))) { ++ continue; ++ } ++ ++ if (!(dev = alloc_etherdev(sizeof(struct port)))) { ++ goto free_ports; ++ } ++ ++ //SET_NETDEV_DEV(dev, &pdev->dev); ++ port = netdev_priv(dev); ++ port->netdev = dev; ++ if (i == 2) ++ port->id = 3; ++ else ++ port->id = i; ++ port->sw = sw; ++ port->mtu = sw->mtu; ++ ++ temp = __raw_readl(&sw->regs->mac_cfg[port->id]); ++ temp |= (PORT_DISABLE); ++ __raw_writel(temp, &sw->regs->mac_cfg[port->id]); ++ ++ dev->netdev_ops = &cns3xxx_netdev_ops; ++ dev->ethtool_ops = &cns3xxx_ethtool_ops; ++ dev->tx_queue_len = 1000; ++ dev->features = NETIF_F_HW_CSUM; ++ ++ dev->vlan_features = NETIF_F_HW_CSUM; ++ ++ switch_port_tab[i] = port; ++ memcpy(dev->dev_addr, &plat->hwaddr[i], ETH_ALEN); ++ ++ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy[i]); ++ port->phydev = phy_connect(dev, phy_id, &cns3xxx_adjust_link, 0, ++ PHY_INTERFACE_MODE_RGMII); ++ if ((err = IS_ERR(port->phydev))) { ++ switch_port_tab[i] = 0; ++ free_netdev(dev); ++ goto free_ports; ++ } ++ ++ port->phydev->irq = PHY_POLL; ++ ++ if ((err = register_netdev(dev))) { ++ phy_disconnect(port->phydev); ++ switch_port_tab[i] = 0; ++ free_netdev(dev); ++ goto free_ports; ++ } ++ ++ printk(KERN_INFO "%s: RGMII PHY %i on cns3xxx Switch\n", dev->name, plat->phy[i]); ++ netif_carrier_off(dev); ++ dev = 0; ++ } ++ ++ return 0; ++ ++free_ports: ++ err = -ENOMEM; ++ for (--i; i >= 0; i--) { ++ if (switch_port_tab[i]) { ++ port = switch_port_tab[i]; ++ dev = port->netdev; ++ unregister_netdev(dev); ++ 
phy_disconnect(port->phydev); ++ switch_port_tab[i] = 0; ++ free_netdev(dev); ++ } ++ } ++err_free_rings: ++ kfree(sw->tx_ring); ++err_free_rx: ++ kfree(sw->rx_ring); ++err_free: ++ free_netdev(napi_dev); ++ return err; ++} ++ ++static int __devexit eth_remove_one(struct platform_device *pdev) ++{ ++ struct net_device *dev = platform_get_drvdata(pdev); ++ struct sw *sw = netdev_priv(dev); ++ int i; ++ destroy_rings(sw); ++ ++ for (i = 2; i >= 0; i--) { ++ if (switch_port_tab[i]) { ++ struct port *port = switch_port_tab[i]; ++ struct net_device *dev = port->netdev; ++ unregister_netdev(dev); ++ phy_disconnect(port->phydev); ++ switch_port_tab[i] = 0; ++ free_netdev(dev); ++ } ++ } ++ ++ release_resource(sw->mem_res); ++ free_netdev(napi_dev); ++ return 0; ++} ++ ++static struct platform_driver cns3xxx_eth_driver = { ++ .driver.name = DRV_NAME, ++ .probe = eth_init_one, ++ .remove = eth_remove_one, ++}; ++ ++static int __init eth_init_module(void) ++{ ++ int err; ++ if ((err = cns3xxx_mdio_register())) ++ return err; ++ return platform_driver_register(&cns3xxx_eth_driver); ++} ++ ++static void __exit eth_cleanup_module(void) ++{ ++ platform_driver_unregister(&cns3xxx_eth_driver); ++ cns3xxx_mdio_remove(); ++} ++ ++module_init(eth_init_module); ++module_exit(eth_cleanup_module); ++ ++MODULE_AUTHOR("Chris Lang"); ++MODULE_DESCRIPTION("Cavium CNS3xxx Ethernet driver"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:cns3xxx_eth"); +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/platform.h +@@ -0,0 +1,26 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/platform.h ++ * ++ * Copyright 2011 Gateworks Corporation ++ * Chris Lang ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ * ++ * This file is distributed in the hope that it will be useful, ++ * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or ++ * NONINFRINGEMENT. See the GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this file; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA or ++ * visit http://www.gnu.org/licenses/. ++ * ++ * This file may also be available under a different license from Cavium. 
++ * Contact Cavium Networks for more information ++ * ++ ******************************************************************************/ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * define access macros ++ */ ++#define SPI_MEM_MAP_VALUE(reg_offset) (*((u32 volatile *)(CNS3XXX_SSP_BASE_VIRT + reg_offset))) ++ ++#define SPI_CONFIGURATION_REG SPI_MEM_MAP_VALUE(0x40) ++#define SPI_SERVICE_STATUS_REG SPI_MEM_MAP_VALUE(0x44) ++#define SPI_BIT_RATE_CONTROL_REG SPI_MEM_MAP_VALUE(0x48) ++#define SPI_TRANSMIT_CONTROL_REG SPI_MEM_MAP_VALUE(0x4C) ++#define SPI_TRANSMIT_BUFFER_REG SPI_MEM_MAP_VALUE(0x50) ++#define SPI_RECEIVE_CONTROL_REG SPI_MEM_MAP_VALUE(0x54) ++#define SPI_RECEIVE_BUFFER_REG SPI_MEM_MAP_VALUE(0x58) ++#define SPI_FIFO_TRANSMIT_CONFIG_REG SPI_MEM_MAP_VALUE(0x5C) ++#define SPI_FIFO_TRANSMIT_CONTROL_REG SPI_MEM_MAP_VALUE(0x60) ++#define SPI_FIFO_RECEIVE_CONFIG_REG SPI_MEM_MAP_VALUE(0x64) ++#define SPI_INTERRUPT_STATUS_REG SPI_MEM_MAP_VALUE(0x68) ++#define SPI_INTERRUPT_ENABLE_REG SPI_MEM_MAP_VALUE(0x6C) ++ ++#define SPI_TRANSMIT_BUFFER_REG_ADDR (CNS3XXX_SSP_BASE +0x50) ++#define SPI_RECEIVE_BUFFER_REG_ADDR (CNS3XXX_SSP_BASE +0x58) ++ ++/* Structure for SPI controller of CNS3XXX SOCs */ ++struct cns3xxx_spi { ++ /* bitbang has to be first */ ++ struct spi_bitbang bitbang; ++ struct completion done; ++ wait_queue_head_t wait; ++ ++ int len; ++ int count; ++ int last_in_message_list; ++ ++ /* data buffers */ ++ const unsigned char *tx; ++ unsigned char *rx; ++ ++ struct spi_master *master; ++ struct platform_device *pdev; ++ struct device *dev; ++}; ++ ++static inline u8 cns3xxx_spi_bus_idle(void) ++{ ++ return ((SPI_SERVICE_STATUS_REG & 0x1) ? 0 : 1); ++} ++ ++static inline u8 cns3xxx_spi_tx_buffer_empty(void) ++{ ++ return ((SPI_INTERRUPT_STATUS_REG & (0x1 << 3)) ? 1 : 0); ++} ++ ++static inline u8 cns3xxx_spi_rx_buffer_full(void) ++{ ++ return ((SPI_INTERRUPT_STATUS_REG & (0x1 << 2)) ? 1 : 0); ++} ++ ++u8 cns3xxx_spi_tx_rx(u8 tx_channel, u8 tx_eof, u32 tx_data, ++ u32 * rx_data) ++{ ++ u8 rx_channel; ++ u8 rx_eof; ++ ++ while (!cns3xxx_spi_bus_idle()) ; // do nothing ++ ++ while (!cns3xxx_spi_tx_buffer_empty()) ; // do nothing ++ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (tx_channel & 0x3) | ((tx_eof & 0x1) << 2); ++ ++ SPI_TRANSMIT_BUFFER_REG = tx_data; ++ ++ while (!cns3xxx_spi_rx_buffer_full()) ; // do nothing ++ ++ rx_channel = SPI_RECEIVE_CONTROL_REG & 0x3; ++ rx_eof = (SPI_RECEIVE_CONTROL_REG & (0x1 << 2)) ? 
1 : 0; ++ ++ *rx_data = SPI_RECEIVE_BUFFER_REG; ++ ++ if ((tx_channel != rx_channel) || (tx_eof != rx_eof)) { ++ return 0; ++ } else { ++ return 1; ++ } ++} ++ ++u8 cns3xxx_spi_tx(u8 tx_channel, u8 tx_eof, u32 tx_data) ++{ ++ ++ while (!cns3xxx_spi_bus_idle()) ; // do nothing ++ ++ while (!cns3xxx_spi_tx_buffer_empty()) ; // do nothing ++ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (tx_channel & 0x3) | ((tx_eof & 0x1) << 2); ++ ++ SPI_TRANSMIT_BUFFER_REG = tx_data; ++ ++ return 1; ++} ++ ++static inline struct cns3xxx_spi *to_hw(struct spi_device *sdev) ++{ ++ return spi_master_get_devdata(sdev->master); ++} ++ ++static int cns3xxx_spi_setup_transfer(struct spi_device *spi, ++ struct spi_transfer *t) ++{ ++ return 0; ++} ++ ++static void cns3xxx_spi_chipselect(struct spi_device *spi, int value) ++{ ++ unsigned int spi_config; ++ ++ switch (value) { ++ case BITBANG_CS_INACTIVE: ++ break; ++ ++ case BITBANG_CS_ACTIVE: ++ spi_config = SPI_CONFIGURATION_REG; ++ ++ if (spi->mode & SPI_CPHA) ++ spi_config |= (0x1 << 13); ++ else ++ spi_config &= ~(0x1 << 13); ++ ++ if (spi->mode & SPI_CPOL) ++ spi_config |= (0x1 << 14); ++ else ++ spi_config &= ~(0x1 << 14); ++ ++ /* write new configration */ ++ SPI_CONFIGURATION_REG = spi_config; ++ ++ SPI_TRANSMIT_CONTROL_REG &= ~(0x7); ++ SPI_TRANSMIT_CONTROL_REG |= (spi->chip_select & 0x3); ++ ++ break; ++ } ++} ++ ++static int cns3xxx_spi_setup(struct spi_device *spi) ++{ ++ if (!spi->bits_per_word) ++ spi->bits_per_word = 8; ++ ++ return 0; ++} ++ ++static int cns3xxx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) ++{ ++ struct cns3xxx_spi *hw = to_hw(spi); ++ ++ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", t->tx_buf, t->rx_buf, ++ t->len); ++ ++ hw->tx = t->tx_buf; ++ hw->rx = t->rx_buf; ++ hw->len = t->len; ++ hw->count = 0; ++ hw->last_in_message_list = t->last_in_message_list; ++ ++ init_completion(&hw->done); ++ ++ if (hw->tx) { ++ int i; ++ u32 rx_data; ++ for (i = 0; i < (hw->len - 1); i++) { ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->tx[%02d]: 0x%02x\n", i, ++ hw->tx[i]); ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, hw->tx[i], ++ &rx_data); ++ if (hw->rx) { ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", ++ i, hw->rx[i]); ++ } ++ } ++ ++ if (t->last_in_message_list) { ++ cns3xxx_spi_tx_rx(spi->chip_select, 1, hw->tx[i], ++ &rx_data); ++ if (hw->rx) { ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", ++ i, hw->rx[i]); ++ } ++ } else { ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, hw->tx[i], ++ &rx_data); ++ } ++ goto done; ++ } ++ ++ if (hw->rx) { ++ int i; ++ u32 rx_data; ++ for (i = 0; i < (hw->len - 1); i++) { ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, 0xff, &rx_data); ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, ++ "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", i, ++ hw->rx[i]); ++ } ++ ++ if (t->last_in_message_list) { ++ cns3xxx_spi_tx_rx(spi->chip_select, 1, 0xff, &rx_data); ++ } else { ++ cns3xxx_spi_tx_rx(spi->chip_select, 0, 0xff, &rx_data); ++ } ++ hw->rx[i] = rx_data; ++ dev_dbg(&spi->dev, "[SPI_CNS3XXX_DEBUG] hw->rx[%02d]: 0x%02x\n", ++ i, hw->rx[i]); ++ } ++done: ++ return hw->len; ++} ++ ++static void __init cns3xxx_spi_initial(void) ++{ ++ u32 __iomem *gpiob = __io(CNS3XXX_MISC_BASE_VIRT + 0x0018); ++ u32 gpiob_pins = __raw_readl(gpiob); ++ ++ /* MMC/SD pins share with GPIOA */ ++ gpiob_pins |= 0xf80; ++ __raw_writel(gpiob_pins, gpiob); ++ ++ /* share pin config. 
*/ ++ //PM_PLL_HM_PD_CTRL_REG &= ~(0x1 << 5); ++ //HAL_MISC_ENABLE_SPI_PINS(); ++ cns3xxx_pwr_clk_en(CNS3XXX_PWR_CLK_EN(SPI_PCM_I2C)); ++ cns3xxx_pwr_soft_rst(CNS3XXX_PWR_SOFTWARE_RST(SPI_PCM_I2C)); ++ ++ SPI_CONFIGURATION_REG = (((0x0 & 0x3) << 0) | /* 8bits shift length */ ++ (0x0 << 9) | /* SPI mode */ ++ (0x0 << 10) | /* disable FIFO */ ++ (0x1 << 11) | /* SPI master mode */ ++ (0x0 << 12) | /* disable SPI loopback mode */ ++ (0x1 << 13) | /* clock phase */ ++ (0x1 << 14) | /* clock polarity */ ++ (0x0 << 24) | /* disable - SPI data swap */ ++ (0x1 << 29) | /* enable - 2IO Read mode */ ++ (0x0 << 30) | /* disable - SPI high speed read for system boot up */ ++ (0x0 << 31)); /* disable - SPI */ ++ ++ /* Set SPI bit rate PCLK/2 */ ++ SPI_BIT_RATE_CONTROL_REG = 0x1; ++ ++ /* Set SPI Tx channel 0 */ ++ SPI_TRANSMIT_CONTROL_REG = 0x0; ++ ++ /* Set Tx FIFO Threshold, Tx FIFO has 2 words */ ++ SPI_FIFO_TRANSMIT_CONFIG_REG &= ~(0x03 << 4); ++ SPI_FIFO_TRANSMIT_CONFIG_REG |= ((0x0 & 0x03) << 4); ++ ++ /* Set Rx FIFO Threshold, Rx FIFO has 2 words */ ++ SPI_FIFO_RECEIVE_CONFIG_REG &= ~(0x03 << 4); ++ SPI_FIFO_RECEIVE_CONFIG_REG |= ((0x0 & 0x03) << 4); ++ ++ /* Disable all interrupt */ ++ SPI_INTERRUPT_ENABLE_REG = 0x0; ++ ++ /* Clear spurious interrupt sources */ ++ SPI_INTERRUPT_STATUS_REG = (0x0F << 4); ++ ++ /* Enable SPI */ ++ SPI_CONFIGURATION_REG |= (0x1 << 31); ++ ++ return; ++} ++ ++static int __init cns3xxx_spi_probe(struct platform_device *pdev) ++{ ++ struct spi_master *master; ++ struct cns3xxx_spi *hw; ++ int err = 0; ++ ++ printk("%s: setup CNS3XXX SPI Controller\n", __FUNCTION__); ++ ++ /* Allocate master with space for cns3xxx_spi */ ++ master = spi_alloc_master(&pdev->dev, sizeof(struct cns3xxx_spi)); ++ if (master == NULL) { ++ dev_err(&pdev->dev, "No memory for spi_master\n"); ++ err = -ENOMEM; ++ goto err_nomem; ++ } ++ ++ hw = spi_master_get_devdata(master); ++ memset(hw, 0, sizeof(struct cns3xxx_spi)); ++ ++ hw->master = spi_master_get(master); ++ hw->dev = &pdev->dev; ++ ++ platform_set_drvdata(pdev, hw); ++ init_completion(&hw->done); ++ ++ /* setup the master state. 
*/ ++ ++ master->num_chipselect = 4; ++ master->bus_num = 1; ++ ++ /* setup the state for the bitbang driver */ ++ ++ hw->bitbang.master = hw->master; ++ hw->bitbang.setup_transfer = cns3xxx_spi_setup_transfer; ++ hw->bitbang.chipselect = cns3xxx_spi_chipselect; ++ hw->bitbang.txrx_bufs = cns3xxx_spi_txrx; ++ hw->bitbang.master->setup = cns3xxx_spi_setup; ++ ++ dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); ++ ++ /* SPI controller initializations */ ++ cns3xxx_spi_initial(); ++ ++ /* register SPI controller */ ++ ++ err = spi_bitbang_start(&hw->bitbang); ++ if (err) { ++ dev_err(&pdev->dev, "Failed to register SPI master\n"); ++ goto err_register; ++ } ++ ++ return 0; ++ ++err_register: ++ spi_master_put(hw->master);; ++ ++err_nomem: ++ return err; ++} ++ ++static int __devexit cns3xxx_spi_remove(struct platform_device *dev) ++{ ++ struct cns3xxx_spi *hw = platform_get_drvdata(dev); ++ ++ platform_set_drvdata(dev, NULL); ++ ++ spi_unregister_master(hw->master); ++ ++ spi_master_put(hw->master); ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++ ++static int cns3xxx_spi_suspend(struct platform_device *pdev, pm_message_t msg) ++{ ++ struct cns3xxx_spi *hw = platform_get_drvdata(pdev); ++ ++ return 0; ++} ++ ++static int cns3xxx_spi_resume(struct platform_device *pdev) ++{ ++ struct cns3xxx_spi *hw = platform_get_drvdata(pdev); ++ ++ return 0; ++} ++ ++#else ++#define cns3xxx_spi_suspend NULL ++#define cns3xxx_spi_resume NULL ++#endif ++ ++static struct platform_driver cns3xxx_spi_driver = { ++ .probe = cns3xxx_spi_probe, ++ .remove = __devexit_p(cns3xxx_spi_remove), ++ .suspend = cns3xxx_spi_suspend, ++ .resume = cns3xxx_spi_resume, ++ .driver = { ++ .name = "cns3xxx_spi", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init cns3xxx_spi_init(void) ++{ ++ return platform_driver_register(&cns3xxx_spi_driver); ++} ++ ++static void __exit cns3xxx_spi_exit(void) ++{ ++ platform_driver_unregister(&cns3xxx_spi_driver); ++} ++ ++module_init(cns3xxx_spi_init); ++module_exit(cns3xxx_spi_exit); ++ ++MODULE_AUTHOR("Cavium Networks"); ++MODULE_DESCRIPTION("CNS3XXX SPI Controller Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:cns3xxx_spi"); ++ ++EXPORT_SYMBOL_GPL(cns3xxx_spi_tx_rx); +--- a/include/linux/spi/spi.h ++++ b/include/linux/spi/spi.h +@@ -446,6 +446,13 @@ struct spi_transfer { + u32 speed_hz; + + struct list_head transfer_list; ++ ++#ifdef CONFIG_ARCH_CNS3XXX ++ unsigned last_in_message_list; ++#ifdef CONFIG_SPI_CNS3XXX_2IOREAD ++ u8 dio_read; ++#endif ++#endif + }; + + /** +--- a/drivers/spi/spi-bitbang.c ++++ b/drivers/spi/spi-bitbang.c +@@ -329,6 +329,12 @@ static void bitbang_work(struct work_str + */ + if (!m->is_dma_mapped) + t->rx_dma = t->tx_dma = 0; ++ ++ if (t->transfer_list.next == &m->transfers) ++ t->last_in_message_list = 1; ++ else ++ t->last_in_message_list = 0; ++ + status = bitbang->txrx_bufs(spi, t); + } + if (status > 0) diff --git a/target/linux/cns3xxx/patches/054-cns3xxx_pcie_clock.patch b/target/linux/cns3xxx/patches/054-cns3xxx_pcie_clock.patch new file mode 100644 index 0000000000..0c6c525039 --- /dev/null +++ b/target/linux/cns3xxx/patches/054-cns3xxx_pcie_clock.patch @@ -0,0 +1,11 @@ +--- a/arch/arm/mach-cns3xxx/pcie.c ++++ b/arch/arm/mach-cns3xxx/pcie.c +@@ -378,8 +378,6 @@ static int __init cns3xxx_pcie_init(void + for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) { + iotable_init(cns3xxx_pcie[i].cfg_bases, + ARRAY_SIZE(cns3xxx_pcie[i].cfg_bases)); +- cns3xxx_pwr_clk_en(0x1 << PM_CLK_GATE_REG_OFFSET_PCIE(i)); +- cns3xxx_pwr_soft_rst(0x1 << 
PM_SOFT_RST_REG_OFFST_PCIE(i)); + cns3xxx_pcie_check_link(&cns3xxx_pcie[i]); + cns3xxx_pcie_hw_init(&cns3xxx_pcie[i]); + pci_common_init(&cns3xxx_pcie[i].hw_pci); diff --git a/target/linux/cns3xxx/patches/100-laguna_support.patch b/target/linux/cns3xxx/patches/100-laguna_support.patch new file mode 100644 index 0000000000..12fe8005e9 --- /dev/null +++ b/target/linux/cns3xxx/patches/100-laguna_support.patch @@ -0,0 +1,992 @@ +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/laguna.c +@@ -0,0 +1,761 @@ ++/* ++ * Gateworks Corporation Laguna Platform ++ * ++ * Copyright 2000 Deep Blue Solutions Ltd ++ * Copyright 2008 ARM Limited ++ * Copyright 2008 Cavium Networks ++ * Scott Shu ++ * Copyright 2010 MontaVista Software, LLC. ++ * Anton Vorontsov ++ * Copyright 2011 Gateworks Corporation ++ * Chris Lang ++ * ++ * This file is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, Version 2, as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "core.h" ++#include "devices.h" ++ ++// Config 1 Bitmap ++#define ETH0_LOAD BIT(0) ++#define ETH1_LOAD BIT(1) ++#define ETH2_LOAD BIT(2) ++#define SATA0_LOAD BIT(3) ++#define SATA1_LOAD BIT(4) ++#define PCM_LOAD BIT(5) ++#define I2S_LOAD BIT(6) ++#define SPI0_LOAD BIT(7) ++#define SPI1_LOAD BIT(8) ++#define PCIE0_LOAD BIT(9) ++#define PCIE1_LOAD BIT(10) ++#define USB0_LOAD BIT(11) ++#define USB1_LOAD BIT(12) ++#define USB1_ROUTE BIT(13) ++#define SD_LOAD BIT(14) ++#define UART0_LOAD BIT(15) ++#define UART1_LOAD BIT(16) ++#define UART2_LOAD BIT(17) ++#define MPCI0_LOAD BIT(18) ++#define MPCI1_LOAD BIT(19) ++#define MPCI2_LOAD BIT(20) ++#define MPCI3_LOAD BIT(21) ++#define FP_BUT_LOAD BIT(22) ++#define FP_BUT_HEADER_LOAD BIT(23) ++#define FP_LED_LOAD BIT(24) ++#define FP_LED_HEADER_LOAD BIT(25) ++#define FP_TAMPER_LOAD BIT(26) ++#define HEADER_33V_LOAD BIT(27) ++#define SATA_POWER_LOAD BIT(28) ++#define FP_POWER_LOAD BIT(29) ++#define GPIO_HEADER_LOAD BIT(30) ++#define GSP_BAT_LOAD BIT(31) ++ ++// Config 2 Bitmap ++#define FAN_LOAD BIT(0) ++#define SPI_FLASH_LOAD BIT(1) ++#define NOR_FLASH_LOAD BIT(2) ++#define GPS_LOAD BIT(3) ++#define SUPPLY_5V_LOAD BIT(6) ++#define SUPPLY_33V_LOAD BIT(7) ++ ++struct laguna_board_info { ++ char model[16]; ++ u32 config_bitmap; ++ u32 config2_bitmap; ++ u8 nor_flash_size; ++ u8 spi_flash_size; ++}; ++ ++static struct laguna_board_info laguna_info __initdata; ++ ++/* ++ * NOR Flash ++ */ ++static struct mtd_partition laguna_nor_partitions[] = { ++ { ++ .name = "uboot", ++ .size = SZ_256K, ++ .offset = 0, ++ .mask_flags = MTD_WRITEABLE, ++ }, { ++ .name = "params", ++ .size = SZ_128K, ++ .offset = SZ_256K, ++ }, { ++ .name = "kernel", ++ .size = SZ_2M, ++ .offset = SZ_256K + SZ_128K, ++ }, { ++ .name = "rootfs", ++ .size = SZ_16M - SZ_256K - SZ_128K - SZ_2M, ++ .offset = SZ_256K + SZ_128K + SZ_2M, ++ }, ++}; ++ ++static struct physmap_flash_data laguna_nor_pdata = { ++ .width = 2, ++ .parts = laguna_nor_partitions, ++ .nr_parts = ARRAY_SIZE(laguna_nor_partitions), ++}; ++ ++static struct resource laguna_nor_res = { ++ .start = CNS3XXX_FLASH_BASE, ++ .end = CNS3XXX_FLASH_BASE + SZ_128M - 1, ++ .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, ++}; ++ ++static struct 
platform_device laguna_nor_pdev = { ++ .name = "physmap-flash", ++ .id = 0, ++ .resource = &laguna_nor_res, ++ .num_resources = 1, ++ .dev = { ++ .platform_data = &laguna_nor_pdata, ++ }, ++}; ++ ++/* ++ * SPI ++ */ ++static struct mtd_partition laguna_spi_partitions[] = { ++ { ++ .name = "uboot", ++ .size = SZ_256K, ++ .offset = 0, ++ .mask_flags = MTD_WRITEABLE, ++ }, { ++ .name = "params", ++ .size = SZ_256K, ++ .offset = SZ_256K, ++ }, { ++ .name = "kernel", ++ .size = SZ_1M + SZ_512K, ++ .offset = SZ_512K, ++ }, { ++ .name = "rootfs", ++ .size = SZ_16M - SZ_2M, ++ .offset = SZ_2M, ++ }, ++}; ++ ++static struct flash_platform_data laguna_spi_pdata = { ++ .parts = laguna_spi_partitions, ++ .nr_parts = ARRAY_SIZE(laguna_spi_partitions), ++}; ++ ++static struct spi_board_info __initdata laguna_spi_devices[] = { ++ { ++ .modalias = "m25p80", ++ .platform_data = &laguna_spi_pdata, ++ .max_speed_hz = 50000000, ++ .bus_num = 1, ++ .chip_select = 0, ++ }, ++}; ++ ++static struct platform_device laguna_spi_controller = { ++ .name = "cns3xxx_spi", ++}; ++ ++/* ++ * LED's ++ */ ++static struct gpio_led laguna_gpio_leds[] = { ++ { ++ .name = "user1", /* Green Led */ ++ .gpio = 115, ++ .active_low = 1, ++ },{ ++ .name = "user2", /* Red Led */ ++ .gpio = 114, ++ .active_low = 1, ++ },{ ++ .name = "pwr1", /* Green Led */ ++ .gpio = 116, ++ .active_low = 1, ++ },{ ++ .name = "pwr2", /* Yellow Led */ ++ .gpio = 117, ++ .active_low = 1, ++ },{ ++ .name = "txd1", /* Green Led */ ++ .gpio = 118, ++ .active_low = 1, ++ },{ ++ .name = "txd2", /* Yellow Led */ ++ .gpio = 119, ++ .active_low = 1, ++ },{ ++ .name = "rxd1", /* Green Led */ ++ .gpio = 120, ++ .active_low = 1, ++ },{ ++ .name = "rxd2", /* Yellow Led */ ++ .gpio = 121, ++ .active_low = 1, ++ },{ ++ .name = "ser1", /* Green Led */ ++ .gpio = 122, ++ .active_low = 1, ++ },{ ++ .name = "ser2", /* Yellow Led */ ++ .gpio = 123, ++ .active_low = 1, ++ },{ ++ .name = "enet1", /* Green Led */ ++ .gpio = 124, ++ .active_low = 1, ++ },{ ++ .name = "enet2", /* Yellow Led */ ++ .gpio = 125, ++ .active_low = 1, ++ },{ ++ .name = "sig1_1", /* Green Led */ ++ .gpio = 126, ++ .active_low = 1, ++ },{ ++ .name = "sig1_2", /* Yellow Led */ ++ .gpio = 127, ++ .active_low = 1, ++ },{ ++ .name = "sig2_1", /* Green Led */ ++ .gpio = 128, ++ .active_low = 1, ++ },{ ++ .name = "sig2_2", /* Yellow Led */ ++ .gpio = 129, ++ .active_low = 1, ++ },{ ++ .name = "sig3_1", /* Green Led */ ++ .gpio = 130, ++ .active_low = 1, ++ },{ ++ .name = "sig3_2", /* Yellow Led */ ++ .gpio = 131, ++ .active_low = 1, ++ },{ ++ .name = "net1", /*Green Led */ ++ .gpio = 109, ++ .active_low = 1, ++ },{ ++ .name = "net2", /* Red Led */ ++ .gpio = 110, ++ .active_low = 1, ++ },{ ++ .name = "mod1", /* Green Led */ ++ .gpio = 111, ++ .active_low = 1, ++ },{ ++ .name = "mod2", /* Red Led */ ++ .gpio = 112, ++ .active_low = 1, ++ }, ++}; ++ ++static struct gpio_led_platform_data laguna_gpio_leds_data = { ++ .num_leds = 22, ++ .leds = laguna_gpio_leds, ++}; ++ ++static struct platform_device laguna_gpio_leds_device = { ++ .name = "leds-gpio", ++ .id = -1, ++ .dev.platform_data = &laguna_gpio_leds_data, ++}; ++ ++/* ++ * Ethernet ++ */ ++static struct cns3xxx_plat_info laguna_net_data = { ++ .ports = 0, ++ .phy = { ++ 0, ++ 1, ++ 2, ++ }, ++}; ++ ++static struct platform_device laguna_net_device = { ++ .name = "cns3xxx_eth", ++ .id = 0, ++ .dev.platform_data = &laguna_net_data, ++}; ++ ++/* ++ * UART ++ */ ++static void __init laguna_early_serial_setup(void) ++{ ++#ifdef CONFIG_SERIAL_8250_CONSOLE ++ 
static struct uart_port laguna_serial_port = { ++ .membase = (void __iomem *)CNS3XXX_UART0_BASE_VIRT, ++ .mapbase = CNS3XXX_UART0_BASE, ++ .irq = IRQ_CNS3XXX_UART0, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .line = 0, ++ .type = PORT_16550A, ++ .fifosize = 16, ++ }; ++ ++ early_serial_setup(&laguna_serial_port); ++#endif ++} ++ ++static struct resource laguna_uart_resources[] = { ++ { ++ .start = CNS3XXX_UART0_BASE, ++ .end = CNS3XXX_UART0_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM ++ },{ ++ .start = CNS3XXX_UART2_BASE, ++ .end = CNS3XXX_UART2_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM ++ },{ ++ .start = CNS3XXX_UART2_BASE, ++ .end = CNS3XXX_UART2_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM ++ }, ++}; ++ ++static struct plat_serial8250_port laguna_uart_data[] = { ++ { ++ .membase = (char*) (CNS3XXX_UART0_BASE_VIRT), ++ .mapbase = (CNS3XXX_UART0_BASE), ++ .irq = IRQ_CNS3XXX_UART0, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .type = PORT_16550A, ++ },{ ++ .membase = (char*) (CNS3XXX_UART1_BASE_VIRT), ++ .mapbase = (CNS3XXX_UART1_BASE), ++ .irq = IRQ_CNS3XXX_UART1, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .type = PORT_16550A, ++ },{ ++ .membase = (char*) (CNS3XXX_UART2_BASE_VIRT), ++ .mapbase = (CNS3XXX_UART2_BASE), ++ .irq = IRQ_CNS3XXX_UART2, ++ .iotype = UPIO_MEM, ++ .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_NO_TXEN_TEST, ++ .regshift = 2, ++ .uartclk = 24000000, ++ .type = PORT_16550A, ++ }, ++}; ++ ++static struct platform_device laguna_uart = { ++ .name = "serial8250", ++ .id = PLAT8250_DEV_PLATFORM, ++ .dev.platform_data = laguna_uart_data, ++ .num_resources = 3, ++ .resource = laguna_uart_resources ++}; ++ ++/* ++ * USB ++ */ ++static struct resource cns3xxx_usb_ehci_resources[] = { ++ [0] = { ++ .start = CNS3XXX_USB_BASE, ++ .end = CNS3XXX_USB_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_CNS3XXX_USB_EHCI, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 cns3xxx_usb_ehci_dma_mask = DMA_BIT_MASK(32); ++ ++static struct platform_device cns3xxx_usb_ehci_device = { ++ .name = "cns3xxx-ehci", ++ .num_resources = ARRAY_SIZE(cns3xxx_usb_ehci_resources), ++ .resource = cns3xxx_usb_ehci_resources, ++ .dev = { ++ .dma_mask = &cns3xxx_usb_ehci_dma_mask, ++ .coherent_dma_mask = DMA_BIT_MASK(32), ++ }, ++}; ++ ++static struct resource cns3xxx_usb_ohci_resources[] = { ++ [0] = { ++ .start = CNS3XXX_USB_OHCI_BASE, ++ .end = CNS3XXX_USB_OHCI_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_CNS3XXX_USB_OHCI, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 cns3xxx_usb_ohci_dma_mask = DMA_BIT_MASK(32); ++ ++static struct platform_device cns3xxx_usb_ohci_device = { ++ .name = "cns3xxx-ohci", ++ .num_resources = ARRAY_SIZE(cns3xxx_usb_ohci_resources), ++ .resource = cns3xxx_usb_ohci_resources, ++ .dev = { ++ .dma_mask = &cns3xxx_usb_ohci_dma_mask, ++ .coherent_dma_mask = DMA_BIT_MASK(32), ++ }, ++}; ++ ++static struct resource cns3xxx_usb_otg_resources[] = { ++ [0] = { ++ .start = CNS3XXX_USBOTG_BASE, ++ .end = CNS3XXX_USBOTG_BASE + SZ_16M - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_CNS3XXX_USB_OTG, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static u64 cns3xxx_usb_otg_dma_mask = DMA_BIT_MASK(32); ++ ++static struct platform_device 
cns3xxx_usb_otg_device = { ++ .name = "dwc_otg", ++ .num_resources = ARRAY_SIZE(cns3xxx_usb_otg_resources), ++ .resource = cns3xxx_usb_otg_resources, ++ .dev = { ++ .dma_mask = &cns3xxx_usb_otg_dma_mask, ++ .coherent_dma_mask = DMA_BIT_MASK(32), ++ }, ++}; ++ ++/* ++ * I2C ++ */ ++static struct resource laguna_i2c_resource[] = { ++ { ++ .start = CNS3XXX_SSP_BASE + 0x20, ++ .end = 0x7100003f, ++ .flags = IORESOURCE_MEM, ++ },{ ++ .start = IRQ_CNS3XXX_I2C, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device laguna_i2c_controller = { ++ .name = "cns3xxx-i2c", ++ .num_resources = 2, ++ .resource = laguna_i2c_resource, ++}; ++ ++static struct memory_accessor *at24_mem_acc; ++ ++static void at24_setup(struct memory_accessor *mem_acc, void *context) ++{ ++ char buf[8]; ++ ++ at24_mem_acc = mem_acc; ++ ++ /* Read MAC addresses */ ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x100, 6) == 6) ++ memcpy(&laguna_net_data.hwaddr[0], buf, ETH_ALEN); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x106, 6) == 6) ++ memcpy(&laguna_net_data.hwaddr[1], buf, ETH_ALEN); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x10C, 6) == 6) ++ memcpy(&laguna_net_data.hwaddr[2], buf, ETH_ALEN); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x112, 6) == 6) ++ memcpy(&laguna_net_data.hwaddr[3], buf, ETH_ALEN); ++ ++ /* Read out Model Information */ ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x130, 16) == 16) ++ memcpy(&laguna_info.model, buf, 16); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x140, 1) == 1) ++ memcpy(&laguna_info.nor_flash_size, buf, 1); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x141, 1) == 1) ++ memcpy(&laguna_info.spi_flash_size, buf, 1); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x142, 4) == 4) ++ memcpy(&laguna_info.config_bitmap, buf, 4); ++ if (at24_mem_acc->read(at24_mem_acc, buf, 0x146, 4) == 4) ++ memcpy(&laguna_info.config2_bitmap, buf, 4); ++}; ++ ++static struct at24_platform_data laguna_eeprom_info = { ++ .byte_len = 1024, ++ .page_size = 16, ++ .flags = AT24_FLAG_READONLY, ++ .setup = at24_setup, ++}; ++ ++static struct pca953x_platform_data laguna_pca_data = { ++ .gpio_base = 100, ++ .irq_base = -1, ++}; ++ ++static struct pca953x_platform_data laguna_pca2_data = { ++ .gpio_base = 116, ++ .irq_base = -1, ++}; ++ ++static struct i2c_board_info __initdata laguna_i2c_devices[] = { ++ { ++ I2C_BOARD_INFO("pca9555", 0x23), ++ .platform_data = &laguna_pca_data, ++ },{ ++ I2C_BOARD_INFO("pca9555", 0x27), ++ .platform_data = &laguna_pca2_data, ++ },{ ++ I2C_BOARD_INFO("gsp", 0x29), ++ },{ ++ I2C_BOARD_INFO ("24c08",0x50), ++ .platform_data = &laguna_eeprom_info, ++ },{ ++ I2C_BOARD_INFO("ds1672", 0x68), ++ }, ++}; ++ ++/* ++ * Watchdog ++ */ ++ ++static struct resource laguna_watchdog_resources[] = { ++ [0] = { ++ .start = CNS3XXX_TC11MP_TWD_BASE, ++ .end = CNS3XXX_TC11MP_TWD_BASE + SZ_4K - 1, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_LOCALWDOG, ++ .end = IRQ_LOCALWDOG, ++ .flags = IORESOURCE_IRQ, ++ } ++}; ++ ++static struct platform_device laguna_watchdog = { ++ .name = "mpcore_wdt", ++ .id = -1, ++ .num_resources = ARRAY_SIZE(laguna_watchdog_resources), ++ .resource = laguna_watchdog_resources, ++}; ++ ++/* ++ * Initialization ++ */ ++ ++static void __init laguna_init(void) ++{ ++ cns3xxx_l2x0_init(); ++ ++ platform_device_register(&laguna_watchdog); ++ ++ platform_device_register(&laguna_i2c_controller); ++ ++ i2c_register_board_info(0, laguna_i2c_devices, ++ ARRAY_SIZE(laguna_i2c_devices)); ++ ++ pm_power_off = cns3xxx_power_off; ++} ++ ++static 
struct map_desc laguna_io_desc[] __initdata = { ++ { ++ .virtual = CNS3XXX_UART0_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART0_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ },{ ++ .virtual = CNS3XXX_UART1_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART1_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ },{ ++ .virtual = CNS3XXX_UART2_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_UART2_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, ++}; ++ ++static void __init laguna_map_io(void) ++{ ++ cns3xxx_map_io(); ++ iotable_init(laguna_io_desc, ARRAY_SIZE(laguna_io_desc)); ++ laguna_early_serial_setup(); ++} ++ ++static int __init laguna_model_setup(void) ++{ ++ u32 __iomem *mem; ++ u32 reg; ++ u8 pcie_bitmap = 0; ++ ++ printk("Running on Gateworks Laguna %s\n", laguna_info.model); ++ ++ if (strncmp(laguna_info.model, "GW", 2) == 0) { ++ if (laguna_info.config_bitmap & ETH0_LOAD) ++ laguna_net_data.ports |= BIT(0); ++ if (laguna_info.config_bitmap & ETH1_LOAD) ++ laguna_net_data.ports |= BIT(1); ++ if (laguna_info.config_bitmap & ETH2_LOAD) ++ laguna_net_data.ports |= BIT(2); ++ if (laguna_net_data.ports) ++ platform_device_register(&laguna_net_device); ++ ++ if ((laguna_info.config_bitmap & SATA0_LOAD) || ++ (laguna_info.config_bitmap & SATA1_LOAD)) ++ cns3xxx_ahci_init(); ++ ++ if (laguna_info.config_bitmap & (PCIE0_LOAD)) ++ pcie_bitmap |= 0x1; ++ ++ if (laguna_info.config_bitmap & (PCIE1_LOAD)) ++ pcie_bitmap |= 0x2; ++ ++ cns3xxx_pcie_init(pcie_bitmap); ++ ++ if (laguna_info.config_bitmap & (USB0_LOAD)) { ++ cns3xxx_pwr_power_up(1 << PM_PLL_HM_PD_CTRL_REG_OFFSET_PLL_USB); ++ ++ /* DRVVBUS pins share with GPIOA */ ++ mem = (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0014); ++ reg = __raw_readl(mem); ++ reg |= 0x8; ++ __raw_writel(reg, mem); ++ ++ /* Enable OTG */ ++ mem = (void __iomem *)(CNS3XXX_MISC_BASE_VIRT + 0x0808); ++ reg = __raw_readl(mem); ++ reg &= ~(1 << 10); ++ __raw_writel(reg, mem); ++ ++ platform_device_register(&cns3xxx_usb_otg_device); ++ } ++ ++ if (laguna_info.config_bitmap & (USB1_LOAD)) { ++ platform_device_register(&cns3xxx_usb_ehci_device); ++ platform_device_register(&cns3xxx_usb_ohci_device); ++ } ++ ++ if (laguna_info.config_bitmap & (SD_LOAD)) ++ cns3xxx_sdhci_init(); ++ ++ if (laguna_info.config_bitmap & (UART0_LOAD)) ++ laguna_uart.num_resources = 1; ++ if (laguna_info.config_bitmap & (UART1_LOAD)) ++ laguna_uart.num_resources = 2; ++ if (laguna_info.config_bitmap & (UART2_LOAD)) ++ laguna_uart.num_resources = 3; ++ platform_device_register(&laguna_uart); ++ ++ if (laguna_info.config2_bitmap & (NOR_FLASH_LOAD)) { ++ switch (laguna_info.nor_flash_size) { ++ case 1: ++ laguna_nor_partitions[3].size = SZ_8M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_8M - 1; ++ break; ++ case 2: ++ laguna_nor_partitions[3].size = SZ_16M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_16M - 1; ++ break; ++ case 3: ++ laguna_nor_partitions[3].size = SZ_32M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_32M - 1; ++ break; ++ case 4: ++ laguna_nor_partitions[3].size = SZ_64M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_64M - 1; ++ break; ++ case 5: ++ laguna_nor_partitions[3].size = SZ_128M - SZ_256K - SZ_128K - SZ_2M; ++ laguna_nor_res.end = CNS3XXX_FLASH_BASE + SZ_128M - 1; ++ break; ++ } ++ platform_device_register(&laguna_nor_pdev); ++ } ++ ++ if (laguna_info.config2_bitmap & (SPI_FLASH_LOAD)) { ++ switch (laguna_info.spi_flash_size) { ++ case 1: ++ 
laguna_spi_partitions[3].size = SZ_4M - SZ_2M; ++ break; ++ case 2: ++ laguna_spi_partitions[3].size = SZ_8M - SZ_2M; ++ break; ++ case 3: ++ laguna_spi_partitions[3].size = SZ_16M - SZ_2M; ++ break; ++ case 4: ++ laguna_spi_partitions[3].size = SZ_32M - SZ_2M; ++ break; ++ case 5: ++ laguna_spi_partitions[3].size = SZ_64M - SZ_2M; ++ break; ++ } ++ spi_register_board_info(laguna_spi_devices, ARRAY_SIZE(laguna_spi_devices)); ++ } ++ ++ if ((laguna_info.config_bitmap & SPI0_LOAD) || ++ (laguna_info.config_bitmap & SPI1_LOAD)) ++ platform_device_register(&laguna_spi_controller); ++ ++ /* ++ * Do any model specific setup not known by the bitmap by matching ++ * the first 6 characters of the model name ++ */ ++ ++ if (strncmp(laguna_info.model, "GW2388", 6) == 0) { ++ laguna_gpio_leds_data.num_leds = 2; ++ } else if (strncmp(laguna_info.model, "GW2380", 6) == 0) { ++ laguna_gpio_leds[0].gpio = 107; ++ laguna_gpio_leds[1].gpio = 106; ++ laguna_gpio_leds_data.num_leds = 2; ++ } ++ platform_device_register(&laguna_gpio_leds_device); ++ } else { ++ // Do some defaults here, not sure what yet ++ } ++ return 0; ++} ++ ++late_initcall(laguna_model_setup); ++ ++MACHINE_START(GW2388, "Gateworks Corporation Laguna Platform") ++ .boot_params = 0x00000100, ++ .map_io = laguna_map_io, ++ .init_irq = cns3xxx_init_irq, ++ .timer = &cns3xxx_timer, ++ .init_machine = laguna_init, ++MACHINE_END +--- a/arch/arm/mach-cns3xxx/Kconfig ++++ b/arch/arm/mach-cns3xxx/Kconfig +@@ -11,4 +11,14 @@ config MACH_CNS3420VB + This is a platform with an on-board ARM11 MPCore and has support + for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc. + ++config MACH_GW2388 ++ bool "Support for Gateworks Laguna Platform" ++ select HAVE_ARM_SCU if SMP ++ select MIGHT_HAVE_PCI ++ help ++ Include support for the Gateworks Laguna Platform ++ ++ This is a platform with an on-board ARM11 MPCore and has support ++ for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, I2C, GIG, etc. 
++ + endmenu +--- a/arch/arm/mach-cns3xxx/core.c ++++ b/arch/arm/mach-cns3xxx/core.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include "core.h" +@@ -61,11 +62,24 @@ static struct map_desc cns3xxx_io_desc[] + .pfn = __phys_to_pfn(CNS3XXX_PM_BASE), + .length = SZ_4K, + .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_SWITCH_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_SWITCH_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_SSP_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_SSP_BASE), ++ .length = SZ_4K, ++ .type = MT_DEVICE, + }, + }; + + void __init cns3xxx_map_io(void) + { ++#ifdef CONFIG_LOCAL_TIMERS ++ twd_base = (void __iomem *) CNS3XXX_TC11MP_TWD_BASE_VIRT; ++#endif + iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc)); + } + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -329,6 +329,7 @@ config ARCH_CLPS711X + config ARCH_CNS3XXX + bool "Cavium Networks CNS3XXX family" + select CPU_V6K ++ select ARCH_WANT_OPTIONAL_GPIOLIB + select GENERIC_CLOCKEVENTS + select ARM_GIC + select MIGHT_HAVE_PCI +--- /dev/null ++++ b/arch/arm/mach-cns3xxx/include/mach/gpio.h +@@ -0,0 +1,98 @@ ++/* ++ * arch/arm/mach-cns3xxx/include/mach/gpio.h ++ * ++ * CNS3xxx GPIO wrappers for arch-neutral GPIO calls ++ * ++ * Copyright 2011 Gateworks Corporation ++ * Chris Lang ++ * ++ * Based on IXP implementation by Milan Svoboda ++ * Based on PXA implementation by Philipp Zabel ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ * ++ */ ++ ++#ifndef __ASM_ARCH_CNS3XXX_GPIO_H ++#define __ASM_ARCH_CNS3XXX_GPIO_H ++ ++#include ++#include ++#include ++#include /* cansleep wrappers */ ++ ++#define NR_BUILTIN_GPIO 64 ++ ++#define CNS3XXX_GPIO_IN 0x0 ++#define CNS3XXX_GPIO_OUT 0x1 ++ ++#define CNS3XXX_GPIO_LO 0 ++#define CNS3XXX_GPIO_HI 1 ++ ++#define CNS3XXX_GPIO_OUTPUT 0x00 ++#define CNS3XXX_GPIO_INPUT 0x04 ++#define CNS3XXX_GPIO_DIR 0x08 ++#define CNS3XXX_GPIO_SET 0x10 ++#define CNS3XXX_GPIO_CLEAR 0x14 ++ ++static inline void gpio_line_get(u8 line, int *value) ++{ ++ if (line < 32) ++ *value = ((__raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_INPUT) >> line) & 0x1); ++ else ++ *value = ((__raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_INPUT) >> (line - 32)) & 0x1); ++} ++ ++static inline void gpio_line_set(u8 line, int value) ++{ ++ if (line < 32) { ++ if (value) ++ __raw_writel((1 << line), CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_SET); ++ else ++ __raw_writel((1 << line), CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_CLEAR); ++ } else { ++ if (value) ++ __raw_writel((1 << line), CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_SET); ++ else ++ __raw_writel((1 << line), CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_CLEAR); ++ } ++} ++ ++static inline int gpio_get_value(unsigned gpio) ++{ ++ if (gpio < NR_BUILTIN_GPIO) ++ { ++ int value; ++ gpio_line_get(gpio, &value); ++ return value; ++ } ++ else ++ return __gpio_get_value(gpio); ++} ++ ++static inline void gpio_set_value(unsigned gpio, int value) ++{ ++ if (gpio < NR_BUILTIN_GPIO) ++ gpio_line_set(gpio, value); ++ else ++ __gpio_set_value(gpio, value); ++} ++ ++#define gpio_cansleep __gpio_cansleep ++ ++extern int gpio_to_irq(int gpio); ++extern int irq_to_gpio(int gpio); ++ ++#endif +--- a/arch/arm/mach-cns3xxx/Makefile ++++ b/arch/arm/mach-cns3xxx/Makefile +@@ -1,6 +1,7 @@ + obj-$(CONFIG_ARCH_CNS3XXX) += core.o pm.o devices.o + obj-$(CONFIG_PCI) += pcie.o + obj-$(CONFIG_MACH_CNS3420VB) += cns3420vb.o ++obj-$(CONFIG_MACH_GW2388) += laguna.o + obj-$(CONFIG_SMP) += platsmp.o headsmp.o + obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o + obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o +--- a/arch/arm/mach-cns3xxx/pcie.c ++++ b/arch/arm/mach-cns3xxx/pcie.c +@@ -365,7 +365,7 @@ static int cns3xxx_pcie_abort_handler(un + return 0; + } + +-static int __init cns3xxx_pcie_init(void) ++int cns3xxx_pcie_init(u8 bitmap) + { + int i; + +@@ -376,6 +376,9 @@ static int __init cns3xxx_pcie_init(void + "imprecise external abort"); + + for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) { ++ if (!(bitmap & (1 << i))) ++ continue; ++ + iotable_init(cns3xxx_pcie[i].cfg_bases, + ARRAY_SIZE(cns3xxx_pcie[i].cfg_bases)); + cns3xxx_pcie_check_link(&cns3xxx_pcie[i]); +@@ -387,4 +390,3 @@ static int __init cns3xxx_pcie_init(void + + return 0; + } +-device_initcall(cns3xxx_pcie_init); +--- a/arch/arm/mach-cns3xxx/cns3420vb.c ++++ b/arch/arm/mach-cns3xxx/cns3420vb.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include "core.h" + #include "devices.h" + +@@ -198,6 +199,8 @@ static void __init cns3420_init(void) + cns3xxx_ahci_init(); + cns3xxx_sdhci_init(); + ++ cns3xxx_pcie_init(0x3); ++ + pm_power_off = cns3xxx_power_off; + } + +--- a/arch/arm/mach-cns3xxx/core.h ++++ b/arch/arm/mach-cns3xxx/core.h +@@ -12,6 +12,7 @@ + #define __CNS3XXX_CORE_H + + extern struct sys_timer cns3xxx_timer; ++extern 
int cns3xxx_pcie_init(u8 bitmap); + + #ifdef CONFIG_CACHE_L2X0 + void __init cns3xxx_l2x0_init(void); diff --git a/target/linux/cns3xxx/patches/101-laguna_sdhci_card_detect.patch b/target/linux/cns3xxx/patches/101-laguna_sdhci_card_detect.patch new file mode 100644 index 0000000000..14802706e4 --- /dev/null +++ b/target/linux/cns3xxx/patches/101-laguna_sdhci_card_detect.patch @@ -0,0 +1,16 @@ +--- a/drivers/mmc/host/sdhci-cns3xxx.c ++++ b/drivers/mmc/host/sdhci-cns3xxx.c +@@ -88,10 +88,11 @@ static struct sdhci_pltfm_data sdhci_cns + .ops = &sdhci_cns3xxx_ops, + .quirks = SDHCI_QUIRK_BROKEN_DMA | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | +- SDHCI_QUIRK_INVERTED_WRITE_PROTECT | ++ //SDHCI_QUIRK_INVERTED_WRITE_PROTECT | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | +- SDHCI_QUIRK_NONSTANDARD_CLOCK, ++ SDHCI_QUIRK_NONSTANDARD_CLOCK | ++ SDHCI_QUIRK_BROKEN_CARD_DETECTION, + }; + + static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev) diff --git a/target/linux/cns3xxx/patches/102-cns3xxx_timers.patch b/target/linux/cns3xxx/patches/102-cns3xxx_timers.patch new file mode 100644 index 0000000000..dddb85a4db --- /dev/null +++ b/target/linux/cns3xxx/patches/102-cns3xxx_timers.patch @@ -0,0 +1,109 @@ +--- a/arch/arm/mach-cns3xxx/core.c ++++ b/arch/arm/mach-cns3xxx/core.c +@@ -118,12 +118,13 @@ static void cns3xxx_timer_set_mode(enum + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: +- reload = pclk * 20 / (3 * HZ) * 0x25000; ++ reload = pclk * 1000000 / HZ; + writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); + ctrl |= (1 << 0) | (1 << 2) | (1 << 9); + break; + case CLOCK_EVT_MODE_ONESHOT: + /* period set, and timer enabled in 'next_event' hook */ ++ writel(0, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); + ctrl |= (1 << 2) | (1 << 9); + break; + case CLOCK_EVT_MODE_UNUSED: +@@ -148,11 +149,11 @@ static int cns3xxx_timer_set_next_event( + + static struct clock_event_device cns3xxx_tmr1_clockevent = { + .name = "cns3xxx timer1", +- .shift = 8, ++ .shift = 32, + .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .set_mode = cns3xxx_timer_set_mode, + .set_next_event = cns3xxx_timer_set_next_event, +- .rating = 350, ++ .rating = 300, + .cpumask = cpu_all_mask, + }; + +@@ -194,6 +195,35 @@ static struct irqaction cns3xxx_timer_ir + .handler = cns3xxx_timer_interrupt, + }; + ++static cycle_t cns3xxx_get_cycles(struct clocksource *cs) ++{ ++ u64 val; ++ ++ val = readl(cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET); ++ val &= 0xffff; ++ ++ return ((val << 32) | readl(cns3xxx_tmr1 + TIMER_FREERUN_OFFSET)); ++} ++ ++static struct clocksource clocksource_cns3xxx = { ++ .name = "freerun", ++ .rating = 200, ++ .read = cns3xxx_get_cycles, ++ .mask = CLOCKSOURCE_MASK(48), ++ .shift = 16, ++ .flags = CLOCK_SOURCE_IS_CONTINUOUS, ++}; ++ ++static void __init cns3xxx_clocksource_init(void) ++{ ++ /* Reset the FreeRunning counter */ ++ writel((1 << 16), cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET); ++ ++ clocksource_cns3xxx.mult = ++ clocksource_khz2mult(100, clocksource_cns3xxx.shift); ++ clocksource_register(&clocksource_cns3xxx); ++} ++ + /* + * Set up the clock source and clock events devices + */ +@@ -211,13 +241,12 @@ static void __init __cns3xxx_timer_init( + /* stop free running timer3 */ + writel(0, cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET); + +- /* timer1 */ +- writel(0x5C800, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET); +- writel(0x5C800, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET); +- + writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V1_OFFSET); + writel(0, 
cns3xxx_tmr1 + TIMER1_MATCH_V2_OFFSET); + ++ val = (cns3xxx_cpu_clock() >> 3) * 1000000 / HZ; ++ writel(val, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET); ++ + /* mask irq, non-mask timer1 overflow */ + irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET); + irq_mask &= ~(1 << 2); +@@ -229,23 +258,9 @@ static void __init __cns3xxx_timer_init( + val |= (1 << 9); + writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); + +- /* timer2 */ +- writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V1_OFFSET); +- writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V2_OFFSET); +- +- /* mask irq */ +- irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET); +- irq_mask |= ((1 << 3) | (1 << 4) | (1 << 5)); +- writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET); +- +- /* down counter */ +- val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); +- val |= (1 << 10); +- writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET); +- +- /* Make irqs happen for the system timer */ + setup_irq(timer_irq, &cns3xxx_timer_irq); + ++ cns3xxx_clocksource_init(); + cns3xxx_clockevents_init(timer_irq); + } + diff --git a/target/linux/cns3xxx/patches/104-cns3xxx_gpio.patch b/target/linux/cns3xxx/patches/104-cns3xxx_gpio.patch new file mode 100644 index 0000000000..6dff5b07d7 --- /dev/null +++ b/target/linux/cns3xxx/patches/104-cns3xxx_gpio.patch @@ -0,0 +1,141 @@ +--- a/arch/arm/mach-cns3xxx/cns3420vb.c ++++ b/arch/arm/mach-cns3xxx/cns3420vb.c +@@ -215,7 +215,7 @@ static struct map_desc cns3420_io_desc[] + + static void __init cns3420_map_io(void) + { +- cns3xxx_map_io(); ++ cns3xxx_common_init(); + iotable_init(cns3420_io_desc, ARRAY_SIZE(cns3420_io_desc)); + + cns3420_early_serial_setup(); +--- a/arch/arm/mach-cns3xxx/core.c ++++ b/arch/arm/mach-cns3xxx/core.c +@@ -18,6 +18,7 @@ + #include + #include + #include ++#include + #include + #include "core.h" + +@@ -75,12 +76,96 @@ static struct map_desc cns3xxx_io_desc[] + }, + }; + +-void __init cns3xxx_map_io(void) ++int gpio_to_irq(int gpio) ++{ ++ if (gpio > 63) ++ return -EINVAL; ++ ++ if (gpio < 32) ++ return IRQ_CNS3XXX_GPIOA; ++ else ++ return IRQ_CNS3XXX_GPIOB; ++} ++EXPORT_SYMBOL(gpio_to_irq); ++ ++int irq2gpio(int irq) ++{ ++ if (irq == IRQ_CNS3XXX_GPIOA) ++ return 0; ++ else if (irq == IRQ_CNS3XXX_GPIOB) ++ return 32; ++ else ++ return -EINVAL; ++} ++EXPORT_SYMBOL(irq2gpio); ++ ++static inline void gpio_line_config(u8 line, u32 direction) ++{ ++ u32 reg; ++ if (direction) { ++ if (line < 32) { ++ reg = __raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg |= (1 << line); ++ __raw_writel(reg, CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } else { ++ reg = __raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg |= (1 << (line - 32)); ++ __raw_writel(reg, CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } ++ } else { ++ if (line < 32) { ++ reg = __raw_readl(CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg &= ~(1 << line); ++ __raw_writel(reg, CNS3XXX_GPIOA_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } else { ++ reg = __raw_readl(CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ reg &= ~(1 << (line - 32)); ++ __raw_writel(reg, CNS3XXX_GPIOB_BASE_VIRT + CNS3XXX_GPIO_DIR); ++ } ++ } ++} ++ ++static int cns3xxx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) ++{ ++ gpio_line_config(gpio, CNS3XXX_GPIO_IN); ++ return 0; ++} ++ ++static int cns3xxx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level) ++{ ++ gpio_line_set(gpio, level); ++ gpio_line_config(gpio, CNS3XXX_GPIO_OUT); ++ return 0; ++} ++ ++static int cns3xxx_gpio_get_value(struct 
gpio_chip *chip, unsigned gpio) ++{ ++ return gpio_get_value(gpio); ++} ++ ++static void cns3xxx_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) ++{ ++ gpio_set_value(gpio, value); ++} ++ ++static struct gpio_chip cns3xxx_gpio_chip = { ++ .label = "CNS3XXX_GPIO_CHIP", ++ .direction_input = cns3xxx_gpio_direction_input, ++ .direction_output = cns3xxx_gpio_direction_output, ++ .get = cns3xxx_gpio_get_value, ++ .set = cns3xxx_gpio_set_value, ++ .base = 0, ++ .ngpio = 64, ++}; ++ ++void __init cns3xxx_common_init(void) + { + #ifdef CONFIG_LOCAL_TIMERS + twd_base = (void __iomem *) CNS3XXX_TC11MP_TWD_BASE_VIRT; + #endif + iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc)); ++ ++ gpiochip_add(&cns3xxx_gpio_chip); + } + + /* used by entry-macro.S */ +--- a/arch/arm/mach-cns3xxx/core.h ++++ b/arch/arm/mach-cns3xxx/core.h +@@ -20,7 +20,7 @@ void __init cns3xxx_l2x0_init(void); + static inline void cns3xxx_l2x0_init(void) {} + #endif /* CONFIG_CACHE_L2X0 */ + +-void __init cns3xxx_map_io(void); ++void __init cns3xxx_common_init(void); + void __init cns3xxx_init_irq(void); + void cns3xxx_power_off(void); + +--- a/arch/arm/mach-cns3xxx/laguna.c ++++ b/arch/arm/mach-cns3xxx/laguna.c +@@ -611,7 +611,7 @@ static struct map_desc laguna_io_desc[] + + static void __init laguna_map_io(void) + { +- cns3xxx_map_io(); ++ cns3xxx_common_init(); + iotable_init(laguna_io_desc, ARRAY_SIZE(laguna_io_desc)); + laguna_early_serial_setup(); + } diff --git a/target/linux/cns3xxx/patches/105-cns3xxx_pcie_io.patch b/target/linux/cns3xxx/patches/105-cns3xxx_pcie_io.patch new file mode 100644 index 0000000000..6c6d268a96 --- /dev/null +++ b/target/linux/cns3xxx/patches/105-cns3xxx_pcie_io.patch @@ -0,0 +1,88 @@ +--- a/arch/arm/mach-cns3xxx/core.c ++++ b/arch/arm/mach-cns3xxx/core.c +@@ -73,6 +73,16 @@ static struct map_desc cns3xxx_io_desc[] + .pfn = __phys_to_pfn(CNS3XXX_SSP_BASE), + .length = SZ_4K, + .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE0_IO_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE0_IO_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, ++ }, { ++ .virtual = CNS3XXX_PCIE1_IO_BASE_VIRT, ++ .pfn = __phys_to_pfn(CNS3XXX_PCIE1_IO_BASE), ++ .length = SZ_16M, ++ .type = MT_DEVICE, + }, + }; + +@@ -171,13 +181,13 @@ void __init cns3xxx_common_init(void) + /* used by entry-macro.S */ + void __init cns3xxx_init_irq(void) + { +- gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), +- __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT)); ++ gic_init(0, 29, (void __iomem *) CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, ++ (void __iomem *) CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT); + } + + void cns3xxx_power_off(void) + { +- u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT); ++ u32 __iomem *pm_base = (void __iomem *) CNS3XXX_PM_BASE_VIRT; + u32 clkctrl; + + printk(KERN_INFO "powering system down...\n"); +@@ -351,7 +361,7 @@ static void __init __cns3xxx_timer_init( + + static void __init cns3xxx_timer_init(void) + { +- cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT); ++ cns3xxx_tmr1 = (void __iomem *) CNS3XXX_TIMER1_2_3_BASE_VIRT; + + __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0); + } +--- a/arch/arm/mach-cns3xxx/devices.c ++++ b/arch/arm/mach-cns3xxx/devices.c +@@ -98,7 +98,7 @@ static struct platform_device cns3xxx_sd + + void __init cns3xxx_sdhci_init(void) + { +- u32 __iomem *gpioa = __io(CNS3XXX_MISC_BASE_VIRT + 0x0014); ++ u32 __iomem *gpioa = (void __iomem *) (CNS3XXX_MISC_BASE_VIRT + 0x0014); + u32 gpioa_pins = __raw_readl(gpioa); + + /* MMC/SD pins share with GPIOA */ +--- 
a/arch/arm/mach-cns3xxx/include/mach/io.h ++++ b/arch/arm/mach-cns3xxx/include/mach/io.h +@@ -9,9 +9,17 @@ + #ifndef __MACH_IO_H + #define __MACH_IO_H + ++#include "cns3xxx.h" ++ + #define IO_SPACE_LIMIT 0xffffffff + +-#define __io(a) __typesafe_io(a) ++static inline void __iomem *__io(unsigned long addr) ++{ ++ return (void __iomem *)((addr - CNS3XXX_PCIE0_IO_BASE) ++ + CNS3XXX_PCIE0_IO_BASE_VIRT); ++} ++ ++#define __io(a) __io(a) + #define __mem_pci(a) (a) + + #endif +--- a/drivers/spi/spi_cns3xxx.c ++++ b/drivers/spi/spi_cns3xxx.c +@@ -273,7 +273,7 @@ done: + + static void __init cns3xxx_spi_initial(void) + { +- u32 __iomem *gpiob = __io(CNS3XXX_MISC_BASE_VIRT + 0x0018); ++ u32 __iomem *gpiob = (void __iomem *) (CNS3XXX_MISC_BASE_VIRT + 0x0018); + u32 gpiob_pins = __raw_readl(gpiob); + + /* MMC/SD pins share with GPIOA */ diff --git a/target/linux/cns3xxx/patches/106-cns3xxx_sata_support.patch b/target/linux/cns3xxx/patches/106-cns3xxx_sata_support.patch new file mode 100644 index 0000000000..ce96f0ea4c --- /dev/null +++ b/target/linux/cns3xxx/patches/106-cns3xxx_sata_support.patch @@ -0,0 +1,190 @@ +--- a/arch/arm/mach-cns3xxx/devices.c ++++ b/arch/arm/mach-cns3xxx/devices.c +@@ -41,7 +41,7 @@ static struct resource cns3xxx_ahci_reso + static u64 cns3xxx_ahci_dmamask = DMA_BIT_MASK(32); + + static struct platform_device cns3xxx_ahci_pdev = { +- .name = "ahci", ++ .name = "ahci-cns3xxx", + .id = 0, + .resource = cns3xxx_ahci_resource, + .num_resources = ARRAY_SIZE(cns3xxx_ahci_resource), +--- a/drivers/ata/Kconfig ++++ b/drivers/ata/Kconfig +@@ -83,6 +83,17 @@ config SATA_AHCI_PLATFORM + + If unsure, say N. + ++config SATA_AHCI_CNS3XXX ++ bool "AHCI Support on the Cavium Networks CNS3xxx SOC" ++ depends on ARCH_CNS3XXX ++ depends on SATA_AHCI_PLATFORM ++ help ++ This option enables AHCI platform driver to support CNS3xxx ++ System-on-Chip devices. This is only needed when using CNS3xxx AHCI ++ controller. ++ ++ If unsure, say N. ++ + config SATA_FSL + tristate "Freescale 3.0Gbps SATA support" + depends on FSL_SOC +--- a/drivers/ata/Makefile ++++ b/drivers/ata/Makefile +@@ -4,7 +4,10 @@ obj-$(CONFIG_ATA) += libata.o + # non-SFF interface + obj-$(CONFIG_SATA_AHCI) += ahci.o libahci.o + obj-$(CONFIG_SATA_ACARD_AHCI) += acard-ahci.o libahci.o +-obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o ++obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platforms.o libahci.o ++ahci_platforms-y += ahci_platform.o ++ahci_platforms-$(CONFIG_SATA_AHCI_CNS3XXX) += ahci_cns3xxx.o ++ + obj-$(CONFIG_SATA_FSL) += sata_fsl.o + obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o + obj-$(CONFIG_SATA_SIL24) += sata_sil24.o +--- /dev/null ++++ b/drivers/ata/ahci_cns3xxx.c +@@ -0,0 +1,52 @@ ++/* ++ * AHCI support for CNS3xxx SoC ++ * ++ * Copyright 2010 MontaVista Software, LLC. ++ * Copyright 2010 Cavium Networks ++ * ++ * Authors: Anton Vorontsov ++ * Mac Lin ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include "ahci.h" ++ ++/* ++ * TODO: move cns3xxx_ahci_init to here after cns3xxx_pwr*() calls are ++ * thread-safe ++ */ ++ ++static int cns3xxx_ahci_softreset(struct ata_link *link, unsigned int *class, ++ unsigned long deadline) ++{ ++ int pmp = sata_srst_pmp(link); ++ int ret; ++ ++ ret = ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready); ++ if (pmp && ret) ++ return ahci_do_softreset(link, class, 0, deadline, ++ ahci_check_ready); ++ return ret; ++} ++ ++static struct ata_port_operations cns3xxx_ahci_ops = { ++ .inherits = &ahci_ops, ++ .softreset = cns3xxx_ahci_softreset, ++}; ++ ++static const struct ata_port_info cns3xxx_ata_port_info = { ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &cns3xxx_ahci_ops, ++}; ++ ++struct ahci_platform_data cns3xxx_ahci_platform_data = { ++ .ata_port_info = &cns3xxx_ata_port_info, ++}; ++ +--- a/drivers/ata/ahci_platform.c ++++ b/drivers/ata/ahci_platform.c +@@ -19,9 +19,11 @@ + #include + #include + #include ++#include + #include + #include + #include "ahci.h" ++#include "ahci_platform.h" + + static struct scsi_host_template ahci_platform_sht = { + AHCI_SHT("ahci_platform"), +@@ -29,6 +31,7 @@ static struct scsi_host_template ahci_pl + + static int __init ahci_probe(struct platform_device *pdev) + { ++ const struct platform_device_id *platid = platform_get_device_id(pdev); + struct device *dev = &pdev->dev; + struct ahci_platform_data *pdata = dev->platform_data; + struct ata_port_info pi = { +@@ -46,6 +49,9 @@ static int __init ahci_probe(struct plat + int i; + int rc; + ++ if (!pdata && platid && platid->driver_data) ++ pdata = (void *)platid->driver_data; ++ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(dev, "no mmio space\n"); +@@ -171,17 +177,28 @@ static int __devexit ahci_remove(struct + return 0; + } + ++static const struct platform_device_id ahci_platform_ids[] = { ++ { "ahci", }, ++#ifdef CONFIG_SATA_AHCI_CNS3XXX ++ { "ahci-cns3xxx", (kernel_ulong_t)&cns3xxx_ahci_platform_data}, ++#endif ++ { }, ++}; ++MODULE_DEVICE_TABLE(platform, ahci_platform_ids); ++ + static struct platform_driver ahci_driver = { +- .remove = __devexit_p(ahci_remove), + .driver = { + .name = "ahci", + .owner = THIS_MODULE, + }, ++ .probe = ahci_probe, ++ .remove = __devexit_p(ahci_remove), ++ .id_table = ahci_platform_ids, + }; + + static int __init ahci_init(void) + { +- return platform_driver_probe(&ahci_driver, ahci_probe); ++ return platform_driver_register(&ahci_driver); + } + module_init(ahci_init); + +@@ -194,4 +211,3 @@ module_exit(ahci_exit); + MODULE_DESCRIPTION("AHCI SATA platform driver"); + MODULE_AUTHOR("Anton Vorontsov "); + MODULE_LICENSE("GPL"); +-MODULE_ALIAS("platform:ahci"); +--- /dev/null ++++ b/drivers/ata/ahci_platform.h +@@ -0,0 +1,19 @@ ++/* ++ * Copyright 2010 MontaVista Software, LLC. ++ * Copyright 2010 Cavium Networks ++ * ++ * Authors: Anton Vorontsov ++ * Mac Lin ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ */ ++ ++#ifndef _DRIVERS_SATA_AHCI_PLATFORMS_H ++#define _DRIVERS_SATA_AHCI_PLATFORMS_H ++ ++extern struct ahci_platform_data cns3xxx_ahci_platform_data; ++ ++#endif /*_DRIVERS_SATA_AHCI_PLATFORMS_H*/ ++ diff --git a/target/linux/cns3xxx/patches/110-gateworks_gsp_support.patch b/target/linux/cns3xxx/patches/110-gateworks_gsp_support.patch new file mode 100644 index 0000000000..3886ee94eb --- /dev/null +++ b/target/linux/cns3xxx/patches/110-gateworks_gsp_support.patch @@ -0,0 +1,339 @@ +--- a/drivers/hwmon/Kconfig ++++ b/drivers/hwmon/Kconfig +@@ -401,6 +401,15 @@ config SENSORS_GL520SM + This driver can also be built as a module. If so, the module + will be called gl520sm. + ++config SENSORS_GSP ++ tristate "Gateworks System Peripheral" ++ depends on I2C && EXPERIMENTAL ++ help ++ If you say yes here you get support for the Gateworks System Peripherals. ++ ++ This driver can also be built as a module. If so, the module ++ will be called gsp. ++ + config SENSORS_GPIO_FAN + tristate "GPIO fan" + depends on GENERIC_GPIO +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -123,6 +123,7 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l7 + obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o + obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o + obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o ++obj-$(CONFIG_SENSORS_GSP) += gsp.o + + obj-$(CONFIG_PMBUS) += pmbus/ + +--- /dev/null ++++ b/drivers/hwmon/gsp.c +@@ -0,0 +1,308 @@ ++/* ++ * A hwmon driver for the Gateworks System Peripheral ++ * Copyright (C) 2009 Gateworks Corporation ++ * ++ * Author: Chris Lang ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License, ++ * as published by the Free Software Foundation - version 2. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DRV_VERSION "0.2" ++ ++enum chips { gsp }; ++ ++/* AD7418 registers */ ++#define GSP_REG_TEMP_IN 0x00 ++#define GSP_REG_VIN 0x02 ++#define GSP_REG_3P3 0x05 ++#define GSP_REG_BAT 0x08 ++#define GSP_REG_5P0 0x0b ++#define GSP_REG_CORE 0x0e ++#define GSP_REG_CPU1 0x11 ++#define GSP_REG_CPU2 0x14 ++#define GSP_REG_DRAM 0x17 ++#define GSP_REG_EXT_BAT 0x1a ++#define GSP_REG_IO1 0x1d ++#define GSP_REG_IO2 0x20 ++#define GSP_REG_PCIE 0x23 ++#define GSP_REG_CURRENT 0x26 ++#define GSP_FAN_0 0x2C ++#define GSP_FAN_1 0x2E ++#define GSP_FAN_2 0x30 ++#define GSP_FAN_3 0x32 ++#define GSP_FAN_4 0x34 ++#define GSP_FAN_5 0x36 ++ ++struct gsp_sensor_info { ++ const char* name; ++ int reg; ++}; ++ ++static const struct gsp_sensor_info gsp_sensors[] = { ++ {"temp", GSP_REG_TEMP_IN}, ++ {"vin", GSP_REG_VIN}, ++ {"3p3", GSP_REG_3P3}, ++ {"bat", GSP_REG_BAT}, ++ {"5p0", GSP_REG_5P0}, ++ {"core", GSP_REG_CORE}, ++ {"cpu1", GSP_REG_CPU1}, ++ {"cpu2", GSP_REG_CPU2}, ++ {"dram", GSP_REG_DRAM}, ++ {"ext_bat", GSP_REG_EXT_BAT}, ++ {"io1", GSP_REG_IO1}, ++ {"io2", GSP_REG_IO2}, ++ {"pci2", GSP_REG_PCIE}, ++ {"current", GSP_REG_CURRENT}, ++ {"fan_point0", GSP_FAN_0}, ++ {"fan_point1", GSP_FAN_1}, ++ {"fan_point2", GSP_FAN_2}, ++ {"fan_point3", GSP_FAN_3}, ++ {"fan_point4", GSP_FAN_4}, ++ {"fan_point5", GSP_FAN_5}, ++}; ++ ++struct gsp_data { ++ struct device *hwmon_dev; ++ struct attribute_group attrs; ++ enum chips type; ++}; ++ ++static int gsp_probe(struct i2c_client *client, ++ const struct i2c_device_id *id); ++static int gsp_remove(struct i2c_client *client); ++ ++static const struct i2c_device_id gsp_id[] = { ++ { "gsp", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, gsp_id); ++ ++static 
struct i2c_driver gsp_driver = { ++ .driver = { ++ .name = "gsp", ++ }, ++ .probe = gsp_probe, ++ .remove = gsp_remove, ++ .id_table = gsp_id, ++}; ++ ++/* All registers are word-sized, except for the configuration registers. ++ * AD7418 uses a high-byte first convention. Do NOT use those functions to ++ * access the configuration registers CONF and CONF2, as they are byte-sized. ++ */ ++static inline int gsp_read(struct i2c_client *client, u8 reg) ++{ ++ unsigned int adc = 0; ++ if (reg == GSP_REG_TEMP_IN || reg > GSP_REG_CURRENT) ++ { ++ adc |= i2c_smbus_read_byte_data(client, reg); ++ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; ++ return adc; ++ } ++ else ++ { ++ adc |= i2c_smbus_read_byte_data(client, reg); ++ adc |= i2c_smbus_read_byte_data(client, reg + 1) << 8; ++ adc |= i2c_smbus_read_byte_data(client, reg + 2) << 16; ++ return adc; ++ } ++} ++ ++static inline int gsp_write(struct i2c_client *client, u8 reg, u16 value) ++{ ++ i2c_smbus_write_byte_data(client, reg, value & 0xff); ++ i2c_smbus_write_byte_data(client, reg + 1, ((value >> 8) & 0xff)); ++ return 1; ++} ++ ++static ssize_t show_adc(struct device *dev, struct device_attribute *devattr, ++ char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct i2c_client *client = to_i2c_client(dev); ++ return sprintf(buf, "%d\n", gsp_read(client, gsp_sensors[attr->index].reg)); ++} ++ ++static ssize_t show_label(struct device *dev, ++ struct device_attribute *devattr, char *buf) ++{ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ ++ return sprintf(buf, "%s\n", gsp_sensors[attr->index].name); ++} ++ ++static ssize_t store_fan(struct device *dev, ++ struct device_attribute *devattr, const char *buf, size_t count) ++{ ++ u16 val; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct i2c_client *client = to_i2c_client(dev); ++ val = simple_strtoul(buf, NULL, 10); ++ gsp_write(client, gsp_sensors[attr->index].reg, val); ++ return count; ++} ++ ++static SENSOR_DEVICE_ATTR(temp0_input, S_IRUGO, show_adc, NULL, 0); ++static SENSOR_DEVICE_ATTR(temp0_label, S_IRUGO, show_label, NULL, 0); ++ ++static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_adc, NULL, 1); ++static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL, 1); ++static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_adc, NULL, 2); ++static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, show_label, NULL, 2); ++static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_adc, NULL, 3); ++static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, show_label, NULL, 3); ++static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_adc, NULL, 4); ++static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 4); ++static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_adc, NULL, 5); ++static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL, 5); ++static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_adc, NULL, 6); ++static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL, 6); ++static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_adc, NULL, 7); ++static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL, 7); ++static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_adc, NULL, 8); ++static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 8); ++static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, show_adc, NULL, 9); ++static SENSOR_DEVICE_ATTR(in8_label, S_IRUGO, show_label, NULL, 9); ++static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, show_adc, NULL, 10); ++static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL, 10); 
++static SENSOR_DEVICE_ATTR(in10_input, S_IRUGO, show_adc, NULL, 11); ++static SENSOR_DEVICE_ATTR(in10_label, S_IRUGO, show_label, NULL, 11); ++static SENSOR_DEVICE_ATTR(in11_input, S_IRUGO, show_adc, NULL, 12); ++static SENSOR_DEVICE_ATTR(in11_label, S_IRUGO, show_label, NULL, 12); ++static SENSOR_DEVICE_ATTR(in12_input, S_IRUGO, show_adc, NULL, 13); ++static SENSOR_DEVICE_ATTR(in12_label, S_IRUGO, show_label, NULL, 13); ++ ++static SENSOR_DEVICE_ATTR(fan0_point0, S_IRUGO | S_IWUSR, show_adc, store_fan, 14); ++static SENSOR_DEVICE_ATTR(fan0_point1, S_IRUGO | S_IWUSR, show_adc, store_fan, 15); ++static SENSOR_DEVICE_ATTR(fan0_point2, S_IRUGO | S_IWUSR, show_adc, store_fan, 16); ++static SENSOR_DEVICE_ATTR(fan0_point3, S_IRUGO | S_IWUSR, show_adc, store_fan, 17); ++static SENSOR_DEVICE_ATTR(fan0_point4, S_IRUGO | S_IWUSR, show_adc, store_fan, 18); ++static SENSOR_DEVICE_ATTR(fan0_point5, S_IRUGO | S_IWUSR, show_adc, store_fan, 19); ++ ++static struct attribute *gsp_attributes[] = { ++ &sensor_dev_attr_temp0_input.dev_attr.attr, ++ &sensor_dev_attr_in0_input.dev_attr.attr, ++ &sensor_dev_attr_in1_input.dev_attr.attr, ++ &sensor_dev_attr_in2_input.dev_attr.attr, ++ &sensor_dev_attr_in3_input.dev_attr.attr, ++ &sensor_dev_attr_in4_input.dev_attr.attr, ++ &sensor_dev_attr_in5_input.dev_attr.attr, ++ &sensor_dev_attr_in6_input.dev_attr.attr, ++ &sensor_dev_attr_in7_input.dev_attr.attr, ++ &sensor_dev_attr_in8_input.dev_attr.attr, ++ &sensor_dev_attr_in9_input.dev_attr.attr, ++ &sensor_dev_attr_in10_input.dev_attr.attr, ++ &sensor_dev_attr_in11_input.dev_attr.attr, ++ &sensor_dev_attr_in12_input.dev_attr.attr, ++ ++ &sensor_dev_attr_temp0_label.dev_attr.attr, ++ &sensor_dev_attr_in0_label.dev_attr.attr, ++ &sensor_dev_attr_in1_label.dev_attr.attr, ++ &sensor_dev_attr_in2_label.dev_attr.attr, ++ &sensor_dev_attr_in3_label.dev_attr.attr, ++ &sensor_dev_attr_in4_label.dev_attr.attr, ++ &sensor_dev_attr_in5_label.dev_attr.attr, ++ &sensor_dev_attr_in6_label.dev_attr.attr, ++ &sensor_dev_attr_in7_label.dev_attr.attr, ++ &sensor_dev_attr_in8_label.dev_attr.attr, ++ &sensor_dev_attr_in9_label.dev_attr.attr, ++ &sensor_dev_attr_in10_label.dev_attr.attr, ++ &sensor_dev_attr_in11_label.dev_attr.attr, ++ &sensor_dev_attr_in12_label.dev_attr.attr, ++ ++ &sensor_dev_attr_fan0_point0.dev_attr.attr, ++ &sensor_dev_attr_fan0_point1.dev_attr.attr, ++ &sensor_dev_attr_fan0_point2.dev_attr.attr, ++ &sensor_dev_attr_fan0_point3.dev_attr.attr, ++ &sensor_dev_attr_fan0_point4.dev_attr.attr, ++ &sensor_dev_attr_fan0_point5.dev_attr.attr, ++ NULL ++}; ++ ++ ++static int gsp_probe(struct i2c_client *client, ++ const struct i2c_device_id *id) ++{ ++ struct i2c_adapter *adapter = client->adapter; ++ struct gsp_data *data; ++ int err; ++ ++ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | ++ I2C_FUNC_SMBUS_WORD_DATA)) { ++ err = -EOPNOTSUPP; ++ goto exit; ++ } ++ ++ if (!(data = kzalloc(sizeof(struct gsp_data), GFP_KERNEL))) { ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ i2c_set_clientdata(client, data); ++ ++ data->type = id->driver_data; ++ ++ switch (data->type) { ++ case 0: ++ data->attrs.attrs = gsp_attributes; ++ break; ++ } ++ ++ dev_info(&client->dev, "%s chip found\n", client->name); ++ ++ /* Register sysfs hooks */ ++ if ((err = sysfs_create_group(&client->dev.kobj, &data->attrs))) ++ goto exit_free; ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (IS_ERR(data->hwmon_dev)) { ++ err = PTR_ERR(data->hwmon_dev); ++ goto exit_remove; ++ } ++ ++ return 0; ++ ++exit_remove: ++ 
sysfs_remove_group(&client->dev.kobj, &data->attrs); ++exit_free: ++ kfree(data); ++exit: ++ return err; ++} ++ ++static int gsp_remove(struct i2c_client *client) ++{ ++ struct gsp_data *data = i2c_get_clientdata(client); ++ hwmon_device_unregister(data->hwmon_dev); ++ sysfs_remove_group(&client->dev.kobj, &data->attrs); ++ kfree(data); ++ return 0; ++} ++ ++static int __init gsp_init(void) ++{ ++ return i2c_add_driver(&gsp_driver); ++} ++ ++static void __exit gsp_exit(void) ++{ ++ i2c_del_driver(&gsp_driver); ++} ++ ++module_init(gsp_init); ++module_exit(gsp_exit); ++ ++MODULE_AUTHOR("Chris Lang "); ++MODULE_DESCRIPTION("GSP HWMON driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION(DRV_VERSION); ++ diff --git a/target/linux/cns3xxx/patches/200-dwc_otg.patch b/target/linux/cns3xxx/patches/200-dwc_otg.patch new file mode 100644 index 0000000000..27a97126d0 --- /dev/null +++ b/target/linux/cns3xxx/patches/200-dwc_otg.patch @@ -0,0 +1,22680 @@ +--- a/drivers/Makefile ++++ b/drivers/Makefile +@@ -68,6 +68,7 @@ obj-$(CONFIG_PARIDE) += block/paride/ + obj-$(CONFIG_TC) += tc/ + obj-$(CONFIG_UWB) += uwb/ + obj-$(CONFIG_USB_OTG_UTILS) += usb/ ++obj-$(CONFIG_USB_DWC_OTG) += usb/dwc/ + obj-$(CONFIG_USB) += usb/ + obj-$(CONFIG_PCI) += usb/ + obj-$(CONFIG_USB_GADGET) += usb/ +--- a/drivers/usb/Kconfig ++++ b/drivers/usb/Kconfig +@@ -120,6 +120,8 @@ source "drivers/usb/musb/Kconfig" + + source "drivers/usb/renesas_usbhs/Kconfig" + ++source "drivers/usb/dwc/Kconfig" ++ + source "drivers/usb/class/Kconfig" + + source "drivers/usb/storage/Kconfig" +--- /dev/null ++++ b/drivers/usb/dwc/Kconfig +@@ -0,0 +1,44 @@ ++# ++# USB Dual Role (OTG-ready) Controller Drivers ++# for silicon based on Synopsys DesignWare IP ++# ++ ++comment "Enable Host or Gadget support for DesignWare OTG controller" ++depends on !USB && USB_GADGET=n ++ ++config USB_DWC_OTG ++ tristate "Synopsys DWC OTG Controller" ++ depends on USB ++ help ++ This driver provides USB Device Controller support for the ++ Synopsys DesignWare USB OTG Core used on the Cavium CNS34xx SOC. ++ ++config DWC_DEBUG ++ bool "Enable DWC Debugging" ++ depends on USB_DWC_OTG ++ default n ++ help ++ Enable DWC driver debugging ++ ++choice ++ prompt "DWC Mode Selection" ++ depends on USB_DWC_OTG ++ default DWC_HOST_ONLY ++ help ++ Select the DWC Core in OTG, Host only, or Device only mode. 
++ ++config DWC_HOST_ONLY ++ bool "DWC Host Only Mode" ++ ++config DWC_OTG_MODE ++ bool "DWC OTG Mode" ++ select USB_GADGET ++ select USB_GADGET_SELECTED ++ ++config DWC_DEVICE_ONLY ++ bool "DWC Device Only Mode" ++ select USB_GADGET ++ select USB_GADGET_SELECTED ++ ++endchoice ++ +--- /dev/null ++++ b/drivers/usb/dwc/Makefile +@@ -0,0 +1,26 @@ ++# ++# Makefile for DWC_otg Highspeed USB controller driver ++# ++ ++EXTRA_CFLAGS += -DDWC_HS_ELECT_TST ++#EXTRA_CFLAGS += -Dlinux -DDWC_HS_ELECT_TST ++#EXTRA_CFLAGS += -DDWC_EN_ISOC ++ ++ifneq ($(CONFIG_DWC_HOST_ONLY),) ++EXTRA_CFLAGS += -DDWC_HOST_ONLY ++endif ++ ++ifneq ($(CONFIG_DWC_DEVICE_ONLY),) ++EXTRA_CFLAGS += -DDWC_DEVICE_ONLY ++endif ++ ++ifneq ($(CONFIG_DWC_DEBUG),) ++EXTRA_CFLAGS += -DDEBUG ++endif ++ ++obj-$(CONFIG_USB_DWC_OTG) := dwc_otg.o ++ ++dwc_otg-objs := otg_driver.o otg_attr.o ++dwc_otg-objs += otg_cil.o otg_cil_intr.o ++dwc_otg-objs += otg_pcd.o otg_pcd_intr.o ++dwc_otg-objs += otg_hcd.o otg_hcd_intr.o otg_hcd_queue.o +--- /dev/null ++++ b/drivers/usb/dwc/otg_attr.c +@@ -0,0 +1,886 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.c $ ++ * $Revision: #31 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The diagnostic interface will provide access to the controller for ++ * bringing up the hardware and testing. The Linux driver attributes ++ * feature will be used to provide the Linux Diagnostic ++ * Interface. These attributes are accessed through sysfs. ++ */ ++ ++/** @page "Linux Module Attributes" ++ * ++ * The Linux module attributes feature is used to provide the Linux ++ * Diagnostic Interface. 
These attributes are accessed through sysfs. ++ * The diagnostic interface will provide access to the controller for ++ * bringing up the hardware and testing. ++ ++ ++ The following table shows the attributes. ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++
 Name          | Description | Access
 mode          | Returns the current mode: 0 for device mode, 1 for host mode | Read
 hnpcapable    | Gets or sets the "HNP-capable" bit in the Core USB Configuration Register. Read returns the current value. | Read/Write
 srpcapable    | Gets or sets the "SRP-capable" bit in the Core USB Configuration Register. Read returns the current value. | Read/Write
 hnp           | Initiates the Host Negotiation Protocol. Read returns the status. | Read/Write
 srp           | Initiates the Session Request Protocol. Read returns the status. | Read/Write
 buspower      | Gets or sets the Power State of the bus (0 - Off or 1 - On) | Read/Write
 bussuspend    | Suspends the USB bus. | Read/Write
 busconnected  | Gets the connection status of the bus | Read
 gotgctl       | Gets or sets the Core Control Status Register. | Read/Write
 gusbcfg       | Gets or sets the Core USB Configuration Register | Read/Write
 grxfsiz       | Gets or sets the Receive FIFO Size Register | Read/Write
 gnptxfsiz     | Gets or sets the non-periodic Transmit Size Register | Read/Write
 gpvndctl      | Gets or sets the PHY Vendor Control Register | Read/Write
 ggpio         | Gets the value in the lower 16 bits of the General Purpose IO Register or sets the upper 16 bits. | Read/Write
 guid          | Gets or sets the value of the User ID Register | Read/Write
 gsnpsid       | Gets the value of the Synopsys ID Register | Read
 devspeed      | Gets or sets the device speed setting in the DCFG register | Read/Write
 enumspeed     | Gets the device enumeration Speed. | Read
 hptxfsiz      | Gets the value of the Host Periodic Transmit FIFO | Read
 hprt0         | Gets or sets the value in the Host Port Control and Status Register | Read/Write
 regoffset     | Sets the register offset for the next Register Access | Read/Write
 regvalue      | Gets or sets the value of the register at the offset in the regoffset attribute. | Read/Write
 remote_wakeup | On read, shows the status of Remote Wakeup. On write, initiates a remote wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote Wakeup signalling bit in the Device Control Register is set for 1 millisecond. | Read/Write
 regdump       | Dumps the contents of core registers. | Read
 spramdump     | Dumps the contents of the core's SPRAM. | Read
 hcddump       | Dumps the current HCD state. | Read
 hcd_frrem     | Shows the average value of the Frame Remaining field in the Host Frame Number/Frame Remaining register when an SOF interrupt occurs. This can be used to determine the average interrupt latency. Also shows the average Frame Remaining value for start_transfer and the "a" and "b" sample points. The "a" and "b" sample points may be used during debugging to determine how long it takes to execute a section of the HCD code. | Read
 rd_reg_test   | Displays the time required to read the GNPTXFSIZ register many times (the output shows the number of times the register is read). | Read
 wr_reg_test   | Displays the time required to write the GNPTXFSIZ register many times (the output shows the number of times the register is written). | Read
++ ++ Example usage: ++ To get the current mode: ++ cat /sys/devices/lm0/mode ++ ++ To power down the USB: ++ echo 0 > /sys/devices/lm0/buspower ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* permission constants */ ++#include ++ ++#include ++#include ++#include ++ ++#include "otg_plat.h" ++#include "otg_attr.h" ++#include "otg_driver.h" ++#include "otg_pcd.h" ++#include "otg_hcd.h" ++ ++/* ++ * MACROs for defining sysfs attribute ++ */ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ val = (val & (_mask_)) >> _shift_; \ ++ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++{ \ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t set = simple_strtoul(buf, NULL, 16); \ ++ uint32_t clear = set; \ ++ clear = ((~clear) << _shift_) & _mask_; \ ++ set = (set << _shift_) & _mask_; \ ++ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ ++ dwc_modify_reg32(_addr_, clear, set); \ ++ return count; \ ++} ++ ++/* ++ * MACROs for defining sysfs attribute for 32-bit registers ++ */ ++#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++{ \ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t val = simple_strtoul(buf, NULL, 16); \ ++ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ ++ dwc_write_reg32(_addr_, val); \ ++ return count; \ ++} ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ 
++DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++ ++/** @name Functions for Show/Store of Attributes */ ++/**@{*/ ++ ++/** ++ * Show the register offset of the Register Access. ++ */ ++static ssize_t regoffset_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset); ++} ++ ++/** ++ * Set the register offset for the next Register Access Read/Write ++ */ ++static ssize_t regoffset_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t offset = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x\n", offset); ++ if (offset < SZ_256K ) { ++ otg_dev->reg_offset = offset; ++ } ++ else { ++ dev_err( _dev, "invalid offset\n" ); ++ } ++ ++ return count; ++} ++DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, (void *)regoffset_show, regoffset_store); ++ ++ ++/** ++ * Show the value of the register at the offset in the reg_offset ++ * attribute. ++ */ ++static ssize_t regvalue_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t val; ++ volatile uint32_t *addr; ++ ++ if (otg_dev->reg_offset != 0xFFFFFFFF && ++ 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ val = dwc_read_reg32( addr ); ++ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1, ++ "Reg@0x%06x = 0x%08x\n", ++ otg_dev->reg_offset, val); ++ } ++ else { ++ dev_err(_dev, "Invalid offset (0x%0x)\n", ++ otg_dev->reg_offset); ++ return sprintf(buf, "invalid offset\n" ); ++ } ++} ++ ++/** ++ * Store the value in the register at the offset in the reg_offset ++ * attribute. 
++ * ++ */ ++static ssize_t regvalue_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ volatile uint32_t * addr; ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val); ++ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ dwc_write_reg32( addr, val ); ++ } ++ else { ++ dev_err(_dev, "Invalid Register Offset (0x%08x)\n", ++ otg_dev->reg_offset); ++ } ++ return count; ++} ++DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store); ++ ++/* ++ * Attributes ++ */ ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode"); ++ ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID"); ++DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0"); ++ ++ ++/** ++ * @todo Add code to initiate the HNP. 
++ */ ++/** ++ * Show the HNP status bit ++ */ ++static ssize_t hnp_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs); ++} ++ ++/** ++ * Set the HNP Request bit ++ */ ++static ssize_t hnp_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.hnpreq = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store); ++ ++/** ++ * @todo Add code to initiate the SRP. ++ */ ++/** ++ * Show the SRP status bit ++ */ ++static ssize_t srp_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++ ++ ++ ++/** ++ * Set the SRP Request bit ++ */ ++static ssize_t srp_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ dwc_otg_pcd_initiate_srp(otg_dev->pcd); ++#endif ++ return count; ++} ++DEVICE_ATTR(srp, 0644, srp_show, srp_store); ++ ++/** ++ * @todo Need to do more for power on/off? ++ */ ++/** ++ * Show the Bus Power status ++ */ ++static ssize_t buspower_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr); ++} ++ ++ ++/** ++ * Set the Bus Power status ++ */ ++static ssize_t buspower_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t on = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtpwr = on; ++ ++ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ ++ return count; ++} ++DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store); ++ ++/** ++ * @todo Need to do more for suspend? 
++ */ ++/** ++ * Show the Bus Suspend status ++ */ ++static ssize_t bussuspend_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp); ++} ++ ++/** ++ * Set the Bus Suspend status ++ */ ++static ssize_t bussuspend_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtsusp = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store); ++ ++/** ++ * Show the status of Remote Wakeup. ++ */ ++static ssize_t remote_wakeup_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ dctl_data_t val; ++ val.d32 = ++ dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl); ++ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n", ++ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++/** ++ * Initiate a remote wakeup of the host. The Device control register ++ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable ++ * flag is set. ++ * ++ */ ++static ssize_t remote_wakeup_store( struct device *_dev, ++ struct device_attribute *attr, ++ const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ if (val&1) { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1); ++ } ++ else { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0); ++ } ++#endif ++ return count; ++} ++DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show, ++ remote_wakeup_store); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). ++ */ ++static ssize_t regdump_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ dwc_otg_dump_global_registers( otg_dev->core_if); ++ if (dwc_otg_is_host_mode(otg_dev->core_if)) { ++ dwc_otg_dump_host_registers( otg_dev->core_if); ++ } else { ++ dwc_otg_dump_dev_registers( otg_dev->core_if); ++ ++ } ++ return sprintf( buf, "Register Dump\n" ); ++} ++ ++DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). 
++ */ ++static ssize_t spramdump_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ dwc_otg_dump_spram( otg_dev->core_if); ++ ++ return sprintf( buf, "SPRAM Dump\n" ); ++} ++ ++DEVICE_ATTR(spramdump, S_IRUGO|S_IWUSR, spramdump_show, 0); ++ ++/** ++ * Dump the current hcd state. ++ */ ++static ssize_t hcddump_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ dwc_otg_hcd_dump_state(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump\n" ); ++} ++ ++DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0); ++ ++/** ++ * Dump the average frame remaining at SOF. This can be used to ++ * determine average interrupt latency. Frame remaining is also shown for ++ * start transfer and two additional sample points. ++ */ ++static ssize_t hcd_frrem_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ dwc_otg_hcd_dump_frrem(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump Frame Remaining\n" ); ++} ++ ++DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0); ++ ++/** ++ * Displays the time required to read the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is read). ++ */ ++#define RW_REG_COUNT 10000000 ++#define MSEC_PER_JIFFIE 1000/HZ ++static ssize_t rd_reg_test_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ int i; ++ int time; ++ int start_jiffies; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time ); ++} ++ ++DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0); ++ ++/** ++ * Displays the time required to write the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is written). 
++ */ ++static ssize_t wr_reg_test_show( struct device *_dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct platform_device *pdev = container_of(_dev, struct platform_device, dev); \ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); \ ++ uint32_t reg_val; ++ int i; ++ int time; ++ int start_jiffies; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time); ++} ++ ++DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0); ++/**@}*/ ++ ++/** ++ * Create the device files ++ */ ++void dwc_otg_attr_create (struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ int error; ++ ++ error = device_create_file(dev, &dev_attr_regoffset); ++ error = device_create_file(dev, &dev_attr_regvalue); ++ error = device_create_file(dev, &dev_attr_mode); ++ error = device_create_file(dev, &dev_attr_hnpcapable); ++ error = device_create_file(dev, &dev_attr_srpcapable); ++ error = device_create_file(dev, &dev_attr_hnp); ++ error = device_create_file(dev, &dev_attr_srp); ++ error = device_create_file(dev, &dev_attr_buspower); ++ error = device_create_file(dev, &dev_attr_bussuspend); ++ error = device_create_file(dev, &dev_attr_busconnected); ++ error = device_create_file(dev, &dev_attr_gotgctl); ++ error = device_create_file(dev, &dev_attr_gusbcfg); ++ error = device_create_file(dev, &dev_attr_grxfsiz); ++ error = device_create_file(dev, &dev_attr_gnptxfsiz); ++ error = device_create_file(dev, &dev_attr_gpvndctl); ++ error = device_create_file(dev, &dev_attr_ggpio); ++ error = device_create_file(dev, &dev_attr_guid); ++ error = device_create_file(dev, &dev_attr_gsnpsid); ++ error = device_create_file(dev, &dev_attr_devspeed); ++ error = device_create_file(dev, &dev_attr_enumspeed); ++ error = device_create_file(dev, &dev_attr_hptxfsiz); ++ error = device_create_file(dev, &dev_attr_hprt0); ++ error = device_create_file(dev, &dev_attr_remote_wakeup); ++ error = device_create_file(dev, &dev_attr_regdump); ++ error = device_create_file(dev, &dev_attr_spramdump); ++ error = device_create_file(dev, &dev_attr_hcddump); ++ error = device_create_file(dev, &dev_attr_hcd_frrem); ++ error = device_create_file(dev, &dev_attr_rd_reg_test); ++ error = device_create_file(dev, &dev_attr_wr_reg_test); ++} ++ ++/** ++ * Remove the device files ++ */ ++void dwc_otg_attr_remove (struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ ++ device_remove_file(dev, &dev_attr_regoffset); ++ device_remove_file(dev, &dev_attr_regvalue); ++ device_remove_file(dev, &dev_attr_mode); ++ device_remove_file(dev, &dev_attr_hnpcapable); ++ device_remove_file(dev, &dev_attr_srpcapable); ++ device_remove_file(dev, &dev_attr_hnp); ++ device_remove_file(dev, &dev_attr_srp); ++ device_remove_file(dev, &dev_attr_buspower); ++ device_remove_file(dev, &dev_attr_bussuspend); ++ device_remove_file(dev, &dev_attr_busconnected); ++ device_remove_file(dev, &dev_attr_gotgctl); ++ device_remove_file(dev, &dev_attr_gusbcfg); ++ device_remove_file(dev, &dev_attr_grxfsiz); ++ device_remove_file(dev, &dev_attr_gnptxfsiz); ++ device_remove_file(dev, 
&dev_attr_gpvndctl); ++ device_remove_file(dev, &dev_attr_ggpio); ++ device_remove_file(dev, &dev_attr_guid); ++ device_remove_file(dev, &dev_attr_gsnpsid); ++ device_remove_file(dev, &dev_attr_devspeed); ++ device_remove_file(dev, &dev_attr_enumspeed); ++ device_remove_file(dev, &dev_attr_hptxfsiz); ++ device_remove_file(dev, &dev_attr_hprt0); ++ device_remove_file(dev, &dev_attr_remote_wakeup); ++ device_remove_file(dev, &dev_attr_regdump); ++ device_remove_file(dev, &dev_attr_spramdump); ++ device_remove_file(dev, &dev_attr_hcddump); ++ device_remove_file(dev, &dev_attr_hcd_frrem); ++ device_remove_file(dev, &dev_attr_rd_reg_test); ++ device_remove_file(dev, &dev_attr_wr_reg_test); ++} +--- /dev/null ++++ b/drivers/usb/dwc/otg_attr.h +@@ -0,0 +1,67 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.h $ ++ * $Revision: #7 $ ++ * $Date: 2005/03/28 $ ++ * $Change: 477051 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_ATTR_H__) ++#define __DWC_OTG_ATTR_H__ ++ ++/** @file ++ * This file contains the interface to the Linux device attributes. 
++ */ ++extern struct device_attribute dev_attr_regoffset; ++extern struct device_attribute dev_attr_regvalue; ++ ++extern struct device_attribute dev_attr_mode; ++extern struct device_attribute dev_attr_hnpcapable; ++extern struct device_attribute dev_attr_srpcapable; ++extern struct device_attribute dev_attr_hnp; ++extern struct device_attribute dev_attr_srp; ++extern struct device_attribute dev_attr_buspower; ++extern struct device_attribute dev_attr_bussuspend; ++extern struct device_attribute dev_attr_busconnected; ++extern struct device_attribute dev_attr_gotgctl; ++extern struct device_attribute dev_attr_gusbcfg; ++extern struct device_attribute dev_attr_grxfsiz; ++extern struct device_attribute dev_attr_gnptxfsiz; ++extern struct device_attribute dev_attr_gpvndctl; ++extern struct device_attribute dev_attr_ggpio; ++extern struct device_attribute dev_attr_guid; ++extern struct device_attribute dev_attr_gsnpsid; ++extern struct device_attribute dev_attr_devspeed; ++extern struct device_attribute dev_attr_enumspeed; ++extern struct device_attribute dev_attr_hptxfsiz; ++extern struct device_attribute dev_attr_hprt0; ++ ++void dwc_otg_attr_create (struct platform_device *pdev); ++void dwc_otg_attr_remove (struct platform_device *pdev); ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc/otg_cil.c +@@ -0,0 +1,3831 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $ ++ * $Revision: #147 $ ++ * $Date: 2008/10/16 $ ++ * $Change: 1117667 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. 
These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * The CIL manages the memory map for the core so that the HCD and PCD ++ * don't have to do this separately. It also handles basic tasks like ++ * reading/writing the registers and data FIFOs in the controller. ++ * Some of the data access functions provide encapsulation of several ++ * operations required to perform a task, such as writing multiple ++ * registers to start a transfer. Finally, the CIL performs basic ++ * services that are not specific to either the host or device modes ++ * of operation. These services include management of the OTG Host ++ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A ++ * Diagnostic API is also provided to allow testing of the controller ++ * hardware. ++ * ++ * The Core Interface Layer has the following requirements: ++ * - Provides basic controller operations. ++ * - Minimal use of OS services. ++ * - The OS services used will be abstracted by using inline functions ++ * or macros. ++ * ++ */ ++#include ++#include ++#ifdef DEBUG ++#include ++#endif ++ ++#include "otg_plat.h" ++#include "otg_regs.h" ++#include "otg_cil.h" ++#include "otg_pcd.h" ++ ++ ++/** ++ * This function is called to initialize the DWC_otg CSR data ++ * structures. The register addresses in the device and host ++ * structures are initialized from the base address supplied by the ++ * caller. The calling function must make the OS calls to get the ++ * base address of the DWC_otg controller registers. The core_params ++ * argument holds the parameters that specify how the core should be ++ * configured. ++ * ++ * @param[in] reg_base_addr Base address of DWC_otg core registers ++ * @param[in] core_params Pointer to the core configuration parameters ++ * ++ */ ++dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr, ++ dwc_otg_core_params_t *core_params) ++{ ++ dwc_otg_core_if_t *core_if = 0; ++ dwc_otg_dev_if_t *dev_if = 0; ++ dwc_otg_host_if_t *host_if = 0; ++ uint8_t *reg_base = (uint8_t *)reg_base_addr; ++ int i = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params); ++ ++ core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL); ++ ++ if (core_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n"); ++ return 0; ++ } ++ ++ memset(core_if, 0, sizeof(dwc_otg_core_if_t)); ++ ++ core_if->core_params = core_params; ++ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base; ++ ++ /* ++ * Allocate the Device Mode structures. ++ */ ++ dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL); ++ ++ if (dev_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n"); ++ kfree(core_if); ++ return 0; ++ } ++ ++ dev_if->dev_global_regs = ++ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET); ++ ++ for (i=0; iin_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *) ++ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ ++ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *) ++ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", ++ i, &dev_if->in_ep_regs[i]->diepctl); ++ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", ++ i, &dev_if->out_ep_regs[i]->doepctl); ++ } ++ ++ dev_if->speed = 0; // unknown ++ ++ core_if->dev_if = dev_if; ++ ++ /* ++ * Allocate the Host Mode structures. 
++ */ ++ host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL); ++ ++ if (host_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n"); ++ kfree(dev_if); ++ kfree(core_if); ++ return 0; ++ } ++ ++ host_if->host_global_regs = (dwc_otg_host_global_regs_t *) ++ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET); ++ ++ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET); ++ ++ for (i=0; ihc_regs[i] = (dwc_otg_hc_regs_t *) ++ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + ++ (i * DWC_OTG_CHAN_REGS_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", ++ i, &host_if->hc_regs[i]->hcchar); ++ } ++ ++ host_if->num_host_channels = MAX_EPS_CHANNELS; ++ core_if->host_if = host_if; ++ ++ for (i=0; idata_fifo[i] = ++ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + ++ (i * DWC_OTG_DATA_FIFO_SIZE)); ++ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", ++ i, (unsigned)core_if->data_fifo[i]); ++ } ++ ++ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET); ++ ++ /* ++ * Store the contents of the hardware configuration registers here for ++ * easy access later. ++ */ ++ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); ++ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); ++ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); ++ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); ++ ++ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); ++ ++ core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); ++ core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ ++ DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32); ++ DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32); ++ ++ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); ++ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); ++ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); ++ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); ++ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); ++ ++ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); ++ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); ++ ++ /* ++ * Set the SRP sucess bit for FS-I2c ++ */ ++ core_if->srp_success = 0; ++ core_if->srp_timer_started = 0; ++ ++ ++ /* ++ * Create new workqueue and init works ++ */ ++ core_if->wq_otg = create_singlethread_workqueue("dwc_otg"); ++ if(core_if->wq_otg == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n"); ++ kfree(host_if); ++ kfree(dev_if); ++ kfree(core_if); ++ return 0 * HZ; ++ } ++ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change); ++ INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected); ++ ++ return core_if; ++} ++ ++/** ++ * This function frees the structures allocated by dwc_otg_cil_init(). ++ * ++ * @param[in] core_if The core interface pointer returned from ++ * dwc_otg_cil_init(). 
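The error paths in dwc_otg_cil_init() above free whatever was already allocated before returning. A common alternative is goto-based unwinding, which keeps the cleanup in one place; this is only a sketch of that idiom with throwaway structures, not how the patch itself is written:

#include <stdlib.h>
#include <stdio.h>

struct dev_if_sketch  { int speed; };
struct host_if_sketch { int num_host_channels; };
struct core_if_sketch {
	struct dev_if_sketch  *dev_if;
	struct host_if_sketch *host_if;
};

static struct core_if_sketch *cil_init_sketch(void)
{
	struct core_if_sketch *core = calloc(1, sizeof(*core));

	if (!core)
		return NULL;
	core->dev_if = calloc(1, sizeof(*core->dev_if));
	if (!core->dev_if)
		goto err_core;
	core->host_if = calloc(1, sizeof(*core->host_if));
	if (!core->host_if)
		goto err_dev;
	return core;                /* fully constructed */

err_dev:
	free(core->dev_if);
err_core:
	free(core);
	return NULL;
}

int main(void)
{
	struct core_if_sketch *core = cil_init_sketch();

	printf("init %s\n", core ? "succeeded" : "failed");
	if (core) {
		free(core->host_if);
		free(core->dev_if);
		free(core);
	}
	return 0;
}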
++ * ++ */ ++void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if) ++{ ++ /* Disable all interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0); ++ dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0); ++ ++ if (core_if->wq_otg) { ++ destroy_workqueue(core_if->wq_otg); ++ } ++ if (core_if->dev_if) { ++ kfree(core_if->dev_if); ++ } ++ if (core_if->host_if) { ++ kfree(core_if->host_if); ++ } ++ kfree(core_if); ++} ++ ++/** ++ * This function enables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); ++} ++ ++/** ++ * This function disables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); ++} ++ ++/** ++ * This function initializes the commmon interrupts, used in both ++ * device and host modes. ++ * ++ * @param[in] core_if Programming view of the DWC_otg controller ++ * ++ */ ++static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ /* Clear any pending OTG Interrupts */ ++ dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* ++ * Enable the interrupts in the GINTMSK. ++ */ ++ intr_mask.b.modemismatch = 1; ++ intr_mask.b.otgintr = 1; ++ ++ if (!core_if->dma_enable) { ++ intr_mask.b.rxstsqlvl = 1; ++ } ++ ++ intr_mask.b.conidstschng = 1; ++ intr_mask.b.wkupintr = 1; ++ intr_mask.b.disconnect = 1; ++ intr_mask.b.usbsuspend = 1; ++ intr_mask.b.sessreqintr = 1; ++ dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32); ++} ++ ++/** ++ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY ++ * type. ++ */ ++static void init_fslspclksel(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t val; ++ hcfg_data_t hcfg; ++ ++ if (((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) || ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* Full speed PHY */ ++ val = DWC_HCFG_48_MHZ; ++ } ++ else { ++ /* High speed PHY running at full speed or high speed */ ++ val = DWC_HCFG_30_60_MHZ; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); ++ hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); ++ hcfg.b.fslspclksel = val; ++ dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32); ++} ++ ++/** ++ * Initializes the DevSpd field of the DCFG register depending on the PHY type ++ * and the enumeration speed of the device. 
++ */ ++static void init_devspd(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t val; ++ dcfg_data_t dcfg; ++ ++ if (((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) || ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* Full speed PHY */ ++ val = 0x3; ++ } ++ else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { ++ /* High speed PHY running at full speed */ ++ val = 0x1; ++ } ++ else { ++ /* High speed PHY running at high speed */ ++ val = 0x0; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); ++ ++ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ dcfg.b.devspd = val; ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); ++} ++ ++/** ++ * This function calculates the number of IN EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ */ ++static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t num_in_eps = 0; ++ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3; ++ uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps; ++ int i; ++ ++ ++ for(i = 0; i < num_eps; ++i) ++ { ++ if(!(hwcfg1 & 0x1)) ++ num_in_eps++; ++ ++ hwcfg1 >>= 2; ++ } ++ ++ if(core_if->hwcfg4.b.ded_fifo_en) { ++ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps; ++ } ++ ++ return num_in_eps; ++} ++ ++ ++/** ++ * This function calculates the number of OUT EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ */ ++static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t num_out_eps = 0; ++ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2; ++ int i; ++ ++ for(i = 0; i < num_eps; ++i) ++ { ++ if(!(hwcfg1 & 0x2)) ++ num_out_eps++; ++ ++ hwcfg1 >>= 2; ++ } ++ return num_out_eps; ++} ++/** ++ * This function initializes the DWC_otg controller registers and ++ * prepares the core for device mode or host mode operation. ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ * ++ */ ++void dwc_otg_core_init(dwc_otg_core_if_t *core_if) ++{ ++ int i = 0; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ gahbcfg_data_t ahbcfg = { .d32 = 0 }; ++ gusbcfg_data_t usbcfg = { .d32 = 0 }; ++ gi2cctl_data_t i2cctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if); ++ ++ /* Common Initialization */ ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++// usbcfg.b.tx_end_delay = 1; ++ /* Program the ULPI External VBUS bit if needed */ ++ usbcfg.b.ulpi_ext_vbus_drv = ++ (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0; ++ ++ /* Set external TS Dline pulsing */ ++ usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ ++ /* Reset the Controller */ ++ dwc_otg_core_reset(core_if); ++ ++ /* Initialize parameters from Hardware configuration registers. 
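calc_num_in_eps() and calc_num_out_eps() above derive the endpoint counts from GHWCFG1, which packs one two-bit direction field per endpoint. A simplified decoder, assuming the usual encoding of 0 = bidirectional, 1 = IN only, 2 = OUT only (this is not a line-for-line copy of the helpers above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ghwcfg1 = 0x00001284u;  /* made-up sample value */
	unsigned num_eps = 6;            /* would come from GHWCFG2.NumDevEps */
	unsigned in = 0, out = 0, i;

	for (i = 1; i <= num_eps; i++) {               /* EP0 is always bidirectional */
		unsigned dir = (ghwcfg1 >> (2 * i)) & 0x3;
		if (dir == 0 || dir == 1)              /* bidirectional or IN-only */
			in++;
		if (dir == 0 || dir == 2)              /* bidirectional or OUT-only */
			out++;
	}
	printf("device-mode endpoints besides EP0: %u IN, %u OUT\n", in, out);
	return 0;
}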
*/ ++ dev_if->num_in_eps = calc_num_in_eps(core_if); ++ dev_if->num_out_eps = calc_num_out_eps(core_if); ++ ++ ++ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep); ++ ++ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) ++ { ++ dev_if->perio_tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", ++ i, dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++) ++ { ++ dev_if->tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", ++ i, dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth; ++ core_if->rx_fifo_size = ++ dwc_read_reg32(&global_regs->grxfsiz); ++ core_if->nperio_tx_fifo_size = ++ dwc_read_reg32(&global_regs->gnptxfsiz) >> 16; ++ ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size); ++ ++ /* This programming sequence needs to happen in FS mode before any other ++ * programming occurs */ ++ if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) && ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* If FS mode with FS PHY */ ++ ++ /* core_init() is now called on every switch so only call the ++ * following for the first time through. */ ++ if (!core_if->phy_init_done) { ++ core_if->phy_init_done = 1; ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.physel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after a PHY select */ ++ dwc_otg_core_reset(core_if); ++ } ++ ++ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also ++ * do this on HNP Dev/Host mode switches (done in dev_init and ++ * host_init). */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ init_fslspclksel(core_if); ++ } ++ else { ++ init_devspd(core_if); ++ } ++ ++ if (core_if->core_params->i2c_enable) { ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n"); ++ /* Program GUSBCFG.OtgUtmifsSel to I2C */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.otgutmifssel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Program GI2CCTL.I2CEn */ ++ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl); ++ i2cctl.b.i2cdevaddr = 1; ++ i2cctl.b.i2cen = 0; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ i2cctl.b.i2cen = 1; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ } ++ ++ } /* endif speed == DWC_SPEED_PARAM_FULL */ ++ ++ else { ++ /* High speed PHY. */ ++ if (!core_if->phy_init_done) { ++ core_if->phy_init_done = 1; ++ /* HS PHY parameters. These parameters are preserved ++ * during soft reset so only program the first time. Do ++ * a soft reset immediately after setting phyif. 
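The FIFO size reads above (the ">> 16" extractions from gnptxfsiz and dptxfsiz_dieptxf[i]) rely on these registers packing the FIFO depth in the upper 16 bits and the start address in the lower 16. A small pack/unpack illustration with made-up sizes:

#include <stdio.h>
#include <stdint.h>

static uint32_t fifosize_pack(uint16_t startaddr, uint16_t depth)
{
	return ((uint32_t)depth << 16) | startaddr;   /* depth in [31:16], start in [15:0] */
}

int main(void)
{
	uint32_t gnptxfsiz = fifosize_pack(512, 256); /* start after a 512-word RX FIFO */

	printf("gnptxfsiz = 0x%08x -> depth %u words, start address %u\n",
	       (unsigned)gnptxfsiz, (unsigned)(gnptxfsiz >> 16), (unsigned)(gnptxfsiz & 0xffff));
	return 0;
}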
*/ ++ usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type; ++ if (usbcfg.b.ulpi_utmi_sel == 1) { ++ /* ULPI interface */ ++ usbcfg.b.phyif = 0; ++ usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr; ++ } ++ else { ++ /* UTMI+ interface */ ++ if (core_if->core_params->phy_utmi_width == 16) { ++ usbcfg.b.phyif = 1; ++ } ++ else { ++ usbcfg.b.phyif = 0; ++ } ++ } ++ ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after setting the PHY parameters */ ++ dwc_otg_core_reset(core_if); ++ } ++ } ++ ++ if ((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) { ++ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 1; ++ usbcfg.b.ulpi_clk_sus_m = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ else { ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 0; ++ usbcfg.b.ulpi_clk_sus_m = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ ++ /* Program the GAHBCFG Register.*/ ++ switch (core_if->hwcfg2.b.architecture) { ++ ++ case DWC_SLAVE_ONLY_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n"); ++ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ core_if->dma_enable = 0; ++ core_if->dma_desc_enable = 0; ++ break; ++ ++ case DWC_EXT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n"); ++ ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size; ++ core_if->dma_enable = (core_if->core_params->dma_enable != 0); ++ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); ++ break; ++ ++ case DWC_INT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n"); ++ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; ++ core_if->dma_enable = (core_if->core_params->dma_enable != 0); ++ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); ++ break; ++ ++ } ++ ahbcfg.b.dmaenable = core_if->dma_enable; ++ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); ++ ++ core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en; ++ ++ core_if->pti_enh_enable = core_if->core_params->pti_enable != 0; ++ core_if->multiproc_int_enable = core_if->core_params->mpi_enable; ++ DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled")); ++ DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled")); ++ ++ /* ++ * Program the GUSBCFG register. 
++ */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++ switch (core_if->hwcfg2.b.op_mode) { ++ case DWC_MODE_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = (core_if->core_params->otg_cap == ++ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE); ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_SRP_ONLY_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ } ++ ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Enable common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* Do device or host intialization based on mode during PCD ++ * and HCD initialization */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ DWC_DEBUGPL(DBG_ANY, "Host Mode\n"); ++ core_if->op_state = A_HOST; ++ } ++ else { ++ DWC_DEBUGPL(DBG_ANY, "Device Mode\n"); ++ core_if->op_state = B_PERIPHERAL; ++#ifdef DWC_DEVICE_ONLY ++ dwc_otg_core_dev_init(core_if); ++#endif ++ } ++} ++ ++ ++/** ++ * This function enables the Device mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* Enable interrupts */ ++ intr_mask.b.usbreset = 1; ++ intr_mask.b.enumdone = 1; ++ ++ if(!core_if->multiproc_int_enable) { ++ intr_mask.b.inepintr = 1; ++ intr_mask.b.outepintr = 1; ++ } ++ ++ intr_mask.b.erlysuspend = 1; ++ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.epmismatch = 1; ++ } ++ ++ ++#ifdef DWC_EN_ISOC ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable == 0) { ++ if(core_if->pti_enh_enable) { ++ dctl_data_t dctl = { .d32 = 0 }; ++ dctl.b.ifrmnum = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32); ++ } else { ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ } ++ } ++ } else { ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ } ++#endif // DWC_EN_ISOC ++ ++/** @todo NGS: Should this be a module parameter? 
*/ ++#ifdef USE_PERIODIC_EP ++ intr_mask.b.isooutdrop = 1; ++ intr_mask.b.eopframe = 1; ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++#endif ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, ++ dwc_read_reg32(&global_regs->gintmsk)); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * device mode. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if) ++{ ++ int i,size; ++ u_int32_t *default_value_array; ++ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_core_params_t *params = core_if->core_params; ++ dcfg_data_t dcfg = { .d32 = 0}; ++ grstctl_t resetctl = { .d32 = 0 }; ++ uint32_t rx_fifo_size; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t txfifosize; ++ dthrctl_data_t dthrctl; ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(core_if->pcgcctl, 0); ++ ++ /* Device configuration register */ ++ init_devspd(core_if); ++ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); ++ dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0; ++ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); ++ ++ /* Configure data FIFO sizes */ ++ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ rx_fifo_size = params->dev_rx_fifo_size; ++ dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size); ++ ++ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /** Set Periodic Tx FIFO Mask all bits 0 */ ++ core_if->p_tx_msk = 0; ++ ++ /** Set Tx FIFO Mask all bits 0 */ ++ core_if->tx_msk = 0; ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; ++ ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ if(core_if->en_multiple_tx_fifo == 0) { ++ //core_if->hwcfg4.b.ded_fifo_en==0 ++ ++ /**@todo NGS: Fix Periodic FIFO Sizing! */ ++ /* ++ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_perio_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz array run from 0 to 14. ++ */ ++ /** @todo Finish debug of this */ ++ size=core_if->hwcfg4.b.num_dev_perio_in_ep; ++ default_value_array=params->dev_perio_tx_fifo_size; ++ ++ } ++ else { ++ //core_if->hwcfg4.b.ded_fifo_en==1 ++ /* ++ * Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz_dieptxf array run from 0 to 14. 
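In the sizing loop that follows, each dedicated TX FIFO starts where the previous one ended (txfifosize.b.startaddr += txfifosize.b.depth), after the RX FIFO and the non-periodic TX FIFO. A standalone sketch of the resulting address map, using invented sizes:

#include <stdio.h>

int main(void)
{
	unsigned rx_depth = 512, nptx_depth = 256;        /* example sizes, in 32-bit words */
	unsigned tx_depth[] = { 256, 256, 128, 128 };     /* dedicated TX FIFOs 1..4 */
	unsigned total = 3072;                            /* would come from GHWCFG3.DfifoDepth */
	unsigned start = rx_depth + nptx_depth;
	unsigned used = start;
	unsigned i;

	printf("rx    start %4u depth %u\n", 0u, rx_depth);
	printf("nptx  start %4u depth %u\n", rx_depth, nptx_depth);
	for (i = 0; i < sizeof(tx_depth) / sizeof(tx_depth[0]); i++) {
		printf("tx%-2u  start %4u depth %u\n", i + 1, start, tx_depth[i]);
		start += tx_depth[i];
		used  += tx_depth[i];
	}
	printf("used %u of %u words\n", used, total);
	return 0;
}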
++ */ ++ ++ size=core_if->hwcfg4.b.num_in_eps; ++ default_value_array=params->dev_tx_fifo_size; ++ ++ } ++ for (i=0; i < size; i++) ++ { ++ ++ txfifosize.b.depth = default_value_array[i]; ++ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i], ++ txfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ txfifosize.b.startaddr += txfifosize.b.depth; ++ } ++ } ++ /* Flush the FIFOs */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */ ++ dwc_otg_flush_rx_fifo(core_if); ++ ++ /* Flush the Learning Queue. */ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ /* Clear all pending Device Interrupts */ ++ ++ if(core_if->multiproc_int_enable) { ++ } ++ ++ /** @todo - if the condition needed to be checked ++ * or in any case all pending interrutps should be cleared? ++ */ ++ if(core_if->multiproc_int_enable) { ++ for(i = 0; i < core_if->dev_if->num_in_eps; ++i) { ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0); ++ } ++ ++ for(i = 0; i < core_if->dev_if->num_out_eps; ++i) { ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0); ++ } ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF); ++ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0); ++ } else { ++ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0); ++ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0); ++ dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF); ++ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0); ++ } ++ ++ for (i=0; i <= dev_if->num_in_eps; i++) ++ { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } ++ else { ++ depctl.d32 = 0; ++ } ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32); ++ ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF); ++ } ++ ++ for (i=0; i <= dev_if->num_out_eps; i++) ++ { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } ++ else { ++ depctl.d32 = 0; ++ } ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32); ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0); ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0); ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF); ++ } ++ ++ if(core_if->en_multiple_tx_fifo && core_if->dma_enable) { ++ dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1; ++ dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1; ++ dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1; ++ ++ dev_if->rx_thr_length = params->rx_thr_length; ++ dev_if->tx_thr_length = params->tx_thr_length; ++ ++ dev_if->setup_desc_index = 0; ++ ++ dthrctl.d32 = 0; ++ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en; ++ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en; ++ dthrctl.b.tx_thr_len = dev_if->tx_thr_length; ++ dthrctl.b.rx_thr_en = dev_if->rx_thr_en; ++ dthrctl.b.rx_thr_len = dev_if->rx_thr_length; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx 
Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n", ++ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len); ++ ++ } ++ ++ dwc_otg_enable_device_interrupts(core_if); ++ ++ { ++ diepmsk_data_t msk = { .d32 = 0 }; ++ msk.b.txfifoundrn = 1; ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32); ++ } ++ } ++ ++ ++ if(core_if->multiproc_int_enable) { ++ /* Set NAK on Babble */ ++ dctl_data_t dctl = { .d32 = 0}; ++ dctl.b.nakonbble = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32); ++ } ++} ++ ++/** ++ * This function enables the Host mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts. */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* ++ * Enable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++} ++ ++/** ++ * This function disables the Host Mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); ++ ++ /* ++ * Disable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ intr_mask.b.ptxfempty = 1; ++ intr_mask.b.nptxfempty = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * host mode. ++ * ++ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the ++ * request queues. Host channels are reset to ensure that they are ready for ++ * performing transfers. 
++ * ++ * @param core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = core_if->host_if; ++ dwc_otg_core_params_t *params = core_if->core_params; ++ hprt0_data_t hprt0 = { .d32 = 0 }; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t ptxfifosize; ++ int i; ++ hcchar_data_t hcchar; ++ hcfg_data_t hcfg; ++ dwc_otg_hc_regs_t *hc_regs; ++ int num_channels; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if); ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(core_if->pcgcctl, 0); ++ ++ /* Initialize Host Configuration Register */ ++ init_fslspclksel(core_if); ++ if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ++ { ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ hcfg.b.fslssupp = 1; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); ++ } ++ ++ /* Configure data FIFO sizes */ ++ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->host_rx_fifo_size; ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ /* Periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ ptxfifosize.b.depth = params->host_perio_tx_fifo_size; ++ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ } ++ ++ /* Clear Host Set HNP Enable in the OTG Control Register */ ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0); ++ ++ /* Make sure the FIFOs are flushed. */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */); ++ dwc_otg_flush_rx_fifo(core_if); ++ ++ /* Flush out any leftover queued requests. */ ++ num_channels = core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) ++ { ++ hc_regs = core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ /* Halt all channels to put them into a known state. 
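The halt loop that follows polls HCCHAR until the Channel Enable bit clears and gives up after 1000 reads. The same structure, as a generic bounded-poll helper with a fake predicate in place of the register read:

#include <stdio.h>
#include <stdbool.h>

/* Spin until the predicate reports done, or the retry budget runs out. */
static bool poll_until(bool (*done)(void *ctx), void *ctx, int max_tries)
{
	int count;

	for (count = 0; count < max_tries; count++)
		if (done(ctx))
			return true;
	return false;          /* caller reports an error, as the driver does */
}

static bool fake_channel_halted(void *ctx)
{
	int *reads_left = ctx;
	return --(*reads_left) <= 0;   /* pretend the channel halts after a few reads */
}

int main(void)
{
	int reads_left = 5;

	printf("halted: %s\n",
	       poll_until(fake_channel_halted, &reads_left, 1000) ? "yes" : "no");
	return 0;
}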
*/ ++ for (i = 0; i < num_channels; i++) ++ { ++ int count = 0; ++ hc_regs = core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); ++ do { ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (++count > 1000) ++ { ++ DWC_ERROR("%s: Unable to clear halt on channel %d\n", ++ __func__, i); ++ break; ++ } ++ } ++ while (hcchar.b.chen); ++ } ++ ++ /* Turn on the vbus power. */ ++ DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state); ++ if (core_if->op_state == A_HOST) { ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); ++ if (hprt0.b.prtpwr == 0) { ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(host_if->hprt0, hprt0.d32); ++ } ++ } ++ ++ dwc_otg_enable_host_interrupts(core_if); ++} ++ ++/** ++ * Prepares a host channel for transferring packets to/from a specific ++ * endpoint. The HCCHARn register is set up with the characteristics specified ++ * in _hc. Host channel interrupts that may need to be serviced while this ++ * transfer is in progress are enabled. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ * @param hc Information needed to initialize the host channel ++ */ ++void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ uint32_t intr_enable; ++ hcintmsk_data_t hc_intr_mask; ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ ++ uint8_t hc_num = hc->hc_num; ++ dwc_otg_host_if_t *host_if = core_if->host_if; ++ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; ++ ++ /* Clear old interrupt conditions for this host channel. */ ++ hc_intr_mask.d32 = 0xFFFFFFFF; ++ hc_intr_mask.b.reserved = 0; ++ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); ++ ++ /* Enable channel interrupts required for this transfer. 
*/ ++ hc_intr_mask.d32 = 0; ++ hc_intr_mask.b.chhltd = 1; ++ if (core_if->dma_enable) { ++ hc_intr_mask.b.ahberr = 1; ++ if (hc->error_state && !hc->do_split && ++ hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ hc_intr_mask.b.ack = 1; ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.datatglerr = 1; ++ if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) { ++ hc_intr_mask.b.nak = 1; ++ } ++ } ++ } ++ } ++ else { ++ switch (hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ else { ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.nyet = 1; ++ if (hc->do_ping) { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (hc->do_split) { ++ hc_intr_mask.b.nak = 1; ++ if (hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ break; ++ case DWC_OTG_EP_TYPE_INTR: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ if (hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ if (hc->do_split) { ++ if (hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ break; ++ case DWC_OTG_EP_TYPE_ISOC: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ hc_intr_mask.b.ack = 1; ++ ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.bblerr = 1; ++ } ++ break; ++ } ++ } ++ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); ++ ++// if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in) ++// hc->max_packet = 512; ++ /* Enable the top level host channel interrupt. */ ++ intr_enable = (1 << hc_num); ++ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); ++ ++ /* Make sure host channel interrupts are enabled. */ ++ gintmsk.b.hcintr = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32); ++ ++ /* ++ * Program the HCCHARn register with the endpoint characteristics for ++ * the current transfer. ++ */ ++ hcchar.d32 = 0; ++ hcchar.b.devaddr = hc->dev_addr; ++ hcchar.b.epnum = hc->ep_num; ++ hcchar.b.epdir = hc->ep_is_in; ++ hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW); ++ hcchar.b.eptype = hc->ep_type; ++ hcchar.b.mps = hc->max_packet; ++ ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); ++ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); ++ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); ++ ++ /* ++ * Program the HCSPLIT register for SPLITs ++ */ ++ hcsplt.d32 = 0; ++ if (hc->do_split) { ++ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num, ++ hc->complete_split ? 
"CSPLIT" : "SSPLIT"); ++ hcsplt.b.compsplt = hc->complete_split; ++ hcsplt.b.xactpos = hc->xact_pos; ++ hcsplt.b.hubaddr = hc->hub_addr; ++ hcsplt.b.prtaddr = hc->port_addr; ++ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split); ++ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos); ++ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr); ++ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr); ++ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len); ++ } ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); ++ ++} ++ ++/** ++ * Attempts to halt a host channel. This function should only be called in ++ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under ++ * normal circumstances in DMA mode, the controller halts the channel when the ++ * transfer is complete or a condition occurs that requires application ++ * intervention. ++ * ++ * In slave mode, checks for a free request queue entry, then sets the Channel ++ * Enable and Channel Disable bits of the Host Channel Characteristics ++ * register of the specified channel to intiate the halt. If there is no free ++ * request queue entry, sets only the Channel Disable bit of the HCCHARn ++ * register to flush requests for this channel. In the latter case, sets a ++ * flag to indicate that the host channel needs to be halted when a request ++ * queue slot is open. ++ * ++ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the ++ * HCCHARn register. The controller ensures there is space in the request ++ * queue before submitting the halt request. ++ * ++ * Some time may elapse before the core flushes any posted requests for this ++ * host channel and halts. The Channel Halted interrupt handler completes the ++ * deactivation of the host channel. ++ * ++ * @param core_if Controller register interface. ++ * @param hc Host channel to halt. ++ * @param halt_status Reason for halting the channel. ++ */ ++void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if, ++ dwc_hc_t *hc, ++ dwc_otg_halt_status_e halt_status) ++{ ++ gnptxsts_data_t nptxsts; ++ hptxsts_data_t hptxsts; ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_core_global_regs_t *global_regs; ++ dwc_otg_host_global_regs_t *host_global_regs; ++ ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ global_regs = core_if->core_global_regs; ++ host_global_regs = core_if->host_if->host_global_regs; ++ ++ WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS); ++ ++ if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Disable all channel interrupts except Ch Halted. The QTD ++ * and QH state associated with this transfer has been cleared ++ * (in the case of URB_DEQUEUE), so the channel needs to be ++ * shut down carefully to prevent crashes. ++ */ ++ hcintmsk_data_t hcintmsk; ++ hcintmsk.d32 = 0; ++ hcintmsk.b.chhltd = 1; ++ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32); ++ ++ /* ++ * Make sure no other interrupts besides halt are currently ++ * pending. Handling another interrupt could cause a crash due ++ * to the QTD and QH state. ++ */ ++ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32); ++ ++ /* ++ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR ++ * even if the channel was already halted for some other ++ * reason. 
++ */ ++ hc->halt_status = halt_status; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen == 0) { ++ /* ++ * The channel is either already halted or it hasn't ++ * started yet. In DMA mode, the transfer may halt if ++ * it finishes normally or a condition occurs that ++ * requires driver intervention. Don't want to halt ++ * the channel again. In either Slave or DMA mode, ++ * it's possible that the transfer has been assigned ++ * to a channel, but not started yet when an URB is ++ * dequeued. Don't want to halt a channel that hasn't ++ * started yet. ++ */ ++ return; ++ } ++ } ++ ++ if (hc->halt_pending) { ++ /* ++ * A halt has already been issued for this channel. This might ++ * happen when a transfer is aborted by a higher level in ++ * the stack. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n", ++ __func__, hc->hc_num); ++ ++/* dwc_otg_dump_global_registers(core_if); */ ++/* dwc_otg_dump_host_registers(core_if); */ ++#endif ++ return; ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ ++ if (!core_if->dma_enable) { ++ /* Check for space in the request queue to issue the halt. */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (nptxsts.b.nptxqspcavail == 0) { ++ hcchar.b.chen = 0; ++ } ++ } ++ else { ++ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); ++ if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) { ++ hcchar.b.chen = 0; ++ } ++ } ++ } ++ ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ hc->halt_status = halt_status; ++ ++ if (hcchar.b.chen) { ++ hc->halt_pending = 1; ++ hc->halt_on_queue = 0; ++ } ++ else { ++ hc->halt_on_queue = 1; ++ } ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending); ++ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status); ++ ++ return; ++} ++ ++/** ++ * Clears the transfer state for a host channel. This function is normally ++ * called after a transfer is done and the host channel is being released. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Identifies the host channel to clean up. ++ */ ++void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ dwc_otg_hc_regs_t *hc_regs; ++ ++ hc->xfer_started = 0; ++ ++ /* ++ * Clear channel interrupt enables and any unhandled channel interrupt ++ * conditions. ++ */ ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ dwc_write_reg32(&hc_regs->hcintmsk, 0); ++ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); ++ ++#ifdef DEBUG ++ del_timer(&core_if->hc_xfer_timer[hc->hc_num]); ++ { ++ hcchar_data_t hcchar; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, hc->hc_num, hcchar.d32); ++ } ++ } ++#endif ++} ++ ++/** ++ * Sets the channel property that indicates in which frame a periodic transfer ++ * should occur. This is always set to the _next_ frame. This function has no ++ * effect on non-periodic transfers. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Identifies the host channel to set up and its properties. 
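hc_set_even_odd_frame(), defined just below, schedules periodic transfers for the next (micro)frame: if the current frame number is odd, the next one is even, so OddFrm is cleared, and vice versa. The mapping in isolation:

#include <stdio.h>

static int oddfrm_for_next_frame(unsigned current_frnum)
{
	return (current_frnum & 0x1) ? 0 : 1;   /* target the frame after the current one */
}

int main(void)
{
	unsigned f;

	for (f = 0; f < 4; f++)
		printf("current frame %u -> OddFrm = %d\n", f, oddfrm_for_next_frame(f));
	return 0;
}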
++ * @param hcchar Current value of the HCCHAR register for the specified host ++ * channel. ++ */ ++static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if, ++ dwc_hc_t *hc, ++ hcchar_data_t *hcchar) ++{ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hfnum_data_t hfnum; ++ hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum); ++ ++ /* 1 if _next_ frame is odd, 0 if it's even */ ++ hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1; ++#ifdef DEBUG ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) { ++ switch (hfnum.b.frnum & 0x7) { ++ case 7: ++ core_if->hfnum_7_samples++; ++ core_if->hfnum_7_frrem_accum += hfnum.b.frrem; ++ break; ++ case 0: ++ core_if->hfnum_0_samples++; ++ core_if->hfnum_0_frrem_accum += hfnum.b.frrem; ++ break; ++ default: ++ core_if->hfnum_other_samples++; ++ core_if->hfnum_other_frrem_accum += hfnum.b.frrem; ++ break; ++ } ++ } ++#endif ++ } ++} ++ ++#ifdef DEBUG ++static void hc_xfer_timeout(unsigned long ptr) ++{ ++ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr; ++ int hc_num = xfer_info->hc->hc_num; ++ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); ++ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); ++} ++#endif ++ ++/* ++ * This function does the setup for a data transfer for a host channel and ++ * starts the transfer. May be called in either Slave mode or DMA mode. In ++ * Slave mode, the caller must ensure that there is sufficient space in the ++ * request queue and Tx Data FIFO. ++ * ++ * For an OUT transfer in Slave mode, it loads a data packet into the ++ * appropriate FIFO. If necessary, additional data packets will be loaded in ++ * the Host ISR. ++ * ++ * For an IN transfer in Slave mode, a data packet is requested. The data ++ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, ++ * additional data packets are requested in the Host ISR. ++ * ++ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ ++ * register along with a packet count of 1 and the channel is enabled. This ++ * causes a single PING transaction to occur. Other fields in HCTSIZ are ++ * simply set to 0 since no data transfer occurs in this case. ++ * ++ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with ++ * all the information required to perform the subsequent data transfer. In ++ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the ++ * controller performs the entire PING protocol, then starts the data ++ * transfer. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Information needed to initialize the host channel. The xfer_len ++ * value may be reduced to accommodate the max widths of the XferSize and ++ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed ++ * to reflect the final xfer_len value. 
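The clamping described here boils down to a little arithmetic on XferSize and PktCnt. A standalone version with example limits (the register field widths vary by core configuration, so the numbers below are only illustrative, and the periodic-endpoint clamp is omitted):

#include <stdio.h>

int main(void)
{
	unsigned xfer_len = 100000;          /* requested bytes */
	unsigned max_packet = 512;
	unsigned max_hc_xfer_size = 65535;   /* width-limited XferSize */
	unsigned max_hc_pkt_count = 511;     /* width-limited PktCnt */
	unsigned num_packets;

	if (xfer_len > max_hc_xfer_size)
		xfer_len = max_hc_xfer_size - max_packet + 1;

	num_packets = (xfer_len + max_packet - 1) / max_packet;
	if (num_packets > max_hc_pkt_count) {
		num_packets = max_hc_pkt_count;
		xfer_len = num_packets * max_packet;
	}
	printf("programmed xfer_len=%u, num_packets=%u\n", xfer_len, num_packets);
	return 0;
}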
++ */ ++void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ uint16_t num_packets; ++ uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size; ++ uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ hctsiz.d32 = 0; ++ ++ if (hc->do_ping) { ++ if (!core_if->dma_enable) { ++ dwc_otg_hc_do_ping(core_if, hc); ++ hc->xfer_started = 1; ++ return; ++ } ++ else { ++ hctsiz.b.dopng = 1; ++ } ++ } ++ ++ if (hc->do_split) { ++ num_packets = 1; ++ ++ if (hc->complete_split && !hc->ep_is_in) { ++ /* For CSPLIT OUT Transfer, set the size to 0 so the ++ * core doesn't expect any data written to the FIFO */ ++ hc->xfer_len = 0; ++ } ++ else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) { ++ hc->xfer_len = hc->max_packet; ++ } ++ else if (!hc->ep_is_in && (hc->xfer_len > 188)) { ++ hc->xfer_len = 188; ++ } ++ ++ hctsiz.b.xfersize = hc->xfer_len; ++ } ++ else { ++ /* ++ * Ensure that the transfer length and packet count will fit ++ * in the widths allocated for them in the HCTSIZn register. ++ */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure the transfer size is no larger than one ++ * (micro)frame's worth of data. (A check was done ++ * when the periodic transfer was accepted to ensure ++ * that a (micro)frame's worth of data can be ++ * programmed into a channel.) ++ */ ++ uint32_t max_periodic_len = hc->multi_count * hc->max_packet; ++ if (hc->xfer_len > max_periodic_len) { ++ hc->xfer_len = max_periodic_len; ++ } ++ else { ++ } ++ } ++ else if (hc->xfer_len > max_hc_xfer_size) { ++ /* Make sure that xfer_len is a multiple of max packet size. */ ++ hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1; ++ } ++ ++ if (hc->xfer_len > 0) { ++ num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet; ++ if (num_packets > max_hc_pkt_count) { ++ num_packets = max_hc_pkt_count; ++ hc->xfer_len = num_packets * hc->max_packet; ++ } ++ } ++ else { ++ /* Need 1 packet for transfer length of 0. */ ++ num_packets = 1; ++ } ++ ++#if 0 ++//host testusb item 10, would do series of Control transfer ++//with URB_SHORT_NOT_OK set in transfer_flags , ++//changing the xfer_len would cause the test fail ++ if (hc->ep_is_in) { ++ /* Always program an integral # of max packets for IN transfers. */ ++ hc->xfer_len = num_packets * hc->max_packet; ++ } ++#endif ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure that the multi_count field matches the ++ * actual transfer length. ++ */ ++ hc->multi_count = num_packets; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* Set up the initial PID for the transfer. 
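For high-bandwidth isochronous endpoints, the code immediately below picks the starting PID from multi_count and the transfer direction. The same selection as a small pure function, with stand-in PID constants:

#include <stdio.h>

enum pid { PID_DATA0, PID_DATA1, PID_DATA2, PID_MDATA };   /* illustrative values */

static enum pid isoc_start_pid(int high_speed, int ep_is_in, unsigned multi_count)
{
	if (!high_speed)
		return PID_DATA0;
	if (ep_is_in)
		return multi_count == 1 ? PID_DATA0 :
		       multi_count == 2 ? PID_DATA1 : PID_DATA2;
	return multi_count == 1 ? PID_DATA0 : PID_MDATA;
}

int main(void)
{
	printf("HS ISOC IN,  multi=3: %d\n", isoc_start_pid(1, 1, 3));
	printf("HS ISOC OUT, multi=2: %d\n", isoc_start_pid(1, 0, 2));
	return 0;
}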
*/ ++ if (hc->speed == DWC_OTG_EP_SPEED_HIGH) { ++ if (hc->ep_is_in) { ++ if (hc->multi_count == 1) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ else if (hc->multi_count == 2) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA2; ++ } ++ } ++ else { ++ if (hc->multi_count == 1) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_MDATA; ++ } ++ } ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ } ++ ++ hctsiz.b.xfersize = hc->xfer_len; ++ } ++ ++ hc->start_pkt_count = num_packets; ++ hctsiz.b.pktcnt = num_packets; ++ hctsiz.b.pid = hc->data_pid_start; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); ++ ++ if (core_if->dma_enable) { ++ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff); ++ } ++ ++ /* Start the split */ ++ if (hc->do_split) { ++ hcsplt_data_t hcsplt; ++ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); ++ hcsplt.b.spltena = 1; ++ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.multicnt = hc->multi_count; ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++#ifdef DEBUG ++ core_if->start_hcchar_val[hc->hc_num] = hcchar.d32; ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, hc->hc_num, hcchar.d32); ++ } ++#endif ++ ++ /* Set host channel enable after all other setup is complete. */ ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ hc->xfer_started = 1; ++ hc->requests++; ++ ++ if (!core_if->dma_enable && ++ !hc->ep_is_in && hc->xfer_len > 0) { ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(core_if, hc); ++ } ++ ++#ifdef DEBUG ++ /* Start a timer for this transfer. */ ++ core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout; ++ core_if->hc_xfer_info[hc->hc_num].core_if = core_if; ++ core_if->hc_xfer_info[hc->hc_num].hc = hc; ++ core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]); ++ core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10); ++ add_timer(&core_if->hc_xfer_timer[hc->hc_num]); ++#endif ++} ++ ++/** ++ * This function continues a data transfer that was started by previous call ++ * to dwc_otg_hc_start_transfer. The caller must ensure there is ++ * sufficient space in the request queue and Tx Data FIFO. This function ++ * should only be called in Slave mode. In DMA mode, the controller acts ++ * autonomously to complete transfers programmed to a host channel. ++ * ++ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO ++ * if there is any data remaining to be queued. For an IN transfer, another ++ * data packet is always requested. For the SETUP phase of a control transfer, ++ * this function does nothing. ++ * ++ * @return 1 if a new request is queued, 0 if no more requests are required ++ * for this transfer. 
++ */ ++int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ ++ if (hc->do_split) { ++ /* SPLITs always queue just once per channel */ ++ return 0; ++ } ++ else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ /* SETUPs are queued only once since they can't be NAKed. */ ++ return 0; ++ } ++ else if (hc->ep_is_in) { ++ /* ++ * Always queue another request for other IN transfers. If ++ * back-to-back INs are issued and NAKs are received for both, ++ * the driver may still be processing the first NAK when the ++ * second NAK is received. When the interrupt handler clears ++ * the NAK interrupt for the first NAK, the second NAK will ++ * not be seen. So we can't depend on the NAK interrupt ++ * handler to requeue a NAKed request. Instead, IN requests ++ * are issued each time this function is called. When the ++ * transfer completes, the extra requests for the channel will ++ * be flushed. ++ */ ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ hc->requests++; ++ return 1; ++ } ++ else { ++ /* OUT transfers. */ ++ if (hc->xfer_count < hc->xfer_len) { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++ } ++ ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(core_if, hc); ++ hc->requests++; ++ return 1; ++ } ++ else { ++ return 0; ++ } ++ } ++} ++ ++/** ++ * Starts a PING transfer. This function should only be called in Slave mode. ++ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. ++ */ ++void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ ++ hctsiz.d32 = 0; ++ hctsiz.b.dopng = 1; ++ hctsiz.b.pktcnt = 1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++} ++ ++/* ++ * This function writes a packet into the Tx FIFO associated with the Host ++ * Channel. For a channel associated with a non-periodic EP, the non-periodic ++ * Tx FIFO is written. For a channel associated with a periodic EP, the ++ * periodic Tx FIFO is written. This function should only be called in Slave ++ * mode. ++ * ++ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by ++ * then number of bytes written to the Tx FIFO. 
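dwc_otg_hc_write_packet(), added below, rounds the byte count up to 32-bit words and uses get_unaligned() for buffers that are not DWORD aligned. A user-space sketch of the packing, where memcpy stands in for get_unaligned() and the trailing word is zero-padded:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void write_packet_sketch(volatile uint32_t *fifo, const uint8_t *buf, size_t byte_count)
{
	size_t dwords = (byte_count + 3) / 4;
	size_t i;

	for (i = 0; i < dwords; i++) {
		uint32_t word = 0;
		size_t remaining = byte_count - 4 * i;

		memcpy(&word, buf + 4 * i, remaining < 4 ? remaining : 4);
		*fifo = word;                /* every word goes to the same FIFO address */
	}
}

int main(void)
{
	uint32_t fake_fifo = 0;
	uint8_t payload[7] = { 1, 2, 3, 4, 5, 6, 7 };

	write_packet_sketch(&fake_fifo, payload, sizeof(payload));
	printf("last word written: 0x%08x\n", (unsigned)fake_fifo);
	return 0;
}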
++ */ ++void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ uint32_t i; ++ uint32_t remaining_count; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ ++ uint32_t *data_buff = (uint32_t *)(hc->xfer_buff); ++ uint32_t *data_fifo = core_if->data_fifo[hc->hc_num]; ++ ++ remaining_count = hc->xfer_len - hc->xfer_count; ++ if (remaining_count > hc->max_packet) { ++ byte_count = hc->max_packet; ++ } ++ else { ++ byte_count = remaining_count; ++ } ++ ++ dword_count = (byte_count + 3) / 4; ++ ++ if ((((unsigned long)data_buff) & 0x3) == 0) { ++ /* xfer_buff is DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) ++ { ++ dwc_write_reg32(data_fifo, *data_buff); ++ } ++ } ++ else { ++ /* xfer_buff is not DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) ++ { ++ dwc_write_reg32(data_fifo, get_unaligned(data_buff)); ++ } ++ } ++ ++ hc->xfer_count += byte_count; ++ hc->xfer_buff += byte_count; ++} ++ ++/** ++ * Gets the current USB frame number. This is the frame number from the last ++ * SOF packet. ++ */ ++uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ /* read current frame/microframe number from DSTS register */ ++ return dsts.b.soffn; ++} ++ ++/** ++ * This function reads a setup packet from the Rx FIFO into the destination ++ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) ++ * Interrupt routine when a SETUP packet has been received in Slave mode. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dest Destination buffer for packet data. ++ */ ++void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest) ++{ ++ /* Get the 8 bytes of a setup transaction data */ ++ ++ /* Pop 2 DWORDS off the receive data FIFO into memory */ ++ dest[0] = dwc_read_reg32(core_if->data_fifo[0]); ++ dest[1] = dwc_read_reg32(core_if->data_fifo[0]); ++} ++ ++ ++/** ++ * This function enables EP0 OUT to receive SETUP packets and configures EP0 ++ * IN for transmitting packets. It is normally called when the ++ * "Enumeration Done" interrupt occurs. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. 
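dwc_otg_ep0_activate(), below, chooses the EP0 max packet size from the enumerated speed. The mapping in isolation; the numeric DEPCTL0.MPS encoding (0 = 64 bytes ... 3 = 8 bytes) is assumed here, since the patch only shows the symbolic constants:

#include <stdio.h>

enum { MPS_64 = 0, MPS_32 = 1, MPS_16 = 2, MPS_8 = 3 };  /* assumed DEPCTL0.MPS encoding */

static int ep0_mps_for_speed(int low_speed)
{
	return low_speed ? MPS_8 : MPS_64;   /* HS and FS use a 64-byte EP0, LS uses 8 */
}

int main(void)
{
	printf("FS/HS EP0 MPS code: %d\n", ep0_mps_for_speed(0));
	printf("LS    EP0 MPS code: %d\n", ep0_mps_for_speed(1));
	return 0;
}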
++ */ ++void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dsts_data_t dsts; ++ depctl_data_t diepctl; ++ depctl_data_t doepctl; ++ dctl_data_t dctl = { .d32 = 0 }; ++ ++ /* Read the Device Status and Endpoint 0 Control registers */ ++ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts); ++ diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl); ++ doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl); ++ ++ /* Set the MPS of the IN EP based on the enumeration speed */ ++ switch (dsts.b.enumspd) { ++ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: ++ diepctl.b.mps = DWC_DEP0CTL_MPS_64; ++ break; ++ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: ++ diepctl.b.mps = DWC_DEP0CTL_MPS_8; ++ break; ++ } ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32); ++ ++ /* Enable OUT EP for receive */ ++ doepctl.b.epena = 1; ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); ++#endif ++ dctl.b.cgnpinnak = 1; ++ ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); ++ DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n", ++ dwc_read_reg32(&dev_if->dev_global_regs->dctl)); ++} ++ ++/** ++ * This function activates an EP. The Device EP control register for ++ * the EP is configured as defined in the ep structure. Note: This ++ * function is not used for EP0. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to activate. ++ */ ++void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ depctl_data_t depctl; ++ volatile uint32_t *addr; ++ daint_data_t daintmsk = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ /* Read DEPCTLn register */ ++ if (ep->is_in == 1) { ++ addr = &dev_if->in_ep_regs[ep->num]->diepctl; ++ daintmsk.ep.in = 1<num; ++ } ++ else { ++ addr = &dev_if->out_ep_regs[ep->num]->doepctl; ++ daintmsk.ep.out = 1<num; ++ } ++ ++ /* If the EP is already active don't change the EP Control ++ * register. */ ++ depctl.d32 = dwc_read_reg32(addr); ++ if (!depctl.b.usbactep) { ++ depctl.b.mps = ep->maxpacket; ++ depctl.b.eptype = ep->type; ++ depctl.b.txfnum = ep->tx_fifo_num; ++ ++ if (ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ depctl.b.setd0pid = 1; // ??? ++ } ++ else { ++ depctl.b.setd0pid = 1; ++ } ++ depctl.b.usbactep = 1; ++ ++ dwc_write_reg32(addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCDV,"DEPCTL(%.8x)=%08x\n",(u32)addr, dwc_read_reg32(addr)); ++ } ++ ++ /* Enable the Interrupt for this EP */ ++ if(core_if->multiproc_int_enable) { ++ if (ep->is_in == 1) { ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ diepmsk.b.txfifoundrn = 1; //????? 
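++			/*
++			 * In per-endpoint (multiproc) interrupt mode the mask built
++			 * here is completed below (BNA for descriptor DMA) and written
++			 * to DIEPEACHINTMSKn, with the endpoint bit enabled in
++			 * DEACHINTMSK; otherwise only the shared DAINTMSK is updated
++			 * in the else branch further down.
++			 */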
++ ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++/* ++ if(core_if->dma_enable) { ++ doepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32); ++ ++ } else { ++ doepmsk_data_t doepmsk = { .d32 = 0}; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ doepmsk.b.nak = 1; ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32); ++ } ++ dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk, ++ 0, daintmsk.d32); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, ++ 0, daintmsk.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n", ++ dwc_read_reg32(&dev_if->dev_global_regs->daintmsk)); ++ ++ ep->stall_clear_flag = 0; ++ return; ++} ++ ++/** ++ * This function deactivates an EP. This is done by clearing the USB Active ++ * EP bit in the Device EP control register. Note: This function is not used ++ * for EP0. EP0 cannot be deactivated. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to deactivate. ++ */ ++void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ daint_data_t daintmsk = { .d32 = 0}; ++ ++ /* Read DEPCTLn register */ ++ if (ep->is_in == 1) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ daintmsk.ep.in = 1<num; ++ } ++ else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ daintmsk.ep.out = 1<num; ++ } ++ ++ //disabled ep only when ep is enabled ++ //or got halt in the loop in test in cv9 ++ depctl.d32=dwc_read_reg32(addr); ++ if(depctl.b.epena){ ++ if (ep->is_in == 1) { ++ diepint_data_t diepint; ++ dwc_otg_dev_in_ep_regs_t *in_reg=core_if->dev_if->in_ep_regs[ep->num]; ++ ++ //Set ep nak ++ depctl.d32=dwc_read_reg32(&in_reg->diepctl); ++ depctl.b.snak=1; ++ dwc_write_reg32(&in_reg->diepctl,depctl.d32); ++ ++ //wait for diepint.b.inepnakeff ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ while(!diepint.b.inepnakeff){ ++ udelay(1); ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ } ++ diepint.d32=0; ++ diepint.b.inepnakeff=1; ++ dwc_write_reg32(&in_reg->diepint,diepint.d32); ++ ++ //set ep disable and snak ++ depctl.d32=dwc_read_reg32(&in_reg->diepctl); ++ depctl.b.snak=1; ++ depctl.b.epdis=1; ++ dwc_write_reg32(&in_reg->diepctl,depctl.d32); ++ ++ //wait for diepint.b.epdisabled ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ while(!diepint.b.epdisabled){ ++ udelay(1); ++ diepint.d32=dwc_read_reg32(&in_reg->diepint); ++ } ++ diepint.d32=0; ++ diepint.b.epdisabled=1; ++ dwc_write_reg32(&in_reg->diepint,diepint.d32); ++ ++ //clear ep enable and disable bit ++ depctl.d32=dwc_read_reg32(&in_reg->diepctl); ++ depctl.b.epena=0; ++ depctl.b.epdis=0; ++ dwc_write_reg32(&in_reg->diepctl,depctl.d32); ++ ++ } ++#if 0 ++//following DWC OTG DataBook v2.72a, 6.4.2.1.3 Disabling an OUT Endpoint, ++//but this doesn't work, the old code do. 
++ else { ++ doepint_data_t doepint; ++ dwc_otg_dev_out_ep_regs_t *out_reg=core_if->dev_if->out_ep_regs[ep->num]; ++ dctl_data_t dctl; ++ gintsts_data_t gintsts; ++ ++ //set dctl global out nak ++ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); ++ dctl.b.sgoutnak=1; ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl,dctl.d32); ++ ++ //wait for gintsts.goutnakeff ++ gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ while(!gintsts.b.goutnakeff){ ++ udelay(1); ++ gintsts.d32=dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ } ++ gintsts.d32=0; ++ gintsts.b.goutnakeff=1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ //set ep disable and snak ++ depctl.d32=dwc_read_reg32(&out_reg->doepctl); ++ depctl.b.snak=1; ++ depctl.b.epdis=1; ++ dwc_write_reg32(&out_reg->doepctl,depctl.d32); ++ ++ //wait for diepint.b.epdisabled ++ doepint.d32=dwc_read_reg32(&out_reg->doepint); ++ while(!doepint.b.epdisabled){ ++ udelay(1); ++ doepint.d32=dwc_read_reg32(&out_reg->doepint); ++ } ++ doepint.d32=0; ++ doepint.b.epdisabled=1; ++ dwc_write_reg32(&out_reg->doepint,doepint.d32); ++ ++ //clear ep enable and disable bit ++ depctl.d32=dwc_read_reg32(&out_reg->doepctl); ++ depctl.b.epena=0; ++ depctl.b.epdis=0; ++ dwc_write_reg32(&out_reg->doepctl,depctl.d32); ++ } ++#endif ++ ++ depctl.d32=0; ++ depctl.b.usbactep = 0; ++ ++ if (ep->is_in == 0) { ++ if(core_if->dma_enable||core_if->dma_desc_enable) ++ depctl.b.epdis = 1; ++ } ++ ++ dwc_write_reg32(addr, depctl.d32); ++ } ++ ++ /* Disable the Interrupt for this EP */ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk, ++ daintmsk.d32, 0); ++ ++ if (ep->is_in == 1) { ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0); ++ } else { ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0); ++ } ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk, ++ daintmsk.d32, 0); ++ } ++ ++ if (ep->is_in == 1) { ++ DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n", ++ (u32)&core_if->dev_if->in_ep_regs[ep->num]->diepctl, ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepctl), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepint), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->diepdma), ++ dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ } ++ else { ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n", ++ (u32)&core_if->dev_if->out_ep_regs[ep->num]->doepctl, ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepctl), ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz), ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepint), ++ dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doepdma)); ++ ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ } ++ ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. 
For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ */ ++static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dma_desc_t* dma_desc; ++ uint32_t offset; ++ uint32_t xfer_est; ++ int i; ++ ++ ep->desc_cnt = ( ep->total_len / ep->maxxfer) + ++ ((ep->total_len % ep->maxxfer) ? 1 : 0); ++ if(!ep->desc_cnt) ++ ep->desc_cnt = 1; ++ ++ dma_desc = ep->desc_addr; ++ xfer_est = ep->total_len; ++ offset = 0; ++ for( i = 0; i < ep->desc_cnt; ++i) { ++ /** DMA Descriptor Setup */ ++ if(xfer_est > ep->maxxfer) { ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 0; ++ dma_desc->status.b.ioc = 0; ++ dma_desc->status.b.sp = 0; ++ dma_desc->status.b.bytes = ep->maxxfer; ++ dma_desc->buf = ep->dma_addr + offset; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ xfer_est -= ep->maxxfer; ++ offset += ep->maxxfer; ++ } else { ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ if(ep->is_in) { ++ dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ? ++ 1 : ((ep->sent_zlp) ? 1 : 0); ++ dma_desc->status.b.bytes = xfer_est; ++ } else { ++ dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ; ++ } ++ ++ dma_desc->buf = ep->dma_addr + offset; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ } ++ dma_desc ++; ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ */ ++ ++void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p\n", ++ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, ++ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[ep->num]; ++ ++ gnptxsts_data_t gtxstatus; ++ ++ gtxstatus.d32 = ++ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ ++ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32); ++#endif ++ return; ++ } ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? ++ ep->maxxfer : (ep->total_len - ep->xfer_len); ++ ++ /* Zero Length Packet? */ ++ if ((ep->xfer_len - ep->xfer_count) == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ } ++ ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ep->num, ep->dma_addr); ++ ++ if (core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ init_dma_desc_chain(core_if, ep); ++ /** DIEPDMAn Register write */ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr); ++ dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr); ++ } ++ } ++ else ++ { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ if(ep->type != DWC_OTG_EP_TYPE_ISOC) { ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. ++ */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ ++ } ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = ep->num; ++ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, "DIEPCTL(%.8x)=%08x DIEPTSIZ=%08x, DIEPINT=%.8x, DIEPDMA=%.8x, DTXFSTS=%.8x\n", ++ (u32)&in_regs->diepctl, ++ dwc_read_reg32(&in_regs->diepctl), ++ dwc_read_reg32(&in_regs->dieptsiz), ++ dwc_read_reg32(&in_regs->diepint), ++ dwc_read_reg32(&in_regs->diepdma), ++ dwc_read_reg32(&in_regs->dtxfsts)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? 
++ ep->maxxfer : (ep->total_len - ep->xfer_len); ++ ++ /* Program the transfer size and packet count as follows: ++ * ++ * pktcnt = N ++ * xfersize = N * maxpacket ++ */ ++ if ((ep->xfer_len - ep->xfer_count) == 0) { ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count; ++ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n", ++ ep->num, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ DWC_DEBUGPL(DBG_PCDV, "ep%d dma_addr=%.8x\n", ++ ep->num, ++ ep->dma_addr); ++ if (!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ init_dma_desc_chain(core_if, ep); ++ ++ /** DOEPDMAn Register write */ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_desc_addr); ++ dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL(%.8x)=%08x DOEPTSIZ=%08x, DOEPINT=%.8x, DOEPDMA=%.8x\n", ++ (u32)&out_regs->doepctl, ++ dwc_read_reg32(&out_regs->doepctl), ++ dwc_read_reg32(&out_regs->doeptsiz), ++ dwc_read_reg32(&out_regs->doepint), ++ dwc_read_reg32(&out_regs->doepdma)); ++ ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ } ++} ++ ++/** ++ * This function setup a zero length transfer in Buffer DMA and ++ * Slave modes for usb requests with zero field set ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ ++ depctl_data_t depctl; ++ deptsiz_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if (core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. 
++ */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = ep->num; ++ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if (!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for EP0 and starts ++ * the transfer. For an IN transfer, the packets will be loaded into ++ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are ++ * unloaded from the Rx FIFO in the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. ++ */ ++void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p, dma_addr=%.8x\n", ++ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, ++ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff,ep->dma_addr); ++ ++ ep->total_len = ep->xfer_len; ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[0]; ++ ++ gnptxsts_data_t gtxstatus; ++ ++ gtxstatus.d32 = ++ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ ++ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n", ++ dwc_read_reg32(&in_regs->diepctl)); ++ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n", ++ deptsiz.d32, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", ++ gtxstatus.d32); ++#endif ++ return; ++ } ++ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Zero Length Packet? 
*/ ++ if (ep->xfer_len == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ if (ep->xfer_len > ep->maxpacket) { ++ ep->xfer_len = ep->maxpacket; ++ deptsiz.b.xfersize = ep->maxpacket; ++ } ++ else { ++ deptsiz.b.xfersize = ep->xfer_len; ++ } ++ deptsiz.b.pktcnt = 1; ++ ++ } ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if(core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ dma_desc = core_if->dev_if->in_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; ++ dma_desc->status.b.bytes = ep->xfer_len; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DIEPDMA0 Register write */ ++ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr); ++ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. 
++ */ ++ if (!core_if->dma_enable) { ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[0]; ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count as follows: ++ * xfersize = N * (maxpacket + 4 - (maxpacket % 4)) ++ * pktcnt = N */ ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (core_if->dma_enable) { ++ if (/*(core_if->dma_enable)&&*/(ep->dma_addr==DMA_ADDR_INVALID)) { ++ ep->dma_addr=dma_map_single(NULL,(void *)(ep->xfer_buff),(ep->xfer_len),DMA_TO_DEVICE); ++ } ++ if(!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ dma_desc = core_if->dev_if->out_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = ep->maxpacket; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr); ++ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32); ++ } ++} ++ ++/** ++ * This function continues control IN transfers started by ++ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a ++ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one ++ * bit for the packet count. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. ++ */ ++void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[0]; ++ gnptxsts_data_t tx_status = { .d32 = 0 }; ++ ++ tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ /** @todo Should there be check for room in the Tx ++ * Status Queue. If not remove the code above this comment. */ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ ++ ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? 
ep->maxpacket : ++ (ep->total_len - ep->xfer_count); ++ deptsiz.b.pktcnt = 1; ++ if(core_if->dma_enable == 0) { ++ ep->xfer_len += deptsiz.b.xfersize; ++ } else { ++ ep->xfer_len = deptsiz.b.xfersize; ++ } ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ } ++ else { ++ ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : ++ (ep->total_len - ep->xfer_count); ++ ++ dma_desc = core_if->dev_if->in_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; ++ dma_desc->status.b.bytes = ep->xfer_len; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ ++ /** DIEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_in_desc_addr); ++ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ if(core_if->dma_desc_enable == 0){ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. ++ */ ++ if (!core_if->dma_enable) { ++ if(core_if->en_multiple_tx_fifo == 0) { ++ /* First clear it from GINTSTS */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ } ++ else { ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[0]; ++ ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ ++ if(core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ else { ++ dma_desc = core_if->dev_if->out_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = ep->maxpacket; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(core_if->dev_if->dma_out_desc_addr); ++ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ if(core_if->dma_desc_enable == 0){ ++ ++ VERIFY_PCD_DMA_ADDR(ep->dma_addr); ++ dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ } ++} ++ ++#ifdef DEBUG ++void dump_msg(const u8 *buf, unsigned int length) ++{ ++ unsigned int start, num, i; ++ char line[52], *p; ++ ++ if (length >= 512) ++ return; ++ start = 0; ++ while (length > 0) { ++ num = min(length, 16u); ++ p = line; ++ for (i = 0; i < num; ++i) ++ { ++ if (i == 8) ++ *p++ = ' '; ++ sprintf(p, " %02x", buf[i]); ++ p += 3; ++ } ++ *p = 0; ++ DWC_PRINT("%6x: %s\n", start, line); ++ buf += num; ++ start += num; ++ length -= num; ++ } ++} ++#else ++static inline void dump_msg(const u8 *buf, unsigned int length) ++{ ++} ++#endif ++ ++/** ++ * This function writes a packet into the Tx FIFO associated with the ++ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For ++ * periodic EPs the periodic Tx FIFO associated with the EP is written ++ * with all packets for the next micro-frame. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to write packet for. ++ * @param dma Indicates if DMA is being used. ++ */ ++void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma) ++{ ++ /** ++ * The buffer is padded to DWORD on a per packet basis in ++ * slave/dma mode if the MPS is not DWORD aligned. The last ++ * packet, if short, is also padded to a multiple of DWORD. 
++ * ++ * ep->xfer_buff always starts DWORD aligned in memory and is a ++ * multiple of DWORD in length ++ * ++ * ep->xfer_len can be any number of bytes ++ * ++ * ep->xfer_count is a multiple of ep->maxpacket until the last ++ * packet ++ * ++ * FIFO access is DWORD */ ++ ++ uint32_t i; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ uint32_t *fifo; ++ uint32_t *data_buff = (uint32_t *)ep->xfer_buff; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep); ++ if (ep->xfer_count >= ep->xfer_len) { ++ DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num); ++ return; ++ } ++ ++ /* Find the byte length of the packet either short packet or MPS */ ++ if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) { ++ byte_count = ep->xfer_len - ep->xfer_count; ++ } ++ else { ++ byte_count = ep->maxpacket; ++ } ++ ++ /* Find the DWORD length, padded by extra bytes as neccessary if MPS ++ * is not a multiple of DWORD */ ++ dword_count = (byte_count + 3) / 4; ++ ++#ifdef VERBOSE ++ dump_msg(ep->xfer_buff, byte_count); ++#endif ++ ++ /**@todo NGS Where are the Periodic Tx FIFO addresses ++ * intialized? What should this be? */ ++ ++ fifo = core_if->data_fifo[ep->num]; ++ ++ ++ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count); ++ ++ if (!dma) { ++ for (i=0; ixfer_count += byte_count; ++ ep->xfer_buff += byte_count; ++ ep->dma_addr += byte_count; ++} ++ ++/** ++ * Set the EP STALL. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to set the stall on. ++ */ ++void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s ep%d-%s1\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ DWC_PRINT("%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"in":"out")); ++ ++ if (ep->is_in == 1) { ++ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the disable and stall bits */ ++#if 0 ++//epdis is set here but not cleared at latter dwc_otg_ep_clear_stall, ++//which cause the testusb item 13 failed(Host:pc, device: otg device) ++ if (depctl.b.epena) { ++ depctl.b.epdis = 1; ++ } ++#endif ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ else { ++ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the stall bit */ ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s: DEPCTL(%.8x)=%0x\n",__func__,(u32)depctl_addr,dwc_read_reg32(depctl_addr)); ++ ++ return; ++} ++ ++/** ++ * Clear the EP STALL. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to clear stall from. 
++ */ ++void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ if (ep->is_in == 1) { ++ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); ++ } ++ else { ++ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); ++ } ++ ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* clear the stall bits */ ++ depctl.b.stall = 0; ++ ++ /* ++ * USB Spec 9.4.5: For endpoints using data toggle, regardless ++ * of whether an endpoint has the Halt feature set, a ++ * ClearFeature(ENDPOINT_HALT) request always results in the ++ * data toggle being reinitialized to DATA0. ++ */ ++ if (ep->type == DWC_OTG_EP_TYPE_INTR || ++ ep->type == DWC_OTG_EP_TYPE_BULK) { ++ depctl.b.setd0pid = 1; /* DATA0 */ ++ } ++ ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); ++ return; ++} ++ ++/** ++ * This function reads a packet from the Rx FIFO into the destination ++ * buffer. To read SETUP data use dwc_otg_read_setup_packet. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dest Destination buffer for the packet. ++ * @param bytes Number of bytes to copy to the destination. ++ */ ++void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes) ++{ ++ int i; ++ int word_count = (bytes + 3) / 4; ++ ++ volatile uint32_t *fifo = core_if->data_fifo[0]; ++ uint32_t *data_buff = (uint32_t *)dest; ++ ++ /** ++ * @todo Account for the case where _dest is not dword aligned. This ++ * requires reading data from the FIFO into a uint32_t temp buffer, ++ * then moving it into the data buffer. 
++ */ ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__, ++ core_if, dest, bytes); ++ ++ for (i=0; idev_if->dev_global_regs->dcfg; ++ DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->dctl; ++ DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->dsts; ++ DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->diepmsk; ++ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->doepmsk; ++ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->daint; ++ DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->daintmsk; ++ DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->dtknqr1; ++ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ if (core_if->hwcfg2.b.dev_token_q_depth > 6) { ++ addr=&core_if->dev_if->dev_global_regs->dtknqr2; ++ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", ++ (uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++ addr=&core_if->dev_if->dev_global_regs->dvbusdis; ++ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ addr=&core_if->dev_if->dev_global_regs->dvbuspulse; ++ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", ++ (uint32_t)addr,dwc_read_reg32(addr)); ++ ++ if (core_if->hwcfg2.b.dev_token_q_depth > 14) { ++ addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl; ++ DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n", ++ (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++/* ++ if (core_if->hwcfg2.b.dev_token_q_depth > 22) { ++ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; ++ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", ++ (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++*/ ++ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; ++ DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr)); ++ ++ addr=&core_if->dev_if->dev_global_regs->deachint; ++ DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->dev_global_regs->deachintmsk; ++ DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { ++ addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i]; ++ DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++ ++ ++ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { ++ addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i]; ++ DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); ++ } ++ ++ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { ++ DWC_PRINT("Device IN EP %d Registers\n", i); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepctl; ++ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepint; ++ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz; ++ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepdma; ++ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ 
addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts; ++ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ //reading depdmab in non desc dma mode would halt the ahb bus... ++ if(core_if->dma_desc_enable){ ++ addr=&core_if->dev_if->in_ep_regs[i]->diepdmab; ++ DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ } ++ ++ ++ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { ++ DWC_PRINT("Device OUT EP %d Registers\n", i); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepctl; ++ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepfn; ++ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepint; ++ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz; ++ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepdma; ++ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ //reading depdmab in non desc dma mode would halt the ahb bus... ++ if(core_if->dma_desc_enable){ ++ addr=&core_if->dev_if->out_ep_regs[i]->doepdmab; ++ DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++ } ++ ++ ++ ++ return; ++} ++ ++/** ++ * This functions reads the SPRAM and prints its content ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if) ++{ ++ volatile uint8_t *addr, *start_addr, *end_addr; ++ ++ DWC_PRINT("SPRAM Data:\n"); ++ start_addr = (void*)core_if->core_global_regs; ++ DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr); ++ start_addr += 0x00028000; ++ end_addr=(void*)core_if->core_global_regs; ++ end_addr += 0x000280e0; ++ ++ for(addr = start_addr; addr < end_addr; addr+=16) ++ { ++ DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr, ++ addr[0], ++ addr[1], ++ addr[2], ++ addr[3], ++ addr[4], ++ addr[5], ++ addr[6], ++ addr[7], ++ addr[8], ++ addr[9], ++ addr[10], ++ addr[11], ++ addr[12], ++ addr[13], ++ addr[14], ++ addr[15] ++ ); ++ } ++ ++ return; ++} ++/** ++ * This function reads the host registers and prints them ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if) ++{ ++ int i; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Host Global Registers\n"); ++ addr=&core_if->host_if->host_global_regs->hcfg; ++ DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->hfir; ++ DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->hfnum; ++ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->hptxsts; ++ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->haint; ++ DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->host_global_regs->haintmsk; ++ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=core_if->host_if->hprt0; ++ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ for (i=0; icore_params->host_channels; i++) ++ { ++ DWC_PRINT("Host Channel %d Specific Registers\n", i); ++ addr=&core_if->host_if->hc_regs[i]->hcchar; ++ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcsplt; ++ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcint; ++ DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcintmsk; ++ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hctsiz; ++ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->host_if->hc_regs[i]->hcdma; ++ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ return; ++} ++ ++/** ++ * This function reads the core global registers and prints them ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if) ++{ ++ int i,size; ++ char* str; ++ volatile uint32_t *addr; ++ ++ DWC_PRINT("Core Global Registers\n"); ++ addr=&core_if->core_global_regs->gotgctl; ++ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gotgint; ++ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gahbcfg; ++ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gusbcfg; ++ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->grstctl; ++ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gintsts; ++ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gintmsk; ++ DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->grxstsr; ++ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ //addr=&core_if->core_global_regs->grxstsp; ++ //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->grxfsiz; ++ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gnptxfsiz; ++ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gnptxsts; ++ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gi2cctl; ++ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gpvndctl; ++ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ggpio; ++ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->guid; ++ DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->gsnpsid; ++ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg1; ++ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg2; ++ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg3; ++ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->ghwcfg4; ++ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->core_global_regs->hptxfsiz; ++ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ size=(core_if->hwcfg4.b.ded_fifo_en)? ++ core_if->hwcfg4.b.num_in_eps:core_if->hwcfg4.b.num_dev_perio_in_ep; ++ str=(core_if->hwcfg4.b.ded_fifo_en)?"DIEPTXF":"DPTXFSIZ"; ++ for (i=0; icore_global_regs->dptxfsiz_dieptxf[i]; ++ DWC_PRINT("%s[%d] @0x%08X : 0x%08X\n",str,i,(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++} ++ ++/** ++ * Flush a Tx FIFO. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param num Tx FIFO to flush. 
++ */ ++void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if, ++ const int num) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num); ++ ++ greset.b.txfflsh = 1; ++ greset.b.txfnum = num; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", ++ __func__, greset.d32, ++ dwc_read_reg32(&global_regs->gnptxsts)); ++ break; ++ } ++ } ++ while (greset.b.txfflsh == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Flush Rx FIFO. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); ++ /* ++ * ++ */ ++ greset.b.rxfflsh = 1; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } ++ while (greset.b.rxfflsh == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Do core a soft reset of the core. Be careful with this because it ++ * resets all the internal state machines of the core. ++ */ ++void dwc_otg_core_reset(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); ++ /* Wait for AHB master IDLE state. */ ++ do { ++ UDELAY(10); ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 100000) { ++ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ return; ++ } ++ } ++ while (greset.b.ahbidle == 0); ++ ++ /* Core Soft Reset */ ++ count = 0; ++ greset.b.csftrst = 1; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } ++ while (greset.b.csftrst == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ MDELAY(100); ++ ++ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_CILV, "GINTSTS=%.8x\n", dwc_read_reg32(&global_regs->gintsts)); ++ ++} ++ ++ ++ ++/** ++ * Register HCD callbacks. The callbacks are used to start and stop ++ * the HCD for interrupt processing. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param cb the HCD callback structure. ++ * @param p pointer to be passed to callback function (usb_hcd*). ++ */ ++void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if, ++ dwc_otg_cil_callbacks_t *cb, ++ void *p) ++{ ++ core_if->hcd_cb = cb; ++ cb->p = p; ++} ++ ++/** ++ * Register PCD callbacks. The callbacks are used to start and stop ++ * the PCD for interrupt processing. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param cb the PCD callback structure. ++ * @param p pointer to be passed to callback function (pcd*). 
++ */ ++void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if, ++ dwc_otg_cil_callbacks_t *cb, ++ void *p) ++{ ++ core_if->pcd_cb = cb; ++ cb->p = p; ++} ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function writes isoc data per 1 (micro)frame into tx fifo ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ dtxfsts_data_t txstatus = {.d32 = 0}; ++ uint32_t len = 0; ++ uint32_t dwords; ++ ++ ep->xfer_len = ep->data_per_frame; ++ ep->xfer_count = 0; ++ ++ ep_regs = core_if->dev_if->in_ep_regs[ep->num]; ++ ++ len = ep->xfer_len - ep->xfer_count; ++ ++ if (len > ep->maxpacket) { ++ len = ep->maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32); ++ ++ while (txstatus.b.txfspcavail > dwords && ++ ep->xfer_count < ep->xfer_len && ++ ep->xfer_len != 0) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, ep, 0); ++ ++ len = ep->xfer_len - ep->xfer_count; ++ if (len > ep->maxpacket) { ++ len = ep->maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32); ++ } ++} ++ ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ depctl_data_t depctl = { .d32 = 0 }; ++ dsts_data_t dsts = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ if(ep->is_in) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ ep->xfer_len = ep->data_per_frame; ++ ep->xfer_count = 0; ++ ep->xfer_buff = ep->cur_pkt_addr; ++ ep->dma_addr = ep->cur_pkt_dma_addr; ++ ++ if(ep->is_in) { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->xfer_len; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ deptsiz.b.mc = deptsiz.b.pktcnt; ++ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); ++ } ++ } else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); ++ ++ if (core_if->dma_enable) { ++ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ ++ /** Enable endpoint, clear nak */ ++ ++ depctl.d32 = 0; ++ if(ep->bInterval == 1) { ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ep->next_frame = dsts.b.soffn + ep->bInterval; ++ ++ if(ep->next_frame & 0x1) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } else { ++ ep->next_frame += ep->bInterval; ++ ++ if(ep->next_frame & 0x1) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, 0, depctl.d32); ++ depctl.d32 = dwc_read_reg32(addr); ++ ++ if(ep->is_in && core_if->dma_enable == 0) { ++ write_isoc_frame_data(core_if, ep); ++ } ++ ++} ++ ++#endif //DWC_EN_ISOC +--- /dev/null ++++ b/drivers/usb/dwc/otg_cil.h +@@ -0,0 +1,1106 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.h $ ++ * $Revision: #91 $ ++ * $Date: 2008/09/19 $ ++ * $Change: 1099526 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++ ++#if !defined(__DWC_CIL_H__) ++#define __DWC_CIL_H__ ++ ++#include ++#include ++#include ++//#include ++ ++#include "otg_plat.h" ++#include "otg_regs.h" ++#ifdef DEBUG ++#include "linux/timer.h" ++#endif ++ ++/** ++ * @file ++ * This file contains the interface to the Core Interface Layer. ++ */ ++ ++ ++/** Macros defined for DWC OTG HW Release verison */ ++#define OTG_CORE_REV_2_00 0x4F542000 ++#define OTG_CORE_REV_2_60a 0x4F54260A ++#define OTG_CORE_REV_2_71a 0x4F54271A ++#define OTG_CORE_REV_2_72a 0x4F54272A ++ ++/** ++*/ ++typedef struct iso_pkt_info ++{ ++ uint32_t offset; ++ uint32_t length; ++ int32_t status; ++} iso_pkt_info_t; ++/** ++ * The dwc_ep structure represents the state of a single ++ * endpoint when acting in device mode. It contains the data items ++ * needed for an endpoint to be activated and transfer packets. ++ */ ++typedef struct dwc_ep ++{ ++ /** EP number used for register address lookup */ ++ uint8_t num; ++ /** EP direction 0 = OUT */ ++ unsigned is_in : 1; ++ /** EP active. */ ++ unsigned active : 1; ++ ++ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO ++ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/ ++ unsigned tx_fifo_num : 4; ++ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */ ++ unsigned type : 2; ++#define DWC_OTG_EP_TYPE_CONTROL 0 ++#define DWC_OTG_EP_TYPE_ISOC 1 ++#define DWC_OTG_EP_TYPE_BULK 2 ++#define DWC_OTG_EP_TYPE_INTR 3 ++ ++ /** DATA start PID for INTR and BULK EP */ ++ unsigned data_pid_start : 1; ++ /** Frame (even/odd) for ISOC EP */ ++ unsigned even_odd_frame : 1; ++ /** Max Packet bytes */ ++ unsigned maxpacket : 11; ++ ++ /** Max Transfer size */ ++ unsigned maxxfer : 16; ++ ++ /** @name Transfer state */ ++ /** @{ */ ++ ++ /** ++ * Pointer to the beginning of the transfer buffer -- do not modify ++ * during transfer. ++ */ ++ ++ uint32_t dma_addr; ++ ++ uint32_t dma_desc_addr; ++ dwc_otg_dma_desc_t* desc_addr; ++ ++ ++ uint8_t *start_xfer_buff; ++ /** pointer to the transfer buffer */ ++ uint8_t *xfer_buff; ++ /** Number of bytes to transfer */ ++ unsigned xfer_len : 19; ++ /** Number of bytes transferred. 
*/ ++ unsigned xfer_count : 19; ++ /** Sent ZLP */ ++ unsigned sent_zlp : 1; ++ /** Total len for control transfer */ ++ unsigned total_len : 19; ++ ++ /** stall clear flag */ ++ unsigned stall_clear_flag : 1; ++ ++ /** Allocated DMA Desc count */ ++ uint32_t desc_cnt; ++ ++ uint32_t aligned_dma_addr; ++ uint32_t aligned_buf_size; ++ uint8_t *aligned_buf; ++ ++ ++#ifdef DWC_EN_ISOC ++ /** ++ * Variables specific for ISOC EPs ++ * ++ */ ++ /** DMA addresses of ISOC buffers */ ++ uint32_t dma_addr0; ++ uint32_t dma_addr1; ++ ++ uint32_t iso_dma_desc_addr; ++ dwc_otg_dma_desc_t* iso_desc_addr; ++ ++ /** pointer to the transfer buffers */ ++ uint8_t *xfer_buff0; ++ uint8_t *xfer_buff1; ++ ++ /** number of ISOC Buffer is processing */ ++ uint32_t proc_buf_num; ++ /** Interval of ISOC Buffer processing */ ++ uint32_t buf_proc_intrvl; ++ /** Data size for regular frame */ ++ uint32_t data_per_frame; ++ ++ /* todo - pattern data support is to be implemented in the future */ ++ /** Data size for pattern frame */ ++ uint32_t data_pattern_frame; ++ /** Frame number of pattern data */ ++ uint32_t sync_frame; ++ ++ /** bInterval */ ++ uint32_t bInterval; ++ /** ISO Packet number per frame */ ++ uint32_t pkt_per_frm; ++ /** Next frame num for which will be setup DMA Desc */ ++ uint32_t next_frame; ++ /** Number of packets per buffer processing */ ++ uint32_t pkt_cnt; ++ /** Info for all isoc packets */ ++ iso_pkt_info_t *pkt_info; ++ /** current pkt number */ ++ uint32_t cur_pkt; ++ /** current pkt number */ ++ uint8_t *cur_pkt_addr; ++ /** current pkt number */ ++ uint32_t cur_pkt_dma_addr; ++#endif //DWC_EN_ISOC ++/** @} */ ++} dwc_ep_t; ++ ++/* ++ * Reasons for halting a host channel. ++ */ ++typedef enum dwc_otg_halt_status ++{ ++ DWC_OTG_HC_XFER_NO_HALT_STATUS, ++ DWC_OTG_HC_XFER_COMPLETE, ++ DWC_OTG_HC_XFER_URB_COMPLETE, ++ DWC_OTG_HC_XFER_ACK, ++ DWC_OTG_HC_XFER_NAK, ++ DWC_OTG_HC_XFER_NYET, ++ DWC_OTG_HC_XFER_STALL, ++ DWC_OTG_HC_XFER_XACT_ERR, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN, ++ DWC_OTG_HC_XFER_BABBLE_ERR, ++ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR, ++ DWC_OTG_HC_XFER_AHB_ERR, ++ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, ++ DWC_OTG_HC_XFER_URB_DEQUEUE ++} dwc_otg_halt_status_e; ++ ++/** ++ * Host channel descriptor. This structure represents the state of a single ++ * host channel when acting in host mode. It contains the data items needed to ++ * transfer packets to an endpoint via a host channel. ++ */ ++typedef struct dwc_hc ++{ ++ /** Host channel number used for register address lookup */ ++ uint8_t hc_num; ++ ++ /** Device to access */ ++ unsigned dev_addr : 7; ++ ++ /** EP to access */ ++ unsigned ep_num : 4; ++ ++ /** EP direction. 0: OUT, 1: IN */ ++ unsigned ep_is_in : 1; ++ ++ /** ++ * EP speed. ++ * One of the following values: ++ * - DWC_OTG_EP_SPEED_LOW ++ * - DWC_OTG_EP_SPEED_FULL ++ * - DWC_OTG_EP_SPEED_HIGH ++ */ ++ unsigned speed : 2; ++#define DWC_OTG_EP_SPEED_LOW 0 ++#define DWC_OTG_EP_SPEED_FULL 1 ++#define DWC_OTG_EP_SPEED_HIGH 2 ++ ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - DWC_OTG_EP_TYPE_CONTROL: 0 ++ * - DWC_OTG_EP_TYPE_ISOC: 1 ++ * - DWC_OTG_EP_TYPE_BULK: 2 ++ * - DWC_OTG_EP_TYPE_INTR: 3 ++ */ ++ unsigned ep_type : 2; ++ ++ /** Max packet size in bytes */ ++ unsigned max_packet : 11; ++ ++ /** ++ * PID for initial transaction. ++ * 0: DATA0,
++ * 1: DATA2,
++ * 2: DATA1,
++ * 3: MDATA (non-Control EP), ++ * SETUP (Control EP) ++ */ ++ unsigned data_pid_start : 2; ++#define DWC_OTG_HC_PID_DATA0 0 ++#define DWC_OTG_HC_PID_DATA2 1 ++#define DWC_OTG_HC_PID_DATA1 2 ++#define DWC_OTG_HC_PID_MDATA 3 ++#define DWC_OTG_HC_PID_SETUP 3 ++ ++ /** Number of periodic transactions per (micro)frame */ ++ unsigned multi_count: 2; ++ ++ /** @name Transfer State */ ++ /** @{ */ ++ ++ /** Pointer to the current transfer buffer position. */ ++ uint8_t *xfer_buff; ++ /** Total number of bytes to transfer. */ ++ uint32_t xfer_len; ++ /** Number of bytes transferred so far. */ ++ uint32_t xfer_count; ++ /** Packet count at start of transfer.*/ ++ uint16_t start_pkt_count; ++ ++ /** ++ * Flag to indicate whether the transfer has been started. Set to 1 if ++ * it has been started, 0 otherwise. ++ */ ++ uint8_t xfer_started; ++ ++ /** ++ * Set to 1 to indicate that a PING request should be issued on this ++ * channel. If 0, process normally. ++ */ ++ uint8_t do_ping; ++ ++ /** ++ * Set to 1 to indicate that the error count for this transaction is ++ * non-zero. Set to 0 if the error count is 0. ++ */ ++ uint8_t error_state; ++ ++ /** ++ * Set to 1 to indicate that this channel should be halted the next ++ * time a request is queued for the channel. This is necessary in ++ * slave mode if no request queue space is available when an attempt ++ * is made to halt the channel. ++ */ ++ uint8_t halt_on_queue; ++ ++ /** ++ * Set to 1 if the host channel has been halted, but the core is not ++ * finished flushing queued requests. Otherwise 0. ++ */ ++ uint8_t halt_pending; ++ ++ /** ++ * Reason for halting the host channel. ++ */ ++ dwc_otg_halt_status_e halt_status; ++ ++ /* ++ * Split settings for the host channel ++ */ ++ uint8_t do_split; /**< Enable split for the channel */ ++ uint8_t complete_split; /**< Enable complete split */ ++ uint8_t hub_addr; /**< Address of high speed hub */ ++ ++ uint8_t port_addr; /**< Port of the low/full speed device */ ++ /** Split transaction position ++ * One of the following values: ++ * - DWC_HCSPLIT_XACTPOS_MID ++ * - DWC_HCSPLIT_XACTPOS_BEGIN ++ * - DWC_HCSPLIT_XACTPOS_END ++ * - DWC_HCSPLIT_XACTPOS_ALL */ ++ uint8_t xact_pos; ++ ++ /** Set when the host channel does a short read. */ ++ uint8_t short_read; ++ ++ /** ++ * Number of requests issued for this channel since it was assigned to ++ * the current transfer (not counting PINGs). ++ */ ++ uint8_t requests; ++ ++ /** ++ * Queue Head for the transfer being processed by this channel. ++ */ ++ struct dwc_otg_qh *qh; ++ ++ /** @} */ ++ ++ /** Entry in list of host channels. */ ++ struct list_head hc_list_entry; ++} dwc_hc_t; ++ ++/** ++ * The following parameters may be specified when starting the module. These ++ * parameters define how the DWC_otg controller should be configured. ++ * Parameter values are passed to the CIL initialization function ++ * dwc_otg_cil_init. ++ */ ++typedef struct dwc_otg_core_params ++{ ++ int32_t opt; ++#define dwc_param_opt_default 1 ++ ++ /** ++ * Specifies the OTG capabilities. The driver will automatically ++ * detect the value for this parameter if none is specified. 
++ * 0 - HNP and SRP capable (default) ++ * 1 - SRP Only capable ++ * 2 - No HNP/SRP capable ++ */ ++ int32_t otg_cap; ++#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 ++#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 ++#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 ++//#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE ++#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE ++ ++ /** ++ * Specifies whether to use slave or DMA mode for accessing the data ++ * FIFOs. The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - Slave ++ * 1 - DMA (default, if available) ++ */ ++ int32_t dma_enable; ++#define dwc_param_dma_enable_default 1 ++ ++ /** ++ * When DMA mode is enabled specifies whether to use address DMA or DMA Descritor mode for accessing the data ++ * FIFOs in device mode. The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - address DMA ++ * 1 - DMA Descriptor(default, if available) ++ */ ++ int32_t dma_desc_enable; ++#define dwc_param_dma_desc_enable_default 0 ++ /** The DMA Burst size (applicable only for External DMA ++ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32) ++ */ ++ int32_t dma_burst_size; /* Translate this to GAHBCFG values */ ++//#define dwc_param_dma_burst_size_default 32 ++#define dwc_param_dma_burst_size_default 1 ++ ++ /** ++ * Specifies the maximum speed of operation in host and device mode. ++ * The actual speed depends on the speed of the attached device and ++ * the value of phy_type. The actual speed depends on the speed of the ++ * attached device. ++ * 0 - High Speed (default) ++ * 1 - Full Speed ++ */ ++ int32_t speed; ++#define dwc_param_speed_default 0 ++#define DWC_SPEED_PARAM_HIGH 0 ++#define DWC_SPEED_PARAM_FULL 1 ++ ++ /** Specifies whether low power mode is supported when attached ++ * to a Full Speed or Low Speed device in host mode. ++ * 0 - Don't support low power mode (default) ++ * 1 - Support low power mode ++ */ ++ int32_t host_support_fs_ls_low_power; ++#define dwc_param_host_support_fs_ls_low_power_default 0 ++ ++ /** Specifies the PHY clock rate in low power mode when connected to a ++ * Low Speed device in host mode. This parameter is applicable only if ++ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS ++ * then defaults to 6 MHZ otherwise 48 MHZ. ++ * ++ * 0 - 48 MHz ++ * 1 - 6 MHz ++ */ ++ int32_t host_ls_low_power_phy_clk; ++#define dwc_param_host_ls_low_power_phy_clk_default 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 ++ ++ /** ++ * 0 - Use cC FIFO size parameters ++ * 1 - Allow dynamic FIFO sizing (default) ++ */ ++ int32_t enable_dynamic_fifo; ++#define dwc_param_enable_dynamic_fifo_default 1 ++ ++ /** Total number of 4-byte words in the data FIFO memory. This ++ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic ++ * Tx FIFOs. ++ * 32 to 32768 (default 8192) ++ * Note: The total FIFO memory depth in the FPGA configuration is 8192. ++ */ ++ int32_t data_fifo_size; ++#define dwc_param_data_fifo_size_default 8192 ++ ++ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic ++ * FIFO sizing is enabled. 
++ * 16 to 32768 (default 1064) ++ */ ++ int32_t dev_rx_fifo_size; ++//#define dwc_param_dev_rx_fifo_size_default 1064 ++#define dwc_param_dev_rx_fifo_size_default 0x100 ++ ++ /** ++ * Specifies whether dedicated transmit FIFOs are ++ * enabled for non periodic IN endpoints in device mode ++ * 0 - No ++ * 1 - Yes ++ */ ++ int32_t en_multiple_tx_fifo; ++#define dwc_param_en_multiple_tx_fifo_default 1 ++ ++ /** Number of 4-byte words in each of the Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS]; ++//#define dwc_param_dev_tx_fifo_size_default 256 ++#define dwc_param_dev_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode ++ * when dynamic FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t dev_nperio_tx_fifo_size; ++//#define dwc_param_dev_nperio_tx_fifo_size_default 1024 ++#define dwc_param_dev_nperio_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in each of the periodic Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++//#define dwc_param_dev_perio_tx_fifo_size_default 256 ++#define dwc_param_dev_perio_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_rx_fifo_size; ++//#define dwc_param_host_rx_fifo_size_default 1024 ++#define dwc_param_host_rx_fifo_size_default 0x292 ++ ++ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode ++ * when Dynamic FIFO sizing is enabled in the core. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_nperio_tx_fifo_size; ++//#define dwc_param_host_nperio_tx_fifo_size_default 1024 ++//#define dwc_param_host_nperio_tx_fifo_size_default 0x292 ++#define dwc_param_host_nperio_tx_fifo_size_default 0x80 ++ ++ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_perio_tx_fifo_size; ++//#define dwc_param_host_perio_tx_fifo_size_default 1024 ++#define dwc_param_host_perio_tx_fifo_size_default 0x292 ++ ++ /** The maximum transfer size supported in bytes. ++ * 2047 to 65,535 (default 65,535) ++ */ ++ int32_t max_transfer_size; ++#define dwc_param_max_transfer_size_default 65535 ++ ++ /** The maximum number of packets in a transfer. ++ * 15 to 511 (default 511) ++ */ ++ int32_t max_packet_count; ++#define dwc_param_max_packet_count_default 511 ++ ++ /** The number of host channel registers to use. ++ * 1 to 16 (default 12) ++ * Note: The FPGA configuration supports a maximum of 12 host channels. ++ */ ++ int32_t host_channels; ++//#define dwc_param_host_channels_default 12 ++#define dwc_param_host_channels_default 16 ++ ++ /** The number of endpoints in addition to EP0 available for device ++ * mode operations. ++ * 1 to 15 (default 6 IN and OUT) ++ * Note: The FPGA configuration supports a maximum of 6 IN and OUT ++ * endpoints in addition to EP0. ++ */ ++ int32_t dev_endpoints; ++//#define dwc_param_dev_endpoints_default 6 ++#define dwc_param_dev_endpoints_default 8 ++ ++ /** ++ * Specifies the type of PHY interface to use. By default, the driver ++ * will automatically detect the phy_type. 
++ * ++ * 0 - Full Speed PHY ++ * 1 - UTMI+ (default) ++ * 2 - ULPI ++ */ ++ int32_t phy_type; ++#define DWC_PHY_TYPE_PARAM_FS 0 ++#define DWC_PHY_TYPE_PARAM_UTMI 1 ++#define DWC_PHY_TYPE_PARAM_ULPI 2 ++#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI ++ ++ /** ++ * Specifies the UTMI+ Data Width. This parameter is ++ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI ++ * PHY_TYPE, this parameter indicates the data width between ++ * the MAC and the ULPI Wrapper.) Also, this parameter is ++ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set ++ * to "8 and 16 bits", meaning that the core has been ++ * configured to work at either data path width. ++ * ++ * 8 or 16 bits (default 16) ++ */ ++ int32_t phy_utmi_width; ++#define dwc_param_phy_utmi_width_default 16 ++ ++ /** ++ * Specifies whether the ULPI operates at double or single ++ * data rate. This parameter is only applicable if PHY_TYPE is ++ * ULPI. ++ * ++ * 0 - single data rate ULPI interface with 8 bit wide data ++ * bus (default) ++ * 1 - double data rate ULPI interface with 4 bit wide data ++ * bus ++ */ ++ int32_t phy_ulpi_ddr; ++#define dwc_param_phy_ulpi_ddr_default 0 ++ ++ /** ++ * Specifies whether to use the internal or external supply to ++ * drive the vbus with a ULPI phy. ++ */ ++ int32_t phy_ulpi_ext_vbus; ++#define DWC_PHY_ULPI_INTERNAL_VBUS 0 ++#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 ++#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS ++ ++ /** ++ * Specifies whether to use the I2Cinterface for full speed PHY. This ++ * parameter is only applicable if PHY_TYPE is FS. ++ * 0 - No (default) ++ * 1 - Yes ++ */ ++ int32_t i2c_enable; ++#define dwc_param_i2c_enable_default 0 ++ ++ int32_t ulpi_fs_ls; ++#define dwc_param_ulpi_fs_ls_default 0 ++ ++ int32_t ts_dline; ++#define dwc_param_ts_dline_default 0 ++ ++ /** Thresholding enable flag- ++ * bit 0 - enable non-ISO Tx thresholding ++ * bit 1 - enable ISO Tx thresholding ++ * bit 2 - enable Rx thresholding ++ */ ++ uint32_t thr_ctl; ++#define dwc_param_thr_ctl_default 0 ++ ++ /** Thresholding length for Tx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t tx_thr_length; ++#define dwc_param_tx_thr_length_default 64 ++ ++ /** Thresholding length for Rx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t rx_thr_length; ++#define dwc_param_rx_thr_length_default 64 ++ ++ /** Per Transfer Interrupt ++ * mode enable flag ++ * 1 - Enabled ++ * 0 - Disabled ++ */ ++ uint32_t pti_enable; ++#define dwc_param_pti_enable_default 0 ++ ++ /** Molti Processor Interrupt ++ * mode enable flag ++ * 1 - Enabled ++ * 0 - Disabled ++ */ ++ uint32_t mpi_enable; ++#define dwc_param_mpi_enable_default 0 ++ ++} dwc_otg_core_params_t; ++ ++#ifdef DEBUG ++struct dwc_otg_core_if; ++typedef struct hc_xfer_info ++{ ++ struct dwc_otg_core_if *core_if; ++ dwc_hc_t *hc; ++} hc_xfer_info_t; ++#endif ++ ++/** ++ * The dwc_otg_core_if structure contains information needed to manage ++ * the DWC_otg controller acting in either host or device mode. It ++ * represents the programming view of the controller as a whole. ++ */ ++typedef struct dwc_otg_core_if ++{ ++ /** Parameters that define how the core should be configured.*/ ++ dwc_otg_core_params_t *core_params; ++ ++ /** Core Global registers starting at offset 000h. 
*/ ++ dwc_otg_core_global_regs_t *core_global_regs; ++ ++ /** Device-specific information */ ++ dwc_otg_dev_if_t *dev_if; ++ /** Host-specific information */ ++ dwc_otg_host_if_t *host_if; ++ ++ /** Value from SNPSID register */ ++ uint32_t snpsid; ++ ++ /* ++ * Set to 1 if the core PHY interface bits in USBCFG have been ++ * initialized. ++ */ ++ uint8_t phy_init_done; ++ ++ /* ++ * SRP Success flag, set by srp success interrupt in FS I2C mode ++ */ ++ uint8_t srp_success; ++ uint8_t srp_timer_started; ++ ++ /* Common configuration information */ ++ /** Power and Clock Gating Control Register */ ++ volatile uint32_t *pcgcctl; ++#define DWC_OTG_PCGCCTL_OFFSET 0xE00 ++ ++ /** Push/pop addresses for endpoints or host channels.*/ ++ uint32_t *data_fifo[MAX_EPS_CHANNELS]; ++#define DWC_OTG_DATA_FIFO_OFFSET 0x1000 ++#define DWC_OTG_DATA_FIFO_SIZE 0x1000 ++ ++ /** Total RAM for FIFOs (Bytes) */ ++ uint16_t total_fifo_size; ++ /** Size of Rx FIFO (Bytes) */ ++ uint16_t rx_fifo_size; ++ /** Size of Non-periodic Tx FIFO (Bytes) */ ++ uint16_t nperio_tx_fifo_size; ++ ++ ++ /** 1 if DMA is enabled, 0 otherwise. */ ++ uint8_t dma_enable; ++ ++ /** 1 if Descriptor DMA mode is enabled, 0 otherwise. */ ++ uint8_t dma_desc_enable; ++ ++ /** 1 if PTI Enhancement mode is enabled, 0 otherwise. */ ++ uint8_t pti_enh_enable; ++ ++ /** 1 if MPI Enhancement mode is enabled, 0 otherwise. */ ++ uint8_t multiproc_int_enable; ++ ++ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */ ++ uint8_t en_multiple_tx_fifo; ++ ++ /** Set to 1 if multiple packets of a high-bandwidth transfer is in ++ * process of being queued */ ++ uint8_t queuing_high_bandwidth; ++ ++ /** Hardware Configuration -- stored here for convenience.*/ ++ hwcfg1_data_t hwcfg1; ++ hwcfg2_data_t hwcfg2; ++ hwcfg3_data_t hwcfg3; ++ hwcfg4_data_t hwcfg4; ++ ++ /** Host and Device Configuration -- stored here for convenience.*/ ++ hcfg_data_t hcfg; ++ dcfg_data_t dcfg; ++ ++ /** The operational State, during transations ++ * (a_host>>a_peripherial and b_device=>b_host) this may not ++ * match the core but allows the software to determine ++ * transitions. ++ */ ++ uint8_t op_state; ++ ++ /** ++ * Set to 1 if the HCD needs to be restarted on a session request ++ * interrupt. This is required if no connector ID status change has ++ * occurred since the HCD was last disconnected. ++ */ ++ uint8_t restart_hcd_on_session_req; ++ ++ /** HCD callbacks */ ++ /** A-Device is a_host */ ++#define A_HOST (1) ++ /** A-Device is a_suspend */ ++#define A_SUSPEND (2) ++ /** A-Device is a_peripherial */ ++#define A_PERIPHERAL (3) ++ /** B-Device is operating as a Peripheral. */ ++#define B_PERIPHERAL (4) ++ /** B-Device is operating as a Host. 
*/ ++#define B_HOST (5) ++ ++ /** HCD callbacks */ ++ struct dwc_otg_cil_callbacks *hcd_cb; ++ /** PCD callbacks */ ++ struct dwc_otg_cil_callbacks *pcd_cb; ++ ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t p_tx_msk; ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t tx_msk; ++ ++ /** Workqueue object used for handling several interrupts */ ++ struct workqueue_struct *wq_otg; ++ ++ /** Work object used for handling "Connector ID Status Change" Interrupt */ ++ struct work_struct w_conn_id; ++ ++ /** Work object used for handling "Wakeup Detected" Interrupt */ ++ struct delayed_work w_wkp; ++ ++#ifdef DEBUG ++ uint32_t start_hcchar_val[MAX_EPS_CHANNELS]; ++ ++ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS]; ++ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS]; ++ ++ uint32_t hfnum_7_samples; ++ uint64_t hfnum_7_frrem_accum; ++ uint32_t hfnum_0_samples; ++ uint64_t hfnum_0_frrem_accum; ++ uint32_t hfnum_other_samples; ++ uint64_t hfnum_other_frrem_accum; ++#endif ++ ++ ++} dwc_otg_core_if_t; ++ ++/*We must clear S3C24XX_EINTPEND external interrupt register ++ * because after clearing in this register trigerred IRQ from ++ * H/W core in kernel interrupt can be occured again before OTG ++ * handlers clear all IRQ sources of Core registers because of ++ * timing latencies and Low Level IRQ Type. ++ */ ++ ++#ifdef CONFIG_MACH_IPMATE ++#define S3C2410X_CLEAR_EINTPEND() \ ++do { \ ++ if (!dwc_otg_read_core_intr(core_if)) { \ ++ __raw_writel(1UL << 11,S3C24XX_EINTPEND); \ ++ } \ ++} while (0) ++#else ++#define S3C2410X_CLEAR_EINTPEND() do { } while (0) ++#endif ++ ++/* ++ * The following functions are functions for works ++ * using during handling some interrupts ++ */ ++extern void w_conn_id_status_change(struct work_struct *p); ++extern void w_wakeup_detected(struct work_struct *p); ++ ++ ++/* ++ * The following functions support initialization of the CIL driver component ++ * and the DWC_otg controller. ++ */ ++extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, ++ dwc_otg_core_params_t *_core_params); ++extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++ ++/** @name Device CIL Functions ++ * The following functions support managing the DWC_otg controller in device ++ * mode. 
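The S3C2410X_CLEAR_EINTPEND macro defined above is a common kernel idiom: a platform-specific quirk that expands to a real statement on the affected board and to an empty do { } while (0) everywhere else, so call sites stay free of #ifdefs. A minimal sketch of the idiom with invented names:

/* Conditional-quirk macro idiom (illustrative; not the real EINTPEND code). */
#include <stdio.h>

#ifdef CONFIG_MACH_EXAMPLE_BOARD
#define CLEAR_BOARD_IRQ_LATCH()				\
	do {						\
		printf("clearing board IRQ latch\n");	\
	} while (0)
#else
/* Expands to a statement, so "if (x) CLEAR_BOARD_IRQ_LATCH(); else ..." stays legal. */
#define CLEAR_BOARD_IRQ_LATCH() do { } while (0)
#endif

int main(void)
{
	CLEAR_BOARD_IRQ_LATCH();	/* no-op unless the board quirk is enabled */
	return 0;
}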
++ */ ++/**@{*/ ++extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest); ++extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma); ++extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_spram(dwc_otg_core_if_t *_core_if); ++#ifdef DWC_EN_ISOC ++extern void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); ++extern void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); ++#endif //DWC_EN_ISOC ++/**@}*/ ++ ++/** @name Host CIL Functions ++ * The following functions support managing the DWC_otg controller in host ++ * mode. ++ */ ++/**@{*/ ++extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, ++ dwc_hc_t *_hc, ++ dwc_otg_halt_status_e _halt_status); ++extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if); ++ ++/** ++ * This function Reads HPRT0 in preparation to modify. It keeps the ++ * WC bits 0 so that if they are read as 1, they won't clear when you ++ * write it back ++ */ ++static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if) ++{ ++ hprt0_data_t hprt0; ++ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0); ++ hprt0.b.prtena = 0; ++ hprt0.b.prtconndet = 0; ++ hprt0.b.prtenchng = 0; ++ hprt0.b.prtovrcurrchng = 0; ++ return hprt0.d32; ++} ++ ++extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if); ++/**@}*/ ++ ++/** @name Common CIL Functions ++ * The following functions support managing the DWC_otg controller in either ++ * device or host mode. 
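The dwc_otg_read_hprt0() helper above exists because several HPRT0 bits are write-1-to-clear: reading the register and writing the same value back would silently acknowledge change events the caller never handled, so those bits are zeroed before the value is reused. The sketch below shows the general pattern with invented bit masks; it is not the real HPRT0 layout.

/* Generic write-1-to-clear (W1C) read-modify-write sketch. */
#include <stdint.h>

#define PRT_W1C_MASK   0x0000002Eu	/* hypothetical: enable + *chng bits */
#define PRT_POWER_BIT  0x00001000u	/* hypothetical: port power          */

/* Pretend register accessors; a real driver would use readl()/writel(). */
static uint32_t reg_read(volatile uint32_t *r)              { return *r; }
static void     reg_write(volatile uint32_t *r, uint32_t v) { *r = v; }

/* Read the port register with all W1C bits forced to 0 so that a later
 * write-back cannot accidentally acknowledge pending change events. */
static uint32_t port_read_for_modify(volatile uint32_t *hprt0)
{
	return reg_read(hprt0) & ~PRT_W1C_MASK;
}

static void port_power_on(volatile uint32_t *hprt0)
{
	uint32_t v = port_read_for_modify(hprt0);

	reg_write(hprt0, v | PRT_POWER_BIT);
}

int main(void)
{
	uint32_t fake_hprt0 = PRT_W1C_MASK;	/* pretend change events are pending */

	port_power_on(&fake_hprt0);
	/* The value written back had the W1C bits cleared, so real hardware
	 * would keep those events pending instead of acknowledging them. */
	return 0;
}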
++ */ ++/**@{*/ ++ ++extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes); ++ ++extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if); ++ ++extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, ++ const int _num ); ++extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if ); ++ ++extern dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count); ++extern void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count); ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) & ++ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); ++} ++ ++/** ++ * This function returns the OTG Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint)); ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the IN endpoint interrupt bits. ++ */ ++static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t v; ++ ++ if(core_if->multiproc_int_enable) { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); ++ } else { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); ++ } ++ return (v & 0xffff); ++ ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the OUT endpoint interrupt bits. 
++ */ ++static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t v; ++ ++ if(core_if->multiproc_int_enable) { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); ++ } else { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); ++ } ++ ++ return ((v & 0xffff0000) >> 16); ++} ++ ++/** ++ * This function returns the Device IN EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *core_if, ++ dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ uint32_t v, msk, emp; ++ ++ if(core_if->multiproc_int_enable) { ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num]); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; ++ } else { ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; ++ } ++ ++ ++ return v; ++} ++/** ++ * This function returns the Device OUT EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if, ++ dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ uint32_t v; ++ doepmsk_data_t msk = { .d32 = 0 }; ++ ++ if(_core_if->multiproc_int_enable) { ++ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepeachintmsk[_ep->num]); ++ if(_core_if->pti_enh_enable) { ++ msk.b.pktdrpsts = 1; ++ } ++ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; ++ } else { ++ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk); ++ if(_core_if->pti_enh_enable) { ++ msk.b.pktdrpsts = 1; ++ } ++ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; ++ } ++ return v; ++} ++ ++/** ++ * This function returns the Host All Channel Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint)); ++} ++ ++static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint)); ++} ++ ++ ++/** ++ * This function returns the mode of the operation, host or device. ++ * ++ * @return 0 - Device Mode, 1 - Host Mode ++ */ ++static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1); ++} ++ ++static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE); ++} ++static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE); ++} ++ ++extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ); ++ ++ ++/**@}*/ ++ ++/** ++ * DWC_otg CIL callback structure. This structure allows the HCD and ++ * PCD to register functions used for starting and stopping the PCD ++ * and HCD for role change on for a DRD. 
++ */ ++typedef struct dwc_otg_cil_callbacks ++{ ++ /** Start function for role change */ ++ int (*start) (void *_p); ++ /** Stop Function for role change */ ++ int (*stop) (void *_p); ++ /** Disconnect Function for role change */ ++ int (*disconnect) (void *_p); ++ /** Resume/Remote wakeup Function */ ++ int (*resume_wakeup) (void *_p); ++ /** Suspend function */ ++ int (*suspend) (void *_p); ++ /** Session Start (SRP) */ ++ int (*session_start) (void *_p); ++ /** Pointer passed to start() and stop() */ ++ void *p; ++} dwc_otg_cil_callbacks_t; ++ ++extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++#ifndef warn ++#define warn printk ++#endif ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/dwc/otg_cil_intr.c +@@ -0,0 +1,852 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil_intr.c $ ++ * $Revision: #10 $ ++ * $Date: 2008/07/16 $ ++ * $Change: 1065567 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * This file contains the Common Interrupt handlers. 
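The dwc_otg_cil_callbacks_t interface declared above (registered via dwc_otg_cil_register_pcd_callbacks() earlier in this patch) is a plain function-pointer table: the PCD or HCD fills in the hooks it cares about plus an opaque context pointer, and the CIL invokes a hook only if it was provided. A minimal sketch of the same pattern, with invented names so it is not confused with the driver's own registration:

/* Generic "callback table + opaque context" sketch (names invented). */
#include <stdio.h>

struct role_callbacks {
	int (*start)(void *p);
	int (*stop)(void *p);
	void *p;			/* opaque context handed back to the callee */
};

struct controller {
	struct role_callbacks *pcd_cb;	/* NULL until a PCD registers */
};

static void register_pcd_callbacks(struct controller *c,
				   struct role_callbacks *cb, void *ctx)
{
	cb->p = ctx;
	c->pcd_cb = cb;
}

/* Caller side: only invoke a hook if it was actually provided. */
static void controller_start(struct controller *c)
{
	if (c->pcd_cb && c->pcd_cb->start)
		c->pcd_cb->start(c->pcd_cb->p);
}

static int my_pcd_start(void *p) { printf("pcd start, ctx=%p\n", p); return 0; }

int main(void)
{
	static struct role_callbacks cb = { .start = my_pcd_start };
	struct controller ctrl = { 0 };
	int dummy_pcd_state;

	register_pcd_callbacks(&ctrl, &cb, &dummy_pcd_state);
	controller_start(&ctrl);
	return 0;
}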
++ */ ++#include "otg_plat.h" ++#include "otg_regs.h" ++#include "otg_cil.h" ++#include "otg_pcd.h" ++ ++#ifdef DEBUG ++inline const char *op_state_str(dwc_otg_core_if_t *core_if) ++{ ++ return (core_if->op_state==A_HOST?"a_host": ++ (core_if->op_state==A_SUSPEND?"a_suspend": ++ (core_if->op_state==A_PERIPHERAL?"a_peripheral": ++ (core_if->op_state==B_PERIPHERAL?"b_peripheral": ++ (core_if->op_state==B_HOST?"b_host": ++ "unknown"))))); ++} ++#endif ++ ++/** This function will log a debug message ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n", ++ dwc_otg_mode(core_if) ? "Host" : "Device"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.modemismatch = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++ ++/** Start the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->start) { ++ core_if->hcd_cb->start(core_if->hcd_cb->p); ++ } ++} ++/** Stop the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_stop(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->stop) { ++ core_if->hcd_cb->stop(core_if->hcd_cb->p); ++ } ++} ++/** Disconnect the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_disconnect(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->disconnect) { ++ core_if->hcd_cb->disconnect(core_if->hcd_cb->p); ++ } ++} ++/** Inform the HCD the a New Session has begun. Helper function for ++ * using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_session_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->session_start) { ++ core_if->hcd_cb->session_start(core_if->hcd_cb->p); ++ } ++} ++ ++/** Start the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->start) { ++ core_if->pcd_cb->start(core_if->pcd_cb->p); ++ } ++} ++/** Stop the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_stop(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->stop) { ++ core_if->pcd_cb->stop(core_if->pcd_cb->p); ++ } ++} ++/** Suspend the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_suspend(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->suspend) { ++ core_if->pcd_cb->suspend(core_if->pcd_cb->p); ++ } ++} ++/** Resume the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++static inline void pcd_resume(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++} ++ ++/** ++ * This function handles the OTG Interrupts. It reads the OTG ++ * Interrupt Register (GOTGINT) to determine what interrupt has ++ * occurred. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gotgint_data_t gotgint; ++ gotgctl_data_t gotgctl; ++ gintmsk_data_t gintmsk; ++ gotgint.d32 = dwc_read_reg32(&global_regs->gotgint); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32, ++ op_state_str(core_if)); ++ //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32); ++ ++ if (gotgint.b.sesenddet) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Session End Detected++ (%s)\n", ++ op_state_str(core_if)); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ ++ if (core_if->op_state == B_HOST) { ++ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ core_if->op_state = B_PERIPHERAL; ++ } else { ++ dwc_otg_pcd_t *pcd; ++ ++ /* If not B_HOST and Device HNP still set. HNP ++ * Did not succeed!*/ ++ if (gotgctl.b.devhnpen) { ++ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n"); ++ DWC_ERROR("Device Not Connected/Responding!\n"); ++ } ++ ++ /* If Session End Detected the B-Cable has ++ * been disconnected. */ ++ /* Reset PCD and Gadget driver to a ++ * clean state. */ ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ } ++ gotgctl.d32 = 0; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ if (gotgint.b.sesreqsucstschng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Session Reqeust Success Status Change++\n"); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.sesreqscs) { ++ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->core_params->i2c_enable)) { ++ core_if->srp_success = 1; ++ } ++ else { ++ dwc_otg_pcd_t *pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_resume(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ } ++ } ++ if (gotgint.b.hstnegsucstschng) { ++ /* Print statements during the HNP interrupt handling ++ * can cause it to fail.*/ ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.hstnegscs) { ++ if (dwc_otg_is_host_mode(core_if)) { ++ dwc_otg_pcd_t *pcd; ++ ++ core_if->op_state = B_HOST; ++ /* ++ * Need to disable SOF interrupt immediately. ++ * When switching from device to host, the PCD ++ * interrupt handler won't handle the ++ * interrupt if host mode is already set. 
The ++ * HCD interrupt handler won't get called if ++ * the HCD state is HALT. This means that the ++ * interrupt does not get handled and Linux ++ * complains loudly. ++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ hcd_start(core_if); ++ core_if->op_state = B_HOST; ++ } ++ } else { ++ gotgctl.d32 = 0; ++ gotgctl.b.hnpreq = 1; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ DWC_DEBUGPL(DBG_ANY, "HNP Failed\n"); ++ DWC_ERROR("Device Not Connected/Responding\n"); ++ } ++ } ++ if (gotgint.b.hstnegdet) { ++ /* The disconnect interrupt is set at the same time as ++ * Host Negotiation Detected. During the mode ++ * switch all interrupts are cleared so the disconnect ++ * interrupt handler will not get executed. ++ */ ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Host Negotiation Detected++ (%s)\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); ++ if (dwc_otg_is_device_mode(core_if)){ ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n", core_if->op_state); ++ hcd_disconnect(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ core_if->op_state = A_PERIPHERAL; ++ } else { ++ dwc_otg_pcd_t *pcd; ++ ++ /* ++ * Need to disable SOF interrupt immediately. When ++ * switching from device to host, the PCD interrupt ++ * handler won't handle the interrupt if host mode is ++ * already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that ++ * the interrupt does not get handled and Linux ++ * complains loudly. ++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ hcd_start(core_if); ++ core_if->op_state = A_HOST; ++ } ++ } ++ if (gotgint.b.adevtoutchng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "A-Device Timeout Change++\n"); ++ } ++ if (gotgint.b.debdone) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Debounce Done++\n"); ++ } ++ ++ /* Clear GOTGINT */ ++ dwc_write_reg32 (&core_if->core_global_regs->gotgint, gotgint.d32); ++ ++ return 1; ++} ++ ++ ++void w_conn_id_status_change(struct work_struct *p) ++{ ++ dwc_otg_core_if_t *core_if = container_of(p, dwc_otg_core_if_t, w_conn_id); ++ ++ uint32_t count = 0; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts); ++ ++ /* B-Device connector (Device Mode) */ ++ if (gotgctl.b.conidsts) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* Wait for switch to device mode. 
*/ ++ while (!dwc_otg_is_device_mode(core_if)){ ++ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ core_if->op_state = B_PERIPHERAL; ++ dwc_otg_core_init(core_if); ++ dwc_otg_enable_global_interrupts(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ } else { ++ /* A-Device connector (Host Mode) */ ++ while (!dwc_otg_is_host_mode(core_if)) { ++ DWC_PRINT("Waiting for Host Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ core_if->op_state = A_HOST; ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ dwc_otg_core_init(core_if); ++ dwc_otg_enable_global_interrupts(core_if); ++ hcd_start(core_if); ++ } ++} ++ ++ ++/** ++ * This function handles the Connector ID Status Change Interrupt. It ++ * reads the OTG Interrupt Register (GOTCTL) to determine whether this ++ * is a Device to Host Mode transition or a Host Mode to Device ++ * Transition. ++ * ++ * This only occurs when the cable is connected/removed from the PHY ++ * connector. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *core_if) ++{ ++ ++ /* ++ * Need to disable SOF interrupt immediately. If switching from device ++ * to host, the PCD interrupt handler won't handle the interrupt if ++ * host mode is already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that the interrupt does ++ * not get handled and Linux complains loudly. ++ */ ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ gintsts_data_t gintsts = { .d32 = 0 }; ++ ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0); ++ ++ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); ++ ++ /* ++ * Need to schedule a work, as there are possible DELAY function calls ++ */ ++ queue_work(core_if->wq_otg, &core_if->w_conn_id); ++ ++ /* Set flag and clear interrupt */ ++ gintsts.b.conidstschng = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device is initiating the Session ++ * Request Protocol to request the host to turn on bus power so a new ++ * session can begin. The handler responds by turning on bus power. If ++ * the DWC_otg controller is in low power mode, the handler brings the ++ * controller out of low power mode before turning on bus power. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t *core_if) ++{ ++ hprt0_data_t hprt0; ++ gintsts_data_t gintsts; ++ ++#ifndef DWC_HOST_ONLY ++ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ DWC_PRINT("SRP: Device mode\n"); ++ } else { ++ DWC_PRINT("SRP: Host mode\n"); ++ ++ /* Turn on the port power bit. */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Start the Connection timer. 
So a message can be displayed ++ * if connect does not occur within 10 seconds. */ ++ hcd_session_start(core_if); ++ } ++#endif ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sessreqintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++void w_wakeup_detected(struct work_struct *p) ++{ ++ struct delayed_work *dw = container_of(p, struct delayed_work, work); ++ dwc_otg_core_if_t *core_if = container_of(dw, dwc_otg_core_if_t, w_wkp); ++ ++ /* ++ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms ++ * so that OPT tests pass with all PHYs). ++ */ ++ hprt0_data_t hprt0 = {.d32=0}; ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32); ++// MDELAY(70); ++ hprt0.b.prtres = 0; /* Resume */ ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(core_if->host_if->hprt0)); ++} ++/** ++ * This interrupt indicates that the DWC_otg controller has detected a ++ * resume or remote wakeup sequence. If the DWC_otg controller is in ++ * low power mode, the handler must brings the controller out of low ++ * power mode. The controller automatically begins resume ++ * signaling. The handler schedules a time to stop resume signaling. ++ */ ++int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dctl_data_t dctl = {.d32=0}; ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts)); ++#ifdef PARTIAL_POWER_DOWN ++ if (core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ ++ power.d32 = dwc_read_reg32(core_if->pcgcctl); ++ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32); ++ ++ power.b.stoppclk = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.pwrclmp = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ } ++#endif ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++ ++ } else { ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ ++ /* Restart the Phy Clock */ ++ pcgcctl.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); ++ ++ queue_delayed_work(core_if->wq_otg, &core_if->w_wkp, ((70 * HZ / 1000) + 1)); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.wkupintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device has been disconnected from ++ * the root port. ++ */ ++int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device"), ++ op_state_str(core_if)); ++ ++/** @todo Consolidate this if statement. */ ++#ifndef DWC_HOST_ONLY ++ if (core_if->op_state == B_HOST) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. 
*/ ++ hcd_disconnect(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ core_if->op_state = B_PERIPHERAL; ++ } else if (dwc_otg_is_device_mode(core_if)) { ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); ++ if (gotgctl.b.hstsethnpen==1) { ++ /* Do nothing, if HNP in process the OTG ++ * interrupt "Host Negotiation Detected" ++ * interrupt will do the mode switch. ++ */ ++ } else if (gotgctl.b.devhnpen == 0) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect(core_if); ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_start(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ ++ core_if->op_state = B_PERIPHERAL; ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n"); ++ } ++ } else { ++ if (core_if->op_state == A_HOST) { ++ /* A-Cable still connected but device disconnected. */ ++ hcd_disconnect(core_if); ++ } ++ } ++#endif ++ ++ gintsts.d32 = 0; ++ gintsts.b.disconnect = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++/** ++ * This interrupt indicates that SUSPEND state has been detected on ++ * the USB. ++ * ++ * For HNP the USB Suspend interrupt signals the change from ++ * "a_peripheral" to "a_host". ++ * ++ * When power management is enabled the core will be put in low power ++ * mode. ++ */ ++int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dwc_otg_pcd_t *pcd; ++ ++ /* Check the Device status register to determine if the Suspend ++ * state is active. */ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32); ++ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d " ++ "HWCFG4.power Optimize=%d\n", ++ dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz); ++ ++ ++#ifdef PARTIAL_POWER_DOWN ++/** @todo Add a module parameter for power management. */ ++ if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ DWC_DEBUGPL(DBG_CIL, "suspend\n"); ++ ++ power.b.pwrclmp = 1; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 1; ++ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); ++ ++ power.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"disconnect?\n"); ++ } ++#endif ++ /* PCD callback for suspend. */ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_suspend(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ } else { ++ if (core_if->op_state == A_PERIPHERAL) { ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n"); ++ /* Clear the a_peripheral flag, back to a_host. 
*/ ++ ++ pcd=(dwc_otg_pcd_t *)core_if->pcd_cb->p; ++ if(unlikely(!pcd)) { ++ DWC_ERROR("%s: data structure not initialized properly, core_if->pcd_cb->p = NULL!!!",__func__); ++ BUG(); ++ } ++ SPIN_LOCK(&pcd->lock); ++ ++ pcd_stop(core_if); ++ ++ SPIN_UNLOCK(&pcd->lock); ++ ++ hcd_start(core_if); ++ core_if->op_state = A_HOST; ++ } ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbsuspend = 1; ++ dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ gintmsk_data_t gintmsk; ++ gintmsk_data_t gintmsk_common = {.d32=0}; ++ gintmsk_common.b.wkupintr = 1; ++ gintmsk_common.b.sessreqintr = 1; ++ gintmsk_common.b.conidstschng = 1; ++ gintmsk_common.b.otgintr = 1; ++ gintmsk_common.b.modemismatch = 1; ++ gintmsk_common.b.disconnect = 1; ++ gintmsk_common.b.usbsuspend = 1; ++ /** @todo: The port interrupt occurs while in device ++ * mode. Added code to CIL to clear the interrupt for now! ++ */ ++ gintmsk_common.b.portintr = 1; ++ ++ gintsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ gintmsk.d32 = dwc_read_reg32(&core_if->core_global_regs->gintmsk); ++#ifdef DEBUG ++ /* if any common interrupts set */ ++ if (gintsts.d32 & gintmsk_common.d32) { ++ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n", ++ gintsts.d32, gintmsk.d32); ++ } ++#endif ++ ++ return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32); ++ ++} ++ ++/** ++ * Common interrupt handler. ++ * ++ * The common interrupts are those that occur in both Host and Device mode. ++ * This handler handles the following interrupts: ++ * - Mode Mismatch Interrupt ++ * - Disconnect Interrupt ++ * - OTG Interrupt ++ * - Connector ID Status Change Interrupt ++ * - Session Request Interrupt. ++ * - Resume / Remote Wakeup Detected Interrupt. ++ * ++ */ ++int32_t dwc_otg_handle_common_intr(dwc_otg_core_if_t *core_if) ++{ ++ int retval = 0; ++ gintsts_data_t gintsts; ++ ++ gintsts.d32 = dwc_otg_read_common_intr(core_if); ++ ++ if (gintsts.b.modemismatch) { ++ retval |= dwc_otg_handle_mode_mismatch_intr(core_if); ++ } ++ if (gintsts.b.otgintr) { ++ retval |= dwc_otg_handle_otg_intr(core_if); ++ } ++ if (gintsts.b.conidstschng) { ++ retval |= dwc_otg_handle_conn_id_status_change_intr(core_if); ++ } ++ if (gintsts.b.disconnect) { ++ retval |= dwc_otg_handle_disconnect_intr(core_if); ++ } ++ if (gintsts.b.sessreqintr) { ++ retval |= dwc_otg_handle_session_req_intr(core_if); ++ } ++ if (gintsts.b.wkupintr) { ++ retval |= dwc_otg_handle_wakeup_detected_intr(core_if); ++ } ++ if (gintsts.b.usbsuspend) { ++ retval |= dwc_otg_handle_usb_suspend_intr(core_if); ++ } ++ if (gintsts.b.portintr && dwc_otg_is_device_mode(core_if)) { ++ /* The port interrupt occurs while in device mode with HPRT0 ++ * Port Enable/Disable. 
++ */ ++ gintsts.d32 = 0; ++ gintsts.b.portintr = 1; ++ dwc_write_reg32(&core_if->core_global_regs->gintsts, ++ gintsts.d32); ++ retval |= 1; ++ ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} +--- /dev/null ++++ b/drivers/usb/dwc/otg_driver.c +@@ -0,0 +1,960 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.c $ ++ * $Revision: #63 $ ++ * $Date: 2008/09/24 $ ++ * $Change: 1101777 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * The dwc_otg_driver module provides the initialization and cleanup entry ++ * points for the DWC_otg driver. This module will be dynamically installed ++ * after Linux is booted using the insmod command. When the module is ++ * installed, the dwc_otg_driver_init function is called. When the module is ++ * removed (using rmmod), the dwc_otg_driver_cleanup function is called. ++ * ++ * This module also defines a data structure for the dwc_otg_driver, which is ++ * used in conjunction with the standard ARM lm_device structure. These ++ * structures allow the OTG driver to comply with the standard Linux driver ++ * model in which devices and drivers are registered with a bus driver. This ++ * has the benefit that Linux can expose attributes of the driver and device ++ * in its special sysfs file system. Users can then read or write files in ++ * this file system to perform diagnostics on the driver components or the ++ * device. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* permission constants */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "otg_plat.h" ++#include "otg_attr.h" ++#include "otg_driver.h" ++#include "otg_cil.h" ++#include "otg_pcd.h" ++#include "otg_hcd.h" ++ ++#define DWC_DRIVER_VERSION "2.72a 24-JUN-2008" ++#define DWC_DRIVER_DESC "HS OTG USB Controller driver" ++ ++static const char dwc_driver_name[] = "dwc_otg"; ++ ++/*-------------------------------------------------------------------------*/ ++/* Encapsulate the module parameter settings */ ++ ++static dwc_otg_core_params_t dwc_otg_module_params = { ++ .opt = -1, ++ .otg_cap = -1, ++ .dma_enable = -1, ++ .dma_desc_enable = -1, ++ .dma_burst_size = -1, ++ .speed = -1, ++ .host_support_fs_ls_low_power = -1, ++ .host_ls_low_power_phy_clk = -1, ++ .enable_dynamic_fifo = -1, ++ .data_fifo_size = -1, ++ .dev_rx_fifo_size = -1, ++ .dev_nperio_tx_fifo_size = -1, ++ .dev_perio_tx_fifo_size = { ++ /* dev_perio_tx_fifo_size_1 */ ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1 ++ /* 15 */ ++ }, ++ .host_rx_fifo_size = -1, ++ .host_nperio_tx_fifo_size = -1, ++ .host_perio_tx_fifo_size = -1, ++ .max_transfer_size = -1, ++ .max_packet_count = -1, ++ .host_channels = -1, ++ .dev_endpoints = -1, ++ .phy_type = -1, ++ .phy_utmi_width = -1, ++ .phy_ulpi_ddr = -1, ++ .phy_ulpi_ext_vbus = -1, ++ .i2c_enable = -1, ++ .ulpi_fs_ls = -1, ++ .ts_dline = -1, ++ .en_multiple_tx_fifo = -1, ++ .dev_tx_fifo_size = { ++ /* dev_tx_fifo_size */ ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1 ++ /* 15 */ ++ }, ++ .thr_ctl = -1, ++ .tx_thr_length = -1, ++ .rx_thr_length = -1, ++ .pti_enable = -1, ++ .mpi_enable = -1, ++}; ++ ++/** ++ * Global Debug Level Mask. ++ */ ++uint32_t g_dbg_lvl = 0; /* OFF */ ++ ++/** ++ * This function is called during module intialization to verify that ++ * the module parameters are in a valid state. ++ */ ++static int check_parameters(dwc_otg_core_if_t *core_if) ++{ ++ int i; ++ int retval = 0; ++ ++/* Checks if the parameter is outside of its valid range of values */ ++#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \ ++ ((dwc_otg_module_params._param_ < (_low_)) || \ ++ (dwc_otg_module_params._param_ > (_high_))) ++ ++/* If the parameter has been set by the user, check that the parameter value is ++ * within the value range of values. If not, report a module error. 
*/ ++#define DWC_OTG_PARAM_ERR(_param_, _low_, _high_, _string_) \ ++ do { \ ++ if (dwc_otg_module_params._param_ != -1) { \ ++ if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \ ++ DWC_ERROR("`%d' invalid for parameter `%s'\n", \ ++ dwc_otg_module_params._param_, _string_); \ ++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ ++ retval++; \ ++ } \ ++ } \ ++ } while (0) ++ ++ DWC_OTG_PARAM_ERR(opt,0,1,"opt"); ++ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap"); ++ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable"); ++ DWC_OTG_PARAM_ERR(dma_desc_enable,0,1,"dma_desc_enable"); ++ DWC_OTG_PARAM_ERR(speed,0,1,"speed"); ++ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power"); ++ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk"); ++ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo"); ++ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size"); ++ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size"); ++ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size"); ++ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count"); ++ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels"); ++ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints"); ++ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type"); ++ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr"); ++ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus"); ++ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable"); ++ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls"); ++ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline"); ++ ++ if (dwc_otg_module_params.dma_burst_size != -1) { ++ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,4,4) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,256,256)) { ++ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n", ++ dwc_otg_module_params.dma_burst_size); ++ dwc_otg_module_params.dma_burst_size = 32; ++ retval++; ++ } ++ ++ { ++ uint8_t brst_sz = 0; ++ while(dwc_otg_module_params.dma_burst_size > 1) { ++ brst_sz ++; ++ dwc_otg_module_params.dma_burst_size >>= 1; ++ } ++ dwc_otg_module_params.dma_burst_size = brst_sz; ++ } ++ } ++ ++ if (dwc_otg_module_params.phy_utmi_width != -1) { ++ if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8) && ++ DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) { ++ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n", ++ dwc_otg_module_params.phy_utmi_width); ++ dwc_otg_module_params.phy_utmi_width = 16; ++ retval++; ++ } ++ } ++ ++ for (i = 0; i < 15; i++) { ++ /** @todo should be like above */ ++ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i], 4, 768, "dev_perio_tx_fifo_size"); ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) { ++ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i], 4, 768)) { ++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i); ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ 
retval++; ++ } ++ } ++ } ++ ++ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo"); ++ ++ for (i = 0; i < 15; i++) { ++ /** @todo should be like above */ ++ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i], 4, 768, "dev_tx_fifo_size"); ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) { ++ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) { ++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", ++ dwc_otg_module_params.dev_tx_fifo_size[i], "dev_tx_fifo_size", i); ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; ++ retval++; ++ } ++ } ++ } ++ ++ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl"); ++ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length"); ++ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length"); ++ ++ DWC_OTG_PARAM_ERR(pti_enable,0,1,"pti_enable"); ++ DWC_OTG_PARAM_ERR(mpi_enable,0,1,"mpi_enable"); ++ ++ /* At this point, all module parameters that have been set by the user ++ * are valid, and those that have not are left unset. Now set their ++ * default values and/or check the parameters against the hardware ++ * configurations of the OTG core. */ ++ ++/* This sets the parameter to the default value if it has not been set by the ++ * user */ ++#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \ ++ ({ \ ++ int changed = 1; \ ++ if (dwc_otg_module_params._param_ == -1) { \ ++ changed = 0; \ ++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ ++ } \ ++ changed; \ ++ }) ++ ++/* This checks the macro agains the hardware configuration to see if it is ++ * valid. It is possible that the default value could be invalid. In this ++ * case, it will report a module error if the user touched the parameter. ++ * Otherwise it will adjust the value without any error. */ ++#define DWC_OTG_PARAM_CHECK_VALID(_param_, _str_, _is_valid_, _set_valid_) \ ++ ({ \ ++ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \ ++ int error = 0; \ ++ if (!(_is_valid_)) { \ ++ if (changed) { \ ++ DWC_ERROR("`%d' invalid for parameter `%s'. Check HW configuration.\n", dwc_otg_module_params._param_, _str_); \ ++ error = 1; \ ++ } \ ++ dwc_otg_module_params._param_ = (_set_valid_); \ ++ } \ ++ error; \ ++ }) ++ ++ /* OTG Cap */ ++ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap", ++ ({ ++ int valid; ++ valid = 1; ++ switch (dwc_otg_module_params.otg_cap) { ++ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE: ++ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ++ valid = 0; ++ break; ++ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE: ++ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { ++ valid = 0; ++ } ++ break; ++ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE: ++ /* always valid */ ++ break; ++ } ++ valid; ++ }), ++ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ? ++ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE : ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable", ++ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 
0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_desc_enable, "dma_desc_enable", ++ ((dwc_otg_module_params.dma_desc_enable == 1) && ++ ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.desc_dma == 0))) ? 0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power, ++ "host_support_fs_ls_low_power", ++ 1, 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo, ++ "enable_dynamic_fifo", ++ ((dwc_otg_module_params.enable_dynamic_fifo == 0) || ++ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, ++ "data_fifo_size", ++ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth), ++ core_if->hwcfg3.b.dfifo_depth); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, ++ "dev_rx_fifo_size", ++ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size, ++ "dev_nperio_tx_fifo_size", ++ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, ++ "host_rx_fifo_size", ++ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size, ++ "host_nperio_tx_fifo_size", ++ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size, ++ "host_perio_tx_fifo_size", ++ (dwc_otg_module_params.host_perio_tx_fifo_size <= ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))), ++ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, ++ "max_transfer_size", ++ (dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))), ++ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, ++ "max_packet_count", ++ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))), ++ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, ++ "host_channels", ++ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)), ++ (core_if->hwcfg2.b.num_host_chan + 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, ++ "dev_endpoints", ++ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)), ++ core_if->hwcfg2.b.num_dev_ep); ++ ++/* ++ * Define the following to disable the FS PHY Hardware checking. This is for ++ * internal testing only. 
++ * ++ * #define NO_FS_PHY_HW_CHECKS ++ */ ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", ++ ({ ++ int valid = 0; ++ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 1) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 2) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->hwcfg2.b.fs_phy_type == 1)) { ++ valid = 1; ++ } ++ valid; ++ }), ++ ({ ++ int set = DWC_PHY_TYPE_PARAM_FS; ++ if (core_if->hwcfg2.b.hs_phy_type) { ++ if ((core_if->hwcfg2.b.hs_phy_type == 3) || ++ (core_if->hwcfg2.b.hs_phy_type == 1)) { ++ set = DWC_PHY_TYPE_PARAM_UTMI; ++ } ++ else { ++ set = DWC_PHY_TYPE_PARAM_ULPI; ++ } ++ } ++ set; ++ })); ++#endif ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed", ++ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1, ++ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk, ++ "host_ls_low_power_phy_clk", ++ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1), ++ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width); ++ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls); ++ DWC_OTG_PARAM_SET_DEFAULT(ts_dline); ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, ++ "i2c_enable", ++ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 0 : 1, ++ 0); ++#endif ++ ++ for (i = 0; i < 15; i++) { ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i], i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo, "en_multiple_tx_fifo", ++ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) && (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 
0 : 1, ++ 0); ++ ++ for (i = 0; i < 15; i++) { ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_tx_fifo_size[i], i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(thr_ctl, "thr_ctl", ++ ((dwc_otg_module_params.thr_ctl != 0) && ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.ded_fifo_en == 0))) ? 0 : 1, ++ 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length); ++ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(pti_enable, "pti_enable", ++ ((dwc_otg_module_params.pti_enable == 0) || ((dwc_otg_module_params.pti_enable == 1) && (core_if->snpsid >= 0x4F54272A))) ? 1 : 0, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(mpi_enable, "mpi_enable", ++ ((dwc_otg_module_params.mpi_enable == 0) || ((dwc_otg_module_params.mpi_enable == 1) && (core_if->hwcfg2.b.multi_proc_int == 1))) ? 1 : 0, ++ 0); ++ return retval; ++} ++ ++/** ++ * This function is the top level interrupt handler for the Common ++ * (Device and host modes) interrupts. ++ */ ++static irqreturn_t dwc_otg_common_irq(int irq, void *dev) ++{ ++ dwc_otg_device_t *otg_dev = dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_handle_common_intr(otg_dev->core_if); ++ return IRQ_RETVAL(retval); ++} ++ ++/** ++ * This function is called when a lm_device is unregistered with the ++ * dwc_otg_driver. This happens, for example, when the rmmod command is ++ * executed. The device may or may not be electrically present. If it is ++ * present, the driver stops device processing. Any resources used on behalf ++ * of this device are freed. ++ * ++ * @param[in] lmdev ++ */ ++static int __devexit dwc_otg_driver_remove(struct platform_device *pdev) ++{ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); ++ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, pdev); ++ ++ if (!otg_dev) { ++ /* Memory allocation for the dwc_otg_device failed. */ ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); ++ return 0; ++ } ++ ++ /* ++ * Free the IRQ ++ */ ++ if (otg_dev->common_irq_installed) { ++ free_irq(otg_dev->irq, otg_dev); ++ } ++ ++#ifndef DWC_DEVICE_ONLY ++ if (otg_dev->hcd) { ++ dwc_otg_hcd_remove(pdev); ++ } else { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); ++ return 0; ++ } ++#endif ++ ++#ifndef DWC_HOST_ONLY ++ if (otg_dev->pcd) { ++ dwc_otg_pcd_remove(pdev); ++ } ++#endif ++ if (otg_dev->core_if) { ++ dwc_otg_cil_remove(otg_dev->core_if); ++ } ++ ++ /* ++ * Remove the device attributes ++ */ ++ dwc_otg_attr_remove(pdev); ++ ++ /* ++ * Return the memory. ++ */ ++ if (otg_dev->base) { ++ iounmap(otg_dev->base); ++ } ++ kfree(otg_dev); ++ ++ /* ++ * Clear the drvdata pointer. ++ */ ++ platform_set_drvdata(pdev, 0); ++ ++ return 0; ++} ++ ++/** ++ * This function is called when an lm_device is bound to a ++ * dwc_otg_driver. It creates the driver components required to ++ * control the device (CIL, HCD, and PCD) and it initializes the ++ * device. The driver components are stored in a dwc_otg_device ++ * structure. 
A reference to the dwc_otg_device is saved in the ++ * lm_device. This allows the driver to access the dwc_otg_device ++ * structure on subsequent calls to driver methods for this device. ++ * ++ * @param[in] lmdev lm_device definition ++ */ ++static int __devinit dwc_otg_driver_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ int retval = 0; ++ uint32_t snpsid; ++ dwc_otg_device_t *dwc_otg_device; ++ struct resource *res; ++ ++ dev_dbg(dev, "dwc_otg_driver_probe(%p)\n", pdev); ++ ++ dwc_otg_device = kmalloc(sizeof(dwc_otg_device_t), GFP_KERNEL); ++ ++ if (!dwc_otg_device) { ++ dev_err(dev, "kmalloc of dwc_otg_device failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ memset(dwc_otg_device, 0, sizeof(*dwc_otg_device)); ++ dwc_otg_device->reg_offset = 0xFFFFFFFF; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "Found OTG with no register addr.\n"); ++ retval = -ENODEV; ++ goto fail; ++ } ++ dwc_otg_device->rsrc_start = res->start; ++ dwc_otg_device->rsrc_len = res->end - res->start + 1; ++ ++ dwc_otg_device->base = ioremap(dwc_otg_device->rsrc_start, dwc_otg_device->rsrc_len); ++ ++ if (!dwc_otg_device->base) { ++ dev_err(dev, "ioremap() failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ dev_dbg(dev, "base=0x%08x\n", (unsigned)dwc_otg_device->base); ++ ++ /* ++ * Attempt to ensure this device is really a DWC_otg Controller. ++ * Read and verify the SNPSID register contents. The value should be ++ * 0x45F42XXX, which corresponds to "OT2", as in "OTG version 2.XX". ++ */ ++ snpsid = dwc_read_reg32((uint32_t *)((uint8_t *)dwc_otg_device->base + 0x40)); ++ ++ if ((snpsid & 0xFFFFF000) != OTG_CORE_REV_2_00) { ++ dev_err(dev, "Bad value for SNPSID: 0x%08x\n", snpsid); ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ DWC_PRINT("Core Release: %x.%x%x%x\n", ++ (snpsid >> 12 & 0xF), ++ (snpsid >> 8 & 0xF), ++ (snpsid >> 4 & 0xF), ++ (snpsid & 0xF)); ++ ++ /* ++ * Initialize driver data to point to the global DWC_otg ++ * Device structure. ++ */ ++ platform_set_drvdata(pdev, dwc_otg_device); ++ ++ dev_dbg(dev, "dwc_otg_device=0x%p\n", dwc_otg_device); ++ ++ dwc_otg_device->core_if = dwc_otg_cil_init(dwc_otg_device->base, ++ &dwc_otg_module_params); ++ ++ dwc_otg_device->core_if->snpsid = snpsid; ++ ++ if (!dwc_otg_device->core_if) { ++ dev_err(dev, "CIL initialization failed!\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ /* ++ * Validate parameter values. ++ */ ++ if (check_parameters(dwc_otg_device->core_if)) { ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ /* ++ * Create Device Attributes in sysfs ++ */ ++ dwc_otg_attr_create(pdev); ++ ++ /* ++ * Disable the global interrupt until all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_disable_global_interrupts(dwc_otg_device->core_if); ++ ++ /* ++ * Install the interrupt handler for the common interrupts before ++ * enabling common interrupts in core_init below. ++ */ ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res) { ++ dev_err(dev, "Fount OTG with to IRQ.\n"); ++ retval = -ENODEV; ++ goto fail; ++ } ++ dwc_otg_device->irq = res->start; ++ ++ retval = request_irq(res->start, dwc_otg_common_irq, ++ IRQF_SHARED, "dwc_otg", dwc_otg_device); ++ if (retval) { ++ DWC_ERROR("request of irq%d failed\n", res->start); ++ retval = -EBUSY; ++ goto fail; ++ } else { ++ dwc_otg_device->common_irq_installed = 1; ++ } ++ ++ /* ++ * Initialize the DWC_otg core. 
++ */ ++ dwc_otg_core_init(dwc_otg_device->core_if); ++ ++#ifndef DWC_HOST_ONLY ++ /* ++ * Initialize the PCD ++ */ ++ retval = dwc_otg_pcd_init(pdev); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_pcd_init failed\n"); ++ dwc_otg_device->pcd = NULL; ++ goto fail; ++ } ++#endif ++#ifndef DWC_DEVICE_ONLY ++ /* ++ * Initialize the HCD ++ */ ++ retval = dwc_otg_hcd_init(pdev); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_hcd_init failed\n"); ++ dwc_otg_device->hcd = NULL; ++ goto fail; ++ } ++#endif ++ ++ /* ++ * Enable the global interrupt after all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_enable_global_interrupts(dwc_otg_device->core_if); ++ ++ return 0; ++ ++ fail: ++ dwc_otg_driver_remove(pdev); ++ return retval; ++} ++ ++static struct platform_driver dwc_otg_platform_driver = { ++ .driver.name = "dwc_otg", ++ .probe = dwc_otg_driver_probe, ++ .remove = dwc_otg_driver_remove, ++}; ++ ++static int __init dwc_otg_init_module(void) ++{ ++ return platform_driver_register(&dwc_otg_platform_driver); ++} ++ ++static void __exit dwc_otg_cleanup_module(void) ++{ ++ platform_driver_unregister(&dwc_otg_platform_driver); ++} ++ ++module_init(dwc_otg_init_module); ++module_exit(dwc_otg_cleanup_module); ++ ++/** ++ * This function is called when the driver is removed from the kernel ++ * with the rmmod command. The driver unregisters itself with its bus ++ * driver. ++ * ++ */ ++ ++MODULE_DESCRIPTION(DWC_DRIVER_DESC); ++MODULE_AUTHOR("Synopsys Inc."); ++MODULE_LICENSE("GPL"); ++ ++module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444); ++MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None"); ++module_param_named(opt, dwc_otg_module_params.opt, int, 0444); ++MODULE_PARM_DESC(opt, "OPT Mode"); ++module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444); ++MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled"); ++ ++module_param_named(dma_desc_enable, dwc_otg_module_params.dma_desc_enable, int, 0444); ++MODULE_PARM_DESC(dma_desc_enable, "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled"); ++ ++module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444); ++MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256"); ++module_param_named(speed, dwc_otg_module_params.speed, int, 0444); ++MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed"); ++module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444); ++MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Support 1=Don't Support"); ++module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444); ++MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48Mhz 1=6Mhz"); ++module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444); ++MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing"); ++module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444); ++MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768"); ++module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); 
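[Editor's illustration, not part of the patch.] All of the module parameters above default to -1, which check_parameters() treats as "not set by the user": unset values are quietly replaced by dwc_param_<name>_default (or by a limit read back from the core's hardware configuration registers), while user-supplied values are range-checked and, on failure, reported via DWC_ERROR and reset. The standalone sketch below illustrates that sentinel-and-validate convention; the names (demo_params, DEMO_PARAM_ERR, demo_speed_default) are hypothetical and the block only demonstrates the pattern, it is not code from the driver.

#include <stdio.h>

/* Hypothetical stand-in for dwc_otg_core_params_t: -1 marks "unset". */
struct demo_params {
        int speed;              /* 0 = high speed, 1 = full speed, -1 = unset */
};

static const int demo_speed_default = 0;        /* assumed default value */

/*
 * Mirrors the idea of DWC_OTG_PARAM_ERR: only values the user actually
 * set are range-checked; invalid ones are reported and reset.
 */
#define DEMO_PARAM_ERR(p, field, low, high)                             \
        do {                                                            \
                if ((p)->field != -1 &&                                 \
                    ((p)->field < (low) || (p)->field > (high))) {      \
                        fprintf(stderr, "`%d' invalid for `%s'\n",      \
                                (p)->field, #field);                    \
                        (p)->field = demo_##field##_default;            \
                }                                                       \
        } while (0)

int main(void)
{
        struct demo_params p = { .speed = 7 };  /* deliberately out of range */

        DEMO_PARAM_ERR(&p, speed, 0, 1);
        printf("speed after validation: %d\n", p.speed);   /* prints 0 */

        p.speed = -1;                   /* "unset": left for later defaulting */
        DEMO_PARAM_ERR(&p, speed, 0, 1);
        printf("speed still unset: %d\n", p.speed);         /* prints -1 */
        return 0;
}

In the driver itself the fall-back value is either the compile-time dwc_param_*_default or a limit derived from the hardware configuration and FIFO size registers (hwcfg2/hwcfg3/hwcfg4, grxfsiz, gnptxfsiz, hptxfsiz), as handled by DWC_OTG_PARAM_CHECK_VALID above.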
++module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_13, dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); ++module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444); 
++MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768"); ++module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444); ++/** @todo Set the max to 512K, modify checks */ ++MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535"); ++module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444); ++MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511"); ++module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444); ++MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16"); ++module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444); ++MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15"); ++module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444); ++MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI"); ++module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444); ++MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits"); ++module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double"); ++module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal"); ++module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444); ++MODULE_PARM_DESC(i2c_enable, "FS PHY Interface"); ++module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444); ++MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only"); ++module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444); ++MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs"); ++module_param_named(debug, g_dbg_lvl, int, 0444); ++MODULE_PARM_DESC(debug, ""); ++ ++module_param_named(en_multiple_tx_fifo, dwc_otg_module_params.en_multiple_tx_fifo, int, 0444); ++MODULE_PARM_DESC(en_multiple_tx_fifo, "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled"); ++module_param_named(dev_tx_fifo_size_1, dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_2, dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_3, dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_4, dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_5, dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_6, dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_7, dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_8, 
dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_9, dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_10, dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_11, dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_12, dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_13, dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_14, dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_15, dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768"); ++ ++module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444); ++MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled"); ++module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444); ++MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs"); ++module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444); ++MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs"); ++ ++module_param_named(pti_enable, dwc_otg_module_params.pti_enable, int, 0444); ++MODULE_PARM_DESC(pti_enable, "Per Transfer Interrupt mode 0=disabled 1=enabled"); ++ ++module_param_named(mpi_enable, dwc_otg_module_params.mpi_enable, int, 0444); ++MODULE_PARM_DESC(mpi_enable, "Multiprocessor Interrupt mode 0=disabled 1=enabled"); +--- /dev/null ++++ b/drivers/usb/dwc/otg_driver.h +@@ -0,0 +1,62 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.h $ ++ * $Revision: #12 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. 
++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_DRIVER_H__ ++#define __DWC_OTG_DRIVER_H__ ++ ++/** @file ++ * This file contains the interface to the Linux driver. ++ */ ++#include "otg_cil.h" ++ ++/* Type declarations */ ++struct dwc_otg_pcd; ++struct dwc_otg_hcd; ++ ++/** ++ * This structure is a wrapper that encapsulates the driver components used to ++ * manage a single DWC_otg controller. ++ */ ++typedef struct dwc_otg_device { ++ void *base; ++ dwc_otg_core_if_t *core_if; ++ uint32_t reg_offset; ++ struct dwc_otg_pcd *pcd; ++ struct dwc_otg_hcd *hcd; ++ uint8_t common_irq_installed; ++ int irq; ++ uint32_t rsrc_start; ++ uint32_t rsrc_len; ++} dwc_otg_device_t; ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc/otg_hcd.c +@@ -0,0 +1,2735 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $ ++ * $Revision: #75 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064940 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the implementation of the HCD. In Linux, the HCD ++ * implements the hc_driver API. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "otg_driver.h" ++#include "otg_hcd.h" ++#include "otg_regs.h" ++ ++static const char dwc_otg_hcd_name[] = "dwc_otg_hcd"; ++ ++static const struct hc_driver dwc_otg_hc_driver = { ++ ++ .description = dwc_otg_hcd_name, ++ .product_desc = "DWC OTG Controller", ++ .hcd_priv_size = sizeof(dwc_otg_hcd_t), ++ .irq = dwc_otg_hcd_irq, ++ .flags = HCD_MEMORY | HCD_USB2, ++ .start = dwc_otg_hcd_start, ++ .stop = dwc_otg_hcd_stop, ++ .urb_enqueue = dwc_otg_hcd_urb_enqueue, ++ .urb_dequeue = dwc_otg_hcd_urb_dequeue, ++ .endpoint_disable = dwc_otg_hcd_endpoint_disable, ++ .get_frame_number = dwc_otg_hcd_get_frame_number, ++ .hub_status_data = dwc_otg_hcd_hub_status_data, ++ .hub_control = dwc_otg_hcd_hub_control, ++}; ++ ++/** ++ * Work queue function for starting the HCD when A-Cable is connected. ++ * The dwc_otg_hcd_start() must be called in a process context. ++ */ ++static void hcd_start_func(struct work_struct *_work) ++{ ++ struct delayed_work *dw = container_of(_work, struct delayed_work, work); ++ struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work); ++ struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv); ++ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd); ++ if (usb_hcd) { ++ dwc_otg_hcd_start(usb_hcd); ++ } ++} ++ ++/** ++ * HCD Callback function for starting the HCD when A-Cable is ++ * connected. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_start_cb(void *p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ ++ if (core_if->op_state == B_HOST) { ++ /* ++ * Reset the port. During a HNP mode switch the reset ++ * needs to occur within 1ms and have a duration of at ++ * least 50ms. ++ */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ((struct usb_hcd *)p)->self.is_b_host = 1; ++ } else { ++ ((struct usb_hcd *)p)->self.is_b_host = 0; ++ } ++ ++ /* Need to start the HCD in a non-interrupt context. */ ++// INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++ INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++// schedule_work(&dwc_otg_hcd->start_work); ++ queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000); ++ ++ return 1; ++} ++ ++/** ++ * HCD Callback function for stopping the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_stop_cb(void *p) ++{ ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)p; ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ dwc_otg_hcd_stop(usb_hcd); ++ return 1; ++} ++ ++static void del_xfer_timers(dwc_otg_hcd_t *hcd) ++{ ++#ifdef DEBUG ++ int i; ++ int num_channels = hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ del_timer(&hcd->core_if->hc_xfer_timer[i]); ++ } ++#endif ++} ++ ++static void del_timers(dwc_otg_hcd_t *hcd) ++{ ++ del_xfer_timers(hcd); ++ del_timer(&hcd->conn_timer); ++} ++ ++/** ++ * Processes all the URBs in a single list of QHs. 
Completes them with ++ * -ETIMEDOUT and frees the QTD. ++ */ ++static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list) ++{ ++ struct list_head *qh_item; ++ dwc_otg_qh_t *qh; ++ struct list_head *qtd_item; ++ dwc_otg_qtd_t *qtd; ++ ++ list_for_each(qh_item, qh_list) { ++ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry); ++ for (qtd_item = qh->qtd_list.next; ++ qtd_item != &qh->qtd_list; ++ qtd_item = qh->qtd_list.next) { ++ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry); ++ if (qtd->urb != NULL) { ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, ++ -ETIMEDOUT); ++ } ++ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); ++ } ++ } ++} ++ ++/** ++ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic ++ * and periodic schedules. The QTD associated with each URB is removed from ++ * the schedule and freed. This function may be called when a disconnect is ++ * detected or when the HCD is being stopped. ++ */ ++static void kill_all_urbs(dwc_otg_hcd_t *hcd) ++{ ++ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive); ++ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_disconnect_cb(void *p) ++{ ++ gintsts_data_t intr; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ ++ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ ++ /* ++ * Set status flags for the hub driver. ++ */ ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 0; ++ ++ /* ++ * Shutdown any transfers in process by clearing the Tx FIFO Empty ++ * interrupt mask and status bits and disabling subsequent host ++ * channel interrupts. ++ */ ++ intr.d32 = 0; ++ intr.b.nptxfempty = 1; ++ intr.b.ptxfempty = 1; ++ intr.b.hcintr = 1; ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0); ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* ++ * Turn off the vbus power only if the core has transitioned to device ++ * mode. If still in host mode, need to keep power on to detect a ++ * reconnection. ++ */ ++ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { ++ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) { ++ hprt0_data_t hprt0 = { .d32=0 }; ++ DWC_PRINT("Disconnect: PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++ } ++ ++ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); ++ } ++ ++ /* Respond with an error status to all URBs in the schedule. */ ++ kill_all_urbs(dwc_otg_hcd); ++ ++ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { ++ /* Clean up any host channels that were in use. */ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ /* Flush out any channel requests in slave mode. 
*/ ++ for (i = 0; i < num_channels; i++) { ++ channel = dwc_otg_hcd->hc_ptr_array[i]; ++ if (list_empty(&channel->hc_list_entry)) { ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ } ++ } ++ } ++ ++ for (i = 0; i < num_channels; i++) { ++ channel = dwc_otg_hcd->hc_ptr_array[i]; ++ if (list_empty(&channel->hc_list_entry)) { ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ /* Halt the channel. */ ++ hcchar.b.chdis = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel); ++ list_add_tail(&channel->hc_list_entry, ++ &dwc_otg_hcd->free_hc_list); ++ } ++ } ++ } ++ ++ /* A disconnect will end the session so the B-Device is no ++ * longer a B-host. */ ++ ((struct usb_hcd *)p)->self.is_b_host = 0; ++ return 1; ++} ++ ++/** ++ * Connection timeout function. An OTG host is required to display a ++ * message if the device does not connect within 10 seconds. ++ */ ++void dwc_otg_hcd_connect_timeout(unsigned long ptr) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr); ++ DWC_PRINT("Connect Timeout\n"); ++ DWC_ERROR("Device Not Connected/Responding\n"); ++} ++ ++/** ++ * Start the connection timer. An OTG host is required to display a ++ * message if the device does not connect within 10 seconds. The ++ * timer is deleted if a port connect interrupt occurs before the ++ * timer expires. ++ */ ++static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd) ++{ ++ init_timer(&hcd->conn_timer); ++ hcd->conn_timer.function = dwc_otg_hcd_connect_timeout; ++ hcd->conn_timer.data = 0; ++ hcd->conn_timer.expires = jiffies + (HZ * 10); ++ add_timer(&hcd->conn_timer); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_session_start_cb(void *p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd); ++ return 1; ++} ++ ++/** ++ * HCD Callback structure for handling mode switching. ++ */ ++static dwc_otg_cil_callbacks_t hcd_cil_callbacks = { ++ .start = dwc_otg_hcd_start_cb, ++ .stop = dwc_otg_hcd_stop_cb, ++ .disconnect = dwc_otg_hcd_disconnect_cb, ++ .session_start = dwc_otg_hcd_session_start_cb, ++ .p = 0, ++}; ++ ++/** ++ * Reset tasklet function ++ */ ++static void reset_tasklet_func(unsigned long data) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data; ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ ++ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n"); ++ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(60); ++ ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ dwc_otg_hcd->flags.b.port_reset_change = 1; ++} ++ ++static struct tasklet_struct reset_tasklet = { ++ .next = NULL, ++ .state = 0, ++ .count = ATOMIC_INIT(0), ++ .func = reset_tasklet_func, ++ .data = 0, ++}; ++ ++/** ++ * Initializes the HCD. This function allocates memory for and initializes the ++ * static parts of the usb_hcd and dwc_otg_hcd structures. 
It also registers the ++ * USB bus with the core and calls the hc_driver->start() function. It returns ++ * a negative error on failure. ++ */ ++int dwc_otg_hcd_init(struct platform_device *pdev) ++{ ++ struct usb_hcd *hcd = NULL; ++ dwc_otg_hcd_t *dwc_otg_hcd = NULL; ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); ++ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ int retval = 0; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); ++ ++ /* Set device flags indicating whether the HCD supports DMA. */ ++ if (otg_dev->core_if->dma_enable) { ++ DWC_PRINT("Using DMA mode\n"); ++ ++ if (otg_dev->core_if->dma_desc_enable) { ++ DWC_PRINT("Device using Descriptor DMA mode\n"); ++ } else { ++ DWC_PRINT("Device using Buffer DMA mode\n"); ++ } ++ } ++ /* ++ * Allocate memory for the base HCD plus the DWC OTG HCD. ++ * Initialize the base HCD. ++ */ ++ ++ hcd = usb_create_hcd(&dwc_otg_hc_driver, &pdev->dev, "gadget"); ++ if (!hcd) { ++ retval = -ENOMEM; ++ goto error1; ++ } ++ ++ hcd->regs = otg_dev->base; ++ hcd->self.otg_port = 1; ++ ++ /* Initialize the DWC OTG HCD. */ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_hcd->core_if = otg_dev->core_if; ++ otg_dev->hcd = dwc_otg_hcd; ++ ++ /* */ ++ spin_lock_init(&dwc_otg_hcd->lock); ++ ++ /* Register the HCD CIL Callbacks */ ++ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, ++ &hcd_cil_callbacks, hcd); ++ ++ /* Initialize the non-periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active); ++ ++ /* Initialize the periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); ++ ++ /* ++ * Create a host channel descriptor for each host channel implemented ++ * in the controller. Initialize the channel descriptor array. ++ */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array)); ++ for (i = 0; i < num_channels; i++) { ++ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL); ++ if (channel == NULL) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: host channel allocation failed\n", __func__); ++ goto error2; ++ } ++ memset(channel, 0, sizeof(dwc_hc_t)); ++ channel->hc_num = i; ++ dwc_otg_hcd->hc_ptr_array[i] = channel; ++#ifdef DEBUG ++ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]); ++#endif ++ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel); ++ } ++ ++ /* Initialize the Connection timeout timer. */ ++ init_timer(&dwc_otg_hcd->conn_timer); ++ ++ /* Initialize reset tasklet. */ ++ reset_tasklet.data = (unsigned long) dwc_otg_hcd; ++ dwc_otg_hcd->reset_tasklet = &reset_tasklet; ++ ++ /* ++ * Finish generic HCD initialization and start the HCD. This function ++ * allocates the DMA buffer pool, registers the USB bus, requests the ++ * IRQ line, and calls dwc_otg_hcd_start method. ++ */ ++ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED); ++ if (retval < 0) { ++ goto error2; ++ } ++ ++ /* ++ * Allocate space for storing data on status transactions. Normally no ++ * data is sent, but this space acts as a bit bucket. This must be ++ * done after usb_add_hcd since that function allocates the DMA buffer ++ * pool. 
++ */ ++ if (otg_dev->core_if->dma_enable) { ++ dwc_otg_hcd->status_buf = ++ dma_alloc_coherent(&pdev->dev, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ &dwc_otg_hcd->status_buf_dma, ++ GFP_KERNEL | GFP_DMA); ++ } else { ++ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, ++ GFP_KERNEL); ++ } ++ if (!dwc_otg_hcd->status_buf) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: status_buf allocation failed\n", __func__); ++ goto error3; ++ } ++ ++ dwc_otg_hcd->otg_dev = otg_dev; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, usbbus=%d\n", ++ hcd->self.busnum); ++ return 0; ++ ++ /* Error conditions */ ++ error3: ++ usb_remove_hcd(hcd); ++ error2: ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++ error1: ++ return retval; ++} ++ ++/** ++ * Removes the HCD. ++ * Frees memory and resources associated with the HCD and deregisters the bus. ++ */ ++void dwc_otg_hcd_remove(struct platform_device *pdev) ++{ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); ++ dwc_otg_hcd_t *dwc_otg_hcd; ++ struct usb_hcd *hcd; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); ++ ++ if (!otg_dev) { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); ++ return; ++ } ++ ++ dwc_otg_hcd = otg_dev->hcd; ++ ++ if (!dwc_otg_hcd) { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); ++ return; ++ } ++ ++ hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); ++ ++ if (!hcd) { ++ DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__); ++ return; ++ } ++ ++ /* Turn off all interrupts */ ++ dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0); ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0); ++ ++ usb_remove_hcd(hcd); ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++} ++ ++/* ========================================================================= ++ * Linux HC Driver Functions ++ * ========================================================================= */ ++ ++/** ++ * Initializes dynamic portions of the DWC_otg HCD state. ++ */ ++static void hcd_reinit(dwc_otg_hcd_t *hcd) ++{ ++ struct list_head *item; ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ hcd->flags.d32 = 0; ++ ++ hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active; ++ hcd->non_periodic_channels = 0; ++ hcd->periodic_channels = 0; ++ ++ /* ++ * Put all channels in the free channel list and clean up channel ++ * states. ++ */ ++ item = hcd->free_hc_list.next; ++ while (item != &hcd->free_hc_list) { ++ list_del(item); ++ item = hcd->free_hc_list.next; ++ } ++ num_channels = hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ channel = hcd->hc_ptr_array[i]; ++ list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list); ++ dwc_otg_hc_cleanup(hcd->core_if, channel); ++ } ++ ++ /* Initialize the DWC core for host mode operation. */ ++ dwc_otg_core_host_init(hcd->core_if); ++} ++ ++/** Initializes the DWC_otg controller and its root hub and prepares it for host ++ * mode operation. Activates the root port. Returns 0 on success and a negative ++ * error code on failure. */ ++int dwc_otg_hcd_start(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ struct usb_bus *bus; ++ ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); ++ ++ bus = hcd_to_bus(hcd); ++ ++ /* Initialize the bus state. If the core is in Device Mode ++ * HALT the USB bus and return. 
*/ ++ if (dwc_otg_is_device_mode(core_if)) { ++ hcd->state = HC_STATE_RUNNING; ++ return 0; ++ } ++ hcd->state = HC_STATE_RUNNING; ++ ++ /* Initialize and connect root hub if one is not already attached */ ++ if (bus->root_hub) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n"); ++ /* Inform the HUB driver to resume. */ ++ usb_hcd_resume_root_hub(hcd); ++ } ++ else { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n"); ++ } ++ ++ hcd_reinit(dwc_otg_hcd); ++ ++ return 0; ++} ++ ++static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list) ++{ ++ struct list_head *item; ++ dwc_otg_qh_t *qh; ++ ++ if (!qh_list->next) { ++ /* The list hasn't been initialized yet. */ ++ return; ++ } ++ ++ /* Ensure there are no QTDs or URBs left. */ ++ kill_urbs_in_qh_list(hcd, qh_list); ++ ++ for (item = qh_list->next; item != qh_list; item = qh_list->next) { ++ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ dwc_otg_hcd_qh_remove_and_free(hcd, qh); ++ } ++} ++ ++/** ++ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are ++ * stopped. ++ */ ++void dwc_otg_hcd_stop(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ hprt0_data_t hprt0 = { .d32=0 }; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); ++ ++ /* Turn off all host-specific interrupts. */ ++ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); ++ ++ /* ++ * The root hub should be disconnected before this function is called. ++ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) ++ * and the QH lists (via ..._hcd_endpoint_disable). ++ */ ++ ++ /* Turn off the vbus power */ ++ DWC_PRINT("PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++} ++ ++/** Returns the current frame number. */ ++int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ hfnum_data_t hfnum; ++ ++ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if-> ++ host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum); ++#endif ++ return hfnum.b.frnum; ++} ++ ++/** ++ * Frees secondary storage associated with the dwc_otg_hcd structure contained ++ * in the struct usb_hcd field. ++ */ ++void dwc_otg_hcd_free(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ int i; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* Free memory for QH/QTD lists */ ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued); ++ ++ /* Free memory for the host channels. 
*/ ++ for (i = 0; i < MAX_EPS_CHANNELS; i++) { ++ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i]; ++ if (hc != NULL) { ++ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc); ++ kfree(hc); ++ } ++ } ++ ++ if (dwc_otg_hcd->core_if->dma_enable) { ++ if (dwc_otg_hcd->status_buf_dma) { ++ dma_free_coherent(hcd->self.controller, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ dwc_otg_hcd->status_buf, ++ dwc_otg_hcd->status_buf_dma); ++ } ++ } else if (dwc_otg_hcd->status_buf != NULL) { ++ kfree(dwc_otg_hcd->status_buf); ++ } ++} ++ ++#ifdef DEBUG ++static void dump_urb_info(struct urb *urb, char* fn_name) ++{ ++ DWC_PRINT("%s, urb %p\n", fn_name, urb); ++ DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? "IN" : "OUT")); ++ DWC_PRINT(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_PRINT(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)urb->transfer_dma); ++ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)urb->setup_dma); ++ DWC_PRINT(" Interval: %d\n", urb->interval); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d:\n", i); ++ DWC_PRINT(" offset: %d, length %d\n", ++ urb->iso_frame_desc[i].offset, ++ urb->iso_frame_desc[i].length); ++ } ++ } ++} ++ ++static void dump_channel_info(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh) ++{ ++ if (qh->channel != NULL) { ++ dwc_hc_t *hc = qh->channel; ++ struct list_head *item; ++ dwc_otg_qh_t *qh_item; ++ int num_channels = hcd->core_if->core_params->host_channels; ++ int i; ++ ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ ++ hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_PRINT(" Assigned to channel %p:\n", hc); ++ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ DWC_PRINT(" NP inactive sched:\n"); ++ 
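/* The loops below print each QH queued on the inactive and active non-periodic schedules, then the channel pointer array. */ ++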
list_for_each(item, &hcd->non_periodic_sched_inactive) { ++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ DWC_PRINT(" %p\n", qh_item); ++ } ++ DWC_PRINT(" NP active sched:\n"); ++ list_for_each(item, &hcd->non_periodic_sched_active) { ++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ DWC_PRINT(" %p\n", qh_item); ++ } ++ DWC_PRINT(" Channels: \n"); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = hcd->hc_ptr_array[i]; ++ DWC_PRINT(" %2d: %p\n", i, hc); ++ } ++ } ++} ++#endif ++ ++ ++//The OTG host requires the DMA addr to be DWORD-aligned, ++//patch it if the buffer is not DWORD-aligned ++inline ++void hcd_check_and_patch_dma_addr(struct urb *urb){ ++ ++ if((!urb->transfer_buffer)||!urb->transfer_dma||urb->transfer_dma==0xffffffff) ++ return; ++ ++ if(((u32)urb->transfer_buffer)& 0x3){ ++ /* ++ printk("%s: " ++ "urb(%.8x) " ++ "transfer_buffer=%.8x, " ++ "transfer_dma=%.8x, " ++ "transfer_buffer_length=%d, " ++ "actual_length=%d(%x), " ++ "\n", ++ ((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT)?"OUT":"IN", ++ urb, ++ urb->transfer_buffer, ++ urb->transfer_dma, ++ urb->transfer_buffer_length, ++ urb->actual_length,urb->actual_length ++ ); ++ */ ++ if(!urb->aligned_transfer_buffer||urb->aligned_transfer_buffer_length<urb->transfer_buffer_length){ ++ urb->aligned_transfer_buffer_length=urb->transfer_buffer_length; ++ if(urb->aligned_transfer_buffer) { ++ kfree(urb->aligned_transfer_buffer); ++ } ++ urb->aligned_transfer_buffer=kmalloc(urb->aligned_transfer_buffer_length,GFP_KERNEL|GFP_DMA|GFP_ATOMIC); ++ urb->aligned_transfer_dma=dma_map_single(NULL,(void *)(urb->aligned_transfer_buffer),(urb->aligned_transfer_buffer_length),DMA_FROM_DEVICE); ++ if(!urb->aligned_transfer_buffer){ ++ DWC_ERROR("Cannot alloc required buffer!!\n"); ++ BUG(); ++ } ++ //printk(" new allocated aligned_buf=%.8x aligned_buf_len=%d\n", (u32)urb->aligned_transfer_buffer, urb->aligned_transfer_buffer_length); ++ } ++ urb->transfer_dma=urb->aligned_transfer_dma; ++ if((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_OUT) { ++ memcpy(urb->aligned_transfer_buffer,urb->transfer_buffer,urb->transfer_buffer_length); ++ dma_sync_single_for_device(NULL,urb->transfer_dma,urb->transfer_buffer_length,DMA_TO_DEVICE); ++ } ++ } ++} ++ ++ ++ ++/** Starts processing a USB transfer request specified by a USB Request Block ++ * (URB). mem_flags indicates the type of memory allocation to use while ++ * processing this URB. */ ++int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++// struct usb_host_endpoint *ep, ++ struct urb *urb, ++ gfp_t mem_flags ++ ) ++{ ++ int retval = 0; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_qtd_t *qtd; ++ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue"); ++ } ++#endif ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* No longer connected. */ ++ return -ENODEV; ++ } ++ ++ hcd_check_and_patch_dma_addr(urb); ++ qtd = dwc_otg_hcd_qtd_create(urb); ++ if (qtd == NULL) { ++ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n"); ++ return -ENOMEM; ++ } ++ ++ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd); ++ if (retval < 0) { ++ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. " ++ "Error status %d\n", retval); ++ dwc_otg_hcd_qtd_free(qtd); ++ } ++ ++ return retval; ++} ++ ++/** Aborts/cancels a USB transfer request. Always returns 0 to indicate ++ * success.
*/ ++int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++ struct urb *urb, int status) ++{ ++ unsigned long flags; ++ dwc_otg_hcd_t *dwc_otg_hcd; ++ dwc_otg_qtd_t *urb_qtd; ++ dwc_otg_qh_t *qh; ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n"); ++ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ ++ urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv; ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue"); ++ if (urb_qtd == qh->qtd_in_process) { ++ dump_channel_info(dwc_otg_hcd, qh); ++ } ++ } ++#endif ++ ++ if (urb_qtd == qh->qtd_in_process) { ++ /* The QTD is in process (it has been assigned to a channel). */ ++ ++ if (dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * If still connected (i.e. in host mode), halt the ++ * channel so it can be used for other transfers. If ++ * no longer connected, the host registers can't be ++ * written to halt the channel since the core is in ++ * device mode. ++ */ ++ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel, ++ DWC_OTG_HC_XFER_URB_DEQUEUE); ++ } ++ } ++ ++ /* ++ * Free the QTD and clean up the associated QH. Leave the QH in the ++ * schedule if it has any remaining QTDs. ++ */ ++ dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd); ++ if (urb_qtd == qh->qtd_in_process) { ++ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0); ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ } else if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++ urb->hcpriv = NULL; ++ ++ /* Higher layer software sets URB status. */ ++ usb_hcd_giveback_urb(hcd, urb, status); ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); ++ DWC_PRINT(" urb->status = %d\n", urb->status); ++ } ++ ++ return 0; ++} ++ ++/** Frees resources in the DWC_otg controller related to a given endpoint. Also ++ * clears state in the HCD related to the endpoint. Any URBs for the endpoint ++ * must already be dequeued. */ ++void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_qh_t *qh; ++ ++ unsigned long flags; ++ int retry = 0; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, " ++ "endpoint=%d\n", ep->desc.bEndpointAddress, ++ dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress)); ++ ++rescan: ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ qh = (dwc_otg_qh_t *)(ep->hcpriv); ++ if (!qh) ++ goto done; ++ ++ /** Check that the QTD list is really empty */ ++ if (!list_empty(&qh->qtd_list)) { ++ if (retry++ < 250) { ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ schedule_timeout_uninterruptible(1); ++ goto rescan; ++ } ++ ++ DWC_WARN("DWC OTG HCD EP DISABLE:" ++ " QTD List for this endpoint is not empty\n"); ++ } ++ ++ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); ++ ep->hcpriv = NULL; ++done: ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++} ++ ++/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if ++ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid ++ * interrupt. 
++ * ++ * This function is called by the USB core when an interrupt occurs */ ++irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd) ++{ ++ int retVal = 0; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ retVal = dwc_otg_hcd_handle_intr(dwc_otg_hcd); ++ if (dwc_otg_hcd->flags.b.port_connect_status_change == 1) ++ usb_hcd_poll_rh_status(hcd); ++ return IRQ_RETVAL(retVal); ++} ++ ++/** Creates Status Change bitmap for the root hub and root port. The bitmap is ++ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1 ++ * is the status change indicator for the single root port. Returns 1 if either ++ * change indicator is 1, otherwise returns 0. */ ++int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ ++ buf[0] = 0; ++ buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change || ++ dwc_otg_hcd->flags.b.port_reset_change || ++ dwc_otg_hcd->flags.b.port_enable_change || ++ dwc_otg_hcd->flags.b.port_suspend_change || ++ dwc_otg_hcd->flags.b.port_over_current_change) << 1; ++ ++#ifdef DEBUG ++ if (buf[0]) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:" ++ " Root port status changed\n"); ++ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n", ++ dwc_otg_hcd->flags.b.port_connect_status_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n", ++ dwc_otg_hcd->flags.b.port_reset_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n", ++ dwc_otg_hcd->flags.b.port_enable_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n", ++ dwc_otg_hcd->flags.b.port_suspend_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n", ++ dwc_otg_hcd->flags.b.port_over_current_change); ++ } ++#endif ++ return (buf[0] != 0); ++} ++ ++#ifdef DWC_HS_ELECT_TST ++/* ++ * Quick and dirty hack to implement the HS Electrical Test ++ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. ++ * ++ * This code was copied from our userspace app "hset". It sends a ++ * Get Device Descriptor control sequence in two parts, first the ++ * Setup packet by itself, followed some time later by the In and ++ * Ack packets. Rather than trying to figure out how to add this ++ * functionality to the normal driver code, we just hijack the ++ * hardware, using these two function to drive the hardware ++ * directly. 
++ */ ++ ++dwc_otg_core_global_regs_t *global_regs; ++dwc_otg_host_global_regs_t *hc_global_regs; ++dwc_otg_hc_regs_t *hc_regs; ++uint32_t *data_fifo; ++ ++static void do_setup(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Send Setup packet (Get Device Descriptor) ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++// hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ /* Fill FIFO with Setup data for Get Device Descriptor */ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ dwc_write_reg32(data_fifo++, 0x01000680); ++ dwc_write_reg32(data_fifo++, 0x00080000); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); 
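++ /* Busy-poll GINTSTS directly; the caller cleared GINTMSK, so hcintr is never delivered as an IRQ here. */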
++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++ ++static void do_in_ack(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ host_grxsts_data_t grxsts; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Receive Control In packet ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ 
++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 1; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer */ ++ if (grxsts.b.bcnt > 0) { ++ int i; ++ int word_count = (grxsts.b.bcnt + 3) / 4; ++ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ for (i = 0; i < word_count; i++) { ++ (void)dwc_read_reg32(data_fifo++); ++ } ++ } ++ ++ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt); ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++// usleep(100000); 
++// mdelay(100); ++ mdelay(1); ++ ++ /* ++ * Send handshake packet ++ */ ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 0; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* 
Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++#endif /* DWC_HS_ELECT_TST */ ++ ++/** Handles hub class-specific requests. */ ++int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength) ++{ ++ int retval = 0; ++ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if; ++ struct usb_hub_descriptor *desc; ++ hprt0_data_t hprt0 = {.d32 = 0}; ++ ++ uint32_t port_status; ++ ++ switch (typeReq) { ++ case ClearHubFeature: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearHubFeature 0x%x\n", wValue); ++ switch (wValue) { ++ case C_HUB_LOCAL_POWER: ++ case C_HUB_OVER_CURRENT: ++ /* Nothing required here */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "ClearHubFeature request %xh unknown\n", wValue); ++ } ++ break; ++ case ClearPortFeature: ++ if (!wIndex || wIndex > 1) ++ goto error; ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_ENABLE: ++ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtena = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ /* Clear Resume bit */ ++ mdelay(100); ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_POWER: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_POWER\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); ++ /* Port inidicator not supported */ ++ break; ++ case USB_PORT_FEAT_C_CONNECTION: ++ /* Clears drivers internal connect status change ++ * flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); ++ dwc_otg_hcd->flags.b.port_connect_status_change = 0; ++ break; ++ case USB_PORT_FEAT_C_RESET: ++ /* Clears the driver's internal Port Reset Change ++ * flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); ++ dwc_otg_hcd->flags.b.port_reset_change = 0; ++ break; ++ case USB_PORT_FEAT_C_ENABLE: ++ /* Clears the driver's internal Port ++ * Enable/Disable Change flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); ++ dwc_otg_hcd->flags.b.port_enable_change = 0; ++ break; ++ case USB_PORT_FEAT_C_SUSPEND: ++ /* Clears the driver's internal Port Suspend ++ * Change flag, which is set when resume signaling on ++ * the host port is complete */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); ++ dwc_otg_hcd->flags.b.port_suspend_change = 0; ++ break; ++ case USB_PORT_FEAT_C_OVER_CURRENT: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); ++ 
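/* Clears the driver's internal Port Over-current Change flag */ ++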
dwc_otg_hcd->flags.b.port_over_current_change = 0; ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "ClearPortFeature request %xh " ++ "unknown or unsupported\n", wValue); ++ } ++ break; ++ case GetHubDescriptor: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetHubDescriptor\n"); ++ desc = (struct usb_hub_descriptor *)buf; ++ desc->bDescLength = 9; ++ desc->bDescriptorType = 0x29; ++ desc->bNbrPorts = 1; ++ desc->wHubCharacteristics = 0x08; ++ desc->bPwrOn2PwrGood = 1; ++ desc->bHubContrCurrent = 0; ++ desc->u.hs.DeviceRemovable[0] = 0; ++ desc->u.hs.DeviceRemovable[1] = 0xff; ++ break; ++ case GetHubStatus: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetHubStatus\n"); ++ memset(buf, 0, 4); ++ break; ++ case GetPortStatus: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetPortStatus\n"); ++ ++ if (!wIndex || wIndex > 1) ++ goto error; ++ ++ port_status = 0; ++ ++ if (dwc_otg_hcd->flags.b.port_connect_status_change) ++ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); ++ ++ if (dwc_otg_hcd->flags.b.port_enable_change) ++ port_status |= (1 << USB_PORT_FEAT_C_ENABLE); ++ ++ if (dwc_otg_hcd->flags.b.port_suspend_change) ++ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND); ++ ++ if (dwc_otg_hcd->flags.b.port_reset_change) ++ port_status |= (1 << USB_PORT_FEAT_C_RESET); ++ ++ if (dwc_otg_hcd->flags.b.port_over_current_change) { ++ DWC_ERROR("Device Not Supported\n"); ++ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT); ++ } ++ ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * The port is disconnected, which means the core is ++ * either in device mode or it soon will be. Just ++ * return 0's for the remainder of the port status ++ * since the port register can't be read if the core ++ * is in device mode. ++ */ ++ *((__le32 *) buf) = cpu_to_le32(port_status); ++ break; ++ } ++ ++ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0); ++ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32); ++ ++ if (hprt0.b.prtconnsts) ++ port_status |= (1 << USB_PORT_FEAT_CONNECTION); ++ ++ if (hprt0.b.prtena) ++ port_status |= (1 << USB_PORT_FEAT_ENABLE); ++ ++ if (hprt0.b.prtsusp) ++ port_status |= (1 << USB_PORT_FEAT_SUSPEND); ++ ++ if (hprt0.b.prtovrcurract) ++ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); ++ ++ if (hprt0.b.prtrst) ++ port_status |= (1 << USB_PORT_FEAT_RESET); ++ ++ if (hprt0.b.prtpwr) ++ port_status |= (1 << USB_PORT_FEAT_POWER); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) ++ port_status |= (USB_PORT_STAT_HIGH_SPEED); ++ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ++ port_status |= (USB_PORT_STAT_LOW_SPEED); ++ ++ if (hprt0.b.prttstctl) ++ port_status |= (1 << USB_PORT_FEAT_TEST); ++ ++ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ ++ ++ *((__le32 *) buf) = cpu_to_le32(port_status); ++ ++ break; ++ case SetHubFeature: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetHubFeature\n"); ++ /* No HUB features supported */ ++ break; ++ case SetPortFeature: ++ if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1)) ++ goto error; ++ ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * The port is disconnected, which means the core is ++ * either in device mode or it soon will be. Just ++ * return without doing anything since the port ++ * register can't be written if the core is in device ++ * mode. 
++ */ ++ break; ++ } ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); ++ if (hcd->self.otg_port == wIndex && ++ hcd->self.b_hnp_enable) { ++ gotgctl_data_t gotgctl = {.d32=0}; ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, ++ 0, gotgctl.d32); ++ core_if->op_state = A_SUSPEND; ++ } ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32); ++ /* Suspend the Phy Clock */ ++ { ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ pcgcctl.b.stoppclk = 1; ++ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32); ++ } ++ ++ /* For HNP the bus must be suspended for at least 200ms. */ ++ if (hcd->self.b_hnp_enable) { ++ mdelay(200); ++ //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state); ++ } ++ break; ++ case USB_PORT_FEAT_POWER: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_POWER\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_RESET: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_RESET\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ /* When B-Host the Port reset bit is set in ++ * the Start HCD Callback function, so that ++ * the reset is started within 1ms of the HNP ++ * success interrupt. */ ++ if (!hcd->self.is_b_host) { ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } ++ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ ++ MDELAY(60); ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ ++#ifdef DWC_HS_ELECT_TST ++ case USB_PORT_FEAT_TEST: ++ { ++ uint32_t t; ++ gintmsk_data_t gintmsk; ++ ++ t = (wIndex >> 8); /* MSB wIndex USB */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t); ++ warn("USB_PORT_FEAT_TEST %d\n", t); ++ if (t < 6) { ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prttstctl = t; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } else { ++ /* Setup global vars with reg addresses (quick and ++ * dirty hack, should be cleaned up) ++ */ ++ global_regs = core_if->core_global_regs; ++ hc_global_regs = core_if->host_if->host_global_regs; ++ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500); ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive suspend on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 1; ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive resume on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 0; ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(100); ++ ++ /* Clear the resume bit */ ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Restore interrupts */ ++ 
dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Send the In and Ack packets */ ++ do_in_ack(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } ++ } ++ break; ++ } ++#endif /* DWC_HS_ELECT_TST */ ++ ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); ++ /* Not supported */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "SetPortFeature request %xh " ++ "unknown or unsupported\n", wValue); ++ break; ++ } ++ break; ++ default: ++ error: ++ retval = -EINVAL; ++ DWC_WARN("DWC OTG HCD - " ++ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n", ++ typeReq, wIndex, wValue); ++ break; ++ } ++ ++ return retval; ++} ++ ++/** ++ * Assigns transactions from a QTD to a free host channel and initializes the ++ * host channel to perform the transactions. The host channel is removed from ++ * the free list. ++ * ++ * @param hcd The HCD state structure. ++ * @param qh Transactions from the first QTD for this QH are selected and ++ * assigned to a free host channel. ++ */ ++static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ dwc_hc_t *hc; ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh); ++ ++ hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry); ++ ++ /* Remove the host channel from the free list. */ ++ list_del_init(&hc->hc_list_entry); ++ ++ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ urb = qtd->urb; ++ qh->channel = hc; ++ qh->qtd_in_process = qtd; ++ ++ /* ++ * Use usb_pipedevice to determine device address. This address is ++ * 0 before the SET_ADDRESS command and the correct address afterward. 
++ */ ++ hc->dev_addr = usb_pipedevice(urb->pipe); ++ hc->ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (urb->dev->speed == USB_SPEED_LOW) { ++ hc->speed = DWC_OTG_EP_SPEED_LOW; ++ } else if (urb->dev->speed == USB_SPEED_FULL) { ++ hc->speed = DWC_OTG_EP_SPEED_FULL; ++ } else { ++ hc->speed = DWC_OTG_EP_SPEED_HIGH; ++ } ++ ++ hc->max_packet = dwc_max_packet(qh->maxp); ++ ++ hc->xfer_started = 0; ++ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; ++ hc->error_state = (qtd->error_count > 0); ++ hc->halt_on_queue = 0; ++ hc->halt_pending = 0; ++ hc->requests = 0; ++ ++ /* ++ * The following values may be modified in the transfer type section ++ * below. The xfer_len value may be reduced when the transfer is ++ * started to accommodate the max widths of the XferSize and PktCnt ++ * fields in the HCTSIZn register. ++ */ ++ hc->do_ping = qh->ping_state; ++ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); ++ hc->data_pid_start = qh->data_toggle; ++ hc->multi_count = 1; ++ ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length; ++ } ++ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length; ++ hc->xfer_count = 0; ++ ++ /* ++ * Set the split attributes ++ */ ++ hc->do_split = 0; ++ if (qh->do_split) { ++ hc->do_split = 1; ++ hc->xact_pos = qtd->isoc_split_pos; ++ hc->complete_split = qtd->complete_split; ++ hc->hub_addr = urb->dev->tt->hub->devnum; ++ hc->port_addr = urb->dev->ttport; ++ } ++ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n"); ++ hc->do_ping = 0; ++ hc->ep_is_in = 0; ++ hc->data_pid_start = DWC_OTG_HC_PID_SETUP; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->setup_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->setup_packet; ++ } ++ hc->xfer_len = 8; ++ break; ++ case DWC_OTG_CONTROL_DATA: ++ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n"); ++ hc->data_pid_start = qtd->data_toggle; ++ break; ++ case DWC_OTG_CONTROL_STATUS: ++ /* ++ * Direction is opposite of data direction or IN if no ++ * data. 
++ */ ++ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n"); ++ if (urb->transfer_buffer_length == 0) { ++ hc->ep_is_in = 1; ++ } else { ++ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN); ++ } ++ if (hc->ep_is_in) { ++ hc->do_ping = 0; ++ } ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ hc->xfer_len = 0; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)hcd->status_buf_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)hcd->status_buf; ++ } ++ break; ++ } ++ break; ++ case PIPE_BULK: ++ hc->ep_type = DWC_OTG_EP_TYPE_BULK; ++ break; ++ case PIPE_INTERRUPT: ++ hc->ep_type = DWC_OTG_EP_TYPE_INTR; ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->transfer_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer; ++ } ++ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset; ++ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset; ++ ++ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ if (hc->xfer_len <= 188) { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ } ++ else { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN; ++ } ++ } ++ } ++ break; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This value may be modified when the transfer is started to ++ * reflect the actual transfer length. ++ */ ++ hc->multi_count = dwc_hb_mult(qh->maxp); ++ } ++ ++ dwc_otg_hc_init(hcd->core_if, hc); ++ hc->qh = qh; ++} ++ ++/** ++ * This function selects transactions from the HCD transfer schedule and ++ * assigns them to available host channels. It is called from HCD interrupt ++ * handler functions. ++ * ++ * @param hcd The HCD state structure. ++ * ++ * @return The types of new transactions that were assigned to host channels. ++ */ ++dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd) ++{ ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int num_channels; ++ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE; ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); ++#endif ++ ++ /* Process entries in the periodic ready list. */ ++ qh_ptr = hcd->periodic_sched_ready.next; ++ while (qh_ptr != &hcd->periodic_sched_ready && ++ !list_empty(&hcd->free_hc_list)) { ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(hcd, qh); ++ ++ /* ++ * Move the QH from the periodic ready schedule to the ++ * periodic assigned schedule. ++ */ ++ qh_ptr = qh_ptr->next; ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned); ++ ++ ret_val = DWC_OTG_TRANSACTION_PERIODIC; ++ } ++ ++ /* ++ * Process entries in the inactive portion of the non-periodic ++ * schedule. Some free host channels may not be used if they are ++ * reserved for periodic transfers. ++ */ ++ qh_ptr = hcd->non_periodic_sched_inactive.next; ++ num_channels = hcd->core_if->core_params->host_channels; ++ while (qh_ptr != &hcd->non_periodic_sched_inactive && ++ (hcd->non_periodic_channels < ++ num_channels - hcd->periodic_channels) && ++ !list_empty(&hcd->free_hc_list)) { ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(hcd, qh); ++ ++ /* ++ * Move the QH from the non-periodic inactive schedule to the ++ * non-periodic active schedule. 
++ */ ++ qh_ptr = qh_ptr->next; ++ list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active); ++ ++ if (ret_val == DWC_OTG_TRANSACTION_NONE) { ++ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC; ++ } else { ++ ret_val = DWC_OTG_TRANSACTION_ALL; ++ } ++ ++ hcd->non_periodic_channels++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Attempts to queue a single transaction request for a host channel ++ * associated with either a periodic or non-periodic transfer. This function ++ * assumes that there is space available in the appropriate request queue. For ++ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space ++ * is available in the appropriate Tx FIFO. ++ * ++ * @param hcd The HCD state structure. ++ * @param hc Host channel descriptor associated with either a periodic or ++ * non-periodic transfer. ++ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx ++ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic ++ * transfers. ++ * ++ * @return 1 if a request is queued and more requests may be needed to ++ * complete the transfer, 0 if no more requests are required for this ++ * transfer, -1 if there is insufficient space in the Tx FIFO. ++ */ ++static int queue_transaction(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ uint16_t fifo_dwords_avail) ++{ ++ int retval; ++ ++ if (hcd->core_if->dma_enable) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ hc->qh->ping_state = 0; ++ } ++ retval = 0; ++ } else if (hc->halt_pending) { ++ /* Don't queue a request if the channel has been halted. */ ++ retval = 0; ++ } else if (hc->halt_on_queue) { ++ dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status); ++ retval = 0; ++ } else if (hc->do_ping) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ } ++ retval = 0; ++ } else if (!hc->ep_is_in || ++ hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ if ((fifo_dwords_avail * 4) >= hc->max_packet) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); ++ } ++ } else { ++ retval = -1; ++ } ++ } else { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); ++ } ++ } ++ ++ return retval; ++} ++ ++/** ++ * Processes active non-periodic channels and queues transactions for these ++ * channels to the DWC_otg controller. After queueing transactions, the NP Tx ++ * FIFO Empty interrupt is enabled if there are more transactions to queue as ++ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx ++ * FIFO Empty interrupt is disabled. ++ */ ++static void process_non_periodic_channels(dwc_otg_hcd_t *hcd) ++{ ++ gnptxsts_data_t tx_status; ++ struct list_head *orig_qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ int more_to_do = 0; ++ ++ dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ /* ++ * Keep track of the starting point. Skip over the start-of-list ++ * entry. 
++ */ ++ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ orig_qh_ptr = hcd->non_periodic_qh_ptr; ++ ++ /* ++ * Process once through the active list or until no more space is ++ * available in the request queue or the Tx FIFO. ++ */ ++ do { ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail); ++ ++ if (status > 0) { ++ more_to_do = 1; ++ } else if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* Advance to next QH, skipping start-of-list entry. */ ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ ++ } while (hcd->non_periodic_qh_ptr != orig_qh_ptr); ++ ++ if (!hcd->core_if->dma_enable) { ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ intr_mask.b.nptxfempty = 1; ++ ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ if (more_to_do || no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the non-periodic ++ * Tx FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Processes periodic channels for the next frame and queues transactions for ++ * these channels to the DWC_otg controller. After queueing transactions, the ++ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions ++ * to queue as Periodic Tx FIFO or request queue space becomes available. ++ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. 
++ */ ++static void process_periodic_channels(dwc_otg_hcd_t *hcd) ++{ ++ hptxsts_data_t tx_status; ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ ++ dwc_otg_host_global_regs_t *host_regs; ++ host_regs = hcd->core_if->host_if->host_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ ++ qh_ptr = hcd->periodic_sched_assigned.next; ++ while (qh_ptr != &hcd->periodic_sched_assigned) { ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ if (tx_status.b.ptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ ++ /* ++ * Set a flag if we're queuing high-bandwidth in slave mode. ++ * The flag prevents any halts to get into the request queue in ++ * the middle of multiple high-bandwidth packets getting queued. ++ */ ++ if (!hcd->core_if->dma_enable && ++ qh->channel->multi_count > 1) ++ { ++ hcd->core_if->queuing_high_bandwidth = 1; ++ } ++ ++ status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail); ++ if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* ++ * In Slave mode, stay on the current transfer until there is ++ * nothing more to do or the high-bandwidth request count is ++ * reached. In DMA mode, only need to queue one request. The ++ * controller automatically handles multiple packets for ++ * high-bandwidth transfers. ++ */ ++ if (hcd->core_if->dma_enable || status == 0 || ++ qh->channel->requests == qh->channel->multi_count) { ++ qh_ptr = qh_ptr->next; ++ /* ++ * Move the QH from the periodic assigned schedule to ++ * the periodic queued schedule. ++ */ ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued); ++ ++ /* done queuing high bandwidth */ ++ hcd->core_if->queuing_high_bandwidth = 0; ++ } ++ } ++ ++ if (!hcd->core_if->dma_enable) { ++ dwc_otg_core_global_regs_t *global_regs; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ global_regs = hcd->core_if->core_global_regs; ++ intr_mask.b.ptxfempty = 1; ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ if (!list_empty(&hcd->periodic_sched_assigned) || ++ no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the periodic Tx ++ * FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * This function processes the currently active host channels and queues ++ * transactions for these channels to the DWC_otg controller. 
It is called ++ * from HCD interrupt handler functions. ++ * ++ * @param hcd The HCD state structure. ++ * @param tr_type The type(s) of transactions to queue (non-periodic, ++ * periodic, or both). ++ */ ++void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, ++ dwc_otg_transaction_type_e tr_type) ++{ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); ++#endif ++ /* Process host channels associated with periodic transfers. */ ++ if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC || ++ tr_type == DWC_OTG_TRANSACTION_ALL) && ++ !list_empty(&hcd->periodic_sched_assigned)) { ++ ++ process_periodic_channels(hcd); ++ } ++ ++ /* Process host channels associated with non-periodic transfers. */ ++ if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC || ++ tr_type == DWC_OTG_TRANSACTION_ALL) { ++ if (!list_empty(&hcd->non_periodic_sched_active)) { ++ process_non_periodic_channels(hcd); ++ } else { ++ /* ++ * Ensure NP Tx FIFO empty interrupt is disabled when ++ * there are no non-periodic transfers to process. ++ */ ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk, ++ gintmsk.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Sets the final status of an URB and returns it to the device driver. Any ++ * required cleanup of the URB is performed. ++ */ ++void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status) ++{ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n", ++ __func__, urb, usb_pipedevice(urb->pipe), ++ usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? "IN" : "OUT", status); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d status: %d\n", ++ i, urb->iso_frame_desc[i].status); ++ } ++ } ++ } ++#endif ++ ++ //if we use the aligned buffer instead of the original unaligned buffer, ++ //for IN data, we have to move the data to the original buffer ++ if((urb->transfer_dma==urb->aligned_transfer_dma)&&((urb->transfer_flags & URB_DIR_MASK)==URB_DIR_IN)){ ++ dma_sync_single_for_device(NULL,urb->transfer_dma,urb->actual_length,DMA_FROM_DEVICE); ++ memcpy(urb->transfer_buffer,urb->aligned_transfer_buffer,urb->actual_length); ++ } ++ ++ ++ urb->status = status; ++ urb->hcpriv = NULL; ++ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status); ++} ++ ++/* ++ * Returns the Queue Head for an URB. ++ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb) ++{ ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); ++ return (dwc_otg_qh_t *)ep->hcpriv; ++} ++ ++#ifdef DEBUG ++void dwc_print_setup_data(uint8_t *setup) ++{ ++ int i; ++ if (CHK_DEBUG_LEVEL(DBG_HCD)){ ++ DWC_PRINT("Setup Data = MSB "); ++ for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]); ++ DWC_PRINT("\n"); ++ DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0] & 0x80) ? 
"Device-to-Host" : "Host-to-Device"); ++ DWC_PRINT(" bmRequestType Type = "); ++ switch ((setup[0] & 0x60) >> 5) { ++ case 0: DWC_PRINT("Standard\n"); break; ++ case 1: DWC_PRINT("Class\n"); break; ++ case 2: DWC_PRINT("Vendor\n"); break; ++ case 3: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bmRequestType Recipient = "); ++ switch (setup[0] & 0x1f) { ++ case 0: DWC_PRINT("Device\n"); break; ++ case 1: DWC_PRINT("Interface\n"); break; ++ case 2: DWC_PRINT("Endpoint\n"); break; ++ case 3: DWC_PRINT("Other\n"); break; ++ default: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); ++ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2])); ++ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4])); ++ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6])); ++ } ++} ++#endif ++ ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) { ++} ++ ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd) ++{ ++#ifdef DEBUG ++ int num_channels; ++ int i; ++ gnptxsts_data_t np_tx_status; ++ hptxsts_data_t p_tx_status; ++ ++ num_channels = hcd->core_if->core_params->host_channels; ++ DWC_PRINT("\n"); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("HCD State:\n"); ++ DWC_PRINT(" Num channels: %d\n", num_channels); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = hcd->hc_ptr_array[i]; ++ DWC_PRINT(" Channel %d:\n", i); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" speed: %d\n", hc->speed); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" multi_count: %d\n", hc->multi_count); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count); ++ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" do_split: %d\n", hc->do_split); ++ DWC_PRINT(" complete_split: %d\n", hc->complete_split); ++ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr); ++ DWC_PRINT(" port_addr: %d\n", hc->port_addr); ++ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos); ++ DWC_PRINT(" requests: %d\n", hc->requests); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ if (hc->xfer_started) { ++ hfnum_data_t hfnum; ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); ++ hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz); ++ hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk); ++ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32); ++ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32); ++ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32); ++ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32); ++ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32); ++ } ++ if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) { ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ qtd = hc->qh->qtd_in_process; ++ urb = qtd->urb; ++ DWC_PRINT(" URB Info:\n"); ++ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb); ++ if (urb) { ++ DWC_PRINT(" Dev: %d, EP: %d %s\n", ++ 
usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? "IN" : "OUT"); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer); ++ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma); ++ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" actual_length: %d\n", urb->actual_length); ++ } ++ } ++ } ++ DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels); ++ DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels); ++ DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs); ++ np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts); ++ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail); ++ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail); ++ p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts); ++ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail); ++ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail); ++ dwc_otg_hcd_dump_frrem(hcd); ++ dwc_otg_dump_global_registers(hcd->core_if); ++ dwc_otg_dump_host_registers(hcd->core_if); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("\n"); ++#endif ++} ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_hcd.h +@@ -0,0 +1,647 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.h $ ++ * $Revision: #45 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++#ifndef __DWC_HCD_H__ ++#define __DWC_HCD_H__ ++ ++#include ++#include ++#include ++ ++struct dwc_otg_device; ++ ++#include "otg_cil.h" ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Host Contoller Driver (HCD). ++ * ++ * The Host Controller Driver (HCD) is responsible for translating requests ++ * from the USB Driver into the appropriate actions on the DWC_otg controller. ++ * It isolates the USBD from the specifics of the controller by providing an ++ * API to the USBD. ++ */ ++ ++/** ++ * Phases for control transfers. ++ */ ++typedef enum dwc_otg_control_phase { ++ DWC_OTG_CONTROL_SETUP, ++ DWC_OTG_CONTROL_DATA, ++ DWC_OTG_CONTROL_STATUS ++} dwc_otg_control_phase_e; ++ ++/** Transaction types. */ ++typedef enum dwc_otg_transaction_type { ++ DWC_OTG_TRANSACTION_NONE, ++ DWC_OTG_TRANSACTION_PERIODIC, ++ DWC_OTG_TRANSACTION_NON_PERIODIC, ++ DWC_OTG_TRANSACTION_ALL ++} dwc_otg_transaction_type_e; ++ ++/** ++ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, ++ * interrupt, or isochronous transfer. A single QTD is created for each URB ++ * (of one of these types) submitted to the HCD. The transfer associated with ++ * a QTD may require one or multiple transactions. ++ * ++ * A QTD is linked to a Queue Head, which is entered in either the ++ * non-periodic or periodic schedule for execution. When a QTD is chosen for ++ * execution, some or all of its transactions may be executed. After ++ * execution, the state of the QTD is updated. The QTD may be retired if all ++ * its transactions are complete or if an error occurred. Otherwise, it ++ * remains in the schedule so more transactions can be executed later. ++ */ ++typedef struct dwc_otg_qtd { ++ /** ++ * Determines the PID of the next data packet for the data phase of ++ * control transfers. Ignored for other transfer types.
++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Current phase for control transfers (Setup, Data, or Status). */ ++ dwc_otg_control_phase_e control_phase; ++ ++ /** Keep track of the current split type ++ * for FS/LS endpoints on a HS Hub */ ++ uint8_t complete_split; ++ ++ /** How many bytes transferred during SSPLIT OUT */ ++ uint32_t ssplit_out_xfer_count; ++ ++ /** ++ * Holds the number of bus errors that have occurred for a transaction ++ * within this transfer. ++ */ ++ uint8_t error_count; ++ ++ /** ++ * Index of the next frame descriptor for an isochronous transfer. A ++ * frame descriptor describes the buffer position and length of the ++ * data to be transferred in the next scheduled (micro)frame of an ++ * isochronous transfer. It also holds status for that transaction. ++ * The frame index starts at 0. ++ */ ++ int isoc_frame_index; ++ ++ /** Position of the ISOC split on full/low speed */ ++ uint8_t isoc_split_pos; ++ ++ /** Position of the ISOC split in the buffer for the current frame */ ++ uint16_t isoc_split_offset; ++ ++ /** URB for this transfer */ ++ struct urb *urb; ++ ++ /** This list of QTDs */ ++ struct list_head qtd_list_entry; ++ ++} dwc_otg_qtd_t; ++ ++/** ++ * A Queue Head (QH) holds the static characteristics of an endpoint and ++ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may ++ * be entered in either the non-periodic or periodic schedule. ++ */ ++typedef struct dwc_otg_qh { ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - USB_ENDPOINT_XFER_CONTROL ++ * - USB_ENDPOINT_XFER_ISOC ++ * - USB_ENDPOINT_XFER_BULK ++ * - USB_ENDPOINT_XFER_INT ++ */ ++ uint8_t ep_type; ++ uint8_t ep_is_in; ++ ++ /** wMaxPacketSize Field of Endpoint Descriptor. */ ++ uint16_t maxp; ++ ++ /** ++ * Determines the PID of the next data packet for non-control ++ * transfers. Ignored for control transfers.
++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Ping state if 1. */ ++ uint8_t ping_state; ++ ++ /** ++ * List of QTDs for this QH. ++ */ ++ struct list_head qtd_list; ++ ++ /** Host channel currently processing transfers for this QH. */ ++ dwc_hc_t *channel; ++ ++ /** QTD currently assigned to a host channel for this QH. */ ++ dwc_otg_qtd_t *qtd_in_process; ++ ++ /** Full/low speed endpoint on high-speed hub requires split. */ ++ uint8_t do_split; ++ ++ /** @name Periodic schedule information */ ++ /** @{ */ ++ ++ /** Bandwidth in microseconds per (micro)frame. */ ++ uint8_t usecs; ++ ++ /** Interval between transfers in (micro)frames. */ ++ uint16_t interval; ++ ++ /** ++ * (micro)frame to initialize a periodic transfer. The transfer ++ * executes in the following (micro)frame. ++ */ ++ uint16_t sched_frame; ++ ++ /** (micro)frame at which last start split was initialized. */ ++ uint16_t start_split_frame; ++ ++ /** @} */ ++ ++ /** Entry for QH in either the periodic or non-periodic schedule. */ ++ struct list_head qh_list_entry; ++} dwc_otg_qh_t; ++ ++/** ++ * This structure holds the state of the HCD, including the non-periodic and ++ * periodic schedules. ++ */ ++typedef struct dwc_otg_hcd { ++ /** The DWC otg device pointer */ ++ struct dwc_otg_device *otg_dev; ++ ++ /** DWC OTG Core Interface Layer */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Internal DWC HCD Flags */ ++ volatile union dwc_otg_hcd_internal_flags { ++ uint32_t d32; ++ struct { ++ unsigned port_connect_status_change : 1; ++ unsigned port_connect_status : 1; ++ unsigned port_reset_change : 1; ++ unsigned port_enable_change : 1; ++ unsigned port_suspend_change : 1; ++ unsigned port_over_current_change : 1; ++ unsigned reserved : 27; ++ } b; ++ } flags; ++ ++ /** ++ * Inactive items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are not ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_inactive; ++ ++ /** ++ * Active items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_active; ++ ++ /** ++ * Pointer to the next Queue Head to process in the active ++ * non-periodic schedule. ++ */ ++ struct list_head *non_periodic_qh_ptr; ++ ++ /** ++ * Inactive items in the periodic schedule. This is a list of QHs for ++ * periodic transfers that are _not_ scheduled for the next frame. ++ * Each QH in the list has an interval counter that determines when it ++ * needs to be scheduled for execution. This scheduling mechanism ++ * allows only a simple calculation for periodic bandwidth used (i.e. ++ * must assume that all periodic transfers may need to execute in the ++ * same frame). However, it greatly simplifies scheduling and should ++ * be sufficient for the vast majority of OTG hosts, which need to ++ * connect to a small number of peripherals at one time. ++ * ++ * Items move from this list to periodic_sched_ready when the QH ++ * interval counter is 0 at SOF. ++ */ ++ struct list_head periodic_sched_inactive; ++ ++ /** ++ * List of periodic QHs that are ready for execution in the next ++ * frame, but have not yet been assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_assigned as host ++ * channels become available during the current frame. 
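++ *
++ * Taken together, a periodic QH normally cycles through the four periodic
++ * lists: inactive -> ready (at SOF, when its interval counter reaches 0) ->
++ * assigned (a host channel is claimed) -> queued (its transactions are
++ * queued to the controller) -> back to ready or inactive when the channel
++ * is released.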
++ */ ++ struct list_head periodic_sched_ready; ++ ++ /** ++ * List of periodic QHs to be executed in the next frame that are ++ * assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_queued as the ++ * transactions for the QH are queued to the DWC_otg controller. ++ */ ++ struct list_head periodic_sched_assigned; ++ ++ /** ++ * List of periodic QHs that have been queued for execution. ++ * ++ * Items move from this list to either periodic_sched_inactive or ++ * periodic_sched_ready when the channel associated with the transfer ++ * is released. If the interval for the QH is 1, the item moves to ++ * periodic_sched_ready because it must be rescheduled for the next ++ * frame. Otherwise, the item moves to periodic_sched_inactive. ++ */ ++ struct list_head periodic_sched_queued; ++ ++ /** ++ * Total bandwidth claimed so far for periodic transfers. This value ++ * is in microseconds per (micro)frame. The assumption is that all ++ * periodic transfers may occur in the same (micro)frame. ++ */ ++ uint16_t periodic_usecs; ++ ++ /** ++ * Frame number read from the core at SOF. The value ranges from 0 to ++ * DWC_HFNUM_MAX_FRNUM. ++ */ ++ uint16_t frame_number; ++ ++ /** ++ * Free host channels in the controller. This is a list of ++ * dwc_hc_t items. ++ */ ++ struct list_head free_hc_list; ++ ++ /** ++ * Number of host channels assigned to periodic transfers. Currently ++ * assuming that there is a dedicated host channel for each periodic ++ * transaction and at least one host channel available for ++ * non-periodic transactions. ++ */ ++ int periodic_channels; ++ ++ /** ++ * Number of host channels assigned to non-periodic transfers. ++ */ ++ int non_periodic_channels; ++ ++ /** ++ * Array of pointers to the host channel descriptors. Allows accessing ++ * a host channel descriptor given the host channel number. This is ++ * useful in interrupt handlers. ++ */ ++ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS]; ++ ++ /** ++ * Buffer to use for any data received during the status phase of a ++ * control transfer. Normally no data is transferred during the status ++ * phase. This buffer is used as a bit bucket. ++ */ ++ uint8_t *status_buf; ++ ++ /** ++ * DMA address for status_buf. ++ */ ++ dma_addr_t status_buf_dma; ++#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 ++ ++ /** ++ * Structure to allow starting the HCD in a non-interrupt context ++ * during an OTG role change. ++ */ ++ struct delayed_work start_work; ++ ++ /** ++ * Connection timer. An OTG host must display a message if the device ++ * does not connect. Started when the VBus power is turned on via ++ * sysfs attribute "buspower". ++ */ ++ struct timer_list conn_timer; ++ ++ /* Tasket to do a reset */ ++ struct tasklet_struct *reset_tasklet; ++ ++ /* */ ++ spinlock_t lock; ++ ++#ifdef DEBUG ++ uint32_t frrem_samples; ++ uint64_t frrem_accum; ++ ++ uint32_t hfnum_7_samples_a; ++ uint64_t hfnum_7_frrem_accum_a; ++ uint32_t hfnum_0_samples_a; ++ uint64_t hfnum_0_frrem_accum_a; ++ uint32_t hfnum_other_samples_a; ++ uint64_t hfnum_other_frrem_accum_a; ++ ++ uint32_t hfnum_7_samples_b; ++ uint64_t hfnum_7_frrem_accum_b; ++ uint32_t hfnum_0_samples_b; ++ uint64_t hfnum_0_frrem_accum_b; ++ uint32_t hfnum_other_samples_b; ++ uint64_t hfnum_other_frrem_accum_b; ++#endif ++} dwc_otg_hcd_t; ++ ++/** Gets the dwc_otg_hcd from a struct usb_hcd */ ++static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd) ++{ ++ return (dwc_otg_hcd_t *)(hcd->hcd_priv); ++} ++ ++/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. 
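++ * It is the inverse of hcd_to_dwc_otg_hcd() above, recovering the containing
++ * usb_hcd from the hcd_priv member via container_of().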
*/ ++static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv); ++} ++ ++/** @name HCD Create/Destroy Functions */ ++/** @{ */ ++extern int dwc_otg_hcd_init(struct platform_device *pdev); ++extern void dwc_otg_hcd_remove(struct platform_device *pdev); ++/** @} */ ++ ++/** @name Linux HC Driver API Functions */ ++/** @{ */ ++ ++extern int dwc_otg_hcd_start(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_free(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++ // struct usb_host_endpoint *ep, ++ struct urb *urb, ++ gfp_t mem_flags ++ ); ++extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++ struct urb *urb, int status); ++extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep); ++extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, ++ char *buf); ++extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength); ++ ++/** @} */ ++ ++/** @name Transaction Execution Functions */ ++/** @{ */ ++extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd); ++extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, ++ dwc_otg_transaction_type_e tr_type); ++extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *urb, ++ int status); ++/** @} */ ++ ++/** @name Interrupt Handler Functions */ ++/** @{ */ ++extern int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_disconnect_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num); ++extern int32_t dwc_otg_hcd_handle_session_req_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++/** @} */ ++ ++ ++/** @name Schedule Queue Functions */ ++/** @{ */ ++ ++/* Implemented in dwc_otg_hcd_queue.c */ ++extern dwc_otg_qh_t *dwc_otg_hcd_qh_create(dwc_otg_hcd_t *hcd, struct urb *urb); ++extern void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb); ++extern void dwc_otg_hcd_qh_free(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern int dwc_otg_hcd_qh_add(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_csplit); ++ ++/** Remove and free a QH */ ++static inline void dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh) ++{ ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ dwc_otg_hcd_qh_free(hcd, 
qh); ++} ++ ++/** Allocates memory for a QH structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc(void) ++{ ++ return (dwc_otg_qh_t *) kmalloc(sizeof(dwc_otg_qh_t), GFP_KERNEL); ++} ++ ++extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create(struct urb *urb); ++extern void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t *qtd, struct urb *urb); ++extern int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd); ++ ++/** Allocates memory for a QTD structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc(void) ++{ ++ return (dwc_otg_qtd_t *) kmalloc(sizeof(dwc_otg_qtd_t), GFP_KERNEL); ++} ++ ++/** Frees the memory for a QTD structure. QTD should already be removed from ++ * list. ++ * @param[in] qtd QTD to free.*/ ++static inline void dwc_otg_hcd_qtd_free(dwc_otg_qtd_t *qtd) ++{ ++ kfree(qtd); ++} ++ ++/** Removes a QTD from list. ++ * @param[in] hcd HCD instance. ++ * @param[in] qtd QTD to remove from list. */ ++static inline void dwc_otg_hcd_qtd_remove(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) ++{ ++ unsigned long flags; ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ list_del(&qtd->qtd_list_entry); ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++} ++ ++/** Remove and free a QTD */ ++static inline void dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) ++{ ++ dwc_otg_hcd_qtd_remove(hcd, qtd); ++ dwc_otg_hcd_qtd_free(qtd); ++} ++ ++/** @} */ ++ ++ ++/** @name Internal Functions */ ++/** @{ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb); ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd); ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd); ++/** @} */ ++ ++/** Gets the usb_host_endpoint associated with an URB. */ ++static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *urb) ++{ ++ struct usb_device *dev = urb->dev; ++ int ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (usb_pipein(urb->pipe)) ++ return dev->ep_in[ep_num]; ++ else ++ return dev->ep_out[ep_num]; ++} ++ ++/** ++ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is ++ * qualified with its direction (possible 32 endpoints per device). ++ */ ++#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ ++ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) ++ ++/** Gets the QH that contains the list_head */ ++#define dwc_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qh_t, qh_list_entry) ++ ++/** Gets the QTD that contains the list_head */ ++#define dwc_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qtd_t, qtd_list_entry) ++ ++/** Check if QH is non-periodic */ ++#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \ ++ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL)) ++ ++/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */ ++#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) ++ ++/** Packet size for any kind of endpoint descriptor */ ++#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) ++ ++/** ++ * Returns true if _frame1 is less than or equal to _frame2. The comparison is ++ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the ++ * frame number when the max frame number is reached. 
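++ *
++ * For example, assuming DWC_HFNUM_MAX_FRNUM is the 14-bit mask 0x3FFF (the
++ * actual value is defined in the register header, not here),
++ * dwc_frame_num_le(0x3FFE, 0x0001) computes ((0x0001 - 0x3FFE) & 0x3FFF) = 3,
++ * which is <= 0x1FFF, so frame 0x3FFE is treated as at or before frame
++ * 0x0001 across the rollover.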
++ */ ++static inline int dwc_frame_num_le(uint16_t frame1, uint16_t frame2) ++{ ++ return ((frame2 - frame1) & DWC_HFNUM_MAX_FRNUM) <= ++ (DWC_HFNUM_MAX_FRNUM >> 1); ++} ++ ++/** ++ * Returns true if _frame1 is greater than _frame2. The comparison is done ++ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame ++ * number when the max frame number is reached. ++ */ ++static inline int dwc_frame_num_gt(uint16_t frame1, uint16_t frame2) ++{ ++ return (frame1 != frame2) && ++ (((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) < ++ (DWC_HFNUM_MAX_FRNUM >> 1)); ++} ++ ++/** ++ * Increments _frame by the amount specified by _inc. The addition is done ++ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. ++ */ ++static inline uint16_t dwc_frame_num_inc(uint16_t frame, uint16_t inc) ++{ ++ return (frame + inc) & DWC_HFNUM_MAX_FRNUM; ++} ++ ++static inline uint16_t dwc_full_frame_num(uint16_t frame) ++{ ++ return (frame & DWC_HFNUM_MAX_FRNUM) >> 3; ++} ++ ++static inline uint16_t dwc_micro_frame_num(uint16_t frame) ++{ ++ return frame & 0x7; ++} ++ ++#ifdef DEBUG ++/** ++ * Macro to sample the remaining PHY clocks left in the current frame. This ++ * may be used during debugging to determine the average time it takes to ++ * execute sections of code. There are two possible sample points, "a" and ++ * "b", so the _letter argument must be one of these values. ++ * ++ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For ++ * example, "cat /sys/devices/lm0/hcd_frrem". ++ */ ++#define dwc_sample_frrem(_hcd, _qh, _letter) \ ++{ \ ++ hfnum_data_t hfnum; \ ++ dwc_otg_qtd_t *qtd; \ ++ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \ ++ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \ ++ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \ ++ switch (hfnum.b.frnum & 0x7) { \ ++ case 7: \ ++ _hcd->hfnum_7_samples_##_letter++; \ ++ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ case 0: \ ++ _hcd->hfnum_0_samples_##_letter++; \ ++ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ default: \ ++ _hcd->hfnum_other_samples_##_letter++; \ ++ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ } \ ++ } \ ++} ++#else ++#define dwc_sample_frrem(_hcd, _qh, _letter) ++#endif ++#endif ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_hcd_intr.c +@@ -0,0 +1,1826 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $ ++ * $Revision: #70 $ ++ * $Date: 2008/10/16 $ ++ * $Change: 1117667 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. 
If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++#include ++ ++#include "otg_driver.h" ++#include "otg_hcd.h" ++#include "otg_regs.h" ++ ++/** @file ++ * This file contains the implementation of the HCD Interrupt handlers. ++ */ ++ ++/** This function handles interrupts for the HCD. */ ++int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int retval = 0; ++ ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++#endif ++ ++ /* Check if HOST Mode */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ gintsts.d32 = dwc_otg_read_core_intr(core_if); ++ if (!gintsts.d32) { ++ return 0; ++ } ++ ++#ifdef DEBUG ++ /* Don't print debug message in the interrupt handler on SOF */ ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "\n"); ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32); ++#endif ++ if (gintsts.b.usbreset) { ++ DWC_PRINT("Usb Reset In Host Mode\n"); ++ } ++ if (gintsts.b.sofintr) { ++ retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.rxstsqlvl) { ++ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.nptxfempty) { ++ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.i2cintr) { ++ /** @todo Implement i2cintr handler. */ ++ } ++ if (gintsts.b.portintr) { ++ retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.hcintr) { ++ retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.ptxfempty) { ++ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd); ++ } ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintmsk)); ++ } ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "\n"); ++#endif ++ ++ } ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++#warning Compiling code to track missed SOFs ++#define FRAME_NUM_ARRAY_SIZE 1000 ++/** ++ * This function is for debug only. 
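++ * It records the (current, previous) frame number pair whenever the frame
++ * number fails to advance by exactly one (modulo DWC_HFNUM_MAX_FRNUM), i.e.
++ * whenever a start-of-frame appears to have been missed, and dumps the table
++ * to the kernel log once FRAME_NUM_ARRAY_SIZE such entries have been
++ * collected.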
++ */ ++static inline void track_missed_sofs(uint16_t curr_frame_number) ++{ ++ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static int frame_num_idx = 0; ++ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; ++ static int dumped_frame_num_array = 0; ++ ++ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { ++ if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) { ++ frame_num_array[frame_num_idx] = curr_frame_number; ++ last_frame_num_array[frame_num_idx++] = last_frame_num; ++ } ++ } else if (!dumped_frame_num_array) { ++ int i; ++ printk(KERN_EMERG USB_DWC "Frame Last Frame\n"); ++ printk(KERN_EMERG USB_DWC "----- ----------\n"); ++ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { ++ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n", ++ frame_num_array[i], last_frame_num_array[i]); ++ } ++ dumped_frame_num_array = 1; ++ } ++ last_frame_num = curr_frame_number; ++} ++#endif ++ ++/** ++ * Handles the start-of-frame interrupt in host mode. Non-periodic ++ * transactions may be queued to the DWC_otg controller for the current ++ * (micro)frame. Periodic transactions may be queued to the controller for the ++ * next (micro)frame. ++ */ ++int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd) ++{ ++ hfnum_data_t hfnum; ++ struct list_head *qh_entry; ++ dwc_otg_qh_t *qh; ++ dwc_otg_transaction_type_e tr_type; ++ gintsts_data_t gintsts = {.d32 = 0}; ++ ++ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); ++#endif ++ hcd->frame_number = hfnum.b.frnum; ++ ++#ifdef DEBUG ++ hcd->frrem_accum += hfnum.b.frrem; ++ hcd->frrem_samples++; ++#endif ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++ track_missed_sofs(hcd->frame_number); ++#endif ++ ++ /* Determine whether any periodic QHs should be executed. */ ++ qh_entry = hcd->periodic_sched_inactive.next; ++ while (qh_entry != &hcd->periodic_sched_inactive) { ++ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry); ++ qh_entry = qh_entry->next; ++ if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) { ++ /* ++ * Move QH to the ready list to be executed next ++ * (micro)frame. ++ */ ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready); ++ } ++ } ++ ++ tr_type = dwc_otg_hcd_select_transactions(hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(hcd, tr_type); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at ++ * least one packet in the Rx FIFO. The packets are moved from the FIFO to ++ * memory if the DWC_otg controller is operating in Slave mode. 
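++ * Reading the GRXSTSP register pops the topmost Rx status entry; its channel
++ * number, byte count and packet status fields determine whether packet data
++ * is read out of the FIFO or the entry is simply ignored.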
*/ ++int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ host_grxsts_data_t grxsts; ++ dwc_hc_t *hc = NULL; ++ ++ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); ++ ++ grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp); ++ ++ hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; ++ ++ /* Packet Status */ ++ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum); ++ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt); ++ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start); ++ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer. */ ++ if (grxsts.b.bcnt > 0) { ++ dwc_otg_read_packet(dwc_otg_hcd->core_if, ++ hc->xfer_buff, ++ grxsts.b.bcnt); ++ ++ /* Update the HC fields for the next packet received. */ ++ hc->xfer_count += grxsts.b.bcnt; ++ hc->xfer_buff += grxsts.b.bcnt; ++ } ++ ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: ++ case DWC_GRXSTS_PKTSTS_CH_HALTED: ++ /* Handled in interrupt, just ignore data */ ++ break; ++ default: ++ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts); ++ break; ++ } ++ ++ return 1; ++} ++ ++/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More ++ * data packets may be written to the FIFO for OUT transfers. More requests ++ * may be written to the non-periodic request queue for IN transfers. This ++ * interrupt is enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_NON_PERIODIC); ++ return 1; ++} ++ ++/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data ++ * packets may be written to the FIFO for OUT transfers. More requests may be ++ * written to the periodic request queue for IN transfers. This interrupt is ++ * enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_PERIODIC); ++ return 1; ++} ++ ++/** There are multiple conditions that can cause a port interrupt. This function ++ * determines which interrupt conditions have occurred and handles them ++ * appropriately. */ ++int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int retval = 0; ++ hprt0_data_t hprt0; ++ hprt0_data_t hprt0_modify; ++ ++ hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); ++ hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); ++ ++ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in ++ * GINTSTS */ ++ ++ hprt0_modify.b.prtena = 0; ++ hprt0_modify.b.prtconndet = 0; ++ hprt0_modify.b.prtenchng = 0; ++ hprt0_modify.b.prtovrcurrchng = 0; ++ ++ /* Port Connect Detected ++ * Set flag and clear if detected */ ++ if (hprt0.b.prtconndet) { ++ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x " ++ "Port Connect Detected--\n", hprt0.d32); ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 1; ++ hprt0_modify.b.prtconndet = 1; ++ ++ /* B-Device has connected, Delete the connection timer. 
*/ ++ del_timer( &dwc_otg_hcd->conn_timer ); ++ ++ /* The Hub driver asserts a reset when it sees port connect ++ * status change flag */ ++ retval |= 1; ++ } ++ ++ /* Port Enable Changed ++ * Clear if detected - Set internal flag if disabled */ ++ if (hprt0.b.prtenchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Enable Changed--\n", hprt0.d32); ++ hprt0_modify.b.prtenchng = 1; ++ if (hprt0.b.prtena == 1) { ++ int do_reset = 0; ++ dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params; ++ dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if; ++ ++ /* Check if we need to adjust the PHY clock speed for ++ * low power and adjust it */ ++ if (params->host_support_fs_ls_low_power) { ++ gusbcfg_data_t usbcfg; ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED || ++ hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) { ++ /* ++ * Low power ++ */ ++ hcfg_data_t hcfg; ++ if (usbcfg.b.phylpwrclksel == 0) { ++ /* Set PHY low power clock select for FS/LS devices */ ++ usbcfg.b.phylpwrclksel = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED && ++ params->host_ls_low_power_phy_clk == ++ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { ++ /* 6 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } else { ++ /* 48 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } ++ } else { ++ /* ++ * Not low power ++ */ ++ if (usbcfg.b.phylpwrclksel == 1) { ++ usbcfg.b.phylpwrclksel = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ } ++ ++ if (do_reset) { ++ tasklet_schedule(dwc_otg_hcd->reset_tasklet); ++ } ++ } ++ ++ if (!do_reset) { ++ /* Port has been enabled set the reset change flag */ ++ dwc_otg_hcd->flags.b.port_reset_change = 1; ++ } ++ } else { ++ dwc_otg_hcd->flags.b.port_enable_change = 1; ++ } ++ retval |= 1; ++ } ++ ++ /** Overcurrent Change Interrupt */ ++ if (hprt0.b.prtovrcurrchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Overcurrent Changed--\n", hprt0.d32); ++ dwc_otg_hcd->flags.b.port_over_current_change = 1; ++ hprt0_modify.b.prtovrcurrchng = 1; ++ retval |= 1; ++ } ++ ++ /* Clear Port Interrupts */ ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32); ++ ++ return retval; ++} ++ ++/** This interrupt indicates that one or more host channels has a pending ++ * interrupt. There are multiple conditions that can cause each host channel ++ * interrupt. This function determines which conditions have occurred for each ++ * host channel interrupt and handles them appropriately. 
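++ * The HAINT register supplies a bitmap of channels with pending interrupts;
++ * each set bit is dispatched to dwc_otg_hcd_handle_hc_n_intr() for the
++ * corresponding channel (see the loop below).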
*/ ++int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int i; ++ int retval = 0; ++ haint_data_t haint; ++ ++ /* Clear appropriate bits in HCINTn to clear the interrupt bit in ++ * GINTSTS */ ++ ++ haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if); ++ ++ for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) { ++ if (haint.b2.chint & (1 << i)) { ++ retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i); ++ } ++ } ++ ++ return retval; ++} ++ ++/* Macro used to clear one channel interrupt */ ++#define clear_hc_int(_hc_regs_, _intr_) \ ++do { \ ++ hcint_data_t hcint_clear = {.d32 = 0}; \ ++ hcint_clear.b._intr_ = 1; \ ++ dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \ ++} while (0) ++ ++/* ++ * Macro used to disable one channel interrupt. Channel interrupts are ++ * disabled when the channel is halted or released by the interrupt handler. ++ * There is no need to handle further interrupts of that type until the ++ * channel is re-assigned. In fact, subsequent handling may cause crashes ++ * because the channel structures are cleaned up when the channel is released. ++ */ ++#define disable_hc_int(_hc_regs_, _intr_) \ ++do { \ ++ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ ++ hcintmsk.b._intr_ = 1; \ ++ dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \ ++} while (0) ++ ++/** ++ * Gets the actual length of a transfer after the transfer halts. _halt_status ++ * holds the reason for the halt. ++ * ++ * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE, ++ * *short_read is set to 1 upon return if less than the requested ++ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon ++ * return. short_read may also be NULL on entry, in which case it remains ++ * unchanged. ++ */ ++static uint32_t get_actual_xfer_length(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status, ++ int *short_read) ++{ ++ hctsiz_data_t hctsiz; ++ uint32_t length; ++ ++ if (short_read != NULL) { ++ *short_read = 0; ++ } ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ ++ if (halt_status == DWC_OTG_HC_XFER_COMPLETE) { ++ if (hc->ep_is_in) { ++ length = hc->xfer_len - hctsiz.b.xfersize; ++ if (short_read != NULL) { ++ *short_read = (hctsiz.b.xfersize != 0); ++ } ++ } else if (hc->qh->do_split) { ++ length = qtd->ssplit_out_xfer_count; ++ } else { ++ length = hc->xfer_len; ++ } ++ } else { ++ /* ++ * Must use the hctsiz.pktcnt field to determine how much data ++ * has been transferred. This field reflects the number of ++ * packets that have been transferred via the USB. This is ++ * always an integral number of packets if the transfer was ++ * halted before its normal completion. (Can't use the ++ * hctsiz.xfersize field because that reflects the number of ++ * bytes transferred via the AHB, not the USB). ++ */ ++ length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet; ++ } ++ ++ return length; ++} ++ ++/** ++ * Updates the state of the URB after a Transfer Complete interrupt on the ++ * host channel. Updates the actual_length field of the URB based on the ++ * number of bytes transferred via the host channel. Sets the URB status ++ * if the data transfer is finished. ++ * ++ * @return 1 if the data transfer specified by the URB is completely finished, ++ * 0 otherwise. 
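++ * A short read also finishes the transfer; in that case the URB status is
++ * set to -EREMOTEIO if the URB was submitted with URB_SHORT_NOT_OK, or to 0
++ * otherwise.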
++ */ ++static int update_urb_state_xfer_comp(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ struct urb *urb, ++ dwc_otg_qtd_t *qtd) ++{ ++ int xfer_done = 0; ++ int short_read = 0; ++ ++ urb->actual_length += get_actual_xfer_length(hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_COMPLETE, ++ &short_read); ++ ++ if (short_read || urb->actual_length == urb->transfer_buffer_length) { ++ xfer_done = 1; ++ if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) { ++ urb->status = -EREMOTEIO; ++ } else { ++ urb->status = 0; ++ } ++ } ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ urb->transfer_buffer_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n", ++ short_read, xfer_done); ++ } ++#endif ++ ++ return xfer_done; ++} ++ ++/* ++ * Save the starting data toggle for the next transfer. The data toggle is ++ * saved in the QH for non-control transfers and it's saved in the QTD for ++ * control transfers. ++ */ ++static void save_data_toggle(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ ++ if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { ++ dwc_otg_qh_t *qh = hc->qh; ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } else { ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } ++} ++ ++/** ++ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic ++ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are ++ * still linked to the QH, the QH is added to the end of the inactive ++ * non-periodic schedule. For periodic QHs, removes the QH from the periodic ++ * schedule if no more QTDs are linked to the QH. ++ */ ++static void deactivate_qh(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh, ++ int free_qtd) ++{ ++ int continue_split = 0; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd); ++ ++ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ if (qtd->complete_split) { ++ continue_split = 1; ++ } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID || ++ qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) { ++ continue_split = 1; ++ } ++ ++ if (free_qtd) { ++ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); ++ continue_split = 0; ++ } ++ ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split); ++} ++ ++/** ++ * Updates the state of an Isochronous URB when the transfer is stopped for ++ * any reason. The fields of the current entry in the frame descriptor array ++ * are set based on the transfer state and the input _halt_status. Completes ++ * the Isochronous URB if all the URB frames have been completed. ++ * ++ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be ++ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE. 
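++ * When the final frame descriptor has been processed, the URB is given back
++ * with an overall status of 0; the per-frame results are reported through
++ * the individual iso_frame_desc[] status fields instead.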
++ */ ++static dwc_otg_halt_status_e ++update_isoc_urb_state(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ struct urb *urb = qtd->urb; ++ dwc_otg_halt_status_e ret_val = halt_status; ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ switch (halt_status) { ++ case DWC_OTG_HC_XFER_COMPLETE: ++ frame_desc->status = 0; ++ frame_desc->actual_length = ++ get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ break; ++ case DWC_OTG_HC_XFER_FRAME_OVERRUN: ++ urb->error_count++; ++ if (hc->ep_is_in) { ++ frame_desc->status = -ENOSR; ++ } else { ++ frame_desc->status = -ECOMM; ++ } ++ frame_desc->actual_length = 0; ++ break; ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ urb->error_count++; ++ frame_desc->status = -EOVERFLOW; ++ /* Don't need to update actual_length in this case. */ ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ urb->error_count++; ++ frame_desc->status = -EPROTO; ++ frame_desc->actual_length = ++ get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ default: ++ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, ++ halt_status); ++ BUG(); ++ break; ++ } ++ ++ if (++qtd->isoc_frame_index == urb->number_of_packets) { ++ /* ++ * urb->status is not used for isoc transfers. ++ * The individual frame_desc statuses are used instead. ++ */ ++ dwc_otg_hcd_complete_urb(hcd, urb, 0); ++ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ ret_val = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Releases a host channel for use by other transfers. Attempts to select and ++ * queue more transactions since at least one host channel is available. ++ * ++ * @param hcd The HCD state structure. ++ * @param hc The host channel to release. ++ * @param qtd The QTD associated with the host channel. This QTD may be freed ++ * if the transfer is complete or an error has occurred. ++ * @param halt_status Reason the channel is being released. This status ++ * determines the actions taken by this function. ++ */ ++static void release_channel(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ dwc_otg_transaction_type_e tr_type; ++ int free_qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", ++ __func__, hc->hc_num, halt_status); ++ ++ switch (halt_status) { ++ case DWC_OTG_HC_XFER_URB_COMPLETE: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_AHB_ERR: ++ case DWC_OTG_HC_XFER_STALL: ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ if (qtd->error_count >= 3) { ++ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n"); ++ free_qtd = 1; ++ qtd->urb->status = -EPROTO; ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO); ++ } else { ++ free_qtd = 0; ++ } ++ break; ++ case DWC_OTG_HC_XFER_URB_DEQUEUE: ++ /* ++ * The QTD has already been removed and the QH has been ++ * deactivated. Don't want to do anything except release the ++ * host channel and try to queue more transfers. ++ */ ++ goto cleanup; ++ case DWC_OTG_HC_XFER_NO_HALT_STATUS: ++ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num); ++ free_qtd = 0; ++ break; ++ default: ++ free_qtd = 0; ++ break; ++ } ++ ++ deactivate_qh(hcd, hc->qh, free_qtd); ++ ++ cleanup: ++ /* ++ * Release the host channel for use by other transfers. 
The cleanup ++ * function clears the channel interrupt enables and conditions, so ++ * there's no need to clear the Channel Halted interrupt separately. ++ */ ++ dwc_otg_hc_cleanup(hcd->core_if, hc); ++ list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list); ++ ++ switch (hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hcd->non_periodic_channels--; ++ break; ++ ++ default: ++ /* ++ * Don't release reservations for periodic channels here. ++ * That's done when a periodic transfer is descheduled (i.e. ++ * when the QH is removed from the periodic schedule). ++ */ ++ break; ++ } ++ ++ /* Try to queue more transfers now that there's a free channel. */ ++ tr_type = dwc_otg_hcd_select_transactions(hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(hcd, tr_type); ++ } ++} ++ ++/** ++ * Halts a host channel. If the channel cannot be halted immediately because ++ * the request queue is full, this function ensures that the FIFO empty ++ * interrupt for the appropriate queue is enabled so that the halt request can ++ * be queued when there is space in the request queue. ++ * ++ * This function may also be called in DMA mode. In that case, the channel is ++ * simply released since the core always halts the channel automatically in ++ * DMA mode. ++ */ ++static void halt_channel(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ if (hcd->core_if->dma_enable) { ++ release_channel(hcd, hc, qtd, halt_status); ++ return; ++ } ++ ++ /* Slave mode processing... */ ++ dwc_otg_hc_halt(hcd->core_if, hc, halt_status); ++ ++ if (hc->halt_on_queue) { ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs; ++ global_regs = hcd->core_if->core_global_regs; ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ /* ++ * Make sure the Non-periodic Tx FIFO empty interrupt ++ * is enabled so that the non-periodic schedule will ++ * be processed. ++ */ ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } else { ++ /* ++ * Move the QH from the periodic queued schedule to ++ * the periodic assigned schedule. This allows the ++ * halt to be queued when the periodic schedule is ++ * processed. ++ */ ++ list_move(&hc->qh->qh_list_entry, ++ &hcd->periodic_sched_assigned); ++ ++ /* ++ * Make sure the Periodic Tx FIFO Empty interrupt is ++ * enabled so that the periodic schedule will be ++ * processed. ++ */ ++ gintmsk.b.ptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } ++ } ++} ++ ++/** ++ * Performs common cleanup for non-periodic transfers after a Transfer ++ * Complete interrupt. This function should be called after any endpoint type ++ * specific handling is finished to release the host channel. ++ */ ++static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ hcint_data_t hcint; ++ ++ qtd->error_count = 0; ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ if (hcint.b.nyet) { ++ /* ++ * Got a NYET on the last transaction of the transfer. This ++ * means that the endpoint should be in the PING state at the ++ * beginning of the next transfer. ++ */ ++ hc->qh->ping_state = 1; ++ clear_hc_int(hc_regs, nyet); ++ } ++ ++ /* ++ * Always halt and release the host channel to make it available for ++ * more transfers. 
There may still be more phases for a control ++ * transfer or more data packets for a bulk transfer at this point, ++ * but the host channel is still halted. A channel will be reassigned ++ * to the transfer when the non-periodic schedule is processed after ++ * the channel is released. This allows transactions to be queued ++ * properly via dwc_otg_hcd_queue_transactions, which also enables the ++ * Tx FIFO Empty interrupt if necessary. ++ */ ++ if (hc->ep_is_in) { ++ /* ++ * IN transfers in Slave mode require an explicit disable to ++ * halt the channel. (In DMA mode, this call simply releases ++ * the channel.) ++ */ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } else { ++ /* ++ * The channel is automatically disabled by the core for OUT ++ * transfers in Slave mode. ++ */ ++ release_channel(hcd, hc, qtd, halt_status); ++ } ++} ++ ++/** ++ * Performs common cleanup for periodic transfers after a Transfer Complete ++ * interrupt. This function should be called after any endpoint type specific ++ * handling is finished to release the host channel. ++ */ ++static void complete_periodic_xfer(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ hctsiz_data_t hctsiz; ++ qtd->error_count = 0; ++ ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) { ++ /* Core halts channel in these cases. */ ++ release_channel(hcd, hc, qtd, halt_status); ++ } else { ++ /* Flush any outstanding requests from the Tx queue. */ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++} ++ ++/** ++ * Handles a host channel Transfer Complete interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ int urb_xfer_done; ++ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ struct urb *urb = qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transfer Complete--\n", hc->hc_num); ++ ++ /* ++ * Handle xfer complete on CSPLIT. ++ */ ++ if (hc->qh->do_split) { ++ qtd->complete_split = 0; ++ } ++ ++ /* Update the QTD and URB states. 
*/ ++ switch (pipe_type) { ++ case PIPE_CONTROL: ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ if (urb->transfer_buffer_length > 0) { ++ qtd->control_phase = DWC_OTG_CONTROL_DATA; ++ } else { ++ qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ } ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n"); ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ case DWC_OTG_CONTROL_DATA: { ++ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ if (urb_xfer_done) { ++ qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n"); ++ } else { ++ save_data_toggle(hc, hc_regs, qtd); ++ } ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ } ++ case DWC_OTG_CONTROL_STATUS: ++ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n"); ++ if (urb->status == -EINPROGRESS) { ++ urb->status = 0; ++ } ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ break; ++ } ++ ++ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ case PIPE_BULK: ++ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n"); ++ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ if (urb_xfer_done) { ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ save_data_toggle(hc, hc_regs, qtd); ++ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ case PIPE_INTERRUPT: ++ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n"); ++ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ ++ /* ++ * Interrupt URB is done on the first transfer complete ++ * interrupt. ++ */ ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ save_data_toggle(hc, hc_regs, qtd); ++ complete_periodic_xfer(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_URB_COMPLETE); ++ break; ++ case PIPE_ISOCHRONOUS: ++ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n"); ++ if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_COMPLETE); ++ } ++ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ } ++ ++ disable_hc_int(hc_regs, xfercompl); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel STALL interrupt. This handler may be called in ++ * either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ struct urb *urb = qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "STALL Received--\n", hc->hc_num); ++ ++ if (pipe_type == PIPE_CONTROL) { ++ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); ++ } ++ ++ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) { ++ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); ++ /* ++ * USB protocol requires resetting the data toggle for bulk ++ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT) ++ * setup command is issued to the endpoint. Anticipate the ++ * CLEAR_FEATURE command since a STALL has occurred and reset ++ * the data toggle now. ++ */ ++ hc->qh->data_toggle = 0; ++ } ++ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL); ++ ++ disable_hc_int(hc_regs, stall); ++ ++ return 1; ++} ++ ++/* ++ * Updates the state of the URB when a transfer has been stopped due to an ++ * abnormal condition before the transfer completes. 
Modifies the ++ * actual_length field of the URB to reflect the number of bytes that have ++ * actually been transferred via the host channel. ++ */ ++static void update_urb_state_xfer_intr(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ struct urb *urb, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ urb->actual_length += bytes_transferred; ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n", hc->start_pkt_count); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet); ++ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ urb->transfer_buffer_length); ++ } ++#endif ++} ++ ++/** ++ * Handles a host channel NAK interrupt. This handler may be called in either ++ * DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NAK Received--\n", hc->hc_num); ++ ++ /* ++ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and ++ * interrupt. Re-start the SSPLIT transfer. ++ */ ++ if (hc->do_split) { ++ if (hc->complete_split) { ++ qtd->error_count = 0; ++ } ++ qtd->complete_split = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ goto handle_nak_done; ++ } ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ if (hcd->core_if->dma_enable && hc->ep_is_in) { ++ /* ++ * NAK interrupts are enabled on bulk/control IN ++ * transfers in DMA mode for the sole purpose of ++ * resetting the error count after a transaction error ++ * occurs. The core will continue transferring data. ++ */ ++ qtd->error_count = 0; ++ goto handle_nak_done; ++ } ++ ++ /* ++ * NAK interrupts normally occur during OUT transfers in DMA ++ * or Slave mode. For IN transfers, more requests will be ++ * queued as request queue space is available. ++ */ ++ qtd->error_count = 0; ++ ++ if (!hc->qh->ping_state) { ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, ++ qtd, DWC_OTG_HC_XFER_NAK); ++ save_data_toggle(hc, hc_regs, qtd); ++ if (qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will ++ * start/continue. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ break; ++ case PIPE_INTERRUPT: ++ qtd->error_count = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ break; ++ case PIPE_ISOCHRONOUS: ++ /* Should never get called for isochronous transfers. */ ++ BUG(); ++ break; ++ } ++ ++ handle_nak_done: ++ disable_hc_int(hc_regs, nak); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel ACK interrupt. This interrupt is enabled when ++ * performing the PING protocol in Slave mode, when errors occur during ++ * either Slave mode or DMA mode, and during Start Split transactions. 
++ */ ++static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "ACK Received--\n", hc->hc_num); ++ ++ if (hc->do_split) { ++ /* ++ * Handle ACK on SSPLIT. ++ * ACK should not occur in CSPLIT. ++ */ ++ if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) { ++ qtd->ssplit_out_xfer_count = hc->xfer_len; ++ } ++ if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) { ++ /* Don't need complete for isochronous out transfers. */ ++ qtd->complete_split = 1; ++ } ++ ++ /* ISOC OUT */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { ++ switch (hc->xact_pos) { ++ case DWC_HCSPLIT_XACTPOS_ALL: ++ break; ++ case DWC_HCSPLIT_XACTPOS_END: ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ qtd->isoc_split_offset = 0; ++ break; ++ case DWC_HCSPLIT_XACTPOS_BEGIN: ++ case DWC_HCSPLIT_XACTPOS_MID: ++ /* ++ * For BEGIN or MID, calculate the length for ++ * the next microframe to determine the correct ++ * SSPLIT token, either MID or END. ++ */ ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index]; ++ qtd->isoc_split_offset += 188; ++ ++ if ((frame_desc->length - qtd->isoc_split_offset) <= 188) { ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END; ++ } else { ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID; ++ } ++ ++ } ++ break; ++ } ++ } else { ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); ++ } ++ } else { ++ qtd->error_count = 0; ++ ++ if (hc->qh->ping_state) { ++ hc->qh->ping_state = 0; ++ /* ++ * Halt the channel so the transfer can be re-started ++ * from the appropriate point. This only happens in ++ * Slave mode. In DMA mode, the ping_state is cleared ++ * when the transfer is started because the core ++ * automatically executes the PING, then the transfer. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); ++ } ++ } ++ ++ /* ++ * If the ACK occurred when _not_ in the PING state, let the channel ++ * continue transferring data after clearing the error count. ++ */ ++ ++ disable_hc_int(hc_regs, ack); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel NYET interrupt. This interrupt should only occur on ++ * Bulk and Control OUT endpoints and for complete split transactions. If a ++ * NYET occurs at the same time as a Transfer Complete interrupt, it is ++ * handled in the xfercomp interrupt handler, not here. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NYET Received--\n", hc->hc_num); ++ ++ /* ++ * NYET on CSPLIT ++ * re-do the CSPLIT immediately on non-periodic ++ */ ++ if (hc->do_split && hc->complete_split) { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); ++ ++ if (dwc_full_frame_num(frnum) != ++ dwc_full_frame_num(hc->qh->sched_frame)) { ++ /* ++ * No longer in the same full speed frame. ++ * Treat this as a transaction error. ++ */ ++#if 0 ++ /** @todo Fix system performance so this can ++ * be treated as an error. Right now complete ++ * splits cannot be scheduled precisely enough ++ * due to other system activity, so this error ++ * occurs regularly in Slave mode. 
++ */ ++ qtd->error_count++; ++#endif ++ qtd->complete_split = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ /** @todo add support for isoc release */ ++ goto handle_nyet_done; ++ } ++ } ++ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); ++ goto handle_nyet_done; ++ } ++ ++ hc->qh->ping_state = 1; ++ qtd->error_count = 0; ++ ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd, ++ DWC_OTG_HC_XFER_NYET); ++ save_data_toggle(hc, hc_regs, qtd); ++ ++ /* ++ * Halt the channel and re-start the transfer so the PING ++ * protocol will start. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); ++ ++handle_nyet_done: ++ disable_hc_int(hc_regs, nyet); ++ return 1; ++} ++ ++/** ++ * Handles a host channel babble interrupt. This handler may be called in ++ * either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Babble Error--\n", hc->hc_num); ++ if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW); ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR); ++ } else { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_BABBLE_ERR); ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ disable_hc_int(hc_regs, bblerr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel AHB error interrupt. This handler is only called in ++ * DMA mode. ++ */ ++static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ struct urb *urb = qtd->urb; ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "AHB Error--\n", hc->hc_num); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num); ++ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); ++ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? 
"IN" : "OUT")); ++ DWC_ERROR(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_ERROR(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_ERROR(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)urb->transfer_dma); ++ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)urb->setup_dma); ++ DWC_ERROR(" Interval: %d\n", urb->interval); ++ ++ dwc_otg_hcd_complete_urb(hcd, urb, -EIO); ++ ++ /* ++ * Force a channel halt. Don't call halt_channel because that won't ++ * write to the HCCHARn register in DMA mode to force the halt. ++ */ ++ dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR); ++ ++ disable_hc_int(hc_regs, ahberr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel transaction error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transaction Error--\n", hc->hc_num); ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ qtd->error_count++; ++ if (!hc->qh->ping_state) { ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, ++ qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ save_data_toggle(hc, hc_regs, qtd); ++ if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will start. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ break; ++ case PIPE_INTERRUPT: ++ qtd->error_count++; ++ if (hc->do_split && hc->complete_split) { ++ qtd->complete_split = 0; ++ } ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_XACT_ERR); ++ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ break; ++ } ++ ++ disable_hc_int(hc_regs, xacterr); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel frame overrun interrupt. This handler may be called ++ * in either DMA mode or Slave mode. 
++ */ ++static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Frame Overrun--\n", hc->hc_num); ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ break; ++ case PIPE_INTERRUPT: ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ break; ++ } ++ ++ disable_hc_int(hc_regs, frmovrun); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel data toggle error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Data Toggle Error--\n", hc->hc_num); ++ ++ if (hc->ep_is_in) { ++ qtd->error_count = 0; ++ } else { ++ DWC_ERROR("Data Toggle Error on OUT transfer," ++ "channel %d\n", hc->hc_num); ++ } ++ ++ disable_hc_int(hc_regs, datatglerr); ++ ++ return 1; ++} ++ ++#ifdef DEBUG ++/** ++ * This function is for debug only. It checks that a valid halt status is set ++ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is ++ * taken and a warning is issued. ++ * @return 1 if halt status is ok, 0 otherwise. ++ */ ++static inline int halt_status_ok(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hcsplt_data_t hcsplt; ++ ++ if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) { ++ /* ++ * This code is here only as a check. This condition should ++ * never happen. Ignore the halt if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ DWC_WARN("%s: hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, " ++ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, " ++ "hcint 0x%08x, hcintmsk 0x%08x, " ++ "hcsplt 0x%08x, qtd->complete_split %d\n", ++ __func__, hc->hc_num, hcchar.d32, hctsiz.d32, ++ hcint.d32, hcintmsk.d32, ++ hcsplt.d32, qtd->complete_split); ++ ++ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n", ++ __func__, hc->hc_num); ++ DWC_WARN("\n"); ++ clear_hc_int(hc_regs, chhltd); ++ return 0; ++ } ++ ++ /* ++ * This code is here only as a check. hcchar.chdis should ++ * never be set when the halt interrupt occurs. Halt the ++ * channel again if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: hcchar.chdis set unexpectedly, " ++ "hcchar 0x%08x, trying to halt again\n", ++ __func__, hcchar.d32); ++ clear_hc_int(hc_regs, chhltd); ++ hc->halt_pending = 0; ++ halt_channel(hcd, hc, qtd, hc->halt_status); ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++/** ++ * Handles a host Channel Halted interrupt in DMA mode. This handler ++ * determines the reason the channel halted and proceeds accordingly. 
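/*
 * Illustrative note, not part of the patch: the snpsid test at the top of
 * handle_hc_chhltd_intr_dma() below compares against 0x4F54271A. Assuming
 * the usual GSNPSID layout (upper half ASCII "OT", lower half the core
 * release), that value reads as core release 2.71a, taken here to be the
 * first release with the OUT NAK enhancement. The helper name is made up
 * for illustration.
 */
#include <linux/types.h>

static inline int core_has_out_nak_enh(uint32_t snpsid)
{
	return snpsid >= 0x4F54271A;	/* "OT" 2.71a or newer */
}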
++ */ ++static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ int out_nak_enh = 0; ++ ++ /* For core with OUT NAK enhancement, the flow for high- ++ * speed CONTROL/BULK OUT is handled a little differently. ++ */ ++ if (hcd->core_if->snpsid >= 0x4F54271A) { ++ if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in && ++ (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK)) { ++ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement enabled\n"); ++ out_nak_enh = 1; ++ } else { ++ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n"); ++ } ++ } else { ++ DWC_DEBUGPL(DBG_HCD, "OUT NAK enhancement disabled, no core support\n"); ++ } ++ ++ if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Just release the channel. A dequeue can happen on a ++ * transfer timeout. In the case of an AHB Error, the channel ++ * was forced to halt because there's no way to gracefully ++ * recover. ++ */ ++ release_channel(hcd, hc, qtd, hc->halt_status); ++ return; ++ } ++ ++ /* Read the HCINTn register to determine the cause for the halt. */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ ++ if (hcint.b.xfercomp) { ++ /** @todo This is here because of a possible hardware bug. Spec ++ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT ++ * interrupt w/ACK bit set should occur, but I only see the ++ * XFERCOMP bit, even with it masked out. This is a workaround ++ * for that behavior. Should fix this when hardware is fixed. ++ */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { ++ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); ++ } ++ handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.stall) { ++ handle_hc_stall_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.xacterr) { ++ if (out_nak_enh) { ++ if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) { ++ printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n"); ++ qtd->error_count = 0; ++ } else { ++ printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n"); ++ } ++ } ++ ++ /* ++ * Must handle xacterr before nak or ack. Could get a xacterr ++ * at the same time as either of these on a BULK/CONTROL OUT ++ * that started with a PING. The xacterr takes precedence. ++ */ ++ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd); ++ } else if (!out_nak_enh) { ++ if (hcint.b.nyet) { ++ /* ++ * Must handle nyet before nak or ack. Could get a nyet at the ++ * same time as either of those on a BULK/CONTROL OUT that ++ * started with a PING. The nyet takes precedence. ++ */ ++ handle_hc_nyet_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.bblerr) { ++ handle_hc_babble_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.frmovrun) { ++ handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.nak && !hcintmsk.b.nak) { ++ /* ++ * If nak is not masked, it's because a non-split IN transfer ++ * is in an error state. In that case, the nak is handled by ++ * the nak interrupt handler, not here. Handle nak here for ++ * BULK/CONTROL OUT transfers, which halt on a NAK to allow ++ * rewinding the buffer pointer. ++ */ ++ handle_hc_nak_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.ack && !hcintmsk.b.ack) { ++ /* ++ * If ack is not masked, it's because a non-split IN transfer ++ * is in an error state. 
In that case, the ack is handled by ++ * the ack interrupt handler, not here. Handle ack here for ++ * split transfers. Start splits halt on ACK. ++ */ ++ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); ++ } else { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * A periodic transfer halted with no other channel ++ * interrupts set. Assume it was halted by the core ++ * because it could not be completed in its scheduled ++ * (micro)frame. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n", ++ __func__, hc->hc_num); ++#endif ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE); ++ } else { ++ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason " ++ "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n", ++ __func__, hc->hc_num, hcint.d32, ++ dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts)); ++ } ++ } ++ } else { ++ printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32); ++ } ++} ++ ++/** ++ * Handles a host channel Channel Halted interrupt. ++ * ++ * In slave mode, this handler is called only when the driver specifically ++ * requests a halt. This occurs during handling other host channel interrupts ++ * (e.g. nak, xacterr, stall, nyet, etc.). ++ * ++ * In DMA mode, this is the interrupt that occurs when the core has finished ++ * processing a transfer on a channel. Other host channel interrupts (except ++ * ahberr) are disabled in DMA mode. ++ */ ++static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Channel Halted--\n", hc->hc_num); ++ ++ if (hcd->core_if->dma_enable) { ++ handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd); ++ } else { ++#ifdef DEBUG ++ if (!halt_status_ok(hcd, hc, hc_regs, qtd)) { ++ return 1; ++ } ++#endif ++ release_channel(hcd, hc, qtd, hc->halt_status); ++ } ++ ++ return 1; ++} ++ ++/** Handles interrupt for a specific Host Channel */ ++int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num) ++{ ++ int retval = 0; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ dwc_hc_t *hc; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num); ++ ++ hc = dwc_otg_hcd->hc_ptr_array[num]; ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num]; ++ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", ++ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32)); ++ hcint.d32 = hcint.d32 & hcintmsk.d32; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ if (hcint.b.chhltd && hcint.d32 != 0x2) { ++ hcint.b.chhltd = 0; ++ } ++ } ++ ++ if (hcint.b.xfercomp) { ++ retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ /* ++ * If NYET occurred at same time as Xfer Complete, the NYET is ++ * handled by the Xfer Complete interrupt handler. Don't want ++ * to call the NYET interrupt handler in this case. 
++ */ ++ hcint.b.nyet = 0; ++ } ++ if (hcint.b.chhltd) { ++ retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.ahberr) { ++ retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.stall) { ++ retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.nak) { ++ retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.ack) { ++ retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.nyet) { ++ retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.xacterr) { ++ retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.bblerr) { ++ retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.frmovrun) { ++ retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.datatglerr) { ++ retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_hcd_queue.c +@@ -0,0 +1,713 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_queue.c $ ++ * $Revision: #33 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the functions to manage Queue Heads and Queue ++ * Transfer Descriptors. 
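/*
 * Illustrative sketch, not part of the patch: the ownership model the queue
 * functions below implement, with made-up struct names and only the list
 * fields shown. One QH exists per device endpoint (usb_host_endpoint->hcpriv)
 * and one QTD per URB (urb->hcpriv); an endpoint's QTDs queue on its QH, and
 * the QH itself sits on exactly one of the HCD's schedules at a time.
 */
#include <linux/list.h>
#include <linux/usb.h>

struct example_qh {
	struct list_head qtd_list;	/* pending URBs for this endpoint, as QTDs */
	struct list_head qh_list_entry;	/* link into one non-periodic or periodic schedule */
};

struct example_qtd {
	struct urb *urb;		/* urb->hcpriv points back at this QTD */
	struct list_head qtd_list_entry;/* link into the owning QH's qtd_list */
};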
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "otg_driver.h" ++#include "otg_hcd.h" ++#include "otg_regs.h" ++ ++/** ++ * This function allocates and initializes a QH. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. ++ * ++ * @return Returns pointer to the newly allocated QH, or NULL on error. */ ++dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *hcd, struct urb *urb) ++{ ++ dwc_otg_qh_t *qh; ++ ++ /* Allocate memory */ ++ /** @todo add memflags argument */ ++ qh = dwc_otg_hcd_qh_alloc (); ++ if (qh == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qh_init (hcd, qh, urb); ++ return qh; ++} ++ ++/** Free each QTD in the QH's QTD-list then free the QH. QH should already be ++ * removed from a list. QTD list should already be empty if called from URB ++ * Dequeue. ++ * ++ * @param[in] hcd HCD instance. ++ * @param[in] qh The QH to free. ++ */ ++void dwc_otg_hcd_qh_free (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ dwc_otg_qtd_t *qtd; ++ struct list_head *pos; ++ //unsigned long flags; ++ ++ /* Free each QTD in the QTD list */ ++ ++#ifdef CONFIG_SMP ++ //the spinlock is locked before this function get called, ++ //but in case the lock is needed, the check function is preserved ++ ++ //but in non-SMP mode, all spinlock is lockable. ++ //don't do the test in non-SMP mode ++ ++ if(spin_trylock(&hcd->lock)) { ++ printk("%s: It is not supposed to be lockable!!\n",__func__); ++ BUG(); ++ } ++#endif ++// SPIN_LOCK_IRQSAVE(&hcd->lock, flags) ++ for (pos = qh->qtd_list.next; ++ pos != &qh->qtd_list; ++ pos = qh->qtd_list.next) ++ { ++ list_del (pos); ++ qtd = dwc_list_to_qtd (pos); ++ dwc_otg_hcd_qtd_free (qtd); ++ } ++// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++ ++ kfree (qh); ++ return; ++} ++ ++/** Initializes a QH structure. ++ * ++ * @param[in] hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] qh The QH to init. ++ * @param[in] urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. */ ++#define SCHEDULE_SLOP 10 ++void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb) ++{ ++ char *speed, *type; ++ memset (qh, 0, sizeof (dwc_otg_qh_t)); ++ ++ /* Initialize QH */ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ qh->ep_type = USB_ENDPOINT_XFER_CONTROL; ++ break; ++ case PIPE_BULK: ++ qh->ep_type = USB_ENDPOINT_XFER_BULK; ++ break; ++ case PIPE_ISOCHRONOUS: ++ qh->ep_type = USB_ENDPOINT_XFER_ISOC; ++ break; ++ case PIPE_INTERRUPT: ++ qh->ep_type = USB_ENDPOINT_XFER_INT; ++ break; ++ } ++ ++ qh->ep_is_in = usb_pipein(urb->pipe) ? 
1 : 0; ++ ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe))); ++ INIT_LIST_HEAD(&qh->qtd_list); ++ INIT_LIST_HEAD(&qh->qh_list_entry); ++ qh->channel = NULL; ++ ++ /* FS/LS Enpoint on HS Hub ++ * NOT virtual root hub */ ++ qh->do_split = 0; ++ if (((urb->dev->speed == USB_SPEED_LOW) || ++ (urb->dev->speed == USB_SPEED_FULL)) && ++ (urb->dev->tt) && (urb->dev->tt->hub) && (urb->dev->tt->hub->devnum != 1)) ++ { ++ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", ++ usb_pipeendpoint(urb->pipe), urb->dev->tt->hub->devnum, ++ urb->dev->ttport); ++ qh->do_split = 1; ++ } ++ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT || ++ qh->ep_type == USB_ENDPOINT_XFER_ISOC) { ++ /* Compute scheduling parameters once and save them. */ ++ hprt0_data_t hprt; ++ ++ /** @todo Account for split transfers in the bus time. */ ++ int bytecount = dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp); ++ qh->usecs = usb_calc_bus_time(urb->dev->speed, ++ usb_pipein(urb->pipe), ++ (qh->ep_type == USB_ENDPOINT_XFER_ISOC), ++ bytecount); ++ ++ /* Start in a slightly future (micro)frame. */ ++ qh->sched_frame = dwc_frame_num_inc(hcd->frame_number, ++ SCHEDULE_SLOP); ++ qh->interval = urb->interval; ++#if 0 ++ /* Increase interrupt polling rate for debugging. */ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ qh->interval = 8; ++ } ++#endif ++ hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0); ++ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && ++ ((urb->dev->speed == USB_SPEED_LOW) || ++ (urb->dev->speed == USB_SPEED_FULL))) { ++ qh->interval *= 8; ++ qh->sched_frame |= 0x7; ++ qh->start_split_frame = qh->sched_frame; ++ } ++ ++ } ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", qh); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", ++ urb->dev->devnum); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", ++ usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT"); ++ ++ switch(urb->dev->speed) { ++ case USB_SPEED_LOW: ++ speed = "low"; ++ break; ++ case USB_SPEED_FULL: ++ speed = "full"; ++ break; ++ case USB_SPEED_HIGH: ++ speed = "high"; ++ break; ++ default: ++ speed = "?"; ++ break; ++ } ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", speed); ++ ++ switch (qh->ep_type) { ++ case USB_ENDPOINT_XFER_ISOC: ++ type = "isochronous"; ++ break; ++ case USB_ENDPOINT_XFER_INT: ++ type = "interrupt"; ++ break; ++ case USB_ENDPOINT_XFER_CONTROL: ++ type = "control"; ++ break; ++ case USB_ENDPOINT_XFER_BULK: ++ type = "bulk"; ++ break; ++ default: ++ type = "?"; ++ break; ++ } ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",type); ++ ++#ifdef DEBUG ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n", ++ qh->usecs); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n", ++ qh->interval); ++ } ++#endif ++ ++ return; ++} ++ ++/** ++ * Checks that a channel is available for a periodic transfer. ++ * ++ * @return 0 if successful, negative error code otherise. ++ */ ++static int periodic_channel_available(dwc_otg_hcd_t *hcd) ++{ ++ /* ++ * Currently assuming that there is a dedicated host channnel for each ++ * periodic transaction plus at least one host channel for ++ * non-periodic transactions. 
++ */ ++ int status; ++ int num_channels; ++ ++ num_channels = hcd->core_if->core_params->host_channels; ++ if ((hcd->periodic_channels + hcd->non_periodic_channels < num_channels) && ++ (hcd->periodic_channels < num_channels - 1)) { ++ status = 0; ++ } ++ else { ++ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n", ++ __func__, num_channels, hcd->periodic_channels, ++ hcd->non_periodic_channels); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Checks that there is sufficient bandwidth for the specified QH in the ++ * periodic schedule. For simplicity, this calculation assumes that all the ++ * transfers in the periodic schedule may occur in the same (micro)frame. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH containing periodic bandwidth required. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int check_periodic_bandwidth(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status; ++ uint16_t max_claimed_usecs; ++ ++ status = 0; ++ ++ if (hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) { ++ /* ++ * High speed mode. ++ * Max periodic usecs is 80% x 125 usec = 100 usec. ++ */ ++ max_claimed_usecs = 100 - qh->usecs; ++ } else { ++ /* ++ * Full speed mode. ++ * Max periodic usecs is 90% x 1000 usec = 900 usec. ++ */ ++ max_claimed_usecs = 900 - qh->usecs; ++ } ++ ++ if (hcd->periodic_usecs > max_claimed_usecs) { ++ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n", ++ __func__, hcd->periodic_usecs, qh->usecs); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Checks that the max transfer size allowed in a host channel is large enough ++ * to handle the maximum data transfer in a single (micro)frame for a periodic ++ * transfer. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for a periodic endpoint. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int check_max_xfer_size(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status; ++ uint32_t max_xfer_size; ++ uint32_t max_channel_xfer_size; ++ ++ status = 0; ++ ++ max_xfer_size = dwc_max_packet(qh->maxp) * dwc_hb_mult(qh->maxp); ++ max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size; ++ ++ if (max_xfer_size > max_channel_xfer_size) { ++ DWC_NOTICE("%s: Periodic xfer length %d > " ++ "max xfer length for channel %d\n", ++ __func__, max_xfer_size, max_channel_xfer_size); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Schedules an interrupt or isochronous transfer in the periodic schedule. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for the periodic transfer. The QH should already contain the ++ * scheduling information. ++ * ++ * @return 0 if successful, negative error code otherwise. 
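/*
 * Illustrative sketch, not part of the patch: the budget used by
 * check_periodic_bandwidth() above. In high speed mode 80% of a 125 us
 * microframe (100 us) may be claimed; in full speed mode 90% of a 1000 us
 * frame (900 us). Worked example: with 80 us already claimed, a new high
 * speed QH needing 25 us is refused (80 > 100 - 25) while one needing 15 us
 * fits (80 <= 85). The helper name is made up for illustration.
 */
#include <linux/types.h>

static inline int hs_periodic_fits(uint16_t claimed_usecs, uint16_t qh_usecs)
{
	return claimed_usecs <= 100 - qh_usecs;	/* mirrors the max_claimed_usecs test */
}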
++ */ ++static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status = 0; ++ ++ status = periodic_channel_available(hcd); ++ if (status) { ++ DWC_NOTICE("%s: No host channel available for periodic " ++ "transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_periodic_bandwidth(hcd, qh); ++ if (status) { ++ DWC_NOTICE("%s: Insufficient periodic bandwidth for " ++ "periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_max_xfer_size(hcd, qh); ++ if (status) { ++ DWC_NOTICE("%s: Channel max transfer size too small " ++ "for periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive); ++ ++ /* Reserve the periodic channel. */ ++ hcd->periodic_channels++; ++ ++ /* Update claimed usecs per (micro)frame. */ ++ hcd->periodic_usecs += qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated += qh->usecs / qh->interval; ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } ++ ++ return status; ++} ++ ++/** ++ * This function adds a QH to either the non periodic or periodic schedule if ++ * it is not already in the schedule. If the QH is already in the schedule, no ++ * action is taken. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ //unsigned long flags; ++ int status = 0; ++ ++#ifdef CONFIG_SMP ++ //the spinlock is locked before this function get called, ++ //but in case the lock is needed, the check function is preserved ++ //but in non-SMP mode, all spinlock is lockable. ++ //don't do the test in non-SMP mode ++ ++ if(spin_trylock(&hcd->lock)) { ++ printk("%s: It is not supposed to be lockable!!\n",__func__); ++ BUG(); ++ } ++#endif ++// SPIN_LOCK_IRQSAVE(&hcd->lock, flags) ++ ++ if (!list_empty(&qh->qh_list_entry)) { ++ /* QH already in a schedule. */ ++ goto done; ++ } ++ ++ /* Add the new QH to the appropriate schedule */ ++ if (dwc_qh_is_non_per(qh)) { ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&qh->qh_list_entry, &hcd->non_periodic_sched_inactive); ++ } else { ++ status = schedule_periodic(hcd, qh); ++ } ++ ++ done: ++// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++ ++ return status; ++} ++ ++/** ++ * Removes an interrupt or isochronous transfer from the periodic schedule. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for the periodic transfer. ++ */ ++static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ list_del_init(&qh->qh_list_entry); ++ ++ /* Release the periodic channel reservation. */ ++ hcd->periodic_channels--; ++ ++ /* Update claimed usecs per (micro)frame. */ ++ hcd->periodic_usecs -= qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. 
*/ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated -= qh->usecs / qh->interval; ++ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } ++} ++ ++/** ++ * Removes a QH from either the non-periodic or periodic schedule. Memory is ++ * not freed. ++ * ++ * @param[in] hcd The HCD state structure. ++ * @param[in] qh QH to remove from schedule. */ ++void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ //unsigned long flags; ++ ++#ifdef CONFIG_SMP ++ //the spinlock is locked before this function get called, ++ //but in case the lock is needed, the check function is preserved ++ //but in non-SMP mode, all spinlock is lockable. ++ //don't do the test in non-SMP mode ++ ++ if(spin_trylock(&hcd->lock)) { ++ printk("%s: It is not supposed to be lockable!!\n",__func__); ++ BUG(); ++ } ++#endif ++// SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ ++ if (list_empty(&qh->qh_list_entry)) { ++ /* QH is not in a schedule. */ ++ goto done; ++ } ++ ++ if (dwc_qh_is_non_per(qh)) { ++ if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ list_del_init(&qh->qh_list_entry); ++ } else { ++ deschedule_periodic(hcd, qh); ++ } ++ ++ done: ++// SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++ return; ++} ++ ++/** ++ * Deactivates a QH. For non-periodic QHs, removes the QH from the active ++ * non-periodic schedule. The QH is added to the inactive non-periodic ++ * schedule if any QTDs are still attached to the QH. ++ * ++ * For periodic QHs, the QH is removed from the periodic queued schedule. If ++ * there are any QTDs still attached to the QH, the QH is added to either the ++ * periodic inactive schedule or the periodic ready schedule and its next ++ * scheduled frame is calculated. The QH is placed in the ready schedule if ++ * the scheduled frame has been reached already. Otherwise it's placed in the ++ * inactive schedule. If there are no QTDs attached to the QH, the QH is ++ * completely removed from the periodic schedule. ++ */ ++void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split) ++{ ++ unsigned long flags; ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ ++ if (dwc_qh_is_non_per(qh)) { ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ if (!list_empty(&qh->qtd_list)) { ++ /* Add back to inactive non-periodic schedule. */ ++ dwc_otg_hcd_qh_add(hcd, qh); ++ } ++ } else { ++ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); ++ ++ if (qh->do_split) { ++ /* Schedule the next continuing periodic split transfer */ ++ if (sched_next_periodic_split) { ++ ++ qh->sched_frame = frame_number; ++ if (dwc_frame_num_le(frame_number, ++ dwc_frame_num_inc(qh->start_split_frame, 1))) { ++ /* ++ * Allow one frame to elapse after start ++ * split microframe before scheduling ++ * complete split, but DONT if we are ++ * doing the next start split in the ++ * same frame for an ISOC out. 
++ */ ++ if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) { ++ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1); ++ } ++ } ++ } else { ++ qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame, ++ qh->interval); ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ qh->sched_frame = frame_number; ++ } ++ qh->sched_frame |= 0x7; ++ qh->start_split_frame = qh->sched_frame; ++ } ++ } else { ++ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval); ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ qh->sched_frame = frame_number; ++ } ++ } ++ ++ if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ } else { ++ /* ++ * Remove from periodic_sched_queued and move to ++ * appropriate queue. ++ */ ++ if (qh->sched_frame == frame_number) { ++ list_move(&qh->qh_list_entry, ++ &hcd->periodic_sched_ready); ++ } else { ++ list_move(&qh->qh_list_entry, ++ &hcd->periodic_sched_inactive); ++ } ++ } ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++} ++ ++/** ++ * This function allocates and initializes a QTD. ++ * ++ * @param[in] urb The URB to create a QTD from. Each URB-QTD pair will end up ++ * pointing to each other so each pair should have a unique correlation. ++ * ++ * @return Returns pointer to the newly allocated QTD, or NULL on error. */ ++dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb) ++{ ++ dwc_otg_qtd_t *qtd; ++ ++ qtd = dwc_otg_hcd_qtd_alloc (); ++ if (qtd == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qtd_init (qtd, urb); ++ return qtd; ++} ++ ++/** ++ * Initializes a QTD structure. ++ * ++ * @param[in] qtd The QTD to initialize. ++ * @param[in] urb The URB to use for initialization. */ ++void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb) ++{ ++ memset (qtd, 0, sizeof (dwc_otg_qtd_t)); ++ qtd->urb = urb; ++ if (usb_pipecontrol(urb->pipe)) { ++ /* ++ * The only time the QTD data toggle is used is on the data ++ * phase of control transfers. This phase always starts with ++ * DATA1. ++ */ ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ qtd->control_phase = DWC_OTG_CONTROL_SETUP; ++ } ++ ++ /* start split */ ++ qtd->complete_split = 0; ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ qtd->isoc_split_offset = 0; ++ ++ /* Store the qtd ptr in the urb to reference what QTD. */ ++ urb->hcpriv = qtd; ++ return; ++} ++ ++/** ++ * This function adds a QTD to the QTD-list of a QH. It will find the correct ++ * QH to place the QTD into. If it does not find a QH, then it will create a ++ * new QH. If the QH to which the QTD is added is not currently scheduled, it ++ * is placed into the proper schedule based on its EP type. ++ * ++ * @param[in] qtd The QTD to add ++ * @param[in] dwc_otg_hcd The DWC HCD structure ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, ++ dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ struct usb_host_endpoint *ep; ++ dwc_otg_qh_t *qh; ++ unsigned long flags; ++ int retval = 0; ++ ++ struct urb *urb = qtd->urb; ++ ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ ++ /* ++ * Get the QH which holds the QTD-list to insert to. Create QH if it ++ * doesn't exist. 
++ */ ++ ep = dwc_urb_to_endpoint(urb); ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ if (qh == NULL) { ++ qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb); ++ if (qh == NULL) { ++ goto done; ++ } ++ ep->hcpriv = qh; ++ } ++ ++ retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh); ++ if (retval == 0) { ++ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); ++ } ++ ++ done: ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_pcd.c +@@ -0,0 +1,2502 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $ ++ * $Revision: #70 $ ++ * $Date: 2008/10/14 $ ++ * $Change: 1115682 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++ ++/** @file ++ * This file implements the Peripheral Controller Driver. ++ * ++ * The Peripheral Controller Driver (PCD) is responsible for ++ * translating requests from the Function Driver into the appropriate ++ * actions on the DWC_otg controller. It isolates the Function Driver ++ * from the specifics of the controller by providing an API to the ++ * Function Driver. ++ * ++ * The Peripheral Controller Driver for Linux will implement the ++ * Gadget API, so that the existing Gadget drivers can be used. ++ * (Gadget Driver is the Linux terminology for a Function Driver.) ++ * ++ * The Linux Gadget API is defined in the header file ++ * . The USB EP operations API is ++ * defined in the structure usb_ep_ops and the USB ++ * Controller API is defined in the structure ++ * usb_gadget_ops. ++ * ++ * An important function of the PCD is managing interrupts generated ++ * by the DWC_otg controller. 
The implementation of the DWC_otg device ++ * mode interrupt service routines is in dwc_otg_pcd_intr.c. ++ * ++ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc). ++ * @todo Does it work when the request size is greater than DEPTSIZ ++ * transfer size ++ * ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++//#include ++ ++#include "otg_driver.h" ++#include "otg_pcd.h" ++ ++ ++ ++/** ++ * Static PCD pointer for use in usb_gadget_register_driver and ++ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init. ++ */ ++static dwc_otg_pcd_t *s_pcd = 0; ++ ++ ++/* Display the contents of the buffer */ ++extern void dump_msg(const u8 *buf, unsigned int length); ++ ++ ++/** ++ * This function completes a request. It call's the request call back. ++ */ ++void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req, ++ int status) ++{ ++ unsigned stopped = ep->stopped; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep); ++ list_del_init(&req->queue); ++ ++ if (req->req.status == -EINPROGRESS) { ++ req->req.status = status; ++ } else { ++ status = req->req.status; ++ } ++ ++ /* don't modify queue heads during completion callback */ ++ ep->stopped = 1; ++ SPIN_UNLOCK(&ep->pcd->lock); ++ req->req.complete(&ep->ep, &req->req); ++ SPIN_LOCK(&ep->pcd->lock); ++ ++ if (ep->pcd->request_pending > 0) { ++ --ep->pcd->request_pending; ++ } ++ ++ ep->stopped = stopped; ++} ++ ++/** ++ * This function terminates all the requsts in the EP request queue. ++ */ ++void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_pcd_request_t *req; ++ ++ ep->stopped = 1; ++ ++ /* called with irqs blocked?? */ ++ while (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, ++ queue); ++ dwc_otg_request_done(ep, req, -ESHUTDOWN); ++ } ++} ++ ++/* USB Endpoint Operations */ ++/* ++ * The following sections briefly describe the behavior of the Gadget ++ * API endpoint operations implemented in the DWC_otg driver ++ * software. Detailed descriptions of the generic behavior of each of ++ * these functions can be found in the Linux header file ++ * include/linux/usb_gadget.h. ++ * ++ * The Gadget API provides wrapper functions for each of the function ++ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper ++ * function, which then calls the underlying PCD function. The ++ * following sections are named according to the wrapper ++ * functions. Within each section, the corresponding DWC_otg PCD ++ * function name is specified. 
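dwc_otg_request_done() above shows a standard gadget-side idiom: the request is unlinked and the endpoint marked stopped before the lock is dropped around the completion callback, because the callback is allowed to queue new requests. A small user-space sketch of that ordering, with a pthread mutex standing in for the PCD spinlock:

#include <pthread.h>
#include <stdio.h>

struct ep_state {
        pthread_mutex_t lock;
        int stopped;
};

static void complete_cb(void)
{
        /* gadget driver's req->complete(); may call ep_queue() again */
        printf("request completed\n");
}

static void request_done(struct ep_state *ep)
{
        int stopped = ep->stopped;

        /* list_del_init(&req->queue) would go here */
        ep->stopped = 1;                    /* keep queue heads stable */
        pthread_mutex_unlock(&ep->lock);
        complete_cb();                      /* callback runs unlocked */
        pthread_mutex_lock(&ep->lock);
        ep->stopped = stopped;              /* restore previous state */
}

int main(void)
{
        struct ep_state ep = { .stopped = 0 };

        pthread_mutex_init(&ep.lock, NULL);
        pthread_mutex_lock(&ep.lock);
        request_done(&ep);
        pthread_mutex_unlock(&ep.lock);
        return 0;
}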
++ * ++ */ ++ ++/** ++ * This function assigns periodic Tx FIFO to an periodic EP ++ * in shared Tx FIFO mode ++ */ ++static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t PerTxMsk = 1; ++ int i; ++ for(i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) ++ { ++ if((PerTxMsk & core_if->p_tx_msk) == 0) { ++ core_if->p_tx_msk |= PerTxMsk; ++ return i + 1; ++ } ++ PerTxMsk <<= 1; ++ } ++ return 0; ++} ++/** ++ * This function releases periodic Tx FIFO ++ * in shared Tx FIFO mode ++ */ ++static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) ++{ ++ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk; ++} ++/** ++ * This function assigns periodic Tx FIFO to an periodic EP ++ * in shared Tx FIFO mode ++ */ ++static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t TxMsk = 1; ++ int i; ++ ++ for(i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) ++ { ++ if((TxMsk & core_if->tx_msk) == 0) { ++ core_if->tx_msk |= TxMsk; ++ return i + 1; ++ } ++ TxMsk <<= 1; ++ } ++ return 0; ++} ++/** ++ * This function releases periodic Tx FIFO ++ * in shared Tx FIFO mode ++ */ ++static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) ++{ ++ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk; ++} ++ ++/** ++ * This function is called by the Gadget Driver for each EP to be ++ * configured for the current configuration (SET_CONFIGURATION). ++ * ++ * This function initializes the dwc_otg_ep_t data structure, and then ++ * calls dwc_otg_ep_activate. ++ */ ++static int dwc_otg_pcd_ep_enable(struct usb_ep *usb_ep, ++ const struct usb_endpoint_descriptor *ep_desc) ++{ ++ dwc_otg_pcd_ep_t *ep = 0; ++ dwc_otg_pcd_t *pcd = 0; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, ep_desc); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !ep_desc || ep->desc || ++ ep_desc->bDescriptorType != USB_DT_ENDPOINT) { ++ DWC_WARN("%s, bad ep or descriptor\n", __func__); ++ return -EINVAL; ++ } ++ if (ep == &ep->pcd->ep0) { ++ DWC_WARN("%s, bad ep(0)\n", __func__); ++ return -EINVAL; ++ } ++ ++ /* Check FIFO size? */ ++ if (!ep_desc->wMaxPacketSize) { ++ DWC_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name); ++ return -ERANGE; ++ } ++ ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ ep->desc = ep_desc; ++ ep->ep.maxpacket = le16_to_cpu (ep_desc->wMaxPacketSize); ++ ++ /* ++ * Activate the EP ++ */ ++ ep->stopped = 0; ++ ++ ep->dwc_ep.is_in = (USB_DIR_IN & ep_desc->bEndpointAddress) != 0; ++ ep->dwc_ep.maxpacket = ep->ep.maxpacket; ++ ++ ep->dwc_ep.type = ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if(ep->dwc_ep.is_in) { ++ if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) { ++ ep->dwc_ep.tx_fifo_num = 0; ++ ++ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_ISOC) { ++ /* ++ * if ISOC EP then assign a Periodic Tx FIFO. ++ */ ++ ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if); ++ } ++ } else { ++ /* ++ * if Dedicated FIFOs mode is on then assign a Tx FIFO. ++ */ ++ ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if); ++ ++ } ++ } ++ /* Set initial data PID. 
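assign_tx_fifo()/assign_perio_tx_fifo() above hand out Tx FIFO numbers by claiming the first clear bit in a mask, and the release helpers clear that bit again. The same allocator in isolation (clearing with &~ rather than the XOR form used above, which has the same effect on a set bit):

#include <stdint.h>
#include <stdio.h>

/* First-free-bit FIFO allocator: returns a 1-based FIFO number, 0 if full. */
static uint32_t fifo_alloc(uint32_t *mask, int num_fifos)
{
        uint32_t bit = 1;
        int i;

        for (i = 0; i < num_fifos; i++) {
                if ((*mask & bit) == 0) {
                        *mask |= bit;
                        return i + 1;
                }
                bit <<= 1;
        }
        return 0;
}

static void fifo_free(uint32_t *mask, uint32_t fifo_num)
{
        if (fifo_num)
                *mask &= ~(1u << (fifo_num - 1));
}

int main(void)
{
        uint32_t mask = 0;
        uint32_t a = fifo_alloc(&mask, 4), b = fifo_alloc(&mask, 4);

        fifo_free(&mask, a);
        printf("a=%u b=%u mask=0x%x\n", a, b, mask);  /* a=1 b=2 mask=0x2 */
        return 0;
}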
*/ ++ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_BULK) { ++ ep->dwc_ep.data_pid_start = 0; ++ } ++ ++ DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n", ++ ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"), ++ ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc); ++ ++ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC) { ++ ep->dwc_ep.desc_addr = dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); ++ } ++ ++ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ return 0; ++} ++ ++/** ++ * This function is called when an EP is disabled due to disconnect or ++ * change in configuration. Any pending requests will terminate with a ++ * status of -ESHUTDOWN. ++ * ++ * This function modifies the dwc_otg_ep_t data structure for this EP, ++ * and then calls dwc_otg_ep_deactivate. ++ */ ++static int dwc_otg_pcd_ep_disable(struct usb_ep *usb_ep) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, usb_ep); ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !ep->desc) { ++ DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__, ++ usb_ep ? ep->ep.name : NULL); ++ return -EINVAL; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ dwc_otg_request_nuke(ep); ++ ++ dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep); ++ ep->desc = 0; ++ ep->stopped = 1; ++ ++ if(ep->dwc_ep.is_in) { ++ dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ } ++ ++ /* Free DMA Descriptors */ ++ pcd = ep->pcd; ++ ++ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); ++ ++ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC && ep->dwc_ep.desc_addr) { ++ dwc_otg_ep_free_desc_chain(ep->dwc_ep.desc_addr, ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); ++ } ++ ++ DWC_DEBUGPL(DBG_PCD, "%s disabled\n", usb_ep->name); ++ return 0; ++} ++ ++ ++/** ++ * This function allocates a request object to use with the specified ++ * endpoint. ++ * ++ * @param ep The endpoint to be used with with the request ++ * @param gfp_flags the GFP_* flags to use. ++ */ ++static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep, ++ gfp_t gfp_flags) ++{ ++ dwc_otg_pcd_request_t *req; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d)\n", __func__, ep, gfp_flags); ++ if (0 == ep) { ++ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n"); ++ return 0; ++ } ++ req = kmalloc(sizeof(dwc_otg_pcd_request_t), gfp_flags); ++ if (0 == req) { ++ DWC_WARN("%s() %s\n", __func__, ++ "request allocation failed!\n"); ++ return 0; ++ } ++ memset(req, 0, sizeof(dwc_otg_pcd_request_t)); ++ req->req.dma = DMA_ADDR_INVALID; ++ INIT_LIST_HEAD(&req->queue); ++ return &req->req; ++} ++ ++/** ++ * This function frees a request object. ++ * ++ * @param ep The endpoint associated with the request ++ * @param req The request being freed ++ */ ++static void dwc_otg_pcd_free_request(struct usb_ep *ep, ++ struct usb_request *req) ++{ ++ dwc_otg_pcd_request_t *request; ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, ep, req); ++ ++ if (0 == ep || 0 == req) { ++ DWC_WARN("%s() %s\n", __func__, ++ "Invalid ep or req argument!\n"); ++ return; ++ } ++ ++ request = container_of(req, dwc_otg_pcd_request_t, req); ++ kfree(request); ++} ++ ++#if 0 ++/** ++ * This function allocates an I/O buffer to be used for a transfer ++ * to/from the specified endpoint. 
++ * ++ * @param usb_ep The endpoint to be used with with the request ++ * @param bytes The desired number of bytes for the buffer ++ * @param dma Pointer to the buffer's DMA address; must be valid ++ * @param gfp_flags the GFP_* flags to use. ++ * @return address of a new buffer or null is buffer could not be allocated. ++ */ ++static void *dwc_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes, ++ dma_addr_t *dma, ++ gfp_t gfp_flags) ++{ ++ void *buf; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ pcd = ep->pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes, ++ dma, gfp_flags); ++ ++ /* Check dword alignment */ ++ if ((bytes & 0x3UL) != 0) { ++ DWC_WARN("%s() Buffer size is not a multiple of" ++ "DWORD size (%d)",__func__, bytes); ++ } ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ buf = dma_alloc_coherent (NULL, bytes, dma, gfp_flags); ++ } ++ else { ++ buf = kmalloc(bytes, gfp_flags); ++ } ++ ++ /* Check dword alignment */ ++ if (((int)buf & 0x3UL) != 0) { ++ DWC_WARN("%s() Buffer is not DWORD aligned (%p)", ++ __func__, buf); ++ } ++ ++ return buf; ++} ++ ++/** ++ * This function frees an I/O buffer that was allocated by alloc_buffer. ++ * ++ * @param usb_ep the endpoint associated with the buffer ++ * @param buf address of the buffer ++ * @param dma The buffer's DMA address ++ * @param bytes The number of bytes of the buffer ++ */ ++static void dwc_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf, ++ dma_addr_t dma, unsigned bytes) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ pcd = ep->pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%0x,%d)\n", __func__, ep, buf, dma, bytes); ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ dma_free_coherent (NULL, bytes, buf, dma); ++ } ++ else { ++ kfree(buf); ++ } ++} ++#endif ++ ++/** ++ * This function is used to submit an I/O Request to an EP. ++ * ++ * - When the request completes the request's completion callback ++ * is called to return the request to the driver. ++ * - An EP, except control EPs, may have multiple requests ++ * pending. ++ * - Once submitted the request cannot be examined or modified. ++ * - Each request is turned into one or more packets. ++ * - A BULK EP can queue any amount of data; the transfer is ++ * packetized. ++ * - Zero length Packets are specified with the request 'zero' ++ * flag. 
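The (currently #if 0'd) buffer helpers above only warn when a length or address is not DWORD-aligned, since the DMA engine moves 32-bit words. The two checks written out as stand-alone helpers, with names of my choosing:

#include <stdint.h>
#include <stddef.h>

/* A transfer length that is a whole number of 32-bit words. */
static inline int is_dword_multiple(size_t bytes)
{
        return (bytes & 0x3u) == 0;
}

/* A buffer address aligned to a 32-bit boundary. */
static inline int is_dword_aligned(const void *buf)
{
        return ((uintptr_t)buf & 0x3u) == 0;
}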
++ */ ++static int dwc_otg_pcd_ep_queue(struct usb_ep *usb_ep, ++ struct usb_request *usb_req, ++ gfp_t gfp_flags) ++{ ++ int prevented = 0; ++ dwc_otg_pcd_request_t *req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ unsigned long flags = 0; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n", ++ __func__, usb_ep, usb_req, gfp_flags); ++ ++ req = container_of(usb_req, dwc_otg_pcd_request_t, req); ++ if (!usb_req || !usb_req->complete || !usb_req->buf || ++ !list_empty(&req->queue)) { ++ DWC_WARN("%s, bad params\n", __func__); ++ return -EINVAL; ++ } ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || (!ep->desc && ep->dwc_ep.num != 0)/* || ep->stopped != 0*/) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", ++ usb_ep->name, usb_req, usb_req->length, usb_req->buf); ++ ++ if (!GET_CORE_IF(pcd)->core_params->opt) { ++ if (ep->dwc_ep.num != 0) { ++ DWC_ERROR("%s queue req %p, len %d buf %p\n", ++ usb_ep->name, usb_req, usb_req->length, usb_req->buf); ++ } ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++#if defined(DEBUG) & defined(VERBOSE) ++ dump_msg(usb_req->buf, usb_req->length); ++#endif ++ ++ usb_req->status = -EINPROGRESS; ++ usb_req->actual = 0; ++ ++ /* ++ * For EP0 IN without premature status, zlp is required? ++ */ ++ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) { ++ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", usb_ep->name); ++ //_req->zero = 1; ++ } ++ ++ /* Start the transfer */ ++ if (list_empty(&ep->queue) && !ep->stopped) { ++ /* EP0 Transfer? 
*/ ++ if (ep->dwc_ep.num == 0) { ++ switch (pcd->ep0state) { ++ case EP0_IN_DATA_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_IN_DATA_PHASE\n", ++ __func__); ++ break; ++ ++ case EP0_OUT_DATA_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_OUT_DATA_PHASE\n", ++ __func__); ++ if (pcd->request_config) { ++ /* Complete STATUS PHASE */ ++ ep->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ } ++ break; ++ ++ case EP0_IN_STATUS_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_IN_STATUS_PHASE\n", ++ __func__); ++ break; ++ ++ default: ++ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n", ++ pcd->ep0state); ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return -EL2HLT; ++ } ++ ep->dwc_ep.dma_addr = usb_req->dma; ++ ep->dwc_ep.start_xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_len = usb_req->length; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; ++ ++ if(usb_req->zero) { ++ if((ep->dwc_ep.xfer_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.xfer_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ++ ep_check_and_patch_dma_addr(ep); ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); ++ } ++ else { ++ ++ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; ++ ++ /* Setup and start the Transfer */ ++ ep->dwc_ep.dma_addr = usb_req->dma; ++ ep->dwc_ep.start_xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_buff = usb_req->buf; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = usb_req->length; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ++ if(max_transfer > MAX_TRANSFER_SIZE) { ++ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); ++ } else { ++ ep->dwc_ep.maxxfer = max_transfer; ++ } ++ ++ if(usb_req->zero) { ++ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.total_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ++ ep_check_and_patch_dma_addr(ep); ++ dwc_otg_ep_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); ++ } ++ } ++ ++ if ((req != 0) || prevented) { ++ ++pcd->request_pending; ++ list_add_tail(&req->queue, &ep->queue); ++ if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) { ++ /** @todo NGS Create a function for this. */ ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.intktxfemp = 1; ++ if(&GET_CORE_IF(pcd)->multiproc_int_enable) { ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepeachintmsk[ep->dwc_ep.num], ++ 0, diepmsk.d32); ++ } else { ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32); ++ } ++ } ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return 0; ++} ++ ++/** ++ * This function cancels an I/O request from an EP. ++ */ ++static int dwc_otg_pcd_ep_dequeue(struct usb_ep *usb_ep, ++ struct usb_request *usb_req) ++{ ++ dwc_otg_pcd_request_t *req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, usb_req); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !usb_req || (!ep->desc && ep->dwc_ep.num != 0)) { ++ DWC_WARN("%s, bad argument\n", __func__); ++ return -EINVAL; ++ } ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, usb_ep->name, ++ ep->dwc_ep.is_in ? 
"IN" : "OUT", ++ usb_req); ++ ++ /* make sure it's actually queued on this endpoint */ ++ list_for_each_entry(req, &ep->queue, queue) ++ { ++ if (&req->req == usb_req) { ++ break; ++ } ++ } ++ ++ if (&req->req != usb_req) { ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return -EINVAL; ++ } ++ ++ if (!list_empty(&req->queue)) { ++ dwc_otg_request_done(ep, req, -ECONNRESET); ++ } ++ else { ++ req = 0; ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ return req ? 0 : -EOPNOTSUPP; ++} ++ ++/** ++ * usb_ep_set_halt stalls an endpoint. ++ * ++ * usb_ep_clear_halt clears an endpoint halt and resets its data ++ * toggle. ++ * ++ * Both of these functions are implemented with the same underlying ++ * function. The behavior depends on the value argument. ++ * ++ * @param[in] usb_ep the Endpoint to halt or clear halt. ++ * @param[in] value ++ * - 0 means clear_halt. ++ * - 1 means set_halt, ++ * - 2 means clear stall lock flag. ++ * - 3 means set stall lock flag. ++ */ ++static int dwc_otg_pcd_ep_set_halt(struct usb_ep *usb_ep, int value) ++{ ++ int retval = 0; ++ unsigned long flags; ++ dwc_otg_pcd_ep_t *ep = 0; ++ ++ ++ DWC_DEBUGPL(DBG_PCD,"HALT %s %d\n", usb_ep->name, value); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || (!ep->desc && ep != &ep->pcd->ep0) || ++ ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ if (!list_empty(&ep->queue)) { ++ DWC_WARN("%s() %s XFer In process\n", __func__, usb_ep->name); ++ retval = -EAGAIN; ++ } ++ else if (value == 0) { ++ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ else if(value == 1) { ++ if (ep->dwc_ep.is_in == 1 && ep->pcd->otg_dev->core_if->dma_desc_enable) { ++ dtxfsts_data_t txstatus; ++ fifosize_data_t txfifosize; ++ ++ txfifosize.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->core_global_regs->dptxfsiz_dieptxf[ep->dwc_ep.tx_fifo_num]); ++ txstatus.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->dtxfsts); ++ ++ if(txstatus.b.txfspcavail < txfifosize.b.depth) { ++ DWC_WARN("%s() %s Data In Tx Fifo\n", __func__, usb_ep->name); ++ retval = -EAGAIN; ++ } ++ else { ++ if (ep->dwc_ep.num == 0) { ++ ep->pcd->ep0state = EP0_STALL; ++ } ++ ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ } ++ else { ++ if (ep->dwc_ep.num == 0) { ++ ep->pcd->ep0state = EP0_STALL; ++ } ++ ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ } ++ else if (value == 2) { ++ ep->dwc_ep.stall_clear_flag = 0; ++ } ++ else if (value == 3) { ++ ep->dwc_ep.stall_clear_flag = 1; ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); ++ return retval; ++} ++ ++/** ++ * This function allocates a DMA Descriptor chain for the Endpoint ++ * buffer to be used for a transfer to/from the specified endpoint. 
++ */ ++dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count) ++{ ++ ++ return dma_alloc_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr, GFP_KERNEL); ++} ++ ++LIST_HEAD(tofree_list); ++DEFINE_SPINLOCK(tofree_list_lock); ++ ++struct free_param { ++ struct list_head list; ++ ++ void* addr; ++ dma_addr_t dma_addr; ++ uint32_t size; ++}; ++void free_list_agent_fn(void *data){ ++ struct list_head free_list; ++ struct free_param *cur,*next; ++ ++ spin_lock(&tofree_list_lock); ++ list_add(&free_list,&tofree_list); ++ list_del_init(&tofree_list); ++ spin_unlock(&tofree_list_lock); ++ ++ list_for_each_entry_safe(cur,next,&free_list,list){ ++ if(cur==&free_list) break; ++ dma_free_coherent(NULL,cur->size,cur->addr,cur->dma_addr); ++ list_del(&cur->list); ++ kfree(cur); ++ } ++} ++DECLARE_WORK(free_list_agent,free_list_agent_fn); ++/** ++ * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc. ++ */ ++void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count) ++{ ++ if(irqs_disabled()){ ++ struct free_param* fp=kmalloc(sizeof(struct free_param),GFP_KERNEL); ++ fp->addr=desc_addr; ++ fp->dma_addr=dma_desc_addr; ++ fp->size=count*sizeof(dwc_otg_dma_desc_t); ++ ++ spin_lock(&tofree_list_lock); ++ list_add(&fp->list,&tofree_list); ++ spin_unlock(&tofree_list_lock); ++ ++ schedule_work(&free_list_agent); ++ return ; ++ } ++ dma_free_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), desc_addr, dma_desc_addr); ++} ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ ++ dsts_data_t dsts = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ int i, j; ++ ++ if(dwc_ep->is_in) ++ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval; ++ else ++ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ ++ ++ /** Allocate descriptors for double buffering */ ++ dwc_ep->iso_desc_addr = dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,dwc_ep->desc_cnt*2); ++ if(dwc_ep->desc_addr) { ++ DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__); ++ return; ++ } ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ desc_sts_data_t sts = { .d32 =0 }; ++ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; ++ dma_addr_t dma_ad; ++ uint32_t data_per_desc; ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[dwc_ep->num]; ++ int offset; ++ ++ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ dma_ad = (dma_addr_t)dwc_read_reg32(&(out_regs->doepdma)); ++ ++ /** Buffer 0 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr0; ++ ++ sts.b_iso_out.bs = BS_HOST_READY; ++ sts.b_iso_out.rxsts = 0; ++ sts.b_iso_out.l = 0; ++ sts.b_iso_out.sp = 0; ++ sts.b_iso_out.ioc = 0; ++ sts.b_iso_out.pid = 0; ++ sts.b_iso_out.framenum = 0; ++ ++ offset = 0; ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ /** Buffer 1 descriptors setup */ ++ sts.b_iso_out.ioc = 0; ++ dma_ad = dwc_ep->dma_addr1; ++ ++ offset = 0; ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ } ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ sts.b_iso_out.l = 1; ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = 0; ++ ++ /** Write dma_ad into DOEPDMA register */ ++ dwc_write_reg32(&(out_regs->doepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); ++ ++ } ++ /** ISO IN EP */ ++ else { ++ desc_sts_data_t sts = { .d32 =0 }; ++ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; ++ dma_addr_t dma_ad; ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[dwc_ep->num]; ++ unsigned int frmnumber; ++ fifosize_data_t txfifosize,rxfifosize; ++ ++ txfifosize.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->dtxfsts); ++ rxfifosize.d32 = dwc_read_reg32(&core_if->core_global_regs->grxfsiz); ++ ++ ++ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ ++ dma_ad = dwc_ep->dma_addr0; ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ sts.b_iso_in.bs = BS_HOST_READY; ++ sts.b_iso_in.txsts = 0; ++ sts.b_iso_in.sp = (dwc_ep->data_per_frame % dwc_ep->maxpacket)? 1 : 0; ++ sts.b_iso_in.ioc = 0; ++ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; ++ ++ ++ frmnumber = dwc_ep->next_frame; ++ ++ sts.b_iso_in.framenum = frmnumber; ++ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; ++ sts.b_iso_in.l = 0; ++ ++ /** Buffer 0 descriptors setup */ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ //(uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ } ++ ++ sts.b_iso_in.ioc = 1; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++dma_desc; ++ ++ /** Buffer 1 descriptors setup */ ++ sts.b_iso_in.ioc = 0; ++ dma_ad = dwc_ep->dma_addr1; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ //(uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ ++ sts.b_iso_in.ioc = 0; ++ } ++ sts.b_iso_in.ioc = 1; ++ sts.b_iso_in.l = 1; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval; ++ ++ /** Write dma_ad into diepdma register */ ++ dwc_write_reg32(&(in_regs->diepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); ++ } ++ /** Enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ depctl.b.epena = 1; ++ depctl.b.usbactep = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ depctl.d32 = dwc_read_reg32(addr); ++} ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
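Each ISO OUT descriptor programmed above carries at most one max-packet chunk of a frame's data, the final chunk being the remainder, and every chunk is rounded up to a whole DWORD before it is written into rxbytes. The sizing rule in isolation (names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Bytes carried by the j-th descriptor of a frame that transfers
 * data_per_frame bytes in maxpacket-sized chunks, rounded up to a DWORD. */
static uint32_t iso_desc_bytes(uint32_t j, uint32_t maxpacket,
                               uint32_t data_per_frame)
{
        uint32_t n = ((j + 1) * maxpacket > data_per_frame)
                        ? data_per_frame - j * maxpacket
                        : maxpacket;

        return n + ((n % 4) ? (4 - n % 4) : 0);
}

int main(void)
{
        /* a 3000-byte frame split into 1024-byte packets: prints 1024 1024 952 */
        printf("%u %u %u\n", iso_desc_bytes(0, 1024, 3000),
                             iso_desc_bytes(1, 1024, 3000),
                             iso_desc_bytes(2, 1024, 3000));
        return 0;
}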
++ * ++ */ ++ ++void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ ++ if(ep->is_in) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ ++ if(core_if->dma_enable == 0 || core_if->dma_desc_enable!= 0) { ++ return; ++ } else { ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ ++ ep->xfer_len = ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval; ++ ep->pkt_cnt = (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ ep->xfer_count = 0; ++ ep->xfer_buff = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ ++ if(ep->is_in) { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ deptsiz.b.mc = ep->pkt_per_frm; ++ deptsiz.b.xfersize = ep->xfer_len; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); ++ ++ } else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), (uint32_t)ep->dma_addr); ++ ++ } ++ /** Enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ } ++} ++ ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ */ ++ ++void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ if(ep->is_in) { ++ ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm; ++ } else { ++ ep->desc_cnt = ep->pkt_cnt; ++ } ++ dwc_otg_iso_ep_start_ddma_transfer(core_if, ep); ++ } else { ++ if(core_if->pti_enh_enable) { ++ dwc_otg_iso_ep_start_buf_transfer(core_if, ep); ++ } else { ++ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); ++ } ++ } ++ } else { ++ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
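The DEPTSIZ programming above follows the usual rule for this core: the packet count is the transfer length divided by max-packet and rounded up, and an OUT endpoint's transfer size is then padded back out to whole packets because the core always lands complete packets in memory. The arithmetic on its own (helper names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Packet count programmed into DEPTSIZ: length / maxpacket, rounded up. */
static uint32_t pkt_cnt(uint32_t xfer_len, uint32_t maxpacket)
{
        return (xfer_len + maxpacket - 1) / maxpacket;
}

/* OUT transfer size is padded to whole packets; IN programs the exact length. */
static uint32_t out_xfersize(uint32_t xfer_len, uint32_t maxpacket)
{
        return pkt_cnt(xfer_len, maxpacket) * maxpacket;
}

int main(void)
{
        printf("%u %u\n", pkt_cnt(3000, 512), out_xfersize(3000, 512));  /* 6 3072 */
        return 0;
}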
++ * @param ep The EP to start the transfer on. ++ */ ++ ++void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ if(ep->is_in == 1) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } ++ else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ /* disable the ep */ ++ depctl.d32 = dwc_read_reg32(addr); ++ ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ ++ dwc_write_reg32(addr, depctl.d32); ++ ++ if(core_if->dma_desc_enable && ++ ep->iso_desc_addr && ep->iso_dma_desc_addr) { ++ dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,ep->iso_dma_desc_addr,ep->desc_cnt * 2); ++ } ++ ++ /* reset varibales */ ++ ep->dma_addr0 = 0; ++ ep->dma_addr1 = 0; ++ ep->xfer_buff0 = 0; ++ ep->xfer_buff1 = 0; ++ ep->data_per_frame = 0; ++ ep->data_pattern_frame = 0; ++ ep->sync_frame = 0; ++ ep->buf_proc_intrvl = 0; ++ ep->bInterval = 0; ++ ep->proc_buf_num = 0; ++ ep->pkt_per_frm = 0; ++ ep->pkt_per_frm = 0; ++ ep->desc_cnt = 0; ++ ep->iso_desc_addr = 0; ++ ep->iso_dma_desc_addr = 0; ++} ++ ++ ++/** ++ * This function is used to submit an ISOC Transfer Request to an EP. ++ * ++ * - Every time a sync period completes the request's completion callback ++ * is called to provide data to the gadget driver. ++ * - Once submitted the request cannot be modified. ++ * - Each request is turned into periodic data packets untill ISO ++ * Transfer is stopped.. ++ */ ++static int dwc_otg_pcd_iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req, ++ gfp_t gfp_flags) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ dwc_ep_t *dwc_ep; ++ unsigned long flags = 0; ++ int32_t frm_data; ++ dwc_otg_core_if_t *core_if; ++ dcfg_data_t dcfg; ++ dsts_data_t dsts; ++ ++ ++ if (!req || !req->process_buffer || !req->buf0 || !req->buf1) { ++ DWC_WARN("%s, bad params\n", __func__); ++ return -EINVAL; ++ } ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ core_if = GET_CORE_IF(pcd); ++ ++ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ if(ep->iso_req) { ++ DWC_WARN("%s, iso request in progress\n", __func__); ++ } ++ req->status = -EINPROGRESS; ++ ++ dwc_ep->dma_addr0 = req->dma0; ++ dwc_ep->dma_addr1 = req->dma1; ++ ++ dwc_ep->xfer_buff0 = req->buf0; ++ dwc_ep->xfer_buff1 = req->buf1; ++ ++ ep->iso_req = req; ++ ++ dwc_ep->data_per_frame = req->data_per_frame; ++ ++ /** @todo - pattern data support is to be implemented in the future */ ++ dwc_ep->data_pattern_frame = req->data_pattern_frame; ++ dwc_ep->sync_frame = req->sync_frame; ++ ++ dwc_ep->buf_proc_intrvl = req->buf_proc_intrvl; ++ ++ dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1); ++ ++ dwc_ep->proc_buf_num = 0; ++ ++ dwc_ep->pkt_per_frm = 0; ++ frm_data = ep->dwc_ep.data_per_frame; ++ while(frm_data > 0) { ++ dwc_ep->pkt_per_frm++; ++ frm_data -= ep->dwc_ep.maxpacket; ++ } ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ if(req->flags & USB_REQ_ISO_ASAP) { ++ dwc_ep->next_frame = dsts.b.soffn + 1; ++ if(dwc_ep->bInterval != 1){ ++ dwc_ep->next_frame = 
dwc_ep->next_frame + (dwc_ep->bInterval - 1 - dwc_ep->next_frame % dwc_ep->bInterval); ++ } ++ } else { ++ dwc_ep->next_frame = req->start_frame; ++ } ++ ++ ++ if(!core_if->pti_enh_enable) { ++ dwc_ep->pkt_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ } else { ++ dwc_ep->pkt_cnt = ++ (dwc_ep->data_per_frame * (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval) ++ - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket; ++ } ++ ++ if(core_if->dma_desc_enable) { ++ dwc_ep->desc_cnt = ++ dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ } ++ ++ dwc_ep->pkt_info = kmalloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt, GFP_KERNEL); ++ if(!dwc_ep->pkt_info) { ++ return -ENOMEM; ++ } ++ if(core_if->pti_enh_enable) { ++ memset(dwc_ep->pkt_info, 0, sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt); ++ } ++ ++ dwc_ep->cur_pkt = 0; ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ dwc_otg_iso_ep_start_transfer(core_if, dwc_ep); ++ ++ return 0; ++} ++ ++/** ++ * This function stops ISO EP Periodic Data Transfer. ++ */ ++static int dwc_otg_pcd_iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ dwc_ep_t *dwc_ep; ++ unsigned long flags; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep); ++ ++ kfree(dwc_ep->pkt_info); ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ if(ep->iso_req != req) { ++ return -EINVAL; ++ } ++ ++ req->status = -ECONNRESET; ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ ++ ep->iso_req = 0; ++ ++ return 0; ++} ++ ++/** ++ * This function is used for perodical data exchnage between PCD and gadget drivers. 
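For a USB_REQ_ISO_ASAP request above, the start frame is taken as one past the current SOF number and then pushed forward so it lands on the endpoint's service interval; as written, the expression advances it to the next frame number congruent to bInterval - 1 modulo bInterval. The same computation isolated:

#include <stdint.h>
#include <stdio.h>

/* Start frame for an ASAP ISO request: one frame after the current SOF,
 * rounded up onto the endpoint's bInterval grid as in the code above. */
static uint32_t iso_start_frame(uint32_t soffn, uint32_t bInterval)
{
        uint32_t next = soffn + 1;

        if (bInterval != 1)
                next = next + (bInterval - 1 - next % bInterval);
        return next;
}

int main(void)
{
        /* prints 103 111 for soffn 100 and 103 with bInterval 8 */
        printf("%u %u\n", iso_start_frame(100, 8), iso_start_frame(103, 8));
        return 0;
}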
++ * for Isochronous EPs ++ * ++ * - Every time a sync period completes this function is called to ++ * perform data exchange between PCD and gadget ++ */ ++void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req) ++{ ++ int i; ++ struct usb_gadget_iso_packet_descriptor *iso_packet; ++ dwc_ep_t *dwc_ep; ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ if(ep->iso_req->status == -ECONNRESET) { ++ DWC_PRINT("Device has already disconnected\n"); ++ /*Device has been disconnected*/ ++ return; ++ } ++ ++ if(dwc_ep->proc_buf_num != 0) { ++ iso_packet = ep->iso_req->iso_packet_desc0; ++ } ++ ++ else { ++ iso_packet = ep->iso_req->iso_packet_desc1; ++ } ++ ++ /* Fill in ISOC packets descriptors & pass to gadget driver*/ ++ ++ for(i = 0; i < dwc_ep->pkt_cnt; ++i) { ++ iso_packet[i].status = dwc_ep->pkt_info[i].status; ++ iso_packet[i].offset = dwc_ep->pkt_info[i].offset; ++ iso_packet[i].actual_length = dwc_ep->pkt_info[i].length; ++ dwc_ep->pkt_info[i].status = 0; ++ dwc_ep->pkt_info[i].offset = 0; ++ dwc_ep->pkt_info[i].length = 0; ++ } ++ ++ /* Call callback function to process data buffer */ ++ ep->iso_req->status = 0;/* success */ ++ ++ SPIN_UNLOCK(&ep->pcd->lock); ++ ep->iso_req->process_buffer(&ep->ep, ep->iso_req); ++ SPIN_LOCK(&ep->pcd->lock); ++} ++ ++ ++static struct usb_iso_request *dwc_otg_pcd_alloc_iso_request(struct usb_ep *ep,int packets, ++ gfp_t gfp_flags) ++{ ++ struct usb_iso_request *pReq = NULL; ++ uint32_t req_size; ++ ++ ++ req_size = sizeof(struct usb_iso_request); ++ req_size += (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor))); ++ ++ ++ pReq = kmalloc(req_size, gfp_flags); ++ if (!pReq) { ++ DWC_WARN("%s, can't allocate Iso Request\n", __func__); ++ return 0; ++ } ++ pReq->iso_packet_desc0 = (void*) (pReq + 1); ++ ++ pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets; ++ ++ return pReq; ++} ++ ++static void dwc_otg_pcd_free_iso_request(struct usb_ep *ep, struct usb_iso_request *req) ++{ ++ kfree(req); ++} ++ ++static struct usb_isoc_ep_ops dwc_otg_pcd_ep_ops = ++{ ++ .ep_ops = ++ { ++ .enable = dwc_otg_pcd_ep_enable, ++ .disable = dwc_otg_pcd_ep_disable, ++ ++ .alloc_request = dwc_otg_pcd_alloc_request, ++ .free_request = dwc_otg_pcd_free_request, ++ ++ //.alloc_buffer = dwc_otg_pcd_alloc_buffer, ++ //.free_buffer = dwc_otg_pcd_free_buffer, ++ ++ .queue = dwc_otg_pcd_ep_queue, ++ .dequeue = dwc_otg_pcd_ep_dequeue, ++ ++ .set_halt = dwc_otg_pcd_ep_set_halt, ++ .fifo_status = 0, ++ .fifo_flush = 0, ++ }, ++ .iso_ep_start = dwc_otg_pcd_iso_ep_start, ++ .iso_ep_stop = dwc_otg_pcd_iso_ep_stop, ++ .alloc_iso_request = dwc_otg_pcd_alloc_iso_request, ++ .free_iso_request = dwc_otg_pcd_free_iso_request, ++}; ++ ++#else ++ ++ ++static struct usb_ep_ops dwc_otg_pcd_ep_ops = ++{ ++ .enable = dwc_otg_pcd_ep_enable, ++ .disable = dwc_otg_pcd_ep_disable, ++ ++ .alloc_request = dwc_otg_pcd_alloc_request, ++ .free_request = dwc_otg_pcd_free_request, ++ ++// .alloc_buffer = dwc_otg_pcd_alloc_buffer, ++// .free_buffer = dwc_otg_pcd_free_buffer, ++ ++ .queue = dwc_otg_pcd_ep_queue, ++ .dequeue = dwc_otg_pcd_ep_dequeue, ++ ++ .set_halt = dwc_otg_pcd_ep_set_halt, ++ .fifo_status = 0, ++ .fifo_flush = 0, ++ ++ ++}; ++ ++#endif /* DWC_EN_ISOC */ ++/* Gadget Operations */ ++/** ++ * The following gadget operations will be implemented in the DWC_otg ++ * PCD. Functions in the API that are not described below are not ++ * implemented. ++ * ++ * The Gadget API provides wrapper functions for each of the function ++ * pointers defined in usb_gadget_ops. 
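dwc_otg_pcd_alloc_iso_request() above makes a single allocation and carves it into the request structure followed by the two per-buffer packet-descriptor arrays, using pointer arithmetic on the struct pointer ((pReq + 1)). A generic sketch of that layout trick with placeholder types:

#include <stdio.h>
#include <stdlib.h>

struct pkt_desc { unsigned offset, actual_length; int status; };

struct iso_req {
        struct pkt_desc *desc0;   /* descriptors for buffer 0 */
        struct pkt_desc *desc1;   /* descriptors for buffer 1 */
};

/* One allocation laid out as |iso_req|desc0[packets]|desc1[packets]| */
static struct iso_req *alloc_iso_req(int packets)
{
        struct iso_req *req = malloc(sizeof(*req) +
                                     2 * packets * sizeof(struct pkt_desc));
        if (!req)
                return NULL;
        req->desc0 = (struct pkt_desc *)(req + 1);  /* just past the struct */
        req->desc1 = req->desc0 + packets;          /* second array follows */
        return req;
}

int main(void)
{
        struct iso_req *req = alloc_iso_req(8);

        printf("%p %p %p\n", (void *)req, (void *)req->desc0, (void *)req->desc1);
        free(req);
        return 0;
}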
The Gadget Driver calls the ++ * wrapper function, which then calls the underlying PCD function. The ++ * following sections are named according to the wrapper functions ++ * (except for ioctl, which doesn't have a wrapper function). Within ++ * each section, the corresponding DWC_otg PCD function name is ++ * specified. ++ * ++ */ ++ ++/** ++ *Gets the USB Frame number of the last SOF. ++ */ ++static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget) ++{ ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); ++ ++ if (gadget == 0) { ++ return -ENODEV; ++ } ++ else { ++ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); ++ dwc_otg_get_frame_number(GET_CORE_IF(pcd)); ++ } ++ ++ return 0; ++} ++ ++void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd) ++{ ++ uint32_t *addr = (uint32_t *)&(GET_CORE_IF(pcd)->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ gotgctl_data_t val; ++ ++ val.d32 = dwc_read_reg32(addr); ++ if (val.b.sesreq) { ++ DWC_ERROR("Session Request Already active!\n"); ++ return; ++ } ++ ++ DWC_NOTICE("Session Request Initated\n"); ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.sesreq = 1; ++ dwc_write_reg32(addr, mem.d32); ++ ++ /* Start the SRP timer */ ++ dwc_otg_pcd_start_srp_timer(pcd); ++ return; ++} ++ ++void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set) ++{ ++ dctl_data_t dctl = {.d32=0}; ++ volatile uint32_t *addr = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl); ++ ++ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { ++ if (pcd->remote_wakeup_enable) { ++ if (set) { ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(addr, 0, dctl.d32); ++ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n"); ++ mdelay(1); ++ dwc_modify_reg32(addr, dctl.d32, 0); ++ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n"); ++ } ++ else { ++ } ++ } ++ else { ++ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n"); ++ } ++ } ++ return; ++} ++ ++/** ++ * Initiates Session Request Protocol (SRP) to wakeup the host if no ++ * session is in progress. If a session is already in progress, but ++ * the device is suspended, remote wakeup signaling is started. ++ * ++ */ ++static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget) ++{ ++ unsigned long flags; ++ dwc_otg_pcd_t *pcd; ++ dsts_data_t dsts; ++ gotgctl_data_t gotgctl; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); ++ ++ if (gadget == 0) { ++ return -ENODEV; ++ } ++ else { ++ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); ++ } ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ /* ++ * This function starts the Protocol if no session is in progress. If ++ * a session is already in progress, but the device is suspended, ++ * remote wakeup signaling is started. ++ */ ++ ++ /* Check if valid session */ ++ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl)); ++ if (gotgctl.b.bsesvld) { ++ /* Check if suspend state */ ++ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts)); ++ if (dsts.b.suspsts) { ++ dwc_otg_pcd_remote_wakeup(pcd, 1); ++ } ++ } ++ else { ++ dwc_otg_pcd_initiate_srp(pcd); ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return 0; ++} ++ ++static const struct usb_gadget_ops dwc_otg_pcd_ops = ++{ ++ .get_frame = dwc_otg_pcd_get_frame, ++ .wakeup = dwc_otg_pcd_wakeup, ++ // current versions must always be self-powered ++}; ++ ++/** ++ * This function updates the otg values in the gadget structure. 
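Remote wakeup above is signalled by pulsing DCTL.RmtWkUpSig: set the bit, hold it for about a millisecond (the mdelay(1)), then clear it. A stripped-down model of that sequence; the bit position and the in-memory "register" are stand-ins for the real memory-mapped access:

#include <stdint.h>
#include <stdio.h>

#define DCTL_RMTWKUPSIG (1u << 0)   /* bit position assumed for illustration */

static uint32_t dctl;               /* stands in for the memory-mapped DCTL */

static void delay_ms(unsigned ms) { (void)ms; /* mdelay(ms) in the driver */ }

/* Pulse the remote-wakeup signalling bit for roughly 1 ms. */
static void signal_remote_wakeup(void)
{
        dctl |= DCTL_RMTWKUPSIG;    /* start signalling */
        delay_ms(1);
        dctl &= ~DCTL_RMTWKUPSIG;   /* stop signalling */
}

int main(void)
{
        signal_remote_wakeup();
        printf("dctl=0x%x\n", dctl);
        return 0;
}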
++ */ ++void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset) ++{ ++ ++ if (!pcd->gadget.is_otg) ++ return; ++ ++ if (reset) { ++ pcd->b_hnp_enable = 0; ++ pcd->a_hnp_support = 0; ++ pcd->a_alt_hnp_support = 0; ++ } ++ ++ pcd->gadget.b_hnp_enable = pcd->b_hnp_enable; ++ pcd->gadget.a_hnp_support = pcd->a_hnp_support; ++ pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support; ++} ++ ++/** ++ * This function is the top level PCD interrupt handler. ++ */ ++static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev) ++{ ++ dwc_otg_pcd_t *pcd = dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_pcd_handle_intr(pcd); ++ return IRQ_RETVAL(retval); ++} ++ ++/** ++ * PCD Callback function for initializing the PCD when switching to ++ * device mode. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_start_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ /* ++ * Initialized the Core for Device mode. ++ */ ++ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { ++ dwc_otg_core_dev_init(GET_CORE_IF(pcd)); ++ } ++ return 1; ++} ++ ++/** ++ * PCD Callback function for stopping the PCD when switching to Host ++ * mode. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_stop_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd); ++ ++ dwc_otg_pcd_stop(pcd); ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback function for notifying the PCD when resuming from ++ * suspend. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_suspend_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ if (pcd->driver && pcd->driver->resume) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->suspend(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++ ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback function for notifying the PCD when resuming from ++ * suspend. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_resume_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ if (pcd->driver && pcd->driver->resume) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->resume(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++ ++ /* Stop the SRP timeout timer. */ ++ if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS) || ++ (!GET_CORE_IF(pcd)->core_params->i2c_enable)) { ++ if (GET_CORE_IF(pcd)->srp_timer_started) { ++ GET_CORE_IF(pcd)->srp_timer_started = 0; ++ del_timer(&pcd->srp_timer); ++ } ++ } ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback structure for handling mode switching. ++ */ ++static dwc_otg_cil_callbacks_t pcd_callbacks = ++{ ++ .start = dwc_otg_pcd_start_cb, ++ .stop = dwc_otg_pcd_stop_cb, ++ .suspend = dwc_otg_pcd_suspend_cb, ++ .resume_wakeup = dwc_otg_pcd_resume_cb, ++ .p = 0, /* Set at registration */ ++}; ++ ++/** ++ * This function is called when the SRP timer expires. The SRP should ++ * complete within 6 seconds. 
++ */ ++static void srp_timeout(unsigned long ptr) ++{ ++ gotgctl_data_t gotgctl; ++ dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *)ptr; ++ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl; ++ ++ gotgctl.d32 = dwc_read_reg32(addr); ++ ++ core_if->srp_timer_started = 0; ++ ++ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->core_params->i2c_enable)) { ++ DWC_PRINT("SRP Timeout\n"); ++ ++ if ((core_if->srp_success) && ++ (gotgctl.b.bsesvld)) { ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++ ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, ++ gotgctl.d32, 0); ++ ++ core_if->srp_success = 0; ++ } ++ else { ++ DWC_ERROR("Device not connected/responding\n"); ++ gotgctl.b.sesreq = 0; ++ dwc_write_reg32(addr, gotgctl.d32); ++ } ++ } ++ else if (gotgctl.b.sesreq) { ++ DWC_PRINT("SRP Timeout\n"); ++ ++ DWC_ERROR("Device not connected/responding\n"); ++ gotgctl.b.sesreq = 0; ++ dwc_write_reg32(addr, gotgctl.d32); ++ } ++ else { ++ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32); ++ } ++} ++ ++/** ++ * Start the SRP timer to detect when the SRP does not complete within ++ * 6 seconds. ++ * ++ * @param pcd the pcd structure. ++ */ ++void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd) ++{ ++ struct timer_list *srp_timer = &pcd->srp_timer; ++ GET_CORE_IF(pcd)->srp_timer_started = 1; ++ init_timer(srp_timer); ++ srp_timer->function = srp_timeout; ++ srp_timer->data = (unsigned long)GET_CORE_IF(pcd); ++ srp_timer->expires = jiffies + (HZ*6); ++ add_timer(srp_timer); ++} ++ ++/** ++ * Tasklet ++ * ++ */ ++extern void start_next_request(dwc_otg_pcd_ep_t *ep); ++ ++static void start_xfer_tasklet_func (unsigned long data) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t*)data; ++ dwc_otg_core_if_t *core_if = pcd->otg_dev->core_if; ++ ++ int i; ++ depctl_data_t diepctl; ++ ++ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n"); ++ ++ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl); ++ ++ if (pcd->ep0.queue_sof) { ++ pcd->ep0.queue_sof = 0; ++ start_next_request (&pcd->ep0); ++ // break; ++ } ++ ++ for (i=0; idev_if->num_in_eps; i++) ++ { ++ depctl_data_t diepctl; ++ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl); ++ ++ if (pcd->in_ep[i].queue_sof) { ++ pcd->in_ep[i].queue_sof = 0; ++ start_next_request (&pcd->in_ep[i]); ++ // break; ++ } ++ } ++ ++ return; ++} ++ ++ ++ ++ ++ ++ ++ ++static struct tasklet_struct start_xfer_tasklet = { ++ .next = NULL, ++ .state = 0, ++ .count = ATOMIC_INIT(0), ++ .func = start_xfer_tasklet_func, ++ .data = 0, ++}; ++/** ++ * This function initialized the pcd Dp structures to there default ++ * state. ++ * ++ * @param pcd the pcd structure. 
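dwc_otg_pcd_start_srp_timer() above arms a one-shot kernel timer for the 6-second SRP window using the timer API of the kernels this patch targets (init_timer() with explicit function/data/expires fields). The arming step reduced to its essentials in that same legacy style; this is a sketch for the old API and would need porting to timer_setup() on current kernels:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* 'my_srp_timeout' and 'ctx' are illustrative names, not the driver's. */
static void my_srp_timeout(unsigned long ptr)
{
        (void)ptr;                         /* handle the expired SRP window */
}

static void start_srp_timer(struct timer_list *t, unsigned long ctx)
{
        init_timer(t);
        t->function = my_srp_timeout;
        t->data     = ctx;                 /* passed back to the handler */
        t->expires  = jiffies + 6 * HZ;    /* SRP must finish within 6 s */
        add_timer(t);                      /* one-shot; del_timer() cancels it */
}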
++ */ ++void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd) ++{ ++ static const char * names[] = ++ { ++ ++ "ep0", ++ "ep1in", ++ "ep2in", ++ "ep3in", ++ "ep4in", ++ "ep5in", ++ "ep6in", ++ "ep7in", ++ "ep8in", ++ "ep9in", ++ "ep10in", ++ "ep11in", ++ "ep12in", ++ "ep13in", ++ "ep14in", ++ "ep15in", ++ "ep1out", ++ "ep2out", ++ "ep3out", ++ "ep4out", ++ "ep5out", ++ "ep6out", ++ "ep7out", ++ "ep8out", ++ "ep9out", ++ "ep10out", ++ "ep11out", ++ "ep12out", ++ "ep13out", ++ "ep14out", ++ "ep15out" ++ ++ }; ++ ++ int i; ++ int in_ep_cntr, out_ep_cntr; ++ uint32_t hwcfg1; ++ uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps; ++ uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps; ++ dwc_otg_pcd_ep_t *ep; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); ++ ++ INIT_LIST_HEAD (&pcd->gadget.ep_list); ++ pcd->gadget.ep0 = &pcd->ep0.ep; ++ pcd->gadget.speed = USB_SPEED_UNKNOWN; ++ ++ INIT_LIST_HEAD (&pcd->gadget.ep0->ep_list); ++ ++ /** ++ * Initialize the EP0 structure. ++ */ ++ ep = &pcd->ep0; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.num = 0; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ep->dwc_ep.desc_addr = 0; ++ ep->dwc_ep.dma_desc_addr = 0; ++ ++ ep->dwc_ep.aligned_buf=NULL; ++ ep->dwc_ep.aligned_buf_size=0; ++ ep->dwc_ep.aligned_dma_addr=0; ++ ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[0]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? ++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ /** ++ * Initialize the EP structures. ++ */ ++ in_ep_cntr = 0; ++ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3; ++ ++ for (i = 1; in_ep_cntr < num_in_eps; i++) ++ { ++ if((hwcfg1 & 0x1) == 0) { ++ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr]; ++ in_ep_cntr ++; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.is_in = 1; ++ ep->dwc_ep.num = i; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ep->dwc_ep.desc_addr = 0; ++ ep->dwc_ep.dma_desc_addr = 0; ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[i]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? 
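The two endpoint loops above decode GHWCFG1, which carries a 2-bit per-endpoint direction field; shifting by 3 before walking in 2-bit steps tests each field's high bit (clear when the endpoint can do IN), while shifting by 2 tests the low bit (clear when it can do OUT). A sketch of the same decode with the field values named; the 0 = bidirectional, 1 = IN-only, 2 = OUT-only encoding is an assumption about the core's configuration register, not something stated in this patch:

#include <stdint.h>
#include <stdio.h>

enum ep_dir { EP_BIDIR = 0, EP_IN_ONLY = 1, EP_OUT_ONLY = 2 };

/* 2-bit per-endpoint field extracted from GHWCFG1. */
static unsigned ep_field(uint32_t hwcfg1, unsigned ep)
{
        return (hwcfg1 >> (2 * ep)) & 0x3;
}

static int ep_supports_in(uint32_t hwcfg1, unsigned ep)
{
        unsigned f = ep_field(hwcfg1, ep);

        return f == EP_BIDIR || f == EP_IN_ONLY;   /* high bit clear */
}

static int ep_supports_out(uint32_t hwcfg1, unsigned ep)
{
        unsigned f = ep_field(hwcfg1, ep);

        return f == EP_BIDIR || f == EP_OUT_ONLY;  /* low bit clear */
}

int main(void)
{
        uint32_t hwcfg1 = (0x2u << 2) | (0x1u << 4);  /* ep1 OUT-only, ep2 IN-only */

        printf("%d %d\n", ep_supports_in(hwcfg1, 1), ep_supports_out(hwcfg1, 2));  /* 0 0 */
        return 0;
}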
++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ //add only even number ep as in ++ if((i%2)==1) ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ } ++ hwcfg1 >>= 2; ++ } ++ ++ out_ep_cntr = 0; ++ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2; ++ ++ for (i = 1; out_ep_cntr < num_out_eps; i++) ++ { ++ if((hwcfg1 & 0x1) == 0) { ++ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr]; ++ out_ep_cntr++; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.is_in = 0; ++ ep->dwc_ep.num = i; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[15 + i]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? ++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ //add only odd number ep as out ++ if((i%2)==0) ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ } ++ hwcfg1 >>= 2; ++ } ++ ++ /* remove ep0 from the list. There is a ep0 pointer.*/ ++ list_del_init (&pcd->ep0.ep.ep_list); ++ ++ pcd->ep0state = EP0_DISCONNECT; ++ pcd->ep0.ep.maxpacket = MAX_EP0_SIZE; ++ pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE; ++ pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++} ++ ++/** ++ * This function releases the Gadget device. ++ * required by device_unregister(). ++ * ++ * @todo Should this do something? Should it free the PCD? ++ */ ++static void dwc_otg_pcd_gadget_release(struct device *dev) ++{ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, dev); ++} ++ ++ ++ ++/** ++ * This function initialized the PCD portion of the driver. ++ * ++ */ ++u8 dev_id[]="gadget"; ++int dwc_otg_pcd_init(struct platform_device *pdev) ++{ ++ static char pcd_name[] = "dwc_otg_pcd"; ++ dwc_otg_pcd_t *pcd; ++ dwc_otg_core_if_t* core_if; ++ dwc_otg_dev_if_t* dev_if; ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); ++ int retval = 0; ++ ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n",__func__, pdev); ++ /* ++ * Allocate PCD structure ++ */ ++ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL); ++ ++ if (pcd == 0) { ++ return -ENOMEM; ++ } ++ ++ memset(pcd, 0, sizeof(dwc_otg_pcd_t)); ++ spin_lock_init(&pcd->lock); ++ ++ otg_dev->pcd = pcd; ++ s_pcd = pcd; ++ pcd->gadget.name = pcd_name; ++ ++ pcd->gadget.dev.init_name = dev_id; ++ pcd->otg_dev = platform_get_drvdata(pdev); ++ ++ pcd->gadget.dev.parent = &pdev->dev; ++ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release; ++ pcd->gadget.ops = &dwc_otg_pcd_ops; ++ ++ core_if = GET_CORE_IF(pcd); ++ dev_if = core_if->dev_if; ++ ++ if(core_if->hwcfg4.b.ded_fifo_en) { ++ DWC_PRINT("Dedicated Tx FIFOs mode\n"); ++ } ++ else { ++ DWC_PRINT("Shared Tx FIFO mode\n"); ++ } ++ ++ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget ++ * should not report as dual-speed capable. replace the following line ++ * with the block of code below it once the software is debugged for ++ * this. If is_dualspeed = 0 then the gadget driver should not report ++ * a device qualifier descriptor when queried. 
*/ ++ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL) || ++ ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2) && ++ (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1) && ++ (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) { ++ pcd->gadget.is_dualspeed = 0; ++ } ++ else { ++ pcd->gadget.is_dualspeed = 1; ++ } ++ ++ if ((otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { ++ pcd->gadget.is_otg = 0; ++ } ++ else { ++ pcd->gadget.is_otg = 1; ++ } ++ ++ ++ pcd->driver = 0; ++ /* Register the gadget device */ ++printk("%s: 1\n",__func__); ++ retval = device_register(&pcd->gadget.dev); ++ if (retval != 0) { ++ kfree (pcd); ++printk("%s: 2\n",__func__); ++ return retval; ++ } ++ ++ ++ /* ++ * Initialized the Core for Device mode. ++ */ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dwc_otg_core_dev_init(core_if); ++ } ++ ++ /* ++ * Initialize EP structures ++ */ ++ dwc_otg_pcd_reinit(pcd); ++ ++ /* ++ * Register the PCD Callbacks. ++ */ ++ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks, ++ pcd); ++ /* ++ * Setup interupt handler ++ */ ++ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", otg_dev->irq); ++ retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, ++ IRQF_SHARED, pcd->gadget.name, pcd); ++ if (retval != 0) { ++ DWC_ERROR("request of irq%d failed\n", otg_dev->irq); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -EBUSY; ++ } ++ ++ /* ++ * Initialize the DMA buffer for SETUP packets ++ */ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ pcd->setup_pkt = dma_alloc_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, &pcd->setup_pkt_dma_handle, 0); ++ if (pcd->setup_pkt == 0) { ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ pcd->status_buf = dma_alloc_coherent (NULL, sizeof (uint16_t), &pcd->status_buf_dma_handle, 0); ++ if (pcd->status_buf == 0) { ++ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ if (GET_CORE_IF(pcd)->dma_desc_enable) { ++ dev_if->setup_desc_addr[0] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[0], 1); ++ dev_if->setup_desc_addr[1] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[1], 1); ++ dev_if->in_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_in_desc_addr, 1); ++ dev_if->out_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_out_desc_addr, 1); ++ ++ if(dev_if->setup_desc_addr[0] == 0 ++ || dev_if->setup_desc_addr[1] == 0 ++ || dev_if->in_desc_addr == 0 ++ || dev_if->out_desc_addr == 0 ) { ++ ++ if(dev_if->out_desc_addr) ++ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); ++ if(dev_if->in_desc_addr) ++ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); ++ if(dev_if->setup_desc_addr[1]) ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); ++ if(dev_if->setup_desc_addr[0]) ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); ++ ++ ++ dma_free_coherent(NULL, sizeof(*pcd->status_buf), pcd->status_buf, pcd->setup_pkt_dma_handle); ++ dma_free_coherent(NULL, 
sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ ++ return -ENOMEM; ++ } ++ } ++ } ++ else { ++ pcd->setup_pkt = kmalloc (sizeof (*pcd->setup_pkt) * 5, GFP_KERNEL); ++ if (pcd->setup_pkt == 0) { ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ pcd->status_buf = kmalloc (sizeof (uint16_t), GFP_KERNEL); ++ if (pcd->status_buf == 0) { ++ kfree(pcd->setup_pkt); ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ } ++ ++ ++ /* Initialize tasklet */ ++ start_xfer_tasklet.data = (unsigned long)pcd; ++ pcd->start_xfer_tasklet = &start_xfer_tasklet; ++ ++ return 0; ++} ++ ++/** ++ * Cleanup the PCD. ++ */ ++void dwc_otg_pcd_remove(struct platform_device *pdev) ++{ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); ++ dwc_otg_pcd_t *pcd = otg_dev->pcd; ++ dwc_otg_dev_if_t* dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pdev); ++ ++ /* ++ * Free the IRQ ++ */ ++ free_irq(otg_dev->irq, pcd); ++ ++ /* start with the driver above us */ ++ if (pcd->driver) { ++ /* should have been done already by driver model core */ ++ DWC_WARN("driver '%s' is still registered\n", ++ pcd->driver->driver.name); ++ usb_gadget_unregister_driver(pcd->driver); ++ } ++ device_unregister(&pcd->gadget.dev); ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ dma_free_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ dma_free_coherent (NULL, sizeof (uint16_t), pcd->status_buf, pcd->status_buf_dma_handle); ++ if (GET_CORE_IF(pcd)->dma_desc_enable) { ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); ++ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); ++ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); ++ } ++ } ++ else { ++ kfree (pcd->setup_pkt); ++ kfree (pcd->status_buf); ++ } ++ ++ kfree(pcd); ++ otg_dev->pcd = 0; ++} ++ ++/** ++ * This function registers a gadget driver with the PCD. ++ * ++ * When a driver is successfully registered, it will receive control ++ * requests including set_configuration(), which enables non-control ++ * requests. then usb traffic follows until a disconnect is reported. ++ * then a host may connect again, or the driver might get unbound. 
++ * ++ * @param driver The driver being registered ++ */ ++int usb_gadget_probe_driver(struct usb_gadget_driver *driver, ++ int (*bind)(struct usb_gadget *)) ++{ ++ int retval; ++ ++ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n", driver->driver.name); ++ ++ if (!driver || driver->speed == USB_SPEED_UNKNOWN || ++ !bind || ++ !driver->unbind || ++ !driver->disconnect || ++ !driver->setup) { ++ DWC_DEBUGPL(DBG_PCDV,"EINVAL\n"); ++ return -EINVAL; ++ } ++ if (s_pcd == 0) { ++ DWC_DEBUGPL(DBG_PCDV,"ENODEV\n"); ++ return -ENODEV; ++ } ++ if (s_pcd->driver != 0) { ++ DWC_DEBUGPL(DBG_PCDV,"EBUSY (%p)\n", s_pcd->driver); ++ return -EBUSY; ++ } ++ ++ /* hook up the driver */ ++ s_pcd->driver = driver; ++ s_pcd->gadget.dev.driver = &driver->driver; ++ ++ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name); ++ retval = bind(&s_pcd->gadget); ++ if (retval) { ++ DWC_ERROR("bind to driver %s --> error %d\n", ++ driver->driver.name, retval); ++ s_pcd->driver = 0; ++ s_pcd->gadget.dev.driver = 0; ++ return retval; ++ } ++ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n", ++ driver->driver.name); ++ return 0; ++} ++ ++EXPORT_SYMBOL(usb_gadget_probe_driver); ++ ++/** ++ * This function unregisters a gadget driver ++ * ++ * @param driver The driver being unregistered ++ */ ++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) ++{ ++ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver); ++ ++ if (s_pcd == 0) { ++ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__, ++ -ENODEV); ++ return -ENODEV; ++ } ++ if (driver == 0 || driver != s_pcd->driver) { ++ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__, ++ -EINVAL); ++ return -EINVAL; ++ } ++ ++ driver->unbind(&s_pcd->gadget); ++ s_pcd->driver = 0; ++ ++ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", ++ driver->driver.name); ++ return 0; ++} ++EXPORT_SYMBOL(usb_gadget_unregister_driver); ++ ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_pcd.h +@@ -0,0 +1,292 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.h $ ++ * $Revision: #36 $ ++ * $Date: 2008/09/26 $ ++ * $Change: 1103515 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++#if !defined(__DWC_PCD_H__) ++#define __DWC_PCD_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++struct dwc_otg_device; ++ ++#include "otg_cil.h" ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Perpherial Contoller Driver (PCD). ++ * ++ * The Peripheral Controller Driver (PCD) for Linux will implement the ++ * Gadget API, so that the existing Gadget drivers can be used. For ++ * the Mass Storage Function driver the File-backed USB Storage Gadget ++ * (FBS) driver will be used. The FBS driver supports the ++ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only ++ * transports. ++ * ++ */ ++ ++/** Invalid DMA Address */ ++#define DMA_ADDR_INVALID (~(dma_addr_t)0) ++/** Maxpacket size for EP0 */ ++#define MAX_EP0_SIZE 64 ++/** Maxpacket size for any EP */ ++#define MAX_PACKET_SIZE 1024 ++ ++/** Max Transfer size for any EP */ ++#define MAX_TRANSFER_SIZE 65535 ++ ++/** Max DMA Descriptor count for any EP */ ++#define MAX_DMA_DESC_CNT 64 ++ ++/** ++ * Get the pointer to the core_if from the pcd pointer. ++ */ ++#define GET_CORE_IF( _pcd ) (_pcd->otg_dev->core_if) ++ ++/** ++ * States of EP0. ++ */ ++typedef enum ep0_state ++{ ++ EP0_DISCONNECT, /* no host */ ++ EP0_IDLE, ++ EP0_IN_DATA_PHASE, ++ EP0_OUT_DATA_PHASE, ++ EP0_IN_STATUS_PHASE, ++ EP0_OUT_STATUS_PHASE, ++ EP0_STALL, ++} ep0state_e; ++ ++/** Fordward declaration.*/ ++struct dwc_otg_pcd; ++ ++/** DWC_otg iso request structure. ++ * ++ */ ++typedef struct usb_iso_request dwc_otg_pcd_iso_request_t; ++ ++/** PCD EP structure. ++ * This structure describes an EP, there is an array of EPs in the PCD ++ * structure. ++ */ ++typedef struct dwc_otg_pcd_ep ++{ ++ /** USB EP data */ ++ struct usb_ep ep; ++ /** USB EP Descriptor */ ++ const struct usb_endpoint_descriptor *desc; ++ ++ /** queue of dwc_otg_pcd_requests. */ ++ struct list_head queue; ++ unsigned stopped : 1; ++ unsigned disabling : 1; ++ unsigned dma : 1; ++ unsigned queue_sof : 1; ++ ++#ifdef DWC_EN_ISOC ++ /** DWC_otg Isochronous Transfer */ ++ struct usb_iso_request* iso_req; ++#endif //DWC_EN_ISOC ++ ++ /** DWC_otg ep data. */ ++ dwc_ep_t dwc_ep; ++ ++ /** Pointer to PCD */ ++ struct dwc_otg_pcd *pcd; ++}dwc_otg_pcd_ep_t; ++ ++ ++ ++/** DWC_otg PCD Structure. ++ * This structure encapsulates the data for the dwc_otg PCD. ++ */ ++typedef struct dwc_otg_pcd ++{ ++ /** USB gadget */ ++ struct usb_gadget gadget; ++ /** USB gadget driver pointer*/ ++ struct usb_gadget_driver *driver; ++ /** The DWC otg device pointer. */ ++ struct dwc_otg_device *otg_dev; ++ ++ /** State of EP0 */ ++ ep0state_e ep0state; ++ /** EP0 Request is pending */ ++ unsigned ep0_pending : 1; ++ /** Indicates when SET CONFIGURATION Request is in process */ ++ unsigned request_config : 1; ++ /** The state of the Remote Wakeup Enable. 
*/ ++ unsigned remote_wakeup_enable : 1; ++ /** The state of the B-Device HNP Enable. */ ++ unsigned b_hnp_enable : 1; ++ /** The state of A-Device HNP Support. */ ++ unsigned a_hnp_support : 1; ++ /** The state of the A-Device Alt HNP support. */ ++ unsigned a_alt_hnp_support : 1; ++ /** Count of pending Requests */ ++ unsigned request_pending; ++ ++ /** SETUP packet for EP0 ++ * This structure is allocated as a DMA buffer on PCD initialization ++ * with enough space for up to 3 setup packets. ++ */ ++ union ++ { ++ struct usb_ctrlrequest req; ++ uint32_t d32[2]; ++ } *setup_pkt; ++ ++ dma_addr_t setup_pkt_dma_handle; ++ ++ /** 2-byte dma buffer used to return status from GET_STATUS */ ++ uint16_t *status_buf; ++ dma_addr_t status_buf_dma_handle; ++ ++ /** EP0 */ ++ dwc_otg_pcd_ep_t ep0; ++ ++ /** Array of IN EPs. */ ++ dwc_otg_pcd_ep_t in_ep[ MAX_EPS_CHANNELS - 1]; ++ /** Array of OUT EPs. */ ++ dwc_otg_pcd_ep_t out_ep[ MAX_EPS_CHANNELS - 1]; ++ /** number of valid EPs in the above array. */ ++// unsigned num_eps : 4; ++ spinlock_t lock; ++ /** Timer for SRP. If it expires before SRP is successful ++ * clear the SRP. */ ++ struct timer_list srp_timer; ++ ++ /** Tasklet to defer starting of TEST mode transmissions until ++ * Status Phase has been completed. ++ */ ++ struct tasklet_struct test_mode_tasklet; ++ ++ /** Tasklet to delay starting of xfer in DMA mode */ ++ struct tasklet_struct *start_xfer_tasklet; ++ ++ /** The test mode to enter when the tasklet is executed. */ ++ unsigned test_mode; ++ ++} dwc_otg_pcd_t; ++ ++ ++/** DWC_otg request structure. ++ * This structure is a list of requests. ++ */ ++typedef struct ++{ ++ struct usb_request req; /**< USB Request. */ ++ struct list_head queue; /**< queue of these requests. */ ++} dwc_otg_pcd_request_t; ++ ++ ++extern int dwc_otg_pcd_init(struct platform_device *pdev); ++ ++//extern void dwc_otg_pcd_remove( struct dwc_otg_device *_otg_dev ); ++extern void dwc_otg_pcd_remove( struct platform_device *pdev ); ++extern int32_t dwc_otg_pcd_handle_intr( dwc_otg_pcd_t *pcd ); ++extern void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd ); ++ ++extern void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd); ++extern void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set); ++ ++extern void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req); ++extern void dwc_otg_request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *req, ++ int status); ++extern void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *_ep); ++extern void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *_pcd, ++ const unsigned reset); ++#ifndef VERBOSE ++#define VERIFY_PCD_DMA_ADDR(_addr_) BUG_ON(((_addr_)==DMA_ADDR_INVALID)||\ ++ ((_addr_)==0)||\ ++ ((_addr_)&0x3)) ++#else ++#define VERIFY_PCD_DMA_ADDR(_addr_) {\ ++ if(((_addr_)==DMA_ADDR_INVALID)||\ ++ ((_addr_)==0)||\ ++ ((_addr_)&0x3)) {\ ++ printk("%s: Invalid DMA address "#_addr_"(%.8x)\n",__func__,_addr_);\ ++ BUG();\ ++ }\ ++ } ++#endif ++ ++ ++static inline void ep_check_and_patch_dma_addr(dwc_otg_pcd_ep_t *ep){ ++//void ep_check_and_patch_dma_addr(dwc_otg_pcd_ep_t *ep){ ++ dwc_ep_t *dwc_ep=&ep->dwc_ep; ++ ++DWC_DEBUGPL(DBG_PCDV,"%s: dwc_ep xfer_buf=%.8x, total_len=%d, dma_addr=%.8x\n",__func__,(u32)dwc_ep->xfer_buff,(dwc_ep->total_len),dwc_ep->dma_addr); ++ if (/*(core_if->dma_enable)&&*/(dwc_ep->dma_addr==DMA_ADDR_INVALID)) { ++ if((((u32)dwc_ep->xfer_buff)&0x3)==0){ ++ dwc_ep->dma_addr=dma_map_single(NULL,(void *)(dwc_ep->start_xfer_buff),(dwc_ep->total_len), DMA_TO_DEVICE); ++DWC_DEBUGPL(DBG_PCDV," got 
dma_addr=%.8x\n",dwc_ep->dma_addr); ++ }else{ ++DWC_DEBUGPL(DBG_PCDV," buf not aligned, use aligned_buf instead. xfer_buf=%.8x, total_len=%d, aligned_buf_size=%d\n",(u32)dwc_ep->xfer_buff,(dwc_ep->total_len),dwc_ep->aligned_buf_size); ++ if(dwc_ep->aligned_buf_sizetotal_len){ ++ if(dwc_ep->aligned_buf){ ++//printk(" free buff dwc_ep aligned_buf_size=%d, aligned_buf(%.8x), aligned_dma_addr(%.8x));\n",dwc_ep->aligned_buf_size,dwc_ep->aligned_buf,dwc_ep->aligned_dma_addr); ++ //dma_free_coherent(NULL,dwc_ep->aligned_buf_size,dwc_ep->aligned_buf,dwc_ep->aligned_dma_addr); ++ kfree(dwc_ep->aligned_buf); ++ } ++ dwc_ep->aligned_buf_size=((1<<20)>(dwc_ep->total_len<<1))?(dwc_ep->total_len<<1):(1<<20); ++ //dwc_ep->aligned_buf = dma_alloc_coherent (NULL, dwc_ep->aligned_buf_size, &dwc_ep->aligned_dma_addr, GFP_KERNEL|GFP_DMA); ++ dwc_ep->aligned_buf=kmalloc(dwc_ep->aligned_buf_size,GFP_KERNEL|GFP_DMA|GFP_ATOMIC); ++ dwc_ep->aligned_dma_addr=dma_map_single(NULL,(void *)(dwc_ep->aligned_buf),(dwc_ep->aligned_buf_size),DMA_FROM_DEVICE); ++ if(!dwc_ep->aligned_buf){ ++ DWC_ERROR("Cannot alloc required buffer!!\n"); ++ BUG(); ++ } ++DWC_DEBUGPL(DBG_PCDV," dwc_ep allocated aligned buf=%.8x, dma_addr=%.8x, size=%d(0x%x)\n", (u32)dwc_ep->aligned_buf, dwc_ep->aligned_dma_addr, dwc_ep->aligned_buf_size, dwc_ep->aligned_buf_size); ++ } ++ dwc_ep->dma_addr=dwc_ep->aligned_dma_addr; ++ if(dwc_ep->is_in) { ++ memcpy(dwc_ep->aligned_buf,dwc_ep->xfer_buff,dwc_ep->total_len); ++ dma_sync_single_for_device(NULL,dwc_ep->dma_addr,dwc_ep->total_len,DMA_TO_DEVICE); ++ } ++ } ++ } ++} ++ ++#endif ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_pcd_intr.c +@@ -0,0 +1,3682 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $ ++ * $Revision: #83 $ ++ * $Date: 2008/10/14 $ ++ * $Change: 1115682 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++#include ++#include ++#include ++#include ++ ++#include "otg_driver.h" ++#include "otg_pcd.h" ++ ++ ++#define DEBUG_EP0 ++ ++ ++/* request functions defined in "dwc_otg_pcd.c" */ ++ ++/** @file ++ * This file contains the implementation of the PCD Interrupt handlers. ++ * ++ * The PCD handles the device interrupts. Many conditions can cause a ++ * device interrupt. When an interrupt occurs, the device interrupt ++ * service routine determines the cause of the interrupt and ++ * dispatches handling to the appropriate function. These interrupt ++ * handling functions are described below. ++ * All interrupt registers are processed from LSB to MSB. ++ */ ++ ++ ++/** ++ * This function prints the ep0 state for debug purposes. ++ */ ++static inline void print_ep0_state(dwc_otg_pcd_t *pcd) ++{ ++#ifdef DEBUG ++ char str[40]; ++ ++ switch (pcd->ep0state) { ++ case EP0_DISCONNECT: ++ strcpy(str, "EP0_DISCONNECT"); ++ break; ++ case EP0_IDLE: ++ strcpy(str, "EP0_IDLE"); ++ break; ++ case EP0_IN_DATA_PHASE: ++ strcpy(str, "EP0_IN_DATA_PHASE"); ++ break; ++ case EP0_OUT_DATA_PHASE: ++ strcpy(str, "EP0_OUT_DATA_PHASE"); ++ break; ++ case EP0_IN_STATUS_PHASE: ++ strcpy(str,"EP0_IN_STATUS_PHASE"); ++ break; ++ case EP0_OUT_STATUS_PHASE: ++ strcpy(str,"EP0_OUT_STATUS_PHASE"); ++ break; ++ case EP0_STALL: ++ strcpy(str,"EP0_STALL"); ++ break; ++ default: ++ strcpy(str,"EP0_INVALID"); ++ } ++ ++ DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state); ++#endif ++} ++ ++/** ++ * This function returns pointer to in ep struct with number ep_num ++ */ ++static inline dwc_otg_pcd_ep_t* get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) ++{ ++ int i; ++ int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; ++ if(ep_num == 0) { ++ return &pcd->ep0; ++ } ++ else { ++ for(i = 0; i < num_in_eps; ++i) ++ { ++ if(pcd->in_ep[i].dwc_ep.num == ep_num) ++ return &pcd->in_ep[i]; ++ } ++ return 0; ++ } ++} ++/** ++ * This function returns pointer to out ep struct with number ep_num ++ */ ++static inline dwc_otg_pcd_ep_t* get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) ++{ ++ int i; ++ int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; ++ if(ep_num == 0) { ++ return &pcd->ep0; ++ } ++ else { ++ for(i = 0; i < num_out_eps; ++i) ++ { ++ if(pcd->out_ep[i].dwc_ep.num == ep_num) ++ return &pcd->out_ep[i]; ++ } ++ return 0; ++ } ++} ++/** ++ * This functions gets a pointer to an EP from the wIndex address ++ * value of the control request. 
++ */ ++static dwc_otg_pcd_ep_t *get_ep_by_addr (dwc_otg_pcd_t *pcd, u16 wIndex) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ ++ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) ++ return &pcd->ep0; ++ list_for_each_entry(ep, &pcd->gadget.ep_list, ep.ep_list) ++ { ++ u8 bEndpointAddress; ++ ++ if (!ep->desc) ++ continue; ++ ++ bEndpointAddress = ep->desc->bEndpointAddress; ++ if((wIndex & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK)) ++ == (bEndpointAddress & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK))) ++ return ep; ++ } ++ return NULL; ++} ++ ++/** ++ * This function checks the EP request queue, if the queue is not ++ * empty the next request is started. ++ */ ++void start_next_request(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_pcd_request_t *req = 0; ++ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; ++ if (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, ++ dwc_otg_pcd_request_t, queue); ++ ++ /* Setup and start the Transfer */ ++ ep->dwc_ep.dma_addr = req->req.dma; ++ ep->dwc_ep.start_xfer_buff = req->req.buf; ++ ep->dwc_ep.xfer_buff = req->req.buf; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = req->req.length; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ++ if(max_transfer > MAX_TRANSFER_SIZE) { ++ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); ++ } else { ++ ep->dwc_ep.maxxfer = max_transfer; ++ } ++ ++ if(req->req.zero) { ++ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.total_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ep_check_and_patch_dma_addr(ep); ++ dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep); ++ } ++} ++ ++/** ++ * This function handles the SOF Interrupts. At this time the SOF ++ * Interrupt is disabled. ++ */ ++int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_PCD, "SOF\n"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function handles the Rx Status Queue Level Interrupt, which ++ * indicates that there is a least one packet in the Rx FIFO. The ++ * packets are moved from the FIFO to memory, where they will be ++ * processed when the Endpoint Interrupt Register indicates Transfer ++ * Complete or SETUP Phase Done. 
++ * ++ * Repeat the following until the Rx Status Queue is empty: ++ * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet ++ * info ++ * -# If Receive FIFO is empty then skip to step Clear the interrupt ++ * and exit ++ * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the ++ * SETUP data to the buffer ++ * -# If OUT Data Packet call dwc_otg_read_packet to copy the data ++ * to the destination buffer ++ */ ++int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ gintmsk_data_t gintmask = {.d32=0}; ++ device_grxsts_data_t status; ++ dwc_otg_pcd_ep_t *ep; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ static char *dpid_str[] ={ "D0", "D2", "D1", "MDATA" }; ++#endif ++ ++ //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); ++ /* Disable the Rx Status Queue Level interrupt */ ++ gintmask.b.rxstsqlvl= 1; ++ dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0); ++ ++ /* Get the Status from the top of the FIFO */ ++ status.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ ++ DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s " ++ "pktsts:%x Frame:%d(0x%0x)\n", ++ status.b.epnum, status.b.bcnt, ++ dpid_str[status.b.dpid], ++ status.b.pktsts, status.b.fn, status.b.fn); ++ /* Get pointer to EP structure */ ++ ep = get_out_ep(pcd, status.b.epnum); ++ ++ switch (status.b.pktsts) { ++ case DWC_DSTS_GOUT_NAK: ++ DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n"); ++ break; ++ case DWC_STS_DATA_UPDT: ++ DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n"); ++ if (status.b.bcnt && ep->dwc_ep.xfer_buff) { ++ /** @todo NGS Check for buffer overflow? */ ++ dwc_otg_read_packet(core_if, ++ ep->dwc_ep.xfer_buff, ++ status.b.bcnt); ++ ep->dwc_ep.xfer_count += status.b.bcnt; ++ ep->dwc_ep.xfer_buff += status.b.bcnt; ++ } ++ break; ++ case DWC_STS_XFER_COMP: ++ DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n"); ++ break; ++ case DWC_DSTS_SETUP_COMP: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n"); ++#endif ++ break; ++case DWC_DSTS_SETUP_UPDT: ++ dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32); ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, ++ "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n", ++ pcd->setup_pkt->req.bRequestType, ++ pcd->setup_pkt->req.bRequest, ++ pcd->setup_pkt->req.wValue, ++ pcd->setup_pkt->req.wIndex, ++ pcd->setup_pkt->req.wLength); ++#endif ++ ep->dwc_ep.xfer_count += status.b.bcnt; ++ break; ++ default: ++ DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n", ++ status.b.pktsts); ++ break; ++ } ++ ++ /* Enable the Rx Status Queue Level interrupt */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32); ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); ++ ++ //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); ++ return 1; ++} ++/** ++ * This function examines the Device IN Token Learning Queue to ++ * determine the EP number of the last IN token received. This ++ * implementation is for the Mass Storage device where there are only ++ * 2 IN EPs (Control-IN and BULK-IN). ++ * ++ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there ++ * are 8 EP Numbers in each of the other possible DTKNQ Registers. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ * ++ */ ++static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_device_global_regs_t *dev_global_regs = ++ core_if->dev_if->dev_global_regs; ++ const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth; ++ /* Number of Token Queue Registers */ ++ const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8; ++ dtknq1_data_t dtknqr1; ++ uint32_t in_tkn_epnums[4]; ++ int ndx = 0; ++ int i = 0; ++ volatile uint32_t *addr = &dev_global_regs->dtknqr1; ++ int epnum = 0; ++ ++ //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); ++ ++ /* Read the DTKNQ Registers */ ++ for (i = 0; i < DTKNQ_REG_CNT; i++) ++ { ++ in_tkn_epnums[ i ] = dwc_read_reg32(addr); ++ DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i+1, ++ in_tkn_epnums[i]); ++ if (addr == &dev_global_regs->dvbusdis) { ++ addr = &dev_global_regs->dtknqr3_dthrctl; ++ } ++ else { ++ ++addr; ++ } ++ } ++ ++ /* Copy the DTKNQR1 data to the bit field. */ ++ dtknqr1.d32 = in_tkn_epnums[0]; ++ /* Get the EP numbers */ ++ in_tkn_epnums[0] = dtknqr1.b.epnums0_5; ++ ndx = dtknqr1.b.intknwptr - 1; ++ ++ //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); ++ if (ndx == -1) { ++ /** @todo Find a simpler way to calculate the max ++ * queue position.*/ ++ int cnt = TOKEN_Q_DEPTH; ++ if (TOKEN_Q_DEPTH <= 6) { ++ cnt = TOKEN_Q_DEPTH - 1; ++ } ++ else if (TOKEN_Q_DEPTH <= 14) { ++ cnt = TOKEN_Q_DEPTH - 7; ++ } ++ else if (TOKEN_Q_DEPTH <= 22) { ++ cnt = TOKEN_Q_DEPTH - 15; ++ } ++ else { ++ cnt = TOKEN_Q_DEPTH - 23; ++ } ++ epnum = (in_tkn_epnums[ DTKNQ_REG_CNT - 1 ] >> (cnt * 4)) & 0xF; ++ } ++ else { ++ if (ndx <= 5) { ++ epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 13) { ++ ndx -= 6; ++ epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 21) { ++ ndx -= 14; ++ epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 29) { ++ ndx -= 22; ++ epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF; ++ } ++ } ++ //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); ++ return epnum; ++} ++ ++/** ++ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. ++ * The active request is checked for the next packet to be loaded into ++ * the non-periodic Tx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ gnptxsts_data_t txstatus = {.d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ int epnum = 0; ++ dwc_otg_pcd_ep_t *ep = 0; ++ uint32_t len = 0; ++ int dwords; ++ ++ /* Get the epnum from the IN Token Learning Queue. 
*/ ++ epnum = get_ep_of_last_in_token(core_if); ++ ep = get_in_ep(pcd, epnum); ++ ++ DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); ++ ep_regs = core_if->dev_if->in_ep_regs[epnum]; ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n",txstatus.d32); ++ ++ while (txstatus.b.nptxqspcavail > 0 && ++ txstatus.b.nptxfspcavail > dwords && ++ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_PCDV,"GNPTXSTS=0x%08x\n",txstatus.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", ++ dwc_read_reg32(&global_regs->gnptxsts)); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.nptxfempty = 1; ++ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function is called when dedicated Tx FIFO Empty interrupt occurs. ++ * The active request is checked for the next packet to be loaded into ++ * apropriate Tx FIFO. ++ */ ++static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t* dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ dtxfsts_data_t txstatus = {.d32 = 0}; ++ dwc_otg_pcd_ep_t *ep = 0; ++ uint32_t len = 0; ++ int dwords; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++ DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); ++ ++ ep_regs = core_if->dev_if->in_ep_regs[epnum]; ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,txstatus.d32); ++ ++ while (txstatus.b.txfspcavail > dwords && ++ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len && ++ ep->dwc_ep.xfer_len != 0) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts)); ++ ++ return 1; ++} ++ ++/** ++ * This function is called when the Device is disconnected. It stops ++ * any active requests and informs the Gadget driver of the ++ * disconnect. 
++ */ ++void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd) ++{ ++ int i, num_in_eps, num_out_eps; ++ dwc_otg_pcd_ep_t *ep; ++ ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; ++ num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__); ++ /* don't disconnect drivers more than once */ ++ if (pcd->ep0state == EP0_DISCONNECT) { ++ DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__); ++ return; ++ } ++ pcd->ep0state = EP0_DISCONNECT; ++ ++ /* Reset the OTG state. */ ++ dwc_otg_pcd_update_otg(pcd, 1); ++ ++ /* Disable the NP Tx Fifo Empty Interrupt. */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Flush the FIFOs */ ++ /**@todo NGS Flush Periodic FIFOs */ ++ dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10); ++ dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd)); ++ ++ /* prevent new request submissions, kill any outstanding requests */ ++ ep = &pcd->ep0; ++ dwc_otg_request_nuke(ep); ++ /* prevent new request submissions, kill any outstanding requests */ ++ for (i = 0; i < num_in_eps; i++) ++ { ++ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i]; ++ dwc_otg_request_nuke(ep); ++ } ++ /* prevent new request submissions, kill any outstanding requests */ ++ for (i = 0; i < num_out_eps; i++) ++ { ++ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i]; ++ dwc_otg_request_nuke(ep); ++ } ++ ++ /* report disconnect; the driver is already quiesced */ ++ if (pcd->driver && pcd->driver->disconnect) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->disconnect(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++} ++ ++/** ++ * This interrupt indicates that ... ++ */ ++int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr"); ++ intr_mask.b.i2cintr = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.i2cintr = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++ ++/** ++ * This interrupt indicates that ... ++ */ ++int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintsts_data_t gintsts; ++#if defined(VERBOSE) ++ DWC_PRINT("Early Suspend Detected\n"); ++#endif ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.erlysuspend = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++/** ++ * This function configures EPO to receive SETUP packets. ++ * ++ * @todo NGS: Update the comments from the HW FS. ++ * ++ * -# Program the following fields in the endpoint specific registers ++ * for Control OUT EP 0, in order to receive a setup packet ++ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back ++ * setup packets) ++ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back ++ * to back setup packets) ++ * - In DMA mode, DOEPDMA0 Register with a memory address to ++ * store any setup packets received ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param pcd Programming view of the PCD. 
++ */ ++static inline void ep0_out_start(dwc_otg_core_if_t *core_if, dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ deptsiz0_data_t doeptsize0 = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ depctl_data_t doepctl = { .d32 = 0 }; ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"%s() doepctl0=%0x\n", __func__, ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++#endif ++ ++ doeptsize0.b.supcnt = 3; ++ doeptsize0.b.pktcnt = 1; ++ doeptsize0.b.xfersize = 8*3; ++ ++ if (core_if->dma_enable) { ++ if (!core_if->dma_desc_enable) { ++ /** put here as for Hermes mode deptisz register should not be written */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, ++ doeptsize0.d32); ++ ++ /** @todo dma needs to handle multiple setup packets (up to 3) */ ++ VERIFY_PCD_DMA_ADDR(pcd->setup_pkt_dma_handle); ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, ++ pcd->setup_pkt_dma_handle); ++ } else { ++ dev_if->setup_desc_index = (dev_if->setup_desc_index + 1) & 1; ++ dma_desc = dev_if->setup_desc_addr[dev_if->setup_desc_index]; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket; ++ dma_desc->buf = pcd->setup_pkt_dma_handle; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ VERIFY_PCD_DMA_ADDR(dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); ++ } ++ ++ } else { ++ /** put here as for Hermes mode deptisz register should not be written */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, ++ doeptsize0.d32); ++ } ++ ++ /** DOEPCTL0 Register write */ ++ doepctl.b.epena = 1; ++ doepctl.b.cnak = 1; ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); ++#endif ++} ++ ++/** ++ * This interrupt occurs when a USB Reset is detected. When the USB ++ * Reset Interrupt occurs the device state is set to DEFAULT and the ++ * EP0 state is set to IDLE. ++ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1) ++ * -# Unmask the following interrupt bits ++ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint) ++ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint) ++ * - DOEPMSK.SETUP = 1 ++ * - DOEPMSK.XferCompl = 1 ++ * - DIEPMSK.XferCompl = 1 ++ * - DIEPMSK.TimeOut = 1 ++ * -# Program the following fields in the endpoint specific registers ++ * for Control OUT EP 0, in order to receive a setup packet ++ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back ++ * setup packets) ++ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back ++ * to back setup packets) ++ * - In DMA mode, DOEPDMA0 Register with a memory address to ++ * store any setup packets received ++ * At this point, all the required initialization, except for enabling ++ * the control 0 OUT endpoint is done, for receiving SETUP packets. 
++ */ ++int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ depctl_data_t doepctl = { .d32 = 0}; ++ ++ daint_data_t daintmsk = { .d32 = 0}; ++ doepmsk_data_t doepmsk = { .d32 = 0}; ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ ++ dcfg_data_t dcfg = { .d32=0 }; ++ grstctl_t resetctl = { .d32=0 }; ++ dctl_data_t dctl = {.d32=0}; ++ int i = 0; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("USB RESET\n"); ++#ifdef DWC_EN_ISOC ++ for(i = 1;i < 16; ++i) ++ { ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ep = get_in_ep(pcd,i); ++ if(ep != 0){ ++ dwc_ep = &ep->dwc_ep; ++ dwc_ep->next_frame = 0xffffffff; ++ } ++ } ++#endif /* DWC_EN_ISOC */ ++ ++ /* reset the HNP settings */ ++ dwc_otg_pcd_update_otg(pcd, 1); ++ ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ ++ /* Set NAK for all OUT EPs */ ++ doepctl.b.snak = 1; ++ for (i=0; i <= dev_if->num_out_eps; i++) ++ { ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, ++ doepctl.d32); ++ } ++ ++ /* Flush the NP Tx FIFO */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10); ++ /* Flush the Learning Queue */ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ if(core_if->multiproc_int_enable) { ++ daintmsk.b.inep0 = 1; ++ daintmsk.b.outep0 = 1; ++ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, daintmsk.d32); ++ ++ doepmsk.b.setup = 1; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.stsphsercvd = 1; ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ ++ if(core_if->dma_enable) { ++ doepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], doepmsk.d32); ++ ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++/* ++ if(core_if->dma_enable) { ++ diepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], diepmsk.d32); ++ } else{ ++ daintmsk.b.inep0 = 1; ++ daintmsk.b.outep0 = 1; ++ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32); ++ ++ doepmsk.b.setup = 1; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.stsphsercvd = 1; ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ doepmsk.b.nak = 1; ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32); ++ ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++ ++// diepmsk.b.nak = 1; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32); ++ } ++ ++ /* Reset Device Address */ ++ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); ++ dcfg.b.devaddr = 0; ++ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); ++ ++ /* setup EP0 to receive SETUP packets */ ++ ep0_out_start(core_if, pcd); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbreset = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * Get the device speed from the device status 
register and convert it ++ * to USB speed constant. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static int get_device_speed(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ enum usb_device_speed speed = USB_SPEED_UNKNOWN; ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ switch (dsts.b.enumspd) { ++ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: ++ speed = USB_SPEED_HIGH; ++ break; ++ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: ++ speed = USB_SPEED_FULL; ++ break; ++ ++ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: ++ speed = USB_SPEED_LOW; ++ break; ++ } ++ ++ return speed; ++} ++ ++/** ++ * Read the device status register and set the device speed in the ++ * data structure. ++ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate. ++ */ ++int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ gintsts_data_t gintsts; ++ gusbcfg_data_t gusbcfg; ++ dwc_otg_core_global_regs_t *global_regs = ++ GET_CORE_IF(pcd)->core_global_regs; ++ uint8_t utmi16b, utmi8b; ++// DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n"); ++ DWC_PRINT("SPEED ENUM\n"); ++ ++ if (GET_CORE_IF(pcd)->snpsid >= 0x4F54260A) { ++ utmi16b = 6; ++ utmi8b = 9; ++ } else { ++ utmi16b = 4; ++ utmi8b = 8; ++ } ++ dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++#ifdef DEBUG_EP0 ++ print_ep0_state(pcd); ++#endif ++ ++ if (pcd->ep0state == EP0_DISCONNECT) { ++ pcd->ep0state = EP0_IDLE; ++ } ++ else if (pcd->ep0state == EP0_STALL) { ++ pcd->ep0state = EP0_IDLE; ++ } ++ ++ pcd->ep0state = EP0_IDLE; ++ ++ ep0->stopped = 0; ++ ++ pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd)); ++ ++ /* Set USB turnaround time based on device speed and PHY interface. */ ++ gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ if (pcd->gadget.speed == USB_SPEED_HIGH) { ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_ULPI) { ++ /* ULPI interface */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI) { ++ /* UTMI+ interface */ ++ if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ else if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 1) { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ else if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 8) { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ else { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ } ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) { ++ /* UTMI+ OR ULPI interface */ ++ if (gusbcfg.b.ulpi_utmi_sel == 1) { ++ /* ULPI interface */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ else { ++ /* UTMI+ interface */ ++ if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 16) { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ else { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ } ++ } ++ } ++ else { ++ /* Full or low speed */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.enumdone = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that the ISO OUT Packet was dropped due to ++ * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs ++ * read all the data from the Rx FIFO. 
++ */ ++int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "ISOC Out Dropped"); ++ ++ intr_mask.b.isooutdrop = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ ++ gintsts.d32 = 0; ++ gintsts.b.isooutdrop = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates the end of the portion of the micro-frame ++ * for periodic transactions. If there is a periodic transaction for ++ * the next frame, load the packets into the EP periodic Tx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "EOP"); ++ ++ intr_mask.b.eopframe = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.eopframe = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that EP of the packet on the top of the ++ * non-periodic Tx FIFO does not match EP of the IN Token received. ++ * ++ * The "Device IN Token Queue" Registers are read to determine the ++ * order the IN Tokens have been received. The non-periodic Tx FIFO ++ * is flushed, so it can be reloaded in the order seen in the IN Token ++ * Queue. ++ */ ++int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.epmismatch = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This funcion stalls EP0. ++ */ ++static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req; ++ DWC_WARN("req %02x.%02x protocol STALL; err %d\n", ++ ctrl->bRequestType, ctrl->bRequest, err_val); ++ ++ ep0->dwc_ep.is_in = 1; ++ dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep); ++ pcd->ep0.stopped = 1; ++ pcd->ep0state = EP0_IDLE; ++ ep0_out_start(GET_CORE_IF(pcd), pcd); ++} ++ ++/** ++ * This functions delegates the setup command to the gadget driver. ++ */ ++static inline void do_gadget_setup(dwc_otg_pcd_t *pcd, ++ struct usb_ctrlrequest * ctrl) ++{ ++ int ret = 0; ++ if (pcd->driver && pcd->driver->setup) { ++ SPIN_UNLOCK(&pcd->lock); ++ ret = pcd->driver->setup(&pcd->gadget, ctrl); ++ SPIN_LOCK(&pcd->lock); ++ if (ret < 0) { ++ ep0_do_stall(pcd, ret); ++ } ++ ++ /** @todo This is a g_file_storage gadget driver specific ++ * workaround: a DELAYED_STATUS result from the fsg_setup ++ * routine will result in the gadget queueing a EP0 IN status ++ * phase for a two-stage control transfer. Exactly the same as ++ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class ++ * specific request. Need a generic way to know when the gadget ++ * driver will queue the status phase. Can we assume when we ++ * call the gadget driver setup() function that it will always ++ * queue and require the following flag? Need to look into ++ * this. 
++ */ ++ ++ if (ret == 256 + 999) { ++ pcd->request_config = 1; ++ } ++ } ++} ++ ++/** ++ * This function starts the Zero-Length Packet for the IN status phase ++ * of a 2 stage control transfer. ++ */ ++static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ if (pcd->ep0state == EP0_STALL) { ++ return; ++ } ++ ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ ++ /* Prepare for more SETUP Packets */ ++ DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n"); ++ ep0->dwc_ep.xfer_len = 0; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.is_in = 1; ++ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++ /* Prepare for more SETUP Packets */ ++// if(GET_CORE_IF(pcd)->dma_enable == 0) ep0_out_start(GET_CORE_IF(pcd), pcd); ++} ++ ++/** ++ * This function starts the Zero-Length Packet for the OUT status phase ++ * of a 2 stage control transfer. ++ */ ++static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ if (pcd->ep0state == EP0_STALL) { ++ DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n"); ++ return; ++ } ++ pcd->ep0state = EP0_OUT_STATUS_PHASE; ++ ++ DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n"); ++ ep0->dwc_ep.xfer_len = 0; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.is_in = 0; ++ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++ /* Prepare for more SETUP Packets */ ++ if(GET_CORE_IF(pcd)->dma_enable == 0) { ++ ep0_out_start(GET_CORE_IF(pcd), pcd); ++ } ++} ++ ++/** ++ * Clear the EP halt (STALL) and if pending requests start the ++ * transfer. ++ */ ++static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep) ++{ ++ if(ep->dwc_ep.stall_clear_flag == 0) ++ dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep); ++ ++ /* Reactive the EP */ ++ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); ++ if (ep->stopped) { ++ ep->stopped = 0; ++ /* If there is a request in the EP queue start it */ ++ ++ /** @todo FIXME: this causes an EP mismatch in DMA mode. ++ * epmismatch not yet implemented. */ ++ ++ /* ++ * Above fixme is solved by implmenting a tasklet to call the ++ * start_next_request(), outside of interrupt context at some ++ * time after the current time, after a clear-halt setup packet. ++ * Still need to implement ep mismatch in the future if a gadget ++ * ever uses more than one endpoint at once ++ */ ++ ep->queue_sof = 1; ++ tasklet_schedule (pcd->start_xfer_tasklet); ++ } ++ /* Start Control Status Phase */ ++ do_setup_in_status_phase(pcd); ++} ++ ++/** ++ * This function is called when the SET_FEATURE TEST_MODE Setup packet ++ * is sent from the host. The Device Control register is written with ++ * the Test Mode bits set to the specified Test Mode. This is done as ++ * a tasklet so that the "Status" phase of the control transfer ++ * completes before transmitting the TEST packets. ++ * ++ * @todo This has not been tested since the tasklet struct was put ++ * into the PCD struct! 
++ * ++ */ ++static void do_test_mode(unsigned long data) ++{ ++ dctl_data_t dctl; ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)data; ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ int test_mode = pcd->test_mode; ++ ++ ++// DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); ++ ++ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); ++ switch (test_mode) { ++ case 1: // TEST_J ++ dctl.b.tstctl = 1; ++ break; ++ ++ case 2: // TEST_K ++ dctl.b.tstctl = 2; ++ break; ++ ++ case 3: // TEST_SE0_NAK ++ dctl.b.tstctl = 3; ++ break; ++ ++ case 4: // TEST_PACKET ++ dctl.b.tstctl = 4; ++ break; ++ ++ case 5: // TEST_FORCE_ENABLE ++ dctl.b.tstctl = 5; ++ break; ++ } ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32); ++} ++ ++/** ++ * This function process the GET_STATUS Setup Commands. ++ */ ++static inline void do_get_status(dwc_otg_pcd_t *pcd) ++{ ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ uint16_t *status = pcd->status_buf; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, ++ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++#endif ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ *status = 0x1; /* Self powered */ ++ *status |= pcd->remote_wakeup_enable << 1; ++ break; ++ ++ case USB_RECIP_INTERFACE: ++ *status = 0; ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0 || ctrl.wLength > 2) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ /** @todo check for EP stall */ ++ *status = ep->stopped; ++ break; ++ } ++ pcd->ep0_pending = 1; ++ ep0->dwc_ep.start_xfer_buff = (uint8_t *)status; ++ ep0->dwc_ep.xfer_buff = (uint8_t *)status; ++ ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle; ++ ep0->dwc_ep.xfer_len = 2; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++} ++/** ++ * This function process the SET_FEATURE Setup Commands. ++ */ ++static inline void do_set_feature(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep = 0; ++ int32_t otg_cap_param = core_if->core_params->otg_cap; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++ DWC_DEBUGPL(DBG_PCD,"otg_cap=%d\n", otg_cap_param); ++ ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ switch (ctrl.wValue) { ++ case USB_DEVICE_REMOTE_WAKEUP: ++ pcd->remote_wakeup_enable = 1; ++ break; ++ ++ case USB_DEVICE_TEST_MODE: ++ /* Setup the Test Mode tasklet to do the Test ++ * Packet generation after the SETUP Status ++ * phase has completed. */ ++ ++ /** @todo This has not been tested since the ++ * tasklet struct was put into the PCD ++ * struct! 
*/ ++ pcd->test_mode_tasklet.next = 0; ++ pcd->test_mode_tasklet.state = 0; ++ atomic_set(&pcd->test_mode_tasklet.count, 0); ++ pcd->test_mode_tasklet.func = do_test_mode; ++ pcd->test_mode_tasklet.data = (unsigned long)pcd; ++ pcd->test_mode = ctrl.wIndex >> 8; ++ tasklet_schedule(&pcd->test_mode_tasklet); ++ break; ++ ++ case USB_DEVICE_B_HNP_ENABLE: ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); ++ ++ /* dev may initiate HNP */ ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->b_hnp_enable = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ DWC_DEBUGPL(DBG_PCD, "Request B HNP\n"); ++ /**@todo Is the gotgctl.devhnpen cleared ++ * by a USB Reset? */ ++ gotgctl.b.devhnpen = 1; ++ gotgctl.b.hnpreq = 1; ++ dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ ++ case USB_DEVICE_A_HNP_SUPPORT: ++ /* RH port supports HNP */ ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n"); ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->a_hnp_support = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ ++ case USB_DEVICE_A_ALT_HNP_SUPPORT: ++ /* other RH port does */ ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->a_alt_hnp_support = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ ++ case USB_RECIP_INTERFACE: ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ if (ctrl.wValue == USB_ENDPOINT_HALT) { ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(core_if, &ep->dwc_ep); ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ } ++} ++ ++/** ++ * This function process the CLEAR_FEATURE Setup Commands. ++ */ ++static inline void do_clear_feature(dwc_otg_pcd_t *pcd) ++{ ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep = 0; ++ ++ DWC_DEBUGPL(DBG_PCD, ++ "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ switch (ctrl.wValue) { ++ case USB_DEVICE_REMOTE_WAKEUP: ++ pcd->remote_wakeup_enable = 0; ++ break; ++ ++ case USB_DEVICE_TEST_MODE: ++ /** @todo Add CLEAR_FEATURE for TEST modes. */ ++ break; ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ ++ pcd_clear_halt(pcd, ep); ++ ++ break; ++ } ++} ++ ++/** ++ * This function process the SET_ADDRESS Setup Commands. ++ */ ++static inline void do_set_address(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ ++ if (ctrl.bRequestType == USB_RECIP_DEVICE) { ++ dcfg_data_t dcfg = {.d32=0}; ++ ++#ifdef DEBUG_EP0 ++// DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); ++#endif ++ dcfg.b.devaddr = ctrl.wValue; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32); ++ do_setup_in_status_phase(pcd); ++ } ++} ++ ++/** ++ * This function processes SETUP commands. 
In Linux, the USB Command
++ * processing is done in two places - the first being the PCD and the
++ * second in the Gadget Driver (for example, the File-Backed Storage
++ * Gadget Driver).
++ *
++ * Command             Driver         Description
++ * ------------------  -------------  --------------------------------------
++ * GET_STATUS          PCD            Command is processed as defined in
++ *                                    chapter 9 of the USB 2.0 Specification.
++ * CLEAR_FEATURE       PCD            Device and Endpoint requests with the
++ *                                    ENDPOINT_HALT feature are processed;
++ *                                    all Interface requests are ignored.
++ * SET_FEATURE         PCD            Device and Endpoint requests are
++ *                                    processed by the PCD. Interface
++ *                                    requests are passed to the Gadget
++ *                                    Driver.
++ * SET_ADDRESS         PCD            Program the DCFG register with the
++ *                                    device address received.
++ * GET_DESCRIPTOR      Gadget Driver  Return the requested descriptor.
++ * SET_DESCRIPTOR      Gadget Driver  Optional - not implemented by any of
++ *                                    the existing Gadget Drivers.
++ * SET_CONFIGURATION   Gadget Driver  Disable all EPs and enable EPs for the
++ *                                    new configuration.
++ * GET_CONFIGURATION   Gadget Driver  Return the current configuration.
++ * SET_INTERFACE       Gadget Driver  Disable all EPs and enable EPs for the
++ *                                    new configuration.
++ * GET_INTERFACE       Gadget Driver  Return the current interface.
++ * SYNC_FRAME          PCD            Display a debug message.
++ * ++ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are ++ * processed by pcd_setup. Calling the Function Driver's setup function from ++ * pcd_setup processes the gadget SETUP commands. ++ */ ++static inline void pcd_setup(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ ++ deptsiz0_data_t doeptsize0 = { .d32 = 0}; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++#endif ++ ++ doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz); ++ ++ /** @todo handle > 1 setup packet , assert error for now */ ++ ++ if (core_if->dma_enable && core_if->dma_desc_enable == 0 && (doeptsize0.b.supcnt < 2)) { ++ DWC_ERROR ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n"); ++ } ++ ++ /* Clean up the request queue */ ++ dwc_otg_request_nuke(ep0); ++ ep0->stopped = 0; ++ ++ if (ctrl.bRequestType & USB_DIR_IN) { ++ ep0->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_DATA_PHASE; ++ } ++ else { ++ ep0->dwc_ep.is_in = 0; ++ pcd->ep0state = EP0_OUT_DATA_PHASE; ++ } ++ ++ if(ctrl.wLength == 0) { ++ ep0->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ } ++ ++ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) { ++ /* handle non-standard (class/vendor) requests in the gadget driver */ ++ do_gadget_setup(pcd, &ctrl); ++ return; ++ } ++ ++ /** @todo NGS: Handle bad setup packet? */ ++ ++/////////////////////////////////////////// ++//// --- Standard Request handling --- //// ++ ++ switch (ctrl.bRequest) { ++ case USB_REQ_GET_STATUS: ++ do_get_status(pcd); ++ break; ++ ++ case USB_REQ_CLEAR_FEATURE: ++ do_clear_feature(pcd); ++ break; ++ ++ case USB_REQ_SET_FEATURE: ++ do_set_feature(pcd); ++ break; ++ ++ case USB_REQ_SET_ADDRESS: ++ do_set_address(pcd); ++ break; ++ ++ case USB_REQ_SET_INTERFACE: ++ case USB_REQ_SET_CONFIGURATION: ++// _pcd->request_config = 1; /* Configuration changed */ ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ case USB_REQ_SYNCH_FRAME: ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ default: ++ /* Call the Gadget Driver's setup functions */ ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ } ++} ++ ++/** ++ * This function completes the ep0 control transfer. 
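++ * When a data stage finishes, the status ZLP in the opposite direction is
++ * started; when a status stage finishes, the request is handed back to the
++ * gadget driver via dwc_otg_request_done().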
++ */ ++static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs = ++ dev_if->in_ep_regs[ep->dwc_ep.num]; ++#ifdef DEBUG_EP0 ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs = ++ dev_if->out_ep_regs[ep->dwc_ep.num]; ++#endif ++ deptsiz0_data_t deptsiz; ++ desc_sts_data_t desc_sts; ++ dwc_otg_pcd_request_t *req; ++ int is_last = 0; ++ dwc_otg_pcd_t *pcd = ep->pcd; ++ ++ //DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name); ++ ++ if (pcd->ep0_pending && list_empty(&ep->queue)) { ++ if (ep->dwc_ep.is_in) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n"); ++#endif ++ do_setup_out_status_phase(pcd); ++ } ++ else { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n"); ++#endif ++ do_setup_in_status_phase(pcd); ++ } ++ pcd->ep0_pending = 0; ++ return 1; ++ } ++ ++ if (list_empty(&ep->queue)) { ++ return 0; ++ } ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, queue); ++ ++ ++ if (pcd->ep0state == EP0_OUT_STATUS_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) { ++ is_last = 1; ++ } ++ else if (ep->dwc_ep.is_in) { ++ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); ++ if(core_if->dma_desc_enable != 0) ++ desc_sts.d32 = readl(dev_if->in_desc_addr); ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++#endif ++ ++ if (((core_if->dma_desc_enable == 0) && (deptsiz.b.xfersize == 0)) || ++ ((core_if->dma_desc_enable != 0) && (desc_sts.b.bytes == 0))) { ++ req->req.actual = ep->dwc_ep.xfer_count; ++ /* Is a Zero Len Packet needed? */ ++ if (req->req.zero) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n"); ++#endif ++ req->req.zero = 0; ++ } ++ do_setup_out_status_phase(pcd); ++ } ++ } ++ else { ++ /* ep0-OUT */ ++#ifdef DEBUG_EP0 ++ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, ++ deptsiz.b.pktcnt); ++#endif ++ req->req.actual = ep->dwc_ep.xfer_count; ++ /* Is a Zero Len Packet needed? */ ++ if (req->req.zero) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n"); ++#endif ++ req->req.zero = 0; ++ } ++ if(core_if->dma_desc_enable == 0) ++ do_setup_in_status_phase(pcd); ++ } ++ ++ /* Complete the request */ ++ if (is_last) { ++ dwc_otg_request_done(ep, req, 0); ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ return 1; ++ } ++ return 0; ++} ++ ++inline void aligned_buf_patch_on_buf_dma_oep_completion(dwc_otg_pcd_ep_t *ep, uint32_t byte_count) ++{ ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ if(byte_count && dwc_ep->aligned_buf && ++ dwc_ep->dma_addr>=dwc_ep->aligned_dma_addr && ++ dwc_ep->dma_addr<=(dwc_ep->aligned_dma_addr+dwc_ep->aligned_buf_size))\ ++ { ++ //aligned buf used, apply complete patch ++ u32 offset=(dwc_ep->dma_addr-dwc_ep->aligned_dma_addr); ++ memcpy(dwc_ep->start_xfer_buff+offset, dwc_ep->aligned_buf+offset, byte_count); ++ } ++} ++ ++/** ++ * This function completes the request for the EP. If there are ++ * additional requests for the EP in the queue they will be started. 
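++ * The number of bytes actually transferred is derived from DIEPTSIZn /
++ * DOEPTSIZn (or from the DMA descriptor status words when descriptor DMA
++ * is enabled) before the request is completed.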
++ */ ++static void complete_ep(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs = ++ dev_if->in_ep_regs[ep->dwc_ep.num]; ++ deptsiz_data_t deptsiz; ++ desc_sts_data_t desc_sts; ++ dwc_otg_pcd_request_t *req = 0; ++ dwc_otg_dma_desc_t* dma_desc; ++ uint32_t byte_count = 0; ++ int is_last = 0; ++ int i; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s() %s-%s\n", __func__, ep->ep.name, ++ (ep->dwc_ep.is_in?"IN":"OUT")); ++ ++ /* Get any pending requests */ ++ if (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, ++ queue); ++ if (!req) { ++ printk("complete_ep 0x%p, req = NULL!\n", ep); ++ return; ++ } ++ } ++ else { ++ printk("complete_ep 0x%p, ep->queue empty!\n", ep); ++ return; ++ } ++ DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending); ++ ++ if (ep->dwc_ep.is_in) { ++ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); ++ ++ if (core_if->dma_enable) { ++ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE); ++ if(core_if->dma_desc_enable == 0) { ++ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_NONE); ++ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { ++ byte_count = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++DWC_DEBUGPL(DBG_PCDV,"byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x)\n", byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count ); ++ ++ ep->dwc_ep.xfer_buff += byte_count; ++ ep->dwc_ep.dma_addr += byte_count; ++ ep->dwc_ep.xfer_count += byte_count; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); ++ } else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. 
++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } else { ++ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", ++ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ } ++ } else { ++ ++ dma_desc = ep->dwc_ep.desc_addr; ++ byte_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ++ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { ++ desc_sts.d32 = readl(dma_desc); ++ byte_count += desc_sts.b.bytes; ++ dma_desc++; ++ } ++ ++ if(byte_count == 0) { ++ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len; ++ is_last = 1; ++ } else { ++ DWC_WARN("Incomplete transfer\n"); ++ } ++ } ++ } else { ++ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len ); ++ } else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. 
++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ else { ++ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", ++ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ } ++ } ++ } else { ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs = ++ dev_if->out_ep_regs[ep->dwc_ep.num]; ++ desc_sts.d32 = 0; ++ if(core_if->dma_enable) { ++ //dma_unmap_single(NULL,ep->dwc_ep.dma_addr,ep->dwc_ep.xfer_count,DMA_FROM_DEVICE); ++ if(core_if->dma_desc_enable) { ++ DWC_WARN("\n\n%s: we need a cache invalidation here!!\n\n",__func__); ++ dma_desc = ep->dwc_ep.desc_addr; ++ byte_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { ++ desc_sts.d32 = readl(dma_desc); ++ byte_count += desc_sts.b.bytes; ++ dma_desc++; ++ } ++ ++ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len ++ - byte_count + ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3); ++ ++ //todo: invalidate cache & aligned buf patch on completion ++ // ++ ++ is_last = 1; ++ } else { ++ deptsiz.d32 = 0; ++ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); ++ ++ byte_count = (ep->dwc_ep.xfer_len - ++ ep->dwc_ep.xfer_count - deptsiz.b.xfersize); ++ ++// dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); ++ ++DWC_DEBUGPL(DBG_PCDV,"ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); ++ //todo: invalidate cache & aligned buf patch on completion ++ dma_sync_single_for_device(NULL,ep->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); ++ aligned_buf_patch_on_buf_dma_oep_completion(ep,byte_count); ++ ++ ep->dwc_ep.xfer_buff += byte_count; ++ ep->dwc_ep.dma_addr += byte_count; ++ ep->dwc_ep.xfer_count += byte_count; ++ ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, byte_count(%.8x) = (ep->dwc_ep.xfer_len(%.8x) - ep->dwc_ep.xfer_count(%.8x) - deptsiz.b.xfersize(%.8x)\n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, byte_count ,ep->dwc_ep.xfer_len , ep->dwc_ep.xfer_count , deptsiz.b.xfersize); ++ } ++ else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. 
++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ } else { ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ //dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++printk("Warning: transfer ended, but specified len is not accomplished!! ep->total_len=%.x,ep->dwc_ep.sent_zlp=%d, ep->dwc_ep.xfer_len(%.8x) \n", ep->dwc_ep.total_len, ep->dwc_ep.sent_zlp, ep->dwc_ep.xfer_len ); ++ } ++ else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ ++#ifdef DEBUG ++ ++ DWC_DEBUGPL(DBG_PCDV, "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n", ++ &out_ep_regs->doeptsiz, ep->ep.name, ep->dwc_ep.xfer_len, ++ ep->dwc_ep.xfer_count, ++ deptsiz.b.xfersize, ++ deptsiz.b.pktcnt); ++#endif ++ } ++ ++ /* Complete the request */ ++ if (is_last) { ++ req->req.actual = ep->dwc_ep.xfer_count; ++ ++ dwc_otg_request_done(ep, req, 0); ++ ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ++ /* If there is a request in the queue start it.*/ ++ start_next_request(ep); ++ } ++} ++ ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function BNA interrupt for Isochronous EPs ++ * ++ */ ++static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ volatile uint32_t *addr; ++ depctl_data_t depctl = {.d32 = 0}; ++ dwc_otg_pcd_t *pcd = ep->pcd; ++ dwc_otg_dma_desc_t *dma_desc; ++ int i; ++ ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num); ++ ++ if(dwc_ep->is_in) { ++ desc_sts_data_t sts = {.d32 = 0}; ++ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ sts.b_iso_in.bs = BS_HOST_READY; ++ writel(sts.d32,&dma_desc->status); ++ } ++ } ++ else { ++ desc_sts_data_t sts = {.d32 = 0}; ++ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ sts.b_iso_out.bs = BS_HOST_READY; ++ writel(sts.d32,&dma_desc->status); ++ } ++ } ++ ++ if(dwc_ep->is_in == 0){ ++ addr = &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ } ++ else{ ++ addr = &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ } ++ depctl.b.epena = 1; ++ dwc_modify_reg32(addr,depctl.d32,depctl.d32); ++} ++ ++/** ++ * This function sets latest iso packet information(non-PTI mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ * ++ */ ++void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ dma_addr_t dma_addr; ++ uint32_t offset; ++ ++ if(ep->proc_buf_num) ++ dma_addr = ep->dma_addr1; ++ else ++ dma_addr = ep->dma_addr0; ++ ++ if(ep->is_in) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); ++ offset = ep->data_per_frame; ++ } else { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); ++ offset = ep->data_per_frame + (0x4 & (0x4 - (ep->data_per_frame & 0x3))); ++ } ++ ++ if(!deptsiz.b.xfersize) { ++ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; ++ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; ++ ep->pkt_info[ep->cur_pkt].status = 0; ++ } else { ++ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; ++ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; ++ ep->pkt_info[ep->cur_pkt].status = -ENODATA; ++ } ++ ep->cur_pkt_addr += offset; ++ ep->cur_pkt_dma_addr += offset; ++ ep->cur_pkt++; ++} ++ ++/** ++ * This function sets latest iso packet information(DDMA mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ dwc_otg_dma_desc_t* dma_desc; ++ desc_sts_data_t sts = {.d32 = 0}; ++ iso_pkt_info_t *iso_packet; ++ uint32_t data_per_desc; ++ uint32_t offset; ++ int i, j; ++ ++ iso_packet = dwc_ep->pkt_info; ++ ++ /** Reinit closed DMA Descriptors*/ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ offset = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Received data length */ ++ if(!sts.b_iso_out.rxbytes){ ++ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes; ++ } else { ++ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes + ++ (4 - dwc_ep->data_per_frame % 4); ++ } ++ ++ iso_packet->offset = offset; ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ iso_packet ++; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Received data length */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; ++ ++ iso_packet->offset = offset; ++ ++ offset += data_per_desc; ++ iso_packet++; ++ dma_desc++; ++ } ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ /* Received data length */ ++ if(!sts.b_iso_out.rxbytes){ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; ++ } else { ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes + ++ (4 - dwc_ep->data_per_frame % 4); ++ } ++ ++ iso_packet->offset = offset; ++ } ++ else /** ISO IN EP */ ++ { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso packet descriptor */ ++ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); ++ if(iso_packet->status != 0) { ++ iso_packet->status = -ENODATA; ++ ++ } ++ /* Bytes has been transfered */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; ++ ++ dma_desc ++; ++ iso_packet++; ++ } ++ ++ sts.d32 = readl(&dma_desc->status); ++ while(sts.b_iso_in.bs == BS_DMA_BUSY) { ++ sts.d32 = readl(&dma_desc->status); ++ } ++ ++ /* Write status in iso packet descriptor ??? do be done with ERROR codes*/ ++ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); ++ if(iso_packet->status != 0) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Bytes has been transfered */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; ++ } ++} ++ ++/** ++ * This function reinitialize DMA Descriptors for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ int i, j; ++ dwc_otg_dma_desc_t* dma_desc; ++ dma_addr_t dma_ad; ++ volatile uint32_t *addr; ++ desc_sts_data_t sts = { .d32 =0 }; ++ uint32_t data_per_desc; ++ ++ if(dwc_ep->is_in == 0) { ++ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ } ++ else { ++ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ } ++ ++ ++ if(dwc_ep->proc_buf_num == 0) { ++ /** Buffer 0 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr0; ++ } ++ else { ++ /** Buffer 1 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr1; ++ } ++ ++ /** Reinit closed DMA Descriptors*/ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ sts.b_iso_out.bs = BS_HOST_READY; ++ sts.b_iso_out.rxsts = 0; ++ sts.b_iso_out.l = 0; ++ sts.b_iso_out.sp = 0; ++ sts.b_iso_out.ioc = 0; ++ sts.b_iso_out.pid = 0; ++ sts.b_iso_out.framenum = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ dma_desc ++; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dma_desc++; ++ //(uint32_t)dma_ad += data_per_desc; ++ dma_ad = (uint32_t)dma_ad + data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ sts.b_iso_out.l = dwc_ep->proc_buf_num; ++ ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ } ++ else /** ISO IN EP */ ++ { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ sts.b_iso_in.bs = BS_HOST_READY; ++ sts.b_iso_in.txsts = 0; ++ sts.b_iso_in.sp = 0; ++ sts.b_iso_in.ioc = 0; ++ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; ++ sts.b_iso_in.framenum = dwc_ep->next_frame; ++ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; ++ sts.b_iso_in.l = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ //(uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_ad = (uint32_t)dma_ad + dwc_ep->data_per_frame; ++ dma_desc ++; ++ } ++ ++ sts.b_iso_in.ioc = 1; ++ sts.b_iso_in.l = dwc_ep->proc_buf_num; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval * 1; ++ } ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++} ++ ++ ++/** ++ * This function is to handle Iso EP transfer complete interrupt ++ * in case Iso out packet was dropped ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ * @param dwc_ep The EP for wihich transfer complete was asserted ++ * ++ */ ++static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ uint32_t dma_addr; ++ uint32_t drp_pkt; ++ uint32_t drp_pkt_cnt; ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ depctl_data_t depctl = { .d32 = 0 }; ++ int i; ++ ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz); ++ ++ drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt; ++ drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm); ++ ++ /* Setting dropped packets status */ ++ for(i = 0; i < drp_pkt_cnt; ++i) { ++ dwc_ep->pkt_info[drp_pkt].status = -ENODATA; ++ drp_pkt ++; ++ deptsiz.b.pktcnt--; ++ } ++ ++ ++ if(deptsiz.b.pktcnt > 0) { ++ deptsiz.b.xfersize = dwc_ep->xfer_len - (dwc_ep->pkt_cnt - deptsiz.b.pktcnt) * dwc_ep->maxpacket; ++ } else { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 0; ++ } ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz, deptsiz.d32); ++ ++ if(deptsiz.b.pktcnt > 0) { ++ if(dwc_ep->proc_buf_num) { ++ dma_addr = dwc_ep->dma_addr1 + dwc_ep->xfer_len - deptsiz.b.xfersize; ++ } else { ++ dma_addr = dwc_ep->dma_addr0 + dwc_ep->xfer_len - deptsiz.b.xfersize;; ++ } ++ ++ VERIFY_PCD_DMA_ADDR(dma_addr); ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepdma, dma_addr); ++ ++ /** Re-enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl, ++ depctl.d32,depctl.d32); ++ return 0; ++ } else { ++ return 1; ++ } ++} ++ ++/** ++ * This function sets iso packets information(PTI mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ int i, j; ++ dma_addr_t dma_ad; ++ iso_pkt_info_t *packet_info = ep->pkt_info; ++ uint32_t offset; ++ uint32_t frame_data; ++ deptsiz_data_t deptsiz; ++ ++ if(ep->proc_buf_num == 0) { ++ /** Buffer 0 descriptors setup */ ++ dma_ad = ep->dma_addr0; ++ } ++ else { ++ /** Buffer 1 descriptors setup */ ++ dma_ad = ep->dma_addr1; ++ } ++ ++ if(ep->is_in) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); ++ } else { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); ++ } ++ ++ if(!deptsiz.b.xfersize) { ++ offset = 0; ++ for(i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) ++ { ++ frame_data = ep->data_per_frame; ++ for(j = 0; j < ep->pkt_per_frm; ++j) { ++ ++ /* Packet status - is not set as initially ++ * it is set to 0 and if packet was sent ++ successfully, status field will remain 0*/ ++ ++ /* Bytes has been transfered */ ++ packet_info->length = (ep->maxpacket < frame_data) ? ++ ep->maxpacket : frame_data; ++ ++ /* Received packet offset */ ++ packet_info->offset = offset; ++ offset += packet_info->length; ++ frame_data -= packet_info->length; ++ ++ packet_info ++; ++ } ++ } ++ return 1; ++ } else { ++ /* This is a workaround for in case of Transfer Complete with ++ * PktDrpSts interrupts merging - in this case Transfer complete ++ * interrupt for Isoc Out Endpoint is asserted without PktDrpSts ++ * set and with DOEPTSIZ register non zero. Investigations showed, ++ * that this happens when Out packet is dropped, but because of ++ * interrupts merging during first interrupt handling PktDrpSts ++ * bit is cleared and for next merged interrupts it is not reset. 
++ * In this case SW hadles the interrupt as if PktDrpSts bit is set. ++ */ ++ if(ep->is_in) { ++ return 1; ++ } else { ++ return handle_iso_out_pkt_dropped(core_if, ep); ++ } ++ } ++} ++ ++/** ++ * This function is to handle Iso EP transfer complete interrupt ++ * ++ * @param ep The EP for which transfer complete was asserted ++ * ++ */ ++static void complete_iso_ep(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ uint8_t is_last = 0; ++ ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ set_ddma_iso_pkts_info(core_if, dwc_ep); ++ reinit_ddma_iso_xfer(core_if, dwc_ep); ++ is_last = 1; ++ } else { ++ if(core_if->pti_enh_enable) { ++ if(set_iso_pkts_info(core_if, dwc_ep)) { ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ dwc_otg_iso_ep_start_buf_transfer(core_if, dwc_ep); ++ is_last = 1; ++ } ++ } else { ++ set_current_pkt_info(core_if, dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ is_last = 1; ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ } ++ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); ++ } ++ } ++ } else { ++ set_current_pkt_info(core_if, dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ is_last = 1; ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ } ++ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); ++ } ++ if(is_last) ++ dwc_otg_iso_buffer_done(ep, ep->iso_req); ++} ++ ++#endif //DWC_EN_ISOC ++ ++ ++/** ++ * This function handles EP0 Control transfers. ++ * ++ * The state of the control tranfers are tracked in ++ * ep0state. ++ */ ++static void handle_ep0(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ desc_sts_data_t desc_sts; ++ deptsiz0_data_t deptsiz; ++ uint32_t byte_count; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); ++ print_ep0_state(pcd); ++#endif ++ ++ switch (pcd->ep0state) { ++ case EP0_DISCONNECT: ++ break; ++ ++ case EP0_IDLE: ++ pcd->request_config = 0; ++ ++ pcd_setup(pcd); ++ break; ++ ++ case EP0_IN_DATA_PHASE: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n", ++ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), ++ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); ++#endif ++ ++ if (core_if->dma_enable != 0) { ++ /* ++ * For EP0 we can only program 1 packet at a time so we ++ * need to do the make calculations after each complete. ++ * Call write_packet to make the calculations, as in ++ * slave mode, and use those values to determine if we ++ * can complete. 
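++ * In descriptor DMA mode the residue is read back from the IN descriptor
++ * status word instead of DIEPTSIZ0.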
++ */ ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->dieptsiz); ++ byte_count = ep0->dwc_ep.xfer_len - deptsiz.b.xfersize; ++ } ++ else { ++ desc_sts.d32 = readl(core_if->dev_if->in_desc_addr); ++ byte_count = ep0->dwc_ep.xfer_len - desc_sts.b.bytes; ++ } ++ ++ ep0->dwc_ep.xfer_count += byte_count; ++ ep0->dwc_ep.xfer_buff += byte_count; ++ ep0->dwc_ep.dma_addr += byte_count; ++ } ++ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else if(ep0->dwc_ep.sent_zlp) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ep0->dwc_ep.sent_zlp = 0; ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else { ++ ep0_complete_request(ep0); ++ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); ++ } ++ break; ++ case EP0_OUT_DATA_PHASE: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n", ++ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), ++ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); ++#endif ++ if (core_if->dma_enable != 0) { ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[0]->doeptsiz); ++ byte_count = ep0->dwc_ep.maxpacket - deptsiz.b.xfersize; ++ ++ //todo: invalidate cache & aligned buf patch on completion ++ dma_sync_single_for_device(NULL,ep0->dwc_ep.dma_addr,byte_count,DMA_FROM_DEVICE); ++ aligned_buf_patch_on_buf_dma_oep_completion(ep0,byte_count); ++ } ++ else { ++ desc_sts.d32 = readl(core_if->dev_if->out_desc_addr); ++ byte_count = ep0->dwc_ep.maxpacket - desc_sts.b.bytes; ++ ++ //todo: invalidate cache & aligned buf patch on completion ++ // ++ ++ } ++ ep0->dwc_ep.xfer_count += byte_count; ++ ep0->dwc_ep.xfer_buff += byte_count; ++ ep0->dwc_ep.dma_addr += byte_count; ++ } ++ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else if(ep0->dwc_ep.sent_zlp) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ep0->dwc_ep.sent_zlp = 0; ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else { ++ ep0_complete_request(ep0); ++ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); ++ } ++ break; ++ ++ case EP0_IN_STATUS_PHASE: ++ case EP0_OUT_STATUS_PHASE: ++ DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n"); ++ ep0_complete_request(ep0); ++ pcd->ep0state = EP0_IDLE; ++ ep0->stopped = 1; ++ ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */ ++ ++ /* Prepare for more SETUP Packets */ ++ if(core_if->dma_enable) { ++ ep0_out_start(core_if, pcd); ++ } ++ break; ++ ++ case EP0_STALL: ++ DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n"); ++ break; ++ } ++#ifdef DEBUG_EP0 ++ print_ep0_state(pcd); ++#endif ++} ++ ++ ++/** ++ * Restart transfer ++ */ ++static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if; ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++#ifdef DWC_EN_ISOC ++ if(ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) { ++ return; ++ } ++#endif /* DWC_EN_ISOC */ ++ ++ core_if = GET_CORE_IF(pcd); ++ dev_if = core_if->dev_if; ++ ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); ++ ++ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x xfer_len=%0x" ++ " stopped=%d\n", ep->dwc_ep.xfer_buff, ++ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , ++ 
ep->stopped); ++ /* ++ * If xfersize is 0 and pktcnt in not 0, resend the last packet. ++ */ ++ if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 && ++ ep->dwc_ep.start_xfer_buff != 0) { ++ if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) { ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff; ++ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; ++ } ++ else { ++ ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket; ++ /* convert packet size to dwords. */ ++ ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket; ++ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; ++ } ++ ep->stopped = 0; ++ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x " ++ "xfer_len=%0x stopped=%d\n", ++ ep->dwc_ep.xfer_buff, ++ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , ++ ep->stopped ++ ); ++ if (epnum == 0) { ++ dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep); ++ } ++ else { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } ++ } ++} ++ ++ ++/** ++ * handle the IN EP disable interrupt. ++ */ ++static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ++ ep = get_in_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); ++ return; ++ } ++ ++ DWC_DEBUGPL(DBG_PCD,"diepctl%d=%0x\n", epnum, ++ dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl)); ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); ++ ++ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", ++ dieptsiz.b.pktcnt, ++ dieptsiz.b.xfersize); ++ ++ if (ep->stopped) { ++ /* Flush the Tx FIFO */ ++ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); ++ /* Clear the Global IN NP NAK */ ++ dctl.d32 = 0; ++ dctl.b.cgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ /* Restart the transaction */ ++ if (dieptsiz.b.pktcnt != 0 || ++ dieptsiz.b.xfersize != 0) { ++ restart_transfer(pcd, epnum); ++ } ++ } ++ else { ++ /* Restart the transaction */ ++ if (dieptsiz.b.pktcnt != 0 || ++ dieptsiz.b.xfersize != 0) { ++ restart_transfer(pcd, epnum); ++ } ++ DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n"); ++ } ++} ++ ++/** ++ * Handler for the IN EP timeout handshake interrupt. ++ */ ++static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ ++#ifdef DEBUG ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ uint32_t num = 0; ++#endif ++ dctl_data_t dctl = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++ /* Disable the NP Tx Fifo Empty Interrrupt */ ++ if (!core_if->dma_enable) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ /** @todo NGS Check EP type. 
++ * Implement for Periodic EPs */ ++ /* ++ * Non-periodic EP ++ */ ++ /* Enable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.ginnakeff = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ 0, intr_mask.d32); ++ ++ /* Set Global IN NAK */ ++ dctl.b.sgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, ++ dctl.d32, dctl.d32); ++ ++ ep->stopped = 1; ++ ++#ifdef DEBUG ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz); ++ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", ++ dieptsiz.b.pktcnt, ++ dieptsiz.b.xfersize); ++#endif ++ ++#ifdef DISABLE_PERIODIC_EP ++ /* ++ * Set the NAK bit for this EP to ++ * start the disable process. ++ */ ++ diepctl.d32 = 0; ++ diepctl.b.snak = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32, diepctl.d32); ++ ep->disabling = 1; ++ ep->stopped = 1; ++#endif ++} ++ ++/** ++ * Handler for the IN EP NAK interrupt. ++ */ ++static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ diepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nak = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP Babble interrupt. ++ */ ++static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP Babble"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.babble = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP NAK interrupt. ++ */ ++static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nak = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP NYET interrupt. 
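++ * Not fully implemented yet: the handler below only logs the event and
++ * masks the NYET interrupt for the endpoint so it cannot fire repeatedly.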
++ */ ++static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nyet = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that an IN EP has a pending Interrupt. ++ * The sequence for handling the IN EP interrupt is shown below: ++ * -# Read the Device All Endpoint Interrupt register ++ * -# Repeat the following for each IN EP interrupt bit set (from ++ * LSB to MSB). ++ * -# Read the Device Endpoint Interrupt (DIEPINTn) register ++ * -# If "Transfer Complete" call the request complete function ++ * -# If "Endpoint Disabled" complete the EP disable procedure. ++ * -# If "AHB Error Interrupt" log error ++ * -# If "Time-out Handshake" log error ++ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx ++ * FIFO. ++ * -# If "IN Token EP Mismatch" (disable, this is handled by EP ++ * Mismatch Interrupt) ++ */ ++static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd) ++{ ++#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \ ++do { \ ++ diepint_data_t diepint = {.d32=0}; \ ++ diepint.b.__intr = 1; \ ++ dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \ ++ diepint.d32); \ ++} while (0) ++ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ diepint_data_t diepint = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ depctl_data_t depctl = {.d32=0}; ++ uint32_t ep_intr; ++ uint32_t epnum = 0; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); ++ ++ /* Read in the device interrupt bits */ ++ ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if); ++ ++ /* Service the Device IN interrupts for each endpoint */ ++ while(ep_intr) { ++ if (ep_intr&0x1) { ++ uint32_t empty_msk; ++ /* Get EP pointer */ ++ ep = get_in_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); ++ empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ ++ DWC_DEBUGPL(DBG_PCDV, ++ "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n", ++ epnum, ++ empty_msk, ++ depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, ++ "EP%d-%s: type=%d, mps=%d\n", ++ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), ++ dwc_ep->type, dwc_ep->maxpacket); ++ ++ diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep); ++ ++ DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n", epnum, diepint.d32); ++ /* Transfer complete */ ++ if (diepint.b.xfercompl) { ++ /* Disable the NP Tx FIFO Empty ++ * Interrrupt */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ else { ++ /* Disable the Tx FIFO Empty Interrupt for this EP */ ++ uint32_t fifoemptymsk = 0x1 << dwc_ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ fifoemptymsk, 0); ++ } ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,xfercompl); ++ ++ /* Complete the transfer */ ++ if 
(epnum == 0) { ++ handle_ep0(pcd); ++ } ++#ifdef DWC_EN_ISOC ++ else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ if(!ep->stopped) ++ complete_iso_ep(ep); ++ } ++#endif //DWC_EN_ISOC ++ else { ++ ++ complete_ep(ep); ++ } ++ } ++ /* Endpoint disable */ ++ if (diepint.b.epdisabled) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN disabled\n", epnum); ++ handle_in_ep_disable_intr(pcd, epnum); ++ ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,epdisabled); ++ } ++ /* AHB Error */ ++ if (diepint.b.ahberr) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN AHB Error\n", epnum); ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,ahberr); ++ } ++ /* TimeOUT Handshake (non-ISOC IN EPs) */ ++ if (diepint.b.timeout) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN Time-out\n", epnum); ++ handle_in_ep_timeout_intr(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,timeout); ++ } ++ /** IN Token received with TxF Empty */ ++ if (diepint.b.intktxfemp) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n", ++ epnum); ++ if (!ep->stopped && epnum != 0) { ++ ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.intktxfemp = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[epnum], ++ diepmsk.d32, 0); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32, 0); ++ } ++ start_next_request(ep); ++ } ++ else if(core_if->dma_desc_enable && epnum == 0 && ++ pcd->ep0state == EP0_OUT_STATUS_PHASE) { ++ // EP0 IN set STALL ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); ++ ++ /* set the disable and stall bits */ ++ if (depctl.b.epena) { ++ depctl.b.epdis = 1; ++ } ++ depctl.b.stall = 1; ++ dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32); ++ } ++ CLEAR_IN_EP_INTR(core_if,epnum,intktxfemp); ++ } ++ /** IN Token Received with EP mismatch */ ++ if (diepint.b.intknepmis) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n", epnum); ++ CLEAR_IN_EP_INTR(core_if,epnum,intknepmis); ++ } ++ /** IN Endpoint NAK Effective */ ++ if (diepint.b.inepnakeff) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN EP NAK Effective\n", epnum); ++ /* Periodic EP */ ++ if (ep->disabling) { ++ depctl.d32 = 0; ++ depctl.b.snak = 1; ++ depctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); ++ } ++ CLEAR_IN_EP_INTR(core_if,epnum,inepnakeff); ++ ++ } ++ ++ /** IN EP Tx FIFO Empty Intr */ ++ if (diepint.b.emptyintr) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n", epnum); ++ write_empty_tx_fifo(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,emptyintr); ++ } ++ ++ /** IN EP BNA Intr */ ++ if (diepint.b.bna) { ++ CLEAR_IN_EP_INTR(core_if,epnum,bna); ++ if(core_if->dma_desc_enable) { ++#ifdef DWC_EN_ISOC ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This checking is performed to prevent first "false" BNA ++ * handling occuring right after reconnect ++ */ ++ if(dwc_ep->next_frame != 0xffffffff) ++ dwc_otg_pcd_handle_iso_bna(ep); ++ } ++ else ++#endif //DWC_EN_ISOC ++ { ++ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); ++ ++ /* If Global Continue on BNA is disabled - disable EP */ ++ if(!dctl.b.gcontbna) { ++ depctl.d32 = 0; ++ depctl.b.snak = 1; ++ depctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); ++ } else { ++ start_next_request(ep); ++ } ++ } ++ } ++ } ++ /* NAK Interrutp */ ++ if (diepint.b.nak) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN NAK Interrupt\n", epnum); ++ handle_in_ep_nak_intr(pcd, 
epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,nak); ++ } ++ } ++ epnum++; ++ ep_intr >>=1; ++ } ++ ++ return 1; ++#undef CLEAR_IN_EP_INTR ++} ++ ++/** ++ * This interrupt indicates that an OUT EP has a pending Interrupt. ++ * The sequence for handling the OUT EP interrupt is shown below: ++ * -# Read the Device All Endpoint Interrupt register ++ * -# Repeat the following for each OUT EP interrupt bit set (from ++ * LSB to MSB). ++ * -# Read the Device Endpoint Interrupt (DOEPINTn) register ++ * -# If "Transfer Complete" call the request complete function ++ * -# If "Endpoint Disabled" complete the EP disable procedure. ++ * -# If "AHB Error Interrupt" log error ++ * -# If "Setup Phase Done" process Setup Packet (See Standard USB ++ * Command Processing) ++ */ ++static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd) ++{ ++#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \ ++do { \ ++ doepint_data_t doepint = {.d32=0}; \ ++ doepint.b.__intr = 1; \ ++ dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \ ++ doepint.d32); \ ++} while (0) ++ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ uint32_t ep_intr; ++ doepint_data_t doepint = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ depctl_data_t doepctl = {.d32=0}; ++ uint32_t epnum = 0; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); ++ ++ /* Read in the device interrupt bits */ ++ ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if); ++ ++ while(ep_intr) { ++ if (ep_intr&0x1) { ++ /* Get EP pointer */ ++ ep = get_out_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, ++ "EP%d-%s: type=%d, mps=%d\n", ++ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), ++ dwc_ep->type, dwc_ep->maxpacket); ++#endif ++ doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep); ++ ++ /* Transfer complete */ ++ if (doepint.b.xfercompl) { ++ if (epnum == 0) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ if(core_if->dma_desc_enable == 0 || pcd->ep0state != EP0_IDLE) ++ handle_ep0(pcd); ++#ifdef DWC_EN_ISOC ++ } else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ if (doepint.b.pktdrpsts == 0) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ complete_iso_ep(ep); ++ } else { ++ doepint_data_t doepint = {.d32=0}; ++ doepint.b.xfercompl = 1; ++ doepint.b.pktdrpsts = 1; ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint, ++ doepint.d32); ++ if(handle_iso_out_pkt_dropped(core_if,dwc_ep)) { ++ complete_iso_ep(ep); ++ } ++ } ++#endif //DWC_EN_ISOC ++ } else { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ complete_ep(ep); ++ } ++ ++ } ++ ++ /* Endpoint disable */ ++ if (doepint.b.epdisabled) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,epdisabled); ++ } ++ /* AHB Error */ ++ if (doepint.b.ahberr) { ++ DWC_DEBUGPL(DBG_PCD,"EP%d OUT AHB Error\n", epnum); ++ DWC_DEBUGPL(DBG_PCD,"EP DMA REG %d \n", core_if->dev_if->out_ep_regs[epnum]->doepdma); ++ CLEAR_OUT_EP_INTR(core_if,epnum,ahberr); ++ } ++ /* Setup Phase Done (contorl EPs) */ ++ if (doepint.b.setup) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD,"EP%d SETUP Done\n", ++ epnum); ++#endif ++ CLEAR_OUT_EP_INTR(core_if,epnum,setup); ++ handle_ep0(pcd); ++ } ++ ++ /** OUT EP BNA Intr */ ++ if (doepint.b.bna) { ++ CLEAR_OUT_EP_INTR(core_if,epnum,bna); ++ 
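	/* BNA (Buffer Not Available) is only meaningful when descriptor DMA
++	 * is enabled; in other modes the status bit was already cleared above
++	 * and nothing more needs to be done. */
++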
if(core_if->dma_desc_enable) { ++#ifdef DWC_EN_ISOC ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This checking is performed to prevent first "false" BNA ++ * handling occuring right after reconnect ++ */ ++ if(dwc_ep->next_frame != 0xffffffff) ++ dwc_otg_pcd_handle_iso_bna(ep); ++ } ++ else ++#endif //DWC_EN_ISOC ++ { ++ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); ++ ++ /* If Global Continue on BNA is disabled - disable EP*/ ++ if(!dctl.b.gcontbna) { ++ doepctl.d32 = 0; ++ doepctl.b.snak = 1; ++ doepctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->out_ep_regs[epnum]->doepctl, doepctl.d32, doepctl.d32); ++ } else { ++ start_next_request(ep); ++ } ++ } ++ } ++ } ++ if (doepint.b.stsphsercvd) { ++ CLEAR_OUT_EP_INTR(core_if,epnum,stsphsercvd); ++ if(core_if->dma_desc_enable) { ++ do_setup_in_status_phase(pcd); ++ } ++ } ++ /* Babble Interrutp */ ++ if (doepint.b.babble) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT Babble\n", epnum); ++ handle_out_ep_babble_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,babble); ++ } ++ /* NAK Interrutp */ ++ if (doepint.b.nak) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NAK\n", epnum); ++ handle_out_ep_nak_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,nak); ++ } ++ /* NYET Interrutp */ ++ if (doepint.b.nyet) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NYET\n", epnum); ++ handle_out_ep_nyet_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,nyet); ++ } ++ } ++ ++ epnum++; ++ ep_intr >>=1; ++ } ++ ++ return 1; ++ ++#undef CLEAR_OUT_EP_INTR ++} ++ ++ ++/** ++ * Incomplete ISO IN Transfer Interrupt. ++ * This interrupt indicates one of the following conditions occurred ++ * while transmitting an ISOC transaction. ++ * - Corrupted IN Token for ISOC EP. ++ * - Packet not complete in FIFO. ++ * The follow actions will be taken: ++ * -# Determine the EP ++ * -# Set incomplete flag in dwc_ep structure ++ * -# Disable EP; when "Endpoint Disabled" interrupt is received ++ * Flush FIFO ++ */ ++int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintsts_data_t gintsts; ++ ++ ++#ifdef DWC_EN_ISOC ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t deptsiz = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0}; ++ dsts_data_t dsts = { .d32 = 0}; ++ dwc_ep_t *dwc_ep; ++ int i; ++ ++ dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ for(i = 1; i <= dev_if->num_in_eps; ++i) { ++ dwc_ep = &pcd->in_ep[i].dwc_ep; ++ if(dwc_ep->active && ++ dwc_ep->type == USB_ENDPOINT_XFER_ISOC) ++ { ++ deptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz); ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ ++ if(depctl.b.epdis && deptsiz.d32) { ++ set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ } ++ ++ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); ++ dwc_ep->next_frame = dsts.b.soffn; ++ ++ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); ++ } ++ } ++ } ++ ++#else ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "IN ISOC Incomplete"); ++ ++ intr_mask.b.incomplisoin = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++#endif //DWC_EN_ISOC 
++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.incomplisoin = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * Incomplete ISO OUT Transfer Interrupt. ++ * ++ * This interrupt indicates that the core has dropped an ISO OUT ++ * packet. The following conditions can be the cause: ++ * - FIFO Full, the entire packet would not fit in the FIFO. ++ * - CRC Error ++ * - Corrupted Token ++ * The follow actions will be taken: ++ * -# Determine the EP ++ * -# Set incomplete flag in dwc_ep structure ++ * -# Read any data from the FIFO ++ * -# Disable EP. when "Endpoint Disabled" interrupt is received ++ * re-enable EP. ++ */ ++int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd) ++{ ++ /* @todo implement ISR */ ++ gintsts_data_t gintsts; ++ ++#ifdef DWC_EN_ISOC ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t deptsiz = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0}; ++ dsts_data_t dsts = { .d32 = 0}; ++ dwc_ep_t *dwc_ep; ++ int i; ++ ++ dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ for(i = 1; i <= dev_if->num_out_eps; ++i) { ++ dwc_ep = &pcd->in_ep[i].dwc_ep; ++ if(pcd->out_ep[i].dwc_ep.active && ++ pcd->out_ep[i].dwc_ep.type == USB_ENDPOINT_XFER_ISOC) ++ { ++ deptsiz.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz); ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ ++ if(depctl.b.epdis && deptsiz.d32) { ++ set_current_pkt_info(GET_CORE_IF(pcd), &pcd->out_ep[i].dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ } ++ ++ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); ++ dwc_ep->next_frame = dsts.b.soffn; ++ ++ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); ++ } ++ } ++ } ++#else ++ /** @todo implement ISR */ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "OUT ISOC Incomplete"); ++ ++ intr_mask.b.incomplisoout = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++#endif // DWC_EN_ISOC ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.incomplisoout = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function handles the Global IN NAK Effective interrupt. 
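[Editor's note] Both incomplete-ISO handlers above finish by acknowledging their bit in GINTSTS, which is write-one-to-clear. A minimal stand-alone sketch of that acknowledge pattern follows; it is not part of the patch, the register word here is an ordinary variable, and the bit position is taken from the gintsts_data_t layout that appears later in this patch.

#include <stdint.h>

#define GINTSTS_INCOMPLISOIN (1u << 20)   /* incomplisoin, per gintsts_data_t */

/* Acknowledge exactly one status bit: write a word containing only that bit.
 * A read-modify-write here would clear every other pending interrupt too,
 * which is why the handlers build the value from a zeroed gintsts_data_t. */
static inline void ack_gintsts(volatile uint32_t *gintsts, uint32_t bit)
{
	*gintsts = bit;
}

int main(void)
{
	uint32_t fake_gintsts = GINTSTS_INCOMPLISOIN;  /* stand-in for the MMIO word */
	ack_gintsts(&fake_gintsts, GINTSTS_INCOMPLISOIN);
	return 0;
}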
++ * ++ */ ++int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; ++ depctl_data_t diepctl = { .d32 = 0}; ++ depctl_data_t diepctl_rd = { .d32 = 0}; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ int i; ++ ++ DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n"); ++ ++ /* Disable all active IN EPs */ ++ diepctl.b.epdis = 1; ++ diepctl.b.snak = 1; ++ ++ for (i=0; i <= dev_if->num_in_eps; i++) ++ { ++ diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (diepctl_rd.b.epena) { ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, ++ diepctl.d32); ++ } ++ } ++ /* Disable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.ginnakeff = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.ginnakeff = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * OUT NAK Effective. ++ * ++ */ ++int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "Global IN NAK Effective\n"); ++ /* Disable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.goutnakeff = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.goutnakeff = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * PCD interrupt handler. ++ * ++ * The PCD handles the device interrupts. Many conditions can cause a ++ * device interrupt. When an interrupt occurs, the device interrupt ++ * service routine determines the cause of the interrupt and ++ * dispatches handling to the appropriate function. These interrupt ++ * handling functions are described below. ++ * ++ * All interrupt registers are processed from LSB to MSB. 
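[Editor's note] A compact, stand-alone sketch of the dispatch scheme the comment above describes: the effective status is GINTSTS masked by GINTMSK, and each set bit is then handled from LSB to MSB. The handler names are invented; the bit positions match the gintsts_data_t layout later in this patch (sofintr is bit 3, rxstsqlvl is bit 4).

#include <stdint.h>
#include <stdio.h>

static void handle_sof(void)       { puts("SOF"); }
static void handle_rxstsqlvl(void) { puts("RxFIFO level"); }

int main(void)
{
	uint32_t gintsts = 0x00000018;  /* pretend sofintr and rxstsqlvl are pending */
	uint32_t gintmsk = 0x00000010;  /* only rxstsqlvl is unmasked */
	uint32_t pending = gintsts & gintmsk;   /* effective status, as the core_intr read returns */

	if (pending & (1u << 3)) handle_sof();        /* never reached: masked */
	if (pending & (1u << 4)) handle_rxstsqlvl();  /* handled */
	return 0;
}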
++ * ++ */ ++int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++#ifdef VERBOSE ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++#endif ++ gintsts_data_t gintr_status; ++ int32_t retval = 0; ++ ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n", ++ __func__, ++ dwc_read_reg32(&global_regs->gintsts), ++ dwc_read_reg32(&global_regs->gintmsk)); ++#endif ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ SPIN_LOCK(&pcd->lock); ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n", ++ __func__, ++ dwc_read_reg32(&global_regs->gintsts), ++ dwc_read_reg32(&global_regs->gintmsk)); ++#endif ++ ++ gintr_status.d32 = dwc_otg_read_core_intr(core_if); ++/* ++ if (!gintr_status.d32) { ++ SPIN_UNLOCK(&pcd->lock); ++ return 0; ++ } ++*/ ++ DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", ++ __func__, gintr_status.d32); ++ ++ if (gintr_status.b.sofintr) { ++ retval |= dwc_otg_pcd_handle_sof_intr(pcd); ++ } ++ if (gintr_status.b.rxstsqlvl) { ++ retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(pcd); ++ } ++ if (gintr_status.b.nptxfempty) { ++ retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd); ++ } ++ if (gintr_status.b.ginnakeff) { ++ retval |= dwc_otg_pcd_handle_in_nak_effective(pcd); ++ } ++ if (gintr_status.b.goutnakeff) { ++ retval |= dwc_otg_pcd_handle_out_nak_effective(pcd); ++ } ++ if (gintr_status.b.i2cintr) { ++ retval |= dwc_otg_pcd_handle_i2c_intr(pcd); ++ } ++ if (gintr_status.b.erlysuspend) { ++ retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd); ++ } ++ if (gintr_status.b.usbreset) { ++ retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd); ++ } ++ if (gintr_status.b.enumdone) { ++ retval |= dwc_otg_pcd_handle_enum_done_intr(pcd); ++ } ++ if (gintr_status.b.isooutdrop) { ++ retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd); ++ } ++ if (gintr_status.b.eopframe) { ++ retval |= dwc_otg_pcd_handle_end_periodic_frame_intr(pcd); ++ } ++ if (gintr_status.b.epmismatch) { ++ retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if); ++ } ++ if (gintr_status.b.inepint) { ++ if(!core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); ++ } ++ } ++ if (gintr_status.b.outepintr) { ++ if(!core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); ++ } ++ } ++ if (gintr_status.b.incomplisoin) { ++ retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd); ++ } ++ if (gintr_status.b.incomplisoout) { ++ retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd); ++ } ++ ++ /* In MPI mode De vice Endpoints intterrupts are asserted ++ * without setting outepintr and inepint bits set, so these ++ * Interrupt handlers are called without checking these bit-fields ++ */ ++ if(core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); ++ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); ++ } ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__, ++ dwc_read_reg32(&global_regs->gintsts)); ++#endif ++ SPIN_UNLOCK(&pcd->lock); ++ } ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} ++ ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc/otg_plat.h +@@ -0,0 +1,266 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/platform/dwc_otg_plat.h $ ++ * $Revision: #23 $ ++ * $Date: 2008/07/15 $ ++ * $Change: 1064915 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ 
* "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_PLAT_H__) ++#define __DWC_OTG_PLAT_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++/* Changed all readl and writel to __raw_readl, __raw_writel */ ++ ++/** ++ * @file ++ * ++ * This file contains the Platform Specific constants, interfaces ++ * (functions and macros) for Linux. ++ * ++ */ ++//#if !defined(__LINUX_ARM_ARCH__) ++//#error "The contents of this file is Linux specific!!!" ++//#endif ++ ++/** ++ * Reads the content of a register. ++ * ++ * @param reg address of register to read. ++ * @return contents of the register. ++ * ++ ++ * Usage:
++ * uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl); ++ */ ++static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *reg) ++{ ++ return __raw_readl(reg); ++ // return readl(reg); ++}; ++ ++/** ++ * Writes a register with a 32 bit value. ++ * ++ * @param reg address of register to read. ++ * @param value to write to _reg. ++ * ++ * Usage:
++ * dwc_write_reg32(&dev_regs->dctl, 0); ++ */ ++static __inline__ void dwc_write_reg32( volatile uint32_t *reg, const uint32_t value) ++{ ++ // writel( value, reg ); ++ __raw_writel(value, reg); ++ ++}; ++ ++/** ++ * This function modifies bit values in a register. Using the ++ * algorithm: (reg_contents & ~clear_mask) | set_mask. ++ * ++ * @param reg address of register to read. ++ * @param clear_mask bit mask to be cleared. ++ * @param set_mask bit mask to be set. ++ * ++ * Usage:
++ * // Clear the SOF Interrupt Mask bit and
++ * // set the OTG Interrupt mask bit, leaving all others as they were. ++ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);
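[Editor's note] The modify helper documented here computes (reg_contents & ~clear_mask) | set_mask before writing back. A small stand-alone example of that arithmetic on an ordinary variable; the mask values are invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	uint32_t reg        = 0x0000000Au; /* bits 1 and 3 set */
	uint32_t clear_mask = 0x00000008u; /* clear bit 3      */
	uint32_t set_mask   = 0x00000004u; /* set bit 2        */

	reg = (reg & ~clear_mask) | set_mask;  /* same expression dwc_modify_reg32() writes back */
	assert(reg == 0x00000006u);            /* bit 3 cleared, bit 2 set, bit 1 untouched */
	printf("0x%08x\n", (unsigned)reg);
	return 0;
}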
++ */ ++static __inline__ ++ void dwc_modify_reg32( volatile uint32_t *reg, const uint32_t clear_mask, const uint32_t set_mask) ++{ ++ // writel( (readl(reg) & ~clear_mask) | set_mask, reg ); ++ __raw_writel( (__raw_readl(reg) & ~clear_mask) | set_mask, reg ); ++}; ++ ++ ++/** ++ * Wrapper for the OS micro-second delay function. ++ * @param[in] usecs Microseconds of delay ++ */ ++static __inline__ void UDELAY( const uint32_t usecs ) ++{ ++ udelay( usecs ); ++} ++ ++/** ++ * Wrapper for the OS milli-second delay function. ++ * @param[in] msecs milliseconds of delay ++ */ ++static __inline__ void MDELAY( const uint32_t msecs ) ++{ ++ mdelay( msecs ); ++} ++ ++/** ++ * Wrapper for the Linux spin_lock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_LOCK( spinlock_t *lock ) ++{ ++ spin_lock(lock); ++} ++ ++/** ++ * Wrapper for the Linux spin_unlock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_UNLOCK( spinlock_t *lock ) ++{ ++ spin_unlock(lock); ++} ++ ++/** ++ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param l Pointer to the spinlock. ++ * @param f unsigned long for irq flags storage. ++ */ ++#define SPIN_LOCK_IRQSAVE( l, f ) spin_lock_irqsave(l,f); ++ ++/** ++ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param l Pointer to the spinlock. ++ * @param f unsigned long for irq flags storage. ++ */ ++#define SPIN_UNLOCK_IRQRESTORE( l,f ) spin_unlock_irqrestore(l,f); ++ ++/* ++ * Debugging support vanishes in non-debug builds. ++ */ ++ ++ ++/** ++ * The Debug Level bit-mask variable. ++ */ ++extern uint32_t g_dbg_lvl; ++/** ++ * Set the Debug Level variable. ++ */ ++static inline uint32_t SET_DEBUG_LEVEL( const uint32_t new ) ++{ ++ uint32_t old = g_dbg_lvl; ++ g_dbg_lvl = new; ++ return old; ++} ++ ++/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */ ++#define DBG_CIL (0x2) ++/** When debug level has the DBG_CILV bit set, display CIL Verbose debug ++ * messages */ ++#define DBG_CILV (0x20) ++/** When debug level has the DBG_PCD bit set, display PCD (Device) debug ++ * messages */ ++#define DBG_PCD (0x4) ++/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug ++ * messages */ ++#define DBG_PCDV (0x40) ++/** When debug level has the DBG_HCD bit set, display Host debug messages */ ++#define DBG_HCD (0x8) ++/** When debug level has the DBG_HCDV bit set, display Verbose Host debug ++ * messages */ ++#define DBG_HCDV (0x80) ++/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host ++ * mode. */ ++#define DBG_HCD_URB (0x800) ++ ++/** When debug level has any bit set, display debug messages */ ++#define DBG_ANY (0xFF) ++ ++/** All debug messages off */ ++#define DBG_OFF 0 ++ ++/** Prefix string for DWC_DEBUG print macros. */ ++#define USB_DWC "DWC_otg: " ++ ++/** ++ * Print a debug message when the Global debug level variable contains ++ * the bit defined in lvl. ++ * ++ * @param[in] lvl - Debug level, use one of the DBG_ constants above. ++ * @param[in] x - like printf ++ * ++ * Example:

++ * ++ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr); ++ * ++ *
++ * results in:
++ * ++ * usb-DWC_otg: dwc_otg_cil_init(ca867000) ++ * ++ */ ++#ifdef DEBUG ++ ++# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0) ++# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x ) ++ ++# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl) ++ ++#else ++ ++# define DWC_DEBUGPL(lvl, x...) do{}while(0) ++# define DWC_DEBUGP(x...) ++ ++# define CHK_DEBUG_LEVEL(level) (0) ++ ++#endif /*DEBUG*/ ++ ++/** ++ * Print an Error message. ++ */ ++#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x ) ++/** ++ * Print a Warning message. ++ */ ++#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x ) ++/** ++ * Print a notice (normal but significant message). ++ */ ++#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x ) ++/** ++ * Basic message printing. ++ */ ++#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x ) ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/dwc/otg_regs.h +@@ -0,0 +1,2059 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_regs.h $ ++ * $Revision: #72 $ ++ * $Date: 2008/09/19 $ ++ * $Change: 1099526 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_REGS_H__ ++#define __DWC_OTG_REGS_H__ ++ ++/** ++ * @file ++ * ++ * This file contains the data structures for accessing the DWC_otg core registers. ++ * ++ * The application interfaces with the HS OTG core by reading from and ++ * writing to the Control and Status Register (CSR) space through the ++ * AHB Slave interface. These registers are 32 bits wide, and the ++ * addresses are 32-bit-block aligned. 
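[Editor's note] The CSR space described here is normally accessed by overlaying a struct of volatile 32-bit words on the controller's base address, which is exactly how the register structs in this header are meant to be used. A stand-alone sketch of that idiom follows; the struct is a cut-down stand-in (only the first three words of the core global block) and an ordinary variable plays the role of the MMIO window.

#include <stdint.h>
#include <stdio.h>

struct fake_core_regs {
	volatile uint32_t gotgctl;  /* offset 0x000 */
	volatile uint32_t gotgint;  /* offset 0x004 */
	volatile uint32_t gahbcfg;  /* offset 0x008 */
};

int main(void)
{
	static struct fake_core_regs fake;            /* pretend register window */
	struct fake_core_regs *regs = &fake;          /* in a driver: ioremap'd base address */

	regs->gahbcfg = 0x1;                          /* e.g. set the global interrupt enable bit */
	printf("gahbcfg = 0x%08x\n", (unsigned)regs->gahbcfg);
	return 0;
}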
++ * CSRs are classified as follows: ++ * - Core Global Registers ++ * - Device Mode Registers ++ * - Device Global Registers ++ * - Device Endpoint Specific Registers ++ * - Host Mode Registers ++ * - Host Global Registers ++ * - Host Port CSRs ++ * - Host Channel Specific Registers ++ * ++ * Only the Core Global registers can be accessed in both Device and ++ * Host modes. When the HS OTG core is operating in one mode, either ++ * Device or Host, the application must not access registers from the ++ * other mode. When the core switches from one mode to another, the ++ * registers in the new mode of operation must be reprogrammed as they ++ * would be after a power-on reset. ++ */ ++ ++/** Maximum number of Periodic FIFOs */ ++#define MAX_PERIO_FIFOS 15 ++/** Maximum number of Transmit FIFOs */ ++#define MAX_TX_FIFOS 15 ++ ++/** Maximum number of Endpoints/HostChannels */ ++#define MAX_EPS_CHANNELS 16 ++ ++/****************************************************************************/ ++/** DWC_otg Core registers . ++ * The dwc_otg_core_global_regs structure defines the size ++ * and relative field offsets for the Core Global registers. ++ */ ++typedef struct dwc_otg_core_global_regs ++{ ++ /** OTG Control and Status Register. Offset: 000h */ ++ volatile uint32_t gotgctl; ++ /** OTG Interrupt Register. Offset: 004h */ ++ volatile uint32_t gotgint; ++ /**Core AHB Configuration Register. Offset: 008h */ ++ volatile uint32_t gahbcfg; ++ ++#define DWC_GLBINTRMASK 0x0001 ++#define DWC_DMAENABLE 0x0020 ++#define DWC_NPTXEMPTYLVL_EMPTY 0x0080 ++#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000 ++#define DWC_PTXEMPTYLVL_EMPTY 0x0100 ++#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000 ++ ++ /**Core USB Configuration Register. Offset: 00Ch */ ++ volatile uint32_t gusbcfg; ++ /**Core Reset Register. Offset: 010h */ ++ volatile uint32_t grstctl; ++ /**Core Interrupt Register. Offset: 014h */ ++ volatile uint32_t gintsts; ++ /**Core Interrupt Mask Register. Offset: 018h */ ++ volatile uint32_t gintmsk; ++ /**Receive Status Queue Read Register (Read Only). Offset: 01Ch */ ++ volatile uint32_t grxstsr; ++ /**Receive Status Queue Read & POP Register (Read Only). Offset: 020h*/ ++ volatile uint32_t grxstsp; ++ /**Receive FIFO Size Register. Offset: 024h */ ++ volatile uint32_t grxfsiz; ++ /**Non Periodic Transmit FIFO Size Register. Offset: 028h */ ++ volatile uint32_t gnptxfsiz; ++ /**Non Periodic Transmit FIFO/Queue Status Register (Read ++ * Only). Offset: 02Ch */ ++ volatile uint32_t gnptxsts; ++ /**I2C Access Register. Offset: 030h */ ++ volatile uint32_t gi2cctl; ++ /**PHY Vendor Control Register. Offset: 034h */ ++ volatile uint32_t gpvndctl; ++ /**General Purpose Input/Output Register. Offset: 038h */ ++ volatile uint32_t ggpio; ++ /**User ID Register. Offset: 03Ch */ ++ volatile uint32_t guid; ++ /**Synopsys ID Register (Read Only). Offset: 040h */ ++ volatile uint32_t gsnpsid; ++ /**User HW Config1 Register (Read Only). Offset: 044h */ ++ volatile uint32_t ghwcfg1; ++ /**User HW Config2 Register (Read Only). Offset: 048h */ ++ volatile uint32_t ghwcfg2; ++#define DWC_SLAVE_ONLY_ARCH 0 ++#define DWC_EXT_DMA_ARCH 1 ++#define DWC_INT_DMA_ARCH 2 ++ ++#define DWC_MODE_HNP_SRP_CAPABLE 0 ++#define DWC_MODE_SRP_ONLY_CAPABLE 1 ++#define DWC_MODE_NO_HNP_SRP_CAPABLE 2 ++#define DWC_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ /**User HW Config3 Register (Read Only). 
Offset: 04Ch */ ++ volatile uint32_t ghwcfg3; ++ /**User HW Config4 Register (Read Only). Offset: 050h*/ ++ volatile uint32_t ghwcfg4; ++ /** Reserved Offset: 054h-0FFh */ ++ volatile uint32_t reserved[43]; ++ /** Host Periodic Transmit FIFO Size Register. Offset: 100h */ ++ volatile uint32_t hptxfsiz; ++ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled, ++ otherwise Device Transmit FIFO#n Register. ++ * Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15). */ ++ volatile uint32_t dptxfsiz_dieptxf[15]; ++} dwc_otg_core_global_regs_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Control ++ * and Status Register (GOTGCTL). Set the bits using the bit ++ * fields then write the d32 value to the register. ++ */ ++typedef union gotgctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned sesreqscs : 1; ++ unsigned sesreq : 1; ++ unsigned reserved2_7 : 6; ++ unsigned hstnegscs : 1; ++ unsigned hnpreq : 1; ++ unsigned hstsethnpen : 1; ++ unsigned devhnpen : 1; ++ unsigned reserved12_15 : 4; ++ unsigned conidsts : 1; ++ unsigned reserved17 : 1; ++ unsigned asesvld : 1; ++ unsigned bsesvld : 1; ++ unsigned currmod : 1; ++ unsigned reserved21_31 : 11; ++ } b; ++} gotgctl_data_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Interrupt Register ++ * (GOTGINT). Set/clear the bits using the bit fields then write the d32 ++ * value to the register. ++ */ ++typedef union gotgint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Current Mode */ ++ unsigned reserved0_1 : 2; ++ ++ /** Session End Detected */ ++ unsigned sesenddet : 1; ++ ++ unsigned reserved3_7 : 5; ++ ++ /** Session Request Success Status Change */ ++ unsigned sesreqsucstschng : 1; ++ /** Host Negotiation Success Status Change */ ++ unsigned hstnegsucstschng : 1; ++ ++ unsigned reserver10_16 : 7; ++ ++ /** Host Negotiation Detected */ ++ unsigned hstnegdet : 1; ++ /** A-Device Timeout Change */ ++ unsigned adevtoutchng : 1; ++ /** Debounce Done */ ++ unsigned debdone : 1; ++ ++ unsigned reserved31_20 : 12; ++ ++ } b; ++} gotgint_data_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core AHB Configuration ++ * Register (GAHBCFG). Set/clear the bits using the bit fields then ++ * write the d32 value to the register. ++ */ ++typedef union gahbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned glblintrmsk : 1; ++#define DWC_GAHBCFG_GLBINT_ENABLE 1 ++ ++ unsigned hburstlen : 4; ++#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7 ++ ++ unsigned dmaenable : 1; ++#define DWC_GAHBCFG_DMAENABLE 1 ++ unsigned reserved : 1; ++ unsigned nptxfemplvl_txfemplvl : 1; ++ unsigned ptxfemplvl : 1; ++#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1 ++#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0 ++ unsigned reserved9_31 : 23; ++ } b; ++} gahbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core USB Configuration ++ * Register (GUSBCFG). Set the bits using the bit fields then write ++ * the d32 value to the register. 
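[Editor's note] Every register union in this header follows the same access pattern the comments describe: read the whole word into .d32, touch individual fields through .b, write .d32 back. A self-contained sketch with a trimmed-down union (only the first two gusbcfg fields are modelled, and an ordinary variable stands in for the register):

#include <stdint.h>
#include <stdio.h>

typedef union demo_gusbcfg {
	uint32_t d32;                 /* raw register value */
	struct {
		unsigned toutcal  : 3;    /* same leading fields as gusbcfg_data_t */
		unsigned phyif    : 1;
		unsigned reserved : 28;   /* bit-field layout assumes the usual GCC/LE packing */
	} b;
} demo_gusbcfg_t;

int main(void)
{
	volatile uint32_t fake_reg = 0;          /* stand-in for the memory-mapped word */
	demo_gusbcfg_t cfg;

	cfg.d32 = fake_reg;                      /* 1. read the whole register  */
	cfg.b.phyif = 1;                         /* 2. change one field         */
	fake_reg = cfg.d32;                      /* 3. write the whole word back */

	printf("gusbcfg image = 0x%08x\n", (unsigned)fake_reg);
	return 0;
}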
++ */ ++typedef union gusbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned toutcal : 3; ++ unsigned phyif : 1; ++ unsigned ulpi_utmi_sel : 1; ++ unsigned fsintf : 1; ++ unsigned physel : 1; ++ unsigned ddrsel : 1; ++ unsigned srpcap : 1; ++ unsigned hnpcap : 1; ++ unsigned usbtrdtim : 4; ++ unsigned nptxfrwnden : 1; ++ unsigned phylpwrclksel : 1; ++ unsigned otgutmifssel : 1; ++ unsigned ulpi_fsls : 1; ++ unsigned ulpi_auto_res : 1; ++ unsigned ulpi_clk_sus_m : 1; ++ unsigned ulpi_ext_vbus_drv : 1; ++ unsigned ulpi_int_vbus_indicator : 1; ++ unsigned term_sel_dl_pulse : 1; ++ unsigned reserved23_27 : 5; ++ unsigned tx_end_delay : 1; ++ unsigned reserved29_31 : 3; ++ } b; ++} gusbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core Reset Register ++ * (GRSTCTL). Set/clear the bits using the bit fields then write the ++ * d32 value to the register. ++ */ ++typedef union grstctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Core Soft Reset (CSftRst) (Device and Host) ++ * ++ * The application can flush the control logic in the ++ * entire core using this bit. This bit resets the ++ * pipelines in the AHB Clock domain as well as the ++ * PHY Clock domain. ++ * ++ * The state machines are reset to an IDLE state, the ++ * control bits in the CSRs are cleared, all the ++ * transmit FIFOs and the receive FIFO are flushed. ++ * ++ * The status mask bits that control the generation of ++ * the interrupt, are cleared, to clear the ++ * interrupt. The interrupt status bits are not ++ * cleared, so the application can get the status of ++ * any events that occurred in the core after it has ++ * set this bit. ++ * ++ * Any transactions on the AHB are terminated as soon ++ * as possible following the protocol. Any ++ * transactions on the USB are terminated immediately. ++ * ++ * The configuration settings in the CSRs are ++ * unchanged, so the software doesn't have to ++ * reprogram these registers (Device ++ * Configuration/Host Configuration/Core System ++ * Configuration/Core PHY Configuration). ++ * ++ * The application can write to this bit, any time it ++ * wants to reset the core. This is a self clearing ++ * bit and the core clears this bit after all the ++ * necessary logic is reset in the core, which may ++ * take several clocks, depending on the current state ++ * of the core. ++ */ ++ unsigned csftrst : 1; ++ /** Hclk Soft Reset ++ * ++ * The application uses this bit to reset the control logic in ++ * the AHB clock domain. Only AHB clock domain pipelines are ++ * reset. ++ */ ++ unsigned hsftrst : 1; ++ /** Host Frame Counter Reset (Host Only)
++ * ++ * The application can reset the (micro)frame number ++ * counter inside the core, using this bit. When the ++ * (micro)frame counter is reset, the subsequent SOF ++ * sent out by the core, will have a (micro)frame ++ * number of 0. ++ */ ++ unsigned hstfrm : 1; ++ /** In Token Sequence Learning Queue Flush ++ * (INTknQFlsh) (Device Only) ++ */ ++ unsigned intknqflsh : 1; ++ /** RxFIFO Flush (RxFFlsh) (Device and Host) ++ * ++ * The application can flush the entire Receive FIFO ++ * using this bit.

The application must first ++ * ensure that the core is not in the middle of a ++ * transaction.

The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is reading from the RxFIFO nor the MAC ++ * is writing the data in to the FIFO.

The ++ * application should wait until the bit is cleared ++ * before performing any other operations. This bit ++ * will takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned rxfflsh : 1; ++ /** TxFIFO Flush (TxFFlsh) (Device and Host). ++ * ++ * This bit is used to selectively flush a single or ++ * all transmit FIFOs. The application must first ++ * ensure that the core is not in the middle of a ++ * transaction.

The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is writing into the TxFIFO nor the MAC ++ * is reading the data out of the FIFO.

The ++ * application should wait until the core clears this ++ * bit, before performing any operations. This bit ++ * will takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned txfflsh : 1; ++ /** TxFIFO Number (TxFNum) (Device and Host). ++ * ++ * This is the FIFO number which needs to be flushed, ++ * using the TxFIFO Flush bit. This field should not ++ * be changed until the TxFIFO Flush bit is cleared by ++ * the core. ++ * - 0x0 : Non Periodic TxFIFO Flush ++ * - 0x1 : Periodic TxFIFO #1 Flush in device mode ++ * or Periodic TxFIFO in host mode ++ * - 0x2 : Periodic TxFIFO #2 Flush in device mode. ++ * - ... ++ * - 0xF : Periodic TxFIFO #15 Flush in device mode ++ * - 0x10: Flush all the Transmit NonPeriodic and ++ * Transmit Periodic FIFOs in the core ++ */ ++ unsigned txfnum : 5; ++ /** Reserved */ ++ unsigned reserved11_29 : 19; ++ /** DMA Request Signal. Indicated DMA request is in ++ * probress. Used for debug purpose. */ ++ unsigned dmareq : 1; ++ /** AHB Master Idle. Indicates the AHB Master State ++ * Machine is in IDLE condition. */ ++ unsigned ahbidle : 1; ++ } b; ++} grstctl_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core Interrupt Mask ++ * Register (GINTMSK). Set/clear the bits using the bit fields then ++ * write the d32 value to the register. ++ */ ++typedef union gintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned reserved0 : 1; ++ unsigned modemismatch : 1; ++ unsigned otgintr : 1; ++ unsigned sofintr : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned nptxfempty : 1; ++ unsigned ginnakeff : 1; ++ unsigned goutnakeff : 1; ++ unsigned reserved8 : 1; ++ unsigned i2cintr : 1; ++ unsigned erlysuspend : 1; ++ unsigned usbsuspend : 1; ++ unsigned usbreset : 1; ++ unsigned enumdone : 1; ++ unsigned isooutdrop : 1; ++ unsigned eopframe : 1; ++ unsigned reserved16 : 1; ++ unsigned epmismatch : 1; ++ unsigned inepintr : 1; ++ unsigned outepintr : 1; ++ unsigned incomplisoin : 1; ++ unsigned incomplisoout : 1; ++ unsigned reserved22_23 : 2; ++ unsigned portintr : 1; ++ unsigned hcintr : 1; ++ unsigned ptxfempty : 1; ++ unsigned reserved27 : 1; ++ unsigned conidstschng : 1; ++ unsigned disconnect : 1; ++ unsigned sessreqintr : 1; ++ unsigned wkupintr : 1; ++ } b; ++} gintmsk_data_t; ++/** ++ * This union represents the bit fields of the Core Interrupt Register ++ * (GINTSTS). Set/clear the bits using the bit fields then write the ++ * d32 value to the register. 
++ */ ++typedef union gintsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++#define DWC_SOF_INTR_MASK 0x0008 ++ /** register bits */ ++ struct ++ { ++#define DWC_HOST_MODE 1 ++ unsigned curmode : 1; ++ unsigned modemismatch : 1; ++ unsigned otgintr : 1; ++ unsigned sofintr : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned nptxfempty : 1; ++ unsigned ginnakeff : 1; ++ unsigned goutnakeff : 1; ++ unsigned reserved8 : 1; ++ unsigned i2cintr : 1; ++ unsigned erlysuspend : 1; ++ unsigned usbsuspend : 1; ++ unsigned usbreset : 1; ++ unsigned enumdone : 1; ++ unsigned isooutdrop : 1; ++ unsigned eopframe : 1; ++ unsigned intokenrx : 1; ++ unsigned epmismatch : 1; ++ unsigned inepint: 1; ++ unsigned outepintr : 1; ++ unsigned incomplisoin : 1; ++ unsigned incomplisoout : 1; ++ unsigned reserved22_23 : 2; ++ unsigned portintr : 1; ++ unsigned hcintr : 1; ++ unsigned ptxfempty : 1; ++ unsigned reserved27 : 1; ++ unsigned conidstschng : 1; ++ unsigned disconnect : 1; ++ unsigned sessreqintr : 1; ++ unsigned wkupintr : 1; ++ } b; ++} gintsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 ++ * element then read out the bits using the bit elements. ++ */ ++typedef union device_grxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned epnum : 4; ++ unsigned bcnt : 11; ++ unsigned dpid : 2; ++#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet ++#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete ++ ++#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK ++#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete ++#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet ++ unsigned pktsts : 4; ++ unsigned fn : 4; ++ unsigned reserved : 7; ++ } b; ++} device_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 ++ * element then read out the bits using the bit elements. ++ */ ++typedef union host_grxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned chnum : 4; ++ unsigned bcnt : 11; ++ unsigned dpid : 2; ++ unsigned pktsts : 4; ++#define DWC_GRXSTS_PKTSTS_IN 0x2 ++#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3 ++#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5 ++#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7 ++ unsigned reserved : 11; ++ } b; ++} host_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ, ++ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the d32 element then ++ * read out the bits using the bit elements. ++ */ ++typedef union fifosize_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned startaddr : 16; ++ unsigned depth : 16; ++ } b; ++} fifosize_data_t; ++ ++/** ++ * This union represents the bit fields in the Non-Periodic Transmit ++ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the ++ * d32 element then read out the bits using the bit ++ * elements. 
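[Editor's note] The receive-status unions above pack the endpoint (or channel) number, byte count and packet status into one popped GRXSTSP word. A stand-alone sketch of decoding such a word with a cut-down union; the field widths are copied from device_grxsts_data above, and the sample values are invented.

#include <stdint.h>
#include <stdio.h>

typedef union demo_grxstsp {
	uint32_t d32;
	struct {
		unsigned epnum  : 4;
		unsigned bcnt   : 11;
		unsigned dpid   : 2;
		unsigned pktsts : 4;   /* 0x2 = OUT data packet (DWC_STS_DATA_UPDT) */
		unsigned fn     : 4;
		unsigned rsvd   : 7;
	} b;
} demo_grxstsp_t;

int main(void)
{
	demo_grxstsp_t sts;
	sts.d32 = 0;
	sts.b.epnum  = 1;          /* invented sample: 64-byte OUT data packet on EP1 */
	sts.b.bcnt   = 64;
	sts.b.pktsts = 0x2;

	printf("EP%u: %u bytes, pktsts=%u\n", sts.b.epnum, sts.b.bcnt, sts.b.pktsts);
	return 0;
}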
++ */ ++typedef union gnptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned nptxfspcavail : 16; ++ unsigned nptxqspcavail : 8; ++ /** Top of the Non-Periodic Transmit Request Queue ++ * - bit 24 - Terminate (Last entry for the selected ++ * channel/EP) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - IN/OUT ++ * - 2'b01 - Zero Length OUT ++ * - 2'b10 - PING/Complete Split ++ * - 2'b11 - Channel Halt ++ * - bits 30:27 - Channel/EP Number ++ */ ++ unsigned nptxqtop_terminate : 1; ++ unsigned nptxqtop_token : 2; ++ unsigned nptxqtop_chnep : 4; ++ unsigned reserved : 1; ++ } b; ++} gnptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Transmit ++ * FIFO Status Register (DTXFSTS). Read the register into the ++ * d32 element then read out the bits using the bit ++ * elements. ++ */ ++typedef union dtxfsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned txfspcavail : 16; ++ unsigned reserved : 16; ++ } b; ++} dtxfsts_data_t; ++ ++/** ++ * This union represents the bit fields in the I2C Control Register ++ * (I2CCTL). Read the register into the d32 element then read out the ++ * bits using the bit elements. ++ */ ++typedef union gi2cctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned rwdata : 8; ++ unsigned regaddr : 8; ++ unsigned addr : 7; ++ unsigned i2cen : 1; ++ unsigned ack : 1; ++ unsigned i2csuspctl : 1; ++ unsigned i2cdevaddr : 2; ++ unsigned reserved : 2; ++ unsigned rw : 1; ++ unsigned bsydne : 1; ++ } b; ++} gi2cctl_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config1 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ep_dir0 : 2; ++ unsigned ep_dir1 : 2; ++ unsigned ep_dir2 : 2; ++ unsigned ep_dir3 : 2; ++ unsigned ep_dir4 : 2; ++ unsigned ep_dir5 : 2; ++ unsigned ep_dir6 : 2; ++ unsigned ep_dir7 : 2; ++ unsigned ep_dir8 : 2; ++ unsigned ep_dir9 : 2; ++ unsigned ep_dir10 : 2; ++ unsigned ep_dir11 : 2; ++ unsigned ep_dir12 : 2; ++ unsigned ep_dir13 : 2; ++ unsigned ep_dir14 : 2; ++ unsigned ep_dir15 : 2; ++ } b; ++} hwcfg1_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config2 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. 
++ */ ++typedef union hwcfg2_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /* GHWCFG2 */ ++ unsigned op_mode : 3; ++#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0 ++#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1 ++#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ unsigned architecture : 2; ++ unsigned point2point : 1; ++ unsigned hs_phy_type : 2; ++#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1 ++#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 ++ ++ unsigned fs_phy_type : 2; ++ unsigned num_dev_ep : 4; ++ unsigned num_host_chan : 4; ++ unsigned perio_ep_supported : 1; ++ unsigned dynamic_fifo : 1; ++ unsigned multi_proc_int : 1; ++ unsigned reserved21 : 1; ++ unsigned nonperio_tx_q_depth : 2; ++ unsigned host_perio_tx_q_depth : 2; ++ unsigned dev_token_q_depth : 5; ++ unsigned reserved31 : 1; ++ } b; ++} hwcfg2_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config3 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg3_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /* GHWCFG3 */ ++ unsigned xfer_size_cntr_width : 4; ++ unsigned packet_size_cntr_width : 3; ++ unsigned otg_func : 1; ++ unsigned i2c : 1; ++ unsigned vendor_ctrl_if : 1; ++ unsigned optional_features : 1; ++ unsigned synch_reset_type : 1; ++ unsigned ahb_phy_clock_synch : 1; ++ unsigned reserved15_13 : 3; ++ unsigned dfifo_depth : 16; ++ } b; ++} hwcfg3_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config4 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg4_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned num_dev_perio_in_ep : 4; ++ unsigned power_optimiz : 1; ++ unsigned min_ahb_freq : 9; ++ unsigned utmi_phy_data_width : 2; ++ unsigned num_dev_mode_ctrl_ep : 4; ++ unsigned iddig_filt_en : 1; ++ unsigned vbus_valid_filt_en : 1; ++ unsigned a_valid_filt_en : 1; ++ unsigned b_valid_filt_en : 1; ++ unsigned session_end_filt_en : 1; ++ unsigned ded_fifo_en : 1; ++ unsigned num_in_eps : 4; ++ unsigned desc_dma : 1; ++ unsigned desc_dma_dyn : 1; ++ } b; ++} hwcfg4_data_t; ++ ++//////////////////////////////////////////// ++// Device Registers ++/** ++ * Device Global Registers. Offsets 800h-BFFh ++ * ++ * The following structures define the size and relative field offsets ++ * for the Device Mode Registers. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_global_regs ++{ ++ /** Device Configuration Register. Offset 800h */ ++ volatile uint32_t dcfg; ++ /** Device Control Register. Offset: 804h */ ++ volatile uint32_t dctl; ++ /** Device Status Register (Read Only). Offset: 808h */ ++ volatile uint32_t dsts; ++ /** Reserved. Offset: 80Ch */ ++ uint32_t unused; ++ /** Device IN Endpoint Common Interrupt Mask ++ * Register. Offset: 810h */ ++ volatile uint32_t diepmsk; ++ /** Device OUT Endpoint Common Interrupt Mask ++ * Register. 
Offset: 814h */ ++ volatile uint32_t doepmsk; ++ /** Device All Endpoints Interrupt Register. Offset: 818h */ ++ volatile uint32_t daint; ++ /** Device All Endpoints Interrupt Mask Register. Offset: ++ * 81Ch */ ++ volatile uint32_t daintmsk; ++ /** Device IN Token Queue Read Register-1 (Read Only). ++ * Offset: 820h */ ++ volatile uint32_t dtknqr1; ++ /** Device IN Token Queue Read Register-2 (Read Only). ++ * Offset: 824h */ ++ volatile uint32_t dtknqr2; ++ /** Device VBUS discharge Register. Offset: 828h */ ++ volatile uint32_t dvbusdis; ++ /** Device VBUS Pulse Register. Offset: 82Ch */ ++ volatile uint32_t dvbuspulse; ++ /** Device IN Token Queue Read Register-3 (Read Only). / ++ * Device Thresholding control register (Read/Write) ++ * Offset: 830h */ ++ volatile uint32_t dtknqr3_dthrctl; ++ /** Device IN Token Queue Read Register-4 (Read Only). / ++ * Device IN EPs empty Inr. Mask Register (Read/Write) ++ * Offset: 834h */ ++ volatile uint32_t dtknqr4_fifoemptymsk; ++ /** Device Each Endpoint Interrupt Register (Read Only). / ++ * Offset: 838h */ ++ volatile uint32_t deachint; ++ /** Device Each Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 83Ch */ ++ volatile uint32_t deachintmsk; ++ /** Device Each In Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 840h */ ++ volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS]; ++ /** Device Each Out Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 880h */ ++ volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS]; ++} dwc_otg_device_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device Configuration ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. Write the ++ * d32 member to the dcfg register. ++ */ ++typedef union dcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Device Speed */ ++ unsigned devspd : 2; ++ /** Non Zero Length Status OUT Handshake */ ++ unsigned nzstsouthshk : 1; ++#define DWC_DCFG_SEND_STALL 1 ++ ++ unsigned reserved3 : 1; ++ /** Device Addresses */ ++ unsigned devaddr : 7; ++ /** Periodic Frame Interval */ ++ unsigned perfrint : 2; ++#define DWC_DCFG_FRAME_INTERVAL_80 0 ++#define DWC_DCFG_FRAME_INTERVAL_85 1 ++#define DWC_DCFG_FRAME_INTERVAL_90 2 ++#define DWC_DCFG_FRAME_INTERVAL_95 3 ++ ++ unsigned reserved13_17 : 5; ++ /** In Endpoint Mis-match count */ ++ unsigned epmscnt : 5; ++ /** Enable Descriptor DMA in Device mode */ ++ unsigned descdma : 1; ++ } b; ++} dcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Control ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
++ */ ++typedef union dctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Remote Wakeup */ ++ unsigned rmtwkupsig : 1; ++ /** Soft Disconnect */ ++ unsigned sftdiscon : 1; ++ /** Global Non-Periodic IN NAK Status */ ++ unsigned gnpinnaksts : 1; ++ /** Global OUT NAK Status */ ++ unsigned goutnaksts : 1; ++ /** Test Control */ ++ unsigned tstctl : 3; ++ /** Set Global Non-Periodic IN NAK */ ++ unsigned sgnpinnak : 1; ++ /** Clear Global Non-Periodic IN NAK */ ++ unsigned cgnpinnak : 1; ++ /** Set Global OUT NAK */ ++ unsigned sgoutnak : 1; ++ /** Clear Global OUT NAK */ ++ unsigned cgoutnak : 1; ++ ++ /** Power-On Programming Done */ ++ unsigned pwronprgdone : 1; ++ /** Global Continue on BNA */ ++ unsigned gcontbna : 1; ++ /** Global Multi Count */ ++ unsigned gmc : 2; ++ /** Ignore Frame Number for ISOC EPs */ ++ unsigned ifrmnum : 1; ++ /** NAK on Babble */ ++ unsigned nakonbble : 1; ++ ++ unsigned reserved16_31 : 16; ++ } b; ++} dctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Status ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union dsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Suspend Status */ ++ unsigned suspsts : 1; ++ /** Enumerated Speed */ ++ unsigned enumspd : 2; ++#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0 ++#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1 ++#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2 ++#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3 ++ /** Erratic Error */ ++ unsigned errticerr : 1; ++ unsigned reserved4_7: 4; ++ /** Frame or Microframe Number of the received SOF */ ++ unsigned soffn : 14; ++ unsigned reserved22_31 : 10; ++ } b; ++} dsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device IN EP Interrupt ++ * Register and the Device IN EP Common Mask Register. ++ * ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. ++ */ ++typedef union diepint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer complete mask */ ++ unsigned xfercompl : 1; ++ /** Endpoint disable mask */ ++ unsigned epdisabled : 1; ++ /** AHB Error mask */ ++ unsigned ahberr : 1; ++ /** TimeOUT Handshake mask (non-ISOC EPs) */ ++ unsigned timeout : 1; ++ /** IN Token received with TxF Empty mask */ ++ unsigned intktxfemp : 1; ++ /** IN Token Received with EP mismatch mask */ ++ unsigned intknepmis : 1; ++ /** IN Endpoint HAK Effective mask */ ++ unsigned inepnakeff : 1; ++ /** IN Endpoint HAK Effective mask */ ++ unsigned emptyintr : 1; ++ unsigned txfifoundrn : 1; ++ ++ /** BNA Interrupt mask */ ++ unsigned bna : 1; ++ unsigned reserved10_12 : 3; ++ /** BNA Interrupt mask */ ++ unsigned nak : 1; ++ unsigned reserved14_31 : 18; ++ } b; ++} diepint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN EP ++ * Common/Dedicated Interrupt Mask Register. ++ */ ++typedef union diepint_data diepmsk_data_t; ++ ++/** ++ * This union represents the bit fields in the Device OUT EP Interrupt ++ * Registerand Device OUT EP Common Interrupt Mask Register. ++ * ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union doepint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer complete */ ++ unsigned xfercompl : 1; ++ /** Endpoint disable */ ++ unsigned epdisabled : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** Setup Phase Done (contorl EPs) */ ++ unsigned setup : 1; ++ /** OUT Token Received when Endpoint Disabled */ ++ unsigned outtknepdis : 1; ++ unsigned stsphsercvd : 1; ++ /** Back-to-Back SETUP Packets Received */ ++ unsigned back2backsetup : 1; ++ unsigned reserved7 : 1; ++ /** OUT packet Error */ ++ unsigned outpkterr : 1; ++ /** BNA Interrupt */ ++ unsigned bna : 1; ++ unsigned reserved10 : 1; ++ /** Packet Drop Status */ ++ unsigned pktdrpsts : 1; ++ /** Babble Interrupt */ ++ unsigned babble : 1; ++ /** NAK Interrupt */ ++ unsigned nak : 1; ++ /** NYET Interrupt */ ++ unsigned nyet : 1; ++ ++ unsigned reserved15_31 : 17; ++ } b; ++} doepint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device OUT EP ++ * Common/Dedicated Interrupt Mask Register. ++ */ ++typedef union doepint_data doepmsk_data_t; ++ ++/** ++ * This union represents the bit fields in the Device All EP Interrupt ++ * and Mask Registers. ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. ++ */ ++typedef union daint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** IN Endpoint bits */ ++ unsigned in : 16; ++ /** OUT Endpoint bits */ ++ unsigned out : 16; ++ } ep; ++ struct ++ { ++ /** IN Endpoint bits */ ++ unsigned inep0 : 1; ++ unsigned inep1 : 1; ++ unsigned inep2 : 1; ++ unsigned inep3 : 1; ++ unsigned inep4 : 1; ++ unsigned inep5 : 1; ++ unsigned inep6 : 1; ++ unsigned inep7 : 1; ++ unsigned inep8 : 1; ++ unsigned inep9 : 1; ++ unsigned inep10 : 1; ++ unsigned inep11 : 1; ++ unsigned inep12 : 1; ++ unsigned inep13 : 1; ++ unsigned inep14 : 1; ++ unsigned inep15 : 1; ++ /** OUT Endpoint bits */ ++ unsigned outep0 : 1; ++ unsigned outep1 : 1; ++ unsigned outep2 : 1; ++ unsigned outep3 : 1; ++ unsigned outep4 : 1; ++ unsigned outep5 : 1; ++ unsigned outep6 : 1; ++ unsigned outep7 : 1; ++ unsigned outep8 : 1; ++ unsigned outep9 : 1; ++ unsigned outep10 : 1; ++ unsigned outep11 : 1; ++ unsigned outep12 : 1; ++ unsigned outep13 : 1; ++ unsigned outep14 : 1; ++ unsigned outep15 : 1; ++ } b; ++} daint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN Token Queue ++ * Read Registers. ++ * - Read the register into the d32 member. ++ * - READ-ONLY Register ++ */ ++typedef union dtknq1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** In Token Queue Write Pointer */ ++ unsigned intknwptr : 5; ++ /** Reserved */ ++ unsigned reserved05_06 : 2; ++ /** write pointer has wrapped. */ ++ unsigned wrap_bit : 1; ++ /** EP Numbers of IN Tokens 0 ... 4 */ ++ unsigned epnums0_5 : 24; ++ }b; ++} dtknq1_data_t; ++ ++/** ++ * This union represents Threshold control Register ++ * - Read and write the register into the d32 member. ++ * - READ-WRITABLE Register ++ */ ++typedef union dthrctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** non ISO Tx Thr. Enable */ ++ unsigned non_iso_thr_en : 1; ++ /** ISO Tx Thr. Enable */ ++ unsigned iso_thr_en : 1; ++ /** Tx Thr. Length */ ++ unsigned tx_thr_len : 9; ++ /** Reserved */ ++ unsigned reserved11_15 : 5; ++ /** Rx Thr. Enable */ ++ unsigned rx_thr_en : 1; ++ /** Rx Thr. 
Length */ ++ unsigned rx_thr_len : 9; ++ /** Reserved */ ++ unsigned reserved26_31 : 6; ++ }b; ++} dthrctl_data_t; ++ ++ ++/** ++ * Device Logical IN Endpoint-Specific Registers. Offsets ++ * 900h-AFCh ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_in_ep_regs ++{ ++ /** Device IN Endpoint Control Register. Offset:900h + ++ * (ep_num * 20h) + 00h */ ++ volatile uint32_t diepctl; ++ /** Reserved. Offset:900h + (ep_num * 20h) + 04h */ ++ uint32_t reserved04; ++ /** Device IN Endpoint Interrupt Register. Offset:900h + ++ * (ep_num * 20h) + 08h */ ++ volatile uint32_t diepint; ++ /** Reserved. Offset:900h + (ep_num * 20h) + 0Ch */ ++ uint32_t reserved0C; ++ /** Device IN Endpoint Transfer Size ++ * Register. Offset:900h + (ep_num * 20h) + 10h */ ++ volatile uint32_t dieptsiz; ++ /** Device IN Endpoint DMA Address Register. Offset:900h + ++ * (ep_num * 20h) + 14h */ ++ volatile uint32_t diepdma; ++ /** Device IN Endpoint Transmit FIFO Status Register. Offset:900h + ++ * (ep_num * 20h) + 18h */ ++ volatile uint32_t dtxfsts; ++ /** Device IN Endpoint DMA Buffer Register. Offset:900h + ++ * (ep_num * 20h) + 1Ch */ ++ volatile uint32_t diepdmab; ++} dwc_otg_dev_in_ep_regs_t; ++ ++/** ++ * Device Logical OUT Endpoint-Specific Registers. Offsets: ++ * B00h-CFCh ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_out_ep_regs ++{ ++ /** Device OUT Endpoint Control Register. Offset:B00h + ++ * (ep_num * 20h) + 00h */ ++ volatile uint32_t doepctl; ++ /** Device OUT Endpoint Frame number Register. Offset: ++ * B00h + (ep_num * 20h) + 04h */ ++ volatile uint32_t doepfn; ++ /** Device OUT Endpoint Interrupt Register. Offset:B00h + ++ * (ep_num * 20h) + 08h */ ++ volatile uint32_t doepint; ++ /** Reserved. Offset:B00h + (ep_num * 20h) + 0Ch */ ++ uint32_t reserved0C; ++ /** Device OUT Endpoint Transfer Size Register. Offset: ++ * B00h + (ep_num * 20h) + 10h */ ++ volatile uint32_t doeptsiz; ++ /** Device OUT Endpoint DMA Address Register. Offset:B00h ++ * + (ep_num * 20h) + 14h */ ++ volatile uint32_t doepdma; ++ /** Reserved. Offset:B00h + * (ep_num * 20h) + 1Ch */ ++ uint32_t unused; ++ /** Device OUT Endpoint DMA Buffer Register. Offset:B00h ++ * + (ep_num * 20h) + 1Ch */ ++ uint32_t doepdmab; ++} dwc_otg_dev_out_ep_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Control ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
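[Editor's note] One detail of the endpoint control register worth calling out: for EP0 the mps field is a 2-bit code rather than a byte count, as the DWC_DEP0CTL_MPS_* values in the union that follows show. A small stand-alone helper mapping that code to a packet size:

#include <stdio.h>

/* EP0 maximum packet size is encoded (DWC_DEP0CTL_MPS_64..MPS_8), not stored in bytes. */
static int ep0_mps_bytes(unsigned mps_code)
{
	switch (mps_code) {
	case 0: return 64;
	case 1: return 32;
	case 2: return 16;
	case 3: return 8;
	default: return -1;
	}
}

int main(void)
{
	printf("code 0 -> %d bytes\n", ep0_mps_bytes(0));
	printf("code 3 -> %d bytes\n", ep0_mps_bytes(3));
	return 0;
}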
++ */ ++typedef union depctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Maximum Packet Size ++ * IN/OUT EPn ++ * IN/OUT EP0 - 2 bits ++ * 2'b00: 64 Bytes ++ * 2'b01: 32 ++ * 2'b10: 16 ++ * 2'b11: 8 */ ++ unsigned mps : 11; ++#define DWC_DEP0CTL_MPS_64 0 ++#define DWC_DEP0CTL_MPS_32 1 ++#define DWC_DEP0CTL_MPS_16 2 ++#define DWC_DEP0CTL_MPS_8 3 ++ ++ /** Next Endpoint ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned nextep : 4; ++ ++ /** USB Active Endpoint */ ++ unsigned usbactep : 1; ++ ++ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints) ++ * This field contains the PID of the packet going to ++ * be received or transmitted on this endpoint. The ++ * application should program the PID of the first ++ * packet going to be received or transmitted on this ++ * endpoint , after the endpoint is ++ * activated. Application use the SetD1PID and ++ * SetD0PID fields of this register to program either ++ * D0 or D1 PID. ++ * ++ * The encoding for this field is ++ * - 0: D0 ++ * - 1: D1 ++ */ ++ unsigned dpid : 1; ++ ++ /** NAK Status */ ++ unsigned naksts : 1; ++ ++ /** Endpoint Type ++ * 2'b00: Control ++ * 2'b01: Isochronous ++ * 2'b10: Bulk ++ * 2'b11: Interrupt */ ++ unsigned eptype : 2; ++ ++ /** Snoop Mode ++ * OUT EPn/OUT EP0 ++ * IN EPn/IN EP0 - reserved */ ++ unsigned snp : 1; ++ ++ /** Stall Handshake */ ++ unsigned stall : 1; ++ ++ /** Tx Fifo Number ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned txfnum : 4; ++ ++ /** Clear NAK */ ++ unsigned cnak : 1; ++ /** Set NAK */ ++ unsigned snak : 1; ++ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA0. Set Even ++ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to even (micro) ++ * frame. ++ */ ++ unsigned setd0pid : 1; ++ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA1 Set Odd ++ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to odd (micro) frame. ++ */ ++ unsigned setd1pid : 1; ++ /** Endpoint Disable */ ++ unsigned epdis : 1; ++ /** Endpoint Enable */ ++ unsigned epena : 1; ++ } b; ++} depctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Transfer ++ * Size Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union deptsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Transfer size */ ++ unsigned xfersize : 19; ++ /** Packet Count */ ++ unsigned pktcnt : 10; ++ /** Multi Count - Periodic IN endpoints */ ++ unsigned mc : 2; ++ unsigned reserved : 1; ++ } b; ++} deptsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP 0 Transfer ++ * Size Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
++ */ ++typedef union deptsiz0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Transfer size */ ++ unsigned xfersize : 7; ++ /** Reserved */ ++ unsigned reserved7_18 : 12; ++ /** Packet Count */ ++ unsigned pktcnt : 1; ++ /** Reserved */ ++ unsigned reserved20_28 : 9; ++ /** Setup Packet Count (DOEPTSIZ0 Only) */ ++ unsigned supcnt : 2; ++ unsigned reserved31 : 1; ++ } b; ++} deptsiz0_data_t; ++ ++ ++///////////////////////////////////////////////// ++// DMA Descriptor Specific Structures ++// ++ ++/** Buffer status definitions */ ++ ++#define BS_HOST_READY 0x0 ++#define BS_DMA_BUSY 0x1 ++#define BS_DMA_DONE 0x2 ++#define BS_HOST_BUSY 0x3 ++ ++/** Receive/Transmit status definitions */ ++ ++#define RTS_SUCCESS 0x0 ++#define RTS_BUFFLUSH 0x1 ++#define RTS_RESERVED 0x2 ++#define RTS_BUFERR 0x3 ++ ++ ++/** ++ * This union represents the bit fields in the DMA Descriptor ++ * status quadlet. Read the quadlet into the d32 member then ++ * set/clear the bits using the bit, b_iso_out and ++ * b_iso_in elements. ++ */ ++typedef union desc_sts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** quadlet bits */ ++ struct { ++ /** Received number of bytes */ ++ unsigned bytes : 16; ++ ++ unsigned reserved16_22 : 7; ++ /** Multiple Transfer - only for OUT EPs */ ++ unsigned mtrf : 1; ++ /** Setup Packet received - only for OUT EPs */ ++ unsigned sr : 1; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Receive Status */ ++ unsigned sts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b; ++ ++#ifdef DWC_EN_ISOC ++ /** iso out quadlet bits */ ++ struct { ++ /** Received number of bytes */ ++ unsigned rxbytes : 11; ++ ++ unsigned reserved11 : 1; ++ /** Frame Number */ ++ unsigned framenum : 11; ++ /** Received ISO Data PID */ ++ unsigned pid : 2; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Receive Status */ ++ unsigned rxsts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b_iso_out; ++ ++ /** iso in quadlet bits */ ++ struct { ++ /** Transmitted number of bytes */ ++ unsigned txbytes : 12; ++ /** Frame Number */ ++ unsigned framenum : 11; ++ /** Transmitted ISO Data PID */ ++ unsigned pid : 2; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Transmit Status */ ++ unsigned txsts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b_iso_in; ++#endif //DWC_EN_ISOC ++} desc_sts_data_t; ++ ++/** ++ * DMA Descriptor structure ++ * ++ * DMA Descriptor structure contains two quadlets: ++ * Status quadlet and Data buffer pointer. ++ */ ++typedef struct dwc_otg_dma_desc ++{ ++ /** DMA Descriptor status quadlet */ ++ desc_sts_data_t status; ++ /** DMA Descriptor data buffer pointer */ ++ dma_addr_t buf; ++} dwc_otg_dma_desc_t; ++ ++/** ++ * The dwc_otg_dev_if structure contains information needed to manage ++ * the DWC_otg controller acting in device mode. It represents the ++ * programming view of the device-specific aspects of the controller. ++ */ ++typedef struct dwc_otg_dev_if ++{ ++ /** Pointer to device Global registers.
++ * Device Global Registers starting at offset 800h ++ */ ++ dwc_otg_device_global_regs_t *dev_global_regs; ++#define DWC_DEV_GLOBAL_REG_OFFSET 0x800 ++ ++ /** ++ * Device Logical IN Endpoint-Specific Registers 900h-AFCh ++ */ ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; ++#define DWC_DEV_IN_EP_REG_OFFSET 0x900 ++#define DWC_EP_REG_OFFSET 0x20 ++ ++ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */ ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS]; ++#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00 ++ ++ /* Device configuration information*/ ++ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */ ++ uint8_t num_in_eps; /**< Number # of Tx EP range: 0-15 except ep0 */ ++ uint8_t num_out_eps; /**< Number # of Rx EP range: 0-15 except ep 0*/ ++ ++ /** Size of periodic FIFOs (Bytes) */ ++ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++ ++ /** Size of Tx FIFOs (Bytes) */ ++ uint16_t tx_fifo_size[MAX_TX_FIFOS]; ++ ++ /** Thresholding enable flags and length variables **/ ++ uint16_t rx_thr_en; ++ uint16_t iso_tx_thr_en; ++ uint16_t non_iso_tx_thr_en; ++ ++ uint16_t rx_thr_length; ++ uint16_t tx_thr_length; ++ ++ /** ++ * Pointers to the DMA Descriptors for EP0 Control ++ * transfers (virtual and physical) ++ */ ++ /** 2 descriptors for SETUP packets */ ++ uint32_t dma_setup_desc_addr[2]; ++ dwc_otg_dma_desc_t* setup_desc_addr[2]; ++ ++ /** Pointer to Descriptor with latest SETUP packet */ ++ dwc_otg_dma_desc_t* psetup; ++ ++ /** Index of current SETUP handler descriptor */ ++ uint32_t setup_desc_index; ++ ++ /** Descriptor for Data In or Status In phases */ ++ uint32_t dma_in_desc_addr; ++ dwc_otg_dma_desc_t* in_desc_addr; ++ ++ /** Descriptor for Data Out or Status Out phases */ ++ uint32_t dma_out_desc_addr; ++ dwc_otg_dma_desc_t* out_desc_addr; ++} dwc_otg_dev_if_t; ++ ++ ++ ++ ++///////////////////////////////////////////////// ++// Host Mode Register Structures ++// ++/** ++ * The Host Global Registers structure defines the size and relative ++ * field offsets for the Host Mode Global Registers. Host Global ++ * Registers offsets 400h-7FFh. ++*/ ++typedef struct dwc_otg_host_global_regs ++{ ++ /** Host Configuration Register. Offset: 400h */ ++ volatile uint32_t hcfg; ++ /** Host Frame Interval Register. Offset: 404h */ ++ volatile uint32_t hfir; ++ /** Host Frame Number / Frame Remaining Register. Offset: 408h */ ++ volatile uint32_t hfnum; ++ /** Reserved. Offset: 40Ch */ ++ uint32_t reserved40C; ++ /** Host Periodic Transmit FIFO/ Queue Status Register. Offset: 410h */ ++ volatile uint32_t hptxsts; ++ /** Host All Channels Interrupt Register. Offset: 414h */ ++ volatile uint32_t haint; ++ /** Host All Channels Interrupt Mask Register. Offset: 418h */ ++ volatile uint32_t haintmsk; ++} dwc_otg_host_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Configuration Register. ++ * Read the register into the d32 member then set/clear the bits using ++ * the bit elements. Write the d32 member to the hcfg register. ++ */ ++typedef union hcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** FS/LS Phy Clock Select */ ++ unsigned fslspclksel : 2; ++#define DWC_HCFG_30_60_MHZ 0 ++#define DWC_HCFG_48_MHZ 1 ++#define DWC_HCFG_6_MHZ 2 ++ ++ /** FS/LS Only Support */ ++ unsigned fslssupp : 1; ++ } b; ++} hcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaining/Number ++ * Register.
++ */ ++typedef union hfir_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned frint : 16; ++ unsigned reserved : 16; ++ } b; ++} hfir_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. ++ */ ++typedef union hfnum_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned frnum : 16; ++#define DWC_HFNUM_MAX_FRNUM 0x3FFF ++ unsigned frrem : 16; ++ } b; ++} hfnum_data_t; ++ ++typedef union hptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned ptxfspcavail : 16; ++ unsigned ptxqspcavail : 8; ++ /** Top of the Periodic Transmit Request Queue ++ * - bit 24 - Terminate (last entry for the selected channel) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - Zero length ++ * - 2'b01 - Ping ++ * - 2'b10 - Disable ++ * - bits 30:27 - Channel Number ++ * - bit 31 - Odd/even microframe ++ */ ++ unsigned ptxqtop_terminate : 1; ++ unsigned ptxqtop_token : 2; ++ unsigned ptxqtop_chnum : 4; ++ unsigned ptxqtop_odd : 1; ++ } b; ++} hptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Port Control and Status ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hprt0 register. ++ */ ++typedef union hprt0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned prtconnsts : 1; ++ unsigned prtconndet : 1; ++ unsigned prtena : 1; ++ unsigned prtenchng : 1; ++ unsigned prtovrcurract : 1; ++ unsigned prtovrcurrchng : 1; ++ unsigned prtres : 1; ++ unsigned prtsusp : 1; ++ unsigned prtrst : 1; ++ unsigned reserved9 : 1; ++ unsigned prtlnsts : 2; ++ unsigned prtpwr : 1; ++ unsigned prttstctl : 4; ++ unsigned prtspd : 2; ++#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0 ++#define DWC_HPRT0_PRTSPD_FULL_SPEED 1 ++#define DWC_HPRT0_PRTSPD_LOW_SPEED 2 ++ unsigned reserved19_31 : 13; ++ } b; ++} hprt0_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union haint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ch0 : 1; ++ unsigned ch1 : 1; ++ unsigned ch2 : 1; ++ unsigned ch3 : 1; ++ unsigned ch4 : 1; ++ unsigned ch5 : 1; ++ unsigned ch6 : 1; ++ unsigned ch7 : 1; ++ unsigned ch8 : 1; ++ unsigned ch9 : 1; ++ unsigned ch10 : 1; ++ unsigned ch11 : 1; ++ unsigned ch12 : 1; ++ unsigned ch13 : 1; ++ unsigned ch14 : 1; ++ unsigned ch15 : 1; ++ unsigned reserved : 16; ++ } b; ++ ++ struct ++ { ++ unsigned chint : 16; ++ unsigned reserved : 16; ++ } b2; ++} haint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union haintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ch0 : 1; ++ unsigned ch1 : 1; ++ unsigned ch2 : 1; ++ unsigned ch3 : 1; ++ unsigned ch4 : 1; ++ unsigned ch5 : 1; ++ unsigned ch6 : 1; ++ unsigned ch7 : 1; ++ unsigned ch8 : 1; ++ unsigned ch9 : 1; ++ unsigned ch10 : 1; ++ unsigned ch11 : 1; ++ unsigned ch12 : 1; ++ unsigned ch13 : 1; ++ unsigned ch14 : 1; ++ unsigned ch15 : 1; ++ unsigned reserved : 16; ++ } b; ++ ++ struct ++ { ++ unsigned chint : 16; ++ unsigned reserved : 16; ++ } b2; ++} haintmsk_data_t; ++ ++/** ++ * Host Channel Specific Registers. 
500h-5FCh ++ */ ++typedef struct dwc_otg_hc_regs ++{ ++ /** Host Channel 0 Characteristic Register. Offset: 500h + (chan_num * 20h) + 00h */ ++ volatile uint32_t hcchar; ++ /** Host Channel 0 Split Control Register. Offset: 500h + (chan_num * 20h) + 04h */ ++ volatile uint32_t hcsplt; ++ /** Host Channel 0 Interrupt Register. Offset: 500h + (chan_num * 20h) + 08h */ ++ volatile uint32_t hcint; ++ /** Host Channel 0 Interrupt Mask Register. Offset: 500h + (chan_num * 20h) + 0Ch */ ++ volatile uint32_t hcintmsk; ++ /** Host Channel 0 Transfer Size Register. Offset: 500h + (chan_num * 20h) + 10h */ ++ volatile uint32_t hctsiz; ++ /** Host Channel 0 DMA Address Register. Offset: 500h + (chan_num * 20h) + 14h */ ++ volatile uint32_t hcdma; ++ /** Reserved. Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch */ ++ uint32_t reserved[2]; ++} dwc_otg_hc_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Characteristics ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcchar register. ++ */ ++typedef union hcchar_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Maximum packet size in bytes */ ++ unsigned mps : 11; ++ ++ /** Endpoint number */ ++ unsigned epnum : 4; ++ ++ /** 0: OUT, 1: IN */ ++ unsigned epdir : 1; ++ ++ unsigned reserved : 1; ++ ++ /** 0: Full/high speed device, 1: Low speed device */ ++ unsigned lspddev : 1; ++ ++ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */ ++ unsigned eptype : 2; ++ ++ /** Packets per frame for periodic transfers. 0 is reserved. */ ++ unsigned multicnt : 2; ++ ++ /** Device address */ ++ unsigned devaddr : 7; ++ ++ /** ++ * Frame to transmit periodic transaction. ++ * 0: even, 1: odd ++ */ ++ unsigned oddfrm : 1; ++ ++ /** Channel disable */ ++ unsigned chdis : 1; ++ ++ /** Channel enable */ ++ unsigned chen : 1; ++ } b; ++} hcchar_data_t; ++ ++typedef union hcsplt_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Port Address */ ++ unsigned prtaddr : 7; ++ ++ /** Hub Address */ ++ unsigned hubaddr : 7; ++ ++ /** Transaction Position */ ++ unsigned xactpos : 2; ++#define DWC_HCSPLIT_XACTPOS_MID 0 ++#define DWC_HCSPLIT_XACTPOS_END 1 ++#define DWC_HCSPLIT_XACTPOS_BEGIN 2 ++#define DWC_HCSPLIT_XACTPOS_ALL 3 ++ ++ /** Do Complete Split */ ++ unsigned compsplt : 1; ++ ++ /** Reserved */ ++ unsigned reserved : 14; ++ ++ /** Split Enble */ ++ unsigned spltena : 1; ++ } b; ++} hcsplt_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union hcint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer Complete */ ++ unsigned xfercomp : 1; ++ /** Channel Halted */ ++ unsigned chhltd : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** STALL Response Received */ ++ unsigned stall : 1; ++ /** NAK Response Received */ ++ unsigned nak : 1; ++ /** ACK Response Received */ ++ unsigned ack : 1; ++ /** NYET Response Received */ ++ unsigned nyet : 1; ++ /** Transaction Err */ ++ unsigned xacterr : 1; ++ /** Babble Error */ ++ unsigned bblerr : 1; ++ /** Frame Overrun */ ++ unsigned frmovrun : 1; ++ /** Data Toggle Error */ ++ unsigned datatglerr : 1; ++ /** Reserved */ ++ unsigned reserved : 21; ++ } b; ++} hcint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Transfer Size ++ * Register. 
Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcchar register. ++ */ ++typedef union hctsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Total transfer size in bytes */ ++ unsigned xfersize : 19; ++ ++ /** Data packets to transfer */ ++ unsigned pktcnt : 10; ++ ++ /** ++ * Packet ID for next data packet ++ * 0: DATA0 ++ * 1: DATA2 ++ * 2: DATA1 ++ * 3: MDATA (non-Control), SETUP (Control) ++ */ ++ unsigned pid : 2; ++#define DWC_HCTSIZ_DATA0 0 ++#define DWC_HCTSIZ_DATA1 2 ++#define DWC_HCTSIZ_DATA2 1 ++#define DWC_HCTSIZ_MDATA 3 ++#define DWC_HCTSIZ_SETUP 3 ++ ++ /** Do PING protocol when 1 */ ++ unsigned dopng : 1; ++ } b; ++} hctsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Interrupt Mask ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcintmsk register. ++ */ ++typedef union hcintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned xfercompl : 1; ++ unsigned chhltd : 1; ++ unsigned ahberr : 1; ++ unsigned stall : 1; ++ unsigned nak : 1; ++ unsigned ack : 1; ++ unsigned nyet : 1; ++ unsigned xacterr : 1; ++ unsigned bblerr : 1; ++ unsigned frmovrun : 1; ++ unsigned datatglerr : 1; ++ unsigned reserved : 21; ++ } b; ++} hcintmsk_data_t; ++ ++/** OTG Host Interface Structure. ++ * ++ * The OTG Host Interface Structure structure contains information ++ * needed to manage the DWC_otg controller acting in host mode. It ++ * represents the programming view of the host-specific aspects of the ++ * controller. ++ */ ++typedef struct dwc_otg_host_if ++{ ++ /** Host Global Registers starting at offset 400h.*/ ++ dwc_otg_host_global_regs_t *host_global_regs; ++#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400 ++ ++ /** Host Port 0 Control and Status Register */ ++ volatile uint32_t *hprt0; ++#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440 ++ ++ /** Host Channel Specific Registers at offsets 500h-5FCh. */ ++ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS]; ++#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500 ++#define DWC_OTG_CHAN_REGS_OFFSET 0x20 ++ ++ ++ /* Host configuration information */ ++ /** Number of Host Channels (range: 1-16) */ ++ uint8_t num_host_channels; ++ /** Periodic EPs supported (0: no, 1: yes) */ ++ uint8_t perio_eps_supported; ++ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */ ++ uint16_t perio_tx_fifo_size; ++} dwc_otg_host_if_t; ++ ++ ++/** ++ * This union represents the bit fields in the Power and Clock Gating Control ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union pcgcctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Stop Pclk */ ++ unsigned stoppclk : 1; ++ /** Gate Hclk */ ++ unsigned gatehclk : 1; ++ /** Power Clamp */ ++ unsigned pwrclmp : 1; ++ /** Reset Power Down Modules */ ++ unsigned rstpdwnmodule : 1; ++ /** PHY Suspended */ ++ unsigned physuspended : 1; ++ unsigned reserved : 27; ++ } b; ++} pcgcctl_data_t; ++ ++ ++#endif +--- a/drivers/usb/core/urb.c ++++ b/drivers/usb/core/urb.c +@@ -17,7 +17,11 @@ static void urb_destroy(struct kref *kre + + if (urb->transfer_flags & URB_FREE_BUFFER) + kfree(urb->transfer_buffer); +- ++ if (urb->aligned_transfer_buffer) { ++ kfree(urb->aligned_transfer_buffer); ++ urb->aligned_transfer_buffer = 0; ++ urb->aligned_transfer_dma = 0; ++ } + kfree(urb); + } + +--- a/include/linux/usb.h ++++ b/include/linux/usb.h +@@ -1202,6 +1202,9 @@ struct urb { + unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/ + void *transfer_buffer; /* (in) associated data buffer */ + dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ ++ void *aligned_transfer_buffer; /* (in) associated data buffer */ ++ dma_addr_t aligned_transfer_dma;/* (in) dma addr for aligned_transfer_buffer */ ++ u32 aligned_transfer_buffer_length; /* (in) data buffer length */ + struct scatterlist *sg; /* (in) scatter gather buffer list */ + int num_mapped_sgs; /* (internal) mapped sg entries */ + int num_sgs; /* (in) number of entries in the sg list */ +--- a/drivers/usb/gadget/Kconfig ++++ b/drivers/usb/gadget/Kconfig +@@ -108,7 +108,7 @@ config USB_GADGET_VBUS_DRAW + # + choice + prompt "USB Peripheral Controller" +- depends on USB_GADGET ++ depends on USB_GADGET && !USB_DWC_OTG + help + A USB device uses a controller to talk to its host. + Systems should have only one such upstream link. +--- a/drivers/usb/gadget/Makefile ++++ b/drivers/usb/gadget/Makefile +@@ -3,7 +3,7 @@ + # + ccflags-$(CONFIG_USB_GADGET_DEBUG) := -DDEBUG + +-obj-$(CONFIG_USB_GADGET) += udc-core.o ++#obj-$(CONFIG_USB_GADGET) += udc-core.o + obj-$(CONFIG_USB_DUMMY_HCD) += dummy_hcd.o + obj-$(CONFIG_USB_NET2272) += net2272.o + obj-$(CONFIG_USB_NET2280) += net2280.o -- cgit v1.2.3
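
For readers skimming the register header quoted in 200-dwc_otg.patch above: its unions are meant to be used with a read-modify-write pattern, i.e. read the whole 32-bit register into the d32 member, change individual fields through the b bit-field view, then write d32 back. The fragment below is a minimal sketch of that pattern, not part of the patch; it assumes the quoted header is included and that host_if has already been set up by the driver, and the function name itself is hypothetical.

/* Sketch only: select the 48 MHz FS/LS PHY clock via the HCFG register. */
static void dwc_otg_hcfg_example(dwc_otg_host_if_t *host_if)
{
	hcfg_data_t hcfg;

	/* Read the raw register value into the d32 member. */
	hcfg.d32 = host_if->host_global_regs->hcfg;

	/* Modify individual fields through the bit-field view. */
	hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
	hcfg.b.fslssupp = 1;

	/* Write the whole word back to the hcfg register. */
	host_if->host_global_regs->hcfg = hcfg.d32;
}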
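
The DMA descriptor types from the same header (dwc_otg_dma_desc_t, desc_sts_data_t and the BS_* buffer-status codes) are filled in the same way before a buffer is handed to the controller. Again a sketch only, under the assumption that the caller has already DMA-mapped the buffer; the helper name is hypothetical.

/* Sketch only: prepare one descriptor and mark it ready for the DMA engine. */
static void dwc_otg_fill_desc_example(dwc_otg_dma_desc_t *desc,
				      dma_addr_t buf_dma, uint16_t len)
{
	desc_sts_data_t sts = { .d32 = 0 };

	sts.b.bytes = len;		/* transfer size in bytes */
	sts.b.ioc = 1;			/* interrupt on completion */
	sts.b.l = 1;			/* last descriptor in the chain */
	sts.b.bs = BS_HOST_READY;	/* controller may now process it */

	desc->buf = buf_dma;		/* bus address of the data buffer */
	desc->status = sts;		/* publish the status quadlet */
}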