Diffstat (limited to 'target/linux/lantiq/files-3.3')
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/configs/ase_defconfig | 67
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/configs/falcon_defconfig | 72
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/configs/xway_defconfig | 66
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/clkdev.h | 25
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-buttons.h | 26
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-leds.h | 21
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h | 268
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/irq.h | 18
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h | 152
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/lantiq_timer.h | 155
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/base_reg.h | 56
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/boot_reg.h | 37
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/dma_reg.h | 308
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ebu_reg.h | 615
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/es_reg.h | 2098
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/irq.h | 36
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/lantiq_soc.h | 71
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/mps_reg.h | 242
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/port_reg.h | 3262
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ssc_reg.h | 624
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/status_reg.h | 130
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_dma.h | 245
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_irq.h | 35
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_mux.h | 467
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_pms.h | 23
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys0_reg.h | 165
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys1_reg.h | 370
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys2_reg.h | 494
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-buttons.c | 58
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-leds.c | 57
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Kconfig | 11
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Makefile | 2
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/addon-easy98000.c | 213
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.c | 161
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.h | 20
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.c | 152
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.h | 25
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/gpio.c | 409
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-95C3AM1.c | 94
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98000.c | 138
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98020.c | 118
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/prom.c | 84
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/reset.c | 87
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/sysctrl.c | 211
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Kconfig | 16
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Makefile | 3
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/clk-svip.c | 100
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.c | 380
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.h | 23
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/dma.c | 1206
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/gpio.c | 553
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy33016.c | 73
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy336.c | 221
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mux.c | 187
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/pms.c | 101
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/prom.c | 73
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/reset.c | 95
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/switchip_setup.c | 666
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/clk.c | 329
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.c | 70
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.h | 17
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.c | 45
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.h | 17
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.c | 53
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.h | 16
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.c | 32
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.h | 14
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/gptu.c | 176
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-arv.c | 793
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-bthomehubv2b.c | 542
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_ar9.c | 115
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_vr9.c | 164
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.c | 168
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.h | 210
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-h201l.c | 100
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-netgear.c | 239
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-p2601hnfx.c | 113
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-wbmr.c | 120
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/nand.c | 216
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.c | 109
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.h | 6
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/prom.c | 110
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/sysctrl.c | 283
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/timer.c | 846
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq-pcie.c | 81
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq.c | 42
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-msi.c | 399
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-phy.c | 408
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.c | 1146
-rw-r--r--  target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.h | 1305
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/i2c/busses/i2c-falcon.c | 1040
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/net/ethernet/lantiq_vrx200.c | 1358
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_eth.c | 636
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_virtual_eth.c | 346
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/spi/spi-falcon.c | 483
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/spi/spi-xway.c | 1070
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/spi/spi_svip.c | 955
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Kconfig | 37
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Makefile | 39
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.c | 802
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.h | 67
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.c | 3025
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.h | 911
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_ifx.h | 58
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_intr.c | 708
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.c | 1277
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.h | 84
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.c | 2870
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.h | 676
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c | 1839
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c | 794
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.c | 103
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.h | 85
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_plat.h | 269
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_regs.h | 1797
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Kconfig | 58
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Makefile | 85
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/TagHistory | 171
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.c | 2523
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.h | 628
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_es.c | 549
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_intr.c | 3742
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_queue.c | 418
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.c | 1458
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.h | 665
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_d.c | 458
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_h.c | 846
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_ctl.c | 1385
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_driver.c | 970
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_plat.h | 1018
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_regs.h | 1420
-rw-r--r--  target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_version.h | 5
-rw-r--r--  target/linux/lantiq/files-3.3/include/linux/svip_nat.h | 37
-rw-r--r--  target/linux/lantiq/files-3.3/include/linux/svip_nat_io.h | 103
-rw-r--r--  target/linux/lantiq/files-3.3/net/ipv4/svip_nat.c | 1569
135 files changed, 63406 insertions, 0 deletions
diff --git a/target/linux/lantiq/files-3.3/arch/mips/configs/ase_defconfig b/target/linux/lantiq/files-3.3/arch/mips/configs/ase_defconfig
new file mode 100644
index 0000000000..5bb1d93ebd
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/configs/ase_defconfig
@@ -0,0 +1,67 @@
+CONFIG_LANTIQ=y
+CONFIG_SOC_AMAZON_SE=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_DEFAULT_HOSTNAME="amazon_se"
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../root-lantiq/ ../root-lantiq/initramfs-base-files.txt"
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+# CONFIG_RD_GZIP is not set
+CONFIG_RD_LZMA=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_LANTIQ=y
+CONFIG_MISC_DEVICES=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_LANTIQ_ETOP=y
+CONFIG_PHYLIB=y
+CONFIG_SERIAL_LANTIQ=y
+CONFIG_PINCTRL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_WATCHDOG=y
+CONFIG_LANTIQ_WDT=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
diff --git a/target/linux/lantiq/files-3.3/arch/mips/configs/falcon_defconfig b/target/linux/lantiq/files-3.3/arch/mips/configs/falcon_defconfig
new file mode 100644
index 0000000000..ce242a88ed
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/configs/falcon_defconfig
@@ -0,0 +1,72 @@
+CONFIG_LANTIQ=y
+CONFIG_SOC_FALCON=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_DEFAULT_HOSTNAME="falcon"
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../root-lantiq/ ../root-lantiq/initramfs-base-files.txt"
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+# CONFIG_RD_GZIP is not set
+CONFIG_RD_LZMA=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_LANTIQ=y
+CONFIG_MTD_M25P80=y
+CONFIG_MISC_DEVICES=y
+CONFIG_EEPROM_AT24=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_PHYLIB=y
+CONFIG_SERIAL_LANTIQ=y
+CONFIG_I2C=y
+CONFIG_I2C_FALCON=y
+CONFIG_SPI=y
+CONFIG_SPI_FALCON=y
+CONFIG_PINCTRL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_WATCHDOG=y
+CONFIG_LANTIQ_WDT=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
diff --git a/target/linux/lantiq/files-3.3/arch/mips/configs/xway_defconfig b/target/linux/lantiq/files-3.3/arch/mips/configs/xway_defconfig
new file mode 100644
index 0000000000..510a9641d3
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/configs/xway_defconfig
@@ -0,0 +1,66 @@
+CONFIG_LANTIQ=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_EXPERIMENTAL=y
+CONFIG_DEFAULT_HOSTNAME="danube"
+CONFIG_SYSVIPC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../root-lantiq/ ../root-lantiq/initramfs-base-files.txt"
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+# CONFIG_RD_GZIP is not set
+CONFIG_RD_LZMA=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NETFILTER=y
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_LANTIQ=y
+CONFIG_MISC_DEVICES=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_LANTIQ_ETOP=y
+CONFIG_PHYLIB=y
+CONFIG_SERIAL_LANTIQ=y
+CONFIG_PINCTRL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_WATCHDOG=y
+CONFIG_LANTIQ_WDT=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/clkdev.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/clkdev.h
new file mode 100644
index 0000000000..262475414e
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/clkdev.h
@@ -0,0 +1,25 @@
+/*
+ * based on arch/arm/include/asm/clkdev.h
+ *
+ * Copyright (C) 2008 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Helper for the clk API to assist looking up a struct clk.
+ */
+#ifndef __ASM_CLKDEV_H
+#define __ASM_CLKDEV_H
+
+#include <linux/slab.h>
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
+{
+ return kzalloc(size, GFP_KERNEL);
+}
+
+#endif
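
For context: the generic clkdev core (drivers/clk/clkdev.c) calls the __clkdev_alloc() hook above whenever a clock lookup is allocated, so platform code registers named clocks roughly as in the following sketch. The con_id "fpi" and the device name are illustrative assumptions, not taken from this patch.

	/* sketch: how a clock lookup is registered via the generic clkdev API */
	#include <linux/clkdev.h>
	#include <linux/err.h>

	static int __init example_register_clock(struct clk *clk)
	{
		struct clk_lookup *cl;

		/* con_id "fpi" and device name "ltq_asc.0" are illustrative only */
		cl = clkdev_alloc(clk, "fpi", "ltq_asc.0");
		if (!cl)
			return -ENOMEM;
		clkdev_add(cl);		/* storage came from __clkdev_alloc() above */
		return 0;
	}
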
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-buttons.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-buttons.h
new file mode 100644
index 0000000000..adb531c39b
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-buttons.h
@@ -0,0 +1,26 @@
+/*
+ * Lantiq GPIO button support
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _LANTIQ_DEV_GPIO_BUTTONS_H
+#define _LANTIQ_DEV_GPIO_BUTTONS_H
+
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+#define LTQ_KEYS_POLL_INTERVAL 20 /* msecs */
+#define LTQ_KEYS_DEBOUNCE_INTERVAL (3 * LTQ_KEYS_POLL_INTERVAL)
+
+void ltq_register_gpio_keys_polled(int id,
+ unsigned poll_interval,
+ unsigned nbuttons,
+ struct gpio_keys_button *buttons);
+
+#endif /* _LANTIQ_DEV_GPIO_BUTTONS_H */
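
For context, a board file typically feeds this helper a static gpio_keys_button table; a minimal sketch follows (the GPIO number is a placeholder, not taken from any board in this patch).

	static struct gpio_keys_button example_buttons[] __initdata = {
		{
			.desc			= "reset",
			.type			= EV_KEY,
			.code			= KEY_RESTART,
			.debounce_interval	= LTQ_KEYS_DEBOUNCE_INTERVAL,
			.gpio			= 14,	/* placeholder GPIO number */
			.active_low		= 1,
		},
	};

	/* id -1 = single instance, polled at the default interval */
	ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
				      ARRAY_SIZE(example_buttons), example_buttons);
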
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-leds.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-leds.h
new file mode 100644
index 0000000000..d51e496d14
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/dev-gpio-leds.h
@@ -0,0 +1,21 @@
+/*
+ * Lantiq GPIO LED device support
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _LANTIQ_DEV_LEDS_GPIO_H
+#define _LANTIQ_DEV_LEDS_GPIO_H
+
+#include <linux/leds.h>
+
+void ltq_add_device_gpio_leds(int id,
+ unsigned num_leds,
+ struct gpio_led *leds) __init;
+
+#endif /* _LANTIQ_DEV_LEDS_GPIO_H */
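
Likewise, an illustrative sketch of how a board file hands this helper a static gpio_led table; the LED names and GPIO numbers are placeholders.

	static struct gpio_led example_leds[] __initdata = {
		{ .name = "example:green:power", .gpio = 19, .active_low = 1, },
		{ .name = "example:green:wifi",  .gpio = 20, .active_low = 1, },
	};

	ltq_add_device_gpio_leds(-1, ARRAY_SIZE(example_leds), example_leds);
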
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644
index 0000000000..4dc6466e84
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -0,0 +1,268 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef _FALCON_IRQ__
+#define _FALCON_IRQ__
+
+#define INT_NUM_IRQ0 8
+#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0 (INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0 (INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0 (INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0 (INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define MIPS_CPU_TIMER_IRQ 7
+
+/* HOST IF Event Interrupt */
+#define FALCON_IRQ_HOST (INT_NUM_IM0_IRL0 + 0)
+/* HOST IF Mailbox0 Receive Interrupt */
+#define FALCON_IRQ_HOST_MB0_RX (INT_NUM_IM0_IRL0 + 1)
+/* HOST IF Mailbox0 Transmit Interrupt */
+#define FALCON_IRQ_HOST_MB0_TX (INT_NUM_IM0_IRL0 + 2)
+/* HOST IF Mailbox1 Receive Interrupt */
+#define FALCON_IRQ_HOST_MB1_RX (INT_NUM_IM0_IRL0 + 3)
+/* HOST IF Mailbox1 Transmit Interrupt */
+#define FALCON_IRQ_HOST_MB1_TX (INT_NUM_IM0_IRL0 + 4)
+/* I2C Last Single Data Transfer Request */
+#define FALCON_IRQ_I2C_LSREQ (INT_NUM_IM0_IRL0 + 8)
+/* I2C Single Data Transfer Request */
+#define FALCON_IRQ_I2C_SREQ (INT_NUM_IM0_IRL0 + 9)
+/* I2C Last Burst Data Transfer Request */
+#define FALCON_IRQ_I2C_LBREQ (INT_NUM_IM0_IRL0 + 10)
+/* I2C Burst Data Transfer Request */
+#define FALCON_IRQ_I2C_BREQ (INT_NUM_IM0_IRL0 + 11)
+/* I2C Error Interrupt */
+#define FALCON_IRQ_I2C_I2C_ERR (INT_NUM_IM0_IRL0 + 12)
+/* I2C Protocol Interrupt */
+#define FALCON_IRQ_I2C_I2C_P (INT_NUM_IM0_IRL0 + 13)
+/* SSC Transmit Interrupt */
+#define FALCON_IRQ_SSC_T (INT_NUM_IM0_IRL0 + 14)
+/* SSC Receive Interrupt */
+#define FALCON_IRQ_SSC_R (INT_NUM_IM0_IRL0 + 15)
+/* SSC Error Interrupt */
+#define FALCON_IRQ_SSC_E (INT_NUM_IM0_IRL0 + 16)
+/* SSC Frame Interrupt */
+#define FALCON_IRQ_SSC_F (INT_NUM_IM0_IRL0 + 17)
+/* Advanced Encryption Standard Interrupt */
+#define FALCON_IRQ_AES_AES (INT_NUM_IM0_IRL0 + 27)
+/* Secure Hash Algorithm Interrupt */
+#define FALCON_IRQ_SHA_HASH (INT_NUM_IM0_IRL0 + 28)
+/* PCM Receive Interrupt */
+#define FALCON_IRQ_PCM_RX (INT_NUM_IM0_IRL0 + 29)
+/* PCM Transmit Interrupt */
+#define FALCON_IRQ_PCM_TX (INT_NUM_IM0_IRL0 + 30)
+/* PCM Transmit Crash Interrupt */
+#define FALCON_IRQ_PCM_HW2_CRASH (INT_NUM_IM0_IRL0 + 31)
+
+/* EBU Serial Flash Command Error */
+#define FALCON_IRQ_EBU_SF_CMDERR (INT_NUM_IM1_IRL0 + 0)
+/* EBU Serial Flash Command Overwrite Error */
+#define FALCON_IRQ_EBU_SF_COVERR (INT_NUM_IM1_IRL0 + 1)
+/* EBU Serial Flash Busy */
+#define FALCON_IRQ_EBU_SF_BUSY (INT_NUM_IM1_IRL0 + 2)
+/* External Interrupt from GPIO P0 */
+#define FALCON_IRQ_GPIO_P0 (INT_NUM_IM1_IRL0 + 4)
+/* External Interrupt from GPIO P1 */
+#define FALCON_IRQ_GPIO_P1 (INT_NUM_IM1_IRL0 + 5)
+/* External Interrupt from GPIO P2 */
+#define FALCON_IRQ_GPIO_P2 (INT_NUM_IM1_IRL0 + 6)
+/* External Interrupt from GPIO P3 */
+#define FALCON_IRQ_GPIO_P3 (INT_NUM_IM1_IRL0 + 7)
+/* External Interrupt from GPIO P4 */
+#define FALCON_IRQ_GPIO_P4 (INT_NUM_IM1_IRL0 + 8)
+/* 8kHz backup interrupt derived from core-PLL */
+#define FALCON_IRQ_FSC_BKP (INT_NUM_IM1_IRL0 + 10)
+/* FSC Timer Interrupt 0 */
+#define FALCON_IRQ_FSCT_CMP0 (INT_NUM_IM1_IRL0 + 11)
+/* FSC Timer Interrupt 1 */
+#define FALCON_IRQ_FSCT_CMP1 (INT_NUM_IM1_IRL0 + 12)
+/* 8kHz root interrupt derived from GPON interface */
+#define FALCON_IRQ_FSC_ROOT (INT_NUM_IM1_IRL0 + 13)
+/* Time of Day */
+#define FALCON_IRQ_TOD (INT_NUM_IM1_IRL0 + 14)
+/* PMA Interrupt from IntNode of the 200MHz Domain */
+#define FALCON_IRQ_PMA_200M (INT_NUM_IM1_IRL0 + 15)
+/* PMA Interrupt from IntNode of the TX Clk Domain */
+#define FALCON_IRQ_PMA_TX (INT_NUM_IM1_IRL0 + 16)
+/* PMA Interrupt from IntNode of the RX Clk Domain */
+#define FALCON_IRQ_PMA_RX (INT_NUM_IM1_IRL0 + 17)
+/* SYS1 Interrupt */
+#define FALCON_IRQ_SYS1 (INT_NUM_IM1_IRL0 + 20)
+/* SYS GPE Interrupt */
+#define FALCON_IRQ_SYS_GPE (INT_NUM_IM1_IRL0 + 21)
+/* Watchdog Access Error Interrupt */
+#define FALCON_IRQ_WDT_AEIR (INT_NUM_IM1_IRL0 + 24)
+/* Watchdog Prewarning Interrupt */
+#define FALCON_IRQ_WDT_PIR (INT_NUM_IM1_IRL0 + 25)
+/* SBIU interrupt */
+#define FALCON_IRQ_SBIU0 (INT_NUM_IM1_IRL0 + 27)
+/* FPI Bus Control Unit Interrupt */
+#define FALCON_IRQ_BCU0 (INT_NUM_IM1_IRL0 + 29)
+/* DDR Controller Interrupt */
+#define FALCON_IRQ_DDR (INT_NUM_IM1_IRL0 + 30)
+/* Crossbar Error Interrupt */
+#define FALCON_IRQ_XBAR_ERROR (INT_NUM_IM1_IRL0 + 31)
+
+/* ICTRLL 0 Interrupt */
+#define FALCON_IRQ_ICTRLL0 (INT_NUM_IM2_IRL0 + 0)
+/* ICTRLL 1 Interrupt */
+#define FALCON_IRQ_ICTRLL1 (INT_NUM_IM2_IRL0 + 1)
+/* ICTRLL 2 Interrupt */
+#define FALCON_IRQ_ICTRLL2 (INT_NUM_IM2_IRL0 + 2)
+/* ICTRLL 3 Interrupt */
+#define FALCON_IRQ_ICTRLL3 (INT_NUM_IM2_IRL0 + 3)
+/* OCTRLL 0 Interrupt */
+#define FALCON_IRQ_OCTRLL0 (INT_NUM_IM2_IRL0 + 4)
+/* OCTRLL 1 Interrupt */
+#define FALCON_IRQ_OCTRLL1 (INT_NUM_IM2_IRL0 + 5)
+/* OCTRLL 2 Interrupt */
+#define FALCON_IRQ_OCTRLL2 (INT_NUM_IM2_IRL0 + 6)
+/* OCTRLL 3 Interrupt */
+#define FALCON_IRQ_OCTRLL3 (INT_NUM_IM2_IRL0 + 7)
+/* OCTRLG Interrupt */
+#define FALCON_IRQ_OCTRLG (INT_NUM_IM2_IRL0 + 9)
+/* IQM Interrupt */
+#define FALCON_IRQ_IQM (INT_NUM_IM2_IRL0 + 10)
+/* FSQM Interrupt */
+#define FALCON_IRQ_FSQM (INT_NUM_IM2_IRL0 + 11)
+/* TMU Interrupt */
+#define FALCON_IRQ_TMU (INT_NUM_IM2_IRL0 + 12)
+/* LINK1 Interrupt */
+#define FALCON_IRQ_LINK1 (INT_NUM_IM2_IRL0 + 14)
+/* ICTRLC 0 Interrupt */
+#define FALCON_IRQ_ICTRLC0 (INT_NUM_IM2_IRL0 + 16)
+/* ICTRLC 1 Interrupt */
+#define FALCON_IRQ_ICTRLC1 (INT_NUM_IM2_IRL0 + 17)
+/* OCTRLC Interrupt */
+#define FALCON_IRQ_OCTRLC (INT_NUM_IM2_IRL0 + 18)
+/* CONFIG Break Interrupt */
+#define FALCON_IRQ_CONFIG_BREAK (INT_NUM_IM2_IRL0 + 19)
+/* CONFIG Interrupt */
+#define FALCON_IRQ_CONFIG (INT_NUM_IM2_IRL0 + 20)
+/* Dispatcher Interrupt */
+#define FALCON_IRQ_DISP (INT_NUM_IM2_IRL0 + 21)
+/* TBM Interrupt */
+#define FALCON_IRQ_TBM (INT_NUM_IM2_IRL0 + 22)
+/* GTC Downstream Interrupt */
+#define FALCON_IRQ_GTC_DS (INT_NUM_IM2_IRL0 + 29)
+/* GTC Upstream Interrupt */
+#define FALCON_IRQ_GTC_US (INT_NUM_IM2_IRL0 + 30)
+/* EIM Interrupt */
+#define FALCON_IRQ_EIM (INT_NUM_IM2_IRL0 + 31)
+
+/* ASC0 Transmit Interrupt */
+#define FALCON_IRQ_ASC0_T (INT_NUM_IM3_IRL0 + 0)
+/* ASC0 Receive Interrupt */
+#define FALCON_IRQ_ASC0_R (INT_NUM_IM3_IRL0 + 1)
+/* ASC0 Error Interrupt */
+#define FALCON_IRQ_ASC0_E (INT_NUM_IM3_IRL0 + 2)
+/* ASC0 Transmit Buffer Interrupt */
+#define FALCON_IRQ_ASC0_TB (INT_NUM_IM3_IRL0 + 3)
+/* ASC0 Autobaud Start Interrupt */
+#define FALCON_IRQ_ASC0_ABST (INT_NUM_IM3_IRL0 + 4)
+/* ASC0 Autobaud Detection Interrupt */
+#define FALCON_IRQ_ASC0_ABDET (INT_NUM_IM3_IRL0 + 5)
+/* ASC0 Modem Status Interrupt */
+#define FALCON_IRQ_ASC0_MS (INT_NUM_IM3_IRL0 + 6)
+/* ASC0 Soft Flow Control Interrupt */
+#define FALCON_IRQ_ASC0_SFC (INT_NUM_IM3_IRL0 + 7)
+/* ASC1 Transmit Interrupt */
+#define FALCON_IRQ_ASC1_T (INT_NUM_IM3_IRL0 + 8)
+/* ASC1 Receive Interrupt */
+#define FALCON_IRQ_ASC1_R (INT_NUM_IM3_IRL0 + 9)
+/* ASC1 Error Interrupt */
+#define FALCON_IRQ_ASC1_E (INT_NUM_IM3_IRL0 + 10)
+/* ASC1 Transmit Buffer Interrupt */
+#define FALCON_IRQ_ASC1_TB (INT_NUM_IM3_IRL0 + 11)
+/* ASC1 Autobaud Start Interrupt */
+#define FALCON_IRQ_ASC1_ABST (INT_NUM_IM3_IRL0 + 12)
+/* ASC1 Autobaud Detection Interrupt */
+#define FALCON_IRQ_ASC1_ABDET (INT_NUM_IM3_IRL0 + 13)
+/* ASC1 Modem Status Interrupt */
+#define FALCON_IRQ_ASC1_MS (INT_NUM_IM3_IRL0 + 14)
+/* ASC1 Soft Flow Control Interrupt */
+#define FALCON_IRQ_ASC1_SFC (INT_NUM_IM3_IRL0 + 15)
+/* GPTC Timer/Counter 1A Interrupt */
+#define FALCON_IRQ_GPTC_TC1A (INT_NUM_IM3_IRL0 + 16)
+/* GPTC Timer/Counter 1B Interrupt */
+#define FALCON_IRQ_GPTC_TC1B (INT_NUM_IM3_IRL0 + 17)
+/* GPTC Timer/Counter 2A Interrupt */
+#define FALCON_IRQ_GPTC_TC2A (INT_NUM_IM3_IRL0 + 18)
+/* GPTC Timer/Counter 2B Interrupt */
+#define FALCON_IRQ_GPTC_TC2B (INT_NUM_IM3_IRL0 + 19)
+/* GPTC Timer/Counter 3A Interrupt */
+#define FALCON_IRQ_GPTC_TC3A (INT_NUM_IM3_IRL0 + 20)
+/* GPTC Timer/Counter 3B Interrupt */
+#define FALCON_IRQ_GPTC_TC3B (INT_NUM_IM3_IRL0 + 21)
+/* DFEV0, Channel 1 Transmit Interrupt */
+#define FALCON_IRQ_DFEV0_2TX (INT_NUM_IM3_IRL0 + 26)
+/* DFEV0, Channel 1 Receive Interrupt */
+#define FALCON_IRQ_DFEV0_2RX (INT_NUM_IM3_IRL0 + 27)
+/* DFEV0, Channel 1 General Purpose Interrupt */
+#define FALCON_IRQ_DFEV0_2GP (INT_NUM_IM3_IRL0 + 28)
+/* DFEV0, Channel 0 Transmit Interrupt */
+#define FALCON_IRQ_DFEV0_1TX (INT_NUM_IM3_IRL0 + 29)
+/* DFEV0, Channel 0 Receive Interrupt */
+#define FALCON_IRQ_DFEV0_1RX (INT_NUM_IM3_IRL0 + 30)
+/* DFEV0, Channel 0 General Purpose Interrupt */
+#define FALCON_IRQ_DFEV0_1GP (INT_NUM_IM3_IRL0 + 31)
+
+/* ICTRLL 0 Error */
+#define FALCON_IRQ_ICTRLL0_ERR (INT_NUM_IM4_IRL0 + 0)
+/* ICTRLL 1 Error */
+#define FALCON_IRQ_ICTRLL1_ERR (INT_NUM_IM4_IRL0 + 1)
+/* ICTRLL 2 Error */
+#define FALCON_IRQ_ICTRLL2_ERR (INT_NUM_IM4_IRL0 + 2)
+/* ICTRLL 3 Error */
+#define FALCON_IRQ_ICTRLL3_ERR (INT_NUM_IM4_IRL0 + 3)
+/* OCTRLL 0 Error */
+#define FALCON_IRQ_OCTRLL0_ERR (INT_NUM_IM4_IRL0 + 4)
+/* OCTRLL 1 Error */
+#define FALCON_IRQ_OCTRLL1_ERR (INT_NUM_IM4_IRL0 + 5)
+/* OCTRLL 2 Error */
+#define FALCON_IRQ_OCTRLL2_ERR (INT_NUM_IM4_IRL0 + 6)
+/* OCTRLL 3 Error */
+#define FALCON_IRQ_OCTRLL3_ERR (INT_NUM_IM4_IRL0 + 7)
+/* ICTRLG Error */
+#define FALCON_IRQ_ICTRLG_ERR (INT_NUM_IM4_IRL0 + 8)
+/* OCTRLG Error */
+#define FALCON_IRQ_OCTRLG_ERR (INT_NUM_IM4_IRL0 + 9)
+/* IQM Error */
+#define FALCON_IRQ_IQM_ERR (INT_NUM_IM4_IRL0 + 10)
+/* FSQM Error */
+#define FALCON_IRQ_FSQM_ERR (INT_NUM_IM4_IRL0 + 11)
+/* TMU Error */
+#define FALCON_IRQ_TMU_ERR (INT_NUM_IM4_IRL0 + 12)
+/* MPS Status Interrupt #0 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR0 (INT_NUM_IM4_IRL0 + 14)
+/* MPS Status Interrupt #1 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR1 (INT_NUM_IM4_IRL0 + 15)
+/* MPS Status Interrupt #2 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR2 (INT_NUM_IM4_IRL0 + 16)
+/* MPS Status Interrupt #3 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR3 (INT_NUM_IM4_IRL0 + 17)
+/* MPS Status Interrupt #4 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR4 (INT_NUM_IM4_IRL0 + 18)
+/* MPS Status Interrupt #5 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR5 (INT_NUM_IM4_IRL0 + 19)
+/* MPS Status Interrupt #6 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR6 (INT_NUM_IM4_IRL0 + 20)
+/* MPS Status Interrupt #7 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR7 (INT_NUM_IM4_IRL0 + 21)
+/* MPS Status Interrupt #8 (VPE1 to VPE0) */
+#define FALCON_IRQ_MPS_IR8 (INT_NUM_IM4_IRL0 + 22)
+/* VPE0 Exception Level Flag Interrupt */
+#define FALCON_IRQ_VPE0_EXL (INT_NUM_IM4_IRL0 + 29)
+/* VPE0 Error Level Flag Interrupt */
+#define FALCON_IRQ_VPE0_ERL (INT_NUM_IM4_IRL0 + 30)
+/* VPE0 Performance Monitoring Counter Interrupt */
+#define FALCON_IRQ_VPE0_PMCIR (INT_NUM_IM4_IRL0 + 31)
+
+#endif /* _FALCON_IRQ__ */
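
The numbers above are ordinary kernel IRQ numbers (eight MIPS core interrupts followed by five 32-line interrupt modules), so drivers consume them through the standard API. A minimal sketch, assuming a hypothetical SSC receive handler:

	#include <linux/interrupt.h>

	static irqreturn_t example_ssc_irq(int irq, void *dev_id)
	{
		/* handle the SSC receive event here */
		return IRQ_HANDLED;
	}

	/* hook the Falcon SSC receive line defined above */
	int err = request_irq(FALCON_IRQ_SSC_R, example_ssc_irq, 0, "ssc_rx", NULL);
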
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644
index 0000000000..2caccd9f9d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/irq.h
@@ -0,0 +1,18 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#ifndef __FALCON_IRQ_H
+#define __FALCON_IRQ_H
+
+#include <falcon_irq.h>
+
+#define NR_IRQS 328
+
+#include_next <irq.h>
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644
index 0000000000..fff5ecdbea
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -0,0 +1,152 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_FALCON_H__
+#define _LTQ_FALCON_H__
+
+#ifdef CONFIG_SOC_FALCON
+
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_FALCON 0x01B8
+
+/* SoC Types */
+#define SOC_TYPE_FALCON 0x01
+
+/* ASC0/1 - serial port */
+#define LTQ_ASC0_BASE_ADDR 0x1E100C00
+#define LTQ_ASC1_BASE_ADDR 0x1E100B00
+#define LTQ_ASC_SIZE 0x100
+
+#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8))
+#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1)
+#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2)
+
+/*
+ * during early_printk no ioremap possible at this early stage
+ * lets use KSEG1 instead
+ */
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+/* ICU - interrupt control unit */
+#define LTQ_ICU_BASE_ADDR 0x1F880200
+#define LTQ_ICU_SIZE 0x100
+
+/* WDT */
+#define LTQ_WDT_BASE_ADDR 0x1F8803F0
+#define LTQ_WDT_SIZE 0x10
+
+#define LTQ_RST_CAUSE_WDTRST 0x0002
+
+/* EBU - external bus unit */
+#define LTQ_EBU_BASE_ADDR 0x18000000
+#define LTQ_EBU_SIZE 0x0100
+
+#define LTQ_EBU_MODCON 0x000C
+
+/* GPIO */
+#define LTQ_GPIO0_BASE_ADDR 0x1D810000
+#define LTQ_GPIO0_SIZE 0x0080
+#define LTQ_GPIO1_BASE_ADDR 0x1E800100
+#define LTQ_GPIO1_SIZE 0x0080
+#define LTQ_GPIO2_BASE_ADDR 0x1D810100
+#define LTQ_GPIO2_SIZE 0x0080
+#define LTQ_GPIO3_BASE_ADDR 0x1E800200
+#define LTQ_GPIO3_SIZE 0x0080
+#define LTQ_GPIO4_BASE_ADDR 0x1E800300
+#define LTQ_GPIO4_SIZE 0x0080
+#define LTQ_PADCTRL0_BASE_ADDR 0x1DB01000
+#define LTQ_PADCTRL0_SIZE 0x0100
+#define LTQ_PADCTRL1_BASE_ADDR 0x1E800400
+#define LTQ_PADCTRL1_SIZE 0x0100
+#define LTQ_PADCTRL2_BASE_ADDR 0x1DB02000
+#define LTQ_PADCTRL2_SIZE 0x0100
+#define LTQ_PADCTRL3_BASE_ADDR 0x1E800500
+#define LTQ_PADCTRL3_SIZE 0x0100
+#define LTQ_PADCTRL4_BASE_ADDR 0x1E800600
+#define LTQ_PADCTRL4_SIZE 0x0100
+
+/* I2C */
+#define GPON_I2C_BASE 0x1E200000
+#define GPON_I2C_SIZE 0x00010000
+
+/* CHIP ID */
+#define LTQ_STATUS_BASE_ADDR 0x1E802000
+
+#define LTQ_FALCON_CHIPID ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
+#define LTQ_FALCON_CHIPTYPE ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
+#define LTQ_FALCON_CHIPCONF ((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
+
+/* SYSCTL - start/stop/restart/configure/... different parts of the Soc */
+#define LTQ_SYS1_BASE_ADDR 0x1EF00000
+#define LTQ_SYS1_SIZE 0x0100
+#define LTQ_STATUS_BASE_ADDR 0x1E802000
+#define LTQ_STATUS_SIZE 0x0080
+#define LTQ_SYS_ETH_BASE_ADDR 0x1DB00000
+#define LTQ_SYS_ETH_SIZE 0x0100
+#define LTQ_SYS_GPE_BASE_ADDR 0x1D700000
+#define LTQ_SYS_GPE_SIZE 0x0100
+
+#define SYSCTL_SYS1 0
+#define SYSCTL_SYSETH 1
+#define SYSCTL_SYSGPE 2
+
+/* Activation Status Register */
+#define ACTS_ASC1_ACT 0x00000800
+#define ACTS_I2C_ACT 0x00004000
+#define ACTS_P0 0x00010000
+#define ACTS_P1 0x00010000
+#define ACTS_P2 0x00020000
+#define ACTS_P3 0x00020000
+#define ACTS_P4 0x00040000
+#define ACTS_PADCTRL0 0x00100000
+#define ACTS_PADCTRL1 0x00100000
+#define ACTS_PADCTRL2 0x00200000
+#define ACTS_PADCTRL3 0x00200000
+#define ACTS_PADCTRL4 0x00400000
+#define ACTS_I2C_ACT 0x00004000
+
+/* global register ranges */
+extern __iomem void *ltq_ebu_membase;
+extern __iomem void *ltq_sys1_membase;
+#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
+#define ltq_ebu_w32_mask(clear, set, reg) \
+ ltq_ebu_w32((ltq_ebu_r32(reg) & ~(clear)) | (set), reg)
+
+#define ltq_sys1_w32(x, y) ltq_w32((x), ltq_sys1_membase + (y))
+#define ltq_sys1_r32(x) ltq_r32(ltq_sys1_membase + (x))
+#define ltq_sys1_w32_mask(clear, set, reg) \
+ ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
+
+/* gpio wrapper to help configure the pin muxing */
+extern int ltq_gpio_mux_set(unsigned int pin, unsigned int mux);
+
+/* to keep the irq code generic we need to define these to 0 as falcon
+ has no EIU/EBU */
+#define LTQ_EIU_BASE_ADDR 0
+#define LTQ_EBU_PCC_ISTAT 0
+
+static inline int ltq_is_ar9(void)
+{
+ return 0;
+}
+
+static inline int ltq_is_vr9(void)
+{
+ return 0;
+}
+
+static inline int ltq_is_falcon(void)
+{
+ return 1;
+}
+
+#endif /* CONFIG_SOC_FALCON */
+#endif /* _LTQ_FALCON_H__ */
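
The ltq_ebu_* and ltq_sys1_* helpers above implement a plain read-modify-write on the remapped register windows. A minimal usage sketch with arbitrary illustrative mask/set values (not taken from any driver in this patch):

	/* clear bits [1:0] of the EBU module configuration register, then set bit 0 */
	ltq_ebu_w32_mask(0x3, 0x1, LTQ_EBU_MODCON);
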
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/lantiq_timer.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/lantiq_timer.h
new file mode 100644
index 0000000000..ef564abc3a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/lantiq_timer.h
@@ -0,0 +1,155 @@
+#ifndef __DANUBE_GPTU_DEV_H__2005_07_26__10_19__
+#define __DANUBE_GPTU_DEV_H__2005_07_26__10_19__
+
+
+/******************************************************************************
+ Copyright (c) 2002, Infineon Technologies. All rights reserved.
+
+ No Warranty
+ Because the program is licensed free of charge, there is no warranty for
+ the program, to the extent permitted by applicable law. Except when
+ otherwise stated in writing the copyright holders and/or other parties
+ provide the program "as is" without warranty of any kind, either
+ expressed or implied, including, but not limited to, the implied
+ warranties of merchantability and fitness for a particular purpose. The
+ entire risk as to the quality and performance of the program is with
+ you. should the program prove defective, you assume the cost of all
+ necessary servicing, repair or correction.
+
+ In no event unless required by applicable law or agreed to in writing
+ will any copyright holder, or any other party who may modify and/or
+ redistribute the program as permitted above, be liable to you for
+ damages, including any general, special, incidental or consequential
+ damages arising out of the use or inability to use the program
+ (including but not limited to loss of data or data being rendered
+ inaccurate or losses sustained by you or third parties or a failure of
+ the program to operate with any other programs), even if such holder or
+ other party has been advised of the possibility of such damages.
+******************************************************************************/
+
+
+/*
+ * ####################################
+ * Definition
+ * ####################################
+ */
+
+/*
+ * Available Timer/Counter Index
+ */
+#define TIMER(n, X) (n * 2 + (X ? 1 : 0))
+#define TIMER_ANY 0x00
+#define TIMER1A TIMER(1, 0)
+#define TIMER1B TIMER(1, 1)
+#define TIMER2A TIMER(2, 0)
+#define TIMER2B TIMER(2, 1)
+#define TIMER3A TIMER(3, 0)
+#define TIMER3B TIMER(3, 1)
+
+/*
+ * Flag of Timer/Counter
+ * These flags specify the way in which timer is configured.
+ */
+/* Bit size of timer/counter. */
+#define TIMER_FLAG_16BIT 0x0000
+#define TIMER_FLAG_32BIT 0x0001
+/* Switch between timer and counter. */
+#define TIMER_FLAG_TIMER 0x0000
+#define TIMER_FLAG_COUNTER 0x0002
+/* Stop or continue when overflowing/underflowing. */
+#define TIMER_FLAG_ONCE 0x0000
+#define TIMER_FLAG_CYCLIC 0x0004
+/* Count up or count down. */
+#define TIMER_FLAG_UP 0x0000
+#define TIMER_FLAG_DOWN 0x0008
+/* Count on specific level or edge. */
+#define TIMER_FLAG_HIGH_LEVEL_SENSITIVE 0x0000
+#define TIMER_FLAG_LOW_LEVEL_SENSITIVE 0x0040
+#define TIMER_FLAG_RISE_EDGE 0x0010
+#define TIMER_FLAG_FALL_EDGE 0x0020
+#define TIMER_FLAG_ANY_EDGE 0x0030
+/* Signal is synchronous to module clock or not. */
+#define TIMER_FLAG_UNSYNC 0x0000
+#define TIMER_FLAG_SYNC 0x0080
+/* Different interrupt handle type. */
+#define TIMER_FLAG_NO_HANDLE 0x0000
+#if defined(__KERNEL__)
+ #define TIMER_FLAG_CALLBACK_IN_IRQ 0x0100
+#endif // defined(__KERNEL__)
+#define TIMER_FLAG_SIGNAL 0x0300
+/* Internal clock source or external clock source */
+#define TIMER_FLAG_INT_SRC 0x0000
+#define TIMER_FLAG_EXT_SRC 0x1000
+
+
+/*
+ * ioctl Command
+ */
+#define GPTU_REQUEST_TIMER 0x01 /* General method to setup timer/counter. */
+#define GPTU_FREE_TIMER 0x02 /* Free timer/counter. */
+#define GPTU_START_TIMER 0x03 /* Start or resume timer/counter. */
+#define GPTU_STOP_TIMER 0x04 /* Suspend timer/counter. */
+#define GPTU_GET_COUNT_VALUE 0x05 /* Get current count value. */
+#define GPTU_CALCULATE_DIVIDER 0x06 /* Calculate timer divider from given freq.*/
+#define GPTU_SET_TIMER 0x07 /* Simplified method to setup timer. */
+#define GPTU_SET_COUNTER 0x08 /* Simplified method to setup counter. */
+
+/*
+ * Data Type Used to Call ioctl
+ */
+struct gptu_ioctl_param {
+ unsigned int timer; /* In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and *
+ * GPTU_SET_COUNTER, this field is ID of expected *
+ * timer/counter. If it's zero, a timer/counter would *
+ * be dynamically allocated and ID would be stored in *
+ * this field. *
+ * In command GPTU_GET_COUNT_VALUE, this field is *
+ * ignored. *
+ * In other command, this field is ID of timer/counter *
+ * allocated. */
+ unsigned int flag; /* In command GPTU_REQUEST_TIMER, GPTU_SET_TIMER, and *
+ * GPTU_SET_COUNTER, this field contains flags to *
+ * specify how to configure timer/counter. *
+ * In command GPTU_START_TIMER, zero indicate start *
+ * and non-zero indicate resume timer/counter. *
+ * In other command, this field is ignored. */
+ unsigned long value; /* In command GPTU_REQUEST_TIMER, this field contains *
+ * init/reload value. *
+ * In command GPTU_SET_TIMER, this field contains *
+ * frequency (0.001Hz) of timer. *
+ * In command GPTU_GET_COUNT_VALUE, current count *
+ * value would be stored in this field. *
+ * In command GPTU_CALCULATE_DIVIDER, this field *
+ * contains frequency wanted, and after calculation, *
+ * divider would be stored in this field to overwrite *
+ * the frequency. *
+ * In other command, this field is ignored. */
+ int pid; /* In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER, *
+ * if signal is required, this field contains process *
+ * ID to which signal would be sent. *
+ * In other command, this field is ignored. */
+ int sig; /* In command GPTU_REQUEST_TIMER and GPTU_SET_TIMER, *
+ * if signal is required, this field contains signal *
+ * number which would be sent. *
+ * In other command, this field is ignored. */
+};
+
+/*
+ * ####################################
+ * Data Type
+ * ####################################
+ */
+typedef void (*timer_callback)(unsigned long arg);
+
+extern int lq_request_timer(unsigned int, unsigned int, unsigned long, unsigned long, unsigned long);
+extern int lq_free_timer(unsigned int);
+extern int lq_start_timer(unsigned int, int);
+extern int lq_stop_timer(unsigned int);
+extern int lq_reset_counter_flags(u32 timer, u32 flags);
+extern int lq_get_count_value(unsigned int, unsigned long *);
+extern u32 lq_cal_divider(unsigned long);
+extern int lq_set_timer(unsigned int, unsigned int, int, int, unsigned int, unsigned long, unsigned long);
+extern int lq_set_counter(unsigned int timer, unsigned int flag,
+ u32 reload, unsigned long arg1, unsigned long arg2);
+
+#endif /* __DANUBE_GPTU_DEV_H__2005_07_26__10_19__ */
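
The command codes and struct gptu_ioctl_param above form the driver's ioctl() interface; a minimal user-space sketch, assuming the driver exposes a character device (the /dev/ltq_gptu node name and the 100 Hz rate are illustrative assumptions):

	#include <fcntl.h>
	#include <sys/ioctl.h>

	struct gptu_ioctl_param param = {
		.timer = TIMER_ANY,			/* let the driver pick a free timer */
		.flag  = TIMER_FLAG_32BIT | TIMER_FLAG_CYCLIC,
		.value = 100 * 1000,			/* 100 Hz, expressed in 0.001 Hz units */
	};

	int fd = open("/dev/ltq_gptu", O_RDWR);		/* device node name assumed */
	if (fd >= 0)
		ioctl(fd, GPTU_SET_TIMER, &param);
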
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/base_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/base_reg.h
new file mode 100644
index 0000000000..8149f1204e
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/base_reg.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __BASE_REG_H
+#define __BASE_REG_H
+
+#ifndef KSEG1
+#define KSEG1 0xA0000000
+#endif
+
+#define LTQ_EBU_SEG1_BASE (KSEG1 + 0x10000000)
+#define LTQ_EBU_SEG2_BASE (KSEG1 + 0x11000000)
+#define LTQ_EBU_SEG3_BASE (KSEG1 + 0x12000000)
+#define LTQ_EBU_SEG4_BASE (KSEG1 + 0x13000000)
+
+#define LTQ_ASC0_BASE (KSEG1 + 0x14100100)
+#define LTQ_ASC1_BASE (KSEG1 + 0x14100200)
+
+#define LTQ_SSC0_BASE (0x14100300)
+#define LTQ_SSC1_BASE (0x14100400)
+
+#define LTQ_PORT_P0_BASE (KSEG1 + 0x14100600)
+#define LTQ_PORT_P1_BASE (KSEG1 + 0x14108100)
+#define LTQ_PORT_P2_BASE (KSEG1 + 0x14100800)
+#define LTQ_PORT_P3_BASE (KSEG1 + 0x14100900)
+#define LTQ_PORT_P4_BASE (KSEG1 + 0x1E000400)
+
+#define LTQ_EBU_BASE (KSEG1 + 0x14102000)
+#define LTQ_DMA_BASE (KSEG1 + 0x14104000)
+
+#define LTQ_ICU0_IM3_IM2_BASE (KSEG1 + 0x1E016000)
+#define LTQ_ICU0_IM5_IM4_IM1_IM0_BASE (KSEG1 + 0x14106000)
+
+#define LTQ_ES_BASE (KSEG1 + 0x18000000)
+
+#define LTQ_SYS0_BASE (KSEG1 + 0x1C000000)
+#define LTQ_SYS1_BASE (KSEG1 + 0x1C000800)
+#define LTQ_SYS2_BASE (KSEG1 + 0x1E400000)
+
+#define LTQ_L2_SPRAM_BASE (KSEG1 + 0x1F1E8000)
+
+#define LTQ_SWINT_BASE (KSEG1 + 0x1E000100)
+#define LTQ_MBS_BASE (KSEG1 + 0x1E000200)
+
+#define LTQ_STATUS_BASE (KSEG1 + 0x1E000500)
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/boot_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/boot_reg.h
new file mode 100644
index 0000000000..9c33516e8a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/boot_reg.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+******************************************************************************/
+
+#ifndef __BOOT_REG_H
+#define __BOOT_REG_H
+
+#define LTQ_BOOT_CPU_OFFSET 0x20
+
+#define LTQ_BOOT_RVEC(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x00)
+#define LTQ_BOOT_NVEC(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x04)
+#define LTQ_BOOT_EVEC(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x08)
+#define LTQ_BOOT_CP0_STATUS(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x0C)
+#define LTQ_BOOT_CP0_EPC(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x10)
+#define LTQ_BOOT_CP0_EEPC(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x14)
+#define LTQ_BOOT_SIZE(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x18) /* only for CP1 */
+#define LTQ_BOOT_RCU_SR(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x18) /* only for CP0 */
+#define LTQ_BOOT_CFG_STAT(cpu) (volatile u32*)(LTQ_L2_SPRAM_BASE + \
+ (cpu * LTQ_BOOT_CPU_OFFSET) + 0x1C)
+
+#endif
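
These macros only compute addresses inside the shared L2 scratch-pad RAM; bringing up the second VPE then amounts to filling in its vectors before releasing it from reset. A hedged sketch, where start_secondary is a placeholder entry point rather than a symbol from this patch:

	/* point CP1's reset/NMI/exception vectors at the secondary entry code */
	*LTQ_BOOT_RVEC(1) = (u32)start_secondary;	/* placeholder entry point */
	*LTQ_BOOT_NVEC(1) = (u32)start_secondary;
	*LTQ_BOOT_EVEC(1) = (u32)start_secondary;
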
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/dma_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/dma_reg.h
new file mode 100644
index 0000000000..9fde2f6dd3
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/dma_reg.h
@@ -0,0 +1,308 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __DMA_REG_H
+#define __DMA_REG_H
+
+#define dma_r32(reg) ltq_r32(&dma->reg)
+#define dma_w32(val, reg) ltq_w32(val, &dma->reg)
+#define dma_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &dma->reg)
+
+/** DMA register structure */
+struct svip_reg_dma {
+ volatile unsigned long clc; /* 0x00 */
+ volatile unsigned long reserved0; /* 0x04 */
+ volatile unsigned long id; /* 0x08 */
+ volatile unsigned long reserved1; /* 0x0c */
+ volatile unsigned long ctrl; /* 0x10 */
+ volatile unsigned long cpoll; /* 0x14 */
+ volatile unsigned long cs; /* 0x18 */
+ volatile unsigned long cctrl; /* 0x1C */
+ volatile unsigned long cdba; /* 0x20 */
+ volatile unsigned long cdlen; /* 0x24 */
+ volatile unsigned long cis; /* 0x28 */
+ volatile unsigned long cie; /* 0x2C */
+ volatile unsigned long cgbl; /* 0x30 */
+ volatile unsigned long reserved2[3]; /* 0x34 */
+ volatile unsigned long ps; /* 0x40 */
+ volatile unsigned long pctrl; /* 0x44 */
+ volatile unsigned long reserved3[43]; /* 0x48 */
+ volatile unsigned long irnen; /* 0xF4 */
+ volatile unsigned long irncr; /* 0xF8 */
+ volatile unsigned long irnicr; /* 0xFC */
+};
+
+/*******************************************************************************
+ * CLC Register
+ ******************************************************************************/
+
+/* Fast Shut-Off Enable Bit (5) */
+#define DMA_CLC_FSOE (0x1 << 5)
+#define DMA_CLC_FSOE_VAL(val) (((val) & 0x1) << 5)
+#define DMA_CLC_FSOE_GET(val) ((((val) & DMA_CLC_FSOE) >> 5) & 0x1)
+#define DMA_CLC_FSOE_SET(reg,val) (reg) = ((reg & ~DMA_CLC_FSOE) | (((val) & 0x1) << 5))
+/* Suspend Bit Write Enable for OCDS (4) */
+#define DMA_CLC_SBWE (0x1 << 4)
+#define DMA_CLC_SBWE_VAL(val) (((val) & 0x1) << 4)
+#define DMA_CLC_SBWE_SET(reg,val) (reg) = ((reg & ~DMA_CLC_SBWE) | (((val) & 0x1) << 4))
+/* External Request Disable (3) */
+#define DMA_CLC_EDIS (0x1 << 3)
+#define DMA_CLC_EDIS_VAL(val) (((val) & 0x1) << 3)
+#define DMA_CLC_EDIS_GET(val) ((((val) & DMA_CLC_EDIS) >> 3) & 0x1)
+#define DMA_CLC_EDIS_SET(reg,val) (reg) = ((reg & ~DMA_CLC_EDIS) | (((val) & 0x1) << 3))
+/* Suspend Enable Bit for OCDS (2) */
+#define DMA_CLC_SPEN (0x1 << 2)
+#define DMA_CLC_SPEN_VAL(val) (((val) & 0x1) << 2)
+#define DMA_CLC_SPEN_GET(val) ((((val) & DMA_CLC_SPEN) >> 2) & 0x1)
+#define DMA_CLC_SPEN_SET(reg,val) (reg) = ((reg & ~DMA_CLC_SPEN) | (((val) & 0x1) << 2))
+/* Disable Status Bit (1) */
+#define DMA_CLC_DISS (0x1 << 1)
+#define DMA_CLC_DISS_GET(val) ((((val) & DMA_CLC_DISS) >> 1) & 0x1)
+/* Disable Request Bit (0) */
+#define DMA_CLC_DISR (0x1)
+#define DMA_CLC_DISR_VAL(val) (((val) & 0x1) << 0)
+#define DMA_CLC_DISR_GET(val) ((((val) & DMA_CLC_DISR) >> 0) & 0x1)
+#define DMA_CLC_DISR_SET(reg,val) (reg) = ((reg & ~DMA_CLC_DISR) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * ID Register
+ ******************************************************************************/
+
+/* Number of Channels (25:20) */
+#define DMA_ID_CHNR (0x3f << 20)
+#define DMA_ID_CHNR_GET(val) ((((val) & DMA_ID_CHNR) >> 20) & 0x3f)
+/* Number of Ports (19:16) */
+#define DMA_ID_PRTNR (0xf << 16)
+#define DMA_ID_PRTNR_GET(val) ((((val) & DMA_ID_PRTNR) >> 16) & 0xf)
+/* Module ID (15:8) */
+#define DMA_ID_ID (0xff << 8)
+#define DMA_ID_ID_GET(val) ((((val) & DMA_ID_ID) >> 8) & 0xff)
+/* Revision (4:0) */
+#define DMA_ID_REV (0x1f)
+#define DMA_ID_REV_GET(val) ((((val) & DMA_ID_REV) >> 0) & 0x1f)
+
+/*******************************************************************************
+ * Control Register
+ ******************************************************************************/
+
+/* Global Software Reset (0) */
+#define DMA_CTRL_RST (0x1)
+#define DMA_CTRL_RST_GET(val) ((((val) & DMA_CTRL_RST) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Channel Polling Register
+ ******************************************************************************/
+
+/* Enable (31) */
+#define DMA_CPOLL_EN (0x1 << 31)
+#define DMA_CPOLL_EN_VAL(val) (((val) & 0x1) << 31)
+#define DMA_CPOLL_EN_GET(val) ((((val) & DMA_CPOLL_EN) >> 31) & 0x1)
+#define DMA_CPOLL_EN_SET(reg,val) (reg) = ((reg & ~DMA_CPOLL_EN) | (((val) & 0x1) << 31))
+/* Counter (15:4) */
+#define DMA_CPOLL_CNT (0xfff << 4)
+#define DMA_CPOLL_CNT_VAL(val) (((val) & 0xfff) << 4)
+#define DMA_CPOLL_CNT_GET(val) ((((val) & DMA_CPOLL_CNT) >> 4) & 0xfff)
+#define DMA_CPOLL_CNT_SET(reg,val) (reg) = ((reg & ~DMA_CPOLL_CNT) | (((val) & 0xfff) << 4))
+
+/*******************************************************************************
+ * Global Buffer Length Register
+ ******************************************************************************/
+
+/* Global Buffer Length (15:0) */
+#define DMA_CGBL_GBL (0xffff)
+#define DMA_CGBL_GBL_VAL(val) (((val) & 0xffff) << 0)
+#define DMA_CGBL_GBL_GET(val) ((((val) & DMA_CGBL_GBL) >> 0) & 0xffff)
+#define DMA_CGBL_GBL_SET(reg,val) (reg) = ((reg & ~DMA_CGBL_GBL) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * Channel Select Register
+ ******************************************************************************/
+
+/* Channel Selection (4:0) */
+#define DMA_CS_CS (0x1f)
+#define DMA_CS_CS_VAL(val) (((val) & 0x1f) << 0)
+#define DMA_CS_CS_GET(val) ((((val) & DMA_CS_CS) >> 0) & 0x1f)
+#define DMA_CS_CS_SET(reg,val) (reg) = ((reg & ~DMA_CS_CS) | (((val) & 0x1f) << 0))
+
+/*******************************************************************************
+ * Channel Control Register
+ ******************************************************************************/
+
+/* Peripheral to Peripheral Copy (24) */
+#define DMA_CCTRL_P2PCPY (0x1 << 24)
+#define DMA_CCTRL_P2PCPY_VAL(val) (((val) & 0x1) << 24)
+#define DMA_CCTRL_P2PCPY_GET(val) ((((val) & DMA_CCTRL_P2PCPY) >> 24) & 0x1)
+#define DMA_CCTRL_P2PCPY_SET(reg,val) (reg) = ((reg & ~DMA_CCTRL_P2PCPY) | (((val) & 0x1) << 24))
+/* Channel Weight for Transmit Direction (17:16) */
+#define DMA_CCTRL_TXWGT (0x3 << 16)
+#define DMA_CCTRL_TXWGT_VAL(val) (((val) & 0x3) << 16)
+#define DMA_CCTRL_TXWGT_GET(val) ((((val) & DMA_CCTRL_TXWGT) >> 16) & 0x3)
+#define DMA_CCTRL_TXWGT_SET(reg,val) (reg) = ((reg & ~DMA_CCTRL_TXWGT) | (((val) & 0x3) << 16))
+/* Port Assignment (13:11) */
+#define DMA_CCTRL_PRTNR (0x7 << 11)
+#define DMA_CCTRL_PRTNR_GET(val) ((((val) & DMA_CCTRL_PRTNR) >> 11) & 0x7)
+/* Class (10:9) */
+#define DMA_CCTRL_CLASS (0x3 << 9)
+#define DMA_CCTRL_CLASS_VAL(val) (((val) & 0x3) << 9)
+#define DMA_CCTRL_CLASS_GET(val) ((((val) & DMA_CCTRL_CLASS) >> 9) & 0x3)
+#define DMA_CCTRL_CLASS_SET(reg,val) (reg) = ((reg & ~DMA_CCTRL_CLASS) | (((val) & 0x3) << 9))
+/* Direction (8) */
+#define DMA_CCTRL_DIR (0x1 << 8)
+#define DMA_CCTRL_DIR_GET(val) ((((val) & DMA_CCTRL_DIR) >> 8) & 0x1)
+/* Reset (1) */
+#define DMA_CCTRL_RST (0x1 << 1)
+#define DMA_CCTRL_RST_VAL(val) (((val) & 0x1) << 1)
+#define DMA_CCTRL_RST_GET(val) ((((val) & DMA_CCTRL_RST) >> 1) & 0x1)
+#define DMA_CCTRL_RST_SET(reg,val) (reg) = ((reg & ~DMA_CCTRL_RST) | (((val) & 0x1) << 1))
+/* Channel On or Off (0) */
+#define DMA_CCTRL_ON_OFF (0x1)
+#define DMA_CCTRL_ON_OFF_VAL(val) (((val) & 0x1) << 0)
+#define DMA_CCTRL_ON_OFF_GET(val) ((((val) & DMA_CCTRL_ON_OFF) >> 0) & 0x1)
+#define DMA_CCTRL_ON_OFF_SET(reg,val) (reg) = ((reg & ~DMA_CCTRL_ON_OFF) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Channel Descriptor Base Address Register
+ ******************************************************************************/
+
+/* Channel Descriptor Base Address (29:3) */
+#define DMA_CDBA_CDBA (0x7ffffff << 3)
+#define DMA_CDBA_CDBA_VAL(val) (((val) & 0x7ffffff) << 3)
+#define DMA_CDBA_CDBA_GET(val) ((((val) & DMA_CDBA_CDBA) >> 3) & 0x7ffffff)
+#define DMA_CDBA_CDBA_SET(reg,val) (reg) = ((reg & ~DMA_CDBA_CDBA) | (((val) & 0x7ffffff) << 3))
+
+/*******************************************************************************
+ * Channel Descriptor Length Register
+ ******************************************************************************/
+
+/* Channel Descriptor Length (7:0) */
+#define DMA_CDLEN_CDLEN (0xff)
+#define DMA_CDLEN_CDLEN_VAL(val) (((val) & 0xff) << 0)
+#define DMA_CDLEN_CDLEN_GET(val) ((((val) & DMA_CDLEN_CDLEN) >> 0) & 0xff)
+#define DMA_CDLEN_CDLEN_SET(reg,val) (reg) = ((reg & ~DMA_CDLEN_CDLEN) | (((val) & 0xff) << 0))
+
+/*******************************************************************************
+ * Channel Interrupt Status Register
+ ******************************************************************************/
+
+/* SAI Read Error Interrupt (5) */
+#define DMA_CIS_RDERR (0x1 << 5)
+#define DMA_CIS_RDERR_GET(val) ((((val) & DMA_CIS_RDERR) >> 5) & 0x1)
+/* Channel Off Interrupt (4) */
+#define DMA_CIS_CHOFF (0x1 << 4)
+#define DMA_CIS_CHOFF_GET(val) ((((val) & DMA_CIS_CHOFF) >> 4) & 0x1)
+/* Descriptor Complete Interrupt (3) */
+#define DMA_CIS_DESCPT (0x1 << 3)
+#define DMA_CIS_DESCPT_GET(val) ((((val) & DMA_CIS_DESCPT) >> 3) & 0x1)
+/* Descriptor Under-Run Interrupt (2) */
+#define DMA_CIS_DUR (0x1 << 2)
+#define DMA_CIS_DUR_GET(val) ((((val) & DMA_CIS_DUR) >> 2) & 0x1)
+/* End of Packet Interrupt (1) */
+#define DMA_CIS_EOP (0x1 << 1)
+#define DMA_CIS_EOP_GET(val) ((((val) & DMA_CIS_EOP) >> 1) & 0x1)
+
+/*******************************************************************************
+ * Channel Interrupt Enable Register
+ ******************************************************************************/
+
+/* SAI Read Error Interrupt (5) */
+#define DMA_CIE_RDERR (0x1 << 5)
+#define DMA_CIE_RDERR_GET(val) ((((val) & DMA_CIE_RDERR) >> 5) & 0x1)
+/* Channel Off Interrupt (4) */
+#define DMA_CIE_CHOFF (0x1 << 4)
+#define DMA_CIE_CHOFF_GET(val) ((((val) & DMA_CIE_CHOFF) >> 4) & 0x1)
+/* Descriptor Complete Interrupt Enable (3) */
+#define DMA_CIE_DESCPT (0x1 << 3)
+#define DMA_CIE_DESCPT_GET(val) ((((val) & DMA_CIE_DESCPT) >> 3) & 0x1)
+/* Descriptor Under-Run Interrupt Enable (2) */
+#define DMA_CIE_DUR (0x1 << 2)
+#define DMA_CIE_DUR_GET(val) ((((val) & DMA_CIE_DUR) >> 2) & 0x1)
+/* End of Packet Interrupt Enable (1) */
+#define DMA_CIE_EOP (0x1 << 1)
+#define DMA_CIE_EOP_GET(val) ((((val) & DMA_CIE_EOP) >> 1) & 0x1)
+
+/*******************************************************************************
+ * Port Select Register
+ ******************************************************************************/
+
+/* Port Selection (2:0) */
+#define DMA_PS_PS (0x7)
+#define DMA_PS_PS_VAL(val) (((val) & 0x7) << 0)
+#define DMA_PS_PS_GET(val) ((((val) & DMA_PS_PS) >> 0) & 0x7)
+#define DMA_PS_PS_SET(reg,val) (reg) = ((reg & ~DMA_PS_PS) | (((val) & 0x7) << 0))
+
+/*******************************************************************************
+ * Port Control Register
+ ******************************************************************************/
+
+/* General Purpose Control (16) */
+#define DMA_PCTRL_GPC (0x1 << 16)
+#define DMA_PCTRL_GPC_VAL(val) (((val) & 0x1) << 16)
+#define DMA_PCTRL_GPC_GET(val) ((((val) & DMA_PCTRL_GPC) >> 16) & 0x1)
+#define DMA_PCTRL_GPC_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_GPC) | (((val) & 0x1) << 16))
+/* Port Weight for Transmit Direction (14:12) */
+#define DMA_PCTRL_TXWGT (0x7 << 12)
+#define DMA_PCTRL_TXWGT_VAL(val) (((val) & 0x7) << 12)
+#define DMA_PCTRL_TXWGT_GET(val) ((((val) & DMA_PCTRL_TXWGT) >> 12) & 0x7)
+#define DMA_PCTRL_TXWGT_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_TXWGT) | (((val) & 0x7) << 12))
+/* Endianness for Transmit Direction (11:10) */
+#define DMA_PCTRL_TXENDI (0x3 << 10)
+#define DMA_PCTRL_TXENDI_VAL(val) (((val) & 0x3) << 10)
+#define DMA_PCTRL_TXENDI_GET(val) ((((val) & DMA_PCTRL_TXENDI) >> 10) & 0x3)
+#define DMA_PCTRL_TXENDI_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_TXENDI) | (((val) & 0x3) << 10))
+/* Endianness for Receive Direction (9:8) */
+#define DMA_PCTRL_RXENDI (0x3 << 8)
+#define DMA_PCTRL_RXENDI_VAL(val) (((val) & 0x3) << 8)
+#define DMA_PCTRL_RXENDI_GET(val) ((((val) & DMA_PCTRL_RXENDI) >> 8) & 0x3)
+#define DMA_PCTRL_RXENDI_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_RXENDI) | (((val) & 0x3) << 8))
+/* Packet Drop Enable (6) */
+#define DMA_PCTRL_PDEN (0x1 << 6)
+#define DMA_PCTRL_PDEN_VAL(val) (((val) & 0x1) << 6)
+#define DMA_PCTRL_PDEN_GET(val) ((((val) & DMA_PCTRL_PDEN) >> 6) & 0x1)
+#define DMA_PCTRL_PDEN_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_PDEN) | (((val) & 0x1) << 6))
+/* Burst Length for Transmit Direction (5:4) */
+#define DMA_PCTRL_TXBL (0x3 << 4)
+#define DMA_PCTRL_TXBL_VAL(val) (((val) & 0x3) << 4)
+#define DMA_PCTRL_TXBL_GET(val) ((((val) & DMA_PCTRL_TXBL) >> 4) & 0x3)
+#define DMA_PCTRL_TXBL_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_TXBL) | (((val) & 0x3) << 4))
+/* Burst Length for Receive Direction (3:2) */
+#define DMA_PCTRL_RXBL (0x3 << 2)
+#define DMA_PCTRL_RXBL_VAL(val) (((val) & 0x3) << 2)
+#define DMA_PCTRL_RXBL_GET(val) ((((val) & DMA_PCTRL_RXBL) >> 2) & 0x3)
+#define DMA_PCTRL_RXBL_SET(reg,val) (reg) = ((reg & ~DMA_PCTRL_RXBL) | (((val) & 0x3) << 2))
+
+/*******************************************************************************
+ * DMA_IRNEN Register
+ ******************************************************************************/
+
+/* Channel x Interrupt Request Enable (23) */
+#define DMA_IRNEN_CH23 (0x1 << 23)
+#define DMA_IRNEN_CH23_VAL(val) (((val) & 0x1) << 23)
+#define DMA_IRNEN_CH23_GET(val) ((((val) & DMA_IRNEN_CH23) >> 23) & 0x1)
+#define DMA_IRNEN_CH23_SET(reg,val) (reg) = ((reg & ~DMA_IRNEN_CH23) | (((val) & 0x1) << 23))
+
+/*******************************************************************************
+ * DMA_IRNCR Register
+ ******************************************************************************/
+
+/* Channel x Interrupt (23) */
+#define DMA_IRNCR_CH23 (0x1 << 23)
+#define DMA_IRNCR_CH23_GET(val) ((((val) & DMA_IRNCR_CH23) >> 23) & 0x1)
+
+/*******************************************************************************
+ * DMA_IRNICR Register
+ ******************************************************************************/
+
+/* Channel x Interrupt Request (23) */
+#define DMA_IRNICR_CH23 (0x1 << 23)
+#define DMA_IRNICR_CH23_GET(val) ((((val) & DMA_IRNICR_CH23) >> 23) & 0x1)
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ebu_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ebu_reg.h
new file mode 100644
index 0000000000..4e00d01bae
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ebu_reg.h
@@ -0,0 +1,615 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __EBU_REG_H
+#define __EBU_REG_H
+
+#define ebu_r32(reg) ltq_r32(&ebu->reg)
+#define ebu_w32(val, reg) ltq_w32(val, &ebu->reg)
+#define ebu_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &ebu->reg)
+
+/** EBU register structure */
+struct svip_reg_ebu {
+ volatile unsigned long clc; /* 0x0000 */
+ volatile unsigned long reserved0; /* 0x04 */
+ volatile unsigned long id; /* 0x0008 */
+ volatile unsigned long reserved1; /* 0x0c */
+ volatile unsigned long con; /* 0x0010 */
+ volatile unsigned long reserved2[3]; /* 0x14 */
+ volatile unsigned long addr_sel_0; /* 0x0020 */
+ volatile unsigned long addr_sel_1; /* 0x0024 */
+ volatile unsigned long addr_sel_2; /* 0x0028 */
+ volatile unsigned long addr_sel_3; /* 0x002c */
+ volatile unsigned long reserved3[12]; /* 0x30 */
+ volatile unsigned long con_0; /* 0x0060 */
+ volatile unsigned long con_1; /* 0x0064 */
+ volatile unsigned long con_2; /* 0x0068 */
+ volatile unsigned long con_3; /* 0x006c */
+ volatile unsigned long reserved4[4]; /* 0x70 */
+ volatile unsigned long emu_addr; /* 0x0080 */
+ volatile unsigned long emu_bc; /* 0x0084 */
+ volatile unsigned long emu_con; /* 0x0088 */
+ volatile unsigned long reserved5; /* 0x8c */
+ volatile unsigned long pcc_con; /* 0x0090 */
+ volatile unsigned long pcc_stat; /* 0x0094 */
+ volatile unsigned long reserved6[2]; /* 0x98 */
+ volatile unsigned long pcc_istat; /* 0x00A0 */
+ volatile unsigned long pcc_ien; /* 0x00A4 */
+ volatile unsigned long pcc_int_out; /* 0x00A8 */
+ volatile unsigned long pcc_irs; /* 0x00AC */
+ volatile unsigned long nand_con; /* 0x00B0 */
+ volatile unsigned long nand_wait; /* 0x00B4 */
+ volatile unsigned long nand_ecc0; /* 0x00B8 */
+ volatile unsigned long nand_ecc_ac; /* 0x00BC */
+};
+
+/*******************************************************************************
+ * EBU
+ ******************************************************************************/
+#define LTQ_EBU_CLC ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0000))
+#define LTQ_EBU_ID ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0008))
+#define LTQ_EBU_CON ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0010))
+#define LTQ_EBU_ADDR_SEL_0 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0020))
+#define LTQ_EBU_ADDR_SEL_1 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0024))
+#define LTQ_EBU_ADDR_SEL_2 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0028))
+#define LTQ_EBU_ADDR_SEL_3 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x002c))
+#define LTQ_EBU_CON_0 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0060))
+#define LTQ_EBU_CON_1 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0064))
+#define LTQ_EBU_CON_2 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0068))
+#define LTQ_EBU_CON_3 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x006c))
+#define LTQ_EBU_EMU_BC ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0084))
+#define LTQ_EBU_PCC_CON ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0090))
+#define LTQ_EBU_PCC_STAT ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0094))
+#define LTQ_EBU_PCC_ISTAT ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00A0))
+#define LTQ_EBU_PCC_IEN ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00A4))
+#define LTQ_EBU_PCC_INT_OUT ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00A8))
+#define LTQ_EBU_PCC_IRS ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00AC))
+#define LTQ_EBU_NAND_CON ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00B0))
+#define LTQ_EBU_NAND_WAIT ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00B4))
+#define LTQ_EBU_NAND_ECC0 ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00B8))
+#define LTQ_EBU_NAND_ECC_AC ((volatile unsigned int*)(LTQ_EBU_BASE + 0x00BC))
+#define LTQ_EBU_EMU_ADDR ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0080))
+#define LTQ_EBU_EMU_CON ((volatile unsigned int*)(LTQ_EBU_BASE + 0x0088))
+
+/*******************************************************************************
+ * EBU Clock Control Register
+ ******************************************************************************/
+
+/* EBU Disable Status Bit (1) */
+#define LTQ_EBU_CLC_DISS (0x1 << 1)
+#define LTQ_EBU_CLC_DISS_GET(val) ((((val) & LTQ_EBU_CLC_DISS) >> 1) & 0x1)
+/* Used for Enable/disable Control of the EBU (0) */
+#define LTQ_EBU_CLC_DISR (0x1)
+#define LTQ_EBU_CLC_DISR_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_CLC_DISR_GET(val) ((((val) & LTQ_EBU_CLC_DISR) >> 0) & 0x1)
+#define LTQ_EBU_CLC_DISR_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CLC_DISR) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * EBU Identification Register (Internal)
+ ******************************************************************************/
+
+/* Module Number (31:8) */
+#define LTQ_EBU_ID_MODNUM (0xffffff << 8)
+#define LTQ_EBU_ID_MODNUM_GET(val) ((((val) & LTQ_EBU_ID_MODNUM) >> 8) & 0xffffff)
+/* Revision Number (7:0) */
+#define LTQ_EBU_ID_REVNUM (0xff)
+#define LTQ_EBU_ID_REVNUM_GET(val) ((((val) & LTQ_EBU_ID_REVNUM) >> 0) & 0xff)
+
+/*******************************************************************************
+ * External Bus Unit Control Register
+ ******************************************************************************/
+
+/* Driver Turn-Around Control, Chip Select Triggered (22:20) */
+#define LTQ_EBU_CON_DTACS (0x7 << 20)
+#define LTQ_EBU_CON_DTACS_VAL(val) (((val) & 0x7) << 20)
+#define LTQ_EBU_CON_DTACS_GET(val) ((((val) & LTQ_EBU_CON_DTACS) >> 20) & 0x7)
+#define LTQ_EBU_CON_DTACS_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_DTACS) | (((val) & 0x7) << 20))
+/* Driver Turn-Around Control, Read-write Triggered (18:16) */
+#define LTQ_EBU_CON_DTARW (0x7 << 16)
+#define LTQ_EBU_CON_DTARW_VAL(val) (((val) & 0x7) << 16)
+#define LTQ_EBU_CON_DTARW_GET(val) ((((val) & LTQ_EBU_CON_DTARW) >> 16) & 0x7)
+#define LTQ_EBU_CON_DTARW_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_DTARW) | (((val) & 0x7) << 16))
+/* Time-Out Control (15:8) */
+#define LTQ_EBU_CON_TOUTC (0xff << 8)
+#define LTQ_EBU_CON_TOUTC_VAL(val) (((val) & 0xff) << 8)
+#define LTQ_EBU_CON_TOUTC_GET(val) ((((val) & LTQ_EBU_CON_TOUTC) >> 8) & 0xff)
+#define LTQ_EBU_CON_TOUTC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_TOUTC) | (((val) & 0xff) << 8))
+/* Arbitration Mode (7:6) */
+#define LTQ_EBU_CON_ARBMODE (0x3 << 6)
+#define LTQ_EBU_CON_ARBMODE_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_EBU_CON_ARBMODE_GET(val) ((((val) & LTQ_EBU_CON_ARBMODE) >> 6) & 0x3)
+#define LTQ_EBU_CON_ARBMODE_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_ARBMODE) | (((val) & 0x3) << 6))
+/* Arbitration Synchronization (5) */
+#define LTQ_EBU_CON_ARBSYNC (0x1 << 5)
+#define LTQ_EBU_CON_ARBSYNC_VAL(val) (((val) & 0x1) << 5)
+#define LTQ_EBU_CON_ARBSYNC_GET(val) ((((val) & LTQ_EBU_CON_ARBSYNC) >> 5) & 0x1)
+#define LTQ_EBU_CON_ARBSYNC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_ARBSYNC) | (((val) & 0x1) << 5))
+
+/*******************************************************************************
+ * Address Select Registers
+ ******************************************************************************/
+
+/* Memory Region Base Address (31:12) */
+#define LTQ_EBU_ADDR_SEL_0_BASE (0xfffff << 12)
+#define LTQ_EBU_ADDR_SEL_0_BASE_VAL(val) (((val) & 0xfffff) << 12)
+#define LTQ_EBU_ADDR_SEL_0_BASE_GET(val) ((((val) & LTQ_EBU_ADDR_SEL_0_BASE) >> 12) & 0xfffff)
+#define LTQ_EBU_ADDR_SEL_0_BASE_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_ADDR_SEL_0_BASE) | (((val) & 0xfffff) << 12))
+/* Memory Region Address Mask (7:4) */
+#define LTQ_EBU_ADDR_SEL_0_MASK (0xf << 4)
+#define LTQ_EBU_ADDR_SEL_0_MASK_VAL(val) (((val) & 0xf) << 4)
+#define LTQ_EBU_ADDR_SEL_0_MASK_GET(val) ((((val) & LTQ_EBU_ADDR_SEL_0_MASK) >> 4) & 0xf)
+#define LTQ_EBU_ADDR_SEL_0_MASK_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_ADDR_SEL_0_MASK) | (((val) & 0xf) << 4))
+/* Memory Region Mirror Enable Control (1) */
+#define LTQ_EBU_ADDR_SEL_0_MRME (0x1 << 1)
+#define LTQ_EBU_ADDR_SEL_0_MRME_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_ADDR_SEL_0_MRME_GET(val) ((((val) & LTQ_EBU_ADDR_SEL_0_MRME) >> 1) & 0x1)
+#define LTQ_EBU_ADDR_SEL_0_MRME_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_ADDR_SEL_0_MRME) | (((val) & 0x1) << 1))
+/* Memory Region Enable Control (0) */
+#define LTQ_EBU_ADDR_SEL_0_REGEN (0x1)
+#define LTQ_EBU_ADDR_SEL_0_REGEN_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_ADDR_SEL_0_REGEN_GET(val) ((((val) & LTQ_EBU_ADDR_SEL_0_REGEN) >> 0) & 0x1)
+#define LTQ_EBU_ADDR_SEL_0_REGEN_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_ADDR_SEL_0_REGEN) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Bus Configuration Registers
+ ******************************************************************************/
+
+/* Memory Region Write Protection (31) */
+#define LTQ_EBU_CON_0_WRDIS (0x1 << 31)
+#define LTQ_EBU_CON_0_WRDIS_VAL(val) (((val) & 0x1) << 31)
+#define LTQ_EBU_CON_0_WRDIS_GET(val) ((((val) & LTQ_EBU_CON_0_WRDIS) >> 31) & 0x1)
+#define LTQ_EBU_CON_0_WRDIS_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_WRDIS) | (((val) & 0x1) << 31))
+/* Address Swapping (30) */
+#define LTQ_EBU_CON_0_ADSWP (0x1 << 30)
+#define LTQ_EBU_CON_0_ADSWP_VAL(val) (((val) & 0x1) << 30)
+#define LTQ_EBU_CON_0_ADSWP_GET(val) ((((val) & LTQ_EBU_CON_0_ADSWP) >> 30) & 0x1)
+#define LTQ_EBU_CON_0_ADSWP_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_ADSWP) | (((val) & 0x1) << 30))
+/* Address Generation Control (26:24) */
+#define LTQ_EBU_CON_0_AGEN (0x7 << 24)
+#define LTQ_EBU_CON_0_AGEN_VAL(val) (((val) & 0x7) << 24)
+#define LTQ_EBU_CON_0_AGEN_GET(val) ((((val) & LTQ_EBU_CON_0_AGEN) >> 24) & 0x7)
+#define LTQ_EBU_CON_0_AGEN_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_AGEN) | (((val) & 0x7) << 24))
+/* Extended Address Setup Control (22) */
+#define LTQ_EBU_CON_0_SETUP (0x1 << 22)
+#define LTQ_EBU_CON_0_SETUP_VAL(val) (((val) & 0x1) << 22)
+#define LTQ_EBU_CON_0_SETUP_GET(val) ((((val) & LTQ_EBU_CON_0_SETUP) >> 22) & 0x1)
+#define LTQ_EBU_CON_0_SETUP_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_SETUP) | (((val) & 0x1) << 22))
+/* Variable Wait-State Insertion Control (21:20) */
+#define LTQ_EBU_CON_0_WAIT (0x3 << 20)
+#define LTQ_EBU_CON_0_WAIT_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_EBU_CON_0_WAIT_GET(val) ((((val) & LTQ_EBU_CON_0_WAIT) >> 20) & 0x3)
+#define LTQ_EBU_CON_0_WAIT_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_WAIT) | (((val) & 0x3) << 20))
+/* Active WAIT Level Control (19) */
+#define LTQ_EBU_CON_0_WINV (0x1 << 19)
+#define LTQ_EBU_CON_0_WINV_VAL(val) (((val) & 0x1) << 19)
+#define LTQ_EBU_CON_0_WINV_GET(val) ((((val) & LTQ_EBU_CON_0_WINV) >> 19) & 0x1)
+#define LTQ_EBU_CON_0_WINV_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_WINV) | (((val) & 0x1) << 19))
+/* External Device Data Width Control (17:16) */
+#define LTQ_EBU_CON_0_PW (0x3 << 16)
+#define LTQ_EBU_CON_0_PW_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_EBU_CON_0_PW_GET(val) ((((val) & LTQ_EBU_CON_0_PW) >> 16) & 0x3)
+#define LTQ_EBU_CON_0_PW_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_PW) | (((val) & 0x3) << 16))
+/* Address Latch Enable ALE Duration Control (15:14) */
+#define LTQ_EBU_CON_0_ALEC (0x3 << 14)
+#define LTQ_EBU_CON_0_ALEC_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_EBU_CON_0_ALEC_GET(val) ((((val) & LTQ_EBU_CON_0_ALEC) >> 14) & 0x3)
+#define LTQ_EBU_CON_0_ALEC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_ALEC) | (((val) & 0x3) << 14))
+/* Byte Control Signal Timing Mode Control (13:12) */
+#define LTQ_EBU_CON_0_BCGEN (0x3 << 12)
+#define LTQ_EBU_CON_0_BCGEN_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_EBU_CON_0_BCGEN_GET(val) ((((val) & LTQ_EBU_CON_0_BCGEN) >> 12) & 0x3)
+#define LTQ_EBU_CON_0_BCGEN_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_BCGEN) | (((val) & 0x3) << 12))
+/* Write Access Wait-State Control (10:8) */
+#define LTQ_EBU_CON_0_WAITWRC (0x7 << 8)
+#define LTQ_EBU_CON_0_WAITWRC_VAL(val) (((val) & 0x7) << 8)
+#define LTQ_EBU_CON_0_WAITWRC_GET(val) ((((val) & LTQ_EBU_CON_0_WAITWRC) >> 8) & 0x7)
+#define LTQ_EBU_CON_0_WAITWRC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_WAITWRC) | (((val) & 0x7) << 8))
+/* Read Access Wait-State Control (7:6) */
+#define LTQ_EBU_CON_0_WAITRDC (0x3 << 6)
+#define LTQ_EBU_CON_0_WAITRDC_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_EBU_CON_0_WAITRDC_GET(val) ((((val) & LTQ_EBU_CON_0_WAITRDC) >> 6) & 0x3)
+#define LTQ_EBU_CON_0_WAITRDC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_WAITRDC) | (((val) & 0x3) << 6))
+/* Hold/Pause Cycle Control (5:4) */
+#define LTQ_EBU_CON_0_HOLDC (0x3 << 4)
+#define LTQ_EBU_CON_0_HOLDC_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_EBU_CON_0_HOLDC_GET(val) ((((val) & LTQ_EBU_CON_0_HOLDC) >> 4) & 0x3)
+#define LTQ_EBU_CON_0_HOLDC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_HOLDC) | (((val) & 0x3) << 4))
+/* Recovery Cycle Control (3:2) */
+#define LTQ_EBU_CON_0_RECOVC (0x3 << 2)
+#define LTQ_EBU_CON_0_RECOVC_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_EBU_CON_0_RECOVC_GET(val) ((((val) & LTQ_EBU_CON_0_RECOVC) >> 2) & 0x3)
+#define LTQ_EBU_CON_0_RECOVC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_RECOVC) | (((val) & 0x3) << 2))
+/* Wait Cycle Multiplier Control (1:0) */
+#define LTQ_EBU_CON_0_CMULT (0x3)
+#define LTQ_EBU_CON_0_CMULT_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_EBU_CON_0_CMULT_GET(val) ((((val) & LTQ_EBU_CON_0_CMULT) >> 0) & 0x3)
+#define LTQ_EBU_CON_0_CMULT_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_CON_0_CMULT) | (((val) & 0x3) << 0))
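+
+/*
+ * Usage sketch (illustrative; 'ebu' is assumed to be the mapped
+ * struct svip_reg_ebu instance used by the ebu_r32()/ebu_w32()
+ * accessors above). A field update is a read-modify-write:
+ *
+ *	unsigned long con0 = ebu_r32(con_0);
+ *	LTQ_EBU_CON_0_WAITRDC_SET(con0, 2);
+ *	LTQ_EBU_CON_0_HOLDC_SET(con0, 1);
+ *	ebu_w32(con0, con_0);
+ */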
+
+/*******************************************************************************
+ * External Bus Unit Emulator Bus Configuration Register
+ ******************************************************************************/
+
+/* Write Protection (31) */
+#define LTQ_EBU_EMU_BC_WRITE (0x1 << 31)
+#define LTQ_EBU_EMU_BC_WRITE_VAL(val) (((val) & 0x1) << 31)
+#define LTQ_EBU_EMU_BC_WRITE_GET(val) ((((val) & LTQ_EBU_EMU_BC_WRITE) >> 31) & 0x1)
+#define LTQ_EBU_EMU_BC_WRITE_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_WRITE) | (((val) & 0x1) << 31))
+/* Address Generation Control (26:24) */
+#define LTQ_EBU_EMU_BC_AGEN (0x7 << 24)
+#define LTQ_EBU_EMU_BC_AGEN_VAL(val) (((val) & 0x7) << 24)
+#define LTQ_EBU_EMU_BC_AGEN_GET(val) ((((val) & LTQ_EBU_EMU_BC_AGEN) >> 24) & 0x7)
+#define LTQ_EBU_EMU_BC_AGEN_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_AGEN) | (((val) & 0x7) << 24))
+/* Extended Address Setup Control (22) */
+#define LTQ_EBU_EMU_BC_SETUP (0x1 << 22)
+#define LTQ_EBU_EMU_BC_SETUP_VAL(val) (((val) & 0x1) << 22)
+#define LTQ_EBU_EMU_BC_SETUP_GET(val) ((((val) & LTQ_EBU_EMU_BC_SETUP) >> 22) & 0x1)
+#define LTQ_EBU_EMU_BC_SETUP_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_SETUP) | (((val) & 0x1) << 22))
+/* Variable Waitstate Insertion Control (21:20) */
+#define LTQ_EBU_EMU_BC_WAIT (0x3 << 20)
+#define LTQ_EBU_EMU_BC_WAIT_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_EBU_EMU_BC_WAIT_GET(val) ((((val) & LTQ_EBU_EMU_BC_WAIT) >> 20) & 0x3)
+#define LTQ_EBU_EMU_BC_WAIT_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_WAIT) | (((val) & 0x3) << 20))
+/* Active WAIT Level Control (19) */
+#define LTQ_EBU_EMU_BC_WINV (0x1 << 19)
+#define LTQ_EBU_EMU_BC_WINV_VAL(val) (((val) & 0x1) << 19)
+#define LTQ_EBU_EMU_BC_WINV_GET(val) ((((val) & LTQ_EBU_EMU_BC_WINV) >> 19) & 0x1)
+#define LTQ_EBU_EMU_BC_WINV_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_WINV) | (((val) & 0x1) << 19))
+/* External Device Data Width Control (17:16) */
+#define LTQ_EBU_EMU_BC_PORTW (0x3 << 16)
+#define LTQ_EBU_EMU_BC_PORTW_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_EBU_EMU_BC_PORTW_GET(val) ((((val) & LTQ_EBU_EMU_BC_PORTW) >> 16) & 0x3)
+#define LTQ_EBU_EMU_BC_PORTW_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_PORTW) | (((val) & 0x3) << 16))
+/* Address Latch Enable Function (15:14) */
+#define LTQ_EBU_EMU_BC_ALEC (0x3 << 14)
+#define LTQ_EBU_EMU_BC_ALEC_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_EBU_EMU_BC_ALEC_GET(val) ((((val) & LTQ_EBU_EMU_BC_ALEC) >> 14) & 0x3)
+#define LTQ_EBU_EMU_BC_ALEC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_ALEC) | (((val) & 0x3) << 14))
+/* Byte Control Signal Timing Mode (13:12) */
+#define LTQ_EBU_EMU_BC_BCGEN (0x3 << 12)
+#define LTQ_EBU_EMU_BC_BCGEN_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_EBU_EMU_BC_BCGEN_GET(val) ((((val) & LTQ_EBU_EMU_BC_BCGEN) >> 12) & 0x3)
+#define LTQ_EBU_EMU_BC_BCGEN_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_BCGEN) | (((val) & 0x3) << 12))
+/* Write Access Waitstate Control (10:8) */
+#define LTQ_EBU_EMU_BC_WAITWRC (0x7 << 8)
+#define LTQ_EBU_EMU_BC_WAITWRC_VAL(val) (((val) & 0x7) << 8)
+#define LTQ_EBU_EMU_BC_WAITWRC_GET(val) ((((val) & LTQ_EBU_EMU_BC_WAITWRC) >> 8) & 0x7)
+#define LTQ_EBU_EMU_BC_WAITWRC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_WAITWRC) | (((val) & 0x7) << 8))
+/* Read Access Waitstate Control (7:6) */
+#define LTQ_EBU_EMU_BC_WAITRDC (0x3 << 6)
+#define LTQ_EBU_EMU_BC_WAITRDC_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_EBU_EMU_BC_WAITRDC_GET(val) ((((val) & LTQ_EBU_EMU_BC_WAITRDC) >> 6) & 0x3)
+#define LTQ_EBU_EMU_BC_WAITRDC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_WAITRDC) | (((val) & 0x3) << 6))
+/* Hold/Pause Cycle Control (5:4) */
+#define LTQ_EBU_EMU_BC_HOLDC (0x3 << 4)
+#define LTQ_EBU_EMU_BC_HOLDC_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_EBU_EMU_BC_HOLDC_GET(val) ((((val) & LTQ_EBU_EMU_BC_HOLDC) >> 4) & 0x3)
+#define LTQ_EBU_EMU_BC_HOLDC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_HOLDC) | (((val) & 0x3) << 4))
+/* Recovery Cycles Control (3:2) */
+#define LTQ_EBU_EMU_BC_RECOVC (0x3 << 2)
+#define LTQ_EBU_EMU_BC_RECOVC_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_EBU_EMU_BC_RECOVC_GET(val) ((((val) & LTQ_EBU_EMU_BC_RECOVC) >> 2) & 0x3)
+#define LTQ_EBU_EMU_BC_RECOVC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_RECOVC) | (((val) & 0x3) << 2))
+/* Cycle Multiplier Control (1:0) */
+#define LTQ_EBU_EMU_BC_CMULT (0x3)
+#define LTQ_EBU_EMU_BC_CMULT_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_EBU_EMU_BC_CMULT_GET(val) ((((val) & LTQ_EBU_EMU_BC_CMULT) >> 0) & 0x3)
+#define LTQ_EBU_EMU_BC_CMULT_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_BC_CMULT) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * PC-Card Control Register
+ ******************************************************************************/
+
+/* External Interrupt Input IREQ (3:1) */
+#define LTQ_EBU_PCC_CON_IREQ (0x7 << 1)
+#define LTQ_EBU_PCC_CON_IREQ_VAL(val) (((val) & 0x7) << 1)
+#define LTQ_EBU_PCC_CON_IREQ_GET(val) ((((val) & LTQ_EBU_PCC_CON_IREQ) >> 1) & 0x7)
+#define LTQ_EBU_PCC_CON_IREQ_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_CON_IREQ) | (((val) & 0x7) << 1))
+/* PC Card ON (0) */
+#define LTQ_EBU_PCC_CON_ON (0x1)
+#define LTQ_EBU_PCC_CON_ON_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_PCC_CON_ON_GET(val) ((((val) & LTQ_EBU_PCC_CON_ON) >> 0) & 0x1)
+#define LTQ_EBU_PCC_CON_ON_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_CON_ON) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * PCC Status Register
+ ******************************************************************************/
+
+/* Interrupt Request (6) */
+#define LTQ_EBU_PCC_STAT_IRQ (0x1 << 6)
+#define LTQ_EBU_PCC_STAT_IRQ_GET(val) ((((val) & LTQ_EBU_PCC_STAT_IRQ) >> 6) & 0x1)
+/* PC-Card Overcurrent (5) */
+#define LTQ_EBU_PCC_STAT_OC (0x1 << 5)
+#define LTQ_EBU_PCC_STAT_OC_GET(val) ((((val) & LTQ_EBU_PCC_STAT_OC) >> 5) & 0x1)
+/* PC-Card Socket Power On (4) */
+#define LTQ_EBU_PCC_STAT_SPON (0x1 << 4)
+#define LTQ_EBU_PCC_STAT_SPON_GET(val) ((((val) & LTQ_EBU_PCC_STAT_SPON) >> 4) & 0x1)
+/* Card Detect Status (1:0) */
+#define LTQ_EBU_PCC_STAT_CD (0x3)
+#define LTQ_EBU_PCC_STAT_CD_GET(val) ((((val) & LTQ_EBU_PCC_STAT_CD) >> 0) & 0x3)
+
+/*******************************************************************************
+ * PCC Interrupt Status Register
+ ******************************************************************************/
+
+/* Interrupt Request Active Interrupt (4) */
+#define LTQ_EBU_PCC_ISTAT_IREQ (0x1 << 4)
+#define LTQ_EBU_PCC_ISTAT_IREQ_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_EBU_PCC_ISTAT_IREQ_GET(val) ((((val) & LTQ_EBU_PCC_ISTAT_IREQ) >> 4) & 0x1)
+#define LTQ_EBU_PCC_ISTAT_IREQ_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_ISTAT_IREQ) | (((val) & 0x1) << 4))
+/* Over Current Status Change Interrupt (3) */
+#define LTQ_EBU_PCC_ISTAT_OC (0x1 << 3)
+#define LTQ_EBU_PCC_ISTAT_OC_VAL(val) (((val) & 0x1) << 3)
+#define LTQ_EBU_PCC_ISTAT_OC_GET(val) ((((val) & LTQ_EBU_PCC_ISTAT_OC) >> 3) & 0x1)
+#define LTQ_EBU_PCC_ISTAT_OC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_ISTAT_OC) | (((val) & 0x1) << 3))
+/* Socket Power on Status Change Interrupt (2) */
+#define LTQ_EBU_PCC_ISTAT_SPON (0x1 << 2)
+#define LTQ_EBU_PCC_ISTAT_SPON_VAL(val) (((val) & 0x1) << 2)
+#define LTQ_EBU_PCC_ISTAT_SPON_GET(val) ((((val) & LTQ_EBU_PCC_ISTAT_SPON) >> 2) & 0x1)
+#define LTQ_EBU_PCC_ISTAT_SPON_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_ISTAT_SPON) | (((val) & 0x1) << 2))
+/* Voltage Sense Status Change Interrupt (1) */
+#define LTQ_EBU_PCC_ISTAT_VS (0x1 << 1)
+#define LTQ_EBU_PCC_ISTAT_VS_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_PCC_ISTAT_VS_GET(val) ((((val) & LTQ_EBU_PCC_ISTAT_VS) >> 1) & 0x1)
+#define LTQ_EBU_PCC_ISTAT_VS_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_ISTAT_VS) | (((val) & 0x1) << 1))
+/* Card Detect Status Change Interrupt (0) */
+#define LTQ_EBU_PCC_ISTAT_CD (0x1)
+#define LTQ_EBU_PCC_ISTAT_CD_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_PCC_ISTAT_CD_GET(val) ((((val) & LTQ_EBU_PCC_ISTAT_CD) >> 0) & 0x1)
+#define LTQ_EBU_PCC_ISTAT_CD_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_ISTAT_CD) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * PCC Interrupt Enable Register
+ ******************************************************************************/
+
+/* Enable of Interrupt Request IR (4) */
+#define LTQ_EBU_PCC_IEN_IR (0x1 << 4)
+#define LTQ_EBU_PCC_IEN_IR_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_EBU_PCC_IEN_IR_GET(val) ((((val) & LTQ_EBU_PCC_IEN_IR) >> 4) & 0x1)
+#define LTQ_EBU_PCC_IEN_IR_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IEN_IR) | (((val) & 0x1) << 4))
+/* Enable of Interrupt Request OC event (3) */
+#define LTQ_EBU_PCC_IEN_OC (0x1 << 3)
+#define LTQ_EBU_PCC_IEN_OC_VAL(val) (((val) & 0x1) << 3)
+#define LTQ_EBU_PCC_IEN_OC_GET(val) ((((val) & LTQ_EBU_PCC_IEN_OC) >> 3) & 0x1)
+#define LTQ_EBU_PCC_IEN_OC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IEN_OC) | (((val) & 0x1) << 3))
+/* Enable of Interrupt Request Socket Power On (2) */
+#define LTQ_EBU_PCC_IEN_PWRON (0x1 << 2)
+#define LTQ_EBU_PCC_IEN_PWRON_VAL(val) (((val) & 0x1) << 2)
+#define LTQ_EBU_PCC_IEN_PWRON_GET(val) ((((val) & LTQ_EBU_PCC_IEN_PWRON) >> 2) & 0x1)
+#define LTQ_EBU_PCC_IEN_PWRON_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IEN_PWRON) | (((val) & 0x1) << 2))
+/* Enable of Interrupt Request Voltage Sense (1) */
+#define LTQ_EBU_PCC_IEN_VS (0x1 << 1)
+#define LTQ_EBU_PCC_IEN_VS_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_PCC_IEN_VS_GET(val) ((((val) & LTQ_EBU_PCC_IEN_VS) >> 1) & 0x1)
+#define LTQ_EBU_PCC_IEN_VS_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IEN_VS) | (((val) & 0x1) << 1))
+/* Enable of Interrupt Request Card Detect (0) */
+#define LTQ_EBU_PCC_IEN_CD (0x1)
+#define LTQ_EBU_PCC_IEN_CD_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_PCC_IEN_CD_GET(val) ((((val) & LTQ_EBU_PCC_IEN_CD) >> 0) & 0x1)
+#define LTQ_EBU_PCC_IEN_CD_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IEN_CD) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * PCC Interrupt Output Status Register
+ ******************************************************************************/
+
+/* Status of Interrupt Request IR (4) */
+#define LTQ_EBU_PCC_INT_OUT_IR (0x1 << 4)
+#define LTQ_EBU_PCC_INT_OUT_IR_GET(val) ((((val) & LTQ_EBU_PCC_INT_OUT_IR) >> 4) & 0x1)
+/* Status of Interrupt Request OC (3) */
+#define LTQ_EBU_PCC_INT_OUT_OC (0x1 << 3)
+#define LTQ_EBU_PCC_INT_OUT_OC_GET(val) ((((val) & LTQ_EBU_PCC_INT_OUT_OC) >> 3) & 0x1)
+/* Status of Interrupt Request Socket Power On (2) */
+#define LTQ_EBU_PCC_INT_OUT_PWRON (0x1 << 2)
+#define LTQ_EBU_PCC_INT_OUT_PWRON_GET(val) ((((val) & LTQ_EBU_PCC_INT_OUT_PWRON) >> 2) & 0x1)
+/* Status of Interrupt Request Voltage Sense (1) */
+#define LTQ_EBU_PCC_INT_OUT_VS (0x1 << 1)
+#define LTQ_EBU_PCC_INT_OUT_VS_GET(val) ((((val) & LTQ_EBU_PCC_INT_OUT_VS) >> 1) & 0x1)
+/* Status of Interrupt Request Card Detect (0) */
+#define LTQ_EBU_PCC_INT_OUT_CD (0x1)
+#define LTQ_EBU_PCC_INT_OUT_CD_GET(val) ((((val) & LTQ_EBU_PCC_INT_OUT_CD) >> 0) & 0x1)
+
+/*******************************************************************************
+ * PCC Interrupt Request Set Register
+ ******************************************************************************/
+
+/* Set Interrupt Request IR (4) */
+#define LTQ_EBU_PCC_IRS_IR (0x1 << 4)
+#define LTQ_EBU_PCC_IRS_IR_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_EBU_PCC_IRS_IR_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IRS_IR) | (((val) & 0x1) << 4))
+/* Set Interrupt Request OC (3) */
+#define LTQ_EBU_PCC_IRS_OC (0x1 << 3)
+#define LTQ_EBU_PCC_IRS_OC_VAL(val) (((val) & 0x1) << 3)
+#define LTQ_EBU_PCC_IRS_OC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IRS_OC) | (((val) & 0x1) << 3))
+/* Set Interrupt Request Socket Power On (2) */
+#define LTQ_EBU_PCC_IRS_PWRON (0x1 << 2)
+#define LTQ_EBU_PCC_IRS_PWRON_VAL(val) (((val) & 0x1) << 2)
+#define LTQ_EBU_PCC_IRS_PWRON_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IRS_PWRON) | (((val) & 0x1) << 2))
+/* Set Interrupt Request Voltage Sense (1) */
+#define LTQ_EBU_PCC_IRS_VS (0x1 << 1)
+#define LTQ_EBU_PCC_IRS_VS_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_PCC_IRS_VS_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IRS_VS) | (((val) & 0x1) << 1))
+/* Set Interrupt Request Card Detect (0) */
+#define LTQ_EBU_PCC_IRS_CD (0x1)
+#define LTQ_EBU_PCC_IRS_CD_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_PCC_IRS_CD_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_PCC_IRS_CD) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * NAND Flash Control Register
+ ******************************************************************************/
+
+/* ECC Enabling (31) */
+#define LTQ_EBU_NAND_CON_ECC_ON (0x1 << 31)
+#define LTQ_EBU_NAND_CON_ECC_ON_VAL(val) (((val) & 0x1) << 31)
+#define LTQ_EBU_NAND_CON_ECC_ON_GET(val) ((((val) & LTQ_EBU_NAND_CON_ECC_ON) >> 31) & 0x1)
+#define LTQ_EBU_NAND_CON_ECC_ON_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_ECC_ON) | (((val) & 0x1) << 31))
+/* Latch enable (23:18) */
+#define LTQ_EBU_NAND_CON_LAT_EN (0x3f << 18)
+#define LTQ_EBU_NAND_CON_LAT_EN_VAL(val) (((val) & 0x3f) << 18)
+#define LTQ_EBU_NAND_CON_LAT_EN_GET(val) ((((val) & LTQ_EBU_NAND_CON_LAT_EN) >> 18) & 0x3f)
+#define LTQ_EBU_NAND_CON_LAT_EN_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_LAT_EN) | (((val) & 0x3f) << 18))
+/* Output ChipSelect# Selection (11:10) */
+#define LTQ_EBU_NAND_CON_OUT_CS_S (0x3 << 10)
+#define LTQ_EBU_NAND_CON_OUT_CS_S_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_EBU_NAND_CON_OUT_CS_S_GET(val) ((((val) & LTQ_EBU_NAND_CON_OUT_CS_S) >> 10) & 0x3)
+#define LTQ_EBU_NAND_CON_OUT_CS_S_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_OUT_CS_S) | (((val) & 0x3) << 10))
+/* Input ChipSelect# Selection (9:8) */
+#define LTQ_EBU_NAND_CON_IN_CS_S (0x3 << 8)
+#define LTQ_EBU_NAND_CON_IN_CS_S_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_EBU_NAND_CON_IN_CS_S_GET(val) ((((val) & LTQ_EBU_NAND_CON_IN_CS_S) >> 8) & 0x3)
+#define LTQ_EBU_NAND_CON_IN_CS_S_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_IN_CS_S) | (((val) & 0x3) << 8))
+/* Set PRE (7) */
+#define LTQ_EBU_NAND_CON_PRE_P (0x1 << 7)
+#define LTQ_EBU_NAND_CON_PRE_P_VAL(val) (((val) & 0x1) << 7)
+#define LTQ_EBU_NAND_CON_PRE_P_GET(val) ((((val) & LTQ_EBU_NAND_CON_PRE_P) >> 7) & 0x1)
+#define LTQ_EBU_NAND_CON_PRE_P_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_PRE_P) | (((val) & 0x1) << 7))
+/* Set WP Active Polarity (6) */
+#define LTQ_EBU_NAND_CON_WP_P (0x1 << 6)
+#define LTQ_EBU_NAND_CON_WP_P_VAL(val) (((val) & 0x1) << 6)
+#define LTQ_EBU_NAND_CON_WP_P_GET(val) ((((val) & LTQ_EBU_NAND_CON_WP_P) >> 6) & 0x1)
+#define LTQ_EBU_NAND_CON_WP_P_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_WP_P) | (((val) & 0x1) << 6))
+/* Set SE Active Polarity (5) */
+#define LTQ_EBU_NAND_CON_SE_P (0x1 << 5)
+#define LTQ_EBU_NAND_CON_SE_P_VAL(val) (((val) & 0x1) << 5)
+#define LTQ_EBU_NAND_CON_SE_P_GET(val) ((((val) & LTQ_EBU_NAND_CON_SE_P) >> 5) & 0x1)
+#define LTQ_EBU_NAND_CON_SE_P_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_SE_P) | (((val) & 0x1) << 5))
+/* Set CS Active Polarity (4) */
+#define LTQ_EBU_NAND_CON_CS_P (0x1 << 4)
+#define LTQ_EBU_NAND_CON_CS_P_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_EBU_NAND_CON_CS_P_GET(val) ((((val) & LTQ_EBU_NAND_CON_CS_P) >> 4) & 0x1)
+#define LTQ_EBU_NAND_CON_CS_P_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_CS_P) | (((val) & 0x1) << 4))
+/* Set CLE Active Polarity (3) */
+#define LTQ_EBU_NAND_CON_CLE_P (0x1 << 3)
+#define LTQ_EBU_NAND_CON_CLE_P_VAL(val) (((val) & 0x1) << 3)
+#define LTQ_EBU_NAND_CON_CLE_P_GET(val) ((((val) & LTQ_EBU_NAND_CON_CLE_P) >> 3) & 0x1)
+#define LTQ_EBU_NAND_CON_CLE_P_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_CLE_P) | (((val) & 0x1) << 3))
+/* Set ALE Active Polarity (2) */
+#define LTQ_EBU_NAND_CON_ALE_P (0x1 << 2)
+#define LTQ_EBU_NAND_CON_ALE_P_VAL(val) (((val) & 0x1) << 2)
+#define LTQ_EBU_NAND_CON_ALE_P_GET(val) ((((val) & LTQ_EBU_NAND_CON_ALE_P) >> 2) & 0x1)
+#define LTQ_EBU_NAND_CON_ALE_P_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_ALE_P) | (((val) & 0x1) << 2))
+/* NAND CS Mux with EBU CS Enable (1) */
+#define LTQ_EBU_NAND_CON_CSMUX_E (0x1 << 1)
+#define LTQ_EBU_NAND_CON_CSMUX_E_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_NAND_CON_CSMUX_E_GET(val) ((((val) & LTQ_EBU_NAND_CON_CSMUX_E) >> 1) & 0x1)
+#define LTQ_EBU_NAND_CON_CSMUX_E_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_CSMUX_E) | (((val) & 0x1) << 1))
+/* NAND FLASH Mode Support (0) */
+#define LTQ_EBU_NAND_CON_NANDMODE (0x1)
+#define LTQ_EBU_NAND_CON_NANDMODE_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_NAND_CON_NANDMODE_GET(val) ((((val) & LTQ_EBU_NAND_CON_NANDMODE) >> 0) & 0x1)
+#define LTQ_EBU_NAND_CON_NANDMODE_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_CON_NANDMODE) | (((val) & 0x1) << 0))
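+
+/*
+ * Usage sketch (illustrative only): enabling NAND operation typically
+ * means setting NANDMODE and muxing the NAND chip select with the EBU
+ * chip select; the polarity and CS selection values are board specific:
+ *
+ *	unsigned long ncon = ebu_r32(nand_con);
+ *	LTQ_EBU_NAND_CON_NANDMODE_SET(ncon, 1);
+ *	LTQ_EBU_NAND_CON_CSMUX_E_SET(ncon, 1);
+ *	ebu_w32(ncon, nand_con);
+ */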
+
+/*******************************************************************************
+ * NAND Flash State Register
+ ******************************************************************************/
+
+/* Reserved (31:4) */
+#define LTQ_EBU_NAND_WAIT_RES (0xfffffff << 4)
+#define LTQ_EBU_NAND_WAIT_RES_GET(val) ((((val) & LTQ_EBU_NAND_WAIT_RES) >> 4) & 0xfffffff)
+/* NAND Write Complete (3) */
+#define LTQ_EBU_NAND_WAIT_WR_C (0x1 << 3)
+#define LTQ_EBU_NAND_WAIT_WR_C_GET(val) ((((val) & LTQ_EBU_NAND_WAIT_WR_C) >> 3) & 0x1)
+/* Record the RD Edge (rising) (2) */
+#define LTQ_EBU_NAND_WAIT_RD_EDGE (0x1 << 2)
+#define LTQ_EBU_NAND_WAIT_RD_EDGE_GET(val) ((((val) & LTQ_EBU_NAND_WAIT_RD_EDGE) >> 2) & 0x1)
+/* Record the BY# Edge (falling) (1) */
+#define LTQ_EBU_NAND_WAIT_BY_EDGE (0x1 << 1)
+#define LTQ_EBU_NAND_WAIT_BY_EDGE_GET(val) ((((val) & LTQ_EBU_NAND_WAIT_BY_EDGE) >> 1) & 0x1)
+/* Rd/BY# value (0) */
+#define LTQ_EBU_NAND_WAIT_RDBY_VALUE (0x1)
+#define LTQ_EBU_NAND_WAIT_RDBY_VALUE_GET(val) ((((val) & LTQ_EBU_NAND_WAIT_RDBY_VALUE) >> 0) & 0x1)
+
+/*******************************************************************************
+ * NAND ECC Result Register 0
+ ******************************************************************************/
+
+/* Reserved (31:24) */
+#define LTQ_EBU_NAND_ECC0_RES (0xff << 24)
+#define LTQ_EBU_NAND_ECC0_RES_GET(val) ((((val) & LTQ_EBU_NAND_ECC0_RES) >> 24) & 0xff)
+/* ECC value (23:16) */
+#define LTQ_EBU_NAND_ECC0_ECC_B2 (0xff << 16)
+#define LTQ_EBU_NAND_ECC0_ECC_B2_VAL(val) (((val) & 0xff) << 16)
+#define LTQ_EBU_NAND_ECC0_ECC_B2_GET(val) ((((val) & LTQ_EBU_NAND_ECC0_ECC_B2) >> 16) & 0xff)
+#define LTQ_EBU_NAND_ECC0_ECC_B2_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_ECC0_ECC_B2) | (((val) & 0xff) << 16))
+/* ECC value (15:8) */
+#define LTQ_EBU_NAND_ECC0_ECC_B1 (0xff << 8)
+#define LTQ_EBU_NAND_ECC0_ECC_B1_VAL(val) (((val) & 0xff) << 8)
+#define LTQ_EBU_NAND_ECC0_ECC_B1_GET(val) ((((val) & LTQ_EBU_NAND_ECC0_ECC_B1) >> 8) & 0xff)
+#define LTQ_EBU_NAND_ECC0_ECC_B1_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_ECC0_ECC_B1) | (((val) & 0xff) << 8))
+/* ECC value (7:0) */
+#define LTQ_EBU_NAND_ECC0_ECC_B0 (0xff)
+#define LTQ_EBU_NAND_ECC0_ECC_B0_VAL(val) (((val) & 0xff) << 0)
+#define LTQ_EBU_NAND_ECC0_ECC_B0_GET(val) ((((val) & LTQ_EBU_NAND_ECC0_ECC_B0) >> 0) & 0xff)
+#define LTQ_EBU_NAND_ECC0_ECC_B0_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_ECC0_ECC_B0) | (((val) & 0xff) << 0))
+
+/*******************************************************************************
+ * NAND ECC Address Counter Register
+ ******************************************************************************/
+
+/* Reserved (31:9) */
+#define LTQ_EBU_NAND_ECC_AC_RES (0x7fffff << 9)
+#define LTQ_EBU_NAND_ECC_AC_RES_GET(val) ((((val) & LTQ_EBU_NAND_ECC_AC_RES) >> 9) & 0x7fffff)
+/* ECC address counter (8:0) */
+#define LTQ_EBU_NAND_ECC_AC_ECC_AC (0x1ff)
+#define LTQ_EBU_NAND_ECC_AC_ECC_AC_VAL(val) (((val) & 0x1ff) << 0)
+#define LTQ_EBU_NAND_ECC_AC_ECC_AC_GET(val) ((((val) & LTQ_EBU_NAND_ECC_AC_ECC_AC) >> 0) & 0x1ff)
+#define LTQ_EBU_NAND_ECC_AC_ECC_AC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_NAND_ECC_AC_ECC_AC) | (((val) & 0x1ff) << 0))
+
+/*******************************************************************************
+ * Internal Address Emulation Register
+ ******************************************************************************/
+
+/* Memory Region Base Address (31:12) */
+#define LTQ_EBU_EMU_ADDR_BASE (0xfffff << 12)
+#define LTQ_EBU_EMU_ADDR_BASE_VAL(val) (((val) & 0xfffff) << 12)
+#define LTQ_EBU_EMU_ADDR_BASE_GET(val) ((((val) & LTQ_EBU_EMU_ADDR_BASE) >> 12) & 0xfffff)
+#define LTQ_EBU_EMU_ADDR_BASE_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_ADDR_BASE) | (((val) & 0xfffff) << 12))
+/* Memory Region Address Mask (7:4) */
+#define LTQ_EBU_EMU_ADDR_MASK (0xf << 4)
+#define LTQ_EBU_EMU_ADDR_MASK_VAL(val) (((val) & 0xf) << 4)
+#define LTQ_EBU_EMU_ADDR_MASK_GET(val) ((((val) & LTQ_EBU_EMU_ADDR_MASK) >> 4) & 0xf)
+#define LTQ_EBU_EMU_ADDR_MASK_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_ADDR_MASK) | (((val) & 0xf) << 4))
+/* Memory Region Mirror Segment B Control (1) */
+#define LTQ_EBU_EMU_ADDR_MRMB (0x1 << 1)
+#define LTQ_EBU_EMU_ADDR_MRMB_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_EMU_ADDR_MRMB_GET(val) ((((val) & LTQ_EBU_EMU_ADDR_MRMB) >> 1) & 0x1)
+#define LTQ_EBU_EMU_ADDR_MRMB_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_ADDR_MRMB) | (((val) & 0x1) << 1))
+/* Memory Region Enable Control (0) */
+#define LTQ_EBU_EMU_ADDR_MREC (0x1)
+#define LTQ_EBU_EMU_ADDR_MREC_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_EMU_ADDR_MREC_GET(val) ((((val) & LTQ_EBU_EMU_ADDR_MREC) >> 0) & 0x1)
+#define LTQ_EBU_EMU_ADDR_MREC_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_ADDR_MREC) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Internal Emulator Configuration Register
+ ******************************************************************************/
+
+/* Overlay Memory Control Region 3 (3) */
+#define LTQ_EBU_EMU_CON_OVL3 (0x1 << 3)
+#define LTQ_EBU_EMU_CON_OVL3_VAL(val) (((val) & 0x1) << 3)
+#define LTQ_EBU_EMU_CON_OVL3_GET(val) ((((val) & LTQ_EBU_EMU_CON_OVL3) >> 3) & 0x1)
+#define LTQ_EBU_EMU_CON_OVL3_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_CON_OVL3) | (((val) & 0x1) << 3))
+/* Overlay Memory Control Region 2 (2) */
+#define LTQ_EBU_EMU_CON_OVL2 (0x1 << 2)
+#define LTQ_EBU_EMU_CON_OVL2_VAL(val) (((val) & 0x1) << 2)
+#define LTQ_EBU_EMU_CON_OVL2_GET(val) ((((val) & LTQ_EBU_EMU_CON_OVL2) >> 2) & 0x1)
+#define LTQ_EBU_EMU_CON_OVL2_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_CON_OVL2) | (((val) & 0x1) << 2))
+/* Overlay Memory Control Region 1 (1) */
+#define LTQ_EBU_EMU_CON_OVL1 (0x1 << 1)
+#define LTQ_EBU_EMU_CON_OVL1_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_EBU_EMU_CON_OVL1_GET(val) ((((val) & LTQ_EBU_EMU_CON_OVL1) >> 1) & 0x1)
+#define LTQ_EBU_EMU_CON_OVL1_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_CON_OVL1) | (((val) & 0x1) << 1))
+/* Overlay Memory Control Region 0 (0) */
+#define LTQ_EBU_EMU_CON_OVL0 (0x1)
+#define LTQ_EBU_EMU_CON_OVL0_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_EBU_EMU_CON_OVL0_GET(val) ((((val) & LTQ_EBU_EMU_CON_OVL0) >> 0) & 0x1)
+#define LTQ_EBU_EMU_CON_OVL0_SET(reg,val) (reg) = ((reg & ~LTQ_EBU_EMU_CON_OVL0) | (((val) & 0x1) << 0))
+
+#endif /* __EBU_REG_H */
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/es_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/es_reg.h
new file mode 100644
index 0000000000..da84900f5e
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/es_reg.h
@@ -0,0 +1,2098 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __ES_REG_H
+#define __ES_REG_H
+
+#define es_r32(reg) ltq_r32(&es->reg)
+#define es_w32(val, reg) ltq_w32(val, &es->reg)
+#define es_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &es->reg)
+
+/** ES register structure */
+struct svip_reg_es {
+ volatile unsigned long ps; /* 0x0000 */
+ volatile unsigned long p0_ctl; /* 0x0004 */
+ volatile unsigned long p1_ctl; /* 0x0008 */
+ volatile unsigned long p2_ctl; /* 0x000C */
+ volatile unsigned long p0_vlan; /* 0x0010 */
+ volatile unsigned long p1_vlan; /* 0x0014 */
+ volatile unsigned long p2_vlan; /* 0x0018 */
+ volatile unsigned long reserved1[1]; /* 0x001C */
+ volatile unsigned long p0_inctl; /* 0x0020 */
+ volatile unsigned long p1_inctl; /* 0x0024 */
+ volatile unsigned long p2_inctl; /* 0x0028 */
+ volatile unsigned long reserved2[1]; /* 0x002C */
+ volatile unsigned long p0_ecs_q32; /* 0x0030 */
+ volatile unsigned long p0_ecs_q10; /* 0x0034 */
+ volatile unsigned long p0_ecw_q32; /* 0x0038 */
+ volatile unsigned long p0_ecw_q10; /* 0x003C */
+ volatile unsigned long p1_ecs_q32; /* 0x0040 */
+ volatile unsigned long p1_ecs_q10; /* 0x0044 */
+ volatile unsigned long p1_ecw_q32; /* 0x0048 */
+ volatile unsigned long p1_ecw_q10; /* 0x004C */
+ volatile unsigned long p2_ecs_q32; /* 0x0050 */
+ volatile unsigned long p2_ecs_q10; /* 0x0054 */
+ volatile unsigned long p2_ecw_q32; /* 0x0058 */
+ volatile unsigned long p2_ecw_q10; /* 0x005C */
+ volatile unsigned long int_ena; /* 0x0060 */
+ volatile unsigned long int_st; /* 0x0064 */
+ volatile unsigned long sw_gctl0; /* 0x0068 */
+ volatile unsigned long sw_gctl1; /* 0x006C */
+ volatile unsigned long arp; /* 0x0070 */
+ volatile unsigned long strm_ctl; /* 0x0074 */
+ volatile unsigned long rgmii_ctl; /* 0x0078 */
+ volatile unsigned long prt_1p; /* 0x007C */
+ volatile unsigned long gbkt_szbs; /* 0x0080 */
+ volatile unsigned long gbkt_szebs; /* 0x0084 */
+ volatile unsigned long bf_th; /* 0x0088 */
+ volatile unsigned long pmac_hd_ctl; /* 0x008C */
+ volatile unsigned long pmac_sa1; /* 0x0090 */
+ volatile unsigned long pmac_sa2; /* 0x0094 */
+ volatile unsigned long pmac_da1; /* 0x0098 */
+ volatile unsigned long pmac_da2; /* 0x009C */
+ volatile unsigned long pmac_vlan; /* 0x00A0 */
+ volatile unsigned long pmac_tx_ipg; /* 0x00A4 */
+ volatile unsigned long pmac_rx_ipg; /* 0x00A8 */
+ volatile unsigned long adr_tb_ctl0; /* 0x00AC */
+ volatile unsigned long adr_tb_ctl1; /* 0x00B0 */
+ volatile unsigned long adr_tb_ctl2; /* 0x00B4 */
+ volatile unsigned long adr_tb_st0; /* 0x00B8 */
+ volatile unsigned long adr_tb_st1; /* 0x00BC */
+ volatile unsigned long adr_tb_st2; /* 0x00C0 */
+ volatile unsigned long rmon_ctl; /* 0x00C4 */
+ volatile unsigned long rmon_st; /* 0x00C8 */
+ volatile unsigned long mdio_ctl; /* 0x00CC */
+ volatile unsigned long mdio_data; /* 0x00D0 */
+ volatile unsigned long tp_flt_act; /* 0x00D4 */
+ volatile unsigned long prtcl_flt_act; /* 0x00D8 */
+ volatile unsigned long reserved4[9]; /* 0xdc */
+ volatile unsigned long vlan_flt0; /* 0x0100 */
+ volatile unsigned long vlan_flt1; /* 0x0104 */
+ volatile unsigned long vlan_flt2; /* 0x0108 */
+ volatile unsigned long vlan_flt3; /* 0x010C */
+ volatile unsigned long vlan_flt4; /* 0x0110 */
+ volatile unsigned long vlan_flt5; /* 0x0114 */
+ volatile unsigned long vlan_flt6; /* 0x0118 */
+ volatile unsigned long vlan_flt7; /* 0x011C */
+ volatile unsigned long vlan_flt8; /* 0x0120 */
+ volatile unsigned long vlan_flt9; /* 0x0124 */
+ volatile unsigned long vlan_flt10; /* 0x0128 */
+ volatile unsigned long vlan_flt11; /* 0x012C */
+ volatile unsigned long vlan_flt12; /* 0x0130 */
+ volatile unsigned long vlan_flt13; /* 0x0134 */
+ volatile unsigned long vlan_flt14; /* 0x0138 */
+ volatile unsigned long vlan_flt15; /* 0x013C */
+ volatile unsigned long tp_flt10; /* 0x0140 */
+ volatile unsigned long tp_flt32; /* 0x0144 */
+ volatile unsigned long tp_flt54; /* 0x0148 */
+ volatile unsigned long tp_flt76; /* 0x014C */
+ volatile unsigned long dfsrv_map0; /* 0x0150 */
+ volatile unsigned long dfsrv_map1; /* 0x0154 */
+ volatile unsigned long dfsrv_map2; /* 0x0158 */
+ volatile unsigned long dfsrv_map3; /* 0x015C */
+ volatile unsigned long tcp_pf0; /* 0x0160 */
+ volatile unsigned long tcp_pf1; /* 0x0164 */
+ volatile unsigned long tcp_pf2; /* 0x0168 */
+ volatile unsigned long tcp_pf3; /* 0x016C */
+ volatile unsigned long tcp_pf4; /* 0x0170 */
+ volatile unsigned long tcp_pf5; /* 0x0174 */
+ volatile unsigned long tcp_pf6; /* 0x0178 */
+ volatile unsigned long tcp_pf7; /* 0x017C */
+ volatile unsigned long ra_03_00; /* 0x0180 */
+ volatile unsigned long ra_07_04; /* 0x0184 */
+ volatile unsigned long ra_0b_08; /* 0x0188 */
+ volatile unsigned long ra_0f_0c; /* 0x018C */
+ volatile unsigned long ra_13_10; /* 0x0190 */
+ volatile unsigned long ra_17_14; /* 0x0194 */
+ volatile unsigned long ra_1b_18; /* 0x0198 */
+ volatile unsigned long ra_1f_1c; /* 0x019C */
+ volatile unsigned long ra_23_20; /* 0x01A0 */
+ volatile unsigned long ra_27_24; /* 0x01A4 */
+ volatile unsigned long ra_2b_28; /* 0x01A8 */
+ volatile unsigned long ra_2f_2c; /* 0x01AC */
+ volatile unsigned long prtcl_f0; /* 0x01B0 */
+ volatile unsigned long prtcl_f1; /* 0x01B4 */
+};
+
+/*******************************************************************************
+ * ES
+ ******************************************************************************/
+#define LTQ_ES_PS_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0000))
+#define LTQ_ES_P0_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0004))
+#define LTQ_ES_P1_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0008))
+#define LTQ_ES_P2_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x000C))
+#define LTQ_ES_P0_VLAN_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0010))
+#define LTQ_ES_P1_VLAN_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0014))
+#define LTQ_ES_P2_VLAN_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0018))
+#define LTQ_ES_P0_INCTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0020))
+#define LTQ_ES_P1_INCTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0024))
+#define LTQ_ES_P2_INCTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0028))
+#define LTQ_ES_P0_ECS_Q32_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0030))
+#define LTQ_ES_P0_ECS_Q10_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0034))
+#define LTQ_ES_P0_ECW_Q32_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0038))
+#define LTQ_ES_P0_ECW_Q10_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x003C))
+#define LTQ_ES_P1_ECS_Q32_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0040))
+#define LTQ_ES_P1_ECS_Q10_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0044))
+#define LTQ_ES_P1_ECW_Q32_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0048))
+#define LTQ_ES_P1_ECW_Q10_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x004C))
+#define LTQ_ES_P2_ECS_Q32_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0050))
+#define LTQ_ES_P2_ECS_Q10_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0054))
+#define LTQ_ES_P2_ECW_Q32_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x0058))
+#define LTQ_ES_P2_ECW_Q10_REG   ((volatile unsigned int*)(LTQ_ES_BASE + 0x005C))
+#define LTQ_ES_INT_ENA_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0060))
+#define LTQ_ES_INT_ST_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0064))
+#define LTQ_ES_SW_GCTL0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0068))
+#define LTQ_ES_SW_GCTL1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x006C))
+#define LTQ_ES_ARP_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0070))
+#define LTQ_ES_STRM_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0074))
+#define LTQ_ES_RGMII_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0078))
+#define LTQ_ES_PRT_1P_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x007C))
+#define LTQ_ES_GBKT_SZBS_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0080))
+#define LTQ_ES_GBKT_SZEBS_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0084))
+#define LTQ_ES_BF_TH_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0088))
+#define LTQ_ES_PMAC_HD_CTL ((volatile unsigned int*)(LTQ_ES_BASE + 0x008C))
+#define LTQ_ES_PMAC_SA1 ((volatile unsigned int*)(LTQ_ES_BASE + 0x0090))
+#define LTQ_ES_PMAC_SA2 ((volatile unsigned int*)(LTQ_ES_BASE + 0x0094))
+#define LTQ_ES_PMAC_DA1 ((volatile unsigned int*)(LTQ_ES_BASE + 0x0098))
+#define LTQ_ES_PMAC_DA2 ((volatile unsigned int*)(LTQ_ES_BASE + 0x009C))
+#define LTQ_ES_PMAC_VLAN ((volatile unsigned int*)(LTQ_ES_BASE + 0x00A0))
+#define LTQ_ES_PMAC_TX_IPG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00A4))
+#define LTQ_ES_PMAC_RX_IPG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00A8))
+#define LTQ_ES_ADR_TB_CTL0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00AC))
+#define LTQ_ES_ADR_TB_CTL1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00B0))
+#define LTQ_ES_ADR_TB_CTL2_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00B4))
+#define LTQ_ES_ADR_TB_ST0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00B8))
+#define LTQ_ES_ADR_TB_ST1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00BC))
+#define LTQ_ES_ADR_TB_ST2_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00C0))
+#define LTQ_ES_RMON_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00C4))
+#define LTQ_ES_RMON_ST_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00C8))
+#define LTQ_ES_MDIO_CTL_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00CC))
+#define LTQ_ES_MDIO_DATA_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00D0))
+#define LTQ_ES_TP_FLT_ACT_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00D4))
+#define LTQ_ES_PRTCL_FLT_ACT_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x00D8))
+#define LTQ_ES_VLAN_FLT0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0100))
+#define LTQ_ES_VLAN_FLT1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0104))
+#define LTQ_ES_VLAN_FLT2_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0108))
+#define LTQ_ES_VLAN_FLT3_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x010C))
+#define LTQ_ES_VLAN_FLT4_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0110))
+#define LTQ_ES_VLAN_FLT5_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0114))
+#define LTQ_ES_VLAN_FLT6_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0118))
+#define LTQ_ES_VLAN_FLT7_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x011C))
+#define LTQ_ES_VLAN_FLT8_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0120))
+#define LTQ_ES_VLAN_FLT9_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0124))
+#define LTQ_ES_VLAN_FLT10_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0128))
+#define LTQ_ES_VLAN_FLT11_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x012C))
+#define LTQ_ES_VLAN_FLT12_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0130))
+#define LTQ_ES_VLAN_FLT13_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0134))
+#define LTQ_ES_VLAN_FLT14_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0138))
+#define LTQ_ES_VLAN_FLT15_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x013C))
+#define LTQ_ES_TP_FLT10_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0140))
+#define LTQ_ES_TP_FLT32_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0144))
+#define LTQ_ES_TP_FLT54_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0148))
+#define LTQ_ES_TP_FLT76_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x014C))
+#define LTQ_ES_DFSRV_MAP0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0150))
+#define LTQ_ES_DFSRV_MAP1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0154))
+#define LTQ_ES_DFSRV_MAP2_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0158))
+#define LTQ_ES_DFSRV_MAP3_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x015C))
+#define LTQ_ES_TCP_PF0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0160))
+#define LTQ_ES_TCP_PF1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0164))
+#define LTQ_ES_TCP_PF2_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0168))
+#define LTQ_ES_TCP_PF3_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x016C))
+#define LTQ_ES_TCP_PF4_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0170))
+#define LTQ_ES_TCP_PF5_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0174))
+#define LTQ_ES_TCP_PF6_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0178))
+#define LTQ_ES_TCP_PF7_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x017C))
+#define LTQ_ES_RA_03_00_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0180))
+#define LTQ_ES_RA_07_04_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0184))
+#define LTQ_ES_RA_0B_08_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0188))
+#define LTQ_ES_RA_0F_0C_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x018C))
+#define LTQ_ES_RA_13_10_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0190))
+#define LTQ_ES_RA_17_14_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0194))
+#define LTQ_ES_RA_1B_18_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x0198))
+#define LTQ_ES_RA_1F_1C_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x019C))
+#define LTQ_ES_RA_23_20_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x01A0))
+#define LTQ_ES_RA_27_24_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x01A4))
+#define LTQ_ES_RA_2B_28_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x01A8))
+#define LTQ_ES_RA_2F_2C_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x01AC))
+#define LTQ_ES_PRTCL_F0_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x01B0))
+#define LTQ_ES_PRTCL_F1_REG ((volatile unsigned int*)(LTQ_ES_BASE + 0x01B4))
+
+/*******************************************************************************
+ * Port Status Register
+ ******************************************************************************/
+
+/* Port 1 Flow Control Status (12) */
+#define LTQ_ES_PS_REG_P1FCS (0x1 << 12)
+#define LTQ_ES_PS_REG_P1FCS_GET(val) ((((val) & LTQ_ES_PS_REG_P1FCS) >> 12) & 0x1)
+/* Port 1 Duplex Status (11) */
+#define LTQ_ES_PS_REG_P1DS (0x1 << 11)
+#define LTQ_ES_PS_REG_P1DS_GET(val) ((((val) & LTQ_ES_PS_REG_P1DS) >> 11) & 0x1)
+/* Port 1 Speed High Status (10) */
+#define LTQ_ES_PS_REG_P1SHS (0x1 << 10)
+#define LTQ_ES_PS_REG_P1SHS_GET(val) ((((val) & LTQ_ES_PS_REG_P1SHS) >> 10) & 0x1)
+/* Port 1 Speed Status (9) */
+#define LTQ_ES_PS_REG_P1SS (0x1 << 9)
+#define LTQ_ES_PS_REG_P1SS_GET(val) ((((val) & LTQ_ES_PS_REG_P1SS) >> 9) & 0x1)
+/* Port 1 Link Status (8) */
+#define LTQ_ES_PS_REG_P1LS (0x1 << 8)
+#define LTQ_ES_PS_REG_P1LS_GET(val) ((((val) & LTQ_ES_PS_REG_P1LS) >> 8) & 0x1)
+/* Port 0 Flow Control Status (4) */
+#define LTQ_ES_PS_REG_P0FCS (0x1 << 4)
+#define LTQ_ES_PS_REG_P0FCS_GET(val) ((((val) & LTQ_ES_PS_REG_P0FCS) >> 4) & 0x1)
+/* Port 0 Duplex Status (3) */
+#define LTQ_ES_PS_REG_P0DS (0x1 << 3)
+#define LTQ_ES_PS_REG_P0DS_GET(val) ((((val) & LTQ_ES_PS_REG_P0DS) >> 3) & 0x1)
+/* Port 0 Speed High Status (2) */
+#define LTQ_ES_PS_REG_P0SHS (0x1 << 2)
+#define LTQ_ES_PS_REG_P0SHS_GET(val) ((((val) & LTQ_ES_PS_REG_P0SHS) >> 2) & 0x1)
+/* Port 0 Speed Status (1) */
+#define LTQ_ES_PS_REG_P0SS (0x1 << 1)
+#define LTQ_ES_PS_REG_P0SS_GET(val) ((((val) & LTQ_ES_PS_REG_P0SS) >> 1) & 0x1)
+/* Port 0 Link Status (0) */
+#define LTQ_ES_PS_REG_P0LS (0x1)
+#define LTQ_ES_PS_REG_P0LS_GET(val) ((((val) & LTQ_ES_PS_REG_P0LS) >> 0) & 0x1)
+
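+/*
+ * Minimal usage sketch for the status fields above: the _GET helpers mask
+ * and shift a previously read register value. This assumes LTQ_ES_PS_REG,
+ * the Port Status register pointer defined earlier in this header.
+ *
+ *	unsigned int ps = *LTQ_ES_PS_REG;
+ *	int p0_link_up = LTQ_ES_PS_REG_P0LS_GET(ps);
+ *	int p1_link_up = LTQ_ES_PS_REG_P1LS_GET(ps);
+ */
+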
+/*******************************************************************************
+ * P0 Control Register
+ ******************************************************************************/
+
+/* STP/RSTP port state (31:30) */
+#define LTQ_ES_P0_CTL_REG_SPS (0x3 << 30)
+#define LTQ_ES_P0_CTL_REG_SPS_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_P0_CTL_REG_SPS_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_SPS) >> 30) & 0x3)
+#define LTQ_ES_P0_CTL_REG_SPS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_SPS) | (((val) & 0x3) << 30))
+/* TCP/UDP PRIEN (29) */
+#define LTQ_ES_P0_CTL_REG_TCPE (0x1 << 29)
+#define LTQ_ES_P0_CTL_REG_TCPE_VAL(val) (((val) & 0x1) << 29)
+#define LTQ_ES_P0_CTL_REG_TCPE_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_TCPE) >> 29) & 0x1)
+#define LTQ_ES_P0_CTL_REG_TCPE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_TCPE) | (((val) & 0x1) << 29))
+/* IP over TCP/UDP (28) */
+#define LTQ_ES_P0_CTL_REG_IPOVTU (0x1 << 28)
+#define LTQ_ES_P0_CTL_REG_IPOVTU_VAL(val) (((val) & 0x1) << 28)
+#define LTQ_ES_P0_CTL_REG_IPOVTU_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_IPOVTU) >> 28) & 0x1)
+#define LTQ_ES_P0_CTL_REG_IPOVTU_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_IPOVTU) | (((val) & 0x1) << 28))
+/* VLAN Priority Enable (27) */
+#define LTQ_ES_P0_CTL_REG_VPE (0x1 << 27)
+#define LTQ_ES_P0_CTL_REG_VPE_VAL(val) (((val) & 0x1) << 27)
+#define LTQ_ES_P0_CTL_REG_VPE_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_VPE) >> 27) & 0x1)
+#define LTQ_ES_P0_CTL_REG_VPE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_VPE) | (((val) & 0x1) << 27))
+/* Service Priority Enable (26) */
+#define LTQ_ES_P0_CTL_REG_SPE (0x1 << 26)
+#define LTQ_ES_P0_CTL_REG_SPE_VAL(val) (((val) & 0x1) << 26)
+#define LTQ_ES_P0_CTL_REG_SPE_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_SPE) >> 26) & 0x1)
+#define LTQ_ES_P0_CTL_REG_SPE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_SPE) | (((val) & 0x1) << 26))
+/* IP over VLAN PRI (25) */
+#define LTQ_ES_P0_CTL_REG_IPVLAN (0x1 << 25)
+#define LTQ_ES_P0_CTL_REG_IPVLAN_VAL(val) (((val) & 0x1) << 25)
+#define LTQ_ES_P0_CTL_REG_IPVLAN_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_IPVLAN) >> 25) & 0x1)
+#define LTQ_ES_P0_CTL_REG_IPVLAN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_IPVLAN) | (((val) & 0x1) << 25))
+/* Ether Type Priority Enable (24) */
+#define LTQ_ES_P0_CTL_REG_TPE (0x1 << 24)
+#define LTQ_ES_P0_CTL_REG_TPE_VAL(val) (((val) & 0x1) << 24)
+#define LTQ_ES_P0_CTL_REG_TPE_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_TPE) >> 24) & 0x1)
+#define LTQ_ES_P0_CTL_REG_TPE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_TPE) | (((val) & 0x1) << 24))
+/* Force Link Up (18) */
+#define LTQ_ES_P0_CTL_REG_FLP (0x1 << 18)
+#define LTQ_ES_P0_CTL_REG_FLP_VAL(val) (((val) & 0x1) << 18)
+#define LTQ_ES_P0_CTL_REG_FLP_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_FLP) >> 18) & 0x1)
+#define LTQ_ES_P0_CTL_REG_FLP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_FLP) | (((val) & 0x1) << 18))
+/* Force Link Down (17) */
+#define LTQ_ES_P0_CTL_REG_FLD (0x1 << 17)
+#define LTQ_ES_P0_CTL_REG_FLD_VAL(val) (((val) & 0x1) << 17)
+#define LTQ_ES_P0_CTL_REG_FLD_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_FLD) >> 17) & 0x1)
+#define LTQ_ES_P0_CTL_REG_FLD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_FLD) | (((val) & 0x1) << 17))
+/* Ratio Mode for WFQ (16) */
+#define LTQ_ES_P0_CTL_REG_RMWFQ (0x1 << 16)
+#define LTQ_ES_P0_CTL_REG_RMWFQ_VAL(val) (((val) & 0x1) << 16)
+#define LTQ_ES_P0_CTL_REG_RMWFQ_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_RMWFQ) >> 16) & 0x1)
+#define LTQ_ES_P0_CTL_REG_RMWFQ_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_RMWFQ) | (((val) & 0x1) << 16))
+/* Aging Disable (15) */
+#define LTQ_ES_P0_CTL_REG_AD (0x1 << 15)
+#define LTQ_ES_P0_CTL_REG_AD_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_P0_CTL_REG_AD_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_AD) >> 15) & 0x1)
+#define LTQ_ES_P0_CTL_REG_AD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_AD) | (((val) & 0x1) << 15))
+/* Learning Disable (14) */
+#define LTQ_ES_P0_CTL_REG_LD (0x1 << 14)
+#define LTQ_ES_P0_CTL_REG_LD_VAL(val) (((val) & 0x1) << 14)
+#define LTQ_ES_P0_CTL_REG_LD_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_LD) >> 14) & 0x1)
+#define LTQ_ES_P0_CTL_REG_LD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_LD) | (((val) & 0x1) << 14))
+/* Maximum Number of Addresses (12:8) */
+#define LTQ_ES_P0_CTL_REG_MNA024 (0x1f << 8)
+#define LTQ_ES_P0_CTL_REG_MNA024_VAL(val) (((val) & 0x1f) << 8)
+#define LTQ_ES_P0_CTL_REG_MNA024_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_MNA024) >> 8) & 0x1f)
+#define LTQ_ES_P0_CTL_REG_MNA024_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_MNA024) | (((val) & 0x1f) << 8))
+/* PPPOE Port Only (7) */
+#define LTQ_ES_P0_CTL_REG_PPPOEP (0x1 << 7)
+#define LTQ_ES_P0_CTL_REG_PPPOEP_VAL(val) (((val) & 0x1) << 7)
+#define LTQ_ES_P0_CTL_REG_PPPOEP_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_PPPOEP) >> 7) & 0x1)
+#define LTQ_ES_P0_CTL_REG_PPPOEP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_PPPOEP) | (((val) & 0x1) << 7))
+/* PPPOE Manage (6) */
+#define LTQ_ES_P0_CTL_REG_PM (0x1 << 6)
+#define LTQ_ES_P0_CTL_REG_PM_VAL(val) (((val) & 0x1) << 6)
+#define LTQ_ES_P0_CTL_REG_PM_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_PM) >> 6) & 0x1)
+#define LTQ_ES_P0_CTL_REG_PM_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_PM) | (((val) & 0x1) << 6))
+/* Port Mirror Option (5:4) */
+#define LTQ_ES_P0_CTL_REG_IPMO (0x3 << 4)
+#define LTQ_ES_P0_CTL_REG_IPMO_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_P0_CTL_REG_IPMO_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_IPMO) >> 4) & 0x3)
+#define LTQ_ES_P0_CTL_REG_IPMO_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_IPMO) | (((val) & 0x3) << 4))
+/* 802.1x Port Authorized state (3:2) */
+#define LTQ_ES_P0_CTL_REG_PAS (0x3 << 2)
+#define LTQ_ES_P0_CTL_REG_PAS_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_P0_CTL_REG_PAS_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_PAS) >> 2) & 0x3)
+#define LTQ_ES_P0_CTL_REG_PAS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_PAS) | (((val) & 0x3) << 2))
+/* Drop Scheme for 802.1x violation (1) */
+#define LTQ_ES_P0_CTL_REG_DSV8021X (0x1 << 1)
+#define LTQ_ES_P0_CTL_REG_DSV8021X_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_ES_P0_CTL_REG_DSV8021X_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_DSV8021X) >> 1) & 0x1)
+#define LTQ_ES_P0_CTL_REG_DSV8021X_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_DSV8021X) | (((val) & 0x1) << 1))
+/* ByPass Mode for Output (0) */
+#define LTQ_ES_P0_CTL_REG_BYPASS (0x1)
+#define LTQ_ES_P0_CTL_REG_BYPASS_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_ES_P0_CTL_REG_BYPASS_GET(val) ((((val) & LTQ_ES_P0_CTL_REG_BYPASS) >> 0) & 0x1)
+#define LTQ_ES_P0_CTL_REG_BYPASS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_CTL_REG_BYPASS) | (((val) & 0x1) << 0))
+
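+/*
+ * Minimal usage sketch for the control fields above: the _SET helpers do a
+ * read-modify-write on a register value held in a local variable. This
+ * assumes LTQ_ES_P0_CTL_REG, the Port 0 Control register pointer defined
+ * earlier in this header.
+ *
+ *	unsigned int ctl = *LTQ_ES_P0_CTL_REG;
+ *	LTQ_ES_P0_CTL_REG_FLP_SET(ctl, 1);	force the link up
+ *	LTQ_ES_P0_CTL_REG_LD_SET(ctl, 1);	disable address learning
+ *	*LTQ_ES_P0_CTL_REG = ctl;
+ */
+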
+/*******************************************************************************
+ * Port 0 VLAN Control Register
+ ******************************************************************************/
+
+/* Default FID (31:30) */
+#define LTQ_ES_P0_VLAN_REG_DFID (0x3 << 30)
+#define LTQ_ES_P0_VLAN_REG_DFID_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_P0_VLAN_REG_DFID_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_DFID) >> 30) & 0x3)
+#define LTQ_ES_P0_VLAN_REG_DFID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_DFID) | (((val) & 0x3) << 30))
+/* Tagged Base VLAN Enable (29) */
+#define LTQ_ES_P0_VLAN_REG_TBVE (0x1 << 29)
+#define LTQ_ES_P0_VLAN_REG_TBVE_VAL(val) (((val) & 0x1) << 29)
+#define LTQ_ES_P0_VLAN_REG_TBVE_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_TBVE) >> 29) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_TBVE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_TBVE) | (((val) & 0x1) << 29))
+/* Input Force No TAG Enable (28) */
+#define LTQ_ES_P0_VLAN_REG_IFNTE (0x1 << 28)
+#define LTQ_ES_P0_VLAN_REG_IFNTE_VAL(val) (((val) & 0x1) << 28)
+#define LTQ_ES_P0_VLAN_REG_IFNTE_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_IFNTE) >> 28) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_IFNTE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_IFNTE) | (((val) & 0x1) << 28))
+/* VID Check with the VID table (27) */
+#define LTQ_ES_P0_VLAN_REG_VC (0x1 << 27)
+#define LTQ_ES_P0_VLAN_REG_VC_VAL(val) (((val) & 0x1) << 27)
+#define LTQ_ES_P0_VLAN_REG_VC_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_VC) >> 27) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_VC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_VC) | (((val) & 0x1) << 27))
+/* VLAN Security Disable (26) */
+#define LTQ_ES_P0_VLAN_REG_VSD (0x1 << 26)
+#define LTQ_ES_P0_VLAN_REG_VSD_VAL(val) (((val) & 0x1) << 26)
+#define LTQ_ES_P0_VLAN_REG_VSD_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_VSD) >> 26) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_VSD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_VSD) | (((val) & 0x1) << 26))
+/* Admit Only VLAN_Tagged Packet (25) */
+#define LTQ_ES_P0_VLAN_REG_AOVTP (0x1 << 25)
+#define LTQ_ES_P0_VLAN_REG_AOVTP_VAL(val) (((val) & 0x1) << 25)
+#define LTQ_ES_P0_VLAN_REG_AOVTP_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_AOVTP) >> 25) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_AOVTP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_AOVTP) | (((val) & 0x1) << 25))
+/* VLAN Member Check Enable (24) */
+#define LTQ_ES_P0_VLAN_REG_VMCE (0x1 << 24)
+#define LTQ_ES_P0_VLAN_REG_VMCE_VAL(val) (((val) & 0x1) << 24)
+#define LTQ_ES_P0_VLAN_REG_VMCE_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_VMCE) >> 24) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_VMCE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_VMCE) | (((val) & 0x1) << 24))
+/* Reserved (23:19) */
+#define LTQ_ES_P0_VLAN_REG_RES (0x1f << 19)
+#define LTQ_ES_P0_VLAN_REG_RES_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_RES) >> 19) & 0x1f)
+/* Default VLAN Port Map (18:16) */
+#define LTQ_ES_P0_VLAN_REG_DVPM (0x7 << 16)
+#define LTQ_ES_P0_VLAN_REG_DVPM_VAL(val) (((val) & 0x7) << 16)
+#define LTQ_ES_P0_VLAN_REG_DVPM_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_DVPM) >> 16) & 0x7)
+#define LTQ_ES_P0_VLAN_REG_DVPM_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_DVPM) | (((val) & 0x7) << 16))
+/* Port Priority (15:14) */
+#define LTQ_ES_P0_VLAN_REG_PP (0x3 << 14)
+#define LTQ_ES_P0_VLAN_REG_PP_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_P0_VLAN_REG_PP_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_PP) >> 14) & 0x3)
+#define LTQ_ES_P0_VLAN_REG_PP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_PP) | (((val) & 0x3) << 14))
+/* Port Priority Enable (13) */
+#define LTQ_ES_P0_VLAN_REG_PPE (0x1 << 13)
+#define LTQ_ES_P0_VLAN_REG_PPE_VAL(val) (((val) & 0x1) << 13)
+#define LTQ_ES_P0_VLAN_REG_PPE_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_PPE) >> 13) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_PPE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_PPE) | (((val) & 0x1) << 13))
+/* Port-based VLAN tag member for Port 0 (12) */
+#define LTQ_ES_P0_VLAN_REG_PVTAGMP (0x1 << 12)
+#define LTQ_ES_P0_VLAN_REG_PVTAGMP_VAL(val) (((val) & 0x1) << 12)
+#define LTQ_ES_P0_VLAN_REG_PVTAGMP_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_PVTAGMP) >> 12) & 0x1)
+#define LTQ_ES_P0_VLAN_REG_PVTAGMP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_PVTAGMP) | (((val) & 0x1) << 12))
+/* PVID (11:0) */
+#define LTQ_ES_P0_VLAN_REG_PVID (0xfff)
+#define LTQ_ES_P0_VLAN_REG_PVID_VAL(val) (((val) & 0xfff) << 0)
+#define LTQ_ES_P0_VLAN_REG_PVID_GET(val) ((((val) & LTQ_ES_P0_VLAN_REG_PVID) >> 0) & 0xfff)
+#define LTQ_ES_P0_VLAN_REG_PVID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_VLAN_REG_PVID) | (((val) & 0xfff) << 0))
+
+/*******************************************************************************
+ * Port 0 Ingress Control Register
+ ******************************************************************************/
+
+/* Reserved (31:13) */
+#define LTQ_ES_P0_INCTL_REG_RES (0x7ffff << 13)
+#define LTQ_ES_P0_INCTL_REG_RES_GET(val) ((((val) & LTQ_ES_P0_INCTL_REG_RES) >> 13) & 0x7ffff)
+/* Port 0 Ingress/Egress Timer Tick T selection (12:11) */
+#define LTQ_ES_P0_INCTL_REG_P0ITT (0x3 << 11)
+#define LTQ_ES_P0_INCTL_REG_P0ITT_VAL(val) (((val) & 0x3) << 11)
+#define LTQ_ES_P0_INCTL_REG_P0ITT_GET(val) ((((val) & LTQ_ES_P0_INCTL_REG_P0ITT) >> 11) & 0x3)
+#define LTQ_ES_P0_INCTL_REG_P0ITT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_INCTL_REG_P0ITT) | (((val) & 0x3) << 11))
+/* Port 0 Ingress Token R (10:0) */
+#define LTQ_ES_P0_INCTL_REG_P0ITR (0x7ff)
+#define LTQ_ES_P0_INCTL_REG_P0ITR_VAL(val) (((val) & 0x7ff) << 0)
+#define LTQ_ES_P0_INCTL_REG_P0ITR_GET(val) ((((val) & LTQ_ES_P0_INCTL_REG_P0ITR) >> 0) & 0x7ff)
+#define LTQ_ES_P0_INCTL_REG_P0ITR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_INCTL_REG_P0ITR) | (((val) & 0x7ff) << 0))
+
+/*******************************************************************************
+ * Port 0 Egress Control for Strict Q32 Register
+ ******************************************************************************/
+
+/* Port 0 Egress Token R for Strict Priority Q3 (26:16) */
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ3TR (0x7ff << 16)
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ3TR_VAL(val) (((val) & 0x7ff) << 16)
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ3TR_GET(val) ((((val) & LTQ_ES_P0_ECS_Q32_REG_P0SPQ3TR) >> 16) & 0x7ff)
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ3TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECS_Q32_REG_P0SPQ3TR) | (((val) & 0x7ff) << 16))
+/* Port 0 Egress Token R for Strict Priority Q2 (10:0) */
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ2TR (0x7ff)
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ2TR_VAL(val) (((val) & 0x7ff) << 0)
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ2TR_GET(val) ((((val) & LTQ_ES_P0_ECS_Q32_REG_P0SPQ2TR) >> 0) & 0x7ff)
+#define LTQ_ES_P0_ECS_Q32_REG_P0SPQ2TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECS_Q32_REG_P0SPQ2TR) | (((val) & 0x7ff) << 0))
+
+/*******************************************************************************
+ * Port 0 Egress Control for Strict Q10 Register
+ ******************************************************************************/
+
+/* Reserved (31:27) */
+#define LTQ_ES_P0_ECS_Q10_REG_RES (0x1f << 27)
+#define LTQ_ES_P0_ECS_Q10_REG_RES_GET(val) ((((val) & LTQ_ES_P0_ECS_Q10_REG_RES) >> 27) & 0x1f)
+/* Port 0 Egress Token R for Strict Priority Q1 (26:16) */
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ1TR (0x7ff << 16)
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ1TR_VAL(val) (((val) & 0x7ff) << 16)
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ1TR_GET(val) ((((val) & LTQ_ES_P0_ECS_Q10_REG_P0SPQ1TR) >> 16) & 0x7ff)
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ1TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECS_Q10_REG_P0SPQ1TR) | (((val) & 0x7ff) << 16))
+/* Port 0 Egress Token R for Strict Priority Q0 (10:0) */
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ0TR (0x7ff)
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ0TR_VAL(val) (((val) & 0x7ff) << 0)
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ0TR_GET(val) ((((val) & LTQ_ES_P0_ECS_Q10_REG_P0SPQ0TR) >> 0) & 0x7ff)
+#define LTQ_ES_P0_ECS_Q10_REG_P0SPQ0TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECS_Q10_REG_P0SPQ0TR) | (((val) & 0x7ff) << 0))
+
+/*******************************************************************************
+ * Port 0 Egress Control for WFQ Q32 Register
+ ******************************************************************************/
+
+/* Reserved (31:27) */
+#define LTQ_ES_P0_ECW_Q32_REG_RES (0x1f << 27)
+#define LTQ_ES_P0_ECW_Q32_REG_RES_GET(val) ((((val) & LTQ_ES_P0_ECW_Q32_REG_RES) >> 27) & 0x1f)
+/* Port 0 Egress Token R for WFQ Q3 (26:16) */
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ3TR (0x7ff << 16)
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ3TR_VAL(val) (((val) & 0x7ff) << 16)
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ3TR_GET(val) ((((val) & LTQ_ES_P0_ECW_Q32_REG_P0WQ3TR) >> 16) & 0x7ff)
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ3TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECW_Q32_REG_P0WQ3TR) | (((val) & 0x7ff) << 16))
+/* Port 0 Egress Token R for WFQ Q2 (10:0) */
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ2TR (0x7ff)
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ2TR_VAL(val) (((val) & 0x7ff) << 0)
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ2TR_GET(val) ((((val) & LTQ_ES_P0_ECW_Q32_REG_P0WQ2TR) >> 0) & 0x7ff)
+#define LTQ_ES_P0_ECW_Q32_REG_P0WQ2TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECW_Q32_REG_P0WQ2TR) | (((val) & 0x7ff) << 0))
+
+/*******************************************************************************
+ * Port 0 Egress Control for WFQ Q10 Register
+ ******************************************************************************/
+
+/* Reserved (31:27) */
+#define LTQ_ES_P0_ECW_Q10_REG_RES (0x1f << 27)
+#define LTQ_ES_P0_ECW_Q10_REG_RES_GET(val) ((((val) & LTQ_ES_P0_ECW_Q10_REG_RES) >> 27) & 0x1f)
+/* Port 0 Egress Token R for WFQ Q1 (26:16) */
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ1TR (0x7ff << 16)
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ1TR_VAL(val) (((val) & 0x7ff) << 16)
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ1TR_GET(val) ((((val) & LTQ_ES_P0_ECW_Q10_REG_P0WQ1TR) >> 16) & 0x7ff)
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ1TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECW_Q10_REG_P0WQ1TR) | (((val) & 0x7ff) << 16))
+/* Port 0 Egress Token R for WFQ Q0 (10:0) */
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ0TR (0x7ff)
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ0TR_VAL(val) (((val) & 0x7ff) << 0)
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ0TR_GET(val) ((((val) & LTQ_ES_P0_ECW_Q10_REG_P0WQ0TR) >> 0) & 0x7ff)
+#define LTQ_ES_P0_ECW_Q10_REG_P0WQ0TR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_P0_ECW_Q10_REG_P0WQ0TR) | (((val) & 0x7ff) << 0))
+
+/*******************************************************************************
+ * Interrupt Enable Register
+ ******************************************************************************/
+
+/* Reserved (31:8) */
+#define LTQ_ES_INT_ENA_REG_RES (0xffffff << 8)
+#define LTQ_ES_INT_ENA_REG_RES_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_RES) >> 8) & 0xffffff)
+/* Data Buffer is Full Interrupt Enable (7) */
+#define LTQ_ES_INT_ENA_REG_DBFIE (0x1 << 7)
+#define LTQ_ES_INT_ENA_REG_DBFIE_VAL(val) (((val) & 0x1) << 7)
+#define LTQ_ES_INT_ENA_REG_DBFIE_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_DBFIE) >> 7) & 0x1)
+#define LTQ_ES_INT_ENA_REG_DBFIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_INT_ENA_REG_DBFIE) | (((val) & 0x1) << 7))
+/* Data Buffer is nearly Full Interrupt Enable (6) */
+#define LTQ_ES_INT_ENA_REG_DBNFIE (0x1 << 6)
+#define LTQ_ES_INT_ENA_REG_DBNFIE_VAL(val) (((val) & 0x1) << 6)
+#define LTQ_ES_INT_ENA_REG_DBNFIE_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_DBNFIE) >> 6) & 0x1)
+#define LTQ_ES_INT_ENA_REG_DBNFIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_INT_ENA_REG_DBNFIE) | (((val) & 0x1) << 6))
+/* Learning Table Full Interrupt Enable (5) */
+#define LTQ_ES_INT_ENA_REG_LTFIE (0x1 << 5)
+#define LTQ_ES_INT_ENA_REG_LTFIE_VAL(val) (((val) & 0x1) << 5)
+#define LTQ_ES_INT_ENA_REG_LTFIE_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_LTFIE) >> 5) & 0x1)
+#define LTQ_ES_INT_ENA_REG_LTFIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_INT_ENA_REG_LTFIE) | (((val) & 0x1) << 5))
+/* Learning Table Access Done Interrupt Enable (4) */
+#define LTQ_ES_INT_ENA_REG_LTADIE (0x1 << 4)
+#define LTQ_ES_INT_ENA_REG_LTADIE_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_ES_INT_ENA_REG_LTADIE_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_LTADIE) >> 4) & 0x1)
+#define LTQ_ES_INT_ENA_REG_LTADIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_INT_ENA_REG_LTADIE) | (((val) & 0x1) << 4))
+/* Port Security Violation Interrupt Enable (3:1) */
+#define LTQ_ES_INT_ENA_REG_PSVIE (0x7 << 1)
+#define LTQ_ES_INT_ENA_REG_PSVIE_VAL(val) (((val) & 0x7) << 1)
+#define LTQ_ES_INT_ENA_REG_PSVIE_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_PSVIE) >> 1) & 0x7)
+#define LTQ_ES_INT_ENA_REG_PSVIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_INT_ENA_REG_PSVIE) | (((val) & 0x7) << 1))
+/* Port Status Change Interrupt Enable (0) */
+#define LTQ_ES_INT_ENA_REG_PSCIE (0x1)
+#define LTQ_ES_INT_ENA_REG_PSCIE_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_ES_INT_ENA_REG_PSCIE_GET(val) ((((val) & LTQ_ES_INT_ENA_REG_PSCIE) >> 0) & 0x1)
+#define LTQ_ES_INT_ENA_REG_PSCIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_INT_ENA_REG_PSCIE) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Interrupt Status Register
+ ******************************************************************************/
+
+/* Reserved (31:8) */
+#define LTQ_ES_INT_ST_REG_RES (0xffffff << 8)
+#define LTQ_ES_INT_ST_REG_RES_GET(val) ((((val) & LTQ_ES_INT_ST_REG_RES) >> 8) & 0xffffff)
+/* Data Buffer is Full (7) */
+#define LTQ_ES_INT_ST_REG_DBF (0x1 << 7)
+#define LTQ_ES_INT_ST_REG_DBF_GET(val) ((((val) & LTQ_ES_INT_ST_REG_DBF) >> 7) & 0x1)
+/* Data Buffer is nearly Full (6) */
+#define LTQ_ES_INT_ST_REG_DBNF (0x1 << 6)
+#define LTQ_ES_INT_ST_REG_DBNF_GET(val) ((((val) & LTQ_ES_INT_ST_REG_DBNF) >> 6) & 0x1)
+/* Learning Table Full (5) */
+#define LTQ_ES_INT_ST_REG_LTF (0x1 << 5)
+#define LTQ_ES_INT_ST_REG_LTF_GET(val) ((((val) & LTQ_ES_INT_ST_REG_LTF) >> 5) & 0x1)
+/* Learning Table Access Done (4) */
+#define LTQ_ES_INT_ST_REG_LTAD (0x1 << 4)
+#define LTQ_ES_INT_ST_REG_LTAD_GET(val) ((((val) & LTQ_ES_INT_ST_REG_LTAD) >> 4) & 0x1)
+/* Port Security Violation (3:1) */
+#define LTQ_ES_INT_ST_REG_PSV (0x7 << 1)
+#define LTQ_ES_INT_ST_REG_PSV_GET(val) ((((val) & LTQ_ES_INT_ST_REG_PSV) >> 1) & 0x7)
+/* Port Status Change (0) */
+#define LTQ_ES_INT_ST_REG_PSC (0x1)
+#define LTQ_ES_INT_ST_REG_PSC_GET(val) ((((val) & LTQ_ES_INT_ST_REG_PSC) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Switch Global Control Register 0
+ ******************************************************************************/
+
+/* Switch Enable (31) */
+#define LTQ_ES_SW_GCTL0_REG_SE (0x1 << 31)
+#define LTQ_ES_SW_GCTL0_REG_SE_VAL(val) (((val) & 0x1) << 31)
+#define LTQ_ES_SW_GCTL0_REG_SE_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_SE) >> 31) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_SE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_SE) | (((val) & 0x1) << 31))
+/* CRC Check Disable (30) */
+#define LTQ_ES_SW_GCTL0_REG_ICRCCD (0x1 << 30)
+#define LTQ_ES_SW_GCTL0_REG_ICRCCD_VAL(val) (((val) & 0x1) << 30)
+#define LTQ_ES_SW_GCTL0_REG_ICRCCD_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_ICRCCD) >> 30) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_ICRCCD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_ICRCCD) | (((val) & 0x1) << 30))
+/* Replace VID0 (28) */
+#define LTQ_ES_SW_GCTL0_REG_RVID0 (0x1 << 28)
+#define LTQ_ES_SW_GCTL0_REG_RVID0_VAL(val) (((val) & 0x1) << 28)
+#define LTQ_ES_SW_GCTL0_REG_RVID0_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_RVID0) >> 28) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_RVID0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_RVID0) | (((val) & 0x1) << 28))
+/* Replace VID1 (27) */
+#define LTQ_ES_SW_GCTL0_REG_RVID1 (0x1 << 27)
+#define LTQ_ES_SW_GCTL0_REG_RVID1_VAL(val) (((val) & 0x1) << 27)
+#define LTQ_ES_SW_GCTL0_REG_RVID1_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_RVID1) >> 27) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_RVID1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_RVID1) | (((val) & 0x1) << 27))
+/* Replace VIDFFF (26) */
+#define LTQ_ES_SW_GCTL0_REG_RVIDFFF (0x1 << 26)
+#define LTQ_ES_SW_GCTL0_REG_RVIDFFF_VAL(val) (((val) & 0x1) << 26)
+#define LTQ_ES_SW_GCTL0_REG_RVIDFFF_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_RVIDFFF) >> 26) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_RVIDFFF_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_RVIDFFF) | (((val) & 0x1) << 26))
+/* Priority Change Rule (25) */
+#define LTQ_ES_SW_GCTL0_REG_PCR (0x1 << 25)
+#define LTQ_ES_SW_GCTL0_REG_PCR_VAL(val) (((val) & 0x1) << 25)
+#define LTQ_ES_SW_GCTL0_REG_PCR_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_PCR) >> 25) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_PCR_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_PCR) | (((val) & 0x1) << 25))
+/* Priority Change Enable (24) */
+#define LTQ_ES_SW_GCTL0_REG_PCE (0x1 << 24)
+#define LTQ_ES_SW_GCTL0_REG_PCE_VAL(val) (((val) & 0x1) << 24)
+#define LTQ_ES_SW_GCTL0_REG_PCE_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_PCE) >> 24) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_PCE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_PCE) | (((val) & 0x1) << 24))
+/* Transmit Short IPG Enable (23) */
+#define LTQ_ES_SW_GCTL0_REG_TSIPGE (0x1 << 23)
+#define LTQ_ES_SW_GCTL0_REG_TSIPGE_VAL(val) (((val) & 0x1) << 23)
+#define LTQ_ES_SW_GCTL0_REG_TSIPGE_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_TSIPGE) >> 23) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_TSIPGE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_TSIPGE) | (((val) & 0x1) << 23))
+/* PHY Base Address (22) */
+#define LTQ_ES_SW_GCTL0_REG_PHYBA (0x1 << 22)
+#define LTQ_ES_SW_GCTL0_REG_PHYBA_VAL(val) (((val) & 0x1) << 22)
+#define LTQ_ES_SW_GCTL0_REG_PHYBA_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_PHYBA) >> 22) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_PHYBA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_PHYBA) | (((val) & 0x1) << 22))
+/* Drop Packet When Excessive Collision Happen (21) */
+#define LTQ_ES_SW_GCTL0_REG_DPWECH (0x1 << 21)
+#define LTQ_ES_SW_GCTL0_REG_DPWECH_VAL(val) (((val) & 0x1) << 21)
+#define LTQ_ES_SW_GCTL0_REG_DPWECH_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_DPWECH) >> 21) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_DPWECH_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_DPWECH) | (((val) & 0x1) << 21))
+/* Aging Timer Select (20:18) */
+#define LTQ_ES_SW_GCTL0_REG_ATS (0x7 << 18)
+#define LTQ_ES_SW_GCTL0_REG_ATS_VAL(val) (((val) & 0x7) << 18)
+#define LTQ_ES_SW_GCTL0_REG_ATS_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_ATS) >> 18) & 0x7)
+#define LTQ_ES_SW_GCTL0_REG_ATS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_ATS) | (((val) & 0x7) << 18))
+/* Mirror CRC Also (17) */
+#define LTQ_ES_SW_GCTL0_REG_MCA (0x1 << 17)
+#define LTQ_ES_SW_GCTL0_REG_MCA_VAL(val) (((val) & 0x1) << 17)
+#define LTQ_ES_SW_GCTL0_REG_MCA_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_MCA) >> 17) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_MCA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_MCA) | (((val) & 0x1) << 17))
+/* Mirror RXER Also (16) */
+#define LTQ_ES_SW_GCTL0_REG_MRA (0x1 << 16)
+#define LTQ_ES_SW_GCTL0_REG_MRA_VAL(val) (((val) & 0x1) << 16)
+#define LTQ_ES_SW_GCTL0_REG_MRA_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_MRA) >> 16) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_MRA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_MRA) | (((val) & 0x1) << 16))
+/* Mirror PAUSE Also (15) */
+#define LTQ_ES_SW_GCTL0_REG_MPA (0x1 << 15)
+#define LTQ_ES_SW_GCTL0_REG_MPA_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_SW_GCTL0_REG_MPA_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_MPA) >> 15) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_MPA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_MPA) | (((val) & 0x1) << 15))
+/* Mirror Long Also (14) */
+#define LTQ_ES_SW_GCTL0_REG_MLA (0x1 << 14)
+#define LTQ_ES_SW_GCTL0_REG_MLA_VAL(val) (((val) & 0x1) << 14)
+#define LTQ_ES_SW_GCTL0_REG_MLA_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_MLA) >> 14) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_MLA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_MLA) | (((val) & 0x1) << 14))
+/* Mirror Short Also (13) */
+#define LTQ_ES_SW_GCTL0_REG_MSA (0x1 << 13)
+#define LTQ_ES_SW_GCTL0_REG_MSA_VAL(val) (((val) & 0x1) << 13)
+#define LTQ_ES_SW_GCTL0_REG_MSA_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_MSA) >> 13) & 0x1)
+#define LTQ_ES_SW_GCTL0_REG_MSA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_MSA) | (((val) & 0x1) << 13))
+/* Sniffer port number (12:11) */
+#define LTQ_ES_SW_GCTL0_REG_SNIFFPN (0x3 << 11)
+#define LTQ_ES_SW_GCTL0_REG_SNIFFPN_VAL(val) (((val) & 0x3) << 11)
+#define LTQ_ES_SW_GCTL0_REG_SNIFFPN_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_SNIFFPN) >> 11) & 0x3)
+#define LTQ_ES_SW_GCTL0_REG_SNIFFPN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_SNIFFPN) | (((val) & 0x3) << 11))
+/* Max Packet Length (MAXPKTLEN) (9:8) */
+#define LTQ_ES_SW_GCTL0_REG_MPL (0x3 << 8)
+#define LTQ_ES_SW_GCTL0_REG_MPL_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_SW_GCTL0_REG_MPL_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_MPL) >> 8) & 0x3)
+#define LTQ_ES_SW_GCTL0_REG_MPL_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_MPL) | (((val) & 0x3) << 8))
+/* Discard Mode (Drop scheme for Packets Classified as Q3) (7:6) */
+#define LTQ_ES_SW_GCTL0_REG_DMQ3 (0x3 << 6)
+#define LTQ_ES_SW_GCTL0_REG_DMQ3_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_SW_GCTL0_REG_DMQ3_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_DMQ3) >> 6) & 0x3)
+#define LTQ_ES_SW_GCTL0_REG_DMQ3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_DMQ3) | (((val) & 0x3) << 6))
+/* Discard Mode (Drop scheme for Packets Classified as Q2) (5:4) */
+#define LTQ_ES_SW_GCTL0_REG_DMQ2 (0x3 << 4)
+#define LTQ_ES_SW_GCTL0_REG_DMQ2_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_SW_GCTL0_REG_DMQ2_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_DMQ2) >> 4) & 0x3)
+#define LTQ_ES_SW_GCTL0_REG_DMQ2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_DMQ2) | (((val) & 0x3) << 4))
+/* Discard Mode (Drop scheme for Packets Classified as Q1) (3:2) */
+#define LTQ_ES_SW_GCTL0_REG_DMQ1 (0x3 << 2)
+#define LTQ_ES_SW_GCTL0_REG_DMQ1_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_SW_GCTL0_REG_DMQ1_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_DMQ1) >> 2) & 0x3)
+#define LTQ_ES_SW_GCTL0_REG_DMQ1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_DMQ1) | (((val) & 0x3) << 2))
+/* Discard Mode (Drop scheme for Packets Classified as Q0) (1:0) */
+#define LTQ_ES_SW_GCTL0_REG_DMQ0 (0x3)
+#define LTQ_ES_SW_GCTL0_REG_DMQ0_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_SW_GCTL0_REG_DMQ0_GET(val) ((((val) & LTQ_ES_SW_GCTL0_REG_DMQ0) >> 0) & 0x3)
+#define LTQ_ES_SW_GCTL0_REG_DMQ0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL0_REG_DMQ0) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * Switch Global Control Register 1
+ ******************************************************************************/
+
+/* BIST Done (27) */
+#define LTQ_ES_SW_GCTL1_REG_BISTDN (0x1 << 27)
+#define LTQ_ES_SW_GCTL1_REG_BISTDN_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_BISTDN) >> 27) & 0x1)
+/* Enable drop scheme of TX and RX (26) */
+#define LTQ_ES_SW_GCTL1_REG_EDSTX (0x1 << 26)
+#define LTQ_ES_SW_GCTL1_REG_EDSTX_VAL(val) (((val) & 0x1) << 26)
+#define LTQ_ES_SW_GCTL1_REG_EDSTX_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_EDSTX) >> 26) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_EDSTX_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_EDSTX) | (((val) & 0x1) << 26))
+/* Congestion threshold for TX queue (25:24) */
+#define LTQ_ES_SW_GCTL1_REG_CTTX (0x3 << 24)
+#define LTQ_ES_SW_GCTL1_REG_CTTX_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_SW_GCTL1_REG_CTTX_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_CTTX) >> 24) & 0x3)
+#define LTQ_ES_SW_GCTL1_REG_CTTX_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_CTTX) | (((val) & 0x3) << 24))
+/* Input Jam Threshold (23:21) */
+#define LTQ_ES_SW_GCTL1_REG_IJT (0x7 << 21)
+#define LTQ_ES_SW_GCTL1_REG_IJT_VAL(val) (((val) & 0x7) << 21)
+#define LTQ_ES_SW_GCTL1_REG_IJT_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_IJT) >> 21) & 0x7)
+#define LTQ_ES_SW_GCTL1_REG_IJT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_IJT) | (((val) & 0x7) << 21))
+/* Do not Identify VLAN after SNAP (20) */
+#define LTQ_ES_SW_GCTL1_REG_DIVS (0x1 << 20)
+#define LTQ_ES_SW_GCTL1_REG_DIVS_VAL(val) (((val) & 0x1) << 20)
+#define LTQ_ES_SW_GCTL1_REG_DIVS_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_DIVS) >> 20) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_DIVS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_DIVS) | (((val) & 0x1) << 20))
+/* Do not Identify IPV6 in PPPOE (19) */
+#define LTQ_ES_SW_GCTL1_REG_DII6P (0x1 << 19)
+#define LTQ_ES_SW_GCTL1_REG_DII6P_VAL(val) (((val) & 0x1) << 19)
+#define LTQ_ES_SW_GCTL1_REG_DII6P_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_DII6P) >> 19) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_DII6P_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_DII6P) | (((val) & 0x1) << 19))
+/* Do not Identify IP in PPPOE after SNAP (18) */
+#define LTQ_ES_SW_GCTL1_REG_DIIPS (0x1 << 18)
+#define LTQ_ES_SW_GCTL1_REG_DIIPS_VAL(val) (((val) & 0x1) << 18)
+#define LTQ_ES_SW_GCTL1_REG_DIIPS_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_DIIPS) >> 18) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_DIIPS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_DIIPS) | (((val) & 0x1) << 18))
+/* Do not Identify Ether-Type = 0x0800, IP VER = 6 as IPV6 packets (17) */
+#define LTQ_ES_SW_GCTL1_REG_DIE (0x1 << 17)
+#define LTQ_ES_SW_GCTL1_REG_DIE_VAL(val) (((val) & 0x1) << 17)
+#define LTQ_ES_SW_GCTL1_REG_DIE_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_DIE) >> 17) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_DIE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_DIE) | (((val) & 0x1) << 17))
+/* Do not Identify IP in PPPOE (16) */
+#define LTQ_ES_SW_GCTL1_REG_DIIP (0x1 << 16)
+#define LTQ_ES_SW_GCTL1_REG_DIIP_VAL(val) (((val) & 0x1) << 16)
+#define LTQ_ES_SW_GCTL1_REG_DIIP_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_DIIP) >> 16) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_DIIP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_DIIP) | (((val) & 0x1) << 16))
+/* Do not Identify SNAP (15) */
+#define LTQ_ES_SW_GCTL1_REG_DIS (0x1 << 15)
+#define LTQ_ES_SW_GCTL1_REG_DIS_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_SW_GCTL1_REG_DIS_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_DIS) >> 15) & 0x1)
+#define LTQ_ES_SW_GCTL1_REG_DIS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_DIS) | (((val) & 0x1) << 15))
+/* Unicast Portmap (14:12) */
+#define LTQ_ES_SW_GCTL1_REG_UP (0x7 << 12)
+#define LTQ_ES_SW_GCTL1_REG_UP_VAL(val) (((val) & 0x7) << 12)
+#define LTQ_ES_SW_GCTL1_REG_UP_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_UP) >> 12) & 0x7)
+#define LTQ_ES_SW_GCTL1_REG_UP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_UP) | (((val) & 0x7) << 12))
+/* Broadcast Portmap (10:8) */
+#define LTQ_ES_SW_GCTL1_REG_BP (0x7 << 8)
+#define LTQ_ES_SW_GCTL1_REG_BP_VAL(val) (((val) & 0x7) << 8)
+#define LTQ_ES_SW_GCTL1_REG_BP_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_BP) >> 8) & 0x7)
+#define LTQ_ES_SW_GCTL1_REG_BP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_BP) | (((val) & 0x7) << 8))
+/* Multicast Portmap (6:4) */
+#define LTQ_ES_SW_GCTL1_REG_MP (0x7 << 4)
+#define LTQ_ES_SW_GCTL1_REG_MP_VAL(val) (((val) & 0x7) << 4)
+#define LTQ_ES_SW_GCTL1_REG_MP_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_MP) >> 4) & 0x7)
+#define LTQ_ES_SW_GCTL1_REG_MP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_MP) | (((val) & 0x7) << 4))
+/* Reserve Portmap (2:0) */
+#define LTQ_ES_SW_GCTL1_REG_RP (0x7)
+#define LTQ_ES_SW_GCTL1_REG_RP_VAL(val) (((val) & 0x7) << 0)
+#define LTQ_ES_SW_GCTL1_REG_RP_GET(val) ((((val) & LTQ_ES_SW_GCTL1_REG_RP) >> 0) & 0x7)
+#define LTQ_ES_SW_GCTL1_REG_RP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_SW_GCTL1_REG_RP) | (((val) & 0x7) << 0))
+
+/*******************************************************************************
+ * ARP/RARP Register
+ ******************************************************************************/
+
+/* MAC Control Action (15:14) */
+#define LTQ_ES_ARP_REG_MACA (0x3 << 14)
+#define LTQ_ES_ARP_REG_MACA_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_ARP_REG_MACA_GET(val) ((((val) & LTQ_ES_ARP_REG_MACA) >> 14) & 0x3)
+#define LTQ_ES_ARP_REG_MACA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_MACA) | (((val) & 0x3) << 14))
+/* Unicast packet Treated as Cross_VLAN packet (13) */
+#define LTQ_ES_ARP_REG_UPT (0x1 << 13)
+#define LTQ_ES_ARP_REG_UPT_VAL(val) (((val) & 0x1) << 13)
+#define LTQ_ES_ARP_REG_UPT_GET(val) ((((val) & LTQ_ES_ARP_REG_UPT) >> 13) & 0x1)
+#define LTQ_ES_ARP_REG_UPT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_UPT) | (((val) & 0x1) << 13))
+/* RARP Packet Treated as Cross_VLAN Packet (12) */
+#define LTQ_ES_ARP_REG_RPT (0x1 << 12)
+#define LTQ_ES_ARP_REG_RPT_VAL(val) (((val) & 0x1) << 12)
+#define LTQ_ES_ARP_REG_RPT_GET(val) ((((val) & LTQ_ES_ARP_REG_RPT) >> 12) & 0x1)
+#define LTQ_ES_ARP_REG_RPT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_RPT) | (((val) & 0x1) << 12))
+/* RARP/ARP Packet Action (11:10) */
+#define LTQ_ES_ARP_REG_RAPA (0x3 << 10)
+#define LTQ_ES_ARP_REG_RAPA_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_ARP_REG_RAPA_GET(val) ((((val) & LTQ_ES_ARP_REG_RAPA) >> 10) & 0x3)
+#define LTQ_ES_ARP_REG_RAPA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_RAPA) | (((val) & 0x3) << 10))
+/* RARP/ARP Packet Priority Enable (9) */
+#define LTQ_ES_ARP_REG_RAPPE (0x1 << 9)
+#define LTQ_ES_ARP_REG_RAPPE_VAL(val) (((val) & 0x1) << 9)
+#define LTQ_ES_ARP_REG_RAPPE_GET(val) ((((val) & LTQ_ES_ARP_REG_RAPPE) >> 9) & 0x1)
+#define LTQ_ES_ARP_REG_RAPPE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_RAPPE) | (((val) & 0x1) << 9))
+/* RARP/ARP Packet Priority (8:7) */
+#define LTQ_ES_ARP_REG_RAPP (0x3 << 7)
+#define LTQ_ES_ARP_REG_RAPP_VAL(val) (((val) & 0x3) << 7)
+#define LTQ_ES_ARP_REG_RAPP_GET(val) ((((val) & LTQ_ES_ARP_REG_RAPP) >> 7) & 0x3)
+#define LTQ_ES_ARP_REG_RAPP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_RAPP) | (((val) & 0x3) << 7))
+/* RARP/ARP Packet Output Tag Handle (6:5) */
+#define LTQ_ES_ARP_REG_RAPOTH (0x3 << 5)
+#define LTQ_ES_ARP_REG_RAPOTH_VAL(val) (((val) & 0x3) << 5)
+#define LTQ_ES_ARP_REG_RAPOTH_GET(val) ((((val) & LTQ_ES_ARP_REG_RAPOTH) >> 5) & 0x3)
+#define LTQ_ES_ARP_REG_RAPOTH_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_RAPOTH) | (((val) & 0x3) << 5))
+/* ARP Packet Treated as Cross_VLAN Packet (4) */
+#define LTQ_ES_ARP_REG_APT (0x1 << 4)
+#define LTQ_ES_ARP_REG_APT_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_ES_ARP_REG_APT_GET(val) ((((val) & LTQ_ES_ARP_REG_APT) >> 4) & 0x1)
+#define LTQ_ES_ARP_REG_APT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_APT) | (((val) & 0x1) << 4))
+/* RARP/ARP Packet Treated as Management Packet (3) */
+#define LTQ_ES_ARP_REG_RAPTM (0x1 << 3)
+#define LTQ_ES_ARP_REG_RAPTM_VAL(val) (((val) & 0x1) << 3)
+#define LTQ_ES_ARP_REG_RAPTM_GET(val) ((((val) & LTQ_ES_ARP_REG_RAPTM) >> 3) & 0x1)
+#define LTQ_ES_ARP_REG_RAPTM_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_RAPTM) | (((val) & 0x1) << 3))
+/* RARP/ARP Packet Treated as Span Packet (2) */
+#define LTQ_ES_ARP_REG_TAPTS (0x1 << 2)
+#define LTQ_ES_ARP_REG_TAPTS_VAL(val) (((val) & 0x1) << 2)
+#define LTQ_ES_ARP_REG_TAPTS_GET(val) ((((val) & LTQ_ES_ARP_REG_TAPTS) >> 2) & 0x1)
+#define LTQ_ES_ARP_REG_TAPTS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_TAPTS) | (((val) & 0x1) << 2))
+/* Trap ARP Packet (1) */
+#define LTQ_ES_ARP_REG_TAP (0x1 << 1)
+#define LTQ_ES_ARP_REG_TAP_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_ES_ARP_REG_TAP_GET(val) ((((val) & LTQ_ES_ARP_REG_TAP) >> 1) & 0x1)
+#define LTQ_ES_ARP_REG_TAP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_TAP) | (((val) & 0x1) << 1))
+/* Trap RARP Packet (0) */
+#define LTQ_ES_ARP_REG_TRP (0x1)
+#define LTQ_ES_ARP_REG_TRP_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_ES_ARP_REG_TRP_GET(val) ((((val) & LTQ_ES_ARP_REG_TRP) >> 0) & 0x1)
+#define LTQ_ES_ARP_REG_TRP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ARP_REG_TRP) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Storm Control Register
+ ******************************************************************************/
+
+/* Reserved (31:29) */
+#define LTQ_ES_STRM_CTL_REG_RES (0x7 << 29)
+#define LTQ_ES_STRM_CTL_REG_RES_GET(val) ((((val) & LTQ_ES_STRM_CTL_REG_RES) >> 29) & 0x7)
+/* 10M Threshold (28:16) */
+#define LTQ_ES_STRM_CTL_REG_STORM_10_TH (0x1fff << 16)
+#define LTQ_ES_STRM_CTL_REG_STORM_10_TH_VAL(val) (((val) & 0x1fff) << 16)
+#define LTQ_ES_STRM_CTL_REG_STORM_10_TH_GET(val) ((((val) & LTQ_ES_STRM_CTL_REG_STORM_10_TH) >> 16) & 0x1fff)
+#define LTQ_ES_STRM_CTL_REG_STORM_10_TH_SET(reg,val) (reg) = ((reg & ~LTQ_ES_STRM_CTL_REG_STORM_10_TH) | (((val) & 0x1fff) << 16))
+/* Storm Enable for Broadcast Packets (15) */
+#define LTQ_ES_STRM_CTL_REG_STORM_B (0x1 << 15)
+#define LTQ_ES_STRM_CTL_REG_STORM_B_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_STRM_CTL_REG_STORM_B_GET(val) ((((val) & LTQ_ES_STRM_CTL_REG_STORM_B) >> 15) & 0x1)
+#define LTQ_ES_STRM_CTL_REG_STORM_B_SET(reg,val) (reg) = ((reg & ~LTQ_ES_STRM_CTL_REG_STORM_B) | (((val) & 0x1) << 15))
+/* Storm Enable for Multicast Packets (14) */
+#define LTQ_ES_STRM_CTL_REG_STORM_M (0x1 << 14)
+#define LTQ_ES_STRM_CTL_REG_STORM_M_VAL(val) (((val) & 0x1) << 14)
+#define LTQ_ES_STRM_CTL_REG_STORM_M_GET(val) ((((val) & LTQ_ES_STRM_CTL_REG_STORM_M) >> 14) & 0x1)
+#define LTQ_ES_STRM_CTL_REG_STORM_M_SET(reg,val) (reg) = ((reg & ~LTQ_ES_STRM_CTL_REG_STORM_M) | (((val) & 0x1) << 14))
+/* Storm Enable for Un-learned Unicast Packets (13) */
+#define LTQ_ES_STRM_CTL_REG_STORM_U (0x1 << 13)
+#define LTQ_ES_STRM_CTL_REG_STORM_U_VAL(val) (((val) & 0x1) << 13)
+#define LTQ_ES_STRM_CTL_REG_STORM_U_GET(val) ((((val) & LTQ_ES_STRM_CTL_REG_STORM_U) >> 13) & 0x1)
+#define LTQ_ES_STRM_CTL_REG_STORM_U_SET(reg,val) (reg) = ((reg & ~LTQ_ES_STRM_CTL_REG_STORM_U) | (((val) & 0x1) << 13))
+/* 100M Threshold (12:0) */
+#define LTQ_ES_STRM_CTL_REG_STORM_100_TH (0x1fff)
+#define LTQ_ES_STRM_CTL_REG_STORM_100_TH_VAL(val) (((val) & 0x1fff) << 0)
+#define LTQ_ES_STRM_CTL_REG_STORM_100_TH_GET(val) ((((val) & LTQ_ES_STRM_CTL_REG_STORM_100_TH) >> 0) & 0x1fff)
+#define LTQ_ES_STRM_CTL_REG_STORM_100_TH_SET(reg,val) (reg) = ((reg & ~LTQ_ES_STRM_CTL_REG_STORM_100_TH) | (((val) & 0x1fff) << 0))
+
+/*******************************************************************************
+ * RGMII/GMII Port Control Register
+ ******************************************************************************/
+
+/* Management Clock Select (31:24) */
+#define LTQ_ES_RGMII_CTL_REG_MCS (0xff << 24)
+#define LTQ_ES_RGMII_CTL_REG_MCS_VAL(val) (((val) & 0xff) << 24)
+#define LTQ_ES_RGMII_CTL_REG_MCS_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_MCS) >> 24) & 0xff)
+#define LTQ_ES_RGMII_CTL_REG_MCS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_MCS) | (((val) & 0xff) << 24))
+/* Interface Selection (19:18) */
+#define LTQ_ES_RGMII_CTL_REG_IS (0x3 << 18)
+#define LTQ_ES_RGMII_CTL_REG_IS_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_IS) >> 18) & 0x3)
+/* Port 1 RGMII Rx Clock Delay (17:16) */
+#define LTQ_ES_RGMII_CTL_REG_P1RDLY (0x3 << 16)
+#define LTQ_ES_RGMII_CTL_REG_P1RDLY_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_RGMII_CTL_REG_P1RDLY_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P1RDLY) >> 16) & 0x3)
+#define LTQ_ES_RGMII_CTL_REG_P1RDLY_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P1RDLY) | (((val) & 0x3) << 16))
+/* Port 1 RGMII Tx Clock Delay (15:14) */
+#define LTQ_ES_RGMII_CTL_REG_P1TDLY (0x3 << 14)
+#define LTQ_ES_RGMII_CTL_REG_P1TDLY_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_RGMII_CTL_REG_P1TDLY_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P1TDLY) >> 14) & 0x3)
+#define LTQ_ES_RGMII_CTL_REG_P1TDLY_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P1TDLY) | (((val) & 0x3) << 14))
+/* Port 1 Speed (13:12) */
+#define LTQ_ES_RGMII_CTL_REG_P1SPD (0x3 << 12)
+#define LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_RGMII_CTL_REG_P1SPD_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P1SPD) >> 12) & 0x3)
+#define LTQ_ES_RGMII_CTL_REG_P1SPD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P1SPD) | (((val) & 0x3) << 12))
+/* Port 1 Duplex mode (11) */
+#define LTQ_ES_RGMII_CTL_REG_P1DUP (0x1 << 11)
+#define LTQ_ES_RGMII_CTL_REG_P1DUP_VAL(val) (((val) & 0x1) << 11)
+#define LTQ_ES_RGMII_CTL_REG_P1DUP_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P1DUP) >> 11) & 0x1)
+#define LTQ_ES_RGMII_CTL_REG_P1DUP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P1DUP) | (((val) & 0x1) << 11))
+/* Port 1 Flow Control Enable (10) */
+#define LTQ_ES_RGMII_CTL_REG_P1FCE (0x1 << 10)
+#define LTQ_ES_RGMII_CTL_REG_P1FCE_VAL(val) (((val) & 0x1) << 10)
+#define LTQ_ES_RGMII_CTL_REG_P1FCE_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P1FCE) >> 10) & 0x1)
+#define LTQ_ES_RGMII_CTL_REG_P1FCE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P1FCE) | (((val) & 0x1) << 10))
+/* Port 0 RGMII Rx Clock Delay (7:6) */
+#define LTQ_ES_RGMII_CTL_REG_P0RDLY (0x3 << 6)
+#define LTQ_ES_RGMII_CTL_REG_P0RDLY_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_RGMII_CTL_REG_P0RDLY_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P0RDLY) >> 6) & 0x3)
+#define LTQ_ES_RGMII_CTL_REG_P0RDLY_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P0RDLY) | (((val) & 0x3) << 6))
+/* Port 0 RGMII Tx Clock Delay (5:4) */
+#define LTQ_ES_RGMII_CTL_REG_P0TDLY (0x3 << 4)
+#define LTQ_ES_RGMII_CTL_REG_P0TDLY_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_RGMII_CTL_REG_P0TDLY_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P0TDLY) >> 4) & 0x3)
+#define LTQ_ES_RGMII_CTL_REG_P0TDLY_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P0TDLY) | (((val) & 0x3) << 4))
+/* Port 0 Speed (3:2) */
+#define LTQ_ES_RGMII_CTL_REG_P0SPD (0x3 << 2)
+#define LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_RGMII_CTL_REG_P0SPD_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P0SPD) >> 2) & 0x3)
+#define LTQ_ES_RGMII_CTL_REG_P0SPD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P0SPD) | (((val) & 0x3) << 2))
+/* Port 0 Duplex mode (1) */
+#define LTQ_ES_RGMII_CTL_REG_P0DUP (0x1 << 1)
+#define LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_ES_RGMII_CTL_REG_P0DUP_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P0DUP) >> 1) & 0x1)
+#define LTQ_ES_RGMII_CTL_REG_P0DUP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P0DUP) | (((val) & 0x1) << 1))
+/* Port 0 Flow Control Enable (0) */
+#define LTQ_ES_RGMII_CTL_REG_P0FCE (0x1)
+#define LTQ_ES_RGMII_CTL_REG_P0FCE_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_ES_RGMII_CTL_REG_P0FCE_GET(val) ((((val) & LTQ_ES_RGMII_CTL_REG_P0FCE) >> 0) & 0x1)
+#define LTQ_ES_RGMII_CTL_REG_P0FCE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RGMII_CTL_REG_P0FCE) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * 802.1p Priority Map Register
+ ******************************************************************************/
+
+/* Priority Queue 7 (15:14) */
+#define LTQ_ES_PRT_1P_REG_1PPQ7 (0x3 << 14)
+#define LTQ_ES_PRT_1P_REG_1PPQ7_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_PRT_1P_REG_1PPQ7_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ7) >> 14) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ7_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ7) | (((val) & 0x3) << 14))
+/* Priority Queue 6 (13:12) */
+#define LTQ_ES_PRT_1P_REG_1PPQ6 (0x3 << 12)
+#define LTQ_ES_PRT_1P_REG_1PPQ6_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_PRT_1P_REG_1PPQ6_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ6) >> 12) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ6_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ6) | (((val) & 0x3) << 12))
+/* Priority Queue 5 (11:10) */
+#define LTQ_ES_PRT_1P_REG_1PPQ5 (0x3 << 10)
+#define LTQ_ES_PRT_1P_REG_1PPQ5_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_PRT_1P_REG_1PPQ5_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ5) >> 10) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ5_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ5) | (((val) & 0x3) << 10))
+/* Priority Queue 4 (9:8) */
+#define LTQ_ES_PRT_1P_REG_1PPQ4 (0x3 << 8)
+#define LTQ_ES_PRT_1P_REG_1PPQ4_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_PRT_1P_REG_1PPQ4_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ4) >> 8) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ4_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ4) | (((val) & 0x3) << 8))
+/* Priority Queue 3 (7:6) */
+#define LTQ_ES_PRT_1P_REG_1PPQ3 (0x3 << 6)
+#define LTQ_ES_PRT_1P_REG_1PPQ3_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_PRT_1P_REG_1PPQ3_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ3) >> 6) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ3) | (((val) & 0x3) << 6))
+/* Priority Queue 2 (5:4) */
+#define LTQ_ES_PRT_1P_REG_1PPQ2 (0x3 << 4)
+#define LTQ_ES_PRT_1P_REG_1PPQ2_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_PRT_1P_REG_1PPQ2_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ2) >> 4) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ2) | (((val) & 0x3) << 4))
+/* Priority Queue 1 (3:2) */
+#define LTQ_ES_PRT_1P_REG_1PPQ1 (0x3 << 2)
+#define LTQ_ES_PRT_1P_REG_1PPQ1_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_PRT_1P_REG_1PPQ1_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ1) >> 2) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ1) | (((val) & 0x3) << 2))
+/* Priority Queue 0 (1:0) */
+#define LTQ_ES_PRT_1P_REG_1PPQ0 (0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ0_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_PRT_1P_REG_1PPQ0_GET(val) ((((val) & LTQ_ES_PRT_1P_REG_1PPQ0) >> 0) & 0x3)
+#define LTQ_ES_PRT_1P_REG_1PPQ0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRT_1P_REG_1PPQ0) | (((val) & 0x3) << 0))
+
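+/*
+ * Minimal usage sketch for the map above: the _VAL helpers return an
+ * already-shifted field value, so a complete register image can be composed
+ * by OR-ing them, e.g. mapping 802.1p priorities 4-7 to queue 3 and 0-3 to
+ * queue 0 (assuming LTQ_ES_PRT_1P_REG, the priority map register pointer
+ * defined earlier in this header):
+ *
+ *	*LTQ_ES_PRT_1P_REG = LTQ_ES_PRT_1P_REG_1PPQ7_VAL(3) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ6_VAL(3) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ5_VAL(3) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ4_VAL(3) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ3_VAL(0) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ2_VAL(0) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ1_VAL(0) |
+ *			     LTQ_ES_PRT_1P_REG_1PPQ0_VAL(0);
+ */
+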
+/*******************************************************************************
+ * Global Bucket Size Base counter
+ ******************************************************************************/
+
+/* Reserved (31:18) */
+#define LTQ_ES_GBKT_SZBS_REG_REV (0x3fff << 18)
+#define LTQ_ES_GBKT_SZBS_REG_REV_GET(val) ((((val) & LTQ_ES_GBKT_SZBS_REG_REV) >> 18) & 0x3fff)
+/* Base[17:0] (17:0) */
+#define LTQ_ES_GBKT_SZBS_REG_BASE17_0 (0x3ffff)
+#define LTQ_ES_GBKT_SZBS_REG_BASE17_0_VAL(val) (((val) & 0x3ffff) << 0)
+#define LTQ_ES_GBKT_SZBS_REG_BASE17_0_GET(val) ((((val) & LTQ_ES_GBKT_SZBS_REG_BASE17_0) >> 0) & 0x3ffff)
+#define LTQ_ES_GBKT_SZBS_REG_BASE17_0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_GBKT_SZBS_REG_BASE17_0) | (((val) & 0x3ffff) << 0))
+
+/*******************************************************************************
+ * Global Bucket Size Extend Base Counter
+ ******************************************************************************/
+
+/* Reserved (31:18) */
+#define LTQ_ES_GBKT_SZEBS_REG_REV (0x3fff << 18)
+#define LTQ_ES_GBKT_SZEBS_REG_REV_GET(val) ((((val) & LTQ_ES_GBKT_SZEBS_REG_REV) >> 18) & 0x3fff)
+/* Extend Base[17:0] (17:0) */
+#define LTQ_ES_GBKT_SZEBS_REG_EBASE17_0 (0x3ffff)
+#define LTQ_ES_GBKT_SZEBS_REG_EBASE17_0_VAL(val) (((val) & 0x3ffff) << 0)
+#define LTQ_ES_GBKT_SZEBS_REG_EBASE17_0_GET(val) ((((val) & LTQ_ES_GBKT_SZEBS_REG_EBASE17_0) >> 0) & 0x3ffff)
+#define LTQ_ES_GBKT_SZEBS_REG_EBASE17_0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_GBKT_SZEBS_REG_EBASE17_0) | (((val) & 0x3ffff) << 0))
+
+/*******************************************************************************
+ * Buffer Threshold Register
+ ******************************************************************************/
+
+/* Port Unfull Offset 3 (31:30) */
+#define LTQ_ES_BF_TH_REG_PUO3 (0x3 << 30)
+#define LTQ_ES_BF_TH_REG_PUO3_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_BF_TH_REG_PUO3_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PUO3) >> 30) & 0x3)
+#define LTQ_ES_BF_TH_REG_PUO3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PUO3) | (((val) & 0x3) << 30))
+/* Port Unfull Offset 2 (29:28) */
+#define LTQ_ES_BF_TH_REG_PUO2 (0x3 << 28)
+#define LTQ_ES_BF_TH_REG_PUO2_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_BF_TH_REG_PUO2_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PUO2) >> 28) & 0x3)
+#define LTQ_ES_BF_TH_REG_PUO2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PUO2) | (((val) & 0x3) << 28))
+/* Port Unfull Offset 1 (27:26) */
+#define LTQ_ES_BF_TH_REG_PUO1 (0x3 << 26)
+#define LTQ_ES_BF_TH_REG_PUO1_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_BF_TH_REG_PUO1_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PUO1) >> 26) & 0x3)
+#define LTQ_ES_BF_TH_REG_PUO1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PUO1) | (((val) & 0x3) << 26))
+/* Port Unfull Offset 0 (25:24) */
+#define LTQ_ES_BF_TH_REG_PUO0 (0x3 << 24)
+#define LTQ_ES_BF_TH_REG_PUO0_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_BF_TH_REG_PUO0_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PUO0) >> 24) & 0x3)
+#define LTQ_ES_BF_TH_REG_PUO0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PUO0) | (((val) & 0x3) << 24))
+/* Port Full Offset 3 (23:22) */
+#define LTQ_ES_BF_TH_REG_PFO3 (0x3 << 22)
+#define LTQ_ES_BF_TH_REG_PFO3_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_BF_TH_REG_PFO3_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PFO3) >> 22) & 0x3)
+#define LTQ_ES_BF_TH_REG_PFO3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PFO3) | (((val) & 0x3) << 22))
+/* Port Full Offset 2 (21:20) */
+#define LTQ_ES_BF_TH_REG_PFO2 (0x3 << 20)
+#define LTQ_ES_BF_TH_REG_PFO2_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_ES_BF_TH_REG_PFO2_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PFO2) >> 20) & 0x3)
+#define LTQ_ES_BF_TH_REG_PFO2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PFO2) | (((val) & 0x3) << 20))
+/* Port Full Offset 1 (19:18) */
+#define LTQ_ES_BF_TH_REG_PFO1 (0x3 << 18)
+#define LTQ_ES_BF_TH_REG_PFO1_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_BF_TH_REG_PFO1_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PFO1) >> 18) & 0x3)
+#define LTQ_ES_BF_TH_REG_PFO1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PFO1) | (((val) & 0x3) << 18))
+/* Port Full Offset 0 (17:16) */
+#define LTQ_ES_BF_TH_REG_PFO0 (0x3 << 16)
+#define LTQ_ES_BF_TH_REG_PFO0_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_BF_TH_REG_PFO0_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PFO0) >> 16) & 0x3)
+#define LTQ_ES_BF_TH_REG_PFO0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PFO0) | (((val) & 0x3) << 16))
+/* Reserved (15:14) */
+#define LTQ_ES_BF_TH_REG_RES (0x3 << 14)
+#define LTQ_ES_BF_TH_REG_RES_GET(val) ((((val) & LTQ_ES_BF_TH_REG_RES) >> 14) & 0x3)
+/* Total Low Add (13) */
+#define LTQ_ES_BF_TH_REG_TLA (0x1 << 13)
+#define LTQ_ES_BF_TH_REG_TLA_VAL(val) (((val) & 0x1) << 13)
+#define LTQ_ES_BF_TH_REG_TLA_GET(val) ((((val) & LTQ_ES_BF_TH_REG_TLA) >> 13) & 0x1)
+#define LTQ_ES_BF_TH_REG_TLA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_TLA) | (((val) & 0x1) << 13))
+/* Total High Add (12) */
+#define LTQ_ES_BF_TH_REG_THA (0x1 << 12)
+#define LTQ_ES_BF_TH_REG_THA_VAL(val) (((val) & 0x1) << 12)
+#define LTQ_ES_BF_TH_REG_THA_GET(val) ((((val) & LTQ_ES_BF_TH_REG_THA) >> 12) & 0x1)
+#define LTQ_ES_BF_TH_REG_THA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_THA) | (((val) & 0x1) << 12))
+/* Total Low Offset (11:10) */
+#define LTQ_ES_BF_TH_REG_TLO (0x3 << 10)
+#define LTQ_ES_BF_TH_REG_TLO_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_BF_TH_REG_TLO_GET(val) ((((val) & LTQ_ES_BF_TH_REG_TLO) >> 10) & 0x3)
+#define LTQ_ES_BF_TH_REG_TLO_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_TLO) | (((val) & 0x3) << 10))
+/* Total High Offset (9:8) */
+#define LTQ_ES_BF_TH_REG_THO (0x3 << 8)
+#define LTQ_ES_BF_TH_REG_THO_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_BF_TH_REG_THO_GET(val) ((((val) & LTQ_ES_BF_TH_REG_THO) >> 8) & 0x3)
+#define LTQ_ES_BF_TH_REG_THO_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_THO) | (((val) & 0x3) << 8))
+/* Port Unfull Add (7:4) */
+#define LTQ_ES_BF_TH_REG_PUA (0xf << 4)
+#define LTQ_ES_BF_TH_REG_PUA_VAL(val) (((val) & 0xf) << 4)
+#define LTQ_ES_BF_TH_REG_PUA_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PUA) >> 4) & 0xf)
+#define LTQ_ES_BF_TH_REG_PUA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PUA) | (((val) & 0xf) << 4))
+/* Port Full Add (3:0) */
+#define LTQ_ES_BF_TH_REG_PFA (0xf)
+#define LTQ_ES_BF_TH_REG_PFA_VAL(val) (((val) & 0xf) << 0)
+#define LTQ_ES_BF_TH_REG_PFA_GET(val) ((((val) & LTQ_ES_BF_TH_REG_PFA) >> 0) & 0xf)
+#define LTQ_ES_BF_TH_REG_PFA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_BF_TH_REG_PFA) | (((val) & 0xf) << 0))
+
+/*******************************************************************************
+ * PMAC Header Control Register
+ ******************************************************************************/
+
+/* Reserved (31:22) */
+#define LTQ_ES_PMAC_HD_CTL_RES (0x3ff << 22)
+#define LTQ_ES_PMAC_HD_CTL_RES_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_RES) >> 22) & 0x3ff)
+/* Remove Layer-2 Header from Packets Going from PMAC to DMA (21) */
+#define LTQ_ES_PMAC_HD_CTL_RL2 (0x1 << 21)
+#define LTQ_ES_PMAC_HD_CTL_RL2_VAL(val) (((val) & 0x1) << 21)
+#define LTQ_ES_PMAC_HD_CTL_RL2_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_RL2) >> 21) & 0x1)
+#define LTQ_ES_PMAC_HD_CTL_RL2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_RL2) | (((val) & 0x1) << 21))
+/* Remove CRC from Packets Going from PMAC to DMA (20) */
+#define LTQ_ES_PMAC_HD_CTL_RC (0x1 << 20)
+#define LTQ_ES_PMAC_HD_CTL_RC_VAL(val) (((val) & 0x1) << 20)
+#define LTQ_ES_PMAC_HD_CTL_RC_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_RC) >> 20) & 0x1)
+#define LTQ_ES_PMAC_HD_CTL_RC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_RC) | (((val) & 0x1) << 20))
+/* Status Header for Packets from PMAC to DMA (19) */
+#define LTQ_ES_PMAC_HD_CTL_AS (0x1 << 19)
+#define LTQ_ES_PMAC_HD_CTL_AS_VAL(val) (((val) & 0x1) << 19)
+#define LTQ_ES_PMAC_HD_CTL_AS_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_AS) >> 19) & 0x1)
+#define LTQ_ES_PMAC_HD_CTL_AS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_AS) | (((val) & 0x1) << 19))
+/* Add CRC for packets from DMA to PMAC (18) */
+#define LTQ_ES_PMAC_HD_CTL_AC (0x1 << 18)
+#define LTQ_ES_PMAC_HD_CTL_AC_VAL(val) (((val) & 0x1) << 18)
+#define LTQ_ES_PMAC_HD_CTL_AC_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_AC) >> 18) & 0x1)
+#define LTQ_ES_PMAC_HD_CTL_AC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_AC) | (((val) & 0x1) << 18))
+/* Contains the length/type value to be added to packets from DMA to PMAC (17:2) */
+#define LTQ_ES_PMAC_HD_CTL_TYPE_LEN (0xffff << 2)
+#define LTQ_ES_PMAC_HD_CTL_TYPE_LEN_VAL(val) (((val) & 0xffff) << 2)
+#define LTQ_ES_PMAC_HD_CTL_TYPE_LEN_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_TYPE_LEN) >> 2) & 0xffff)
+#define LTQ_ES_PMAC_HD_CTL_TYPE_LEN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_TYPE_LEN) | (((val) & 0xffff) << 2))
+/* Add TAG to Packets from DMA to PMAC (1) */
+#define LTQ_ES_PMAC_HD_CTL_TAG (0x1 << 1)
+#define LTQ_ES_PMAC_HD_CTL_TAG_VAL(val) (((val) & 0x1) << 1)
+#define LTQ_ES_PMAC_HD_CTL_TAG_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_TAG) >> 1) & 0x1)
+#define LTQ_ES_PMAC_HD_CTL_TAG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_TAG) | (((val) & 0x1) << 1))
+/* ADD Header to Packets from DMA to PMAC (0) */
+#define LTQ_ES_PMAC_HD_CTL_ADD (0x1)
+#define LTQ_ES_PMAC_HD_CTL_ADD_VAL(val) (((val) & 0x1) << 0)
+#define LTQ_ES_PMAC_HD_CTL_ADD_GET(val) ((((val) & LTQ_ES_PMAC_HD_CTL_ADD) >> 0) & 0x1)
+#define LTQ_ES_PMAC_HD_CTL_ADD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_HD_CTL_ADD) | (((val) & 0x1) << 0))
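+
+/*
+ * Usage sketch (illustration only, not part of the original register set):
+ * the _GET()/_SET() helpers above follow a plain read-modify-write pattern.
+ * Assuming a hypothetical 32-bit accessor pair es_r32()/es_w32() and a
+ * register offset LTQ_ES_PMAC_HD_CTL (placeholder names, not defined in this
+ * header), enabling CRC generation on the DMA-to-PMAC path could look like:
+ *
+ *	u32 hd_ctl = es_r32(LTQ_ES_PMAC_HD_CTL);
+ *	LTQ_ES_PMAC_HD_CTL_AC_SET(hd_ctl, 1);
+ *	es_w32(hd_ctl, LTQ_ES_PMAC_HD_CTL);
+ */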
+
+/*******************************************************************************
+ * PMAC Source Address Register 1
+ ******************************************************************************/
+
+/* Source Address to be inserted as a part of the Ethernet header. (15:0) */
+#define LTQ_ES_PMAC_SA1_SA_47_32 (0xffff)
+#define LTQ_ES_PMAC_SA1_SA_47_32_VAL(val) (((val) & 0xffff) << 0)
+#define LTQ_ES_PMAC_SA1_SA_47_32_GET(val) ((((val) & LTQ_ES_PMAC_SA1_SA_47_32) >> 0) & 0xffff)
+#define LTQ_ES_PMAC_SA1_SA_47_32_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_SA1_SA_47_32) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * PMAC Source Address Register 2
+ ******************************************************************************/
+
+/* Source Address (31:0) */
+#define LTQ_ES_PMAC_SA2_SA_31_0 (0xFFFFFFFFL)
+#define LTQ_ES_PMAC_SA2_SA_31_0_VAL(val) (((val) & 0xFFFFFFFFL) << 0)
+#define LTQ_ES_PMAC_SA2_SA_31_0_GET(val) ((((val) & LTQ_ES_PMAC_SA2_SA_31_0) >> 0) & 0xFFFFFFFFL)
+#define LTQ_ES_PMAC_SA2_SA_31_0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_SA2_SA_31_0) | (((val) & 0xFFFFFFFFL) << 0))
+
+/*******************************************************************************
+ * PMAC Destination Address Register 1
+ ******************************************************************************/
+
+/* Destination Address (15:0) */
+#define LTQ_ES_PMAC_DA1_DA_47_32 (0xffff)
+#define LTQ_ES_PMAC_DA1_DA_47_32_VAL(val) (((val) & 0xffff) << 0)
+#define LTQ_ES_PMAC_DA1_DA_47_32_GET(val) ((((val) & LTQ_ES_PMAC_DA1_DA_47_32) >> 0) & 0xffff)
+#define LTQ_ES_PMAC_DA1_DA_47_32_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_DA1_DA_47_32) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * PMAC Destination Address Register 2
+ ******************************************************************************/
+
+/* Destination Address to be inserted as a part of the Ethernet header. (31:0) */
+#define LTQ_ES_PMAC_DA2_DA_31_0 (0xFFFFFFFFL)
+#define LTQ_ES_PMAC_DA2_DA_31_0_VAL(val) (((val) & 0xFFFFFFFFL) << 0)
+#define LTQ_ES_PMAC_DA2_DA_31_0_GET(val) ((((val) & LTQ_ES_PMAC_DA2_DA_31_0) >> 0) & 0xFFFFFFFFL)
+#define LTQ_ES_PMAC_DA2_DA_31_0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_DA2_DA_31_0) | (((val) & 0xFFFFFFFFL) << 0))
+
+/*******************************************************************************
+ * PMAC VLAN Register
+ ******************************************************************************/
+
+/* Priority to be inserted as a part of VLAN tag (15:13) */
+#define LTQ_ES_PMAC_VLAN_PRI (0x7 << 13)
+#define LTQ_ES_PMAC_VLAN_PRI_VAL(val) (((val) & 0x7) << 13)
+#define LTQ_ES_PMAC_VLAN_PRI_GET(val) ((((val) & LTQ_ES_PMAC_VLAN_PRI) >> 13) & 0x7)
+#define LTQ_ES_PMAC_VLAN_PRI_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_VLAN_PRI) | (((val) & 0x7) << 13))
+/* CFI bit to be inserted as a part of VLAN tag (12) */
+#define LTQ_ES_PMAC_VLAN_CFI (0x1 << 12)
+#define LTQ_ES_PMAC_VLAN_CFI_VAL(val) (((val) & 0x1) << 12)
+#define LTQ_ES_PMAC_VLAN_CFI_GET(val) ((((val) & LTQ_ES_PMAC_VLAN_CFI) >> 12) & 0x1)
+#define LTQ_ES_PMAC_VLAN_CFI_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_VLAN_CFI) | (((val) & 0x1) << 12))
+/* VLAN ID to be inserted as a part of VLAN tag (11:0) */
+#define LTQ_ES_PMAC_VLAN_VLAN_ID (0xfff)
+#define LTQ_ES_PMAC_VLAN_VLAN_ID_VAL(val) (((val) & 0xfff) << 0)
+#define LTQ_ES_PMAC_VLAN_VLAN_ID_GET(val) ((((val) & LTQ_ES_PMAC_VLAN_VLAN_ID) >> 0) & 0xfff)
+#define LTQ_ES_PMAC_VLAN_VLAN_ID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_VLAN_VLAN_ID) | (((val) & 0xfff) << 0))
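+
+/*
+ * Usage sketch (illustration only): the _VAL() helpers can be OR-ed together
+ * to build a complete register value in one step. A hypothetical default tag
+ * (priority 0, CFI 0, VID 1) could be composed as:
+ *
+ *	u32 vlan = LTQ_ES_PMAC_VLAN_PRI_VAL(0) |
+ *		   LTQ_ES_PMAC_VLAN_CFI_VAL(0) |
+ *		   LTQ_ES_PMAC_VLAN_VLAN_ID_VAL(1);
+ */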
+
+/*******************************************************************************
+ * PMAC TX IPG Counter Register
+ ******************************************************************************/
+
+/* IPG Counter (7:0) */
+#define LTQ_ES_PMAC_TX_IPG_IPG_CNT (0xff)
+#define LTQ_ES_PMAC_TX_IPG_IPG_CNT_VAL(val) (((val) & 0xff) << 0)
+#define LTQ_ES_PMAC_TX_IPG_IPG_CNT_GET(val) ((((val) & LTQ_ES_PMAC_TX_IPG_IPG_CNT) >> 0) & 0xff)
+#define LTQ_ES_PMAC_TX_IPG_IPG_CNT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_TX_IPG_IPG_CNT) | (((val) & 0xff) << 0))
+
+/*******************************************************************************
+ * PMAC RX IPG Counter Register
+ ******************************************************************************/
+
+/* IPG Counter (7:0) */
+#define LTQ_ES_PMAC_RX_IPG_IPG_CNT (0xff)
+#define LTQ_ES_PMAC_RX_IPG_IPG_CNT_VAL(val) (((val) & 0xff) << 0)
+#define LTQ_ES_PMAC_RX_IPG_IPG_CNT_GET(val) ((((val) & LTQ_ES_PMAC_RX_IPG_IPG_CNT) >> 0) & 0xff)
+#define LTQ_ES_PMAC_RX_IPG_IPG_CNT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PMAC_RX_IPG_IPG_CNT) | (((val) & 0xff) << 0))
+
+/*******************************************************************************
+ * Address Table Control 0 Register
+ ******************************************************************************/
+
+/* Address [31:0] (31:0) */
+#define LTQ_ES_ADR_TB_CTL0_REG_ADDR31_0 (0xFFFFFFFFL)
+#define LTQ_ES_ADR_TB_CTL0_REG_ADDR31_0_VAL(val) (((val) & 0xFFFFFFFFL) << 0)
+#define LTQ_ES_ADR_TB_CTL0_REG_ADDR31_0_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL0_REG_ADDR31_0) >> 0) & 0xFFFFFFFFL)
+#define LTQ_ES_ADR_TB_CTL0_REG_ADDR31_0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL0_REG_ADDR31_0) | (((val) & 0xFFFFFFFFL) << 0))
+
+/*******************************************************************************
+ * Address Table Control 1 Register
+ ******************************************************************************/
+
+/* Port Map (22:20) */
+#define LTQ_ES_ADR_TB_CTL1_REG_PMAP (0x7 << 20)
+#define LTQ_ES_ADR_TB_CTL1_REG_PMAP_VAL(val) (((val) & 0x7) << 20)
+#define LTQ_ES_ADR_TB_CTL1_REG_PMAP_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL1_REG_PMAP) >> 20) & 0x7)
+#define LTQ_ES_ADR_TB_CTL1_REG_PMAP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL1_REG_PMAP) | (((val) & 0x7) << 20))
+/* FID group (17:16) */
+#define LTQ_ES_ADR_TB_CTL1_REG_FID (0x3 << 16)
+#define LTQ_ES_ADR_TB_CTL1_REG_FID_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_ADR_TB_CTL1_REG_FID_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL1_REG_FID) >> 16) & 0x3)
+#define LTQ_ES_ADR_TB_CTL1_REG_FID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL1_REG_FID) | (((val) & 0x3) << 16))
+/* Address [47:32] (15:0) */
+#define LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32 (0xffff)
+#define LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32_VAL(val) (((val) & 0xffff) << 0)
+#define LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32) >> 0) & 0xffff)
+#define LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * Address Table Control 2 Register
+ ******************************************************************************/
+
+/* Command (22:20) */
+#define LTQ_ES_ADR_TB_CTL2_REG_CMD (0x7 << 20)
+#define LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(val) (((val) & 0x7) << 20)
+#define LTQ_ES_ADR_TB_CTL2_REG_CMD_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL2_REG_CMD) >> 20) & 0x7)
+#define LTQ_ES_ADR_TB_CTL2_REG_CMD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL2_REG_CMD) | (((val) & 0x7) << 20))
+/* Access Control (19:16) */
+#define LTQ_ES_ADR_TB_CTL2_REG_AC (0xf << 16)
+#define LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(val) (((val) & 0xf) << 16)
+#define LTQ_ES_ADR_TB_CTL2_REG_AC_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL2_REG_AC) >> 16) & 0xf)
+#define LTQ_ES_ADR_TB_CTL2_REG_AC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL2_REG_AC) | (((val) & 0xf) << 16))
+/* Info Type: Static address (12) */
+#define LTQ_ES_ADR_TB_CTL2_REG_INFOT (0x1 << 12)
+#define LTQ_ES_ADR_TB_CTL2_REG_INFOT_VAL(val) (((val) & 0x1) << 12)
+#define LTQ_ES_ADR_TB_CTL2_REG_INFOT_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL2_REG_INFOT) >> 12) & 0x1)
+#define LTQ_ES_ADR_TB_CTL2_REG_INFOT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL2_REG_INFOT) | (((val) & 0x1) << 12))
+/* Info_Ctrl/Age Timer (10:0) */
+#define LTQ_ES_ADR_TB_CTL2_REG_ITAT (0x7ff)
+#define LTQ_ES_ADR_TB_CTL2_REG_ITAT_VAL(val) (((val) & 0x7ff) << 0)
+#define LTQ_ES_ADR_TB_CTL2_REG_ITAT_GET(val) ((((val) & LTQ_ES_ADR_TB_CTL2_REG_ITAT) >> 0) & 0x7ff)
+#define LTQ_ES_ADR_TB_CTL2_REG_ITAT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_ADR_TB_CTL2_REG_ITAT) | (((val) & 0x7ff) << 0))
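+
+/*
+ * Usage sketch (illustration only): an address-table entry is described by
+ * the three control registers above; the field macros only compose the
+ * values, how they reach the hardware (register offsets, accessors) is not
+ * defined in this header. CTL1/CTL2 for a static entry might be built as
+ * follows (mac_hi16 and cmd are placeholders):
+ *
+ *	u32 ctl1 = LTQ_ES_ADR_TB_CTL1_REG_PMAP_VAL(0x1) |
+ *		   LTQ_ES_ADR_TB_CTL1_REG_FID_VAL(0) |
+ *		   LTQ_ES_ADR_TB_CTL1_REG_ADDR47_32_VAL(mac_hi16);
+ *	u32 ctl2 = LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(cmd) |
+ *		   LTQ_ES_ADR_TB_CTL2_REG_INFOT_VAL(1);
+ */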
+
+/*******************************************************************************
+ * Address Table Status 0 Register
+ ******************************************************************************/
+
+/* Address [31:0] (31:0) */
+#define LTQ_ES_ADR_TB_ST0_REG_ADDRS31_0 (0xFFFFFFFFL)
+#define LTQ_ES_ADR_TB_ST0_REG_ADDRS31_0_GET(val) ((((val) & LTQ_ES_ADR_TB_ST0_REG_ADDRS31_0) >> 0) & 0xFFFFFFFFL)
+
+/*******************************************************************************
+ * Address Table Status 1 Register
+ ******************************************************************************/
+
+/* Port Map (22:20) */
+#define LTQ_ES_ADR_TB_ST1_REG_PMAPS (0x7 << 20)
+#define LTQ_ES_ADR_TB_ST1_REG_PMAPS_GET(val) ((((val) & LTQ_ES_ADR_TB_ST1_REG_PMAPS) >> 20) & 0x7)
+/* FID group (17:16) */
+#define LTQ_ES_ADR_TB_ST1_REG_FIDS (0x3 << 16)
+#define LTQ_ES_ADR_TB_ST1_REG_FIDS_GET(val) ((((val) & LTQ_ES_ADR_TB_ST1_REG_FIDS) >> 16) & 0x3)
+/* Address [47:32] (15:0) */
+#define LTQ_ES_ADR_TB_ST1_REG_ADDRS47_32 (0xffff)
+#define LTQ_ES_ADR_TB_ST1_REG_ADDRS47_32_GET(val) ((((val) & LTQ_ES_ADR_TB_ST1_REG_ADDRS47_32) >> 0) & 0xffff)
+
+/*******************************************************************************
+ * Address Table Status 2 Register
+ ******************************************************************************/
+
+/* Busy (31) */
+#define LTQ_ES_ADR_TB_ST2_REG_BUSY (0x1 << 31)
+#define LTQ_ES_ADR_TB_ST2_REG_BUSY_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_BUSY) >> 31) & 0x1)
+/* Result (30:28) */
+#define LTQ_ES_ADR_TB_ST2_REG_RSLT (0x7 << 28)
+#define LTQ_ES_ADR_TB_ST2_REG_RSLT_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_RSLT) >> 28) & 0x7)
+/* Command (22:20) */
+#define LTQ_ES_ADR_TB_ST2_REG_CMD (0x7 << 20)
+#define LTQ_ES_ADR_TB_ST2_REG_CMD_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_CMD) >> 20) & 0x7)
+/* Access Control (19:16) */
+#define LTQ_ES_ADR_TB_ST2_REG_AC (0xf << 16)
+#define LTQ_ES_ADR_TB_ST2_REG_AC_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_AC) >> 16) & 0xf)
+/* Bad Status (14) */
+#define LTQ_ES_ADR_TB_ST2_REG_BAD (0x1 << 14)
+#define LTQ_ES_ADR_TB_ST2_REG_BAD_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_BAD) >> 14) & 0x1)
+/* Occupy (13) */
+#define LTQ_ES_ADR_TB_ST2_REG_OCP (0x1 << 13)
+#define LTQ_ES_ADR_TB_ST2_REG_OCP_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_OCP) >> 13) & 0x1)
+/* Info Type: Static address (12) */
+#define LTQ_ES_ADR_TB_ST2_REG_INFOTS (0x1 << 12)
+#define LTQ_ES_ADR_TB_ST2_REG_INFOTS_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_INFOTS) >> 12) & 0x1)
+/* Info_Ctrl/Age Timer Status (10:0) */
+#define LTQ_ES_ADR_TB_ST2_REG_ITATS (0x7ff)
+#define LTQ_ES_ADR_TB_ST2_REG_ITATS_GET(val) ((((val) & LTQ_ES_ADR_TB_ST2_REG_ITATS) >> 0) & 0x7ff)
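+
+/*
+ * Usage sketch (illustration only): the status registers are read-only, so
+ * only _GET() helpers are provided. A caller would typically poll the BUSY
+ * bit of status register 2 until the previous address-table command has
+ * completed, assuming a hypothetical accessor es_r32() and register offset
+ * LTQ_ES_ADR_TB_ST2 (placeholders):
+ *
+ *	while (LTQ_ES_ADR_TB_ST2_REG_BUSY_GET(es_r32(LTQ_ES_ADR_TB_ST2)))
+ *		cpu_relax();
+ */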
+
+/*******************************************************************************
+ * RMON Counter Control Register
+ ******************************************************************************/
+
+/* Reserved (31:12) */
+#define LTQ_ES_RMON_CTL_REG_RES (0xfffff << 12)
+#define LTQ_ES_RMON_CTL_REG_RES_GET(val) ((((val) & LTQ_ES_RMON_CTL_REG_RES) >> 12) & 0xfffff)
+/* Busy/Access Start (11) */
+#define LTQ_ES_RMON_CTL_REG_BAS (0x1 << 11)
+#define LTQ_ES_RMON_CTL_REG_BAS_VAL(val) (((val) & 0x1) << 11)
+#define LTQ_ES_RMON_CTL_REG_BAS_GET(val) ((((val) & LTQ_ES_RMON_CTL_REG_BAS) >> 11) & 0x1)
+#define LTQ_ES_RMON_CTL_REG_BAS_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RMON_CTL_REG_BAS) | (((val) & 0x1) << 11))
+/* Command for access counter (10:9) */
+#define LTQ_ES_RMON_CTL_REG_CAC (0x3 << 9)
+#define LTQ_ES_RMON_CTL_REG_CAC_VAL(val) (((val) & 0x3) << 9)
+#define LTQ_ES_RMON_CTL_REG_CAC_GET(val) ((((val) & LTQ_ES_RMON_CTL_REG_CAC) >> 9) & 0x3)
+#define LTQ_ES_RMON_CTL_REG_CAC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RMON_CTL_REG_CAC) | (((val) & 0x3) << 9))
+/* Port (8:6) */
+#define LTQ_ES_RMON_CTL_REG_PORTC (0x7 << 6)
+#define LTQ_ES_RMON_CTL_REG_PORTC_VAL(val) (((val) & 0x7) << 6)
+#define LTQ_ES_RMON_CTL_REG_PORTC_GET(val) ((((val) & LTQ_ES_RMON_CTL_REG_PORTC) >> 6) & 0x7)
+#define LTQ_ES_RMON_CTL_REG_PORTC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RMON_CTL_REG_PORTC) | (((val) & 0x7) << 6))
+/* Counter Offset (5:0) */
+#define LTQ_ES_RMON_CTL_REG_OFFSET (0x3f)
+#define LTQ_ES_RMON_CTL_REG_OFFSET_VAL(val) (((val) & 0x3f) << 0)
+#define LTQ_ES_RMON_CTL_REG_OFFSET_GET(val) ((((val) & LTQ_ES_RMON_CTL_REG_OFFSET) >> 0) & 0x3f)
+#define LTQ_ES_RMON_CTL_REG_OFFSET_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RMON_CTL_REG_OFFSET) | (((val) & 0x3f) << 0))
+
+/*******************************************************************************
+ * RMON Counter Status Register
+ ******************************************************************************/
+
+/* Counter [31:0] or Counter [63:32] for byte count (31:0) */
+#define LTQ_ES_RMON_ST_REG_COUNTER (0xFFFFFFFFL)
+#define LTQ_ES_RMON_ST_REG_COUNTER_GET(val) ((((val) & LTQ_ES_RMON_ST_REG_COUNTER) >> 0) & 0xFFFFFFFFL)
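+
+/*
+ * Usage sketch (illustration only): reading an RMON counter presumably means
+ * selecting port and counter offset in the control register, starting the
+ * access via the BAS bit, waiting for it to clear and then fetching the
+ * result from the status register above. With hypothetical accessors
+ * es_r32()/es_w32() and offsets LTQ_ES_RMON_CTL / LTQ_ES_RMON_ST
+ * (placeholders):
+ *
+ *	es_w32(LTQ_ES_RMON_CTL_REG_PORTC_VAL(port) |
+ *	       LTQ_ES_RMON_CTL_REG_OFFSET_VAL(offset) |
+ *	       LTQ_ES_RMON_CTL_REG_BAS_VAL(1), LTQ_ES_RMON_CTL);
+ *	while (LTQ_ES_RMON_CTL_REG_BAS_GET(es_r32(LTQ_ES_RMON_CTL)))
+ *		cpu_relax();
+ *	counter = LTQ_ES_RMON_ST_REG_COUNTER_GET(es_r32(LTQ_ES_RMON_ST));
+ */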
+
+/*******************************************************************************
+ * MDIO Indirect Access Control
+ ******************************************************************************/
+
+/* The Write Data to the MDIO register (31:16) */
+#define LTQ_ES_MDIO_CTL_REG_WD (0xffff << 16)
+#define LTQ_ES_MDIO_CTL_REG_WD_VAL(val) (((val) & 0xffff) << 16)
+#define LTQ_ES_MDIO_CTL_REG_WD_GET(val) ((((val) & LTQ_ES_MDIO_CTL_REG_WD) >> 16) & 0xffff)
+#define LTQ_ES_MDIO_CTL_REG_WD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_MDIO_CTL_REG_WD) | (((val) & 0xffff) << 16))
+/* Busy state (15) */
+#define LTQ_ES_MDIO_CTL_REG_MBUSY (0x1 << 15)
+#define LTQ_ES_MDIO_CTL_REG_MBUSY_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_MDIO_CTL_REG_MBUSY_GET(val) ((((val) & LTQ_ES_MDIO_CTL_REG_MBUSY) >> 15) & 0x1)
+#define LTQ_ES_MDIO_CTL_REG_MBUSY_SET(reg,val) (reg) = ((reg & ~LTQ_ES_MDIO_CTL_REG_MBUSY) | (((val) & 0x1) << 15))
+/* Reserved (14:12) */
+#define LTQ_ES_MDIO_CTL_REG_RES (0x7 << 12)
+#define LTQ_ES_MDIO_CTL_REG_RES_GET(val) ((((val) & LTQ_ES_MDIO_CTL_REG_RES) >> 12) & 0x7)
+/* Operation Code (11:10) */
+#define LTQ_ES_MDIO_CTL_REG_OP (0x3 << 10)
+#define LTQ_ES_MDIO_CTL_REG_OP_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_MDIO_CTL_REG_OP_GET(val) ((((val) & LTQ_ES_MDIO_CTL_REG_OP) >> 10) & 0x3)
+#define LTQ_ES_MDIO_CTL_REG_OP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_MDIO_CTL_REG_OP) | (((val) & 0x3) << 10))
+/* PHY Address (9:5) */
+#define LTQ_ES_MDIO_CTL_REG_PHYAD (0x1f << 5)
+#define LTQ_ES_MDIO_CTL_REG_PHYAD_VAL(val) (((val) & 0x1f) << 5)
+#define LTQ_ES_MDIO_CTL_REG_PHYAD_GET(val) ((((val) & LTQ_ES_MDIO_CTL_REG_PHYAD) >> 5) & 0x1f)
+#define LTQ_ES_MDIO_CTL_REG_PHYAD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_MDIO_CTL_REG_PHYAD) | (((val) & 0x1f) << 5))
+/* Register Address (4:0) */
+#define LTQ_ES_MDIO_CTL_REG_REGAD (0x1f)
+#define LTQ_ES_MDIO_CTL_REG_REGAD_VAL(val) (((val) & 0x1f) << 0)
+#define LTQ_ES_MDIO_CTL_REG_REGAD_GET(val) ((((val) & LTQ_ES_MDIO_CTL_REG_REGAD) >> 0) & 0x1f)
+#define LTQ_ES_MDIO_CTL_REG_REGAD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_MDIO_CTL_REG_REGAD) | (((val) & 0x1f) << 0))
+
+/*******************************************************************************
+ * MDIO Indirect Read Data
+ ******************************************************************************/
+
+/* Reserved (31:16) */
+#define LTQ_ES_MDIO_DATA_REG_RES (0xffff << 16)
+#define LTQ_ES_MDIO_DATA_REG_RES_GET(val) ((((val) & LTQ_ES_MDIO_DATA_REG_RES) >> 16) & 0xffff)
+/* The Read Data (15:0) */
+#define LTQ_ES_MDIO_DATA_REG_RD (0xffff)
+#define LTQ_ES_MDIO_DATA_REG_RD_GET(val) ((((val) & LTQ_ES_MDIO_DATA_REG_RD) >> 0) & 0xffff)
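+
+/*
+ * Usage sketch (illustration only): an indirect MDIO read is started by
+ * writing PHY address, register address and an opcode to the control
+ * register, waiting for MBUSY to clear and then fetching the result from
+ * the read-data register above. With hypothetical accessors
+ * es_r32()/es_w32() and offsets LTQ_ES_MDIO_CTL / LTQ_ES_MDIO_DATA
+ * (placeholders; the read opcode value is hardware specific):
+ *
+ *	es_w32(LTQ_ES_MDIO_CTL_REG_OP_VAL(read_op) |
+ *	       LTQ_ES_MDIO_CTL_REG_PHYAD_VAL(phy) |
+ *	       LTQ_ES_MDIO_CTL_REG_REGAD_VAL(reg), LTQ_ES_MDIO_CTL);
+ *	while (LTQ_ES_MDIO_CTL_REG_MBUSY_GET(es_r32(LTQ_ES_MDIO_CTL)))
+ *		cpu_relax();
+ *	val = LTQ_ES_MDIO_DATA_REG_RD_GET(es_r32(LTQ_ES_MDIO_DATA));
+ */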
+
+/*******************************************************************************
+ * Type Filter Action
+ ******************************************************************************/
+
+/* Destination Queue for Type Filter 7 (31:30) */
+#define LTQ_ES_TP_FLT_ACT_REG_QATF7 (0x3 << 30)
+#define LTQ_ES_TP_FLT_ACT_REG_QATF7_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_TP_FLT_ACT_REG_QATF7_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QATF7) >> 30) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QATF7_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QATF7) | (((val) & 0x3) << 30))
+/* Destination Queue for Type Filter 6 (29:28) */
+#define LTQ_ES_TP_FLT_ACT_REG_QATF6 (0x3 << 28)
+#define LTQ_ES_TP_FLT_ACT_REG_QATF6_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_TP_FLT_ACT_REG_QATF6_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QATF6) >> 28) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QATF6_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QATF6) | (((val) & 0x3) << 28))
+/* Destination Queue for Type Filter 5 (27:26) */
+#define LTQ_ES_TP_FLT_ACT_REG_QTF5 (0x3 << 26)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF5_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF5_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QTF5) >> 26) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF5_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QTF5) | (((val) & 0x3) << 26))
+/* Destination Queue for Type Filter 4 (25:24) */
+#define LTQ_ES_TP_FLT_ACT_REG_QTF4 (0x3 << 24)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF4_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF4_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QTF4) >> 24) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF4_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QTF4) | (((val) & 0x3) << 24))
+/* Destination Queue for Type Filter 3 (23:22) */
+#define LTQ_ES_TP_FLT_ACT_REG_QTF3 (0x3 << 22)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF3_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF3_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QTF3) >> 22) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QTF3) | (((val) & 0x3) << 22))
+/* Destination Queue for Type Filter 2 (21:20) */
+#define LTQ_ES_TP_FLT_ACT_REG_QTF2 (0x3 << 20)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF2_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF2_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QTF2) >> 20) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QTF2) | (((val) & 0x3) << 20))
+/* Destination Queue for Type Filter 1 (19:18) */
+#define LTQ_ES_TP_FLT_ACT_REG_QTF1 (0x3 << 18)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF1_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF1_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QTF1) >> 18) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QTF1) | (((val) & 0x3) << 18))
+/* Destination Queue for Type Filter 0 (17:16) */
+#define LTQ_ES_TP_FLT_ACT_REG_QTF0 (0x3 << 16)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF0_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF0_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_QTF0) >> 16) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_QTF0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_QTF0) | (((val) & 0x3) << 16))
+/* Action for Type Filter 7 (15:14) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF7 (0x3 << 14)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF7_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF7_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF7) >> 14) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF7_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF7) | (((val) & 0x3) << 14))
+/* Action for Type Filter 6 (13:12) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF6 (0x3 << 12)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF6_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF6_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF6) >> 12) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF6_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF6) | (((val) & 0x3) << 12))
+/* Action for Type Filter 5 (11:10) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF5 (0x3 << 10)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF5_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF5_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF5) >> 10) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF5_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF5) | (((val) & 0x3) << 10))
+/* Action for Type Filter 4 (9:8) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF4 (0x3 << 8)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF4_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF4_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF4) >> 8) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF4_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF4) | (((val) & 0x3) << 8))
+/* Action for Type Filter 3 (7:6) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF3 (0x3 << 6)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF3_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF3_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF3) >> 6) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF3) | (((val) & 0x3) << 6))
+/* Action for Type Filter 2 (5:4) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF2 (0x3 << 4)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF2_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF2_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF2) >> 4) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF2) | (((val) & 0x3) << 4))
+/* Action for Type Filter 1 (3:2) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF1 (0x3 << 2)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF1_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF1_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF1) >> 2) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF1) | (((val) & 0x3) << 2))
+/* Action for Type Filter 0 (1:0) */
+#define LTQ_ES_TP_FLT_ACT_REG_ATF0 (0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF0_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF0_GET(val) ((((val) & LTQ_ES_TP_FLT_ACT_REG_ATF0) >> 0) & 0x3)
+#define LTQ_ES_TP_FLT_ACT_REG_ATF0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT_ACT_REG_ATF0) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * Protocol Filter Action
+ ******************************************************************************/
+
+/* Action for Protocol Filter 7 (15:14) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF7 (0x3 << 14)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF7_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF7_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF7) >> 14) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF7_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF7) | (((val) & 0x3) << 14))
+/* Action for Protocol Filter 6 (13:12) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF6 (0x3 << 12)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF6_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF6_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF6) >> 12) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF6_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF6) | (((val) & 0x3) << 12))
+/* Action for Protocol Filter 5 (11:10) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF5 (0x3 << 10)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF5_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF5_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF5) >> 10) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF5_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF5) | (((val) & 0x3) << 10))
+/* Action for Protocol Filter 4 (9:8) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF4 (0x3 << 8)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF4_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF4_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF4) >> 8) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF4_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF4) | (((val) & 0x3) << 8))
+/* Action for Protocol Filter 3 (7:6) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF3 (0x3 << 6)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF3_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF3_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF3) >> 6) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF3) | (((val) & 0x3) << 6))
+/* Action for Protocol Filter 2 (5:4) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF2 (0x3 << 4)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF2_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF2_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF2) >> 4) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF2) | (((val) & 0x3) << 4))
+/* Action for Protocol Filter 1 (3:2) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF1 (0x3 << 2)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF1_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF1_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF1) >> 2) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF1) | (((val) & 0x3) << 2))
+/* Action for Protocol Filter 0 (1:0) */
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF0 (0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF0_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF0_GET(val) ((((val) & LTQ_ES_PRTCL_FLT_ACT_REG_APF0) >> 0) & 0x3)
+#define LTQ_ES_PRTCL_FLT_ACT_REG_APF0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_FLT_ACT_REG_APF0) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * VLAN Filter 0
+ ******************************************************************************/
+
+/* Reserved (31:24) */
+#define LTQ_ES_VLAN_FLT0_REG_RES (0xff << 24)
+#define LTQ_ES_VLAN_FLT0_REG_RES_VAL(val) (((val) & 0xff) << 24)
+#define LTQ_ES_VLAN_FLT0_REG_RES_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_RES) >> 24) & 0xff)
+#define LTQ_ES_VLAN_FLT0_REG_RES_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_RES) | (((val) & 0xff) << 24))
+/* FID (23:22) */
+#define LTQ_ES_VLAN_FLT0_REG_FID (0x3 << 22)
+#define LTQ_ES_VLAN_FLT0_REG_FID_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_VLAN_FLT0_REG_FID_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_FID) >> 22) & 0x3)
+#define LTQ_ES_VLAN_FLT0_REG_FID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_FID) | (((val) & 0x3) << 22))
+/* Tagged Member (21:19) */
+#define LTQ_ES_VLAN_FLT0_REG_TM (0x7 << 19)
+#define LTQ_ES_VLAN_FLT0_REG_TM_VAL(val) (((val) & 0x7) << 19)
+#define LTQ_ES_VLAN_FLT0_REG_TM_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_TM) >> 19) & 0x7)
+#define LTQ_ES_VLAN_FLT0_REG_TM_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_TM) | (((val) & 0x7) << 19))
+/* Member (18:16) */
+#define LTQ_ES_VLAN_FLT0_REG_M (0x7 << 16)
+#define LTQ_ES_VLAN_FLT0_REG_M_VAL(val) (((val) & 0x7) << 16)
+#define LTQ_ES_VLAN_FLT0_REG_M_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_M) >> 16) & 0x7)
+#define LTQ_ES_VLAN_FLT0_REG_M_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_M) | (((val) & 0x7) << 16))
+/* VLAN_Valid (15) */
+#define LTQ_ES_VLAN_FLT0_REG_VV (0x1 << 15)
+#define LTQ_ES_VLAN_FLT0_REG_VV_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_VLAN_FLT0_REG_VV_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_VV) >> 15) & 0x1)
+#define LTQ_ES_VLAN_FLT0_REG_VV_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_VV) | (((val) & 0x1) << 15))
+/* VLAN PRI (14:12) */
+#define LTQ_ES_VLAN_FLT0_REG_VP (0x7 << 12)
+#define LTQ_ES_VLAN_FLT0_REG_VP_VAL(val) (((val) & 0x7) << 12)
+#define LTQ_ES_VLAN_FLT0_REG_VP_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_VP) >> 12) & 0x7)
+#define LTQ_ES_VLAN_FLT0_REG_VP_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_VP) | (((val) & 0x7) << 12))
+/* VID (11:0) */
+#define LTQ_ES_VLAN_FLT0_REG_VID (0xfff)
+#define LTQ_ES_VLAN_FLT0_REG_VID_VAL(val) (((val) & 0xfff) << 0)
+#define LTQ_ES_VLAN_FLT0_REG_VID_GET(val) ((((val) & LTQ_ES_VLAN_FLT0_REG_VID) >> 0) & 0xfff)
+#define LTQ_ES_VLAN_FLT0_REG_VID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_VLAN_FLT0_REG_VID) | (((val) & 0xfff) << 0))
+
+/*******************************************************************************
+ * Type Filter 10
+ ******************************************************************************/
+
+/* Value 1 Compared with Ether-Type (31:16) */
+#define LTQ_ES_TP_FLT10_REG_VCET1 (0xffff << 16)
+#define LTQ_ES_TP_FLT10_REG_VCET1_VAL(val) (((val) & 0xffff) << 16)
+#define LTQ_ES_TP_FLT10_REG_VCET1_GET(val) ((((val) & LTQ_ES_TP_FLT10_REG_VCET1) >> 16) & 0xffff)
+#define LTQ_ES_TP_FLT10_REG_VCET1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT10_REG_VCET1) | (((val) & 0xffff) << 16))
+/* Value 0 Compared with Ether-Type (15:0) */
+#define LTQ_ES_TP_FLT10_REG_VCET0 (0xffff)
+#define LTQ_ES_TP_FLT10_REG_VCET0_VAL(val) (((val) & 0xffff) << 0)
+#define LTQ_ES_TP_FLT10_REG_VCET0_GET(val) ((((val) & LTQ_ES_TP_FLT10_REG_VCET0) >> 0) & 0xffff)
+#define LTQ_ES_TP_FLT10_REG_VCET0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TP_FLT10_REG_VCET0) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * DiffServMapping 0
+ ******************************************************************************/
+
+/* Priority Queue F (31:30) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQF (0x3 << 30)
+#define LTQ_ES_DFSRV_MAP0_REG_PQF_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_DFSRV_MAP0_REG_PQF_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQF) >> 30) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQF_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQF) | (((val) & 0x3) << 30))
+/* Priority Queue E (29:28) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQE (0x3 << 28)
+#define LTQ_ES_DFSRV_MAP0_REG_PQE_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_DFSRV_MAP0_REG_PQE_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQE) >> 28) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQE_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQE) | (((val) & 0x3) << 28))
+/* Priority Queue D (27:26) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQD (0x3 << 26)
+#define LTQ_ES_DFSRV_MAP0_REG_PQD_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_DFSRV_MAP0_REG_PQD_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQD) >> 26) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQD_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQD) | (((val) & 0x3) << 26))
+/* Priority Queue C (25:24) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQC (0x3 << 24)
+#define LTQ_ES_DFSRV_MAP0_REG_PQC_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_DFSRV_MAP0_REG_PQC_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQC) >> 24) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQC_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQC) | (((val) & 0x3) << 24))
+/* Priority Queue B (23:22) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQB (0x3 << 22)
+#define LTQ_ES_DFSRV_MAP0_REG_PQB_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_DFSRV_MAP0_REG_PQB_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQB) >> 22) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQB_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQB) | (((val) & 0x3) << 22))
+/* Priority Queue A (21:20) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQA (0x3 << 20)
+#define LTQ_ES_DFSRV_MAP0_REG_PQA_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_ES_DFSRV_MAP0_REG_PQA_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQA) >> 20) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQA_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQA) | (((val) & 0x3) << 20))
+/* Priority Queue 9 (19:18) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ9 (0x3 << 18)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ9_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ9_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ9) >> 18) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ9_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ9) | (((val) & 0x3) << 18))
+/* Priority Queue 8 (17:16) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ8 (0x3 << 16)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ8_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ8_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ8) >> 16) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ8_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ8) | (((val) & 0x3) << 16))
+/* Priority Queue 7 (15:14) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ7 (0x3 << 14)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ7_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ7_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ7) >> 14) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ7_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ7) | (((val) & 0x3) << 14))
+/* Priority Queue 6 (13:12) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ6 (0x3 << 12)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ6_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ6_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ6) >> 12) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ6_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ6) | (((val) & 0x3) << 12))
+/* Priority Queue 5 (11:10) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ5 (0x3 << 10)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ5_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ5_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ5) >> 10) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ5_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ5) | (((val) & 0x3) << 10))
+/* Priority Queue 4 (9:8) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ4 (0x3 << 8)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ4_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ4_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ4) >> 8) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ4_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ4) | (((val) & 0x3) << 8))
+/* Priority Queue 3 (7:6) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ3 (0x3 << 6)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ3_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ3_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ3) >> 6) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ3) | (((val) & 0x3) << 6))
+/* Priority Queue 2 (5:4) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ2 (0x3 << 4)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ2_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ2_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ2) >> 4) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ2) | (((val) & 0x3) << 4))
+/* Priority Queue 1 (3:2) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ1 (0x3 << 2)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ1_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ1_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ1) >> 2) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ1) | (((val) & 0x3) << 2))
+/* Priority Queue 0 (1:0) */
+#define LTQ_ES_DFSRV_MAP0_REG_PQ0 (0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ0_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ0_GET(val) ((((val) & LTQ_ES_DFSRV_MAP0_REG_PQ0) >> 0) & 0x3)
+#define LTQ_ES_DFSRV_MAP0_REG_PQ0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP0_REG_PQ0) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * DiffServMapping 1
+ ******************************************************************************/
+
+/* Priority Queue 1F (31:30) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1F (0x3 << 30)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1F_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1F_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ1F) >> 30) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1F_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ1F) | (((val) & 0x3) << 30))
+/* Priority Queue 1E (29:28) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1E (0x3 << 28)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1E_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1E_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ1E) >> 28) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1E_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ1E) | (((val) & 0x3) << 28))
+/* Priority Queue 1D (27:26) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1D (0x3 << 26)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1D_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1D_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ1D) >> 26) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1D_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ1D) | (((val) & 0x3) << 26))
+/* Priority Queue 1C (25:24) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1C (0x3 << 24)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1C_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1C_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ1C) >> 24) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1C_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ1C) | (((val) & 0x3) << 24))
+/* Priority Queue 1B (23:22) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1B (0x3 << 22)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1B_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1B_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ1B) >> 22) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1B_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ1B) | (((val) & 0x3) << 22))
+/* Priority Queue 1A (21:20) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1A (0x3 << 20)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1A_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1A_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ1A) >> 20) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ1A_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ1A) | (((val) & 0x3) << 20))
+/* Priority Queue 19 (19:18) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ19 (0x3 << 18)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ19_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ19_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ19) >> 18) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ19_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ19) | (((val) & 0x3) << 18))
+/* Priority Queue 18 (17:16) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ18 (0x3 << 16)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ18_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ18_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ18) >> 16) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ18_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ18) | (((val) & 0x3) << 16))
+/* Priority Queue 17 (15:14) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ17 (0x3 << 14)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ17_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ17_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ17) >> 14) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ17_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ17) | (((val) & 0x3) << 14))
+/* Priority Queue 16 (13:12) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ16 (0x3 << 12)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ16_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ16_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ16) >> 12) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ16_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ16) | (((val) & 0x3) << 12))
+/* Priority Queue 15 (11:10) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ15 (0x3 << 10)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ15_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ15_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ15) >> 10) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ15_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ15) | (((val) & 0x3) << 10))
+/* Priority Queue 14 (9:8) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ14 (0x3 << 8)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ14_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ14_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ14) >> 8) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ14_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ14) | (((val) & 0x3) << 8))
+/* Priority Queue 13 (7:6) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ13 (0x3 << 6)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ13_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ13_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ13) >> 6) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ13_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ13) | (((val) & 0x3) << 6))
+/* Priority Queue 12 (5:4) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ12 (0x3 << 4)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ12_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ12_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ12) >> 4) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ12_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ12) | (((val) & 0x3) << 4))
+/* Priority Queue 11 (3:2) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ11 (0x3 << 2)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ11_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ11_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ11) >> 2) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ11_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ11) | (((val) & 0x3) << 2))
+/* Priority Queue 10 (1:0) */
+#define LTQ_ES_DFSRV_MAP1_REG_PQ10 (0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ10_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ10_GET(val) ((((val) & LTQ_ES_DFSRV_MAP1_REG_PQ10) >> 0) & 0x3)
+#define LTQ_ES_DFSRV_MAP1_REG_PQ10_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP1_REG_PQ10) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * DiffServMapping 2
+ ******************************************************************************/
+
+/* Priority Queue 2F (31:30) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2F (0x3 << 30)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2F_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2F_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ2F) >> 30) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2F_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ2F) | (((val) & 0x3) << 30))
+/* Priority Queue 2E (29:28) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2E (0x3 << 28)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2E_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2E_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ2E) >> 28) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2E_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ2E) | (((val) & 0x3) << 28))
+/* Priority Queue 2D (27:26) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2D (0x3 << 26)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2D_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2D_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ2D) >> 26) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2D_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ2D) | (((val) & 0x3) << 26))
+/* Priority Queue 2C (25:24) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2C (0x3 << 24)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2C_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2C_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ2C) >> 24) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2C_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ2C) | (((val) & 0x3) << 24))
+/* Priority Queue 2B (23:22) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2B (0x3 << 22)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2B_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2B_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ2B) >> 22) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2B_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ2B) | (((val) & 0x3) << 22))
+/* Priority Queue 2A (21:20) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2A (0x3 << 20)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2A_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2A_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ2A) >> 20) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ2A_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ2A) | (((val) & 0x3) << 20))
+/* Priority Queue 29 (19:18) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ29 (0x3 << 18)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ29_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ29_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ29) >> 18) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ29_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ29) | (((val) & 0x3) << 18))
+/* Priority Queue 28 (17:16) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ28 (0x3 << 16)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ28_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ28_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ28) >> 16) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ28_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ28) | (((val) & 0x3) << 16))
+/* Priority Queue 27 (15:14) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ27 (0x3 << 14)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ27_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ27_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ27) >> 14) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ27_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ27) | (((val) & 0x3) << 14))
+/* Priority Queue 26 (13:12) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ26 (0x3 << 12)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ26_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ26_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ26) >> 12) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ26_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ26) | (((val) & 0x3) << 12))
+/* Priority Queue 25 (11:10) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ25 (0x3 << 10)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ25_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ25_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ25) >> 10) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ25_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ25) | (((val) & 0x3) << 10))
+/* Priority Queue 24 (9:8) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ24 (0x3 << 8)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ24_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ24_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ24) >> 8) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ24_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ24) | (((val) & 0x3) << 8))
+/* Priority Queue 23 (7:6) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ23 (0x3 << 6)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ23_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ23_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ23) >> 6) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ23_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ23) | (((val) & 0x3) << 6))
+/* Priority Queue 22 (5:4) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ22 (0x3 << 4)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ22_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ22_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ22) >> 4) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ22_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ22) | (((val) & 0x3) << 4))
+/* Priority Queue 21 (3:2) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ21 (0x3 << 2)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ21_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ21_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ21) >> 2) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ21_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ21) | (((val) & 0x3) << 2))
+/* Priority Queue 20 (1:0) */
+#define LTQ_ES_DFSRV_MAP2_REG_PQ20 (0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ20_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ20_GET(val) ((((val) & LTQ_ES_DFSRV_MAP2_REG_PQ20) >> 0) & 0x3)
+#define LTQ_ES_DFSRV_MAP2_REG_PQ20_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP2_REG_PQ20) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * DiffServMapping 3
+ ******************************************************************************/
+
+/* Priority Queue 3F (31:30) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3F (0x3 << 30)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3F_VAL(val) (((val) & 0x3) << 30)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3F_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ3F) >> 30) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3F_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ3F) | (((val) & 0x3) << 30))
+/* Priority Queue 3E (29:28) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3E (0x3 << 28)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3E_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3E_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ3E) >> 28) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3E_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ3E) | (((val) & 0x3) << 28))
+/* Priority Queue 3D (27:26) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3D (0x3 << 26)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3D_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3D_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ3D) >> 26) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3D_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ3D) | (((val) & 0x3) << 26))
+/* Priority Queue 3C (25:24) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3C (0x3 << 24)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3C_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3C_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ3C) >> 24) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3C_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ3C) | (((val) & 0x3) << 24))
+/* Priority Queue 3B (23:22) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3B (0x3 << 22)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3B_VAL(val) (((val) & 0x3) << 22)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3B_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ3B) >> 22) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3B_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ3B) | (((val) & 0x3) << 22))
+/* Priority Queue 3A (21:20) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3A (0x3 << 20)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3A_VAL(val) (((val) & 0x3) << 20)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3A_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ3A) >> 20) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ3A_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ3A) | (((val) & 0x3) << 20))
+/* Priority Queue 39 (19:18) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ39 (0x3 << 18)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ39_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ39_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ39) >> 18) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ39_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ39) | (((val) & 0x3) << 18))
+/* Priority Queue 38 (17:16) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ38 (0x3 << 16)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ38_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ38_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ38) >> 16) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ38_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ38) | (((val) & 0x3) << 16))
+/* Priority Queue 37 (15:14) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ37 (0x3 << 14)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ37_VAL(val) (((val) & 0x3) << 14)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ37_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ37) >> 14) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ37_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ37) | (((val) & 0x3) << 14))
+/* Priority Queue 36 (13:12) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ36 (0x3 << 12)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ36_VAL(val) (((val) & 0x3) << 12)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ36_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ36) >> 12) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ36_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ36) | (((val) & 0x3) << 12))
+/* Priority Queue 35 (11:10) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ35 (0x3 << 10)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ35_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ35_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ35) >> 10) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ35_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ35) | (((val) & 0x3) << 10))
+/* Priority Queue 34 (9:8) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ34 (0x3 << 8)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ34_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ34_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ34) >> 8) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ34_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ34) | (((val) & 0x3) << 8))
+/* Priority Queue 33 (7:6) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ33 (0x3 << 6)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ33_VAL(val) (((val) & 0x3) << 6)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ33_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ33) >> 6) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ33_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ33) | (((val) & 0x3) << 6))
+/* Priority Queue 32 (5:4) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ32 (0x3 << 4)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ32_VAL(val) (((val) & 0x3) << 4)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ32_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ32) >> 4) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ32_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ32) | (((val) & 0x3) << 4))
+/* Priority Queue 31 (3:2) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ31 (0x3 << 2)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ31_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ31_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ31) >> 2) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ31_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ31) | (((val) & 0x3) << 2))
+/* Priority Queue 30 (1:0) */
+#define LTQ_ES_DFSRV_MAP3_REG_PQ30 (0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ30_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ30_GET(val) ((((val) & LTQ_ES_DFSRV_MAP3_REG_PQ30) >> 0) & 0x3)
+#define LTQ_ES_DFSRV_MAP3_REG_PQ30_SET(reg,val) (reg) = ((reg & ~LTQ_ES_DFSRV_MAP3_REG_PQ30) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * TCP/UDP Port Filter 0
+ ******************************************************************************/
+
+/* Reserved (31:30) */
+#define LTQ_ES_TCP_PF0_REG_RES (0x3 << 30)
+#define LTQ_ES_TCP_PF0_REG_RES_GET(val) ((((val) & LTQ_ES_TCP_PF0_REG_RES) >> 30) & 0x3)
+/* Action for TCP/UDP Port Filter 0 (29:28) */
+#define LTQ_ES_TCP_PF0_REG_ATUF0 (0x3 << 28)
+#define LTQ_ES_TCP_PF0_REG_ATUF0_VAL(val) (((val) & 0x3) << 28)
+#define LTQ_ES_TCP_PF0_REG_ATUF0_GET(val) ((((val) & LTQ_ES_TCP_PF0_REG_ATUF0) >> 28) & 0x3)
+#define LTQ_ES_TCP_PF0_REG_ATUF0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TCP_PF0_REG_ATUF0) | (((val) & 0x3) << 28))
+/* TCP/UDP PRI for TCP/UDP Port Filter 0 (27:26) */
+#define LTQ_ES_TCP_PF0_REG_TUPF0 (0x3 << 26)
+#define LTQ_ES_TCP_PF0_REG_TUPF0_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_TCP_PF0_REG_TUPF0_GET(val) ((((val) & LTQ_ES_TCP_PF0_REG_TUPF0) >> 26) & 0x3)
+#define LTQ_ES_TCP_PF0_REG_TUPF0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TCP_PF0_REG_TUPF0) | (((val) & 0x3) << 26))
+/* Compare TCP/UDP Source Port or Destination Port (25:24) */
+#define LTQ_ES_TCP_PF0_REG_COMP0 (0x3 << 24)
+#define LTQ_ES_TCP_PF0_REG_COMP0_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_TCP_PF0_REG_COMP0_GET(val) ((((val) & LTQ_ES_TCP_PF0_REG_COMP0) >> 24) & 0x3)
+#define LTQ_ES_TCP_PF0_REG_COMP0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TCP_PF0_REG_COMP0) | (((val) & 0x3) << 24))
+/* Port Range in TCP/UDP (23:16) */
+#define LTQ_ES_TCP_PF0_REG_PRANGE0 (0xff << 16)
+#define LTQ_ES_TCP_PF0_REG_PRANGE0_VAL(val) (((val) & 0xff) << 16)
+#define LTQ_ES_TCP_PF0_REG_PRANGE0_GET(val) ((((val) & LTQ_ES_TCP_PF0_REG_PRANGE0) >> 16) & 0xff)
+#define LTQ_ES_TCP_PF0_REG_PRANGE0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TCP_PF0_REG_PRANGE0) | (((val) & 0xff) << 16))
+/* Base Port number 0 (15:0) */
+#define LTQ_ES_TCP_PF0_REG_BASEPT0 (0xffff)
+#define LTQ_ES_TCP_PF0_REG_BASEPT0_VAL(val) (((val) & 0xffff) << 0)
+#define LTQ_ES_TCP_PF0_REG_BASEPT0_GET(val) ((((val) & LTQ_ES_TCP_PF0_REG_BASEPT0) >> 0) & 0xffff)
+#define LTQ_ES_TCP_PF0_REG_BASEPT0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_TCP_PF0_REG_BASEPT0) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * Reserved DA (0180C2000003~0180C2000000) Control Register
+ ******************************************************************************/
+
+/* Valid bit for 0180C2000003 (31) */
+#define LTQ_ES_RA_03_00_REG_RA03_VALID (0x1 << 31)
+#define LTQ_ES_RA_03_00_REG_RA03_VALID_VAL(val) (((val) & 0x1) << 31)
+#define LTQ_ES_RA_03_00_REG_RA03_VALID_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA03_VALID) >> 31) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA03_VALID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA03_VALID) | (((val) & 0x1) << 31))
+/* Span bit for 0180C2000003 (30) */
+#define LTQ_ES_RA_03_00_REG_RA03_SPAN (0x1 << 30)
+#define LTQ_ES_RA_03_00_REG_RA03_SPAN_VAL(val) (((val) & 0x1) << 30)
+#define LTQ_ES_RA_03_00_REG_RA03_SPAN_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA03_SPAN) >> 30) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA03_SPAN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA03_SPAN) | (((val) & 0x1) << 30))
+/* Management bit for 0180C2000003 (29) */
+#define LTQ_ES_RA_03_00_REG_RA03_MG (0x1 << 29)
+#define LTQ_ES_RA_03_00_REG_RA03_MG_VAL(val) (((val) & 0x1) << 29)
+#define LTQ_ES_RA_03_00_REG_RA03_MG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA03_MG) >> 29) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA03_MG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA03_MG) | (((val) & 0x1) << 29))
+/* Cross_VLAN bit for 0180C2000003 (28) */
+#define LTQ_ES_RA_03_00_REG_RA03_CV (0x1 << 28)
+#define LTQ_ES_RA_03_00_REG_RA03_CV_VAL(val) (((val) & 0x1) << 28)
+#define LTQ_ES_RA_03_00_REG_RA03_CV_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA03_CV) >> 28) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA03_CV_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA03_CV) | (((val) & 0x1) << 28))
+/* TXTAG bit for 0180C2000003 (27:26) */
+#define LTQ_ES_RA_03_00_REG_RA03_TXTAG (0x3 << 26)
+#define LTQ_ES_RA_03_00_REG_RA03_TXTAG_VAL(val) (((val) & 0x3) << 26)
+#define LTQ_ES_RA_03_00_REG_RA03_TXTAG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA03_TXTAG) >> 26) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA03_TXTAG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA03_TXTAG) | (((val) & 0x3) << 26))
+/* Action bit for 0180C2000003 (25:24) */
+#define LTQ_ES_RA_03_00_REG_RA03_ACT (0x3 << 24)
+#define LTQ_ES_RA_03_00_REG_RA03_ACT_VAL(val) (((val) & 0x3) << 24)
+#define LTQ_ES_RA_03_00_REG_RA03_ACT_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA03_ACT) >> 24) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA03_ACT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA03_ACT) | (((val) & 0x3) << 24))
+/* Valid bit for 0180C2000002 (23) */
+#define LTQ_ES_RA_03_00_REG_RA02_VALID (0x1 << 23)
+#define LTQ_ES_RA_03_00_REG_RA02_VALID_VAL(val) (((val) & 0x1) << 23)
+#define LTQ_ES_RA_03_00_REG_RA02_VALID_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA02_VALID) >> 23) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA02_VALID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA02_VALID) | (((val) & 0x1) << 23))
+/* Span bit for 0180C2000002 (22) */
+#define LTQ_ES_RA_03_00_REG_RA02_SPAN (0x1 << 22)
+#define LTQ_ES_RA_03_00_REG_RA02_SPAN_VAL(val) (((val) & 0x1) << 22)
+#define LTQ_ES_RA_03_00_REG_RA02_SPAN_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA02_SPAN) >> 22) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA02_SPAN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA02_SPAN) | (((val) & 0x1) << 22))
+/* Management bit for 0180C2000002 (21) */
+#define LTQ_ES_RA_03_00_REG_RA02_MG (0x1 << 21)
+#define LTQ_ES_RA_03_00_REG_RA02_MG_VAL(val) (((val) & 0x1) << 21)
+#define LTQ_ES_RA_03_00_REG_RA02_MG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA02_MG) >> 21) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA02_MG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA02_MG) | (((val) & 0x1) << 21))
+/* Cross_VLAN bit for 0180C2000002 (20) */
+#define LTQ_ES_RA_03_00_REG_RA02_CV (0x1 << 20)
+#define LTQ_ES_RA_03_00_REG_RA02_CV_VAL(val) (((val) & 0x1) << 20)
+#define LTQ_ES_RA_03_00_REG_RA02_CV_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA02_CV) >> 20) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA02_CV_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA02_CV) | (((val) & 0x1) << 20))
+/* TXTAG bit for 0180C2000002 (19:18) */
+#define LTQ_ES_RA_03_00_REG_RA02_TXTAG (0x3 << 18)
+#define LTQ_ES_RA_03_00_REG_RA02_TXTAG_VAL(val) (((val) & 0x3) << 18)
+#define LTQ_ES_RA_03_00_REG_RA02_TXTAG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA02_TXTAG) >> 18) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA02_TXTAG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA02_TXTAG) | (((val) & 0x3) << 18))
+/* Action bit for 0180C2000002 (17:16) */
+#define LTQ_ES_RA_03_00_REG_RA02_ACT (0x3 << 16)
+#define LTQ_ES_RA_03_00_REG_RA02_ACT_VAL(val) (((val) & 0x3) << 16)
+#define LTQ_ES_RA_03_00_REG_RA02_ACT_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA02_ACT) >> 16) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA02_ACT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA02_ACT) | (((val) & 0x3) << 16))
+/* Valid bit for 0180C2000001 (15) */
+#define LTQ_ES_RA_03_00_REG_RA01_VALID (0x1 << 15)
+#define LTQ_ES_RA_03_00_REG_RA01_VALID_VAL(val) (((val) & 0x1) << 15)
+#define LTQ_ES_RA_03_00_REG_RA01_VALID_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA01_VALID) >> 15) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA01_VALID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA01_VALID) | (((val) & 0x1) << 15))
+/* Span bit for 0180C2000001 (14) */
+#define LTQ_ES_RA_03_00_REG_RA01_SPAN (0x1 << 14)
+#define LTQ_ES_RA_03_00_REG_RA01_SPAN_VAL(val) (((val) & 0x1) << 14)
+#define LTQ_ES_RA_03_00_REG_RA01_SPAN_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA01_SPAN) >> 14) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA01_SPAN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA01_SPAN) | (((val) & 0x1) << 14))
+/* Management bit for 0180C2000001 (13) */
+#define LTQ_ES_RA_03_00_REG_RA01_MG (0x1 << 13)
+#define LTQ_ES_RA_03_00_REG_RA01_MG_VAL(val) (((val) & 0x1) << 13)
+#define LTQ_ES_RA_03_00_REG_RA01_MG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA01_MG) >> 13) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA01_MG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA01_MG) | (((val) & 0x1) << 13))
+/* Cross_VLAN bit for 0180C2000001 (12) */
+#define LTQ_ES_RA_03_00_REG_RA01_CV (0x1 << 12)
+#define LTQ_ES_RA_03_00_REG_RA01_CV_VAL(val) (((val) & 0x1) << 12)
+#define LTQ_ES_RA_03_00_REG_RA01_CV_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA01_CV) >> 12) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA01_CV_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA01_CV) | (((val) & 0x1) << 12))
+/* TXTAG bit for 0180C2000001 (11:10) */
+#define LTQ_ES_RA_03_00_REG_RA01_TXTAG (0x3 << 10)
+#define LTQ_ES_RA_03_00_REG_RA01_TXTAG_VAL(val) (((val) & 0x3) << 10)
+#define LTQ_ES_RA_03_00_REG_RA01_TXTAG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA01_TXTAG) >> 10) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA01_TXTAG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA01_TXTAG) | (((val) & 0x3) << 10))
+/* Action bit for 0180C2000001 (9:8) */
+#define LTQ_ES_RA_03_00_REG_RA01_ACT (0x3 << 8)
+#define LTQ_ES_RA_03_00_REG_RA01_ACT_VAL(val) (((val) & 0x3) << 8)
+#define LTQ_ES_RA_03_00_REG_RA01_ACT_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA01_ACT) >> 8) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA01_ACT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA01_ACT) | (((val) & 0x3) << 8))
+/* Valid bit for 0180C2000000 (7) */
+#define LTQ_ES_RA_03_00_REG_RA00_VALID (0x1 << 7)
+#define LTQ_ES_RA_03_00_REG_RA00_VALID_VAL(val) (((val) & 0x1) << 7)
+#define LTQ_ES_RA_03_00_REG_RA00_VALID_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA00_VALID) >> 7) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA00_VALID_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA00_VALID) | (((val) & 0x1) << 7))
+/* Span bit for 0180C2000000 (6) */
+#define LTQ_ES_RA_03_00_REG_RA00_SPAN (0x1 << 6)
+#define LTQ_ES_RA_03_00_REG_RA00_SPAN_VAL(val) (((val) & 0x1) << 6)
+#define LTQ_ES_RA_03_00_REG_RA00_SPAN_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA00_SPAN) >> 6) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA00_SPAN_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA00_SPAN) | (((val) & 0x1) << 6))
+/* Management bit for 0180C2000000 (5) */
+#define LTQ_ES_RA_03_00_REG_RA00_MG (0x1 << 5)
+#define LTQ_ES_RA_03_00_REG_RA00_MG_VAL(val) (((val) & 0x1) << 5)
+#define LTQ_ES_RA_03_00_REG_RA00_MG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA00_MG) >> 5) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA00_MG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA00_MG) | (((val) & 0x1) << 5))
+/* Cross_VLAN bit for 0180C2000000 (4) */
+#define LTQ_ES_RA_03_00_REG_RA00_CV (0x1 << 4)
+#define LTQ_ES_RA_03_00_REG_RA00_CV_VAL(val) (((val) & 0x1) << 4)
+#define LTQ_ES_RA_03_00_REG_RA00_CV_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA00_CV) >> 4) & 0x1)
+#define LTQ_ES_RA_03_00_REG_RA00_CV_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA00_CV) | (((val) & 0x1) << 4))
+/* TXTAG bit for 0180C2000000 (3:2) */
+#define LTQ_ES_RA_03_00_REG_RA00_TXTAG (0x3 << 2)
+#define LTQ_ES_RA_03_00_REG_RA00_TXTAG_VAL(val) (((val) & 0x3) << 2)
+#define LTQ_ES_RA_03_00_REG_RA00_TXTAG_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA00_TXTAG) >> 2) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA00_TXTAG_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA00_TXTAG) | (((val) & 0x3) << 2))
+/* Action bit for 0180C2000000 (1:0) */
+#define LTQ_ES_RA_03_00_REG_RA00_ACT (0x3)
+#define LTQ_ES_RA_03_00_REG_RA00_ACT_VAL(val) (((val) & 0x3) << 0)
+#define LTQ_ES_RA_03_00_REG_RA00_ACT_GET(val) ((((val) & LTQ_ES_RA_03_00_REG_RA00_ACT) >> 0) & 0x3)
+#define LTQ_ES_RA_03_00_REG_RA00_ACT_SET(reg,val) (reg) = ((reg & ~LTQ_ES_RA_03_00_REG_RA00_ACT) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * Protocol Filter 0
+ ******************************************************************************/
+
+/* Value Compared with Protocol in IP Header (31:24) */
+#define LTQ_ES_PRTCL_F0_REG_PFR3 (0xff << 24)
+#define LTQ_ES_PRTCL_F0_REG_PFR3_VAL(val) (((val) & 0xff) << 24)
+#define LTQ_ES_PRTCL_F0_REG_PFR3_GET(val) ((((val) & LTQ_ES_PRTCL_F0_REG_PFR3) >> 24) & 0xff)
+#define LTQ_ES_PRTCL_F0_REG_PFR3_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_F0_REG_PFR3) | (((val) & 0xff) << 24))
+/* Value Compared with Protocol in IP Header (23:16) */
+#define LTQ_ES_PRTCL_F0_REG_PFR2 (0xff << 16)
+#define LTQ_ES_PRTCL_F0_REG_PFR2_VAL(val) (((val) & 0xff) << 16)
+#define LTQ_ES_PRTCL_F0_REG_PFR2_GET(val) ((((val) & LTQ_ES_PRTCL_F0_REG_PFR2) >> 16) & 0xff)
+#define LTQ_ES_PRTCL_F0_REG_PFR2_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_F0_REG_PFR2) | (((val) & 0xff) << 16))
+/* Value Compared with Protocol in IP Header (15:8) */
+#define LTQ_ES_PRTCL_F0_REG_PFR1 (0xff << 8)
+#define LTQ_ES_PRTCL_F0_REG_PFR1_VAL(val) (((val) & 0xff) << 8)
+#define LTQ_ES_PRTCL_F0_REG_PFR1_GET(val) ((((val) & LTQ_ES_PRTCL_F0_REG_PFR1) >> 8) & 0xff)
+#define LTQ_ES_PRTCL_F0_REG_PFR1_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_F0_REG_PFR1) | (((val) & 0xff) << 8))
+/* Value Compared with Protocol in IP Header (7:0) */
+#define LTQ_ES_PRTCL_F0_REG_PFR0 (0xff)
+#define LTQ_ES_PRTCL_F0_REG_PFR0_VAL(val) (((val) & 0xff) << 0)
+#define LTQ_ES_PRTCL_F0_REG_PFR0_GET(val) ((((val) & LTQ_ES_PRTCL_F0_REG_PFR0) >> 0) & 0xff)
+#define LTQ_ES_PRTCL_F0_REG_PFR0_SET(reg,val) (reg) = ((reg & ~LTQ_ES_PRTCL_F0_REG_PFR0) | (((val) & 0xff) << 0))
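+
+/*
+ * Illustrative sketch only (not part of the original vendor header): the
+ * GET macros take a raw register value read elsewhere and return the field
+ * right-aligned. The register value below is hypothetical.
+ */
+#if 0	/* example usage */
+	unsigned int prtcl_f0 = 0x06110000;	/* assumed raw PRTCL_F0 register value */
+	unsigned int pfr3 = LTQ_ES_PRTCL_F0_REG_PFR3_GET(prtcl_f0);	/* 0x06, the IP protocol number of TCP */
+#endif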
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/irq.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/irq.h
new file mode 100644
index 0000000000..06dc173544
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/irq.h
@@ -0,0 +1,36 @@
+/*
+ * arch/mips/include/asm/mach-lantiq/svip/irq.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2010 Lantiq
+ *
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#include <svip_irq.h>
+
+#define NR_IRQS 264
+
+#include_next <irq.h>
+
+/* Functions for EXINT handling */
+extern int ifx_enable_external_int(u32 exint, u32 mode);
+extern int ifx_disable_external_int(u32 exint);
+extern int ifx_external_int_level(u32 exint);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/lantiq_soc.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/lantiq_soc.h
new file mode 100644
index 0000000000..697d672f36
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/lantiq_soc.h
@@ -0,0 +1,71 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_SVIP_H__
+#define _LTQ_SVIP_H__
+
+#ifdef CONFIG_SOC_SVIP
+
+#include <lantiq.h>
+
+/* Chip IDs */
+#define SOC_ID_SVIP 0x169
+
+/* SoC Types */
+#define SOC_TYPE_SVIP 0x01
+
+/* ASC0/1 - serial port */
+#define LTQ_ASC0_BASE_ADDR 0x14100100
+#define LTQ_ASC1_BASE_ADDR 0x14100200
+#define LTQ_ASC_SIZE 0x100
+#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
+
+#define LTQ_ASC_TIR(x) (INT_NUM_IM0_IRL0 + ((x) * 8))
+#define LTQ_ASC_RIR(x) (INT_NUM_IM0_IRL0 + ((x) * 8) + 2)
+#define LTQ_ASC_EIR(x) (INT_NUM_IM0_IRL0 + ((x) * 8) + 3)
+
+/* ICU - interrupt control unit */
+#define LTQ_ICU_BASE_ADDR 0x14106000
+#define LTQ_ICU_BASE_ADDR1 0x14106028
+#define LTQ_ICU_BASE_ADDR2 0x1E016000
+#define LTQ_ICU_BASE_ADDR3 0x1E016028
+#define LTQ_ICU_BASE_ADDR4 0x14106050
+#define LTQ_ICU_BASE_ADDR5 0x14106078
+#define LTQ_ICU_SIZE 0x100
+
+/* WDT */
+#define LTQ_WDT_BASE_ADDR 0x1F8803F0
+#define LTQ_WDT_SIZE 0x10
+
+/* Status */
+#define LTQ_STATUS_BASE_ADDR (KSEG1 + 0x1E000500)
+#define LTQ_STATUS_CHIPID ((u32 *)(LTQ_STATUS_BASE_ADDR + 0x000C))
+
+#define LTQ_EIU_BASE_ADDR 0
+
+#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
+#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
+
+extern __iomem void *ltq_ebu_membase;
+
+extern void ltq_gpio_configure(int port, int pin, bool dirin, bool puen,
+ bool altsel0, bool altsel1);
+extern int ltq_port_get_dir(unsigned int port, unsigned int pin);
+extern int ltq_port_get_puden(unsigned int port, unsigned int pin);
+extern int ltq_port_get_altsel0(unsigned int port, unsigned int pin);
+extern int ltq_port_get_altsel1(unsigned int port, unsigned int pin);
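+
+/*
+ * Illustrative sketch only (not part of the original header): minimal use
+ * of the GPIO helpers declared above. Going by the parameter names, dirin
+ * selects input direction and puen the pull-up, so the call below is
+ * assumed to configure port 0 pin 3 as a plain GPIO output without
+ * pull-up; the pin choice is hypothetical.
+ */
+#if 0	/* example usage */
+	ltq_gpio_configure(0, 3, false, false, false, false);
+#endif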
+
+#define ltq_is_ar9() 0
+#define ltq_is_vr9() 0
+#define ltq_is_falcon() 0
+
+#define BS_FLASH 0
+#define LTQ_RST_CAUSE_WDTRST 0x2
+
+#endif /* CONFIG_SOC_SVIP */
+#endif /* _LTQ_SVIP_H__ */
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/mps_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/mps_reg.h
new file mode 100644
index 0000000000..79966b7be7
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/mps_reg.h
@@ -0,0 +1,242 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __MPS_REG_H
+#define __MPS_REG_H
+
+#define mbs_r32(reg) ltq_r32(&mbs->reg)
+#define mbs_w32(val, reg) ltq_w32(val, &mbs->reg)
+#define mbs_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &mbs->reg)
+
+/** MBS register structure */
+struct svip_reg_mbs {
+ unsigned long reserved0[4];
+ unsigned long mbsr0; /* 0x0010 */
+ unsigned long mbsr1; /* 0x0014 */
+ unsigned long mbsr2; /* 0x0018 */
+ unsigned long mbsr3; /* 0x001c */
+ unsigned long mbsr4; /* 0x0020 */
+ unsigned long mbsr5; /* 0x0024 */
+ unsigned long mbsr6; /* 0x0028 */
+ unsigned long mbsr7; /* 0x002c */
+ unsigned long mbsr8; /* 0x0030 */
+ unsigned long mbsr9; /* 0x0034 */
+ unsigned long mbsr10; /* 0x0038 */
+ unsigned long mbsr11; /* 0x003c */
+ unsigned long mbsr12; /* 0x0040 */
+ unsigned long mbsr13; /* 0x0044 */
+ unsigned long mbsr14; /* 0x0048 */
+ unsigned long mbsr15; /* 0x004c */
+ unsigned long mbsr16; /* 0x0050 */
+ unsigned long mbsr17; /* 0x0054 */
+ unsigned long mbsr18; /* 0x0058 */
+ unsigned long mbsr19; /* 0x005c */
+ unsigned long mbsr20; /* 0x0060 */
+ unsigned long mbsr21; /* 0x0064 */
+ unsigned long mbsr22; /* 0x0068 */
+ unsigned long mbsr23; /* 0x006c */
+ unsigned long mbsr24; /* 0x0070 */
+ unsigned long mbsr25; /* 0x0074 */
+ unsigned long mbsr26; /* 0x0078 */
+ unsigned long mbsr27; /* 0x007c */
+ unsigned long mbsr28; /* 0x0080 */
+};
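+
+/*
+ * Illustrative sketch only (not part of the original vendor header): the
+ * mbs_r32/mbs_w32 accessors above expand against a pointer named 'mbs',
+ * which the including code must provide; the declaration below is an
+ * assumption made for the example.
+ */
+#if 0	/* example usage */
+	static struct svip_reg_mbs *mbs;	/* assumed to be ioremapped elsewhere */
+	unsigned long msg0 = mbs_r32(mbsr0);	/* read mailbox register 0 */
+
+	mbs_w32(msg0, mbsr1);			/* copy it into mailbox register 1 */
+#endif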
+
+/** MPS register structure */
+struct svip_reg_mps {
+ volatile unsigned long mps_swirn0set; /* 0x0000 */
+ volatile unsigned long mps_swirn0en; /* 0x0004 */
+ volatile unsigned long mps_swirn0cr; /* 0x0008 */
+ volatile unsigned long mps_swirn0icr; /* 0x000C */
+ volatile unsigned long mps_swirn1set; /* 0x0010 */
+ volatile unsigned long mps_swirn1en; /* 0x0014 */
+ volatile unsigned long mps_swirn1cr; /* 0x0018 */
+ volatile unsigned long mps_swirn1icr; /* 0x001C */
+ volatile unsigned long mps_swirn2set; /* 0x0020 */
+ volatile unsigned long mps_swirn2en; /* 0x0024 */
+ volatile unsigned long mps_swirn2cr; /* 0x0028 */
+ volatile unsigned long mps_swirn2icr; /* 0x002C */
+ volatile unsigned long mps_swirn3set; /* 0x0030 */
+ volatile unsigned long mps_swirn3en; /* 0x0034 */
+ volatile unsigned long mps_swirn3cr; /* 0x0038 */
+ volatile unsigned long mps_swirn3icr; /* 0x003C */
+ volatile unsigned long mps_swirn4set; /* 0x0040 */
+ volatile unsigned long mps_swirn4en; /* 0x0044 */
+ volatile unsigned long mps_swirn4cr; /* 0x0048 */
+ volatile unsigned long mps_swirn4icr; /* 0x004C */
+ volatile unsigned long mps_swirn5set; /* 0x0050 */
+ volatile unsigned long mps_swirn5en; /* 0x0054 */
+ volatile unsigned long mps_swirn5cr; /* 0x0058 */
+ volatile unsigned long mps_swirn5icr; /* 0x005C */
+ volatile unsigned long mps_swirn6set; /* 0x0060 */
+ volatile unsigned long mps_swirn6en; /* 0x0064 */
+ volatile unsigned long mps_swirn6cr; /* 0x0068 */
+ volatile unsigned long mps_swirn6icr; /* 0x006C */
+ volatile unsigned long mps_swirn7set; /* 0x0070 */
+ volatile unsigned long mps_swirn7en; /* 0x0074 */
+ volatile unsigned long mps_swirn7cr; /* 0x0078 */
+ volatile unsigned long mps_swirn7icr; /* 0x007C */
+ volatile unsigned long mps_swirn8set; /* 0x0080 */
+ volatile unsigned long mps_swirn8en; /* 0x0084 */
+ volatile unsigned long mps_swirn8cr; /* 0x0088 */
+ volatile unsigned long mps_swirn8icr; /* 0x008C */
+};
+
+/* Software Interrupt */
+#define IFX_MPS_SWIRN0SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0000))
+#define IFX_MPS_SWIRN0EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0004))
+#define IFX_MPS_SWIRN0CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0008))
+#define IFX_MPS_SWIRN0ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x000C))
+#define IFX_MPS_SWIRN1SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0010))
+#define IFX_MPS_SWIRN1EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0014))
+#define IFX_MPS_SWIRN1CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0018))
+#define IFX_MPS_SWIRN1ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x001C))
+#define IFX_MPS_SWIRN2SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0020))
+#define IFX_MPS_SWIRN2EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0024))
+#define IFX_MPS_SWIRN2CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0028))
+#define IFX_MPS_SWIRN2ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x002C))
+#define IFX_MPS_SWIRN3SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0030))
+#define IFX_MPS_SWIRN3EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0034))
+#define IFX_MPS_SWIRN3CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0038))
+#define IFX_MPS_SWIRN3ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x003C))
+#define IFX_MPS_SWIRN4SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0040))
+#define IFX_MPS_SWIRN4EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0044))
+#define IFX_MPS_SWIRN4CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0048))
+#define IFX_MPS_SWIRN4ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x004C))
+#define IFX_MPS_SWIRN5SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0050))
+#define IFX_MPS_SWIRN5EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0054))
+#define IFX_MPS_SWIRN5CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0058))
+#define IFX_MPS_SWIRN5ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x005C))
+#define IFX_MPS_SWIRN6SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0060))
+#define IFX_MPS_SWIRN6EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0064))
+#define IFX_MPS_SWIRN6CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0068))
+#define IFX_MPS_SWIRN6ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x006C))
+#define IFX_MPS_SWIRN7SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0070))
+#define IFX_MPS_SWIRN7EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0074))
+#define IFX_MPS_SWIRN7CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0078))
+#define IFX_MPS_SWIRN7ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x007C))
+#define IFX_MPS_SWIRN8SET ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0080))
+#define IFX_MPS_SWIRN8EN ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0084))
+#define IFX_MPS_SWIRN8CR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x0088))
+#define IFX_MPS_SWIRN8ICR ((volatile unsigned int*)(LTQ_SWINT_BASE + 0x008C))
+
+/*******************************************************************************
+ * MPS_SWIRNSET Register
+ ******************************************************************************/
+
+/* Software Interrupt Request IR5 (5) */
+#define IFX_MPS_SWIRNSET_IR5 (0x1 << 5)
+#define IFX_MPS_SWIRNSET_IR5_VAL(val) (((val) & 0x1) << 5)
+#define IFX_MPS_SWIRNSET_IR5_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNSET_IR5) | (((val) & 0x1) << 5))
+/* Software Interrupt Request IR4 (4) */
+#define IFX_MPS_SWIRNSET_IR4 (0x1 << 4)
+#define IFX_MPS_SWIRNSET_IR4_VAL(val) (((val) & 0x1) << 4)
+#define IFX_MPS_SWIRNSET_IR4_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNSET_IR4) | (((val) & 0x1) << 4))
+/* Software Interrupt Request IR3 (3) */
+#define IFX_MPS_SWIRNSET_IR3 (0x1 << 3)
+#define IFX_MPS_SWIRNSET_IR3_VAL(val) (((val) & 0x1) << 3)
+#define IFX_MPS_SWIRNSET_IR3_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNSET_IR3) | (((val) & 0x1) << 3))
+/* Software Interrupt Request IR2 (2) */
+#define IFX_MPS_SWIRNSET_IR2 (0x1 << 2)
+#define IFX_MPS_SWIRNSET_IR2_VAL(val) (((val) & 0x1) << 2)
+#define IFX_MPS_SWIRNSET_IR2_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNSET_IR2) | (((val) & 0x1) << 2))
+/* Software Interrupt Request IR1 (1) */
+#define IFX_MPS_SWIRNSET_IR1 (0x1 << 1)
+#define IFX_MPS_SWIRNSET_IR1_VAL(val) (((val) & 0x1) << 1)
+#define IFX_MPS_SWIRNSET_IR1_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNSET_IR1) | (((val) & 0x1) << 1))
+/* Software Interrupt Request IR0 (0) */
+#define IFX_MPS_SWIRNSET_IR0 (0x1)
+#define IFX_MPS_SWIRNSET_IR0_VAL(val) (((val) & 0x1) << 0)
+#define IFX_MPS_SWIRNSET_IR0_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNSET_IR0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * MPS_SWIRNEN Register
+ ******************************************************************************/
+
+/* Software Interrupt Request IR5 (5) */
+#define IFX_MPS_SWIRNEN_IR5 (0x1 << 5)
+#define IFX_MPS_SWIRNEN_IR5_VAL(val) (((val) & 0x1) << 5)
+#define IFX_MPS_SWIRNEN_IR5_GET(val) ((((val) & IFX_MPS_SWIRNEN_IR5) >> 5) & 0x1)
+#define IFX_MPS_SWIRNEN_IR5_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNEN_IR5) | (((val) & 0x1) << 5))
+/* Software Interrupt Request IR4 (4) */
+#define IFX_MPS_SWIRNEN_IR4 (0x1 << 4)
+#define IFX_MPS_SWIRNEN_IR4_VAL(val) (((val) & 0x1) << 4)
+#define IFX_MPS_SWIRNEN_IR4_GET(val) ((((val) & IFX_MPS_SWIRNEN_IR4) >> 4) & 0x1)
+#define IFX_MPS_SWIRNEN_IR4_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNEN_IR4) | (((val) & 0x1) << 4))
+/* Software Interrupt Request IR3 (3) */
+#define IFX_MPS_SWIRNEN_IR3 (0x1 << 3)
+#define IFX_MPS_SWIRNEN_IR3_VAL(val) (((val) & 0x1) << 3)
+#define IFX_MPS_SWIRNEN_IR3_GET(val) ((((val) & IFX_MPS_SWIRNEN_IR3) >> 3) & 0x1)
+#define IFX_MPS_SWIRNEN_IR3_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNEN_IR3) | (((val) & 0x1) << 3))
+/* Software Interrupt Request IR2 (2) */
+#define IFX_MPS_SWIRNEN_IR2 (0x1 << 2)
+#define IFX_MPS_SWIRNEN_IR2_VAL(val) (((val) & 0x1) << 2)
+#define IFX_MPS_SWIRNEN_IR2_GET(val) ((((val) & IFX_MPS_SWIRNEN_IR2) >> 2) & 0x1)
+#define IFX_MPS_SWIRNEN_IR2_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNEN_IR2) | (((val) & 0x1) << 2))
+/* Software Interrupt Request IR1 (1) */
+#define IFX_MPS_SWIRNEN_IR1 (0x1 << 1)
+#define IFX_MPS_SWIRNEN_IR1_VAL(val) (((val) & 0x1) << 1)
+#define IFX_MPS_SWIRNEN_IR1_GET(val) ((((val) & IFX_MPS_SWIRNEN_IR1) >> 1) & 0x1)
+#define IFX_MPS_SWIRNEN_IR1_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNEN_IR1) | (((val) & 0x1) << 1))
+/* Software Interrupt Request IR0 (0) */
+#define IFX_MPS_SWIRNEN_IR0 (0x1)
+#define IFX_MPS_SWIRNEN_IR0_VAL(val) (((val) & 0x1) << 0)
+#define IFX_MPS_SWIRNEN_IR0_GET(val) ((((val) & IFX_MPS_SWIRNEN_IR0) >> 0) & 0x1)
+#define IFX_MPS_SWIRNEN_IR0_SET(reg,val) (reg) = ((reg & ~IFX_MPS_SWIRNEN_IR0) | (((val) & 0x1) << 0))
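+
+/*
+ * Illustrative sketch only (not part of the original vendor header): enable
+ * software interrupt request 0 with a read-modify-write on SWIRN0EN.
+ * LTQ_SWINT_BASE is assumed to be defined by the including code.
+ */
+#if 0	/* example usage */
+	unsigned int en = *IFX_MPS_SWIRN0EN;
+
+	IFX_MPS_SWIRNEN_IR0_SET(en, 1);	/* set the IR0 enable bit */
+	*IFX_MPS_SWIRN0EN = en;
+#endif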
+
+/*******************************************************************************
+ * MPS_SWIRNICR Register
+ ******************************************************************************/
+
+/* Software Interrupt Request IR5 (5) */
+#define IFX_MPS_SWIRNICR_IR5 (0x1 << 5)
+#define IFX_MPS_SWIRNICR_IR5_GET(val) ((((val) & IFX_MPS_SWIRNICR_IR5) >> 5) & 0x1)
+/* Software Interrupt Request IR4 (4) */
+#define IFX_MPS_SWIRNICR_IR4 (0x1 << 4)
+#define IFX_MPS_SWIRNICR_IR4_GET(val) ((((val) & IFX_MPS_SWIRNICR_IR4) >> 4) & 0x1)
+/* Software Interrupt Request IR3 (3) */
+#define IFX_MPS_SWIRNICR_IR3 (0x1 << 3)
+#define IFX_MPS_SWIRNICR_IR3_GET(val) ((((val) & IFX_MPS_SWIRNICR_IR3) >> 3) & 0x1)
+/* Software Interrupt Request IR2 (2) */
+#define IFX_MPS_SWIRNICR_IR2 (0x1 << 2)
+#define IFX_MPS_SWIRNICR_IR2_GET(val) ((((val) & IFX_MPS_SWIRNICR_IR2) >> 2) & 0x1)
+/* Software Interrupt Request IR1 (1) */
+#define IFX_MPS_SWIRNICR_IR1 (0x1 << 1)
+#define IFX_MPS_SWIRNICR_IR1_GET(val) ((((val) & IFX_MPS_SWIRNICR_IR1) >> 1) & 0x1)
+/* Software Interrupt Request IR0 (0) */
+#define IFX_MPS_SWIRNICR_IR0 (0x1)
+#define IFX_MPS_SWIRNICR_IR0_GET(val) ((((val) & IFX_MPS_SWIRNICR_IR0) >> 0) & 0x1)
+
+/*******************************************************************************
+ * MPS_SWIRNCR Register
+ ******************************************************************************/
+
+/* Software Interrupt Request IR5 (5) */
+#define IFX_MPS_SWIRNCR_IR5 (0x1 << 5)
+#define IFX_MPS_SWIRNCR_IR5_GET(val) ((((val) & IFX_MPS_SWIRNCR_IR5) >> 5) & 0x1)
+/* Software Interrupt Request IR4 (4) */
+#define IFX_MPS_SWIRNCR_IR4 (0x1 << 4)
+#define IFX_MPS_SWIRNCR_IR4_GET(val) ((((val) & IFX_MPS_SWIRNCR_IR4) >> 4) & 0x1)
+/* Software Interrupt Request IR3 (3) */
+#define IFX_MPS_SWIRNCR_IR3 (0x1 << 3)
+#define IFX_MPS_SWIRNCR_IR3_GET(val) ((((val) & IFX_MPS_SWIRNCR_IR3) >> 3) & 0x1)
+/* Software Interrupt Request IR2 (2) */
+#define IFX_MPS_SWIRNCR_IR2 (0x1 << 2)
+#define IFX_MPS_SWIRNCR_IR2_GET(val) ((((val) & IFX_MPS_SWIRNCR_IR2) >> 2) & 0x1)
+/* Software Interrupt Request IR1 (1) */
+#define IFX_MPS_SWIRNCR_IR1 (0x1 << 1)
+#define IFX_MPS_SWIRNCR_IR1_GET(val) ((((val) & IFX_MPS_SWIRNCR_IR1) >> 1) & 0x1)
+/* Software Interrupt Request IR0 (0) */
+#define IFX_MPS_SWIRNCR_IR0 (0x1)
+#define IFX_MPS_SWIRNCR_IR0_GET(val) ((((val) & IFX_MPS_SWIRNCR_IR0) >> 0) & 0x1)
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/port_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/port_reg.h
new file mode 100644
index 0000000000..57d04917fc
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/port_reg.h
@@ -0,0 +1,3262 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __PORT_REG_H
+#define __PORT_REG_H
+
+#define port_r32(reg) __raw_readl(&reg)
+#define port_w32(val, reg) __raw_writel(val, &reg)
+
+/** PORT register structure */
+struct svip_reg_port {
+ volatile u32 out; /* 0x0000 */
+ volatile u32 in; /* 0x0004 */
+ volatile u32 dir; /* 0x0008 */
+ volatile u32 altsel0; /* 0x000C */
+ volatile u32 altsel1; /* 0x0010 */
+ volatile u32 puen; /* 0x0014 */
+ volatile u32 exintcr0; /* 0x0018 */
+ volatile u32 exintcr1; /* 0x001C */
+ volatile u32 irncr; /* 0x0020 */
+ volatile u32 irnicr; /* 0x0024 */
+ volatile u32 irnen; /* 0x0028 */
+ volatile u32 irncfg; /* 0x002C */
+ volatile u32 irnenset; /* 0x0030 */
+ volatile u32 irnenclr; /* 0x0034 */
+};
+
+/*******************************************************************************
+ * Port 0 Data Output Register
+ ******************************************************************************/
+
+/* Port 0 Pin # Output Value (19) */
+#define PORT_P0_OUT_P19 (0x1 << 19)
+#define PORT_P0_OUT_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_OUT_P19_GET(val) ((((val) & PORT_P0_OUT_P19) >> 19) & 0x1)
+#define PORT_P0_OUT_P19_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P19) | (((val) & 0x1) << 19))
+/* Port 0 Pin # Output Value (18) */
+#define PORT_P0_OUT_P18 (0x1 << 18)
+#define PORT_P0_OUT_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P0_OUT_P18_GET(val) ((((val) & PORT_P0_OUT_P18) >> 18) & 0x1)
+#define PORT_P0_OUT_P18_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P18) | (((val) & 0x1) << 18))
+/* Port 0 Pin # Output Value (17) */
+#define PORT_P0_OUT_P17 (0x1 << 17)
+#define PORT_P0_OUT_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_OUT_P17_GET(val) ((((val) & PORT_P0_OUT_P17) >> 17) & 0x1)
+#define PORT_P0_OUT_P17_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P17) | (((val) & 0x1) << 17))
+/* Port 0 Pin # Output Value (16) */
+#define PORT_P0_OUT_P16 (0x1 << 16)
+#define PORT_P0_OUT_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_OUT_P16_GET(val) ((((val) & PORT_P0_OUT_P16) >> 16) & 0x1)
+#define PORT_P0_OUT_P16_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P16) | (((val) & 0x1) << 16))
+/* Port 0 Pin # Output Value (15) */
+#define PORT_P0_OUT_P15 (0x1 << 15)
+#define PORT_P0_OUT_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_OUT_P15_GET(val) ((((val) & PORT_P0_OUT_P15) >> 15) & 0x1)
+#define PORT_P0_OUT_P15_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P15) | (((val) & 0x1) << 15))
+/* Port 0 Pin # Output Value (14) */
+#define PORT_P0_OUT_P14 (0x1 << 14)
+#define PORT_P0_OUT_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_OUT_P14_GET(val) ((((val) & PORT_P0_OUT_P14) >> 14) & 0x1)
+#define PORT_P0_OUT_P14_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P14) | (((val) & 0x1) << 14))
+/* Port 0 Pin # Output Value (13) */
+#define PORT_P0_OUT_P13 (0x1 << 13)
+#define PORT_P0_OUT_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_OUT_P13_GET(val) ((((val) & PORT_P0_OUT_P13) >> 13) & 0x1)
+#define PORT_P0_OUT_P13_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P13) | (((val) & 0x1) << 13))
+/* Port 0 Pin # Output Value (12) */
+#define PORT_P0_OUT_P12 (0x1 << 12)
+#define PORT_P0_OUT_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_OUT_P12_GET(val) ((((val) & PORT_P0_OUT_P12) >> 12) & 0x1)
+#define PORT_P0_OUT_P12_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P12) | (((val) & 0x1) << 12))
+/* Port 0 Pin # Output Value (11) */
+#define PORT_P0_OUT_P11 (0x1 << 11)
+#define PORT_P0_OUT_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_OUT_P11_GET(val) ((((val) & PORT_P0_OUT_P11) >> 11) & 0x1)
+#define PORT_P0_OUT_P11_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P11) | (((val) & 0x1) << 11))
+/* Port 0 Pin # Output Value (10) */
+#define PORT_P0_OUT_P10 (0x1 << 10)
+#define PORT_P0_OUT_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_OUT_P10_GET(val) ((((val) & PORT_P0_OUT_P10) >> 10) & 0x1)
+#define PORT_P0_OUT_P10_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P10) | (((val) & 0x1) << 10))
+/* Port 0 Pin # Output Value (9) */
+#define PORT_P0_OUT_P9 (0x1 << 9)
+#define PORT_P0_OUT_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_OUT_P9_GET(val) ((((val) & PORT_P0_OUT_P9) >> 9) & 0x1)
+#define PORT_P0_OUT_P9_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P9) | (((val) & 0x1) << 9))
+/* Port 0 Pin # Output Value (8) */
+#define PORT_P0_OUT_P8 (0x1 << 8)
+#define PORT_P0_OUT_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_OUT_P8_GET(val) ((((val) & PORT_P0_OUT_P8) >> 8) & 0x1)
+#define PORT_P0_OUT_P8_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P8) | (((val) & 0x1) << 8))
+/* Port 0 Pin # Output Value (7) */
+#define PORT_P0_OUT_P7 (0x1 << 7)
+#define PORT_P0_OUT_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_OUT_P7_GET(val) ((((val) & PORT_P0_OUT_P7) >> 7) & 0x1)
+#define PORT_P0_OUT_P7_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P7) | (((val) & 0x1) << 7))
+/* Port 0 Pin # Output Value (6) */
+#define PORT_P0_OUT_P6 (0x1 << 6)
+#define PORT_P0_OUT_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P0_OUT_P6_GET(val) ((((val) & PORT_P0_OUT_P6) >> 6) & 0x1)
+#define PORT_P0_OUT_P6_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P6) | (((val) & 0x1) << 6))
+/* Port 0 Pin # Output Value (5) */
+#define PORT_P0_OUT_P5 (0x1 << 5)
+#define PORT_P0_OUT_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P0_OUT_P5_GET(val) ((((val) & PORT_P0_OUT_P5) >> 5) & 0x1)
+#define PORT_P0_OUT_P5_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P5) | (((val) & 0x1) << 5))
+/* Port 0 Pin # Output Value (4) */
+#define PORT_P0_OUT_P4 (0x1 << 4)
+#define PORT_P0_OUT_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P0_OUT_P4_GET(val) ((((val) & PORT_P0_OUT_P4) >> 4) & 0x1)
+#define PORT_P0_OUT_P4_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P4) | (((val) & 0x1) << 4))
+/* Port 0 Pin # Output Value (3) */
+#define PORT_P0_OUT_P3 (0x1 << 3)
+#define PORT_P0_OUT_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P0_OUT_P3_GET(val) ((((val) & PORT_P0_OUT_P3) >> 3) & 0x1)
+#define PORT_P0_OUT_P3_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P3) | (((val) & 0x1) << 3))
+/* Port 0 Pin # Output Value (2) */
+#define PORT_P0_OUT_P2 (0x1 << 2)
+#define PORT_P0_OUT_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P0_OUT_P2_GET(val) ((((val) & PORT_P0_OUT_P2) >> 2) & 0x1)
+#define PORT_P0_OUT_P2_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P2) | (((val) & 0x1) << 2))
+/* Port 0 Pin # Output Value (1) */
+#define PORT_P0_OUT_P1 (0x1 << 1)
+#define PORT_P0_OUT_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P0_OUT_P1_GET(val) ((((val) & PORT_P0_OUT_P1) >> 1) & 0x1)
+#define PORT_P0_OUT_P1_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P1) | (((val) & 0x1) << 1))
+/* Port 0 Pin # Output Value (0) */
+#define PORT_P0_OUT_P0 (0x1)
+#define PORT_P0_OUT_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P0_OUT_P0_GET(val) ((((val) & PORT_P0_OUT_P0) >> 0) & 0x1)
+#define PORT_P0_OUT_P0_SET(reg,val) (reg) = ((reg & ~PORT_P0_OUT_P0) | (((val) & 0x1) << 0))
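+
+/*
+ * Illustrative sketch only (not part of the original vendor header): drive
+ * port 0 pin 5 high through the port_r32/port_w32 accessors defined at the
+ * top of this file. The pointer name 'port0' is hypothetical and assumed to
+ * be mapped elsewhere.
+ */
+#if 0	/* example usage */
+	static struct svip_reg_port *port0;	/* assumed to be ioremapped elsewhere */
+	u32 out = port_r32(port0->out);
+
+	PORT_P0_OUT_P5_SET(out, 1);	/* set the pin 5 output bit */
+	port_w32(out, port0->out);
+#endif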
+
+/*******************************************************************************
+ * Port 0 Data Input Register
+ ******************************************************************************/
+
+/* Port 0 Pin # Latched Input Value (19) */
+#define PORT_P0_IN_P19 (0x1 << 19)
+#define PORT_P0_IN_P19_GET(val) ((((val) & PORT_P0_IN_P19) >> 19) & 0x1)
+/* Port 0 Pin # Latched Input Value (18) */
+#define PORT_P0_IN_P18 (0x1 << 18)
+#define PORT_P0_IN_P18_GET(val) ((((val) & PORT_P0_IN_P18) >> 18) & 0x1)
+/* Port 0 Pin # Latched Input Value (17) */
+#define PORT_P0_IN_P17 (0x1 << 17)
+#define PORT_P0_IN_P17_GET(val) ((((val) & PORT_P0_IN_P17) >> 17) & 0x1)
+/* Port 0 Pin # Latched Input Value (16) */
+#define PORT_P0_IN_P16 (0x1 << 16)
+#define PORT_P0_IN_P16_GET(val) ((((val) & PORT_P0_IN_P16) >> 16) & 0x1)
+/* Port 0 Pin # Latched Input Value (15) */
+#define PORT_P0_IN_P15 (0x1 << 15)
+#define PORT_P0_IN_P15_GET(val) ((((val) & PORT_P0_IN_P15) >> 15) & 0x1)
+/* Port 0 Pin # Latched Input Value (14) */
+#define PORT_P0_IN_P14 (0x1 << 14)
+#define PORT_P0_IN_P14_GET(val) ((((val) & PORT_P0_IN_P14) >> 14) & 0x1)
+/* Port 0 Pin # Latched Input Value (13) */
+#define PORT_P0_IN_P13 (0x1 << 13)
+#define PORT_P0_IN_P13_GET(val) ((((val) & PORT_P0_IN_P13) >> 13) & 0x1)
+/* Port 0 Pin # Latched Input Value (12) */
+#define PORT_P0_IN_P12 (0x1 << 12)
+#define PORT_P0_IN_P12_GET(val) ((((val) & PORT_P0_IN_P12) >> 12) & 0x1)
+/* Port 0 Pin # Latched Input Value (11) */
+#define PORT_P0_IN_P11 (0x1 << 11)
+#define PORT_P0_IN_P11_GET(val) ((((val) & PORT_P0_IN_P11) >> 11) & 0x1)
+/* Port 0 Pin # Latched Input Value (10) */
+#define PORT_P0_IN_P10 (0x1 << 10)
+#define PORT_P0_IN_P10_GET(val) ((((val) & PORT_P0_IN_P10) >> 10) & 0x1)
+/* Port 0 Pin # Latched Input Value (9) */
+#define PORT_P0_IN_P9 (0x1 << 9)
+#define PORT_P0_IN_P9_GET(val) ((((val) & PORT_P0_IN_P9) >> 9) & 0x1)
+/* Port 0 Pin # Latched Input Value (8) */
+#define PORT_P0_IN_P8 (0x1 << 8)
+#define PORT_P0_IN_P8_GET(val) ((((val) & PORT_P0_IN_P8) >> 8) & 0x1)
+/* Port 0 Pin # Latched Input Value (7) */
+#define PORT_P0_IN_P7 (0x1 << 7)
+#define PORT_P0_IN_P7_GET(val) ((((val) & PORT_P0_IN_P7) >> 7) & 0x1)
+/* Port 0 Pin # Latched Input Value (6) */
+#define PORT_P0_IN_P6 (0x1 << 6)
+#define PORT_P0_IN_P6_GET(val) ((((val) & PORT_P0_IN_P6) >> 6) & 0x1)
+/* Port 0 Pin # Latched Input Value (5) */
+#define PORT_P0_IN_P5 (0x1 << 5)
+#define PORT_P0_IN_P5_GET(val) ((((val) & PORT_P0_IN_P5) >> 5) & 0x1)
+/* Port 0 Pin # Latched Input Value (4) */
+#define PORT_P0_IN_P4 (0x1 << 4)
+#define PORT_P0_IN_P4_GET(val) ((((val) & PORT_P0_IN_P4) >> 4) & 0x1)
+/* Port 0 Pin # Latched Input Value (3) */
+#define PORT_P0_IN_P3 (0x1 << 3)
+#define PORT_P0_IN_P3_GET(val) ((((val) & PORT_P0_IN_P3) >> 3) & 0x1)
+/* Port 0 Pin # Latched Input Value (2) */
+#define PORT_P0_IN_P2 (0x1 << 2)
+#define PORT_P0_IN_P2_GET(val) ((((val) & PORT_P0_IN_P2) >> 2) & 0x1)
+/* Port 0 Pin # Latched Input Value (1) */
+#define PORT_P0_IN_P1 (0x1 << 1)
+#define PORT_P0_IN_P1_GET(val) ((((val) & PORT_P0_IN_P1) >> 1) & 0x1)
+/* Port 0 Pin # Latched Input Value (0) */
+#define PORT_P0_IN_P0 (0x1)
+#define PORT_P0_IN_P0_GET(val) ((((val) & PORT_P0_IN_P0) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Port 0 Direction Register
+ ******************************************************************************/
+
+/* Port 0 Pin #Direction Control (19) */
+#define PORT_P0_DIR_P19 (0x1 << 19)
+#define PORT_P0_DIR_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_DIR_P19_GET(val) ((((val) & PORT_P0_DIR_P19) >> 19) & 0x1)
+#define PORT_P0_DIR_P19_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P19) | (((val) & 0x1) << 19))
+/* Port 0 Pin #Direction Control (18) */
+#define PORT_P0_DIR_P18 (0x1 << 18)
+#define PORT_P0_DIR_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P0_DIR_P18_GET(val) ((((val) & PORT_P0_DIR_P18) >> 18) & 0x1)
+#define PORT_P0_DIR_P18_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P18) | (((val) & 0x1) << 18))
+/* Port 0 Pin #Direction Control (17) */
+#define PORT_P0_DIR_P17 (0x1 << 17)
+#define PORT_P0_DIR_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_DIR_P17_GET(val) ((((val) & PORT_P0_DIR_P17) >> 17) & 0x1)
+#define PORT_P0_DIR_P17_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P17) | (((val) & 0x1) << 17))
+/* Port 0 Pin #Direction Control (16) */
+#define PORT_P0_DIR_P16 (0x1 << 16)
+#define PORT_P0_DIR_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_DIR_P16_GET(val) ((((val) & PORT_P0_DIR_P16) >> 16) & 0x1)
+#define PORT_P0_DIR_P16_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P16) | (((val) & 0x1) << 16))
+/* Port 0 Pin #Direction Control (15) */
+#define PORT_P0_DIR_P15 (0x1 << 15)
+#define PORT_P0_DIR_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_DIR_P15_GET(val) ((((val) & PORT_P0_DIR_P15) >> 15) & 0x1)
+#define PORT_P0_DIR_P15_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P15) | (((val) & 0x1) << 15))
+/* Port 0 Pin #Direction Control (14) */
+#define PORT_P0_DIR_P14 (0x1 << 14)
+#define PORT_P0_DIR_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_DIR_P14_GET(val) ((((val) & PORT_P0_DIR_P14) >> 14) & 0x1)
+#define PORT_P0_DIR_P14_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P14) | (((val) & 0x1) << 14))
+/* Port 0 Pin #Direction Control (13) */
+#define PORT_P0_DIR_P13 (0x1 << 13)
+#define PORT_P0_DIR_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_DIR_P13_GET(val) ((((val) & PORT_P0_DIR_P13) >> 13) & 0x1)
+#define PORT_P0_DIR_P13_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P13) | (((val) & 0x1) << 13))
+/* Port 0 Pin #Direction Control (12) */
+#define PORT_P0_DIR_P12 (0x1 << 12)
+#define PORT_P0_DIR_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_DIR_P12_GET(val) ((((val) & PORT_P0_DIR_P12) >> 12) & 0x1)
+#define PORT_P0_DIR_P12_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P12) | (((val) & 0x1) << 12))
+/* Port 0 Pin #Direction Control (11) */
+#define PORT_P0_DIR_P11 (0x1 << 11)
+#define PORT_P0_DIR_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_DIR_P11_GET(val) ((((val) & PORT_P0_DIR_P11) >> 11) & 0x1)
+#define PORT_P0_DIR_P11_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P11) | (((val) & 0x1) << 11))
+/* Port 0 Pin #Direction Control (10) */
+#define PORT_P0_DIR_P10 (0x1 << 10)
+#define PORT_P0_DIR_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_DIR_P10_GET(val) ((((val) & PORT_P0_DIR_P10) >> 10) & 0x1)
+#define PORT_P0_DIR_P10_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P10) | (((val) & 0x1) << 10))
+/* Port 0 Pin #Direction Control (9) */
+#define PORT_P0_DIR_P9 (0x1 << 9)
+#define PORT_P0_DIR_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_DIR_P9_GET(val) ((((val) & PORT_P0_DIR_P9) >> 9) & 0x1)
+#define PORT_P0_DIR_P9_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P9) | (((val) & 0x1) << 9))
+/* Port 0 Pin #Direction Control (8) */
+#define PORT_P0_DIR_P8 (0x1 << 8)
+#define PORT_P0_DIR_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_DIR_P8_GET(val) ((((val) & PORT_P0_DIR_P8) >> 8) & 0x1)
+#define PORT_P0_DIR_P8_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P8) | (((val) & 0x1) << 8))
+/* Port 0 Pin #Direction Control (7) */
+#define PORT_P0_DIR_P7 (0x1 << 7)
+#define PORT_P0_DIR_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_DIR_P7_GET(val) ((((val) & PORT_P0_DIR_P7) >> 7) & 0x1)
+#define PORT_P0_DIR_P7_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P7) | (((val) & 0x1) << 7))
+/* Port 0 Pin #Direction Control (6) */
+#define PORT_P0_DIR_P6 (0x1 << 6)
+#define PORT_P0_DIR_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P0_DIR_P6_GET(val) ((((val) & PORT_P0_DIR_P6) >> 6) & 0x1)
+#define PORT_P0_DIR_P6_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P6) | (((val) & 0x1) << 6))
+/* Port 0 Pin #Direction Control (5) */
+#define PORT_P0_DIR_P5 (0x1 << 5)
+#define PORT_P0_DIR_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P0_DIR_P5_GET(val) ((((val) & PORT_P0_DIR_P5) >> 5) & 0x1)
+#define PORT_P0_DIR_P5_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P5) | (((val) & 0x1) << 5))
+/* Port 0 Pin #Direction Control (4) */
+#define PORT_P0_DIR_P4 (0x1 << 4)
+#define PORT_P0_DIR_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P0_DIR_P4_GET(val) ((((val) & PORT_P0_DIR_P4) >> 4) & 0x1)
+#define PORT_P0_DIR_P4_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P4) | (((val) & 0x1) << 4))
+/* Port 0 Pin #Direction Control (3) */
+#define PORT_P0_DIR_P3 (0x1 << 3)
+#define PORT_P0_DIR_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P0_DIR_P3_GET(val) ((((val) & PORT_P0_DIR_P3) >> 3) & 0x1)
+#define PORT_P0_DIR_P3_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P3) | (((val) & 0x1) << 3))
+/* Port 0 Pin #Direction Control (2) */
+#define PORT_P0_DIR_P2 (0x1 << 2)
+#define PORT_P0_DIR_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P0_DIR_P2_GET(val) ((((val) & PORT_P0_DIR_P2) >> 2) & 0x1)
+#define PORT_P0_DIR_P2_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P2) | (((val) & 0x1) << 2))
+/* Port 0 Pin #Direction Control (1) */
+#define PORT_P0_DIR_P1 (0x1 << 1)
+#define PORT_P0_DIR_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P0_DIR_P1_GET(val) ((((val) & PORT_P0_DIR_P1) >> 1) & 0x1)
+#define PORT_P0_DIR_P1_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P1) | (((val) & 0x1) << 1))
+/* Port 0 Pin #Direction Control (0) */
+#define PORT_P0_DIR_P0 (0x1)
+#define PORT_P0_DIR_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P0_DIR_P0_GET(val) ((((val) & PORT_P0_DIR_P0) >> 0) & 0x1)
+#define PORT_P0_DIR_P0_SET(reg,val) (reg) = ((reg & ~PORT_P0_DIR_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 0 Alternate Function Select Register 0
+ ******************************************************************************/
+
+/* Alternate Function at Port 0 Bit # (19) */
+#define PORT_P0_ALTSEL0_P19 (0x1 << 19)
+#define PORT_P0_ALTSEL0_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_ALTSEL0_P19_GET(val) ((((val) & PORT_P0_ALTSEL0_P19) >> 19) & 0x1)
+#define PORT_P0_ALTSEL0_P19_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P19) | (((val) & 0x1) << 19))
+/* Alternate Function at Port 0 Bit # (18) */
+#define PORT_P0_ALTSEL0_P18 (0x1 << 18)
+#define PORT_P0_ALTSEL0_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P0_ALTSEL0_P18_GET(val) ((((val) & PORT_P0_ALTSEL0_P18) >> 18) & 0x1)
+#define PORT_P0_ALTSEL0_P18_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P18) | (((val) & 0x1) << 18))
+/* Alternate Function at Port 0 Bit # (17) */
+#define PORT_P0_ALTSEL0_P17 (0x1 << 17)
+#define PORT_P0_ALTSEL0_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_ALTSEL0_P17_GET(val) ((((val) & PORT_P0_ALTSEL0_P17) >> 17) & 0x1)
+#define PORT_P0_ALTSEL0_P17_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P17) | (((val) & 0x1) << 17))
+/* Alternate Function at Port 0 Bit # (16) */
+#define PORT_P0_ALTSEL0_P16 (0x1 << 16)
+#define PORT_P0_ALTSEL0_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_ALTSEL0_P16_GET(val) ((((val) & PORT_P0_ALTSEL0_P16) >> 16) & 0x1)
+#define PORT_P0_ALTSEL0_P16_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P16) | (((val) & 0x1) << 16))
+/* Alternate Function at Port 0 Bit # (15) */
+#define PORT_P0_ALTSEL0_P15 (0x1 << 15)
+#define PORT_P0_ALTSEL0_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_ALTSEL0_P15_GET(val) ((((val) & PORT_P0_ALTSEL0_P15) >> 15) & 0x1)
+#define PORT_P0_ALTSEL0_P15_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P15) | (((val) & 0x1) << 15))
+/* Alternate Function at Port 0 Bit # (14) */
+#define PORT_P0_ALTSEL0_P14 (0x1 << 14)
+#define PORT_P0_ALTSEL0_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_ALTSEL0_P14_GET(val) ((((val) & PORT_P0_ALTSEL0_P14) >> 14) & 0x1)
+#define PORT_P0_ALTSEL0_P14_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P14) | (((val) & 0x1) << 14))
+/* Alternate Function at Port 0 Bit # (13) */
+#define PORT_P0_ALTSEL0_P13 (0x1 << 13)
+#define PORT_P0_ALTSEL0_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_ALTSEL0_P13_GET(val) ((((val) & PORT_P0_ALTSEL0_P13) >> 13) & 0x1)
+#define PORT_P0_ALTSEL0_P13_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P13) | (((val) & 0x1) << 13))
+/* Alternate Function at Port 0 Bit # (12) */
+#define PORT_P0_ALTSEL0_P12 (0x1 << 12)
+#define PORT_P0_ALTSEL0_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_ALTSEL0_P12_GET(val) ((((val) & PORT_P0_ALTSEL0_P12) >> 12) & 0x1)
+#define PORT_P0_ALTSEL0_P12_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P12) | (((val) & 0x1) << 12))
+/* Alternate Function at Port 0 Bit # (11) */
+#define PORT_P0_ALTSEL0_P11 (0x1 << 11)
+#define PORT_P0_ALTSEL0_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_ALTSEL0_P11_GET(val) ((((val) & PORT_P0_ALTSEL0_P11) >> 11) & 0x1)
+#define PORT_P0_ALTSEL0_P11_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P11) | (((val) & 0x1) << 11))
+/* Alternate Function at Port 0 Bit # (10) */
+#define PORT_P0_ALTSEL0_P10 (0x1 << 10)
+#define PORT_P0_ALTSEL0_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_ALTSEL0_P10_GET(val) ((((val) & PORT_P0_ALTSEL0_P10) >> 10) & 0x1)
+#define PORT_P0_ALTSEL0_P10_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P10) | (((val) & 0x1) << 10))
+/* Alternate Function at Port 0 Bit # (9) */
+#define PORT_P0_ALTSEL0_P9 (0x1 << 9)
+#define PORT_P0_ALTSEL0_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_ALTSEL0_P9_GET(val) ((((val) & PORT_P0_ALTSEL0_P9) >> 9) & 0x1)
+#define PORT_P0_ALTSEL0_P9_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P9) | (((val) & 0x1) << 9))
+/* Alternate Function at Port 0 Bit # (8) */
+#define PORT_P0_ALTSEL0_P8 (0x1 << 8)
+#define PORT_P0_ALTSEL0_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_ALTSEL0_P8_GET(val) ((((val) & PORT_P0_ALTSEL0_P8) >> 8) & 0x1)
+#define PORT_P0_ALTSEL0_P8_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P8) | (((val) & 0x1) << 8))
+/* Alternate Function at Port 0 Bit # (7) */
+#define PORT_P0_ALTSEL0_P7 (0x1 << 7)
+#define PORT_P0_ALTSEL0_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_ALTSEL0_P7_GET(val) ((((val) & PORT_P0_ALTSEL0_P7) >> 7) & 0x1)
+#define PORT_P0_ALTSEL0_P7_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P7) | (((val) & 0x1) << 7))
+/* Alternate Function at Port 0 Bit # (6) */
+#define PORT_P0_ALTSEL0_P6 (0x1 << 6)
+#define PORT_P0_ALTSEL0_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P0_ALTSEL0_P6_GET(val) ((((val) & PORT_P0_ALTSEL0_P6) >> 6) & 0x1)
+#define PORT_P0_ALTSEL0_P6_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P6) | (((val) & 0x1) << 6))
+/* Alternate Function at Port 0 Bit # (5) */
+#define PORT_P0_ALTSEL0_P5 (0x1 << 5)
+#define PORT_P0_ALTSEL0_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P0_ALTSEL0_P5_GET(val) ((((val) & PORT_P0_ALTSEL0_P5) >> 5) & 0x1)
+#define PORT_P0_ALTSEL0_P5_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P5) | (((val) & 0x1) << 5))
+/* Alternate Function at Port 0 Bit # (4) */
+#define PORT_P0_ALTSEL0_P4 (0x1 << 4)
+#define PORT_P0_ALTSEL0_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P0_ALTSEL0_P4_GET(val) ((((val) & PORT_P0_ALTSEL0_P4) >> 4) & 0x1)
+#define PORT_P0_ALTSEL0_P4_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P4) | (((val) & 0x1) << 4))
+/* Alternate Function at Port 0 Bit # (3) */
+#define PORT_P0_ALTSEL0_P3 (0x1 << 3)
+#define PORT_P0_ALTSEL0_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P0_ALTSEL0_P3_GET(val) ((((val) & PORT_P0_ALTSEL0_P3) >> 3) & 0x1)
+#define PORT_P0_ALTSEL0_P3_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P3) | (((val) & 0x1) << 3))
+/* Alternate Function at Port 0 Bit # (2) */
+#define PORT_P0_ALTSEL0_P2 (0x1 << 2)
+#define PORT_P0_ALTSEL0_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P0_ALTSEL0_P2_GET(val) ((((val) & PORT_P0_ALTSEL0_P2) >> 2) & 0x1)
+#define PORT_P0_ALTSEL0_P2_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P2) | (((val) & 0x1) << 2))
+/* Alternate Function at Port 0 Bit # (1) */
+#define PORT_P0_ALTSEL0_P1 (0x1 << 1)
+#define PORT_P0_ALTSEL0_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P0_ALTSEL0_P1_GET(val) ((((val) & PORT_P0_ALTSEL0_P1) >> 1) & 0x1)
+#define PORT_P0_ALTSEL0_P1_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P1) | (((val) & 0x1) << 1))
+/* Alternate Function at Port 0 Bit # (0) */
+#define PORT_P0_ALTSEL0_P0 (0x1)
+#define PORT_P0_ALTSEL0_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P0_ALTSEL0_P0_GET(val) ((((val) & PORT_P0_ALTSEL0_P0) >> 0) & 0x1)
+#define PORT_P0_ALTSEL0_P0_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL0_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 0 Alternate Function Select Register 1
+ ******************************************************************************/
+
+/* Alternate Function at Port 0 Bit # (13) */
+#define PORT_P0_ALTSEL1_P13 (0x1 << 13)
+#define PORT_P0_ALTSEL1_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_ALTSEL1_P13_GET(val) ((((val) & PORT_P0_ALTSEL1_P13) >> 13) & 0x1)
+#define PORT_P0_ALTSEL1_P13_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P13) | (((val) & 0x1) << 13))
+/* Alternate Function at Port 0 Bit # (12) */
+#define PORT_P0_ALTSEL1_P12 (0x1 << 12)
+#define PORT_P0_ALTSEL1_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_ALTSEL1_P12_GET(val) ((((val) & PORT_P0_ALTSEL1_P12) >> 12) & 0x1)
+#define PORT_P0_ALTSEL1_P12_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P12) | (((val) & 0x1) << 12))
+/* Alternate Function at Port 0 Bit # (11) */
+#define PORT_P0_ALTSEL1_P11 (0x1 << 11)
+#define PORT_P0_ALTSEL1_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_ALTSEL1_P11_GET(val) ((((val) & PORT_P0_ALTSEL1_P11) >> 11) & 0x1)
+#define PORT_P0_ALTSEL1_P11_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P11) | (((val) & 0x1) << 11))
+/* Alternate Function at Port 0 Bit # (10) */
+#define PORT_P0_ALTSEL1_P10 (0x1 << 10)
+#define PORT_P0_ALTSEL1_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_ALTSEL1_P10_GET(val) ((((val) & PORT_P0_ALTSEL1_P10) >> 10) & 0x1)
+#define PORT_P0_ALTSEL1_P10_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P10) | (((val) & 0x1) << 10))
+/* Alternate Function at Port 0 Bit # (9) */
+#define PORT_P0_ALTSEL1_P9 (0x1 << 9)
+#define PORT_P0_ALTSEL1_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_ALTSEL1_P9_GET(val) ((((val) & PORT_P0_ALTSEL1_P9) >> 9) & 0x1)
+#define PORT_P0_ALTSEL1_P9_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P9) | (((val) & 0x1) << 9))
+/* Alternate Function at Port 0 Bit # (8) */
+#define PORT_P0_ALTSEL1_P8 (0x1 << 8)
+#define PORT_P0_ALTSEL1_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_ALTSEL1_P8_GET(val) ((((val) & PORT_P0_ALTSEL1_P8) >> 8) & 0x1)
+#define PORT_P0_ALTSEL1_P8_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P8) | (((val) & 0x1) << 8))
+/* Alternate Function at Port 0 Bit # (7) */
+#define PORT_P0_ALTSEL1_P7 (0x1 << 7)
+#define PORT_P0_ALTSEL1_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_ALTSEL1_P7_GET(val) ((((val) & PORT_P0_ALTSEL1_P7) >> 7) & 0x1)
+#define PORT_P0_ALTSEL1_P7_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P7) | (((val) & 0x1) << 7))
+/* Alternate Function at Port 0 Bit # (6) */
+#define PORT_P0_ALTSEL1_P6 (0x1 << 6)
+#define PORT_P0_ALTSEL1_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P0_ALTSEL1_P6_GET(val) ((((val) & PORT_P0_ALTSEL1_P6) >> 6) & 0x1)
+#define PORT_P0_ALTSEL1_P6_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P6) | (((val) & 0x1) << 6))
+/* Alternate Function at Port 0 Bit # (3) */
+#define PORT_P0_ALTSEL1_P3 (0x1 << 3)
+#define PORT_P0_ALTSEL1_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P0_ALTSEL1_P3_GET(val) ((((val) & PORT_P0_ALTSEL1_P3) >> 3) & 0x1)
+#define PORT_P0_ALTSEL1_P3_SET(reg,val) (reg) = ((reg & ~PORT_P0_ALTSEL1_P3) | (((val) & 0x1) << 3))
+
+/*******************************************************************************
+ * Port 0 Pull Up Enable Register
+ ******************************************************************************/
+
+/* Pull Up Device Enable at Port 0 Bit # (19) */
+#define PORT_P0_PUEN_P19 (0x1 << 19)
+#define PORT_P0_PUEN_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_PUEN_P19_GET(val) ((((val) & PORT_P0_PUEN_P19) >> 19) & 0x1)
+#define PORT_P0_PUEN_P19_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P19) | (((val) & 0x1) << 19))
+/* Pull Up Device Enable at Port 0 Bit # (18) */
+#define PORT_P0_PUEN_P18 (0x1 << 18)
+#define PORT_P0_PUEN_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P0_PUEN_P18_GET(val) ((((val) & PORT_P0_PUEN_P18) >> 18) & 0x1)
+#define PORT_P0_PUEN_P18_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P18) | (((val) & 0x1) << 18))
+/* Pull Up Device Enable at Port 0 Bit # (17) */
+#define PORT_P0_PUEN_P17 (0x1 << 17)
+#define PORT_P0_PUEN_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_PUEN_P17_GET(val) ((((val) & PORT_P0_PUEN_P17) >> 17) & 0x1)
+#define PORT_P0_PUEN_P17_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P17) | (((val) & 0x1) << 17))
+/* Pull Up Device Enable at Port 0 Bit # (16) */
+#define PORT_P0_PUEN_P16 (0x1 << 16)
+#define PORT_P0_PUEN_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_PUEN_P16_GET(val) ((((val) & PORT_P0_PUEN_P16) >> 16) & 0x1)
+#define PORT_P0_PUEN_P16_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P16) | (((val) & 0x1) << 16))
+/* Pull Up Device Enable at Port 0 Bit # (15) */
+#define PORT_P0_PUEN_P15 (0x1 << 15)
+#define PORT_P0_PUEN_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_PUEN_P15_GET(val) ((((val) & PORT_P0_PUEN_P15) >> 15) & 0x1)
+#define PORT_P0_PUEN_P15_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P15) | (((val) & 0x1) << 15))
+/* Pull Up Device Enable at Port 0 Bit # (14) */
+#define PORT_P0_PUEN_P14 (0x1 << 14)
+#define PORT_P0_PUEN_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_PUEN_P14_GET(val) ((((val) & PORT_P0_PUEN_P14) >> 14) & 0x1)
+#define PORT_P0_PUEN_P14_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P14) | (((val) & 0x1) << 14))
+/* Pull Up Device Enable at Port 0 Bit # (13) */
+#define PORT_P0_PUEN_P13 (0x1 << 13)
+#define PORT_P0_PUEN_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_PUEN_P13_GET(val) ((((val) & PORT_P0_PUEN_P13) >> 13) & 0x1)
+#define PORT_P0_PUEN_P13_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P13) | (((val) & 0x1) << 13))
+/* Pull Up Device Enable at Port 0 Bit # (12) */
+#define PORT_P0_PUEN_P12 (0x1 << 12)
+#define PORT_P0_PUEN_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_PUEN_P12_GET(val) ((((val) & PORT_P0_PUEN_P12) >> 12) & 0x1)
+#define PORT_P0_PUEN_P12_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P12) | (((val) & 0x1) << 12))
+/* Pull Up Device Enable at Port 0 Bit # (11) */
+#define PORT_P0_PUEN_P11 (0x1 << 11)
+#define PORT_P0_PUEN_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_PUEN_P11_GET(val) ((((val) & PORT_P0_PUEN_P11) >> 11) & 0x1)
+#define PORT_P0_PUEN_P11_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P11) | (((val) & 0x1) << 11))
+/* Pull Up Device Enable at Port 0 Bit # (10) */
+#define PORT_P0_PUEN_P10 (0x1 << 10)
+#define PORT_P0_PUEN_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_PUEN_P10_GET(val) ((((val) & PORT_P0_PUEN_P10) >> 10) & 0x1)
+#define PORT_P0_PUEN_P10_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P10) | (((val) & 0x1) << 10))
+/* Pull Up Device Enable at Port 0 Bit # (9) */
+#define PORT_P0_PUEN_P9 (0x1 << 9)
+#define PORT_P0_PUEN_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_PUEN_P9_GET(val) ((((val) & PORT_P0_PUEN_P9) >> 9) & 0x1)
+#define PORT_P0_PUEN_P9_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P9) | (((val) & 0x1) << 9))
+/* Pull Up Device Enable at Port 0 Bit # (8) */
+#define PORT_P0_PUEN_P8 (0x1 << 8)
+#define PORT_P0_PUEN_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_PUEN_P8_GET(val) ((((val) & PORT_P0_PUEN_P8) >> 8) & 0x1)
+#define PORT_P0_PUEN_P8_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P8) | (((val) & 0x1) << 8))
+/* Pull Up Device Enable at Port 0 Bit # (7) */
+#define PORT_P0_PUEN_P7 (0x1 << 7)
+#define PORT_P0_PUEN_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_PUEN_P7_GET(val) ((((val) & PORT_P0_PUEN_P7) >> 7) & 0x1)
+#define PORT_P0_PUEN_P7_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P7) | (((val) & 0x1) << 7))
+/* Pull Up Device Enable at Port 0 Bit # (6) */
+#define PORT_P0_PUEN_P6 (0x1 << 6)
+#define PORT_P0_PUEN_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P0_PUEN_P6_GET(val) ((((val) & PORT_P0_PUEN_P6) >> 6) & 0x1)
+#define PORT_P0_PUEN_P6_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P6) | (((val) & 0x1) << 6))
+/* Pull Up Device Enable at Port 0 Bit # (5) */
+#define PORT_P0_PUEN_P5 (0x1 << 5)
+#define PORT_P0_PUEN_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P0_PUEN_P5_GET(val) ((((val) & PORT_P0_PUEN_P5) >> 5) & 0x1)
+#define PORT_P0_PUEN_P5_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P5) | (((val) & 0x1) << 5))
+/* Pull Up Device Enable at Port 0 Bit # (4) */
+#define PORT_P0_PUEN_P4 (0x1 << 4)
+#define PORT_P0_PUEN_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P0_PUEN_P4_GET(val) ((((val) & PORT_P0_PUEN_P4) >> 4) & 0x1)
+#define PORT_P0_PUEN_P4_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P4) | (((val) & 0x1) << 4))
+/* Pull Up Device Enable at Port 0 Bit # (3) */
+#define PORT_P0_PUEN_P3 (0x1 << 3)
+#define PORT_P0_PUEN_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P0_PUEN_P3_GET(val) ((((val) & PORT_P0_PUEN_P3) >> 3) & 0x1)
+#define PORT_P0_PUEN_P3_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P3) | (((val) & 0x1) << 3))
+/* Pull Up Device Enable at Port 0 Bit # (2) */
+#define PORT_P0_PUEN_P2 (0x1 << 2)
+#define PORT_P0_PUEN_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P0_PUEN_P2_GET(val) ((((val) & PORT_P0_PUEN_P2) >> 2) & 0x1)
+#define PORT_P0_PUEN_P2_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P2) | (((val) & 0x1) << 2))
+/* Pull Up Device Enable at Port 0 Bit # (1) */
+#define PORT_P0_PUEN_P1 (0x1 << 1)
+#define PORT_P0_PUEN_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P0_PUEN_P1_GET(val) ((((val) & PORT_P0_PUEN_P1) >> 1) & 0x1)
+#define PORT_P0_PUEN_P1_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P1) | (((val) & 0x1) << 1))
+/* Pull Up Device Enable at Port 0 Bit # (0) */
+#define PORT_P0_PUEN_P0 (0x1)
+#define PORT_P0_PUEN_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P0_PUEN_P0_GET(val) ((((val) & PORT_P0_PUEN_P0) >> 0) & 0x1)
+#define PORT_P0_PUEN_P0_SET(reg,val) (reg) = ((reg & ~PORT_P0_PUEN_P0) | (((val) & 0x1) << 0))
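+
+/*
+ * Usage sketch, not part of the register description: a read-modify-write of
+ * the pull-up enable bits with the _GET/_SET helpers above. The MMIO accessor
+ * (ltq_r32/ltq_w32 is assumed here) and the "port0_membase" / "p0_puen_off"
+ * names are placeholders that are not defined in this file.
+ *
+ *	u32 reg = ltq_r32(port0_membase + p0_puen_off);
+ *	if (!PORT_P0_PUEN_P4_GET(reg))		// check pull-up on P0.4
+ *		PORT_P0_PUEN_P4_SET(reg, 1);
+ *	PORT_P0_PUEN_P5_SET(reg, 1);		// enable pull-up on P0.5
+ *	ltq_w32(reg, port0_membase + p0_puen_off);
+ */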
+
+/*******************************************************************************
+ * External Interrupt Control Register 0
+ ******************************************************************************/
+
+/* Type of Level or Edge Detection of EXINT16 (19) */
+#define PORT_P0_EXINTCR0_EXINT16 (0x1 << 19)
+#define PORT_P0_EXINTCR0_EXINT16_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_EXINTCR0_EXINT16_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT16) >> 19) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT16_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT16) | (((val) & 0x1) << 19))
+/* Type of Level or Edge Detection of EXINT10 (17) */
+#define PORT_P0_EXINTCR0_EXINT10 (0x1 << 17)
+#define PORT_P0_EXINTCR0_EXINT10_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_EXINTCR0_EXINT10_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT10) >> 17) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT10_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT10) | (((val) & 0x1) << 17))
+/* Type of Level or Edge Detection of EXINT9 (16) */
+#define PORT_P0_EXINTCR0_EXINT9 (0x1 << 16)
+#define PORT_P0_EXINTCR0_EXINT9_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_EXINTCR0_EXINT9_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT9) >> 16) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT9_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT9) | (((val) & 0x1) << 16))
+/* Type of Level or Edge Detection of EXINT8 (15) */
+#define PORT_P0_EXINTCR0_EXINT8 (0x1 << 15)
+#define PORT_P0_EXINTCR0_EXINT8_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_EXINTCR0_EXINT8_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT8) >> 15) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT8_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT8) | (((val) & 0x1) << 15))
+/* Type of Level or Edge Detection of EXINT7 (14) */
+#define PORT_P0_EXINTCR0_EXINT7 (0x1 << 14)
+#define PORT_P0_EXINTCR0_EXINT7_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_EXINTCR0_EXINT7_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT7) >> 14) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT7_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT7) | (((val) & 0x1) << 14))
+/* Type of Level or Edge Detection of EXINT6 (13) */
+#define PORT_P0_EXINTCR0_EXINT6 (0x1 << 13)
+#define PORT_P0_EXINTCR0_EXINT6_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_EXINTCR0_EXINT6_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT6) >> 13) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT6_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT6) | (((val) & 0x1) << 13))
+/* Type of Level or Edge Detection of EXINT5 (12) */
+#define PORT_P0_EXINTCR0_EXINT5 (0x1 << 12)
+#define PORT_P0_EXINTCR0_EXINT5_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_EXINTCR0_EXINT5_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT5) >> 12) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT5_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT5) | (((val) & 0x1) << 12))
+/* Type of Level or Edge Detection of EXINT4 (11) */
+#define PORT_P0_EXINTCR0_EXINT4 (0x1 << 11)
+#define PORT_P0_EXINTCR0_EXINT4_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_EXINTCR0_EXINT4_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT4) >> 11) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT4_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT4) | (((val) & 0x1) << 11))
+/* Type of Level or Edge Detection of EXINT3 (10) */
+#define PORT_P0_EXINTCR0_EXINT3 (0x1 << 10)
+#define PORT_P0_EXINTCR0_EXINT3_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_EXINTCR0_EXINT3_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT3) >> 10) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT3_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT3) | (((val) & 0x1) << 10))
+/* Type of Level or Edge Detection of EXINT2 (9) */
+#define PORT_P0_EXINTCR0_EXINT2 (0x1 << 9)
+#define PORT_P0_EXINTCR0_EXINT2_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_EXINTCR0_EXINT2_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT2) >> 9) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT2_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT2) | (((val) & 0x1) << 9))
+/* Type of Level or Edge Detection of EXINT1 (8) */
+#define PORT_P0_EXINTCR0_EXINT1 (0x1 << 8)
+#define PORT_P0_EXINTCR0_EXINT1_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_EXINTCR0_EXINT1_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT1) >> 8) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT1_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT1) | (((val) & 0x1) << 8))
+/* Type of Level or Edge Detection of EXINT0 (7) */
+#define PORT_P0_EXINTCR0_EXINT0 (0x1 << 7)
+#define PORT_P0_EXINTCR0_EXINT0_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_EXINTCR0_EXINT0_GET(val) ((((val) & PORT_P0_EXINTCR0_EXINT0) >> 7) & 0x1)
+#define PORT_P0_EXINTCR0_EXINT0_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR0_EXINT0) | (((val) & 0x1) << 7))
+
+/*******************************************************************************
+ * External Interrupt Control Register 1
+ ******************************************************************************/
+
+/* Type of Level or Edge Detection of EXINT16 (19) */
+#define PORT_P0_EXINTCR1_EXINT16 (0x1 << 19)
+#define PORT_P0_EXINTCR1_EXINT16_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_EXINTCR1_EXINT16_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT16) >> 19) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT16_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT16) | (((val) & 0x1) << 19))
+/* Type of Level or Edge Detection of EXINT10 (17) */
+#define PORT_P0_EXINTCR1_EXINT10 (0x1 << 17)
+#define PORT_P0_EXINTCR1_EXINT10_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_EXINTCR1_EXINT10_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT10) >> 17) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT10_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT10) | (((val) & 0x1) << 17))
+/* Type of Level or Edge Detection of EXINT9 (16) */
+#define PORT_P0_EXINTCR1_EXINT9 (0x1 << 16)
+#define PORT_P0_EXINTCR1_EXINT9_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_EXINTCR1_EXINT9_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT9) >> 16) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT9_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT9) | (((val) & 0x1) << 16))
+/* Type of Level or Edge Detection of EXINT8 (15) */
+#define PORT_P0_EXINTCR1_EXINT8 (0x1 << 15)
+#define PORT_P0_EXINTCR1_EXINT8_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_EXINTCR1_EXINT8_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT8) >> 15) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT8_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT8) | (((val) & 0x1) << 15))
+/* Type of Level or Edge Detection of EXINT7 (14) */
+#define PORT_P0_EXINTCR1_EXINT7 (0x1 << 14)
+#define PORT_P0_EXINTCR1_EXINT7_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_EXINTCR1_EXINT7_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT7) >> 14) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT7_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT7) | (((val) & 0x1) << 14))
+/* Type of Level or Edge Detection of EXINT6 (13) */
+#define PORT_P0_EXINTCR1_EXINT6 (0x1 << 13)
+#define PORT_P0_EXINTCR1_EXINT6_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_EXINTCR1_EXINT6_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT6) >> 13) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT6_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT6) | (((val) & 0x1) << 13))
+/* Type of Level or Edge Detection of EXINT5 (12) */
+#define PORT_P0_EXINTCR1_EXINT5 (0x1 << 12)
+#define PORT_P0_EXINTCR1_EXINT5_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_EXINTCR1_EXINT5_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT5) >> 12) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT5_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT5) | (((val) & 0x1) << 12))
+/* Type of Level or Edge Detection of EXINT4 (11) */
+#define PORT_P0_EXINTCR1_EXINT4 (0x1 << 11)
+#define PORT_P0_EXINTCR1_EXINT4_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_EXINTCR1_EXINT4_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT4) >> 11) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT4_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT4) | (((val) & 0x1) << 11))
+/* Type of Level or Edge Detection of EXINT3 (10) */
+#define PORT_P0_EXINTCR1_EXINT3 (0x1 << 10)
+#define PORT_P0_EXINTCR1_EXINT3_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_EXINTCR1_EXINT3_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT3) >> 10) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT3_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT3) | (((val) & 0x1) << 10))
+/* Type of Level or Edge Detection of EXINT2 (9) */
+#define PORT_P0_EXINTCR1_EXINT2 (0x1 << 9)
+#define PORT_P0_EXINTCR1_EXINT2_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_EXINTCR1_EXINT2_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT2) >> 9) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT2_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT2) | (((val) & 0x1) << 9))
+/* Type of Level or Edge Detection of EXINT1 (8) */
+#define PORT_P0_EXINTCR1_EXINT1 (0x1 << 8)
+#define PORT_P0_EXINTCR1_EXINT1_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_EXINTCR1_EXINT1_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT1) >> 8) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT1_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT1) | (((val) & 0x1) << 8))
+/* Type of Level or Edge Detection of EXINT0 (7) */
+#define PORT_P0_EXINTCR1_EXINT0 (0x1 << 7)
+#define PORT_P0_EXINTCR1_EXINT0_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_EXINTCR1_EXINT0_GET(val) ((((val) & PORT_P0_EXINTCR1_EXINT0) >> 7) & 0x1)
+#define PORT_P0_EXINTCR1_EXINT0_SET(reg,val) (reg) = ((reg & ~PORT_P0_EXINTCR1_EXINT0) | (((val) & 0x1) << 7))
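+
+/*
+ * Configuration sketch: EXINTCR0/EXINTCR1 together select the detection type
+ * for each EXINT line, but the bit-pair encoding is not documented in this
+ * header, so the value written below is only a placeholder. The accessor and
+ * offset names (ltq_r32/ltq_w32, port0_membase, p0_exintcr0_off) are
+ * assumptions as well.
+ *
+ *	u32 cr0 = ltq_r32(port0_membase + p0_exintcr0_off);
+ *	PORT_P0_EXINTCR0_EXINT3_SET(cr0, 1);	// placeholder detection type
+ *	ltq_w32(cr0, port0_membase + p0_exintcr0_off);
+ */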
+
+/*******************************************************************************
+ * P0_IRNEN Register
+ ******************************************************************************/
+
+/* EXINT16 Interrupt Request Enable (19) */
+#define PORT_P0_IRNEN_EXINT16 (0x1 << 19)
+#define PORT_P0_IRNEN_EXINT16_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_IRNEN_EXINT16_GET(val) ((((val) & PORT_P0_IRNEN_EXINT16) >> 19) & 0x1)
+#define PORT_P0_IRNEN_EXINT16_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT16) | (((val) & 0x1) << 19))
+/* EXINT10 Interrupt Request Enable (17) */
+#define PORT_P0_IRNEN_EXINT10 (0x1 << 17)
+#define PORT_P0_IRNEN_EXINT10_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_IRNEN_EXINT10_GET(val) ((((val) & PORT_P0_IRNEN_EXINT10) >> 17) & 0x1)
+#define PORT_P0_IRNEN_EXINT10_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT10) | (((val) & 0x1) << 17))
+/* EXINT9 Interrupt Request Enable (16) */
+#define PORT_P0_IRNEN_EXINT9 (0x1 << 16)
+#define PORT_P0_IRNEN_EXINT9_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_IRNEN_EXINT9_GET(val) ((((val) & PORT_P0_IRNEN_EXINT9) >> 16) & 0x1)
+#define PORT_P0_IRNEN_EXINT9_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT9) | (((val) & 0x1) << 16))
+/* EXINT8 Interrupt Request Enable (15) */
+#define PORT_P0_IRNEN_EXINT8 (0x1 << 15)
+#define PORT_P0_IRNEN_EXINT8_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_IRNEN_EXINT8_GET(val) ((((val) & PORT_P0_IRNEN_EXINT8) >> 15) & 0x1)
+#define PORT_P0_IRNEN_EXINT8_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT8) | (((val) & 0x1) << 15))
+/* EXINT7 Interrupt Request Enable (14) */
+#define PORT_P0_IRNEN_EXINT7 (0x1 << 14)
+#define PORT_P0_IRNEN_EXINT7_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_IRNEN_EXINT7_GET(val) ((((val) & PORT_P0_IRNEN_EXINT7) >> 14) & 0x1)
+#define PORT_P0_IRNEN_EXINT7_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT7) | (((val) & 0x1) << 14))
+/* EXINT6 Interrupt Request Enable (13) */
+#define PORT_P0_IRNEN_EXINT6 (0x1 << 13)
+#define PORT_P0_IRNEN_EXINT6_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_IRNEN_EXINT6_GET(val) ((((val) & PORT_P0_IRNEN_EXINT6) >> 13) & 0x1)
+#define PORT_P0_IRNEN_EXINT6_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT6) | (((val) & 0x1) << 13))
+/* EXINT5 Interrupt Request Enable (12) */
+#define PORT_P0_IRNEN_EXINT5 (0x1 << 12)
+#define PORT_P0_IRNEN_EXINT5_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_IRNEN_EXINT5_GET(val) ((((val) & PORT_P0_IRNEN_EXINT5) >> 12) & 0x1)
+#define PORT_P0_IRNEN_EXINT5_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT5) | (((val) & 0x1) << 12))
+/* EXINT4 Interrupt Request Enable (11) */
+#define PORT_P0_IRNEN_EXINT4 (0x1 << 11)
+#define PORT_P0_IRNEN_EXINT4_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_IRNEN_EXINT4_GET(val) ((((val) & PORT_P0_IRNEN_EXINT4) >> 11) & 0x1)
+#define PORT_P0_IRNEN_EXINT4_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT4) | (((val) & 0x1) << 11))
+/* EXINT3 Interrupt Request Enable (10) */
+#define PORT_P0_IRNEN_EXINT3 (0x1 << 10)
+#define PORT_P0_IRNEN_EXINT3_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_IRNEN_EXINT3_GET(val) ((((val) & PORT_P0_IRNEN_EXINT3) >> 10) & 0x1)
+#define PORT_P0_IRNEN_EXINT3_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT3) | (((val) & 0x1) << 10))
+/* EXINT2 Interrupt Request Enable (9) */
+#define PORT_P0_IRNEN_EXINT2 (0x1 << 9)
+#define PORT_P0_IRNEN_EXINT2_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_IRNEN_EXINT2_GET(val) ((((val) & PORT_P0_IRNEN_EXINT2) >> 9) & 0x1)
+#define PORT_P0_IRNEN_EXINT2_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT2) | (((val) & 0x1) << 9))
+/* EXINT1 Interrupt Request Enable (8) */
+#define PORT_P0_IRNEN_EXINT1 (0x1 << 8)
+#define PORT_P0_IRNEN_EXINT1_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_IRNEN_EXINT1_GET(val) ((((val) & PORT_P0_IRNEN_EXINT1) >> 8) & 0x1)
+#define PORT_P0_IRNEN_EXINT1_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT1) | (((val) & 0x1) << 8))
+/* EXINT0 Interrupt Request Enable (7) */
+#define PORT_P0_IRNEN_EXINT0 (0x1 << 7)
+#define PORT_P0_IRNEN_EXINT0_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_IRNEN_EXINT0_GET(val) ((((val) & PORT_P0_IRNEN_EXINT0) >> 7) & 0x1)
+#define PORT_P0_IRNEN_EXINT0_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNEN_EXINT0) | (((val) & 0x1) << 7))
+
+/*******************************************************************************
+ * P0_IRNICR Register
+ ******************************************************************************/
+
+/* EXINT16 Interrupt Request (19) */
+#define PORT_P0_IRNICR_EXINT16 (0x1 << 19)
+#define PORT_P0_IRNICR_EXINT16_GET(val) ((((val) & PORT_P0_IRNICR_EXINT16) >> 19) & 0x1)
+/* EXINT10 Interrupt Request (17) */
+#define PORT_P0_IRNICR_EXINT10 (0x1 << 17)
+#define PORT_P0_IRNICR_EXINT10_GET(val) ((((val) & PORT_P0_IRNICR_EXINT10) >> 17) & 0x1)
+/* EXINT9 Interrupt Request (16) */
+#define PORT_P0_IRNICR_EXINT9 (0x1 << 16)
+#define PORT_P0_IRNICR_EXINT9_GET(val) ((((val) & PORT_P0_IRNICR_EXINT9) >> 16) & 0x1)
+/* EXINT8 Interrupt Request (15) */
+#define PORT_P0_IRNICR_EXINT8 (0x1 << 15)
+#define PORT_P0_IRNICR_EXINT8_GET(val) ((((val) & PORT_P0_IRNICR_EXINT8) >> 15) & 0x1)
+/* EXINT7 Interrupt Request (14) */
+#define PORT_P0_IRNICR_EXINT7 (0x1 << 14)
+#define PORT_P0_IRNICR_EXINT7_GET(val) ((((val) & PORT_P0_IRNICR_EXINT7) >> 14) & 0x1)
+/* EXINT6 Interrupt Request (13) */
+#define PORT_P0_IRNICR_EXINT6 (0x1 << 13)
+#define PORT_P0_IRNICR_EXINT6_GET(val) ((((val) & PORT_P0_IRNICR_EXINT6) >> 13) & 0x1)
+/* EXINT5 Interrupt Request (12) */
+#define PORT_P0_IRNICR_EXINT5 (0x1 << 12)
+#define PORT_P0_IRNICR_EXINT5_GET(val) ((((val) & PORT_P0_IRNICR_EXINT5) >> 12) & 0x1)
+/* EXINT4 Interrupt Request (11) */
+#define PORT_P0_IRNICR_EXINT4 (0x1 << 11)
+#define PORT_P0_IRNICR_EXINT4_GET(val) ((((val) & PORT_P0_IRNICR_EXINT4) >> 11) & 0x1)
+/* EXINT3 Interrupt Request (10) */
+#define PORT_P0_IRNICR_EXINT3 (0x1 << 10)
+#define PORT_P0_IRNICR_EXINT3_GET(val) ((((val) & PORT_P0_IRNICR_EXINT3) >> 10) & 0x1)
+/* EXINT2 Interrupt Request (9) */
+#define PORT_P0_IRNICR_EXINT2 (0x1 << 9)
+#define PORT_P0_IRNICR_EXINT2_GET(val) ((((val) & PORT_P0_IRNICR_EXINT2) >> 9) & 0x1)
+/* EXINT1 Interrupt Request (8) */
+#define PORT_P0_IRNICR_EXINT1 (0x1 << 8)
+#define PORT_P0_IRNICR_EXINT1_GET(val) ((((val) & PORT_P0_IRNICR_EXINT1) >> 8) & 0x1)
+/* EXINT0 Interrupt Request (7) */
+#define PORT_P0_IRNICR_EXINT0 (0x1 << 7)
+#define PORT_P0_IRNICR_EXINT0_GET(val) ((((val) & PORT_P0_IRNICR_EXINT0) >> 7) & 0x1)
+
+/*******************************************************************************
+ * P0_IRNCR Register
+ ******************************************************************************/
+
+/* EXINT16 Interrupt Request (19) */
+#define PORT_P0_IRNCR_EXINT16 (0x1 << 19)
+#define PORT_P0_IRNCR_EXINT16_GET(val) ((((val) & PORT_P0_IRNCR_EXINT16) >> 19) & 0x1)
+/* EXINT10 Interrupt Request (17) */
+#define PORT_P0_IRNCR_EXINT10 (0x1 << 17)
+#define PORT_P0_IRNCR_EXINT10_GET(val) ((((val) & PORT_P0_IRNCR_EXINT10) >> 17) & 0x1)
+/* EXINT9 Interrupt Request (16) */
+#define PORT_P0_IRNCR_EXINT9 (0x1 << 16)
+#define PORT_P0_IRNCR_EXINT9_GET(val) ((((val) & PORT_P0_IRNCR_EXINT9) >> 16) & 0x1)
+/* EXINT8 Interrupt Request (15) */
+#define PORT_P0_IRNCR_EXINT8 (0x1 << 15)
+#define PORT_P0_IRNCR_EXINT8_GET(val) ((((val) & PORT_P0_IRNCR_EXINT8) >> 15) & 0x1)
+/* EXINT7 Interrupt Request (14) */
+#define PORT_P0_IRNCR_EXINT7 (0x1 << 14)
+#define PORT_P0_IRNCR_EXINT7_GET(val) ((((val) & PORT_P0_IRNCR_EXINT7) >> 14) & 0x1)
+/* EXINT6 Interrupt Request (13) */
+#define PORT_P0_IRNCR_EXINT6 (0x1 << 13)
+#define PORT_P0_IRNCR_EXINT6_GET(val) ((((val) & PORT_P0_IRNCR_EXINT6) >> 13) & 0x1)
+/* EXINT5 Interrupt Request (12) */
+#define PORT_P0_IRNCR_EXINT5 (0x1 << 12)
+#define PORT_P0_IRNCR_EXINT5_GET(val) ((((val) & PORT_P0_IRNCR_EXINT5) >> 12) & 0x1)
+/* EXINT4 Interrupt Request (11) */
+#define PORT_P0_IRNCR_EXINT4 (0x1 << 11)
+#define PORT_P0_IRNCR_EXINT4_GET(val) ((((val) & PORT_P0_IRNCR_EXINT4) >> 11) & 0x1)
+/* EXINT3 Interrupt Request (10) */
+#define PORT_P0_IRNCR_EXINT3 (0x1 << 10)
+#define PORT_P0_IRNCR_EXINT3_GET(val) ((((val) & PORT_P0_IRNCR_EXINT3) >> 10) & 0x1)
+/* EXINT2 Interrupt Request (9) */
+#define PORT_P0_IRNCR_EXINT2 (0x1 << 9)
+#define PORT_P0_IRNCR_EXINT2_GET(val) ((((val) & PORT_P0_IRNCR_EXINT2) >> 9) & 0x1)
+/* EXINT1 Interrupt Request (8) */
+#define PORT_P0_IRNCR_EXINT1 (0x1 << 8)
+#define PORT_P0_IRNCR_EXINT1_GET(val) ((((val) & PORT_P0_IRNCR_EXINT1) >> 8) & 0x1)
+/* EXINT0 Interrupt Request (7) */
+#define PORT_P0_IRNCR_EXINT0 (0x1 << 7)
+#define PORT_P0_IRNCR_EXINT0_GET(val) ((((val) & PORT_P0_IRNCR_EXINT0) >> 7) & 0x1)
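+
+/*
+ * Interrupt handling sketch: unmask an EXINT line in P0_IRNEN and poll its
+ * status in P0_IRNCR. How a pending request is acknowledged is not described
+ * in this header; accessor and offset names are placeholders.
+ *
+ *	u32 en = ltq_r32(port0_membase + p0_irnen_off);
+ *	PORT_P0_IRNEN_EXINT3_SET(en, 1);	// unmask EXINT3
+ *	ltq_w32(en, port0_membase + p0_irnen_off);
+ *
+ *	u32 pending = ltq_r32(port0_membase + p0_irncr_off);
+ *	if (PORT_P0_IRNCR_EXINT3_GET(pending)) {
+ *		// handle EXINT3
+ *	}
+ */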
+
+/*******************************************************************************
+ * P0 External Event Detection Configuration Register
+ ******************************************************************************/
+
+/* EXINT16 configured for Edge or Level Detection (19) */
+#define PORT_P0_IRNCFG_EXINT16 (0x1 << 19)
+#define PORT_P0_IRNCFG_EXINT16_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_IRNCFG_EXINT16_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT16) >> 19) & 0x1)
+#define PORT_P0_IRNCFG_EXINT16_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT16) | (((val) & 0x1) << 19))
+/* EXINT10 configured for Edge or Level Detection (17) */
+#define PORT_P0_IRNCFG_EXINT10 (0x1 << 17)
+#define PORT_P0_IRNCFG_EXINT10_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_IRNCFG_EXINT10_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT10) >> 17) & 0x1)
+#define PORT_P0_IRNCFG_EXINT10_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT10) | (((val) & 0x1) << 17))
+/* EXINT9 configured for Edge or Level Detection (16) */
+#define PORT_P0_IRNCFG_EXINT9 (0x1 << 16)
+#define PORT_P0_IRNCFG_EXINT9_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_IRNCFG_EXINT9_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT9) >> 16) & 0x1)
+#define PORT_P0_IRNCFG_EXINT9_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT9) | (((val) & 0x1) << 16))
+/* EXINT8 configured for Edge or Level Detection (15) */
+#define PORT_P0_IRNCFG_EXINT8 (0x1 << 15)
+#define PORT_P0_IRNCFG_EXINT8_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_IRNCFG_EXINT8_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT8) >> 15) & 0x1)
+#define PORT_P0_IRNCFG_EXINT8_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT8) | (((val) & 0x1) << 15))
+/* EXINT7 configured for Edge or Level Detection (14) */
+#define PORT_P0_IRNCFG_EXINT7 (0x1 << 14)
+#define PORT_P0_IRNCFG_EXINT7_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_IRNCFG_EXINT7_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT7) >> 14) & 0x1)
+#define PORT_P0_IRNCFG_EXINT7_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT7) | (((val) & 0x1) << 14))
+/* EXINT6 configured for Edge or Level Detection (13) */
+#define PORT_P0_IRNCFG_EXINT6 (0x1 << 13)
+#define PORT_P0_IRNCFG_EXINT6_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_IRNCFG_EXINT6_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT6) >> 13) & 0x1)
+#define PORT_P0_IRNCFG_EXINT6_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT6) | (((val) & 0x1) << 13))
+/* EXINT5 configured for Edge or Level Detection (12) */
+#define PORT_P0_IRNCFG_EXINT5 (0x1 << 12)
+#define PORT_P0_IRNCFG_EXINT5_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_IRNCFG_EXINT5_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT5) >> 12) & 0x1)
+#define PORT_P0_IRNCFG_EXINT5_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT5) | (((val) & 0x1) << 12))
+/* EXINT4 configured for Edge or Level Detection (11) */
+#define PORT_P0_IRNCFG_EXINT4 (0x1 << 11)
+#define PORT_P0_IRNCFG_EXINT4_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_IRNCFG_EXINT4_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT4) >> 11) & 0x1)
+#define PORT_P0_IRNCFG_EXINT4_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT4) | (((val) & 0x1) << 11))
+/* EXINT3 configured for Edge or Level Detection (10) */
+#define PORT_P0_IRNCFG_EXINT3 (0x1 << 10)
+#define PORT_P0_IRNCFG_EXINT3_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_IRNCFG_EXINT3_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT3) >> 10) & 0x1)
+#define PORT_P0_IRNCFG_EXINT3_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT3) | (((val) & 0x1) << 10))
+/* EXINT2 configured for Edge or Level Detection (9) */
+#define PORT_P0_IRNCFG_EXINT2 (0x1 << 9)
+#define PORT_P0_IRNCFG_EXINT2_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_IRNCFG_EXINT2_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT2) >> 9) & 0x1)
+#define PORT_P0_IRNCFG_EXINT2_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT2) | (((val) & 0x1) << 9))
+/* EXINT1 configured for Edge or Level Detection (8) */
+#define PORT_P0_IRNCFG_EXINT1 (0x1 << 8)
+#define PORT_P0_IRNCFG_EXINT1_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_IRNCFG_EXINT1_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT1) >> 8) & 0x1)
+#define PORT_P0_IRNCFG_EXINT1_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT1) | (((val) & 0x1) << 8))
+/* EXINT0 configured for Edge or Level Detection (7) */
+#define PORT_P0_IRNCFG_EXINT0 (0x1 << 7)
+#define PORT_P0_IRNCFG_EXINT0_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_IRNCFG_EXINT0_GET(val) ((((val) & PORT_P0_IRNCFG_EXINT0) >> 7) & 0x1)
+#define PORT_P0_IRNCFG_EXINT0_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNCFG_EXINT0) | (((val) & 0x1) << 7))
+
+/*******************************************************************************
+ * P0_IRNENSET Register
+ ******************************************************************************/
+
+/* Set Interrupt Node Enable Flag EXINT16 (19) */
+#define PORT_P0_IRNENSET_EXINT16 (0x1 << 19)
+#define PORT_P0_IRNENSET_EXINT16_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_IRNENSET_EXINT16_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT16) | (((val) & 0x1) << 19))
+/* Set Interrupt Node Enable Flag EXINT10 (17) */
+#define PORT_P0_IRNENSET_EXINT10 (0x1 << 17)
+#define PORT_P0_IRNENSET_EXINT10_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_IRNENSET_EXINT10_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT10) | (((val) & 0x1) << 17))
+/* Set Interrupt Node Enable Flag EXINT9 (16) */
+#define PORT_P0_IRNENSET_EXINT9 (0x1 << 16)
+#define PORT_P0_IRNENSET_EXINT9_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_IRNENSET_EXINT9_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT9) | (((val) & 0x1) << 16))
+/* Set Interrupt Node Enable Flag EXINT8 (15) */
+#define PORT_P0_IRNENSET_EXINT8 (0x1 << 15)
+#define PORT_P0_IRNENSET_EXINT8_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_IRNENSET_EXINT8_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT8) | (((val) & 0x1) << 15))
+/* Set Interrupt Node Enable Flag EXINT7 (14) */
+#define PORT_P0_IRNENSET_EXINT7 (0x1 << 14)
+#define PORT_P0_IRNENSET_EXINT7_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_IRNENSET_EXINT7_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT7) | (((val) & 0x1) << 14))
+/* Set Interrupt Node Enable Flag EXINT6 (13) */
+#define PORT_P0_IRNENSET_EXINT6 (0x1 << 13)
+#define PORT_P0_IRNENSET_EXINT6_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_IRNENSET_EXINT6_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT6) | (((val) & 0x1) << 13))
+/* Set Interrupt Node Enable Flag EXINT5 (12) */
+#define PORT_P0_IRNENSET_EXINT5 (0x1 << 12)
+#define PORT_P0_IRNENSET_EXINT5_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_IRNENSET_EXINT5_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT5) | (((val) & 0x1) << 12))
+/* Set Interrupt Node Enable Flag EXINT4 (11) */
+#define PORT_P0_IRNENSET_EXINT4 (0x1 << 11)
+#define PORT_P0_IRNENSET_EXINT4_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_IRNENSET_EXINT4_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT4) | (((val) & 0x1) << 11))
+/* Set Interrupt Node Enable Flag EXINT3 (10) */
+#define PORT_P0_IRNENSET_EXINT3 (0x1 << 10)
+#define PORT_P0_IRNENSET_EXINT3_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_IRNENSET_EXINT3_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT3) | (((val) & 0x1) << 10))
+/* Set Interrupt Node Enable Flag EXINT2 (9) */
+#define PORT_P0_IRNENSET_EXINT2 (0x1 << 9)
+#define PORT_P0_IRNENSET_EXINT2_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_IRNENSET_EXINT2_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT2) | (((val) & 0x1) << 9))
+/* Set Interrupt Node Enable Flag EXINT1 (8) */
+#define PORT_P0_IRNENSET_EXINT1 (0x1 << 8)
+#define PORT_P0_IRNENSET_EXINT1_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_IRNENSET_EXINT1_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT1) | (((val) & 0x1) << 8))
+/* Set Interrupt Node Enable Flag EXINT0 (7) */
+#define PORT_P0_IRNENSET_EXINT0 (0x1 << 7)
+#define PORT_P0_IRNENSET_EXINT0_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_IRNENSET_EXINT0_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENSET_EXINT0) | (((val) & 0x1) << 7))
+
+/*******************************************************************************
+ * P0_IRNENCLR Register
+ ******************************************************************************/
+
+/* Clear Interrupt Node Enable Flag EXINT16 (19) */
+#define PORT_P0_IRNENCLR_EXINT16 (0x1 << 19)
+#define PORT_P0_IRNENCLR_EXINT16_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P0_IRNENCLR_EXINT16_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT16) | (((val) & 0x1) << 19))
+/* Clear Interrupt Node Enable Flag EXINT10 (17) */
+#define PORT_P0_IRNENCLR_EXINT10 (0x1 << 17)
+#define PORT_P0_IRNENCLR_EXINT10_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P0_IRNENCLR_EXINT10_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT10) | (((val) & 0x1) << 17))
+/* Clear Interrupt Node Enable Flag EXINT9 (16) */
+#define PORT_P0_IRNENCLR_EXINT9 (0x1 << 16)
+#define PORT_P0_IRNENCLR_EXINT9_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P0_IRNENCLR_EXINT9_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT9) | (((val) & 0x1) << 16))
+/* Clear Interrupt Node Enable Flag EXINT8 (15) */
+#define PORT_P0_IRNENCLR_EXINT8 (0x1 << 15)
+#define PORT_P0_IRNENCLR_EXINT8_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P0_IRNENCLR_EXINT8_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT8) | (((val) & 0x1) << 15))
+/* Clear Interrupt Node Enable Flag EXINT7 (14) */
+#define PORT_P0_IRNENCLR_EXINT7 (0x1 << 14)
+#define PORT_P0_IRNENCLR_EXINT7_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P0_IRNENCLR_EXINT7_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT7) | (((val) & 0x1) << 14))
+/* Clear Interrupt Node Enable Flag EXINT6 (13) */
+#define PORT_P0_IRNENCLR_EXINT6 (0x1 << 13)
+#define PORT_P0_IRNENCLR_EXINT6_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P0_IRNENCLR_EXINT6_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT6) | (((val) & 0x1) << 13))
+/* Clear Interrupt Node Enable Flag EXINT5 (12) */
+#define PORT_P0_IRNENCLR_EXINT5 (0x1 << 12)
+#define PORT_P0_IRNENCLR_EXINT5_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P0_IRNENCLR_EXINT5_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT5) | (((val) & 0x1) << 12))
+/* Clear Interrupt Node Enable Flag EXINT4 (11) */
+#define PORT_P0_IRNENCLR_EXINT4 (0x1 << 11)
+#define PORT_P0_IRNENCLR_EXINT4_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P0_IRNENCLR_EXINT4_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT4) | (((val) & 0x1) << 11))
+/* Clear Interrupt Node Enable Flag EXINT3 (10) */
+#define PORT_P0_IRNENCLR_EXINT3 (0x1 << 10)
+#define PORT_P0_IRNENCLR_EXINT3_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P0_IRNENCLR_EXINT3_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT3) | (((val) & 0x1) << 10))
+/* Clear Interrupt Node Enable Flag EXINT2 (9) */
+#define PORT_P0_IRNENCLR_EXINT2 (0x1 << 9)
+#define PORT_P0_IRNENCLR_EXINT2_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P0_IRNENCLR_EXINT2_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT2) | (((val) & 0x1) << 9))
+/* Clear Interrupt Node Enable Flag EXINT1 (8) */
+#define PORT_P0_IRNENCLR_EXINT1 (0x1 << 8)
+#define PORT_P0_IRNENCLR_EXINT1_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P0_IRNENCLR_EXINT1_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT1) | (((val) & 0x1) << 8))
+/* Clear Interrupt Node Enable Flag EXINT0 (7) */
+#define PORT_P0_IRNENCLR_EXINT0 (0x1 << 7)
+#define PORT_P0_IRNENCLR_EXINT0_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P0_IRNENCLR_EXINT0_SET(reg,val) (reg) = ((reg & ~PORT_P0_IRNENCLR_EXINT0) | (((val) & 0x1) << 7))
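+
+/*
+ * Sketch, assuming P0_IRNENSET/P0_IRNENCLR act as write-only set/clear views
+ * of P0_IRNEN (this header does not spell that out). The _VAL helpers build
+ * the single-bit write value; offsets and accessors are placeholders.
+ *
+ *	ltq_w32(PORT_P0_IRNENSET_EXINT5_VAL(1), port0_membase + p0_irnenset_off);
+ *	ltq_w32(PORT_P0_IRNENCLR_EXINT5_VAL(1), port0_membase + p0_irnenclr_off);
+ */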
+
+/*******************************************************************************
+ * Port 1 Data Output Register
+ ******************************************************************************/
+
+/* Port 1 Pin # Output Value (19) */
+#define PORT_P1_OUT_P19 (0x1 << 19)
+#define PORT_P1_OUT_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_OUT_P19_GET(val) ((((val) & PORT_P1_OUT_P19) >> 19) & 0x1)
+#define PORT_P1_OUT_P19_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P19) | (((val) & 0x1) << 19))
+/* Port 1 Pin # Output Value (18) */
+#define PORT_P1_OUT_P18 (0x1 << 18)
+#define PORT_P1_OUT_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_OUT_P18_GET(val) ((((val) & PORT_P1_OUT_P18) >> 18) & 0x1)
+#define PORT_P1_OUT_P18_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P18) | (((val) & 0x1) << 18))
+/* Port 1 Pin # Output Value (17) */
+#define PORT_P1_OUT_P17 (0x1 << 17)
+#define PORT_P1_OUT_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_OUT_P17_GET(val) ((((val) & PORT_P1_OUT_P17) >> 17) & 0x1)
+#define PORT_P1_OUT_P17_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P17) | (((val) & 0x1) << 17))
+/* Port 1 Pin # Output Value (16) */
+#define PORT_P1_OUT_P16 (0x1 << 16)
+#define PORT_P1_OUT_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_OUT_P16_GET(val) ((((val) & PORT_P1_OUT_P16) >> 16) & 0x1)
+#define PORT_P1_OUT_P16_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P16) | (((val) & 0x1) << 16))
+/* Port 1 Pin # Output Value (15) */
+#define PORT_P1_OUT_P15 (0x1 << 15)
+#define PORT_P1_OUT_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_OUT_P15_GET(val) ((((val) & PORT_P1_OUT_P15) >> 15) & 0x1)
+#define PORT_P1_OUT_P15_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P15) | (((val) & 0x1) << 15))
+/* Port 1 Pin # Output Value (14) */
+#define PORT_P1_OUT_P14 (0x1 << 14)
+#define PORT_P1_OUT_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P1_OUT_P14_GET(val) ((((val) & PORT_P1_OUT_P14) >> 14) & 0x1)
+#define PORT_P1_OUT_P14_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P14) | (((val) & 0x1) << 14))
+/* Port 1 Pin # Output Value (13) */
+#define PORT_P1_OUT_P13 (0x1 << 13)
+#define PORT_P1_OUT_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P1_OUT_P13_GET(val) ((((val) & PORT_P1_OUT_P13) >> 13) & 0x1)
+#define PORT_P1_OUT_P13_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P13) | (((val) & 0x1) << 13))
+/* Port 1 Pin # Output Value (12) */
+#define PORT_P1_OUT_P12 (0x1 << 12)
+#define PORT_P1_OUT_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P1_OUT_P12_GET(val) ((((val) & PORT_P1_OUT_P12) >> 12) & 0x1)
+#define PORT_P1_OUT_P12_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P12) | (((val) & 0x1) << 12))
+/* Port 1 Pin # Output Value (11) */
+#define PORT_P1_OUT_P11 (0x1 << 11)
+#define PORT_P1_OUT_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P1_OUT_P11_GET(val) ((((val) & PORT_P1_OUT_P11) >> 11) & 0x1)
+#define PORT_P1_OUT_P11_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P11) | (((val) & 0x1) << 11))
+/* Port 1 Pin # Output Value (10) */
+#define PORT_P1_OUT_P10 (0x1 << 10)
+#define PORT_P1_OUT_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P1_OUT_P10_GET(val) ((((val) & PORT_P1_OUT_P10) >> 10) & 0x1)
+#define PORT_P1_OUT_P10_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P10) | (((val) & 0x1) << 10))
+/* Port 1 Pin # Output Value (9) */
+#define PORT_P1_OUT_P9 (0x1 << 9)
+#define PORT_P1_OUT_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P1_OUT_P9_GET(val) ((((val) & PORT_P1_OUT_P9) >> 9) & 0x1)
+#define PORT_P1_OUT_P9_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P9) | (((val) & 0x1) << 9))
+/* Port 1 Pin # Output Value (8) */
+#define PORT_P1_OUT_P8 (0x1 << 8)
+#define PORT_P1_OUT_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P1_OUT_P8_GET(val) ((((val) & PORT_P1_OUT_P8) >> 8) & 0x1)
+#define PORT_P1_OUT_P8_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P8) | (((val) & 0x1) << 8))
+/* Port 1 Pin # Output Value (7) */
+#define PORT_P1_OUT_P7 (0x1 << 7)
+#define PORT_P1_OUT_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P1_OUT_P7_GET(val) ((((val) & PORT_P1_OUT_P7) >> 7) & 0x1)
+#define PORT_P1_OUT_P7_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P7) | (((val) & 0x1) << 7))
+/* Port 1 Pin # Output Value (6) */
+#define PORT_P1_OUT_P6 (0x1 << 6)
+#define PORT_P1_OUT_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P1_OUT_P6_GET(val) ((((val) & PORT_P1_OUT_P6) >> 6) & 0x1)
+#define PORT_P1_OUT_P6_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P6) | (((val) & 0x1) << 6))
+/* Port 1 Pin # Output Value (5) */
+#define PORT_P1_OUT_P5 (0x1 << 5)
+#define PORT_P1_OUT_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P1_OUT_P5_GET(val) ((((val) & PORT_P1_OUT_P5) >> 5) & 0x1)
+#define PORT_P1_OUT_P5_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P5) | (((val) & 0x1) << 5))
+/* Port 1 Pin # Output Value (4) */
+#define PORT_P1_OUT_P4 (0x1 << 4)
+#define PORT_P1_OUT_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P1_OUT_P4_GET(val) ((((val) & PORT_P1_OUT_P4) >> 4) & 0x1)
+#define PORT_P1_OUT_P4_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P4) | (((val) & 0x1) << 4))
+/* Port 1 Pin # Output Value (3) */
+#define PORT_P1_OUT_P3 (0x1 << 3)
+#define PORT_P1_OUT_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P1_OUT_P3_GET(val) ((((val) & PORT_P1_OUT_P3) >> 3) & 0x1)
+#define PORT_P1_OUT_P3_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P3) | (((val) & 0x1) << 3))
+/* Port 1 Pin # Output Value (2) */
+#define PORT_P1_OUT_P2 (0x1 << 2)
+#define PORT_P1_OUT_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P1_OUT_P2_GET(val) ((((val) & PORT_P1_OUT_P2) >> 2) & 0x1)
+#define PORT_P1_OUT_P2_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P2) | (((val) & 0x1) << 2))
+/* Port 1 Pin # Output Value (1) */
+#define PORT_P1_OUT_P1 (0x1 << 1)
+#define PORT_P1_OUT_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P1_OUT_P1_GET(val) ((((val) & PORT_P1_OUT_P1) >> 1) & 0x1)
+#define PORT_P1_OUT_P1_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P1) | (((val) & 0x1) << 1))
+/* Port 1 Pin # Output Value (0) */
+#define PORT_P1_OUT_P0 (0x1)
+#define PORT_P1_OUT_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P1_OUT_P0_GET(val) ((((val) & PORT_P1_OUT_P0) >> 0) & 0x1)
+#define PORT_P1_OUT_P0_SET(reg,val) (reg) = ((reg & ~PORT_P1_OUT_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 1 Data Input Register
+ ******************************************************************************/
+
+/* Port 1 Pin # Latched Input Value (19) */
+#define PORT_P1_IN_P19 (0x1 << 19)
+#define PORT_P1_IN_P19_GET(val) ((((val) & PORT_P1_IN_P19) >> 19) & 0x1)
+/* Port 1 Pin # Latched Input Value (18) */
+#define PORT_P1_IN_P18 (0x1 << 18)
+#define PORT_P1_IN_P18_GET(val) ((((val) & PORT_P1_IN_P18) >> 18) & 0x1)
+/* Port 1 Pin # Latched Input Value (17) */
+#define PORT_P1_IN_P17 (0x1 << 17)
+#define PORT_P1_IN_P17_GET(val) ((((val) & PORT_P1_IN_P17) >> 17) & 0x1)
+/* Port 1 Pin # Latched Input Value (16) */
+#define PORT_P1_IN_P16 (0x1 << 16)
+#define PORT_P1_IN_P16_GET(val) ((((val) & PORT_P1_IN_P16) >> 16) & 0x1)
+/* Port 1 Pin # Latched Input Value (15) */
+#define PORT_P1_IN_P15 (0x1 << 15)
+#define PORT_P1_IN_P15_GET(val) ((((val) & PORT_P1_IN_P15) >> 15) & 0x1)
+/* Port 1 Pin # Latched Input Value (14) */
+#define PORT_P1_IN_P14 (0x1 << 14)
+#define PORT_P1_IN_P14_GET(val) ((((val) & PORT_P1_IN_P14) >> 14) & 0x1)
+/* Port 1 Pin # Latched Input Value (13) */
+#define PORT_P1_IN_P13 (0x1 << 13)
+#define PORT_P1_IN_P13_GET(val) ((((val) & PORT_P1_IN_P13) >> 13) & 0x1)
+/* Port 1 Pin # Latched Input Value (12) */
+#define PORT_P1_IN_P12 (0x1 << 12)
+#define PORT_P1_IN_P12_GET(val) ((((val) & PORT_P1_IN_P12) >> 12) & 0x1)
+/* Port 1 Pin # Latched Input Value (11) */
+#define PORT_P1_IN_P11 (0x1 << 11)
+#define PORT_P1_IN_P11_GET(val) ((((val) & PORT_P1_IN_P11) >> 11) & 0x1)
+/* Port 1 Pin # Latched Input Value (10) */
+#define PORT_P1_IN_P10 (0x1 << 10)
+#define PORT_P1_IN_P10_GET(val) ((((val) & PORT_P1_IN_P10) >> 10) & 0x1)
+/* Port 1 Pin # Latched Input Value (9) */
+#define PORT_P1_IN_P9 (0x1 << 9)
+#define PORT_P1_IN_P9_GET(val) ((((val) & PORT_P1_IN_P9) >> 9) & 0x1)
+/* Port 1 Pin # Latched Input Value (8) */
+#define PORT_P1_IN_P8 (0x1 << 8)
+#define PORT_P1_IN_P8_GET(val) ((((val) & PORT_P1_IN_P8) >> 8) & 0x1)
+/* Port 1 Pin # Latched Input Value (7) */
+#define PORT_P1_IN_P7 (0x1 << 7)
+#define PORT_P1_IN_P7_GET(val) ((((val) & PORT_P1_IN_P7) >> 7) & 0x1)
+/* Port 1 Pin # Latched Input Value (6) */
+#define PORT_P1_IN_P6 (0x1 << 6)
+#define PORT_P1_IN_P6_GET(val) ((((val) & PORT_P1_IN_P6) >> 6) & 0x1)
+/* Port 1 Pin # Latched Input Value (5) */
+#define PORT_P1_IN_P5 (0x1 << 5)
+#define PORT_P1_IN_P5_GET(val) ((((val) & PORT_P1_IN_P5) >> 5) & 0x1)
+/* Port 1 Pin # Latched Input Value (4) */
+#define PORT_P1_IN_P4 (0x1 << 4)
+#define PORT_P1_IN_P4_GET(val) ((((val) & PORT_P1_IN_P4) >> 4) & 0x1)
+/* Port 1 Pin # Latched Input Value (3) */
+#define PORT_P1_IN_P3 (0x1 << 3)
+#define PORT_P1_IN_P3_GET(val) ((((val) & PORT_P1_IN_P3) >> 3) & 0x1)
+/* Port 1 Pin # Latched Input Value (2) */
+#define PORT_P1_IN_P2 (0x1 << 2)
+#define PORT_P1_IN_P2_GET(val) ((((val) & PORT_P1_IN_P2) >> 2) & 0x1)
+/* Port 1 Pin # Latched Input Value (1) */
+#define PORT_P1_IN_P1 (0x1 << 1)
+#define PORT_P1_IN_P1_GET(val) ((((val) & PORT_P1_IN_P1) >> 1) & 0x1)
+/* Port 1 Pin # Latched Input Value (0) */
+#define PORT_P1_IN_P0 (0x1)
+#define PORT_P1_IN_P0_GET(val) ((((val) & PORT_P1_IN_P0) >> 0) & 0x1)
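+
+/*
+ * Input sketch: the IN register only has _GET helpers since the latched pin
+ * level is read-only. Accessor and offset names are placeholders.
+ *
+ *	u32 in = ltq_r32(port1_membase + p1_in_off);
+ *	int level = PORT_P1_IN_P7_GET(in);	// latched level of P1.7
+ */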
+
+/*******************************************************************************
+ * Port 1 Direction Register
+ ******************************************************************************/
+
+/* Port 1 Pin # Direction Control (19) */
+#define PORT_P1_DIR_P19 (0x1 << 19)
+#define PORT_P1_DIR_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_DIR_P19_GET(val) ((((val) & PORT_P1_DIR_P19) >> 19) & 0x1)
+#define PORT_P1_DIR_P19_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P19) | (((val) & 0x1) << 19))
+/* Port 1 Pin # Direction Control (18) */
+#define PORT_P1_DIR_P18 (0x1 << 18)
+#define PORT_P1_DIR_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_DIR_P18_GET(val) ((((val) & PORT_P1_DIR_P18) >> 18) & 0x1)
+#define PORT_P1_DIR_P18_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P18) | (((val) & 0x1) << 18))
+/* Port 1 Pin # Direction Control (17) */
+#define PORT_P1_DIR_P17 (0x1 << 17)
+#define PORT_P1_DIR_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_DIR_P17_GET(val) ((((val) & PORT_P1_DIR_P17) >> 17) & 0x1)
+#define PORT_P1_DIR_P17_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P17) | (((val) & 0x1) << 17))
+/* Port 1 Pin # Direction Control (16) */
+#define PORT_P1_DIR_P16 (0x1 << 16)
+#define PORT_P1_DIR_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_DIR_P16_GET(val) ((((val) & PORT_P1_DIR_P16) >> 16) & 0x1)
+#define PORT_P1_DIR_P16_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P16) | (((val) & 0x1) << 16))
+/* Port 1 Pin # Direction Control (15) */
+#define PORT_P1_DIR_P15 (0x1 << 15)
+#define PORT_P1_DIR_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_DIR_P15_GET(val) ((((val) & PORT_P1_DIR_P15) >> 15) & 0x1)
+#define PORT_P1_DIR_P15_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P15) | (((val) & 0x1) << 15))
+/* Port 1 Pin # Direction Control (14) */
+#define PORT_P1_DIR_P14 (0x1 << 14)
+#define PORT_P1_DIR_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P1_DIR_P14_GET(val) ((((val) & PORT_P1_DIR_P14) >> 14) & 0x1)
+#define PORT_P1_DIR_P14_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P14) | (((val) & 0x1) << 14))
+/* Port 1 Pin # Direction Control (13) */
+#define PORT_P1_DIR_P13 (0x1 << 13)
+#define PORT_P1_DIR_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P1_DIR_P13_GET(val) ((((val) & PORT_P1_DIR_P13) >> 13) & 0x1)
+#define PORT_P1_DIR_P13_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P13) | (((val) & 0x1) << 13))
+/* Port 1 Pin # Direction Control (12) */
+#define PORT_P1_DIR_P12 (0x1 << 12)
+#define PORT_P1_DIR_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P1_DIR_P12_GET(val) ((((val) & PORT_P1_DIR_P12) >> 12) & 0x1)
+#define PORT_P1_DIR_P12_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P12) | (((val) & 0x1) << 12))
+/* Port 1 Pin # Direction Control (11) */
+#define PORT_P1_DIR_P11 (0x1 << 11)
+#define PORT_P1_DIR_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P1_DIR_P11_GET(val) ((((val) & PORT_P1_DIR_P11) >> 11) & 0x1)
+#define PORT_P1_DIR_P11_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P11) | (((val) & 0x1) << 11))
+/* Port 1 Pin # Direction Control (10) */
+#define PORT_P1_DIR_P10 (0x1 << 10)
+#define PORT_P1_DIR_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P1_DIR_P10_GET(val) ((((val) & PORT_P1_DIR_P10) >> 10) & 0x1)
+#define PORT_P1_DIR_P10_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P10) | (((val) & 0x1) << 10))
+/* Port 1 Pin # Direction Control (9) */
+#define PORT_P1_DIR_P9 (0x1 << 9)
+#define PORT_P1_DIR_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P1_DIR_P9_GET(val) ((((val) & PORT_P1_DIR_P9) >> 9) & 0x1)
+#define PORT_P1_DIR_P9_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P9) | (((val) & 0x1) << 9))
+/* Port 1 Pin # Direction Control (8) */
+#define PORT_P1_DIR_P8 (0x1 << 8)
+#define PORT_P1_DIR_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P1_DIR_P8_GET(val) ((((val) & PORT_P1_DIR_P8) >> 8) & 0x1)
+#define PORT_P1_DIR_P8_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P8) | (((val) & 0x1) << 8))
+/* Port 1 Pin # Direction Control (7) */
+#define PORT_P1_DIR_P7 (0x1 << 7)
+#define PORT_P1_DIR_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P1_DIR_P7_GET(val) ((((val) & PORT_P1_DIR_P7) >> 7) & 0x1)
+#define PORT_P1_DIR_P7_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P7) | (((val) & 0x1) << 7))
+/* Port 1 Pin # Direction Control (6) */
+#define PORT_P1_DIR_P6 (0x1 << 6)
+#define PORT_P1_DIR_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P1_DIR_P6_GET(val) ((((val) & PORT_P1_DIR_P6) >> 6) & 0x1)
+#define PORT_P1_DIR_P6_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P6) | (((val) & 0x1) << 6))
+/* Port 1 Pin # Direction Control (5) */
+#define PORT_P1_DIR_P5 (0x1 << 5)
+#define PORT_P1_DIR_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P1_DIR_P5_GET(val) ((((val) & PORT_P1_DIR_P5) >> 5) & 0x1)
+#define PORT_P1_DIR_P5_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P5) | (((val) & 0x1) << 5))
+/* Port 1 Pin # Direction Control (4) */
+#define PORT_P1_DIR_P4 (0x1 << 4)
+#define PORT_P1_DIR_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P1_DIR_P4_GET(val) ((((val) & PORT_P1_DIR_P4) >> 4) & 0x1)
+#define PORT_P1_DIR_P4_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P4) | (((val) & 0x1) << 4))
+/* Port 1 Pin # Direction Control (3) */
+#define PORT_P1_DIR_P3 (0x1 << 3)
+#define PORT_P1_DIR_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P1_DIR_P3_GET(val) ((((val) & PORT_P1_DIR_P3) >> 3) & 0x1)
+#define PORT_P1_DIR_P3_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P3) | (((val) & 0x1) << 3))
+/* Port 1 Pin # Direction Control (2) */
+#define PORT_P1_DIR_P2 (0x1 << 2)
+#define PORT_P1_DIR_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P1_DIR_P2_GET(val) ((((val) & PORT_P1_DIR_P2) >> 2) & 0x1)
+#define PORT_P1_DIR_P2_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P2) | (((val) & 0x1) << 2))
+/* Port 1 Pin # Direction Control (1) */
+#define PORT_P1_DIR_P1 (0x1 << 1)
+#define PORT_P1_DIR_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P1_DIR_P1_GET(val) ((((val) & PORT_P1_DIR_P1) >> 1) & 0x1)
+#define PORT_P1_DIR_P1_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P1) | (((val) & 0x1) << 1))
+/* Port 1 Pin # Direction Control (0) */
+#define PORT_P1_DIR_P0 (0x1)
+#define PORT_P1_DIR_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P1_DIR_P0_GET(val) ((((val) & PORT_P1_DIR_P0) >> 0) & 0x1)
+#define PORT_P1_DIR_P0_SET(reg,val) (reg) = ((reg & ~PORT_P1_DIR_P0) | (((val) & 0x1) << 0))
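+
+/*
+ * Output sketch: program the direction register, then drive the data output
+ * register. Whether a direction bit of 1 means "output" is an assumption;
+ * accessor and offset names are placeholders.
+ *
+ *	u32 dir = ltq_r32(port1_membase + p1_dir_off);
+ *	PORT_P1_DIR_P2_SET(dir, 1);		// P1.2 as output (assumed polarity)
+ *	ltq_w32(dir, port1_membase + p1_dir_off);
+ *
+ *	u32 out = ltq_r32(port1_membase + p1_out_off);
+ *	PORT_P1_OUT_P2_SET(out, 1);		// drive P1.2
+ *	ltq_w32(out, port1_membase + p1_out_off);
+ */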
+
+/*******************************************************************************
+ * Port 1 Alternate Function Select Register 0
+ ******************************************************************************/
+
+/* Alternate Function at Port 1 Bit # (19) */
+#define PORT_P1_ALTSEL0_P19 (0x1 << 19)
+#define PORT_P1_ALTSEL0_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_ALTSEL0_P19_GET(val) ((((val) & PORT_P1_ALTSEL0_P19) >> 19) & 0x1)
+#define PORT_P1_ALTSEL0_P19_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P19) | (((val) & 0x1) << 19))
+/* Alternate Function at Port 1 Bit # (18) */
+#define PORT_P1_ALTSEL0_P18 (0x1 << 18)
+#define PORT_P1_ALTSEL0_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_ALTSEL0_P18_GET(val) ((((val) & PORT_P1_ALTSEL0_P18) >> 18) & 0x1)
+#define PORT_P1_ALTSEL0_P18_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P18) | (((val) & 0x1) << 18))
+/* Alternate Function at Port 1 Bit # (17) */
+#define PORT_P1_ALTSEL0_P17 (0x1 << 17)
+#define PORT_P1_ALTSEL0_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_ALTSEL0_P17_GET(val) ((((val) & PORT_P1_ALTSEL0_P17) >> 17) & 0x1)
+#define PORT_P1_ALTSEL0_P17_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P17) | (((val) & 0x1) << 17))
+/* Alternate Function at Port 1 Bit # (16) */
+#define PORT_P1_ALTSEL0_P16 (0x1 << 16)
+#define PORT_P1_ALTSEL0_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_ALTSEL0_P16_GET(val) ((((val) & PORT_P1_ALTSEL0_P16) >> 16) & 0x1)
+#define PORT_P1_ALTSEL0_P16_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P16) | (((val) & 0x1) << 16))
+/* Alternate Function at Port 1 Bit # (15) */
+#define PORT_P1_ALTSEL0_P15 (0x1 << 15)
+#define PORT_P1_ALTSEL0_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_ALTSEL0_P15_GET(val) ((((val) & PORT_P1_ALTSEL0_P15) >> 15) & 0x1)
+#define PORT_P1_ALTSEL0_P15_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P15) | (((val) & 0x1) << 15))
+/* Alternate Function at Port 1 Bit # (14) */
+#define PORT_P1_ALTSEL0_P14 (0x1 << 14)
+#define PORT_P1_ALTSEL0_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P1_ALTSEL0_P14_GET(val) ((((val) & PORT_P1_ALTSEL0_P14) >> 14) & 0x1)
+#define PORT_P1_ALTSEL0_P14_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P14) | (((val) & 0x1) << 14))
+/* Alternate Function at Port 1 Bit # (13) */
+#define PORT_P1_ALTSEL0_P13 (0x1 << 13)
+#define PORT_P1_ALTSEL0_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P1_ALTSEL0_P13_GET(val) ((((val) & PORT_P1_ALTSEL0_P13) >> 13) & 0x1)
+#define PORT_P1_ALTSEL0_P13_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P13) | (((val) & 0x1) << 13))
+/* Alternate Function at Port 1 Bit # (12) */
+#define PORT_P1_ALTSEL0_P12 (0x1 << 12)
+#define PORT_P1_ALTSEL0_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P1_ALTSEL0_P12_GET(val) ((((val) & PORT_P1_ALTSEL0_P12) >> 12) & 0x1)
+#define PORT_P1_ALTSEL0_P12_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P12) | (((val) & 0x1) << 12))
+/* Alternate Function at Port 1 Bit # (11) */
+#define PORT_P1_ALTSEL0_P11 (0x1 << 11)
+#define PORT_P1_ALTSEL0_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P1_ALTSEL0_P11_GET(val) ((((val) & PORT_P1_ALTSEL0_P11) >> 11) & 0x1)
+#define PORT_P1_ALTSEL0_P11_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P11) | (((val) & 0x1) << 11))
+/* Alternate Function at Port 1 Bit # (10) */
+#define PORT_P1_ALTSEL0_P10 (0x1 << 10)
+#define PORT_P1_ALTSEL0_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P1_ALTSEL0_P10_GET(val) ((((val) & PORT_P1_ALTSEL0_P10) >> 10) & 0x1)
+#define PORT_P1_ALTSEL0_P10_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P10) | (((val) & 0x1) << 10))
+/* Alternate Function at Port 1 Bit # (9) */
+#define PORT_P1_ALTSEL0_P9 (0x1 << 9)
+#define PORT_P1_ALTSEL0_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P1_ALTSEL0_P9_GET(val) ((((val) & PORT_P1_ALTSEL0_P9) >> 9) & 0x1)
+#define PORT_P1_ALTSEL0_P9_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P9) | (((val) & 0x1) << 9))
+/* Alternate Function at Port 1 Bit # (8) */
+#define PORT_P1_ALTSEL0_P8 (0x1 << 8)
+#define PORT_P1_ALTSEL0_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P1_ALTSEL0_P8_GET(val) ((((val) & PORT_P1_ALTSEL0_P8) >> 8) & 0x1)
+#define PORT_P1_ALTSEL0_P8_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P8) | (((val) & 0x1) << 8))
+/* Alternate Function at Port 1 Bit # (7) */
+#define PORT_P1_ALTSEL0_P7 (0x1 << 7)
+#define PORT_P1_ALTSEL0_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P1_ALTSEL0_P7_GET(val) ((((val) & PORT_P1_ALTSEL0_P7) >> 7) & 0x1)
+#define PORT_P1_ALTSEL0_P7_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P7) | (((val) & 0x1) << 7))
+/* Alternate Function at Port 1 Bit # (6) */
+#define PORT_P1_ALTSEL0_P6 (0x1 << 6)
+#define PORT_P1_ALTSEL0_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P1_ALTSEL0_P6_GET(val) ((((val) & PORT_P1_ALTSEL0_P6) >> 6) & 0x1)
+#define PORT_P1_ALTSEL0_P6_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P6) | (((val) & 0x1) << 6))
+/* Alternate Function at Port 1 Bit # (5) */
+#define PORT_P1_ALTSEL0_P5 (0x1 << 5)
+#define PORT_P1_ALTSEL0_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P1_ALTSEL0_P5_GET(val) ((((val) & PORT_P1_ALTSEL0_P5) >> 5) & 0x1)
+#define PORT_P1_ALTSEL0_P5_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P5) | (((val) & 0x1) << 5))
+/* Alternate Function at Port 1 Bit # (4) */
+#define PORT_P1_ALTSEL0_P4 (0x1 << 4)
+#define PORT_P1_ALTSEL0_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P1_ALTSEL0_P4_GET(val) ((((val) & PORT_P1_ALTSEL0_P4) >> 4) & 0x1)
+#define PORT_P1_ALTSEL0_P4_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P4) | (((val) & 0x1) << 4))
+/* Alternate Function at Port 1 Bit # (3) */
+#define PORT_P1_ALTSEL0_P3 (0x1 << 3)
+#define PORT_P1_ALTSEL0_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P1_ALTSEL0_P3_GET(val) ((((val) & PORT_P1_ALTSEL0_P3) >> 3) & 0x1)
+#define PORT_P1_ALTSEL0_P3_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P3) | (((val) & 0x1) << 3))
+/* Alternate Function at Port 1 Bit # (2) */
+#define PORT_P1_ALTSEL0_P2 (0x1 << 2)
+#define PORT_P1_ALTSEL0_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P1_ALTSEL0_P2_GET(val) ((((val) & PORT_P1_ALTSEL0_P2) >> 2) & 0x1)
+#define PORT_P1_ALTSEL0_P2_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P2) | (((val) & 0x1) << 2))
+/* Alternate Function at Port 1 Bit # (1) */
+#define PORT_P1_ALTSEL0_P1 (0x1 << 1)
+#define PORT_P1_ALTSEL0_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P1_ALTSEL0_P1_GET(val) ((((val) & PORT_P1_ALTSEL0_P1) >> 1) & 0x1)
+#define PORT_P1_ALTSEL0_P1_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P1) | (((val) & 0x1) << 1))
+/* Alternate Function at Port 1 Bit # (0) */
+#define PORT_P1_ALTSEL0_P0 (0x1)
+#define PORT_P1_ALTSEL0_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P1_ALTSEL0_P0_GET(val) ((((val) & PORT_P1_ALTSEL0_P0) >> 0) & 0x1)
+#define PORT_P1_ALTSEL0_P0_SET(reg,val) (reg) = ((reg & ~PORT_P1_ALTSEL0_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 1 Pull Up Device Enable Register
+ ******************************************************************************/
+
+/* Pull Up Device Enable at Port 1 Bit # (19) */
+#define PORT_P1_PUEN_P19 (0x1 << 19)
+#define PORT_P1_PUEN_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_PUEN_P19_GET(val) ((((val) & PORT_P1_PUEN_P19) >> 19) & 0x1)
+#define PORT_P1_PUEN_P19_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P19) | (((val) & 0x1) << 19))
+/* Pull Up Device Enable at Port 1 Bit # (18) */
+#define PORT_P1_PUEN_P18 (0x1 << 18)
+#define PORT_P1_PUEN_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_PUEN_P18_GET(val) ((((val) & PORT_P1_PUEN_P18) >> 18) & 0x1)
+#define PORT_P1_PUEN_P18_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P18) | (((val) & 0x1) << 18))
+/* Pull Up Device Enable at Port 1 Bit # (17) */
+#define PORT_P1_PUEN_P17 (0x1 << 17)
+#define PORT_P1_PUEN_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_PUEN_P17_GET(val) ((((val) & PORT_P1_PUEN_P17) >> 17) & 0x1)
+#define PORT_P1_PUEN_P17_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P17) | (((val) & 0x1) << 17))
+/* Pull Up Device Enable at Port 1 Bit # (16) */
+#define PORT_P1_PUEN_P16 (0x1 << 16)
+#define PORT_P1_PUEN_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_PUEN_P16_GET(val) ((((val) & PORT_P1_PUEN_P16) >> 16) & 0x1)
+#define PORT_P1_PUEN_P16_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P16) | (((val) & 0x1) << 16))
+/* Pull Up Device Enable at Port 1 Bit # (15) */
+#define PORT_P1_PUEN_P15 (0x1 << 15)
+#define PORT_P1_PUEN_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_PUEN_P15_GET(val) ((((val) & PORT_P1_PUEN_P15) >> 15) & 0x1)
+#define PORT_P1_PUEN_P15_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P15) | (((val) & 0x1) << 15))
+/* Pull Up Device Enable at Port 1 Bit # (14) */
+#define PORT_P1_PUEN_P14 (0x1 << 14)
+#define PORT_P1_PUEN_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P1_PUEN_P14_GET(val) ((((val) & PORT_P1_PUEN_P14) >> 14) & 0x1)
+#define PORT_P1_PUEN_P14_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P14) | (((val) & 0x1) << 14))
+/* Pull Up Device Enable at Port 1 Bit # (13) */
+#define PORT_P1_PUEN_P13 (0x1 << 13)
+#define PORT_P1_PUEN_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P1_PUEN_P13_GET(val) ((((val) & PORT_P1_PUEN_P13) >> 13) & 0x1)
+#define PORT_P1_PUEN_P13_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P13) | (((val) & 0x1) << 13))
+/* Pull Up Device Enable at Port 1 Bit # (12) */
+#define PORT_P1_PUEN_P12 (0x1 << 12)
+#define PORT_P1_PUEN_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P1_PUEN_P12_GET(val) ((((val) & PORT_P1_PUEN_P12) >> 12) & 0x1)
+#define PORT_P1_PUEN_P12_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P12) | (((val) & 0x1) << 12))
+/* Pull Up Device Enable at Port 1 Bit # (11) */
+#define PORT_P1_PUEN_P11 (0x1 << 11)
+#define PORT_P1_PUEN_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P1_PUEN_P11_GET(val) ((((val) & PORT_P1_PUEN_P11) >> 11) & 0x1)
+#define PORT_P1_PUEN_P11_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P11) | (((val) & 0x1) << 11))
+/* Pull Up Device Enable at Port 1 Bit # (10) */
+#define PORT_P1_PUEN_P10 (0x1 << 10)
+#define PORT_P1_PUEN_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P1_PUEN_P10_GET(val) ((((val) & PORT_P1_PUEN_P10) >> 10) & 0x1)
+#define PORT_P1_PUEN_P10_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P10) | (((val) & 0x1) << 10))
+/* Pull Up Device Enable at Port 1 Bit # (9) */
+#define PORT_P1_PUEN_P9 (0x1 << 9)
+#define PORT_P1_PUEN_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P1_PUEN_P9_GET(val) ((((val) & PORT_P1_PUEN_P9) >> 9) & 0x1)
+#define PORT_P1_PUEN_P9_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P9) | (((val) & 0x1) << 9))
+/* Pull Up Device Enable at Port 1 Bit # (8) */
+#define PORT_P1_PUEN_P8 (0x1 << 8)
+#define PORT_P1_PUEN_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P1_PUEN_P8_GET(val) ((((val) & PORT_P1_PUEN_P8) >> 8) & 0x1)
+#define PORT_P1_PUEN_P8_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P8) | (((val) & 0x1) << 8))
+/* Pull Up Device Enable at Port 1 Bit # (7) */
+#define PORT_P1_PUEN_P7 (0x1 << 7)
+#define PORT_P1_PUEN_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P1_PUEN_P7_GET(val) ((((val) & PORT_P1_PUEN_P7) >> 7) & 0x1)
+#define PORT_P1_PUEN_P7_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P7) | (((val) & 0x1) << 7))
+/* Pull Up Device Enable at Port 1 Bit # (6) */
+#define PORT_P1_PUEN_P6 (0x1 << 6)
+#define PORT_P1_PUEN_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P1_PUEN_P6_GET(val) ((((val) & PORT_P1_PUEN_P6) >> 6) & 0x1)
+#define PORT_P1_PUEN_P6_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P6) | (((val) & 0x1) << 6))
+/* Pull Up Device Enable at Port 1 Bit # (5) */
+#define PORT_P1_PUEN_P5 (0x1 << 5)
+#define PORT_P1_PUEN_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P1_PUEN_P5_GET(val) ((((val) & PORT_P1_PUEN_P5) >> 5) & 0x1)
+#define PORT_P1_PUEN_P5_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P5) | (((val) & 0x1) << 5))
+/* Pull Up Device Enable at Port 1 Bit # (4) */
+#define PORT_P1_PUEN_P4 (0x1 << 4)
+#define PORT_P1_PUEN_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P1_PUEN_P4_GET(val) ((((val) & PORT_P1_PUEN_P4) >> 4) & 0x1)
+#define PORT_P1_PUEN_P4_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P4) | (((val) & 0x1) << 4))
+/* Pull Up Device Enable at Port 1 Bit # (3) */
+#define PORT_P1_PUEN_P3 (0x1 << 3)
+#define PORT_P1_PUEN_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P1_PUEN_P3_GET(val) ((((val) & PORT_P1_PUEN_P3) >> 3) & 0x1)
+#define PORT_P1_PUEN_P3_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P3) | (((val) & 0x1) << 3))
+/* Pull Up Device Enable at Port 1 Bit # (2) */
+#define PORT_P1_PUEN_P2 (0x1 << 2)
+#define PORT_P1_PUEN_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P1_PUEN_P2_GET(val) ((((val) & PORT_P1_PUEN_P2) >> 2) & 0x1)
+#define PORT_P1_PUEN_P2_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P2) | (((val) & 0x1) << 2))
+/* Pull Up Device Enable at Port 1 Bit # (1) */
+#define PORT_P1_PUEN_P1 (0x1 << 1)
+#define PORT_P1_PUEN_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P1_PUEN_P1_GET(val) ((((val) & PORT_P1_PUEN_P1) >> 1) & 0x1)
+#define PORT_P1_PUEN_P1_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P1) | (((val) & 0x1) << 1))
+/* Pull Up Device Enable at Port 1 Bit # (0) */
+#define PORT_P1_PUEN_P0 (0x1)
+#define PORT_P1_PUEN_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P1_PUEN_P0_GET(val) ((((val) & PORT_P1_PUEN_P0) >> 0) & 0x1)
+#define PORT_P1_PUEN_P0_SET(reg,val) (reg) = ((reg & ~PORT_P1_PUEN_P0) | (((val) & 0x1) << 0))
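+
+/*
+ * Usage sketch (illustrative only, not part of the original register map):
+ * the *_VAL macros build a single-bit field from a value, *_GET extracts a
+ * field from a register image, and *_SET read-modify-writes a register image
+ * held in a variable.  The "image" below is a plain local value; performing
+ * the actual MMIO access is outside the scope of this header.
+ */
+static inline unsigned int port_p1_puen_usage_sketch(unsigned int image)
+{
+	/* enable the pull-up on P1.3 without disturbing the other bits */
+	PORT_P1_PUEN_P3_SET(image, 1);
+	/* if the P1.0 pull-up is already enabled, also enable P1.1 */
+	if (PORT_P1_PUEN_P0_GET(image))
+		image |= PORT_P1_PUEN_P1_VAL(1);
+	return image;
+}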
+
+/*******************************************************************************
+ * External Interrupt Control Register 0
+ ******************************************************************************/
+
+/* Type of Level or Edge Detection of EXINT15 (19) */
+#define PORT_P1_EXINTCR0_EXINT15 (0x1 << 19)
+#define PORT_P1_EXINTCR0_EXINT15_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_EXINTCR0_EXINT15_GET(val) ((((val) & PORT_P1_EXINTCR0_EXINT15) >> 19) & 0x1)
+#define PORT_P1_EXINTCR0_EXINT15_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR0_EXINT15) | (((val) & 0x1) << 19))
+/* Type of Level or Edge Detection of EXINT11 (18) */
+#define PORT_P1_EXINTCR0_EXINT11 (0x1 << 18)
+#define PORT_P1_EXINTCR0_EXINT11_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_EXINTCR0_EXINT11_GET(val) ((((val) & PORT_P1_EXINTCR0_EXINT11) >> 18) & 0x1)
+#define PORT_P1_EXINTCR0_EXINT11_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR0_EXINT11) | (((val) & 0x1) << 18))
+/* Type of Level or Edge Detection of EXINT12 (17) */
+#define PORT_P1_EXINTCR0_EXINT12 (0x1 << 17)
+#define PORT_P1_EXINTCR0_EXINT12_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_EXINTCR0_EXINT12_GET(val) ((((val) & PORT_P1_EXINTCR0_EXINT12) >> 17) & 0x1)
+#define PORT_P1_EXINTCR0_EXINT12_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR0_EXINT12) | (((val) & 0x1) << 17))
+/* Type of Level or Edge Detection of EXINT13 (16) */
+#define PORT_P1_EXINTCR0_EXINT13 (0x1 << 16)
+#define PORT_P1_EXINTCR0_EXINT13_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_EXINTCR0_EXINT13_GET(val) ((((val) & PORT_P1_EXINTCR0_EXINT13) >> 16) & 0x1)
+#define PORT_P1_EXINTCR0_EXINT13_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR0_EXINT13) | (((val) & 0x1) << 16))
+/* Type of Level or Edge Detection of EXINT14 (15) */
+#define PORT_P1_EXINTCR0_EXINT14 (0x1 << 15)
+#define PORT_P1_EXINTCR0_EXINT14_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_EXINTCR0_EXINT14_GET(val) ((((val) & PORT_P1_EXINTCR0_EXINT14) >> 15) & 0x1)
+#define PORT_P1_EXINTCR0_EXINT14_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR0_EXINT14) | (((val) & 0x1) << 15))
+
+/*******************************************************************************
+ * External Interrupt Control Register 1
+ ******************************************************************************/
+
+/* Type of Level or Edge Detection of EXINT15 (19) */
+#define PORT_P1_EXINTCR1_EXINT15 (0x1 << 19)
+#define PORT_P1_EXINTCR1_EXINT15_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_EXINTCR1_EXINT15_GET(val) ((((val) & PORT_P1_EXINTCR1_EXINT15) >> 19) & 0x1)
+#define PORT_P1_EXINTCR1_EXINT15_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR1_EXINT15) | (((val) & 0x1) << 19))
+/* Type of Level or Edge Detection of EXINT11 (18) */
+#define PORT_P1_EXINTCR1_EXINT11 (0x1 << 18)
+#define PORT_P1_EXINTCR1_EXINT11_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_EXINTCR1_EXINT11_GET(val) ((((val) & PORT_P1_EXINTCR1_EXINT11) >> 18) & 0x1)
+#define PORT_P1_EXINTCR1_EXINT11_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR1_EXINT11) | (((val) & 0x1) << 18))
+/* Type of Level or Edge Detection of EXINT12 (17) */
+#define PORT_P1_EXINTCR1_EXINT12 (0x1 << 17)
+#define PORT_P1_EXINTCR1_EXINT12_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_EXINTCR1_EXINT12_GET(val) ((((val) & PORT_P1_EXINTCR1_EXINT12) >> 17) & 0x1)
+#define PORT_P1_EXINTCR1_EXINT12_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR1_EXINT12) | (((val) & 0x1) << 17))
+/* Type of Level or Edge Detection of EXINT13 (16) */
+#define PORT_P1_EXINTCR1_EXINT13 (0x1 << 16)
+#define PORT_P1_EXINTCR1_EXINT13_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_EXINTCR1_EXINT13_GET(val) ((((val) & PORT_P1_EXINTCR1_EXINT13) >> 16) & 0x1)
+#define PORT_P1_EXINTCR1_EXINT13_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR1_EXINT13) | (((val) & 0x1) << 16))
+/* Type of Level or Edge Detection of EXINT14 (15) */
+#define PORT_P1_EXINTCR1_EXINT14 (0x1 << 15)
+#define PORT_P1_EXINTCR1_EXINT14_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_EXINTCR1_EXINT14_GET(val) ((((val) & PORT_P1_EXINTCR1_EXINT14) >> 15) & 0x1)
+#define PORT_P1_EXINTCR1_EXINT14_SET(reg,val) (reg) = ((reg & ~PORT_P1_EXINTCR1_EXINT14) | (((val) & 0x1) << 15))
+
+/*******************************************************************************
+ * P1_IRNEN Register
+ ******************************************************************************/
+
+/* EXINT15 Interrupt Request Enable (19) */
+#define PORT_P1_IRNEN_EXINT15 (0x1 << 19)
+#define PORT_P1_IRNEN_EXINT15_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_IRNEN_EXINT15_GET(val) ((((val) & PORT_P1_IRNEN_EXINT15) >> 19) & 0x1)
+#define PORT_P1_IRNEN_EXINT15_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNEN_EXINT15) | (((val) & 0x1) << 19))
+/* EXINT11 Interrupt Request Enable (18) */
+#define PORT_P1_IRNEN_EXINT11 (0x1 << 18)
+#define PORT_P1_IRNEN_EXINT11_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_IRNEN_EXINT11_GET(val) ((((val) & PORT_P1_IRNEN_EXINT11) >> 18) & 0x1)
+#define PORT_P1_IRNEN_EXINT11_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNEN_EXINT11) | (((val) & 0x1) << 18))
+/* EXINT12 Interrupt Request Enable (17) */
+#define PORT_P1_IRNEN_EXINT12 (0x1 << 17)
+#define PORT_P1_IRNEN_EXINT12_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_IRNEN_EXINT12_GET(val) ((((val) & PORT_P1_IRNEN_EXINT12) >> 17) & 0x1)
+#define PORT_P1_IRNEN_EXINT12_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNEN_EXINT12) | (((val) & 0x1) << 17))
+/* EXINT13 Interrupt Request Enable (16) */
+#define PORT_P1_IRNEN_EXINT13 (0x1 << 16)
+#define PORT_P1_IRNEN_EXINT13_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_IRNEN_EXINT13_GET(val) ((((val) & PORT_P1_IRNEN_EXINT13) >> 16) & 0x1)
+#define PORT_P1_IRNEN_EXINT13_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNEN_EXINT13) | (((val) & 0x1) << 16))
+/* EXINT14 Interrupt Request Enable (15) */
+#define PORT_P1_IRNEN_EXINT14 (0x1 << 15)
+#define PORT_P1_IRNEN_EXINT14_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_IRNEN_EXINT14_GET(val) ((((val) & PORT_P1_IRNEN_EXINT14) >> 15) & 0x1)
+#define PORT_P1_IRNEN_EXINT14_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNEN_EXINT14) | (((val) & 0x1) << 15))
+
+/*******************************************************************************
+ * P1_IRNICR Register
+ ******************************************************************************/
+
+/* EXINT15 Interrupt Request (19) */
+#define PORT_P1_IRNICR_EXINT15 (0x1 << 19)
+#define PORT_P1_IRNICR_EXINT15_GET(val) ((((val) & PORT_P1_IRNICR_EXINT15) >> 19) & 0x1)
+/* EXINT11 Interrupt Request (18) */
+#define PORT_P1_IRNICR_EXINT11 (0x1 << 18)
+#define PORT_P1_IRNICR_EXINT11_GET(val) ((((val) & PORT_P1_IRNICR_EXINT11) >> 18) & 0x1)
+/* EXINT12 Interrupt Request (17) */
+#define PORT_P1_IRNICR_EXINT12 (0x1 << 17)
+#define PORT_P1_IRNICR_EXINT12_GET(val) ((((val) & PORT_P1_IRNICR_EXINT12) >> 17) & 0x1)
+/* EXINT13 Interrupt Request (16) */
+#define PORT_P1_IRNICR_EXINT13 (0x1 << 16)
+#define PORT_P1_IRNICR_EXINT13_GET(val) ((((val) & PORT_P1_IRNICR_EXINT13) >> 16) & 0x1)
+/* EXINT14 Interrupt Request (15) */
+#define PORT_P1_IRNICR_EXINT14 (0x1 << 15)
+#define PORT_P1_IRNICR_EXINT14_GET(val) ((((val) & PORT_P1_IRNICR_EXINT14) >> 15) & 0x1)
+
+/*******************************************************************************
+ * P1_IRNCR Register
+ ******************************************************************************/
+
+/* EXINT15 Interrupt Request (19) */
+#define PORT_P1_IRNCR_EXINT15 (0x1 << 19)
+#define PORT_P1_IRNCR_EXINT15_GET(val) ((((val) & PORT_P1_IRNCR_EXINT15) >> 19) & 0x1)
+/* EXINT11 Interrupt Request (18) */
+#define PORT_P1_IRNCR_EXINT11 (0x1 << 18)
+#define PORT_P1_IRNCR_EXINT11_GET(val) ((((val) & PORT_P1_IRNCR_EXINT11) >> 18) & 0x1)
+/* EXINT12 Interrupt Request (17) */
+#define PORT_P1_IRNCR_EXINT12 (0x1 << 17)
+#define PORT_P1_IRNCR_EXINT12_GET(val) ((((val) & PORT_P1_IRNCR_EXINT12) >> 17) & 0x1)
+/* EXINT13 Interrupt Request (16) */
+#define PORT_P1_IRNCR_EXINT13 (0x1 << 16)
+#define PORT_P1_IRNCR_EXINT13_GET(val) ((((val) & PORT_P1_IRNCR_EXINT13) >> 16) & 0x1)
+/* EXINT14 Interrupt Request (15) */
+#define PORT_P1_IRNCR_EXINT14 (0x1 << 15)
+#define PORT_P1_IRNCR_EXINT14_GET(val) ((((val) & PORT_P1_IRNCR_EXINT14) >> 15) & 0x1)
+
+/*******************************************************************************
+ * P1 External Event Detection Configuration Register
+ ******************************************************************************/
+
+/* EXINT15 configured for Edge or Level Detection (19) */
+#define PORT_P1_IRNCFG_EXINT15 (0x1 << 19)
+#define PORT_P1_IRNCFG_EXINT15_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_IRNCFG_EXINT15_GET(val) ((((val) & PORT_P1_IRNCFG_EXINT15) >> 19) & 0x1)
+#define PORT_P1_IRNCFG_EXINT15_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNCFG_EXINT15) | (((val) & 0x1) << 19))
+/* EXINT11 configured for Edge or Level Detection (18) */
+#define PORT_P1_IRNCFG_EXINT11 (0x1 << 18)
+#define PORT_P1_IRNCFG_EXINT11_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_IRNCFG_EXINT11_GET(val) ((((val) & PORT_P1_IRNCFG_EXINT11) >> 18) & 0x1)
+#define PORT_P1_IRNCFG_EXINT11_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNCFG_EXINT11) | (((val) & 0x1) << 18))
+/* EXINT12 configured for Edge or Level Detection (17) */
+#define PORT_P1_IRNCFG_EXINT12 (0x1 << 17)
+#define PORT_P1_IRNCFG_EXINT12_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_IRNCFG_EXINT12_GET(val) ((((val) & PORT_P1_IRNCFG_EXINT12) >> 17) & 0x1)
+#define PORT_P1_IRNCFG_EXINT12_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNCFG_EXINT12) | (((val) & 0x1) << 17))
+/* EXINT13 configured for Edge or Level Detection (16) */
+#define PORT_P1_IRNCFG_EXINT13 (0x1 << 16)
+#define PORT_P1_IRNCFG_EXINT13_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_IRNCFG_EXINT13_GET(val) ((((val) & PORT_P1_IRNCFG_EXINT13) >> 16) & 0x1)
+#define PORT_P1_IRNCFG_EXINT13_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNCFG_EXINT13) | (((val) & 0x1) << 16))
+/* EXINT14 configured for Edge or Level Detection (15) */
+#define PORT_P1_IRNCFG_EXINT14 (0x1 << 15)
+#define PORT_P1_IRNCFG_EXINT14_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_IRNCFG_EXINT14_GET(val) ((((val) & PORT_P1_IRNCFG_EXINT14) >> 15) & 0x1)
+#define PORT_P1_IRNCFG_EXINT14_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNCFG_EXINT14) | (((val) & 0x1) << 15))
+
+/*******************************************************************************
+ * P1_IRNENSET Register
+ ******************************************************************************/
+
+/* Set Interrupt Node Enable Flag EXINT15 (19) */
+#define PORT_P1_IRNENSET_EXINT15 (0x1 << 19)
+#define PORT_P1_IRNENSET_EXINT15_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_IRNENSET_EXINT15_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENSET_EXINT15) | (((val) & 0x1) << 19))
+/* Set Interrupt Node Enable Flag EXINT11 (18) */
+#define PORT_P1_IRNENSET_EXINT11 (0x1 << 18)
+#define PORT_P1_IRNENSET_EXINT11_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_IRNENSET_EXINT11_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENSET_EXINT11) | (((val) & 0x1) << 18))
+/* Set Interrupt Node Enable Flag EXINT12 (17) */
+#define PORT_P1_IRNENSET_EXINT12 (0x1 << 17)
+#define PORT_P1_IRNENSET_EXINT12_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_IRNENSET_EXINT12_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENSET_EXINT12) | (((val) & 0x1) << 17))
+/* Set Interrupt Node Enable Flag EXINT13 (16) */
+#define PORT_P1_IRNENSET_EXINT13 (0x1 << 16)
+#define PORT_P1_IRNENSET_EXINT13_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_IRNENSET_EXINT13_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENSET_EXINT13) | (((val) & 0x1) << 16))
+/* Set Interrupt Node Enable Flag EXINT14 (15) */
+#define PORT_P1_IRNENSET_EXINT14 (0x1 << 15)
+#define PORT_P1_IRNENSET_EXINT14_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_IRNENSET_EXINT14_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENSET_EXINT14) | (((val) & 0x1) << 15))
+
+/*******************************************************************************
+ * P1_IRNENCLR Register
+ ******************************************************************************/
+
+/* Clear Interrupt Node Enable Flag EXINT15 (19) */
+#define PORT_P1_IRNENCLR_EXINT15 (0x1 << 19)
+#define PORT_P1_IRNENCLR_EXINT15_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P1_IRNENCLR_EXINT15_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENCLR_EXINT15) | (((val) & 0x1) << 19))
+/* Clear Interrupt Node Enable Flag EXINT11 (18) */
+#define PORT_P1_IRNENCLR_EXINT11 (0x1 << 18)
+#define PORT_P1_IRNENCLR_EXINT11_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P1_IRNENCLR_EXINT11_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENCLR_EXINT11) | (((val) & 0x1) << 18))
+/* Clear Interrupt Node Enable Flag EXINT12 (17) */
+#define PORT_P1_IRNENCLR_EXINT12 (0x1 << 17)
+#define PORT_P1_IRNENCLR_EXINT12_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P1_IRNENCLR_EXINT12_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENCLR_EXINT12) | (((val) & 0x1) << 17))
+/* Clear Interrupt Node Enable Flag EXINT13 (16) */
+#define PORT_P1_IRNENCLR_EXINT13 (0x1 << 16)
+#define PORT_P1_IRNENCLR_EXINT13_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P1_IRNENCLR_EXINT13_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENCLR_EXINT13) | (((val) & 0x1) << 16))
+/* Clear Interrupt Node Enable Flag EXINT14 (15) */
+#define PORT_P1_IRNENCLR_EXINT14 (0x1 << 15)
+#define PORT_P1_IRNENCLR_EXINT14_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P1_IRNENCLR_EXINT14_SET(reg,val) (reg) = ((reg & ~PORT_P1_IRNENCLR_EXINT14) | (((val) & 0x1) << 15))
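+
+/*
+ * Usage sketch (illustrative only): P1_IRNENSET/P1_IRNENCLR look like the
+ * usual write-one-to-set / write-one-to-clear companions of P1_IRNEN, so the
+ * common pattern is to write just the bit masks defined above.  The return
+ * value here is a plain integer mask; no real register access is performed.
+ */
+static inline unsigned int port_p1_irnen_set_mask_sketch(void)
+{
+	/* mask that would request enabling the EXINT13 and EXINT14 nodes */
+	return PORT_P1_IRNENSET_EXINT13 | PORT_P1_IRNENSET_EXINT14;
+}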
+
+/*******************************************************************************
+ * Port 2 Data Output Register
+ ******************************************************************************/
+
+/* Port 2 Pin # Output Value (19) */
+#define PORT_P2_OUT_P19 (0x1 << 19)
+#define PORT_P2_OUT_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P2_OUT_P19_GET(val) ((((val) & PORT_P2_OUT_P19) >> 19) & 0x1)
+#define PORT_P2_OUT_P19_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P19) | (((val) & 0x1) << 19))
+/* Port 2 Pin # Output Value (18) */
+#define PORT_P2_OUT_P18 (0x1 << 18)
+#define PORT_P2_OUT_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P2_OUT_P18_GET(val) ((((val) & PORT_P2_OUT_P18) >> 18) & 0x1)
+#define PORT_P2_OUT_P18_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P18) | (((val) & 0x1) << 18))
+/* Port 2 Pin # Output Value (17) */
+#define PORT_P2_OUT_P17 (0x1 << 17)
+#define PORT_P2_OUT_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P2_OUT_P17_GET(val) ((((val) & PORT_P2_OUT_P17) >> 17) & 0x1)
+#define PORT_P2_OUT_P17_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P17) | (((val) & 0x1) << 17))
+/* Port 2 Pin # Output Value (16) */
+#define PORT_P2_OUT_P16 (0x1 << 16)
+#define PORT_P2_OUT_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P2_OUT_P16_GET(val) ((((val) & PORT_P2_OUT_P16) >> 16) & 0x1)
+#define PORT_P2_OUT_P16_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P16) | (((val) & 0x1) << 16))
+/* Port 2 Pin # Output Value (15) */
+#define PORT_P2_OUT_P15 (0x1 << 15)
+#define PORT_P2_OUT_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P2_OUT_P15_GET(val) ((((val) & PORT_P2_OUT_P15) >> 15) & 0x1)
+#define PORT_P2_OUT_P15_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P15) | (((val) & 0x1) << 15))
+/* Port 2 Pin # Output Value (14) */
+#define PORT_P2_OUT_P14 (0x1 << 14)
+#define PORT_P2_OUT_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P2_OUT_P14_GET(val) ((((val) & PORT_P2_OUT_P14) >> 14) & 0x1)
+#define PORT_P2_OUT_P14_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P14) | (((val) & 0x1) << 14))
+/* Port 2 Pin # Output Value (13) */
+#define PORT_P2_OUT_P13 (0x1 << 13)
+#define PORT_P2_OUT_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P2_OUT_P13_GET(val) ((((val) & PORT_P2_OUT_P13) >> 13) & 0x1)
+#define PORT_P2_OUT_P13_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P13) | (((val) & 0x1) << 13))
+/* Port 2 Pin # Output Value (12) */
+#define PORT_P2_OUT_P12 (0x1 << 12)
+#define PORT_P2_OUT_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P2_OUT_P12_GET(val) ((((val) & PORT_P2_OUT_P12) >> 12) & 0x1)
+#define PORT_P2_OUT_P12_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P12) | (((val) & 0x1) << 12))
+/* Port 2 Pin # Output Value (11) */
+#define PORT_P2_OUT_P11 (0x1 << 11)
+#define PORT_P2_OUT_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P2_OUT_P11_GET(val) ((((val) & PORT_P2_OUT_P11) >> 11) & 0x1)
+#define PORT_P2_OUT_P11_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P11) | (((val) & 0x1) << 11))
+/* Port 2 Pin # Output Value (10) */
+#define PORT_P2_OUT_P10 (0x1 << 10)
+#define PORT_P2_OUT_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P2_OUT_P10_GET(val) ((((val) & PORT_P2_OUT_P10) >> 10) & 0x1)
+#define PORT_P2_OUT_P10_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P10) | (((val) & 0x1) << 10))
+/* Port 2 Pin # Output Value (9) */
+#define PORT_P2_OUT_P9 (0x1 << 9)
+#define PORT_P2_OUT_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P2_OUT_P9_GET(val) ((((val) & PORT_P2_OUT_P9) >> 9) & 0x1)
+#define PORT_P2_OUT_P9_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P9) | (((val) & 0x1) << 9))
+/* Port 2 Pin # Output Value (8) */
+#define PORT_P2_OUT_P8 (0x1 << 8)
+#define PORT_P2_OUT_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P2_OUT_P8_GET(val) ((((val) & PORT_P2_OUT_P8) >> 8) & 0x1)
+#define PORT_P2_OUT_P8_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P8) | (((val) & 0x1) << 8))
+/* Port 2 Pin # Output Value (7) */
+#define PORT_P2_OUT_P7 (0x1 << 7)
+#define PORT_P2_OUT_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P2_OUT_P7_GET(val) ((((val) & PORT_P2_OUT_P7) >> 7) & 0x1)
+#define PORT_P2_OUT_P7_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P7) | (((val) & 0x1) << 7))
+/* Port 2 Pin # Output Value (6) */
+#define PORT_P2_OUT_P6 (0x1 << 6)
+#define PORT_P2_OUT_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P2_OUT_P6_GET(val) ((((val) & PORT_P2_OUT_P6) >> 6) & 0x1)
+#define PORT_P2_OUT_P6_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P6) | (((val) & 0x1) << 6))
+/* Port 2 Pin # Output Value (5) */
+#define PORT_P2_OUT_P5 (0x1 << 5)
+#define PORT_P2_OUT_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P2_OUT_P5_GET(val) ((((val) & PORT_P2_OUT_P5) >> 5) & 0x1)
+#define PORT_P2_OUT_P5_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P5) | (((val) & 0x1) << 5))
+/* Port 2 Pin # Output Value (4) */
+#define PORT_P2_OUT_P4 (0x1 << 4)
+#define PORT_P2_OUT_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P2_OUT_P4_GET(val) ((((val) & PORT_P2_OUT_P4) >> 4) & 0x1)
+#define PORT_P2_OUT_P4_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P4) | (((val) & 0x1) << 4))
+/* Port 2 Pin # Output Value (3) */
+#define PORT_P2_OUT_P3 (0x1 << 3)
+#define PORT_P2_OUT_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P2_OUT_P3_GET(val) ((((val) & PORT_P2_OUT_P3) >> 3) & 0x1)
+#define PORT_P2_OUT_P3_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P3) | (((val) & 0x1) << 3))
+/* Port 2 Pin # Output Value (2) */
+#define PORT_P2_OUT_P2 (0x1 << 2)
+#define PORT_P2_OUT_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P2_OUT_P2_GET(val) ((((val) & PORT_P2_OUT_P2) >> 2) & 0x1)
+#define PORT_P2_OUT_P2_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P2) | (((val) & 0x1) << 2))
+/* Port 2 Pin # Output Value (1) */
+#define PORT_P2_OUT_P1 (0x1 << 1)
+#define PORT_P2_OUT_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P2_OUT_P1_GET(val) ((((val) & PORT_P2_OUT_P1) >> 1) & 0x1)
+#define PORT_P2_OUT_P1_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P1) | (((val) & 0x1) << 1))
+/* Port 2 Pin # Output Value (0) */
+#define PORT_P2_OUT_P0 (0x1)
+#define PORT_P2_OUT_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P2_OUT_P0_GET(val) ((((val) & PORT_P2_OUT_P0) >> 0) & 0x1)
+#define PORT_P2_OUT_P0_SET(reg,val) (reg) = ((reg & ~PORT_P2_OUT_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 2 Data Input Register
+ ******************************************************************************/
+
+/* Port 2 Pin # Latched Input Value (19) */
+#define PORT_P2_IN_P19 (0x1 << 19)
+#define PORT_P2_IN_P19_GET(val) ((((val) & PORT_P2_IN_P19) >> 19) & 0x1)
+/* Port 2 Pin # Latched Input Value (18) */
+#define PORT_P2_IN_P18 (0x1 << 18)
+#define PORT_P2_IN_P18_GET(val) ((((val) & PORT_P2_IN_P18) >> 18) & 0x1)
+/* Port 2 Pin # Latched Input Value (17) */
+#define PORT_P2_IN_P17 (0x1 << 17)
+#define PORT_P2_IN_P17_GET(val) ((((val) & PORT_P2_IN_P17) >> 17) & 0x1)
+/* Port 2 Pin # Latched Input Value (16) */
+#define PORT_P2_IN_P16 (0x1 << 16)
+#define PORT_P2_IN_P16_GET(val) ((((val) & PORT_P2_IN_P16) >> 16) & 0x1)
+/* Port 2 Pin # Latched Input Value (15) */
+#define PORT_P2_IN_P15 (0x1 << 15)
+#define PORT_P2_IN_P15_GET(val) ((((val) & PORT_P2_IN_P15) >> 15) & 0x1)
+/* Port 2 Pin # Latched Input Value (14) */
+#define PORT_P2_IN_P14 (0x1 << 14)
+#define PORT_P2_IN_P14_GET(val) ((((val) & PORT_P2_IN_P14) >> 14) & 0x1)
+/* Port 2 Pin # Latched Input Value (13) */
+#define PORT_P2_IN_P13 (0x1 << 13)
+#define PORT_P2_IN_P13_GET(val) ((((val) & PORT_P2_IN_P13) >> 13) & 0x1)
+/* Port 2 Pin # Latched Input Value (12) */
+#define PORT_P2_IN_P12 (0x1 << 12)
+#define PORT_P2_IN_P12_GET(val) ((((val) & PORT_P2_IN_P12) >> 12) & 0x1)
+/* Port 2 Pin # Latched Input Value (11) */
+#define PORT_P2_IN_P11 (0x1 << 11)
+#define PORT_P2_IN_P11_GET(val) ((((val) & PORT_P2_IN_P11) >> 11) & 0x1)
+/* Port 2 Pin # Latched Input Value (10) */
+#define PORT_P2_IN_P10 (0x1 << 10)
+#define PORT_P2_IN_P10_GET(val) ((((val) & PORT_P2_IN_P10) >> 10) & 0x1)
+/* Port 2 Pin # Latched Input Value (9) */
+#define PORT_P2_IN_P9 (0x1 << 9)
+#define PORT_P2_IN_P9_GET(val) ((((val) & PORT_P2_IN_P9) >> 9) & 0x1)
+/* Port 2 Pin # Latched Input Value (8) */
+#define PORT_P2_IN_P8 (0x1 << 8)
+#define PORT_P2_IN_P8_GET(val) ((((val) & PORT_P2_IN_P8) >> 8) & 0x1)
+/* Port 2 Pin # Latched Input Value (7) */
+#define PORT_P2_IN_P7 (0x1 << 7)
+#define PORT_P2_IN_P7_GET(val) ((((val) & PORT_P2_IN_P7) >> 7) & 0x1)
+/* Port 2 Pin # Latched Input Value (6) */
+#define PORT_P2_IN_P6 (0x1 << 6)
+#define PORT_P2_IN_P6_GET(val) ((((val) & PORT_P2_IN_P6) >> 6) & 0x1)
+/* Port 2 Pin # Latched Input Value (5) */
+#define PORT_P2_IN_P5 (0x1 << 5)
+#define PORT_P2_IN_P5_GET(val) ((((val) & PORT_P2_IN_P5) >> 5) & 0x1)
+/* Port 2 Pin # Latched Input Value (4) */
+#define PORT_P2_IN_P4 (0x1 << 4)
+#define PORT_P2_IN_P4_GET(val) ((((val) & PORT_P2_IN_P4) >> 4) & 0x1)
+/* Port 2 Pin # Latched Input Value (3) */
+#define PORT_P2_IN_P3 (0x1 << 3)
+#define PORT_P2_IN_P3_GET(val) ((((val) & PORT_P2_IN_P3) >> 3) & 0x1)
+/* Port 2 Pin # Latched Input Value (2) */
+#define PORT_P2_IN_P2 (0x1 << 2)
+#define PORT_P2_IN_P2_GET(val) ((((val) & PORT_P2_IN_P2) >> 2) & 0x1)
+/* Port 2 Pin # Latched Input Value (1) */
+#define PORT_P2_IN_P1 (0x1 << 1)
+#define PORT_P2_IN_P1_GET(val) ((((val) & PORT_P2_IN_P1) >> 1) & 0x1)
+/* Port 2 Pin # Latched Input Value (0) */
+#define PORT_P2_IN_P0 (0x1)
+#define PORT_P2_IN_P0_GET(val) ((((val) & PORT_P2_IN_P0) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Port 2 Direction Register
+ ******************************************************************************/
+
+/* Port 2 Pin # Direction Control (19) */
+#define PORT_P2_DIR_P19 (0x1 << 19)
+#define PORT_P2_DIR_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P2_DIR_P19_GET(val) ((((val) & PORT_P2_DIR_P19) >> 19) & 0x1)
+#define PORT_P2_DIR_P19_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P19) | (((val) & 0x1) << 19))
+/* Port 2 Pin # Direction Control (18) */
+#define PORT_P2_DIR_P18 (0x1 << 18)
+#define PORT_P2_DIR_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P2_DIR_P18_GET(val) ((((val) & PORT_P2_DIR_P18) >> 18) & 0x1)
+#define PORT_P2_DIR_P18_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P18) | (((val) & 0x1) << 18))
+/* Port 2 Pin # Direction Control (17) */
+#define PORT_P2_DIR_P17 (0x1 << 17)
+#define PORT_P2_DIR_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P2_DIR_P17_GET(val) ((((val) & PORT_P2_DIR_P17) >> 17) & 0x1)
+#define PORT_P2_DIR_P17_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P17) | (((val) & 0x1) << 17))
+/* Port 2 Pin # Direction Control (16) */
+#define PORT_P2_DIR_P16 (0x1 << 16)
+#define PORT_P2_DIR_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P2_DIR_P16_GET(val) ((((val) & PORT_P2_DIR_P16) >> 16) & 0x1)
+#define PORT_P2_DIR_P16_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P16) | (((val) & 0x1) << 16))
+/* Port 2 Pin # Direction Control (15) */
+#define PORT_P2_DIR_P15 (0x1 << 15)
+#define PORT_P2_DIR_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P2_DIR_P15_GET(val) ((((val) & PORT_P2_DIR_P15) >> 15) & 0x1)
+#define PORT_P2_DIR_P15_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P15) | (((val) & 0x1) << 15))
+/* Port 2 Pin # Direction Control (14) */
+#define PORT_P2_DIR_P14 (0x1 << 14)
+#define PORT_P2_DIR_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P2_DIR_P14_GET(val) ((((val) & PORT_P2_DIR_P14) >> 14) & 0x1)
+#define PORT_P2_DIR_P14_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P14) | (((val) & 0x1) << 14))
+/* Port 2 Pin # Direction Control (13) */
+#define PORT_P2_DIR_P13 (0x1 << 13)
+#define PORT_P2_DIR_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P2_DIR_P13_GET(val) ((((val) & PORT_P2_DIR_P13) >> 13) & 0x1)
+#define PORT_P2_DIR_P13_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P13) | (((val) & 0x1) << 13))
+/* Port 2 Pin # Direction Control (12) */
+#define PORT_P2_DIR_P12 (0x1 << 12)
+#define PORT_P2_DIR_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P2_DIR_P12_GET(val) ((((val) & PORT_P2_DIR_P12) >> 12) & 0x1)
+#define PORT_P2_DIR_P12_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P12) | (((val) & 0x1) << 12))
+/* Port 2 Pin # Direction Control (11) */
+#define PORT_P2_DIR_P11 (0x1 << 11)
+#define PORT_P2_DIR_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P2_DIR_P11_GET(val) ((((val) & PORT_P2_DIR_P11) >> 11) & 0x1)
+#define PORT_P2_DIR_P11_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P11) | (((val) & 0x1) << 11))
+/* Port 2 Pin # Direction Control (10) */
+#define PORT_P2_DIR_P10 (0x1 << 10)
+#define PORT_P2_DIR_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P2_DIR_P10_GET(val) ((((val) & PORT_P2_DIR_P10) >> 10) & 0x1)
+#define PORT_P2_DIR_P10_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P10) | (((val) & 0x1) << 10))
+/* Port 2 Pin # Direction Control (9) */
+#define PORT_P2_DIR_P9 (0x1 << 9)
+#define PORT_P2_DIR_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P2_DIR_P9_GET(val) ((((val) & PORT_P2_DIR_P9) >> 9) & 0x1)
+#define PORT_P2_DIR_P9_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P9) | (((val) & 0x1) << 9))
+/* Port 2 Pin # Direction Control (8) */
+#define PORT_P2_DIR_P8 (0x1 << 8)
+#define PORT_P2_DIR_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P2_DIR_P8_GET(val) ((((val) & PORT_P2_DIR_P8) >> 8) & 0x1)
+#define PORT_P2_DIR_P8_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P8) | (((val) & 0x1) << 8))
+/* Port 2 Pin # Direction Control (7) */
+#define PORT_P2_DIR_P7 (0x1 << 7)
+#define PORT_P2_DIR_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P2_DIR_P7_GET(val) ((((val) & PORT_P2_DIR_P7) >> 7) & 0x1)
+#define PORT_P2_DIR_P7_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P7) | (((val) & 0x1) << 7))
+/* Port 2 Pin # Direction Control (6) */
+#define PORT_P2_DIR_P6 (0x1 << 6)
+#define PORT_P2_DIR_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P2_DIR_P6_GET(val) ((((val) & PORT_P2_DIR_P6) >> 6) & 0x1)
+#define PORT_P2_DIR_P6_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P6) | (((val) & 0x1) << 6))
+/* Port 2 Pin # Direction Control (5) */
+#define PORT_P2_DIR_P5 (0x1 << 5)
+#define PORT_P2_DIR_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P2_DIR_P5_GET(val) ((((val) & PORT_P2_DIR_P5) >> 5) & 0x1)
+#define PORT_P2_DIR_P5_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P5) | (((val) & 0x1) << 5))
+/* Port 2 Pin # Direction Control (4) */
+#define PORT_P2_DIR_P4 (0x1 << 4)
+#define PORT_P2_DIR_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P2_DIR_P4_GET(val) ((((val) & PORT_P2_DIR_P4) >> 4) & 0x1)
+#define PORT_P2_DIR_P4_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P4) | (((val) & 0x1) << 4))
+/* Port 2 Pin # Direction Control (3) */
+#define PORT_P2_DIR_P3 (0x1 << 3)
+#define PORT_P2_DIR_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P2_DIR_P3_GET(val) ((((val) & PORT_P2_DIR_P3) >> 3) & 0x1)
+#define PORT_P2_DIR_P3_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P3) | (((val) & 0x1) << 3))
+/* Port 2 Pin # Direction Control (2) */
+#define PORT_P2_DIR_P2 (0x1 << 2)
+#define PORT_P2_DIR_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P2_DIR_P2_GET(val) ((((val) & PORT_P2_DIR_P2) >> 2) & 0x1)
+#define PORT_P2_DIR_P2_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P2) | (((val) & 0x1) << 2))
+/* Port 2 Pin # Direction Control (1) */
+#define PORT_P2_DIR_P1 (0x1 << 1)
+#define PORT_P2_DIR_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P2_DIR_P1_GET(val) ((((val) & PORT_P2_DIR_P1) >> 1) & 0x1)
+#define PORT_P2_DIR_P1_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P1) | (((val) & 0x1) << 1))
+/* Port 2 Pin # Direction Control (0) */
+#define PORT_P2_DIR_P0 (0x1)
+#define PORT_P2_DIR_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P2_DIR_P0_GET(val) ((((val) & PORT_P2_DIR_P0) >> 0) & 0x1)
+#define PORT_P2_DIR_P0_SET(reg,val) (reg) = ((reg & ~PORT_P2_DIR_P0) | (((val) & 0x1) << 0))
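+
+/*
+ * Usage sketch (illustrative only): driving a Port 2 pin usually means
+ * setting its direction bit (assumed here to select output mode) and then
+ * its output bit.  The two parameters stand in for images of the P2_DIR and
+ * P2_OUT registers; mapping them onto the real SoC addresses is outside the
+ * scope of this header.
+ */
+static inline void port_p2_drive_p7_high_sketch(unsigned int *dir,
+						unsigned int *out)
+{
+	PORT_P2_DIR_P7_SET(*dir, 1);	/* P2.7 as output (assumes 1 = output) */
+	PORT_P2_OUT_P7_SET(*out, 1);	/* drive P2.7 high */
+}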
+
+/*******************************************************************************
+ * Port 2 Alternate Function Select Register 0
+ ******************************************************************************/
+
+/* Alternate Function at Port 2 Bit # (19) */
+#define PORT_P2_ALTSEL0_P19 (0x1 << 19)
+#define PORT_P2_ALTSEL0_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P2_ALTSEL0_P19_GET(val) ((((val) & PORT_P2_ALTSEL0_P19) >> 19) & 0x1)
+#define PORT_P2_ALTSEL0_P19_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P19) | (((val) & 0x1) << 19))
+/* Alternate Function at Port 2 Bit # (18) */
+#define PORT_P2_ALTSEL0_P18 (0x1 << 18)
+#define PORT_P2_ALTSEL0_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P2_ALTSEL0_P18_GET(val) ((((val) & PORT_P2_ALTSEL0_P18) >> 18) & 0x1)
+#define PORT_P2_ALTSEL0_P18_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P18) | (((val) & 0x1) << 18))
+/* Alternate Function at Port 2 Bit # (17) */
+#define PORT_P2_ALTSEL0_P17 (0x1 << 17)
+#define PORT_P2_ALTSEL0_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P2_ALTSEL0_P17_GET(val) ((((val) & PORT_P2_ALTSEL0_P17) >> 17) & 0x1)
+#define PORT_P2_ALTSEL0_P17_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P17) | (((val) & 0x1) << 17))
+/* Alternate Function at Port 2 Bit # (16) */
+#define PORT_P2_ALTSEL0_P16 (0x1 << 16)
+#define PORT_P2_ALTSEL0_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P2_ALTSEL0_P16_GET(val) ((((val) & PORT_P2_ALTSEL0_P16) >> 16) & 0x1)
+#define PORT_P2_ALTSEL0_P16_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P16) | (((val) & 0x1) << 16))
+/* Alternate Function at Port 2 Bit # (15) */
+#define PORT_P2_ALTSEL0_P15 (0x1 << 15)
+#define PORT_P2_ALTSEL0_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P2_ALTSEL0_P15_GET(val) ((((val) & PORT_P2_ALTSEL0_P15) >> 15) & 0x1)
+#define PORT_P2_ALTSEL0_P15_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P15) | (((val) & 0x1) << 15))
+/* Alternate Function at Port 2 Bit # (14) */
+#define PORT_P2_ALTSEL0_P14 (0x1 << 14)
+#define PORT_P2_ALTSEL0_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P2_ALTSEL0_P14_GET(val) ((((val) & PORT_P2_ALTSEL0_P14) >> 14) & 0x1)
+#define PORT_P2_ALTSEL0_P14_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P14) | (((val) & 0x1) << 14))
+/* Alternate Function at Port 2 Bit # (13) */
+#define PORT_P2_ALTSEL0_P13 (0x1 << 13)
+#define PORT_P2_ALTSEL0_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P2_ALTSEL0_P13_GET(val) ((((val) & PORT_P2_ALTSEL0_P13) >> 13) & 0x1)
+#define PORT_P2_ALTSEL0_P13_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P13) | (((val) & 0x1) << 13))
+/* Alternate Function at Port 2 Bit # (12) */
+#define PORT_P2_ALTSEL0_P12 (0x1 << 12)
+#define PORT_P2_ALTSEL0_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P2_ALTSEL0_P12_GET(val) ((((val) & PORT_P2_ALTSEL0_P12) >> 12) & 0x1)
+#define PORT_P2_ALTSEL0_P12_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P12) | (((val) & 0x1) << 12))
+/* Alternate Function at Port 2 Bit # (11) */
+#define PORT_P2_ALTSEL0_P11 (0x1 << 11)
+#define PORT_P2_ALTSEL0_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P2_ALTSEL0_P11_GET(val) ((((val) & PORT_P2_ALTSEL0_P11) >> 11) & 0x1)
+#define PORT_P2_ALTSEL0_P11_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P11) | (((val) & 0x1) << 11))
+/* Alternate Function at Port 2 Bit # (10) */
+#define PORT_P2_ALTSEL0_P10 (0x1 << 10)
+#define PORT_P2_ALTSEL0_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P2_ALTSEL0_P10_GET(val) ((((val) & PORT_P2_ALTSEL0_P10) >> 10) & 0x1)
+#define PORT_P2_ALTSEL0_P10_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P10) | (((val) & 0x1) << 10))
+/* Alternate Function at Port 2 Bit # (9) */
+#define PORT_P2_ALTSEL0_P9 (0x1 << 9)
+#define PORT_P2_ALTSEL0_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P2_ALTSEL0_P9_GET(val) ((((val) & PORT_P2_ALTSEL0_P9) >> 9) & 0x1)
+#define PORT_P2_ALTSEL0_P9_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P9) | (((val) & 0x1) << 9))
+/* Alternate Function at Port 2 Bit # (8) */
+#define PORT_P2_ALTSEL0_P8 (0x1 << 8)
+#define PORT_P2_ALTSEL0_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P2_ALTSEL0_P8_GET(val) ((((val) & PORT_P2_ALTSEL0_P8) >> 8) & 0x1)
+#define PORT_P2_ALTSEL0_P8_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P8) | (((val) & 0x1) << 8))
+/* Alternate Function at Port 2 Bit # (7) */
+#define PORT_P2_ALTSEL0_P7 (0x1 << 7)
+#define PORT_P2_ALTSEL0_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P2_ALTSEL0_P7_GET(val) ((((val) & PORT_P2_ALTSEL0_P7) >> 7) & 0x1)
+#define PORT_P2_ALTSEL0_P7_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P7) | (((val) & 0x1) << 7))
+/* Alternate Function at Port 2 Bit # (6) */
+#define PORT_P2_ALTSEL0_P6 (0x1 << 6)
+#define PORT_P2_ALTSEL0_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P2_ALTSEL0_P6_GET(val) ((((val) & PORT_P2_ALTSEL0_P6) >> 6) & 0x1)
+#define PORT_P2_ALTSEL0_P6_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P6) | (((val) & 0x1) << 6))
+/* Alternate Function at Port 2 Bit # (5) */
+#define PORT_P2_ALTSEL0_P5 (0x1 << 5)
+#define PORT_P2_ALTSEL0_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P2_ALTSEL0_P5_GET(val) ((((val) & PORT_P2_ALTSEL0_P5) >> 5) & 0x1)
+#define PORT_P2_ALTSEL0_P5_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P5) | (((val) & 0x1) << 5))
+/* Alternate Function at Port 2 Bit # (4) */
+#define PORT_P2_ALTSEL0_P4 (0x1 << 4)
+#define PORT_P2_ALTSEL0_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P2_ALTSEL0_P4_GET(val) ((((val) & PORT_P2_ALTSEL0_P4) >> 4) & 0x1)
+#define PORT_P2_ALTSEL0_P4_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P4) | (((val) & 0x1) << 4))
+/* Alternate Function at Port 2 Bit # (3) */
+#define PORT_P2_ALTSEL0_P3 (0x1 << 3)
+#define PORT_P2_ALTSEL0_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P2_ALTSEL0_P3_GET(val) ((((val) & PORT_P2_ALTSEL0_P3) >> 3) & 0x1)
+#define PORT_P2_ALTSEL0_P3_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P3) | (((val) & 0x1) << 3))
+/* Alternate Function at Port 2 Bit # (2) */
+#define PORT_P2_ALTSEL0_P2 (0x1 << 2)
+#define PORT_P2_ALTSEL0_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P2_ALTSEL0_P2_GET(val) ((((val) & PORT_P2_ALTSEL0_P2) >> 2) & 0x1)
+#define PORT_P2_ALTSEL0_P2_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P2) | (((val) & 0x1) << 2))
+/* Alternate Function at Port 2 Bit # (1) */
+#define PORT_P2_ALTSEL0_P1 (0x1 << 1)
+#define PORT_P2_ALTSEL0_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P2_ALTSEL0_P1_GET(val) ((((val) & PORT_P2_ALTSEL0_P1) >> 1) & 0x1)
+#define PORT_P2_ALTSEL0_P1_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P1) | (((val) & 0x1) << 1))
+/* Alternate Function at Port 2 Bit # (0) */
+#define PORT_P2_ALTSEL0_P0 (0x1)
+#define PORT_P2_ALTSEL0_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P2_ALTSEL0_P0_GET(val) ((((val) & PORT_P2_ALTSEL0_P0) >> 0) & 0x1)
+#define PORT_P2_ALTSEL0_P0_SET(reg,val) (reg) = ((reg & ~PORT_P2_ALTSEL0_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 2 Pull Up Device Enable Register
+ ******************************************************************************/
+
+/* Pull Up Device Enable at Port 2 Bit # (19) */
+#define PORT_P2_PUEN_P19 (0x1 << 19)
+#define PORT_P2_PUEN_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P2_PUEN_P19_GET(val) ((((val) & PORT_P2_PUEN_P19) >> 19) & 0x1)
+#define PORT_P2_PUEN_P19_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P19) | (((val) & 0x1) << 19))
+/* Pull Up Device Enable at Port 2 Bit # (18) */
+#define PORT_P2_PUEN_P18 (0x1 << 18)
+#define PORT_P2_PUEN_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P2_PUEN_P18_GET(val) ((((val) & PORT_P2_PUEN_P18) >> 18) & 0x1)
+#define PORT_P2_PUEN_P18_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P18) | (((val) & 0x1) << 18))
+/* Pull Up Device Enable at Port 2 Bit # (17) */
+#define PORT_P2_PUEN_P17 (0x1 << 17)
+#define PORT_P2_PUEN_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P2_PUEN_P17_GET(val) ((((val) & PORT_P2_PUEN_P17) >> 17) & 0x1)
+#define PORT_P2_PUEN_P17_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P17) | (((val) & 0x1) << 17))
+/* Pull Up Device Enable at Port 2 Bit # (16) */
+#define PORT_P2_PUEN_P16 (0x1 << 16)
+#define PORT_P2_PUEN_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P2_PUEN_P16_GET(val) ((((val) & PORT_P2_PUEN_P16) >> 16) & 0x1)
+#define PORT_P2_PUEN_P16_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P16) | (((val) & 0x1) << 16))
+/* Pull Up Device Enable at Port 2 Bit # (15) */
+#define PORT_P2_PUEN_P15 (0x1 << 15)
+#define PORT_P2_PUEN_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P2_PUEN_P15_GET(val) ((((val) & PORT_P2_PUEN_P15) >> 15) & 0x1)
+#define PORT_P2_PUEN_P15_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P15) | (((val) & 0x1) << 15))
+/* Pull Up Device Enable at Port 2 Bit # (14) */
+#define PORT_P2_PUEN_P14 (0x1 << 14)
+#define PORT_P2_PUEN_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P2_PUEN_P14_GET(val) ((((val) & PORT_P2_PUEN_P14) >> 14) & 0x1)
+#define PORT_P2_PUEN_P14_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P14) | (((val) & 0x1) << 14))
+/* Pull Up Device Enable at Port 2 Bit # (13) */
+#define PORT_P2_PUEN_P13 (0x1 << 13)
+#define PORT_P2_PUEN_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P2_PUEN_P13_GET(val) ((((val) & PORT_P2_PUEN_P13) >> 13) & 0x1)
+#define PORT_P2_PUEN_P13_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P13) | (((val) & 0x1) << 13))
+/* Pull Up Device Enable at Port 2 Bit # (12) */
+#define PORT_P2_PUEN_P12 (0x1 << 12)
+#define PORT_P2_PUEN_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P2_PUEN_P12_GET(val) ((((val) & PORT_P2_PUEN_P12) >> 12) & 0x1)
+#define PORT_P2_PUEN_P12_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P12) | (((val) & 0x1) << 12))
+/* Pull Up Device Enable at Port 2 Bit # (11) */
+#define PORT_P2_PUEN_P11 (0x1 << 11)
+#define PORT_P2_PUEN_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P2_PUEN_P11_GET(val) ((((val) & PORT_P2_PUEN_P11) >> 11) & 0x1)
+#define PORT_P2_PUEN_P11_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P11) | (((val) & 0x1) << 11))
+/* Pull Up Device Enable at Port 2 Bit # (10) */
+#define PORT_P2_PUEN_P10 (0x1 << 10)
+#define PORT_P2_PUEN_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P2_PUEN_P10_GET(val) ((((val) & PORT_P2_PUEN_P10) >> 10) & 0x1)
+#define PORT_P2_PUEN_P10_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P10) | (((val) & 0x1) << 10))
+/* Pull Up Device Enable at Port 2 Bit # (9) */
+#define PORT_P2_PUEN_P9 (0x1 << 9)
+#define PORT_P2_PUEN_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P2_PUEN_P9_GET(val) ((((val) & PORT_P2_PUEN_P9) >> 9) & 0x1)
+#define PORT_P2_PUEN_P9_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P9) | (((val) & 0x1) << 9))
+/* Pull Up Device Enable at Port 2 Bit # (8) */
+#define PORT_P2_PUEN_P8 (0x1 << 8)
+#define PORT_P2_PUEN_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P2_PUEN_P8_GET(val) ((((val) & PORT_P2_PUEN_P8) >> 8) & 0x1)
+#define PORT_P2_PUEN_P8_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P8) | (((val) & 0x1) << 8))
+/* Pull Up Device Enable at Port 2 Bit # (7) */
+#define PORT_P2_PUEN_P7 (0x1 << 7)
+#define PORT_P2_PUEN_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P2_PUEN_P7_GET(val) ((((val) & PORT_P2_PUEN_P7) >> 7) & 0x1)
+#define PORT_P2_PUEN_P7_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P7) | (((val) & 0x1) << 7))
+/* Pull Up Device Enable at Port 2 Bit # (6) */
+#define PORT_P2_PUEN_P6 (0x1 << 6)
+#define PORT_P2_PUEN_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P2_PUEN_P6_GET(val) ((((val) & PORT_P2_PUEN_P6) >> 6) & 0x1)
+#define PORT_P2_PUEN_P6_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P6) | (((val) & 0x1) << 6))
+/* Pull Up Device Enable at Port 2 Bit # (5) */
+#define PORT_P2_PUEN_P5 (0x1 << 5)
+#define PORT_P2_PUEN_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P2_PUEN_P5_GET(val) ((((val) & PORT_P2_PUEN_P5) >> 5) & 0x1)
+#define PORT_P2_PUEN_P5_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P5) | (((val) & 0x1) << 5))
+/* Pull Up Device Enable at Port 2 Bit # (4) */
+#define PORT_P2_PUEN_P4 (0x1 << 4)
+#define PORT_P2_PUEN_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P2_PUEN_P4_GET(val) ((((val) & PORT_P2_PUEN_P4) >> 4) & 0x1)
+#define PORT_P2_PUEN_P4_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P4) | (((val) & 0x1) << 4))
+/* Pull Up Device Enable at Port 2 Bit # (3) */
+#define PORT_P2_PUEN_P3 (0x1 << 3)
+#define PORT_P2_PUEN_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P2_PUEN_P3_GET(val) ((((val) & PORT_P2_PUEN_P3) >> 3) & 0x1)
+#define PORT_P2_PUEN_P3_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P3) | (((val) & 0x1) << 3))
+/* Pull Up Device Enable at Port 2 Bit # (2) */
+#define PORT_P2_PUEN_P2 (0x1 << 2)
+#define PORT_P2_PUEN_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P2_PUEN_P2_GET(val) ((((val) & PORT_P2_PUEN_P2) >> 2) & 0x1)
+#define PORT_P2_PUEN_P2_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P2) | (((val) & 0x1) << 2))
+/* Pull Up Device Enable at Port 2 Bit # (1) */
+#define PORT_P2_PUEN_P1 (0x1 << 1)
+#define PORT_P2_PUEN_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P2_PUEN_P1_GET(val) ((((val) & PORT_P2_PUEN_P1) >> 1) & 0x1)
+#define PORT_P2_PUEN_P1_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P1) | (((val) & 0x1) << 1))
+/* Pull Up Device Enable at Port 2 Bit # (0) */
+#define PORT_P2_PUEN_P0 (0x1)
+#define PORT_P2_PUEN_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P2_PUEN_P0_GET(val) ((((val) & PORT_P2_PUEN_P0) >> 0) & 0x1)
+#define PORT_P2_PUEN_P0_SET(reg,val) (reg) = ((reg & ~PORT_P2_PUEN_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 3 Data Output Register
+ ******************************************************************************/
+
+/* Port 3 Pin # Output Value (19) */
+#define PORT_P3_OUT_P19 (0x1 << 19)
+#define PORT_P3_OUT_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P3_OUT_P19_GET(val) ((((val) & PORT_P3_OUT_P19) >> 19) & 0x1)
+#define PORT_P3_OUT_P19_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P19) | (((val) & 0x1) << 19))
+/* Port 3 Pin # Output Value (18) */
+#define PORT_P3_OUT_P18 (0x1 << 18)
+#define PORT_P3_OUT_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P3_OUT_P18_GET(val) ((((val) & PORT_P3_OUT_P18) >> 18) & 0x1)
+#define PORT_P3_OUT_P18_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P18) | (((val) & 0x1) << 18))
+/* Port 3 Pin # Output Value (17) */
+#define PORT_P3_OUT_P17 (0x1 << 17)
+#define PORT_P3_OUT_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P3_OUT_P17_GET(val) ((((val) & PORT_P3_OUT_P17) >> 17) & 0x1)
+#define PORT_P3_OUT_P17_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P17) | (((val) & 0x1) << 17))
+/* Port 3 Pin # Output Value (16) */
+#define PORT_P3_OUT_P16 (0x1 << 16)
+#define PORT_P3_OUT_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P3_OUT_P16_GET(val) ((((val) & PORT_P3_OUT_P16) >> 16) & 0x1)
+#define PORT_P3_OUT_P16_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P16) | (((val) & 0x1) << 16))
+/* Port 3 Pin # Output Value (15) */
+#define PORT_P3_OUT_P15 (0x1 << 15)
+#define PORT_P3_OUT_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P3_OUT_P15_GET(val) ((((val) & PORT_P3_OUT_P15) >> 15) & 0x1)
+#define PORT_P3_OUT_P15_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P15) | (((val) & 0x1) << 15))
+/* Port 3 Pin # Output Value (14) */
+#define PORT_P3_OUT_P14 (0x1 << 14)
+#define PORT_P3_OUT_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P3_OUT_P14_GET(val) ((((val) & PORT_P3_OUT_P14) >> 14) & 0x1)
+#define PORT_P3_OUT_P14_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P14) | (((val) & 0x1) << 14))
+/* Port 3 Pin # Output Value (13) */
+#define PORT_P3_OUT_P13 (0x1 << 13)
+#define PORT_P3_OUT_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P3_OUT_P13_GET(val) ((((val) & PORT_P3_OUT_P13) >> 13) & 0x1)
+#define PORT_P3_OUT_P13_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P13) | (((val) & 0x1) << 13))
+/* Port 3 Pin # Output Value (12) */
+#define PORT_P3_OUT_P12 (0x1 << 12)
+#define PORT_P3_OUT_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P3_OUT_P12_GET(val) ((((val) & PORT_P3_OUT_P12) >> 12) & 0x1)
+#define PORT_P3_OUT_P12_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P12) | (((val) & 0x1) << 12))
+/* Port 3 Pin # Output Value (11) */
+#define PORT_P3_OUT_P11 (0x1 << 11)
+#define PORT_P3_OUT_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P3_OUT_P11_GET(val) ((((val) & PORT_P3_OUT_P11) >> 11) & 0x1)
+#define PORT_P3_OUT_P11_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P11) | (((val) & 0x1) << 11))
+/* Port 3 Pin # Output Value (10) */
+#define PORT_P3_OUT_P10 (0x1 << 10)
+#define PORT_P3_OUT_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P3_OUT_P10_GET(val) ((((val) & PORT_P3_OUT_P10) >> 10) & 0x1)
+#define PORT_P3_OUT_P10_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P10) | (((val) & 0x1) << 10))
+/* Port 3 Pin # Output Value (9) */
+#define PORT_P3_OUT_P9 (0x1 << 9)
+#define PORT_P3_OUT_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P3_OUT_P9_GET(val) ((((val) & PORT_P3_OUT_P9) >> 9) & 0x1)
+#define PORT_P3_OUT_P9_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P9) | (((val) & 0x1) << 9))
+/* Port 3 Pin # Output Value (8) */
+#define PORT_P3_OUT_P8 (0x1 << 8)
+#define PORT_P3_OUT_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P3_OUT_P8_GET(val) ((((val) & PORT_P3_OUT_P8) >> 8) & 0x1)
+#define PORT_P3_OUT_P8_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P8) | (((val) & 0x1) << 8))
+/* Port 3 Pin # Output Value (7) */
+#define PORT_P3_OUT_P7 (0x1 << 7)
+#define PORT_P3_OUT_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P3_OUT_P7_GET(val) ((((val) & PORT_P3_OUT_P7) >> 7) & 0x1)
+#define PORT_P3_OUT_P7_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P7) | (((val) & 0x1) << 7))
+/* Port 3 Pin # Output Value (6) */
+#define PORT_P3_OUT_P6 (0x1 << 6)
+#define PORT_P3_OUT_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P3_OUT_P6_GET(val) ((((val) & PORT_P3_OUT_P6) >> 6) & 0x1)
+#define PORT_P3_OUT_P6_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P6) | (((val) & 0x1) << 6))
+/* Port 3 Pin # Output Value (5) */
+#define PORT_P3_OUT_P5 (0x1 << 5)
+#define PORT_P3_OUT_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P3_OUT_P5_GET(val) ((((val) & PORT_P3_OUT_P5) >> 5) & 0x1)
+#define PORT_P3_OUT_P5_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P5) | (((val) & 0x1) << 5))
+/* Port 3 Pin # Output Value (4) */
+#define PORT_P3_OUT_P4 (0x1 << 4)
+#define PORT_P3_OUT_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P3_OUT_P4_GET(val) ((((val) & PORT_P3_OUT_P4) >> 4) & 0x1)
+#define PORT_P3_OUT_P4_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P4) | (((val) & 0x1) << 4))
+/* Port 3 Pin # Output Value (3) */
+#define PORT_P3_OUT_P3 (0x1 << 3)
+#define PORT_P3_OUT_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P3_OUT_P3_GET(val) ((((val) & PORT_P3_OUT_P3) >> 3) & 0x1)
+#define PORT_P3_OUT_P3_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P3) | (((val) & 0x1) << 3))
+/* Port 3 Pin # Output Value (2) */
+#define PORT_P3_OUT_P2 (0x1 << 2)
+#define PORT_P3_OUT_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P3_OUT_P2_GET(val) ((((val) & PORT_P3_OUT_P2) >> 2) & 0x1)
+#define PORT_P3_OUT_P2_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P2) | (((val) & 0x1) << 2))
+/* Port 3 Pin # Output Value (1) */
+#define PORT_P3_OUT_P1 (0x1 << 1)
+#define PORT_P3_OUT_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P3_OUT_P1_GET(val) ((((val) & PORT_P3_OUT_P1) >> 1) & 0x1)
+#define PORT_P3_OUT_P1_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P1) | (((val) & 0x1) << 1))
+/* Port 3 Pin # Output Value (0) */
+#define PORT_P3_OUT_P0 (0x1)
+#define PORT_P3_OUT_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P3_OUT_P0_GET(val) ((((val) & PORT_P3_OUT_P0) >> 0) & 0x1)
+#define PORT_P3_OUT_P0_SET(reg,val) (reg) = ((reg & ~PORT_P3_OUT_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 3 Data Input Register
+ ******************************************************************************/
+
+/* Port 3 Pin # Latched Input Value (19) */
+#define PORT_P3_IN_P19 (0x1 << 19)
+#define PORT_P3_IN_P19_GET(val) ((((val) & PORT_P3_IN_P19) >> 19) & 0x1)
+/* Port 3 Pin # Latched Input Value (18) */
+#define PORT_P3_IN_P18 (0x1 << 18)
+#define PORT_P3_IN_P18_GET(val) ((((val) & PORT_P3_IN_P18) >> 18) & 0x1)
+/* Port 3 Pin # Latched Input Value (17) */
+#define PORT_P3_IN_P17 (0x1 << 17)
+#define PORT_P3_IN_P17_GET(val) ((((val) & PORT_P3_IN_P17) >> 17) & 0x1)
+/* Port 3 Pin # Latched Input Value (16) */
+#define PORT_P3_IN_P16 (0x1 << 16)
+#define PORT_P3_IN_P16_GET(val) ((((val) & PORT_P3_IN_P16) >> 16) & 0x1)
+/* Port 3 Pin # Latched Input Value (15) */
+#define PORT_P3_IN_P15 (0x1 << 15)
+#define PORT_P3_IN_P15_GET(val) ((((val) & PORT_P3_IN_P15) >> 15) & 0x1)
+/* Port 3 Pin # Latched Input Value (14) */
+#define PORT_P3_IN_P14 (0x1 << 14)
+#define PORT_P3_IN_P14_GET(val) ((((val) & PORT_P3_IN_P14) >> 14) & 0x1)
+/* Port 3 Pin # Latched Input Value (13) */
+#define PORT_P3_IN_P13 (0x1 << 13)
+#define PORT_P3_IN_P13_GET(val) ((((val) & PORT_P3_IN_P13) >> 13) & 0x1)
+/* Port 3 Pin # Latched Input Value (12) */
+#define PORT_P3_IN_P12 (0x1 << 12)
+#define PORT_P3_IN_P12_GET(val) ((((val) & PORT_P3_IN_P12) >> 12) & 0x1)
+/* Port 3 Pin # Latched Input Value (11) */
+#define PORT_P3_IN_P11 (0x1 << 11)
+#define PORT_P3_IN_P11_GET(val) ((((val) & PORT_P3_IN_P11) >> 11) & 0x1)
+/* Port 3 Pin # Latched Input Value (10) */
+#define PORT_P3_IN_P10 (0x1 << 10)
+#define PORT_P3_IN_P10_GET(val) ((((val) & PORT_P3_IN_P10) >> 10) & 0x1)
+/* Port 3 Pin # Latched Input Value (9) */
+#define PORT_P3_IN_P9 (0x1 << 9)
+#define PORT_P3_IN_P9_GET(val) ((((val) & PORT_P3_IN_P9) >> 9) & 0x1)
+/* Port 3 Pin # Latched Input Value (8) */
+#define PORT_P3_IN_P8 (0x1 << 8)
+#define PORT_P3_IN_P8_GET(val) ((((val) & PORT_P3_IN_P8) >> 8) & 0x1)
+/* Port 3 Pin # Latched Input Value (7) */
+#define PORT_P3_IN_P7 (0x1 << 7)
+#define PORT_P3_IN_P7_GET(val) ((((val) & PORT_P3_IN_P7) >> 7) & 0x1)
+/* Port 3 Pin # Latched Input Value (6) */
+#define PORT_P3_IN_P6 (0x1 << 6)
+#define PORT_P3_IN_P6_GET(val) ((((val) & PORT_P3_IN_P6) >> 6) & 0x1)
+/* Port 3 Pin # Latched Input Value (5) */
+#define PORT_P3_IN_P5 (0x1 << 5)
+#define PORT_P3_IN_P5_GET(val) ((((val) & PORT_P3_IN_P5) >> 5) & 0x1)
+/* Port 3 Pin # Latched Input Value (4) */
+#define PORT_P3_IN_P4 (0x1 << 4)
+#define PORT_P3_IN_P4_GET(val) ((((val) & PORT_P3_IN_P4) >> 4) & 0x1)
+/* Port 3 Pin # Latched Input Value (3) */
+#define PORT_P3_IN_P3 (0x1 << 3)
+#define PORT_P3_IN_P3_GET(val) ((((val) & PORT_P3_IN_P3) >> 3) & 0x1)
+/* Port 3 Pin # Latched Input Value (2) */
+#define PORT_P3_IN_P2 (0x1 << 2)
+#define PORT_P3_IN_P2_GET(val) ((((val) & PORT_P3_IN_P2) >> 2) & 0x1)
+/* Port 3 Pin # Latched Input Value (1) */
+#define PORT_P3_IN_P1 (0x1 << 1)
+#define PORT_P3_IN_P1_GET(val) ((((val) & PORT_P3_IN_P1) >> 1) & 0x1)
+/* Port 3 Pin # Latched Input Value (0) */
+#define PORT_P3_IN_P0 (0x1)
+#define PORT_P3_IN_P0_GET(val) ((((val) & PORT_P3_IN_P0) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Port 3 Direction Register
+ ******************************************************************************/
+
+/* Port 3 Pin #Direction Control (19) */
+#define PORT_P3_DIR_P19 (0x1 << 19)
+#define PORT_P3_DIR_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P3_DIR_P19_GET(val) ((((val) & PORT_P3_DIR_P19) >> 19) & 0x1)
+#define PORT_P3_DIR_P19_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P19) | (((val) & 0x1) << 19))
+/* Port 3 Pin #Direction Control (18) */
+#define PORT_P3_DIR_P18 (0x1 << 18)
+#define PORT_P3_DIR_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P3_DIR_P18_GET(val) ((((val) & PORT_P3_DIR_P18) >> 18) & 0x1)
+#define PORT_P3_DIR_P18_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P18) | (((val) & 0x1) << 18))
+/* Port 3 Pin #Direction Control (17) */
+#define PORT_P3_DIR_P17 (0x1 << 17)
+#define PORT_P3_DIR_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P3_DIR_P17_GET(val) ((((val) & PORT_P3_DIR_P17) >> 17) & 0x1)
+#define PORT_P3_DIR_P17_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P17) | (((val) & 0x1) << 17))
+/* Port 3 Pin #Direction Control (16) */
+#define PORT_P3_DIR_P16 (0x1 << 16)
+#define PORT_P3_DIR_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P3_DIR_P16_GET(val) ((((val) & PORT_P3_DIR_P16) >> 16) & 0x1)
+#define PORT_P3_DIR_P16_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P16) | (((val) & 0x1) << 16))
+/* Port 3 Pin #Direction Control (15) */
+#define PORT_P3_DIR_P15 (0x1 << 15)
+#define PORT_P3_DIR_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P3_DIR_P15_GET(val) ((((val) & PORT_P3_DIR_P15) >> 15) & 0x1)
+#define PORT_P3_DIR_P15_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P15) | (((val) & 0x1) << 15))
+/* Port 3 Pin #Direction Control (14) */
+#define PORT_P3_DIR_P14 (0x1 << 14)
+#define PORT_P3_DIR_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P3_DIR_P14_GET(val) ((((val) & PORT_P3_DIR_P14) >> 14) & 0x1)
+#define PORT_P3_DIR_P14_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P14) | (((val) & 0x1) << 14))
+/* Port 3 Pin #Direction Control (13) */
+#define PORT_P3_DIR_P13 (0x1 << 13)
+#define PORT_P3_DIR_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P3_DIR_P13_GET(val) ((((val) & PORT_P3_DIR_P13) >> 13) & 0x1)
+#define PORT_P3_DIR_P13_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P13) | (((val) & 0x1) << 13))
+/* Port 3 Pin #Direction Control (12) */
+#define PORT_P3_DIR_P12 (0x1 << 12)
+#define PORT_P3_DIR_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P3_DIR_P12_GET(val) ((((val) & PORT_P3_DIR_P12) >> 12) & 0x1)
+#define PORT_P3_DIR_P12_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P12) | (((val) & 0x1) << 12))
+/* Port 3 Pin #Direction Control (11) */
+#define PORT_P3_DIR_P11 (0x1 << 11)
+#define PORT_P3_DIR_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P3_DIR_P11_GET(val) ((((val) & PORT_P3_DIR_P11) >> 11) & 0x1)
+#define PORT_P3_DIR_P11_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P11) | (((val) & 0x1) << 11))
+/* Port 3 Pin #Direction Control (10) */
+#define PORT_P3_DIR_P10 (0x1 << 10)
+#define PORT_P3_DIR_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P3_DIR_P10_GET(val) ((((val) & PORT_P3_DIR_P10) >> 10) & 0x1)
+#define PORT_P3_DIR_P10_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P10) | (((val) & 0x1) << 10))
+/* Port 3 Pin #Direction Control (9) */
+#define PORT_P3_DIR_P9 (0x1 << 9)
+#define PORT_P3_DIR_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P3_DIR_P9_GET(val) ((((val) & PORT_P3_DIR_P9) >> 9) & 0x1)
+#define PORT_P3_DIR_P9_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P9) | (((val) & 0x1) << 9))
+/* Port 3 Pin #Direction Control (8) */
+#define PORT_P3_DIR_P8 (0x1 << 8)
+#define PORT_P3_DIR_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P3_DIR_P8_GET(val) ((((val) & PORT_P3_DIR_P8) >> 8) & 0x1)
+#define PORT_P3_DIR_P8_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P8) | (((val) & 0x1) << 8))
+/* Port 3 Pin #Direction Control (7) */
+#define PORT_P3_DIR_P7 (0x1 << 7)
+#define PORT_P3_DIR_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P3_DIR_P7_GET(val) ((((val) & PORT_P3_DIR_P7) >> 7) & 0x1)
+#define PORT_P3_DIR_P7_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P7) | (((val) & 0x1) << 7))
+/* Port 3 Pin #Direction Control (6) */
+#define PORT_P3_DIR_P6 (0x1 << 6)
+#define PORT_P3_DIR_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P3_DIR_P6_GET(val) ((((val) & PORT_P3_DIR_P6) >> 6) & 0x1)
+#define PORT_P3_DIR_P6_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P6) | (((val) & 0x1) << 6))
+/* Port 3 Pin #Direction Control (5) */
+#define PORT_P3_DIR_P5 (0x1 << 5)
+#define PORT_P3_DIR_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P3_DIR_P5_GET(val) ((((val) & PORT_P3_DIR_P5) >> 5) & 0x1)
+#define PORT_P3_DIR_P5_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P5) | (((val) & 0x1) << 5))
+/* Port 3 Pin #Direction Control (4) */
+#define PORT_P3_DIR_P4 (0x1 << 4)
+#define PORT_P3_DIR_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P3_DIR_P4_GET(val) ((((val) & PORT_P3_DIR_P4) >> 4) & 0x1)
+#define PORT_P3_DIR_P4_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P4) | (((val) & 0x1) << 4))
+/* Port 3 Pin #Direction Control (3) */
+#define PORT_P3_DIR_P3 (0x1 << 3)
+#define PORT_P3_DIR_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P3_DIR_P3_GET(val) ((((val) & PORT_P3_DIR_P3) >> 3) & 0x1)
+#define PORT_P3_DIR_P3_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P3) | (((val) & 0x1) << 3))
+/* Port 3 Pin #Direction Control (2) */
+#define PORT_P3_DIR_P2 (0x1 << 2)
+#define PORT_P3_DIR_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P3_DIR_P2_GET(val) ((((val) & PORT_P3_DIR_P2) >> 2) & 0x1)
+#define PORT_P3_DIR_P2_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P2) | (((val) & 0x1) << 2))
+/* Port 3 Pin #Direction Control (1) */
+#define PORT_P3_DIR_P1 (0x1 << 1)
+#define PORT_P3_DIR_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P3_DIR_P1_GET(val) ((((val) & PORT_P3_DIR_P1) >> 1) & 0x1)
+#define PORT_P3_DIR_P1_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P1) | (((val) & 0x1) << 1))
+/* Port 3 Pin #Direction Control (0) */
+#define PORT_P3_DIR_P0 (0x1)
+#define PORT_P3_DIR_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P3_DIR_P0_GET(val) ((((val) & PORT_P3_DIR_P0) >> 0) & 0x1)
+#define PORT_P3_DIR_P0_SET(reg,val) (reg) = ((reg & ~PORT_P3_DIR_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 3 Alternate Function Select Register 0
+ ******************************************************************************/
+
+/* Alternate Function at Port 3 Bit # (19) */
+#define PORT_P3_ALTSEL0_P19 (0x1 << 19)
+#define PORT_P3_ALTSEL0_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P3_ALTSEL0_P19_GET(val) ((((val) & PORT_P3_ALTSEL0_P19) >> 19) & 0x1)
+#define PORT_P3_ALTSEL0_P19_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P19) | (((val) & 0x1) << 19))
+/* Alternate Function at Port 3 Bit # (18) */
+#define PORT_P3_ALTSEL0_P18 (0x1 << 18)
+#define PORT_P3_ALTSEL0_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P3_ALTSEL0_P18_GET(val) ((((val) & PORT_P3_ALTSEL0_P18) >> 18) & 0x1)
+#define PORT_P3_ALTSEL0_P18_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P18) | (((val) & 0x1) << 18))
+/* Alternate Function at Port 3 Bit # (17) */
+#define PORT_P3_ALTSEL0_P17 (0x1 << 17)
+#define PORT_P3_ALTSEL0_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P3_ALTSEL0_P17_GET(val) ((((val) & PORT_P3_ALTSEL0_P17) >> 17) & 0x1)
+#define PORT_P3_ALTSEL0_P17_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P17) | (((val) & 0x1) << 17))
+/* Alternate Function at Port 3 Bit # (16) */
+#define PORT_P3_ALTSEL0_P16 (0x1 << 16)
+#define PORT_P3_ALTSEL0_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P3_ALTSEL0_P16_GET(val) ((((val) & PORT_P3_ALTSEL0_P16) >> 16) & 0x1)
+#define PORT_P3_ALTSEL0_P16_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P16) | (((val) & 0x1) << 16))
+/* Alternate Function at Port 3 Bit # (15) */
+#define PORT_P3_ALTSEL0_P15 (0x1 << 15)
+#define PORT_P3_ALTSEL0_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P3_ALTSEL0_P15_GET(val) ((((val) & PORT_P3_ALTSEL0_P15) >> 15) & 0x1)
+#define PORT_P3_ALTSEL0_P15_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P15) | (((val) & 0x1) << 15))
+/* Alternate Function at Port 3 Bit # (14) */
+#define PORT_P3_ALTSEL0_P14 (0x1 << 14)
+#define PORT_P3_ALTSEL0_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P3_ALTSEL0_P14_GET(val) ((((val) & PORT_P3_ALTSEL0_P14) >> 14) & 0x1)
+#define PORT_P3_ALTSEL0_P14_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P14) | (((val) & 0x1) << 14))
+/* Alternate Function at Port 3 Bit # (13) */
+#define PORT_P3_ALTSEL0_P13 (0x1 << 13)
+#define PORT_P3_ALTSEL0_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P3_ALTSEL0_P13_GET(val) ((((val) & PORT_P3_ALTSEL0_P13) >> 13) & 0x1)
+#define PORT_P3_ALTSEL0_P13_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P13) | (((val) & 0x1) << 13))
+/* Alternate Function at Port 3 Bit # (12) */
+#define PORT_P3_ALTSEL0_P12 (0x1 << 12)
+#define PORT_P3_ALTSEL0_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P3_ALTSEL0_P12_GET(val) ((((val) & PORT_P3_ALTSEL0_P12) >> 12) & 0x1)
+#define PORT_P3_ALTSEL0_P12_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P12) | (((val) & 0x1) << 12))
+/* Alternate Function at Port 3 Bit # (11) */
+#define PORT_P3_ALTSEL0_P11 (0x1 << 11)
+#define PORT_P3_ALTSEL0_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P3_ALTSEL0_P11_GET(val) ((((val) & PORT_P3_ALTSEL0_P11) >> 11) & 0x1)
+#define PORT_P3_ALTSEL0_P11_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P11) | (((val) & 0x1) << 11))
+/* Alternate Function at Port 3 Bit # (10) */
+#define PORT_P3_ALTSEL0_P10 (0x1 << 10)
+#define PORT_P3_ALTSEL0_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P3_ALTSEL0_P10_GET(val) ((((val) & PORT_P3_ALTSEL0_P10) >> 10) & 0x1)
+#define PORT_P3_ALTSEL0_P10_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P10) | (((val) & 0x1) << 10))
+/* Alternate Function at Port 3 Bit # (9) */
+#define PORT_P3_ALTSEL0_P9 (0x1 << 9)
+#define PORT_P3_ALTSEL0_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P3_ALTSEL0_P9_GET(val) ((((val) & PORT_P3_ALTSEL0_P9) >> 9) & 0x1)
+#define PORT_P3_ALTSEL0_P9_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P9) | (((val) & 0x1) << 9))
+/* Alternate Function at Port 3 Bit # (8) */
+#define PORT_P3_ALTSEL0_P8 (0x1 << 8)
+#define PORT_P3_ALTSEL0_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P3_ALTSEL0_P8_GET(val) ((((val) & PORT_P3_ALTSEL0_P8) >> 8) & 0x1)
+#define PORT_P3_ALTSEL0_P8_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P8) | (((val) & 0x1) << 8))
+/* Alternate Function at Port 3 Bit # (7) */
+#define PORT_P3_ALTSEL0_P7 (0x1 << 7)
+#define PORT_P3_ALTSEL0_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P3_ALTSEL0_P7_GET(val) ((((val) & PORT_P3_ALTSEL0_P7) >> 7) & 0x1)
+#define PORT_P3_ALTSEL0_P7_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P7) | (((val) & 0x1) << 7))
+/* Alternate Function at Port 3 Bit # (6) */
+#define PORT_P3_ALTSEL0_P6 (0x1 << 6)
+#define PORT_P3_ALTSEL0_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P3_ALTSEL0_P6_GET(val) ((((val) & PORT_P3_ALTSEL0_P6) >> 6) & 0x1)
+#define PORT_P3_ALTSEL0_P6_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P6) | (((val) & 0x1) << 6))
+/* Alternate Function at Port 3 Bit # (5) */
+#define PORT_P3_ALTSEL0_P5 (0x1 << 5)
+#define PORT_P3_ALTSEL0_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P3_ALTSEL0_P5_GET(val) ((((val) & PORT_P3_ALTSEL0_P5) >> 5) & 0x1)
+#define PORT_P3_ALTSEL0_P5_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P5) | (((val) & 0x1) << 5))
+/* Alternate Function at Port 3 Bit # (4) */
+#define PORT_P3_ALTSEL0_P4 (0x1 << 4)
+#define PORT_P3_ALTSEL0_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P3_ALTSEL0_P4_GET(val) ((((val) & PORT_P3_ALTSEL0_P4) >> 4) & 0x1)
+#define PORT_P3_ALTSEL0_P4_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P4) | (((val) & 0x1) << 4))
+/* Alternate Function at Port 3 Bit # (3) */
+#define PORT_P3_ALTSEL0_P3 (0x1 << 3)
+#define PORT_P3_ALTSEL0_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P3_ALTSEL0_P3_GET(val) ((((val) & PORT_P3_ALTSEL0_P3) >> 3) & 0x1)
+#define PORT_P3_ALTSEL0_P3_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P3) | (((val) & 0x1) << 3))
+/* Alternate Function at Port 3 Bit # (2) */
+#define PORT_P3_ALTSEL0_P2 (0x1 << 2)
+#define PORT_P3_ALTSEL0_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P3_ALTSEL0_P2_GET(val) ((((val) & PORT_P3_ALTSEL0_P2) >> 2) & 0x1)
+#define PORT_P3_ALTSEL0_P2_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P2) | (((val) & 0x1) << 2))
+/* Alternate Function at Port 3 Bit # (1) */
+#define PORT_P3_ALTSEL0_P1 (0x1 << 1)
+#define PORT_P3_ALTSEL0_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P3_ALTSEL0_P1_GET(val) ((((val) & PORT_P3_ALTSEL0_P1) >> 1) & 0x1)
+#define PORT_P3_ALTSEL0_P1_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P1) | (((val) & 0x1) << 1))
+/* Alternate Function at Port 3 Bit # (0) */
+#define PORT_P3_ALTSEL0_P0 (0x1)
+#define PORT_P3_ALTSEL0_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P3_ALTSEL0_P0_GET(val) ((((val) & PORT_P3_ALTSEL0_P0) >> 0) & 0x1)
+#define PORT_P3_ALTSEL0_P0_SET(reg,val) (reg) = ((reg & ~PORT_P3_ALTSEL0_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 3 Pull Up Device Enable Register
+ ******************************************************************************/
+
+/* Pull Up Device Enable at Port 3 Bit # (19) */
+#define PORT_P3_PUEN_P19 (0x1 << 19)
+#define PORT_P3_PUEN_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P3_PUEN_P19_GET(val) ((((val) & PORT_P3_PUEN_P19) >> 19) & 0x1)
+#define PORT_P3_PUEN_P19_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P19) | (((val) & 0x1) << 19))
+/* Pull Up Device Enable at Port 3 Bit # (18) */
+#define PORT_P3_PUEN_P18 (0x1 << 18)
+#define PORT_P3_PUEN_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P3_PUEN_P18_GET(val) ((((val) & PORT_P3_PUEN_P18) >> 18) & 0x1)
+#define PORT_P3_PUEN_P18_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P18) | (((val) & 0x1) << 18))
+/* Pull Up Device Enable at Port 3 Bit # (17) */
+#define PORT_P3_PUEN_P17 (0x1 << 17)
+#define PORT_P3_PUEN_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P3_PUEN_P17_GET(val) ((((val) & PORT_P3_PUEN_P17) >> 17) & 0x1)
+#define PORT_P3_PUEN_P17_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P17) | (((val) & 0x1) << 17))
+/* Pull Up Device Enable at Port 3 Bit # (16) */
+#define PORT_P3_PUEN_P16 (0x1 << 16)
+#define PORT_P3_PUEN_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P3_PUEN_P16_GET(val) ((((val) & PORT_P3_PUEN_P16) >> 16) & 0x1)
+#define PORT_P3_PUEN_P16_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P16) | (((val) & 0x1) << 16))
+/* Pull Up Device Enable at Port 3 Bit # (15) */
+#define PORT_P3_PUEN_P15 (0x1 << 15)
+#define PORT_P3_PUEN_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P3_PUEN_P15_GET(val) ((((val) & PORT_P3_PUEN_P15) >> 15) & 0x1)
+#define PORT_P3_PUEN_P15_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P15) | (((val) & 0x1) << 15))
+/* Pull Up Device Enable at Port 3 Bit # (14) */
+#define PORT_P3_PUEN_P14 (0x1 << 14)
+#define PORT_P3_PUEN_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P3_PUEN_P14_GET(val) ((((val) & PORT_P3_PUEN_P14) >> 14) & 0x1)
+#define PORT_P3_PUEN_P14_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P14) | (((val) & 0x1) << 14))
+/* Pull Up Device Enable at Port 3 Bit # (13) */
+#define PORT_P3_PUEN_P13 (0x1 << 13)
+#define PORT_P3_PUEN_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P3_PUEN_P13_GET(val) ((((val) & PORT_P3_PUEN_P13) >> 13) & 0x1)
+#define PORT_P3_PUEN_P13_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P13) | (((val) & 0x1) << 13))
+/* Pull Up Device Enable at Port 3 Bit # (12) */
+#define PORT_P3_PUEN_P12 (0x1 << 12)
+#define PORT_P3_PUEN_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P3_PUEN_P12_GET(val) ((((val) & PORT_P3_PUEN_P12) >> 12) & 0x1)
+#define PORT_P3_PUEN_P12_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P12) | (((val) & 0x1) << 12))
+/* Pull Up Device Enable at Port 3 Bit # (11) */
+#define PORT_P3_PUEN_P11 (0x1 << 11)
+#define PORT_P3_PUEN_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P3_PUEN_P11_GET(val) ((((val) & PORT_P3_PUEN_P11) >> 11) & 0x1)
+#define PORT_P3_PUEN_P11_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P11) | (((val) & 0x1) << 11))
+/* Pull Up Device Enable at Port 3 Bit # (10) */
+#define PORT_P3_PUEN_P10 (0x1 << 10)
+#define PORT_P3_PUEN_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P3_PUEN_P10_GET(val) ((((val) & PORT_P3_PUEN_P10) >> 10) & 0x1)
+#define PORT_P3_PUEN_P10_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P10) | (((val) & 0x1) << 10))
+/* Pull Up Device Enable at Port 3 Bit # (9) */
+#define PORT_P3_PUEN_P9 (0x1 << 9)
+#define PORT_P3_PUEN_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P3_PUEN_P9_GET(val) ((((val) & PORT_P3_PUEN_P9) >> 9) & 0x1)
+#define PORT_P3_PUEN_P9_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P9) | (((val) & 0x1) << 9))
+/* Pull Up Device Enable at Port 3 Bit # (8) */
+#define PORT_P3_PUEN_P8 (0x1 << 8)
+#define PORT_P3_PUEN_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P3_PUEN_P8_GET(val) ((((val) & PORT_P3_PUEN_P8) >> 8) & 0x1)
+#define PORT_P3_PUEN_P8_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P8) | (((val) & 0x1) << 8))
+/* Pull Up Device Enable at Port 3 Bit # (7) */
+#define PORT_P3_PUEN_P7 (0x1 << 7)
+#define PORT_P3_PUEN_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P3_PUEN_P7_GET(val) ((((val) & PORT_P3_PUEN_P7) >> 7) & 0x1)
+#define PORT_P3_PUEN_P7_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P7) | (((val) & 0x1) << 7))
+/* Pull Up Device Enable at Port 3 Bit # (6) */
+#define PORT_P3_PUEN_P6 (0x1 << 6)
+#define PORT_P3_PUEN_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P3_PUEN_P6_GET(val) ((((val) & PORT_P3_PUEN_P6) >> 6) & 0x1)
+#define PORT_P3_PUEN_P6_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P6) | (((val) & 0x1) << 6))
+/* Pull Up Device Enable at Port 3 Bit # (5) */
+#define PORT_P3_PUEN_P5 (0x1 << 5)
+#define PORT_P3_PUEN_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P3_PUEN_P5_GET(val) ((((val) & PORT_P3_PUEN_P5) >> 5) & 0x1)
+#define PORT_P3_PUEN_P5_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P5) | (((val) & 0x1) << 5))
+/* Pull Up Device Enable at Port 3 Bit # (4) */
+#define PORT_P3_PUEN_P4 (0x1 << 4)
+#define PORT_P3_PUEN_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P3_PUEN_P4_GET(val) ((((val) & PORT_P3_PUEN_P4) >> 4) & 0x1)
+#define PORT_P3_PUEN_P4_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P4) | (((val) & 0x1) << 4))
+/* Pull Up Device Enable at Port 3 Bit # (3) */
+#define PORT_P3_PUEN_P3 (0x1 << 3)
+#define PORT_P3_PUEN_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P3_PUEN_P3_GET(val) ((((val) & PORT_P3_PUEN_P3) >> 3) & 0x1)
+#define PORT_P3_PUEN_P3_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P3) | (((val) & 0x1) << 3))
+/* Pull Up Device Enable at Port 3 Bit # (2) */
+#define PORT_P3_PUEN_P2 (0x1 << 2)
+#define PORT_P3_PUEN_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P3_PUEN_P2_GET(val) ((((val) & PORT_P3_PUEN_P2) >> 2) & 0x1)
+#define PORT_P3_PUEN_P2_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P2) | (((val) & 0x1) << 2))
+/* Pull Up Device Enable at Port 3 Bit # (1) */
+#define PORT_P3_PUEN_P1 (0x1 << 1)
+#define PORT_P3_PUEN_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P3_PUEN_P1_GET(val) ((((val) & PORT_P3_PUEN_P1) >> 1) & 0x1)
+#define PORT_P3_PUEN_P1_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P1) | (((val) & 0x1) << 1))
+/* Pull Up Device Enable at Port 3 Bit # (0) */
+#define PORT_P3_PUEN_P0 (0x1)
+#define PORT_P3_PUEN_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P3_PUEN_P0_GET(val) ((((val) & PORT_P3_PUEN_P0) >> 0) & 0x1)
+#define PORT_P3_PUEN_P0_SET(reg,val) (reg) = ((reg & ~PORT_P3_PUEN_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 4 Data Output Register
+ ******************************************************************************/
+
+/* Port 4 Pin # Output Value (23) */
+#define PORT_P4_OUT_P23 (0x1 << 23)
+#define PORT_P4_OUT_P23_VAL(val) (((val) & 0x1) << 23)
+#define PORT_P4_OUT_P23_GET(val) ((((val) & PORT_P4_OUT_P23) >> 23) & 0x1)
+#define PORT_P4_OUT_P23_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P23) | (((val) & 0x1) << 23))
+/* Port 4 Pin # Output Value (22) */
+#define PORT_P4_OUT_P22 (0x1 << 22)
+#define PORT_P4_OUT_P22_VAL(val) (((val) & 0x1) << 22)
+#define PORT_P4_OUT_P22_GET(val) ((((val) & PORT_P4_OUT_P22) >> 22) & 0x1)
+#define PORT_P4_OUT_P22_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P22) | (((val) & 0x1) << 22))
+/* Port 4 Pin # Output Value (21) */
+#define PORT_P4_OUT_P21 (0x1 << 21)
+#define PORT_P4_OUT_P21_VAL(val) (((val) & 0x1) << 21)
+#define PORT_P4_OUT_P21_GET(val) ((((val) & PORT_P4_OUT_P21) >> 21) & 0x1)
+#define PORT_P4_OUT_P21_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P21) | (((val) & 0x1) << 21))
+/* Port 4 Pin # Output Value (20) */
+#define PORT_P4_OUT_P20 (0x1 << 20)
+#define PORT_P4_OUT_P20_VAL(val) (((val) & 0x1) << 20)
+#define PORT_P4_OUT_P20_GET(val) ((((val) & PORT_P4_OUT_P20) >> 20) & 0x1)
+#define PORT_P4_OUT_P20_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P20) | (((val) & 0x1) << 20))
+/* Port 4 Pin # Output Value (19) */
+#define PORT_P4_OUT_P19 (0x1 << 19)
+#define PORT_P4_OUT_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P4_OUT_P19_GET(val) ((((val) & PORT_P4_OUT_P19) >> 19) & 0x1)
+#define PORT_P4_OUT_P19_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P19) | (((val) & 0x1) << 19))
+/* Port 4 Pin # Output Value (18) */
+#define PORT_P4_OUT_P18 (0x1 << 18)
+#define PORT_P4_OUT_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P4_OUT_P18_GET(val) ((((val) & PORT_P4_OUT_P18) >> 18) & 0x1)
+#define PORT_P4_OUT_P18_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P18) | (((val) & 0x1) << 18))
+/* Port 4 Pin # Output Value (17) */
+#define PORT_P4_OUT_P17 (0x1 << 17)
+#define PORT_P4_OUT_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P4_OUT_P17_GET(val) ((((val) & PORT_P4_OUT_P17) >> 17) & 0x1)
+#define PORT_P4_OUT_P17_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P17) | (((val) & 0x1) << 17))
+/* Port 4 Pin # Output Value (16) */
+#define PORT_P4_OUT_P16 (0x1 << 16)
+#define PORT_P4_OUT_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P4_OUT_P16_GET(val) ((((val) & PORT_P4_OUT_P16) >> 16) & 0x1)
+#define PORT_P4_OUT_P16_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P16) | (((val) & 0x1) << 16))
+/* Port 4 Pin # Output Value (15) */
+#define PORT_P4_OUT_P15 (0x1 << 15)
+#define PORT_P4_OUT_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P4_OUT_P15_GET(val) ((((val) & PORT_P4_OUT_P15) >> 15) & 0x1)
+#define PORT_P4_OUT_P15_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P15) | (((val) & 0x1) << 15))
+/* Port 4 Pin # Output Value (14) */
+#define PORT_P4_OUT_P14 (0x1 << 14)
+#define PORT_P4_OUT_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P4_OUT_P14_GET(val) ((((val) & PORT_P4_OUT_P14) >> 14) & 0x1)
+#define PORT_P4_OUT_P14_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P14) | (((val) & 0x1) << 14))
+/* Port 4 Pin # Output Value (13) */
+#define PORT_P4_OUT_P13 (0x1 << 13)
+#define PORT_P4_OUT_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P4_OUT_P13_GET(val) ((((val) & PORT_P4_OUT_P13) >> 13) & 0x1)
+#define PORT_P4_OUT_P13_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P13) | (((val) & 0x1) << 13))
+/* Port 4 Pin # Output Value (12) */
+#define PORT_P4_OUT_P12 (0x1 << 12)
+#define PORT_P4_OUT_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P4_OUT_P12_GET(val) ((((val) & PORT_P4_OUT_P12) >> 12) & 0x1)
+#define PORT_P4_OUT_P12_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P12) | (((val) & 0x1) << 12))
+/* Port 4 Pin # Output Value (11) */
+#define PORT_P4_OUT_P11 (0x1 << 11)
+#define PORT_P4_OUT_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P4_OUT_P11_GET(val) ((((val) & PORT_P4_OUT_P11) >> 11) & 0x1)
+#define PORT_P4_OUT_P11_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P11) | (((val) & 0x1) << 11))
+/* Port 4 Pin # Output Value (10) */
+#define PORT_P4_OUT_P10 (0x1 << 10)
+#define PORT_P4_OUT_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P4_OUT_P10_GET(val) ((((val) & PORT_P4_OUT_P10) >> 10) & 0x1)
+#define PORT_P4_OUT_P10_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P10) | (((val) & 0x1) << 10))
+/* Port 4 Pin # Output Value (9) */
+#define PORT_P4_OUT_P9 (0x1 << 9)
+#define PORT_P4_OUT_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P4_OUT_P9_GET(val) ((((val) & PORT_P4_OUT_P9) >> 9) & 0x1)
+#define PORT_P4_OUT_P9_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P9) | (((val) & 0x1) << 9))
+/* Port 4 Pin # Output Value (8) */
+#define PORT_P4_OUT_P8 (0x1 << 8)
+#define PORT_P4_OUT_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P4_OUT_P8_GET(val) ((((val) & PORT_P4_OUT_P8) >> 8) & 0x1)
+#define PORT_P4_OUT_P8_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P8) | (((val) & 0x1) << 8))
+/* Port 4 Pin # Output Value (7) */
+#define PORT_P4_OUT_P7 (0x1 << 7)
+#define PORT_P4_OUT_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P4_OUT_P7_GET(val) ((((val) & PORT_P4_OUT_P7) >> 7) & 0x1)
+#define PORT_P4_OUT_P7_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P7) | (((val) & 0x1) << 7))
+/* Port 4 Pin # Output Value (6) */
+#define PORT_P4_OUT_P6 (0x1 << 6)
+#define PORT_P4_OUT_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P4_OUT_P6_GET(val) ((((val) & PORT_P4_OUT_P6) >> 6) & 0x1)
+#define PORT_P4_OUT_P6_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P6) | (((val) & 0x1) << 6))
+/* Port 4 Pin # Output Value (5) */
+#define PORT_P4_OUT_P5 (0x1 << 5)
+#define PORT_P4_OUT_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P4_OUT_P5_GET(val) ((((val) & PORT_P4_OUT_P5) >> 5) & 0x1)
+#define PORT_P4_OUT_P5_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P5) | (((val) & 0x1) << 5))
+/* Port 4 Pin # Output Value (4) */
+#define PORT_P4_OUT_P4 (0x1 << 4)
+#define PORT_P4_OUT_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P4_OUT_P4_GET(val) ((((val) & PORT_P4_OUT_P4) >> 4) & 0x1)
+#define PORT_P4_OUT_P4_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P4) | (((val) & 0x1) << 4))
+/* Port 4 Pin # Output Value (3) */
+#define PORT_P4_OUT_P3 (0x1 << 3)
+#define PORT_P4_OUT_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P4_OUT_P3_GET(val) ((((val) & PORT_P4_OUT_P3) >> 3) & 0x1)
+#define PORT_P4_OUT_P3_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P3) | (((val) & 0x1) << 3))
+/* Port 4 Pin # Output Value (2) */
+#define PORT_P4_OUT_P2 (0x1 << 2)
+#define PORT_P4_OUT_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P4_OUT_P2_GET(val) ((((val) & PORT_P4_OUT_P2) >> 2) & 0x1)
+#define PORT_P4_OUT_P2_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P2) | (((val) & 0x1) << 2))
+/* Port 4 Pin # Output Value (1) */
+#define PORT_P4_OUT_P1 (0x1 << 1)
+#define PORT_P4_OUT_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P4_OUT_P1_GET(val) ((((val) & PORT_P4_OUT_P1) >> 1) & 0x1)
+#define PORT_P4_OUT_P1_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P1) | (((val) & 0x1) << 1))
+/* Port 4 Pin # Output Value (0) */
+#define PORT_P4_OUT_P0 (0x1)
+#define PORT_P4_OUT_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P4_OUT_P0_GET(val) ((((val) & PORT_P4_OUT_P0) >> 0) & 0x1)
+#define PORT_P4_OUT_P0_SET(reg,val) (reg) = ((reg & ~PORT_P4_OUT_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 4 Data Input Register
+ ******************************************************************************/
+
+/* Port 4 Pin # Latched Input Value (23) */
+#define PORT_P4_IN_P23 (0x1 << 23)
+#define PORT_P4_IN_P23_GET(val) ((((val) & PORT_P4_IN_P23) >> 23) & 0x1)
+/* Port 4 Pin # Latched Input Value (22) */
+#define PORT_P4_IN_P22 (0x1 << 22)
+#define PORT_P4_IN_P22_GET(val) ((((val) & PORT_P4_IN_P22) >> 22) & 0x1)
+/* Port 4 Pin # Latched Input Value (21) */
+#define PORT_P4_IN_P21 (0x1 << 21)
+#define PORT_P4_IN_P21_GET(val) ((((val) & PORT_P4_IN_P21) >> 21) & 0x1)
+/* Port 4 Pin # Latched Input Value (20) */
+#define PORT_P4_IN_P20 (0x1 << 20)
+#define PORT_P4_IN_P20_GET(val) ((((val) & PORT_P4_IN_P20) >> 20) & 0x1)
+/* Port 4 Pin # Latched Input Value (19) */
+#define PORT_P4_IN_P19 (0x1 << 19)
+#define PORT_P4_IN_P19_GET(val) ((((val) & PORT_P4_IN_P19) >> 19) & 0x1)
+/* Port 4 Pin # Latched Input Value (18) */
+#define PORT_P4_IN_P18 (0x1 << 18)
+#define PORT_P4_IN_P18_GET(val) ((((val) & PORT_P4_IN_P18) >> 18) & 0x1)
+/* Port 4 Pin # Latched Input Value (17) */
+#define PORT_P4_IN_P17 (0x1 << 17)
+#define PORT_P4_IN_P17_GET(val) ((((val) & PORT_P4_IN_P17) >> 17) & 0x1)
+/* Port 4 Pin # Latched Input Value (16) */
+#define PORT_P4_IN_P16 (0x1 << 16)
+#define PORT_P4_IN_P16_GET(val) ((((val) & PORT_P4_IN_P16) >> 16) & 0x1)
+/* Port 4 Pin # Latched Input Value (15) */
+#define PORT_P4_IN_P15 (0x1 << 15)
+#define PORT_P4_IN_P15_GET(val) ((((val) & PORT_P4_IN_P15) >> 15) & 0x1)
+/* Port 4 Pin # Latched Input Value (14) */
+#define PORT_P4_IN_P14 (0x1 << 14)
+#define PORT_P4_IN_P14_GET(val) ((((val) & PORT_P4_IN_P14) >> 14) & 0x1)
+/* Port 4 Pin # Latched Input Value (13) */
+#define PORT_P4_IN_P13 (0x1 << 13)
+#define PORT_P4_IN_P13_GET(val) ((((val) & PORT_P4_IN_P13) >> 13) & 0x1)
+/* Port 4 Pin # Latched Input Value (12) */
+#define PORT_P4_IN_P12 (0x1 << 12)
+#define PORT_P4_IN_P12_GET(val) ((((val) & PORT_P4_IN_P12) >> 12) & 0x1)
+/* Port 4 Pin # Latched Input Value (11) */
+#define PORT_P4_IN_P11 (0x1 << 11)
+#define PORT_P4_IN_P11_GET(val) ((((val) & PORT_P4_IN_P11) >> 11) & 0x1)
+/* Port 4 Pin # Latched Input Value (10) */
+#define PORT_P4_IN_P10 (0x1 << 10)
+#define PORT_P4_IN_P10_GET(val) ((((val) & PORT_P4_IN_P10) >> 10) & 0x1)
+/* Port 4 Pin # Latched Input Value (9) */
+#define PORT_P4_IN_P9 (0x1 << 9)
+#define PORT_P4_IN_P9_GET(val) ((((val) & PORT_P4_IN_P9) >> 9) & 0x1)
+/* Port 4 Pin # Latched Input Value (8) */
+#define PORT_P4_IN_P8 (0x1 << 8)
+#define PORT_P4_IN_P8_GET(val) ((((val) & PORT_P4_IN_P8) >> 8) & 0x1)
+/* Port 4 Pin # Latched Input Value (7) */
+#define PORT_P4_IN_P7 (0x1 << 7)
+#define PORT_P4_IN_P7_GET(val) ((((val) & PORT_P4_IN_P7) >> 7) & 0x1)
+/* Port 4 Pin # Latched Input Value (6) */
+#define PORT_P4_IN_P6 (0x1 << 6)
+#define PORT_P4_IN_P6_GET(val) ((((val) & PORT_P4_IN_P6) >> 6) & 0x1)
+/* Port 4 Pin # Latched Input Value (5) */
+#define PORT_P4_IN_P5 (0x1 << 5)
+#define PORT_P4_IN_P5_GET(val) ((((val) & PORT_P4_IN_P5) >> 5) & 0x1)
+/* Port 4 Pin # Latched Input Value (4) */
+#define PORT_P4_IN_P4 (0x1 << 4)
+#define PORT_P4_IN_P4_GET(val) ((((val) & PORT_P4_IN_P4) >> 4) & 0x1)
+/* Port 4 Pin # Latched Input Value (3) */
+#define PORT_P4_IN_P3 (0x1 << 3)
+#define PORT_P4_IN_P3_GET(val) ((((val) & PORT_P4_IN_P3) >> 3) & 0x1)
+/* Port 4 Pin # Latched Input Value (2) */
+#define PORT_P4_IN_P2 (0x1 << 2)
+#define PORT_P4_IN_P2_GET(val) ((((val) & PORT_P4_IN_P2) >> 2) & 0x1)
+/* Port 4 Pin # Latched Input Value (1) */
+#define PORT_P4_IN_P1 (0x1 << 1)
+#define PORT_P4_IN_P1_GET(val) ((((val) & PORT_P4_IN_P1) >> 1) & 0x1)
+/* Port 4 Pin # Latched Input Value (0) */
+#define PORT_P4_IN_P0 (0x1)
+#define PORT_P4_IN_P0_GET(val) ((((val) & PORT_P4_IN_P0) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Port 4 Direction Register
+ ******************************************************************************/
+
+/* Port 4 Pin #Direction Control (23) */
+#define PORT_P4_DIR_P23 (0x1 << 23)
+#define PORT_P4_DIR_P23_VAL(val) (((val) & 0x1) << 23)
+#define PORT_P4_DIR_P23_GET(val) ((((val) & PORT_P4_DIR_P23) >> 23) & 0x1)
+#define PORT_P4_DIR_P23_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P23) | (((val) & 0x1) << 23))
+/* Port 4 Pin #Direction Control (22) */
+#define PORT_P4_DIR_P22 (0x1 << 22)
+#define PORT_P4_DIR_P22_VAL(val) (((val) & 0x1) << 22)
+#define PORT_P4_DIR_P22_GET(val) ((((val) & PORT_P4_DIR_P22) >> 22) & 0x1)
+#define PORT_P4_DIR_P22_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P22) | (((val) & 0x1) << 22))
+/* Port 4 Pin #Direction Control (21) */
+#define PORT_P4_DIR_P21 (0x1 << 21)
+#define PORT_P4_DIR_P21_VAL(val) (((val) & 0x1) << 21)
+#define PORT_P4_DIR_P21_GET(val) ((((val) & PORT_P4_DIR_P21) >> 21) & 0x1)
+#define PORT_P4_DIR_P21_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P21) | (((val) & 0x1) << 21))
+/* Port 4 Pin #Direction Control (20) */
+#define PORT_P4_DIR_P20 (0x1 << 20)
+#define PORT_P4_DIR_P20_VAL(val) (((val) & 0x1) << 20)
+#define PORT_P4_DIR_P20_GET(val) ((((val) & PORT_P4_DIR_P20) >> 20) & 0x1)
+#define PORT_P4_DIR_P20_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P20) | (((val) & 0x1) << 20))
+/* Port 4 Pin #Direction Control (19) */
+#define PORT_P4_DIR_P19 (0x1 << 19)
+#define PORT_P4_DIR_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P4_DIR_P19_GET(val) ((((val) & PORT_P4_DIR_P19) >> 19) & 0x1)
+#define PORT_P4_DIR_P19_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P19) | (((val) & 0x1) << 19))
+/* Port 4 Pin #Direction Control (18) */
+#define PORT_P4_DIR_P18 (0x1 << 18)
+#define PORT_P4_DIR_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P4_DIR_P18_GET(val) ((((val) & PORT_P4_DIR_P18) >> 18) & 0x1)
+#define PORT_P4_DIR_P18_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P18) | (((val) & 0x1) << 18))
+/* Port 4 Pin #Direction Control (17) */
+#define PORT_P4_DIR_P17 (0x1 << 17)
+#define PORT_P4_DIR_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P4_DIR_P17_GET(val) ((((val) & PORT_P4_DIR_P17) >> 17) & 0x1)
+#define PORT_P4_DIR_P17_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P17) | (((val) & 0x1) << 17))
+/* Port 4 Pin #Direction Control (16) */
+#define PORT_P4_DIR_P16 (0x1 << 16)
+#define PORT_P4_DIR_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P4_DIR_P16_GET(val) ((((val) & PORT_P4_DIR_P16) >> 16) & 0x1)
+#define PORT_P4_DIR_P16_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P16) | (((val) & 0x1) << 16))
+/* Port 4 Pin #Direction Control (15) */
+#define PORT_P4_DIR_P15 (0x1 << 15)
+#define PORT_P4_DIR_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P4_DIR_P15_GET(val) ((((val) & PORT_P4_DIR_P15) >> 15) & 0x1)
+#define PORT_P4_DIR_P15_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P15) | (((val) & 0x1) << 15))
+/* Port 4 Pin #Direction Control (14) */
+#define PORT_P4_DIR_P14 (0x1 << 14)
+#define PORT_P4_DIR_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P4_DIR_P14_GET(val) ((((val) & PORT_P4_DIR_P14) >> 14) & 0x1)
+#define PORT_P4_DIR_P14_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P14) | (((val) & 0x1) << 14))
+/* Port 4 Pin #Direction Control (13) */
+#define PORT_P4_DIR_P13 (0x1 << 13)
+#define PORT_P4_DIR_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P4_DIR_P13_GET(val) ((((val) & PORT_P4_DIR_P13) >> 13) & 0x1)
+#define PORT_P4_DIR_P13_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P13) | (((val) & 0x1) << 13))
+/* Port 4 Pin #Direction Control (12) */
+#define PORT_P4_DIR_P12 (0x1 << 12)
+#define PORT_P4_DIR_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P4_DIR_P12_GET(val) ((((val) & PORT_P4_DIR_P12) >> 12) & 0x1)
+#define PORT_P4_DIR_P12_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P12) | (((val) & 0x1) << 12))
+/* Port 4 Pin #Direction Control (11) */
+#define PORT_P4_DIR_P11 (0x1 << 11)
+#define PORT_P4_DIR_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P4_DIR_P11_GET(val) ((((val) & PORT_P4_DIR_P11) >> 11) & 0x1)
+#define PORT_P4_DIR_P11_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P11) | (((val) & 0x1) << 11))
+/* Port 4 Pin #Direction Control (10) */
+#define PORT_P4_DIR_P10 (0x1 << 10)
+#define PORT_P4_DIR_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P4_DIR_P10_GET(val) ((((val) & PORT_P4_DIR_P10) >> 10) & 0x1)
+#define PORT_P4_DIR_P10_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P10) | (((val) & 0x1) << 10))
+/* Port 4 Pin #Direction Control (9) */
+#define PORT_P4_DIR_P9 (0x1 << 9)
+#define PORT_P4_DIR_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P4_DIR_P9_GET(val) ((((val) & PORT_P4_DIR_P9) >> 9) & 0x1)
+#define PORT_P4_DIR_P9_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P9) | (((val) & 0x1) << 9))
+/* Port 4 Pin #Direction Control (8) */
+#define PORT_P4_DIR_P8 (0x1 << 8)
+#define PORT_P4_DIR_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P4_DIR_P8_GET(val) ((((val) & PORT_P4_DIR_P8) >> 8) & 0x1)
+#define PORT_P4_DIR_P8_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P8) | (((val) & 0x1) << 8))
+/* Port 4 Pin #Direction Control (7) */
+#define PORT_P4_DIR_P7 (0x1 << 7)
+#define PORT_P4_DIR_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P4_DIR_P7_GET(val) ((((val) & PORT_P4_DIR_P7) >> 7) & 0x1)
+#define PORT_P4_DIR_P7_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P7) | (((val) & 0x1) << 7))
+/* Port 4 Pin #Direction Control (6) */
+#define PORT_P4_DIR_P6 (0x1 << 6)
+#define PORT_P4_DIR_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P4_DIR_P6_GET(val) ((((val) & PORT_P4_DIR_P6) >> 6) & 0x1)
+#define PORT_P4_DIR_P6_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P6) | (((val) & 0x1) << 6))
+/* Port 4 Pin #Direction Control (5) */
+#define PORT_P4_DIR_P5 (0x1 << 5)
+#define PORT_P4_DIR_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P4_DIR_P5_GET(val) ((((val) & PORT_P4_DIR_P5) >> 5) & 0x1)
+#define PORT_P4_DIR_P5_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P5) | (((val) & 0x1) << 5))
+/* Port 4 Pin #Direction Control (4) */
+#define PORT_P4_DIR_P4 (0x1 << 4)
+#define PORT_P4_DIR_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P4_DIR_P4_GET(val) ((((val) & PORT_P4_DIR_P4) >> 4) & 0x1)
+#define PORT_P4_DIR_P4_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P4) | (((val) & 0x1) << 4))
+/* Port 4 Pin #Direction Control (3) */
+#define PORT_P4_DIR_P3 (0x1 << 3)
+#define PORT_P4_DIR_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P4_DIR_P3_GET(val) ((((val) & PORT_P4_DIR_P3) >> 3) & 0x1)
+#define PORT_P4_DIR_P3_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P3) | (((val) & 0x1) << 3))
+/* Port 4 Pin #Direction Control (2) */
+#define PORT_P4_DIR_P2 (0x1 << 2)
+#define PORT_P4_DIR_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P4_DIR_P2_GET(val) ((((val) & PORT_P4_DIR_P2) >> 2) & 0x1)
+#define PORT_P4_DIR_P2_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P2) | (((val) & 0x1) << 2))
+/* Port 4 Pin #Direction Control (1) */
+#define PORT_P4_DIR_P1 (0x1 << 1)
+#define PORT_P4_DIR_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P4_DIR_P1_GET(val) ((((val) & PORT_P4_DIR_P1) >> 1) & 0x1)
+#define PORT_P4_DIR_P1_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P1) | (((val) & 0x1) << 1))
+/* Port 4 Pin #Direction Control (0) */
+#define PORT_P4_DIR_P0 (0x1)
+#define PORT_P4_DIR_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P4_DIR_P0_GET(val) ((((val) & PORT_P4_DIR_P0) >> 0) & 0x1)
+#define PORT_P4_DIR_P0_SET(reg,val) (reg) = ((reg & ~PORT_P4_DIR_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 4 Alternate Function Select Register 0
+ ******************************************************************************/
+
+/* Alternate Function at Port 4 Bit # (23) */
+#define PORT_P4_ALTSEL0_P23 (0x1 << 23)
+#define PORT_P4_ALTSEL0_P23_VAL(val) (((val) & 0x1) << 23)
+#define PORT_P4_ALTSEL0_P23_GET(val) ((((val) & PORT_P4_ALTSEL0_P23) >> 23) & 0x1)
+#define PORT_P4_ALTSEL0_P23_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P23) | (((val) & 0x1) << 23))
+/* Alternate Function at Port 4 Bit # (22) */
+#define PORT_P4_ALTSEL0_P22 (0x1 << 22)
+#define PORT_P4_ALTSEL0_P22_VAL(val) (((val) & 0x1) << 22)
+#define PORT_P4_ALTSEL0_P22_GET(val) ((((val) & PORT_P4_ALTSEL0_P22) >> 22) & 0x1)
+#define PORT_P4_ALTSEL0_P22_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P22) | (((val) & 0x1) << 22))
+/* Alternate Function at Port 4 Bit # (21) */
+#define PORT_P4_ALTSEL0_P21 (0x1 << 21)
+#define PORT_P4_ALTSEL0_P21_VAL(val) (((val) & 0x1) << 21)
+#define PORT_P4_ALTSEL0_P21_GET(val) ((((val) & PORT_P4_ALTSEL0_P21) >> 21) & 0x1)
+#define PORT_P4_ALTSEL0_P21_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P21) | (((val) & 0x1) << 21))
+/* Alternate Function at Port 4 Bit # (20) */
+#define PORT_P4_ALTSEL0_P20 (0x1 << 20)
+#define PORT_P4_ALTSEL0_P20_VAL(val) (((val) & 0x1) << 20)
+#define PORT_P4_ALTSEL0_P20_GET(val) ((((val) & PORT_P4_ALTSEL0_P20) >> 20) & 0x1)
+#define PORT_P4_ALTSEL0_P20_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P20) | (((val) & 0x1) << 20))
+/* Alternate Function at Port 4 Bit # (19) */
+#define PORT_P4_ALTSEL0_P19 (0x1 << 19)
+#define PORT_P4_ALTSEL0_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P4_ALTSEL0_P19_GET(val) ((((val) & PORT_P4_ALTSEL0_P19) >> 19) & 0x1)
+#define PORT_P4_ALTSEL0_P19_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P19) | (((val) & 0x1) << 19))
+/* Alternate Function at Port 4 Bit # (18) */
+#define PORT_P4_ALTSEL0_P18 (0x1 << 18)
+#define PORT_P4_ALTSEL0_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P4_ALTSEL0_P18_GET(val) ((((val) & PORT_P4_ALTSEL0_P18) >> 18) & 0x1)
+#define PORT_P4_ALTSEL0_P18_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P18) | (((val) & 0x1) << 18))
+/* Alternate Function at Port 4 Bit # (17) */
+#define PORT_P4_ALTSEL0_P17 (0x1 << 17)
+#define PORT_P4_ALTSEL0_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P4_ALTSEL0_P17_GET(val) ((((val) & PORT_P4_ALTSEL0_P17) >> 17) & 0x1)
+#define PORT_P4_ALTSEL0_P17_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P17) | (((val) & 0x1) << 17))
+/* Alternate Function at Port 4 Bit # (16) */
+#define PORT_P4_ALTSEL0_P16 (0x1 << 16)
+#define PORT_P4_ALTSEL0_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P4_ALTSEL0_P16_GET(val) ((((val) & PORT_P4_ALTSEL0_P16) >> 16) & 0x1)
+#define PORT_P4_ALTSEL0_P16_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P16) | (((val) & 0x1) << 16))
+/* Alternate Function at Port 4 Bit # (15) */
+#define PORT_P4_ALTSEL0_P15 (0x1 << 15)
+#define PORT_P4_ALTSEL0_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P4_ALTSEL0_P15_GET(val) ((((val) & PORT_P4_ALTSEL0_P15) >> 15) & 0x1)
+#define PORT_P4_ALTSEL0_P15_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P15) | (((val) & 0x1) << 15))
+/* Alternate Function at Port 4 Bit # (14) */
+#define PORT_P4_ALTSEL0_P14 (0x1 << 14)
+#define PORT_P4_ALTSEL0_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P4_ALTSEL0_P14_GET(val) ((((val) & PORT_P4_ALTSEL0_P14) >> 14) & 0x1)
+#define PORT_P4_ALTSEL0_P14_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P14) | (((val) & 0x1) << 14))
+/* Alternate Function at Port 4 Bit # (13) */
+#define PORT_P4_ALTSEL0_P13 (0x1 << 13)
+#define PORT_P4_ALTSEL0_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P4_ALTSEL0_P13_GET(val) ((((val) & PORT_P4_ALTSEL0_P13) >> 13) & 0x1)
+#define PORT_P4_ALTSEL0_P13_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P13) | (((val) & 0x1) << 13))
+/* Alternate Function at Port 4 Bit # (12) */
+#define PORT_P4_ALTSEL0_P12 (0x1 << 12)
+#define PORT_P4_ALTSEL0_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P4_ALTSEL0_P12_GET(val) ((((val) & PORT_P4_ALTSEL0_P12) >> 12) & 0x1)
+#define PORT_P4_ALTSEL0_P12_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P12) | (((val) & 0x1) << 12))
+/* Alternate Function at Port 4 Bit # (11) */
+#define PORT_P4_ALTSEL0_P11 (0x1 << 11)
+#define PORT_P4_ALTSEL0_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P4_ALTSEL0_P11_GET(val) ((((val) & PORT_P4_ALTSEL0_P11) >> 11) & 0x1)
+#define PORT_P4_ALTSEL0_P11_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P11) | (((val) & 0x1) << 11))
+/* Alternate Function at Port 4 Bit # (10) */
+#define PORT_P4_ALTSEL0_P10 (0x1 << 10)
+#define PORT_P4_ALTSEL0_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P4_ALTSEL0_P10_GET(val) ((((val) & PORT_P4_ALTSEL0_P10) >> 10) & 0x1)
+#define PORT_P4_ALTSEL0_P10_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P10) | (((val) & 0x1) << 10))
+/* Alternate Function at Port 4 Bit # (9) */
+#define PORT_P4_ALTSEL0_P9 (0x1 << 9)
+#define PORT_P4_ALTSEL0_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P4_ALTSEL0_P9_GET(val) ((((val) & PORT_P4_ALTSEL0_P9) >> 9) & 0x1)
+#define PORT_P4_ALTSEL0_P9_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P9) | (((val) & 0x1) << 9))
+/* Alternate Function at Port 4 Bit # (8) */
+#define PORT_P4_ALTSEL0_P8 (0x1 << 8)
+#define PORT_P4_ALTSEL0_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P4_ALTSEL0_P8_GET(val) ((((val) & PORT_P4_ALTSEL0_P8) >> 8) & 0x1)
+#define PORT_P4_ALTSEL0_P8_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P8) | (((val) & 0x1) << 8))
+/* Alternate Function at Port 4 Bit # (7) */
+#define PORT_P4_ALTSEL0_P7 (0x1 << 7)
+#define PORT_P4_ALTSEL0_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P4_ALTSEL0_P7_GET(val) ((((val) & PORT_P4_ALTSEL0_P7) >> 7) & 0x1)
+#define PORT_P4_ALTSEL0_P7_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P7) | (((val) & 0x1) << 7))
+/* Alternate Function at Port 4 Bit # (6) */
+#define PORT_P4_ALTSEL0_P6 (0x1 << 6)
+#define PORT_P4_ALTSEL0_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P4_ALTSEL0_P6_GET(val) ((((val) & PORT_P4_ALTSEL0_P6) >> 6) & 0x1)
+#define PORT_P4_ALTSEL0_P6_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P6) | (((val) & 0x1) << 6))
+/* Alternate Function at Port 4 Bit # (5) */
+#define PORT_P4_ALTSEL0_P5 (0x1 << 5)
+#define PORT_P4_ALTSEL0_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P4_ALTSEL0_P5_GET(val) ((((val) & PORT_P4_ALTSEL0_P5) >> 5) & 0x1)
+#define PORT_P4_ALTSEL0_P5_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P5) | (((val) & 0x1) << 5))
+/* Alternate Function at Port 4 Bit # (4) */
+#define PORT_P4_ALTSEL0_P4 (0x1 << 4)
+#define PORT_P4_ALTSEL0_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P4_ALTSEL0_P4_GET(val) ((((val) & PORT_P4_ALTSEL0_P4) >> 4) & 0x1)
+#define PORT_P4_ALTSEL0_P4_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P4) | (((val) & 0x1) << 4))
+/* Alternate Function at Port 4 Bit # (3) */
+#define PORT_P4_ALTSEL0_P3 (0x1 << 3)
+#define PORT_P4_ALTSEL0_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P4_ALTSEL0_P3_GET(val) ((((val) & PORT_P4_ALTSEL0_P3) >> 3) & 0x1)
+#define PORT_P4_ALTSEL0_P3_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P3) | (((val) & 0x1) << 3))
+/* Alternate Function at Port 4 Bit # (2) */
+#define PORT_P4_ALTSEL0_P2 (0x1 << 2)
+#define PORT_P4_ALTSEL0_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P4_ALTSEL0_P2_GET(val) ((((val) & PORT_P4_ALTSEL0_P2) >> 2) & 0x1)
+#define PORT_P4_ALTSEL0_P2_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P2) | (((val) & 0x1) << 2))
+/* Alternate Function at Port 4 Bit # (1) */
+#define PORT_P4_ALTSEL0_P1 (0x1 << 1)
+#define PORT_P4_ALTSEL0_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P4_ALTSEL0_P1_GET(val) ((((val) & PORT_P4_ALTSEL0_P1) >> 1) & 0x1)
+#define PORT_P4_ALTSEL0_P1_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P1) | (((val) & 0x1) << 1))
+/* Alternate Function at Port 4 Bit # (0) */
+#define PORT_P4_ALTSEL0_P0 (0x1)
+#define PORT_P4_ALTSEL0_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P4_ALTSEL0_P0_GET(val) ((((val) & PORT_P4_ALTSEL0_P0) >> 0) & 0x1)
+#define PORT_P4_ALTSEL0_P0_SET(reg,val) (reg) = ((reg & ~PORT_P4_ALTSEL0_P0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * Port 4 Pull Up Device Enable Register
+ ******************************************************************************/
+
+/* Pull Up Device Enable at Port 4 Bit # (23) */
+#define PORT_P4_PUEN_P23 (0x1 << 23)
+#define PORT_P4_PUEN_P23_VAL(val) (((val) & 0x1) << 23)
+#define PORT_P4_PUEN_P23_GET(val) ((((val) & PORT_P4_PUEN_P23) >> 23) & 0x1)
+#define PORT_P4_PUEN_P23_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P23) | (((val) & 0x1) << 23))
+/* Pull Up Device Enable at Port 4 Bit # (22) */
+#define PORT_P4_PUEN_P22 (0x1 << 22)
+#define PORT_P4_PUEN_P22_VAL(val) (((val) & 0x1) << 22)
+#define PORT_P4_PUEN_P22_GET(val) ((((val) & PORT_P4_PUEN_P22) >> 22) & 0x1)
+#define PORT_P4_PUEN_P22_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P22) | (((val) & 0x1) << 22))
+/* Pull Up Device Enable at Port 4 Bit # (21) */
+#define PORT_P4_PUEN_P21 (0x1 << 21)
+#define PORT_P4_PUEN_P21_VAL(val) (((val) & 0x1) << 21)
+#define PORT_P4_PUEN_P21_GET(val) ((((val) & PORT_P4_PUEN_P21) >> 21) & 0x1)
+#define PORT_P4_PUEN_P21_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P21) | (((val) & 0x1) << 21))
+/* Pull Up Device Enable at Port 4 Bit # (20) */
+#define PORT_P4_PUEN_P20 (0x1 << 20)
+#define PORT_P4_PUEN_P20_VAL(val) (((val) & 0x1) << 20)
+#define PORT_P4_PUEN_P20_GET(val) ((((val) & PORT_P4_PUEN_P20) >> 20) & 0x1)
+#define PORT_P4_PUEN_P20_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P20) | (((val) & 0x1) << 20))
+/* Pull Up Device Enable at Port 4 Bit # (19) */
+#define PORT_P4_PUEN_P19 (0x1 << 19)
+#define PORT_P4_PUEN_P19_VAL(val) (((val) & 0x1) << 19)
+#define PORT_P4_PUEN_P19_GET(val) ((((val) & PORT_P4_PUEN_P19) >> 19) & 0x1)
+#define PORT_P4_PUEN_P19_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P19) | (((val) & 0x1) << 19))
+/* Pull Up Device Enable at Port 4 Bit # (18) */
+#define PORT_P4_PUEN_P18 (0x1 << 18)
+#define PORT_P4_PUEN_P18_VAL(val) (((val) & 0x1) << 18)
+#define PORT_P4_PUEN_P18_GET(val) ((((val) & PORT_P4_PUEN_P18) >> 18) & 0x1)
+#define PORT_P4_PUEN_P18_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P18) | (((val) & 0x1) << 18))
+/* Pull Up Device Enable at Port 4 Bit # (17) */
+#define PORT_P4_PUEN_P17 (0x1 << 17)
+#define PORT_P4_PUEN_P17_VAL(val) (((val) & 0x1) << 17)
+#define PORT_P4_PUEN_P17_GET(val) ((((val) & PORT_P4_PUEN_P17) >> 17) & 0x1)
+#define PORT_P4_PUEN_P17_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P17) | (((val) & 0x1) << 17))
+/* Pull Up Device Enable at Port 4 Bit # (16) */
+#define PORT_P4_PUEN_P16 (0x1 << 16)
+#define PORT_P4_PUEN_P16_VAL(val) (((val) & 0x1) << 16)
+#define PORT_P4_PUEN_P16_GET(val) ((((val) & PORT_P4_PUEN_P16) >> 16) & 0x1)
+#define PORT_P4_PUEN_P16_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P16) | (((val) & 0x1) << 16))
+/* Pull Up Device Enable at Port 4 Bit # (15) */
+#define PORT_P4_PUEN_P15 (0x1 << 15)
+#define PORT_P4_PUEN_P15_VAL(val) (((val) & 0x1) << 15)
+#define PORT_P4_PUEN_P15_GET(val) ((((val) & PORT_P4_PUEN_P15) >> 15) & 0x1)
+#define PORT_P4_PUEN_P15_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P15) | (((val) & 0x1) << 15))
+/* Pull Up Device Enable at Port 4 Bit # (14) */
+#define PORT_P4_PUEN_P14 (0x1 << 14)
+#define PORT_P4_PUEN_P14_VAL(val) (((val) & 0x1) << 14)
+#define PORT_P4_PUEN_P14_GET(val) ((((val) & PORT_P4_PUEN_P14) >> 14) & 0x1)
+#define PORT_P4_PUEN_P14_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P14) | (((val) & 0x1) << 14))
+/* Pull Up Device Enable at Port 4 Bit # (13) */
+#define PORT_P4_PUEN_P13 (0x1 << 13)
+#define PORT_P4_PUEN_P13_VAL(val) (((val) & 0x1) << 13)
+#define PORT_P4_PUEN_P13_GET(val) ((((val) & PORT_P4_PUEN_P13) >> 13) & 0x1)
+#define PORT_P4_PUEN_P13_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P13) | (((val) & 0x1) << 13))
+/* Pull Up Device Enable at Port 4 Bit # (12) */
+#define PORT_P4_PUEN_P12 (0x1 << 12)
+#define PORT_P4_PUEN_P12_VAL(val) (((val) & 0x1) << 12)
+#define PORT_P4_PUEN_P12_GET(val) ((((val) & PORT_P4_PUEN_P12) >> 12) & 0x1)
+#define PORT_P4_PUEN_P12_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P12) | (((val) & 0x1) << 12))
+/* Pull Up Device Enable at Port 4 Bit # (11) */
+#define PORT_P4_PUEN_P11 (0x1 << 11)
+#define PORT_P4_PUEN_P11_VAL(val) (((val) & 0x1) << 11)
+#define PORT_P4_PUEN_P11_GET(val) ((((val) & PORT_P4_PUEN_P11) >> 11) & 0x1)
+#define PORT_P4_PUEN_P11_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P11) | (((val) & 0x1) << 11))
+/* Pull Up Device Enable at Port 4 Bit # (10) */
+#define PORT_P4_PUEN_P10 (0x1 << 10)
+#define PORT_P4_PUEN_P10_VAL(val) (((val) & 0x1) << 10)
+#define PORT_P4_PUEN_P10_GET(val) ((((val) & PORT_P4_PUEN_P10) >> 10) & 0x1)
+#define PORT_P4_PUEN_P10_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P10) | (((val) & 0x1) << 10))
+/* Pull Up Device Enable at Port 4 Bit # (9) */
+#define PORT_P4_PUEN_P9 (0x1 << 9)
+#define PORT_P4_PUEN_P9_VAL(val) (((val) & 0x1) << 9)
+#define PORT_P4_PUEN_P9_GET(val) ((((val) & PORT_P4_PUEN_P9) >> 9) & 0x1)
+#define PORT_P4_PUEN_P9_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P9) | (((val) & 0x1) << 9))
+/* Pull Up Device Enable at Port 4 Bit # (8) */
+#define PORT_P4_PUEN_P8 (0x1 << 8)
+#define PORT_P4_PUEN_P8_VAL(val) (((val) & 0x1) << 8)
+#define PORT_P4_PUEN_P8_GET(val) ((((val) & PORT_P4_PUEN_P8) >> 8) & 0x1)
+#define PORT_P4_PUEN_P8_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P8) | (((val) & 0x1) << 8))
+/* Pull Up Device Enable at Port 4 Bit # (7) */
+#define PORT_P4_PUEN_P7 (0x1 << 7)
+#define PORT_P4_PUEN_P7_VAL(val) (((val) & 0x1) << 7)
+#define PORT_P4_PUEN_P7_GET(val) ((((val) & PORT_P4_PUEN_P7) >> 7) & 0x1)
+#define PORT_P4_PUEN_P7_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P7) | (((val) & 0x1) << 7))
+/* Pull Up Device Enable at Port 4 Bit # (6) */
+#define PORT_P4_PUEN_P6 (0x1 << 6)
+#define PORT_P4_PUEN_P6_VAL(val) (((val) & 0x1) << 6)
+#define PORT_P4_PUEN_P6_GET(val) ((((val) & PORT_P4_PUEN_P6) >> 6) & 0x1)
+#define PORT_P4_PUEN_P6_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P6) | (((val) & 0x1) << 6))
+/* Pull Up Device Enable at Port 4 Bit # (5) */
+#define PORT_P4_PUEN_P5 (0x1 << 5)
+#define PORT_P4_PUEN_P5_VAL(val) (((val) & 0x1) << 5)
+#define PORT_P4_PUEN_P5_GET(val) ((((val) & PORT_P4_PUEN_P5) >> 5) & 0x1)
+#define PORT_P4_PUEN_P5_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P5) | (((val) & 0x1) << 5))
+/* Pull Up Device Enable at Port 4 Bit # (4) */
+#define PORT_P4_PUEN_P4 (0x1 << 4)
+#define PORT_P4_PUEN_P4_VAL(val) (((val) & 0x1) << 4)
+#define PORT_P4_PUEN_P4_GET(val) ((((val) & PORT_P4_PUEN_P4) >> 4) & 0x1)
+#define PORT_P4_PUEN_P4_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P4) | (((val) & 0x1) << 4))
+/* Pull Up Device Enable at Port 4 Bit # (3) */
+#define PORT_P4_PUEN_P3 (0x1 << 3)
+#define PORT_P4_PUEN_P3_VAL(val) (((val) & 0x1) << 3)
+#define PORT_P4_PUEN_P3_GET(val) ((((val) & PORT_P4_PUEN_P3) >> 3) & 0x1)
+#define PORT_P4_PUEN_P3_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P3) | (((val) & 0x1) << 3))
+/* Pull Up Device Enable at Port 4 Bit # (2) */
+#define PORT_P4_PUEN_P2 (0x1 << 2)
+#define PORT_P4_PUEN_P2_VAL(val) (((val) & 0x1) << 2)
+#define PORT_P4_PUEN_P2_GET(val) ((((val) & PORT_P4_PUEN_P2) >> 2) & 0x1)
+#define PORT_P4_PUEN_P2_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P2) | (((val) & 0x1) << 2))
+/* Pull Up Device Enable at Port 4 Bit # (1) */
+#define PORT_P4_PUEN_P1 (0x1 << 1)
+#define PORT_P4_PUEN_P1_VAL(val) (((val) & 0x1) << 1)
+#define PORT_P4_PUEN_P1_GET(val) ((((val) & PORT_P4_PUEN_P1) >> 1) & 0x1)
+#define PORT_P4_PUEN_P1_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P1) | (((val) & 0x1) << 1))
+/* Pull Up Device Enable at Port 4 Bit # (0) */
+#define PORT_P4_PUEN_P0 (0x1)
+#define PORT_P4_PUEN_P0_VAL(val) (((val) & 0x1) << 0)
+#define PORT_P4_PUEN_P0_GET(val) ((((val) & PORT_P4_PUEN_P0) >> 0) & 0x1)
+#define PORT_P4_PUEN_P0_SET(reg,val) (reg) = ((reg & ~PORT_P4_PUEN_P0) | (((val) & 0x1) << 0))
+
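+/*
+ * Illustrative usage sketch -- not part of the original register description.
+ * It shows the read-modify-write pattern the *_SET/*_GET macros above are
+ * built for: keep a software copy of the register, update the bit, then write
+ * the copy back.  The pointer parameters stand in for the memory-mapped PORT
+ * registers; their base address and layout are defined elsewhere and are only
+ * assumed here.
+ */
+static inline void port_p4_pin5_drive_high_example(volatile unsigned long *p4_dir,
+						    volatile unsigned long *p4_out)
+{
+	unsigned long dir = *p4_dir;
+	unsigned long out = *p4_out;
+
+	PORT_P4_DIR_P5_SET(dir, 1);	/* pin 5: direction = output */
+	PORT_P4_OUT_P5_SET(out, 1);	/* pin 5: output value = high */
+
+	*p4_dir = dir;
+	*p4_out = out;
+}
+
+static inline int port_p4_pin6_read_example(volatile unsigned long *p4_in)
+{
+	return PORT_P4_IN_P6_GET(*p4_in);	/* latched input value of pin 6 */
+}
+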
+#endif /* __PORT_H */
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ssc_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ssc_reg.h
new file mode 100644
index 0000000000..4ea2f5a685
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/ssc_reg.h
@@ -0,0 +1,624 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __SSC_REG_H
+#define __SSC_REG_H
+
+/** SSC register structure */
+struct svip_reg_ssc {
+ volatile unsigned long clc; /* 0x00 */
+ volatile unsigned long pisel; /* 0x04 */
+ volatile unsigned long id; /* 0x08 */
+ volatile unsigned long reserved0; /* 0x0c */
+ volatile unsigned long mcon; /* 0x10 */
+ volatile unsigned long state; /* 0x14 */
+ volatile unsigned long whbstate; /* 0x18 */
+ volatile unsigned long reserved1; /* 0x1c */
+ volatile unsigned long tb; /* 0x20 */
+ volatile unsigned long rb; /* 0x24 */
+ volatile unsigned long reserved2[2]; /* 0x28 */
+ volatile unsigned long rxfcon; /* 0x30 */
+ volatile unsigned long txfcon; /* 0x34 */
+ volatile unsigned long fstat; /* 0x38 */
+ volatile unsigned long reserved3; /* 0x3c */
+ volatile unsigned long br; /* 0x40 */
+ volatile unsigned long brstat; /* 0x44 */
+ volatile unsigned long reserved4[6]; /* 0x48 */
+ volatile unsigned long sfcon; /* 0x60 */
+ volatile unsigned long sfstat; /* 0x64 */
+ volatile unsigned long reserved5[2]; /* 0x68 */
+ volatile unsigned long gpocon; /* 0x70 */
+ volatile unsigned long gpostat; /* 0x74 */
+ volatile unsigned long whbgpostat; /* 0x78 */
+ volatile unsigned long reserved6; /* 0x7c */
+ volatile unsigned long rxreq; /* 0x80 */
+ volatile unsigned long rxcnt; /* 0x84 */
+ volatile unsigned long reserved7[25]; /* 0x88 */
+ volatile unsigned long dma_con; /* 0xEC */
+ volatile unsigned long reserved8; /* 0xf0 */
+ volatile unsigned long irnen; /* 0xF4 */
+ volatile unsigned long irncr; /* 0xF8 */
+ volatile unsigned long irnicr; /* 0xFC */
+};
+
+/*******************************************************************************
+ * CLC Register
+ ******************************************************************************/
+
+/* Clock Divider for Sleep Mode (23:16) */
+#define SSC_CLC_SMC (0xff << 16)
+#define SSC_CLC_SMC_VAL(val) (((val) & 0xff) << 16)
+#define SSC_CLC_SMC_GET(val) ((((val) & SSC_CLC_SMC) >> 16) & 0xff)
+#define SSC_CLC_SMC_SET(reg,val) (reg) = ((reg & ~SSC_CLC_SMC) | (((val) & 0xff) << 16))
+/* Clock Divider for Normal Run Mode (15:8) */
+#define SSC_CLC_RMC (0xff << 8)
+#define SSC_CLC_RMC_VAL(val) (((val) & 0xff) << 8)
+#define SSC_CLC_RMC_GET(val) ((((val) & SSC_CLC_RMC) >> 8) & 0xff)
+#define SSC_CLC_RMC_SET(reg,val) (reg) = ((reg & ~SSC_CLC_RMC) | (((val) & 0xff) << 8))
+/* Fast Shut-Off Enable Bit (5) */
+#define SSC_CLC_FSOE (0x1 << 5)
+#define SSC_CLC_FSOE_VAL(val) (((val) & 0x1) << 5)
+#define SSC_CLC_FSOE_GET(val) ((((val) & SSC_CLC_FSOE) >> 5) & 0x1)
+#define SSC_CLC_FSOE_SET(reg,val) (reg) = ((reg & ~SSC_CLC_FSOE) | (((val) & 0x1) << 5))
+/* Suspend Bit Write Enable for OCDS (4) */
+#define SSC_CLC_SBWE (0x1 << 4)
+#define SSC_CLC_SBWE_VAL(val) (((val) & 0x1) << 4)
+#define SSC_CLC_SBWE_SET(reg,val) (reg) = ((reg & ~SSC_CLC_SBWE) | (((val) & 0x1) << 4))
+/* External Request Disable (3) */
+#define SSC_CLC_EDIS (0x1 << 3)
+#define SSC_CLC_EDIS_VAL(val) (((val) & 0x1) << 3)
+#define SSC_CLC_EDIS_GET(val) ((((val) & SSC_CLC_EDIS) >> 3) & 0x1)
+#define SSC_CLC_EDIS_SET(reg,val) (reg) = ((reg & ~SSC_CLC_EDIS) | (((val) & 0x1) << 3))
+/* Suspend Enable Bit for OCDS (2) */
+#define SSC_CLC_SPEN (0x1 << 2)
+#define SSC_CLC_SPEN_VAL(val) (((val) & 0x1) << 2)
+#define SSC_CLC_SPEN_GET(val) ((((val) & SSC_CLC_SPEN) >> 2) & 0x1)
+#define SSC_CLC_SPEN_SET(reg,val) (reg) = ((reg & ~SSC_CLC_SPEN) | (((val) & 0x1) << 2))
+/* Disable Status Bit (1) */
+#define SSC_CLC_DISS (0x1 << 1)
+#define SSC_CLC_DISS_GET(val) ((((val) & SSC_CLC_DISS) >> 1) & 0x1)
+/* Disable Request Bit (0) */
+#define SSC_CLC_DISR (0x1)
+#define SSC_CLC_DISR_VAL(val) (((val) & 0x1) << 0)
+#define SSC_CLC_DISR_GET(val) ((((val) & SSC_CLC_DISR) >> 0) & 0x1)
+#define SSC_CLC_DISR_SET(reg,val) (reg) = ((reg & ~SSC_CLC_DISR) | (((val) & 0x1) << 0))
+
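+/*
+ * Usage sketch (illustrative only, not taken from the SVIP documentation):
+ * the *_SET macros update a field inside a plain lvalue, so a driver is
+ * expected to read the register, modify the field and write the value back.
+ * The "ssc" pointer below is an assumed mapping of struct svip_reg_ssc; the
+ * function name and divider values are placeholders.
+ */
+#if 0
+static void ssc_example_set_dividers(struct svip_reg_ssc *ssc)
+{
+	unsigned long clc = ssc->clc;	/* read CLC */
+
+	SSC_CLC_RMC_SET(clc, 1);	/* clock divider for normal run mode */
+	SSC_CLC_SMC_SET(clc, 1);	/* clock divider for sleep mode */
+	ssc->clc = clc;			/* write the updated value back */
+}
+#endif
+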
+/*******************************************************************************
+ * ID Register
+ ******************************************************************************/
+
+/* Transmit FIFO Size (29:24) */
+#define SSC_ID_TXFS (0x3f << 24)
+#define SSC_ID_TXFS_GET(val) ((((val) & SSC_ID_TXFS) >> 24) & 0x3f)
+/* Receive FIFO Size (21:16) */
+#define SSC_ID_RXFS (0x3f << 16)
+#define SSC_ID_RXFS_GET(val) ((((val) & SSC_ID_RXFS) >> 16) & 0x3f)
+/* Module ID (15:8) */
+#define SSC_ID_ID (0xff << 8)
+#define SSC_ID_ID_GET(val) ((((val) & SSC_ID_ID) >> 8) & 0xff)
+/* Configuration (5) */
+#define SSC_ID_CFG (0x1 << 5)
+#define SSC_ID_CFG_GET(val) ((((val) & SSC_ID_CFG) >> 5) & 0x1)
+/* Revision (4:0) */
+#define SSC_ID_REV (0x1f)
+#define SSC_ID_REV_GET(val) ((((val) & SSC_ID_REV) >> 0) & 0x1f)
+
+/*******************************************************************************
+ * MCON Register
+ ******************************************************************************/
+
+/* Echo Mode (24) */
+#define SSC_MCON_EM (0x1 << 24)
+#define SSC_MCON_EM_VAL(val) (((val) & 0x1) << 24)
+#define SSC_MCON_EM_GET(val) ((((val) & SSC_MCON_EM) >> 24) & 0x1)
+#define SSC_MCON_EM_SET(reg,val) (reg) = ((reg & ~SSC_MCON_EM) | (((val) & 0x1) << 24))
+/* Idle Bit Value (23) */
+#define SSC_MCON_IDLE (0x1 << 23)
+#define SSC_MCON_IDLE_VAL(val) (((val) & 0x1) << 23)
+#define SSC_MCON_IDLE_GET(val) ((((val) & SSC_MCON_IDLE) >> 23) & 0x1)
+#define SSC_MCON_IDLE_SET(reg,val) (reg) = ((reg & ~SSC_MCON_IDLE) | (((val) & 0x1) << 23))
+/* Enable Byte Valid Control (22) */
+#define SSC_MCON_ENBV (0x1 << 22)
+#define SSC_MCON_ENBV_VAL(val) (((val) & 0x1) << 22)
+#define SSC_MCON_ENBV_GET(val) ((((val) & SSC_MCON_ENBV) >> 22) & 0x1)
+#define SSC_MCON_ENBV_SET(reg,val) (reg) = ((reg & ~SSC_MCON_ENBV) | (((val) & 0x1) << 22))
+/* Data Width Selection (20:16) */
+#define SSC_MCON_BM (0x1f << 16)
+#define SSC_MCON_BM_VAL(val) (((val) & 0x1f) << 16)
+#define SSC_MCON_BM_GET(val) ((((val) & SSC_MCON_BM) >> 16) & 0x1f)
+#define SSC_MCON_BM_SET(reg,val) (reg) = ((reg & ~SSC_MCON_BM) | (((val) & 0x1f) << 16))
+/* Receive Underflow Error Enable (12) */
+#define SSC_MCON_RUEN (0x1 << 12)
+#define SSC_MCON_RUEN_VAL(val) (((val) & 0x1) << 12)
+#define SSC_MCON_RUEN_GET(val) ((((val) & SSC_MCON_RUEN) >> 12) & 0x1)
+#define SSC_MCON_RUEN_SET(reg,val) (reg) = ((reg & ~SSC_MCON_RUEN) | (((val) & 0x1) << 12))
+/* Transmit Underflow Error Enable (11) */
+#define SSC_MCON_TUEN (0x1 << 11)
+#define SSC_MCON_TUEN_VAL(val) (((val) & 0x1) << 11)
+#define SSC_MCON_TUEN_GET(val) ((((val) & SSC_MCON_TUEN) >> 11) & 0x1)
+#define SSC_MCON_TUEN_SET(reg,val) (reg) = ((reg & ~SSC_MCON_TUEN) | (((val) & 0x1) << 11))
+/* Abort Error Enable (10) */
+#define SSC_MCON_AEN (0x1 << 10)
+#define SSC_MCON_AEN_VAL(val) (((val) & 0x1) << 10)
+#define SSC_MCON_AEN_GET(val) ((((val) & SSC_MCON_AEN) >> 10) & 0x1)
+#define SSC_MCON_AEN_SET(reg,val) (reg) = ((reg & ~SSC_MCON_AEN) | (((val) & 0x1) << 10))
+/* Receive Overflow Error Enable (9) */
+#define SSC_MCON_REN (0x1 << 9)
+#define SSC_MCON_REN_VAL(val) (((val) & 0x1) << 9)
+#define SSC_MCON_REN_GET(val) ((((val) & SSC_MCON_REN) >> 9) & 0x1)
+#define SSC_MCON_REN_SET(reg,val) (reg) = ((reg & ~SSC_MCON_REN) | (((val) & 0x1) << 9))
+/* Transmit Overflow Error Enable (8) */
+#define SSC_MCON_TEN (0x1 << 8)
+#define SSC_MCON_TEN_VAL(val) (((val) & 0x1) << 8)
+#define SSC_MCON_TEN_GET(val) ((((val) & SSC_MCON_TEN) >> 8) & 0x1)
+#define SSC_MCON_TEN_SET(reg,val) (reg) = ((reg & ~SSC_MCON_TEN) | (((val) & 0x1) << 8))
+/* Loop Back Control (7) */
+#define SSC_MCON_LB (0x1 << 7)
+#define SSC_MCON_LB_VAL(val) (((val) & 0x1) << 7)
+#define SSC_MCON_LB_GET(val) ((((val) & SSC_MCON_LB) >> 7) & 0x1)
+#define SSC_MCON_LB_SET(reg,val) (reg) = ((reg & ~SSC_MCON_LB) | (((val) & 0x1) << 7))
+/* Clock Polarity Control (6) */
+#define SSC_MCON_PO (0x1 << 6)
+#define SSC_MCON_PO_VAL(val) (((val) & 0x1) << 6)
+#define SSC_MCON_PO_GET(val) ((((val) & SSC_MCON_PO) >> 6) & 0x1)
+#define SSC_MCON_PO_SET(reg,val) (reg) = ((reg & ~SSC_MCON_PO) | (((val) & 0x1) << 6))
+/* Clock Phase Control (5) */
+#define SSC_MCON_PH (0x1 << 5)
+#define SSC_MCON_PH_VAL(val) (((val) & 0x1) << 5)
+#define SSC_MCON_PH_GET(val) ((((val) & SSC_MCON_PH) >> 5) & 0x1)
+#define SSC_MCON_PH_SET(reg,val) (reg) = ((reg & ~SSC_MCON_PH) | (((val) & 0x1) << 5))
+/* Heading Control (4) */
+#define SSC_MCON_HB (0x1 << 4)
+#define SSC_MCON_HB_VAL(val) (((val) & 0x1) << 4)
+#define SSC_MCON_HB_GET(val) ((((val) & SSC_MCON_HB) >> 4) & 0x1)
+#define SSC_MCON_HB_SET(reg,val) (reg) = ((reg & ~SSC_MCON_HB) | (((val) & 0x1) << 4))
+/* Chip Select Enable (3) */
+#define SSC_MCON_CSBEN (0x1 << 3)
+#define SSC_MCON_CSBEN_VAL(val) (((val) & 0x1) << 3)
+#define SSC_MCON_CSBEN_GET(val) ((((val) & SSC_MCON_CSBEN) >> 3) & 0x1)
+#define SSC_MCON_CSBEN_SET(reg,val) (reg) = ((reg & ~SSC_MCON_CSBEN) | (((val) & 0x1) << 3))
+/* Chip Select Invert (2) */
+#define SSC_MCON_CSBINV (0x1 << 2)
+#define SSC_MCON_CSBINV_VAL(val) (((val) & 0x1) << 2)
+#define SSC_MCON_CSBINV_GET(val) ((((val) & SSC_MCON_CSBINV) >> 2) & 0x1)
+#define SSC_MCON_CSBINV_SET(reg,val) (reg) = ((reg & ~SSC_MCON_CSBINV) | (((val) & 0x1) << 2))
+/* Receive Off (1) */
+#define SSC_MCON_RXOFF (0x1 << 1)
+#define SSC_MCON_RXOFF_VAL(val) (((val) & 0x1) << 1)
+#define SSC_MCON_RXOFF_GET(val) ((((val) & SSC_MCON_RXOFF) >> 1) & 0x1)
+#define SSC_MCON_RXOFF_SET(reg,val) (reg) = ((reg & ~SSC_MCON_RXOFF) | (((val) & 0x1) << 1))
+/* Transmit Off (0) */
+#define SSC_MCON_TXOFF (0x1)
+#define SSC_MCON_TXOFF_VAL(val) (((val) & 0x1) << 0)
+#define SSC_MCON_TXOFF_GET(val) ((((val) & SSC_MCON_TXOFF) >> 0) & 0x1)
+#define SSC_MCON_TXOFF_SET(reg,val) (reg) = ((reg & ~SSC_MCON_TXOFF) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * STATE Register
+ ******************************************************************************/
+
+/* Receive End-of-Message (31) */
+#define SSC_STATE_RXEOM (0x1 << 31)
+#define SSC_STATE_RXEOM_GET(val) ((((val) & SSC_STATE_RXEOM) >> 31) & 0x1)
+/* Receive Byte Valid (30:28) */
+#define SSC_STATE_RXBV (0x7 << 28)
+#define SSC_STATE_RXBV_GET(val) ((((val) & SSC_STATE_RXBV) >> 28) & 0x7)
+/* Transmit End-of-Message (27) */
+#define SSC_STATE_TXEOM (0x1 << 27)
+#define SSC_STATE_TXEOM_GET(val) ((((val) & SSC_STATE_TXEOM) >> 27) & 0x1)
+/* Transmit Byte Valid (26:24) */
+#define SSC_STATE_TXBV (0x7 << 24)
+#define SSC_STATE_TXBV_GET(val) ((((val) & SSC_STATE_TXBV) >> 24) & 0x7)
+/* Bit Count Field (20:16) */
+#define SSC_STATE_BC (0x1f << 16)
+#define SSC_STATE_BC_GET(val) ((((val) & SSC_STATE_BC) >> 16) & 0x1f)
+/* Busy Flag (13) */
+#define SSC_STATE_BSY (0x1 << 13)
+#define SSC_STATE_BSY_GET(val) ((((val) & SSC_STATE_BSY) >> 13) & 0x1)
+/* Receive Underflow Error Flag (12) */
+#define SSC_STATE_RUE (0x1 << 12)
+#define SSC_STATE_RUE_GET(val) ((((val) & SSC_STATE_RUE) >> 12) & 0x1)
+/* Transmit Underflow Error Flag (11) */
+#define SSC_STATE_TUE (0x1 << 11)
+#define SSC_STATE_TUE_GET(val) ((((val) & SSC_STATE_TUE) >> 11) & 0x1)
+/* Abort Error Flag (10) */
+#define SSC_STATE_AE (0x1 << 10)
+#define SSC_STATE_AE_GET(val) ((((val) & SSC_STATE_AE) >> 10) & 0x1)
+/* Receive Error Flag (9) */
+#define SSC_STATE_RE (0x1 << 9)
+#define SSC_STATE_RE_GET(val) ((((val) & SSC_STATE_RE) >> 9) & 0x1)
+/* Transmit Error Flag (8) */
+#define SSC_STATE_TE (0x1 << 8)
+#define SSC_STATE_TE_GET(val) ((((val) & SSC_STATE_TE) >> 8) & 0x1)
+/* Mode Error Flag (7) */
+#define SSC_STATE_ME (0x1 << 7)
+#define SSC_STATE_ME_GET(val) ((((val) & SSC_STATE_ME) >> 7) & 0x1)
+/* Slave Selected (2) */
+#define SSC_STATE_SSEL (0x1 << 2)
+#define SSC_STATE_SSEL_GET(val) ((((val) & SSC_STATE_SSEL) >> 2) & 0x1)
+/* Master Select Bit (1) */
+#define SSC_STATE_MS (0x1 << 1)
+#define SSC_STATE_MS_GET(val) ((((val) & SSC_STATE_MS) >> 1) & 0x1)
+/* Enable Bit (0) */
+#define SSC_STATE_EN (0x1)
+#define SSC_STATE_EN_GET(val) ((((val) & SSC_STATE_EN) >> 0) & 0x1)
+
+/*******************************************************************************
+ * WHBSTATE Register
+ ******************************************************************************/
+
+/* Set Transmit Underflow Error Flag Bit (15) */
+#define SSC_WHBSTATE_SETTUE (0x1 << 15)
+#define SSC_WHBSTATE_SETTUE_VAL(val) (((val) & 0x1) << 15)
+#define SSC_WHBSTATE_SETTUE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETTUE) | (((val) & 0x1) << 15))
+/* Set Abort Error Flag Bit (14) */
+#define SSC_WHBSTATE_SETAE (0x1 << 14)
+#define SSC_WHBSTATE_SETAE_VAL(val) (((val) & 0x1) << 14)
+#define SSC_WHBSTATE_SETAE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETAE) | (((val) & 0x1) << 14))
+/* Set Receive Error Flag Bit (13) */
+#define SSC_WHBSTATE_SETRE (0x1 << 13)
+#define SSC_WHBSTATE_SETRE_VAL(val) (((val) & 0x1) << 13)
+#define SSC_WHBSTATE_SETRE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETRE) | (((val) & 0x1) << 13))
+/* Set Transmit Error Flag Bit (12) */
+#define SSC_WHBSTATE_SETTE (0x1 << 12)
+#define SSC_WHBSTATE_SETTE_VAL(val) (((val) & 0x1) << 12)
+#define SSC_WHBSTATE_SETTE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETTE) | (((val) & 0x1) << 12))
+/* Clear Transmit Underflow Error Flag Bit (11) */
+#define SSC_WHBSTATE_CLRTUE (0x1 << 11)
+#define SSC_WHBSTATE_CLRTUE_VAL(val) (((val) & 0x1) << 11)
+#define SSC_WHBSTATE_CLRTUE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRTUE) | (((val) & 0x1) << 11))
+/* Clear Abort Error Flag Bit (10) */
+#define SSC_WHBSTATE_CLRAE (0x1 << 10)
+#define SSC_WHBSTATE_CLRAE_VAL(val) (((val) & 0x1) << 10)
+#define SSC_WHBSTATE_CLRAE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRAE) | (((val) & 0x1) << 10))
+/* Clear Receive Error Flag Bit (9) */
+#define SSC_WHBSTATE_CLRRE (0x1 << 9)
+#define SSC_WHBSTATE_CLRRE_VAL(val) (((val) & 0x1) << 9)
+#define SSC_WHBSTATE_CLRRE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRRE) | (((val) & 0x1) << 9))
+/* Clear Transmit Error Flag Bit (8) */
+#define SSC_WHBSTATE_CLRTE (0x1 << 8)
+#define SSC_WHBSTATE_CLRTE_VAL(val) (((val) & 0x1) << 8)
+#define SSC_WHBSTATE_CLRTE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRTE) | (((val) & 0x1) << 8))
+/* Set Mode Error Flag Bit (7) */
+#define SSC_WHBSTATE_SETME (0x1 << 7)
+#define SSC_WHBSTATE_SETME_VAL(val) (((val) & 0x1) << 7)
+#define SSC_WHBSTATE_SETME_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETME) | (((val) & 0x1) << 7))
+/* Clear Mode Error Flag Bit (6) */
+#define SSC_WHBSTATE_CLRME (0x1 << 6)
+#define SSC_WHBSTATE_CLRME_VAL(val) (((val) & 0x1) << 6)
+#define SSC_WHBSTATE_CLRME_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRME) | (((val) & 0x1) << 6))
+/* Set Receive Underflow Error Bit (5) */
+#define SSC_WHBSTATE_SETRUE (0x1 << 5)
+#define SSC_WHBSTATE_SETRUE_VAL(val) (((val) & 0x1) << 5)
+#define SSC_WHBSTATE_SETRUE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETRUE) | (((val) & 0x1) << 5))
+/* Clear Receive Underflow Error Bit (4) */
+#define SSC_WHBSTATE_CLRRUE (0x1 << 4)
+#define SSC_WHBSTATE_CLRRUE_VAL(val) (((val) & 0x1) << 4)
+#define SSC_WHBSTATE_CLRRUE_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRRUE) | (((val) & 0x1) << 4))
+/* Set Master Select Bit (3) */
+#define SSC_WHBSTATE_SETMS (0x1 << 3)
+#define SSC_WHBSTATE_SETMS_VAL(val) (((val) & 0x1) << 3)
+#define SSC_WHBSTATE_SETMS_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETMS) | (((val) & 0x1) << 3))
+/* Clear Master Select Bit (2) */
+#define SSC_WHBSTATE_CLRMS (0x1 << 2)
+#define SSC_WHBSTATE_CLRMS_VAL(val) (((val) & 0x1) << 2)
+#define SSC_WHBSTATE_CLRMS_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLRMS) | (((val) & 0x1) << 2))
+/* Set Enable Bit (1) */
+#define SSC_WHBSTATE_SETEN (0x1 << 1)
+#define SSC_WHBSTATE_SETEN_VAL(val) (((val) & 0x1) << 1)
+#define SSC_WHBSTATE_SETEN_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_SETEN) | (((val) & 0x1) << 1))
+/* Clear Enable Bit (0) */
+#define SSC_WHBSTATE_CLREN (0x1)
+#define SSC_WHBSTATE_CLREN_VAL(val) (((val) & 0x1) << 0)
+#define SSC_WHBSTATE_CLREN_SET(reg,val) (reg) = ((reg & ~SSC_WHBSTATE_CLREN) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * TB Register
+ ******************************************************************************/
+
+/* Transmit Data Register Value (31:0) */
+#define SSC_TB_TB_VAL (0xFFFFFFFFL)
+#define SSC_TB_TB_VAL_VAL(val) (((val) & 0xFFFFFFFFL) << 0)
+#define SSC_TB_TB_VAL_GET(val) ((((val) & SSC_TB_TB_VAL) >> 0) & 0xFFFFFFFFL)
+#define SSC_TB_TB_VAL_SET(reg,val) (reg) = ((reg & ~SSC_TB_TB_VAL) | (((val) & 0xFFFFFFFFL) << 0))
+
+/*******************************************************************************
+ * RB Register
+ ******************************************************************************/
+
+/* Receive Data Register Value (31:0) */
+#define SSC_RB_RB_VAL (0xFFFFFFFFL)
+#define SSC_RB_RB_VAL_GET(val) ((((val) & SSC_RB_RB_VAL) >> 0) & 0xFFFFFFFFL)
+
+/*******************************************************************************
+ * FSTAT Register
+ ******************************************************************************/
+
+/* Transmit FIFO Filling Level (13:8) */
+#define SSC_FSTAT_TXFFL (0x3f << 8)
+#define SSC_FSTAT_TXFFL_GET(val) ((((val) & SSC_FSTAT_TXFFL) >> 8) & 0x3f)
+/* Receive FIFO Filling Level (5:0) */
+#define SSC_FSTAT_RXFFL (0x3f)
+#define SSC_FSTAT_RXFFL_GET(val) ((((val) & SSC_FSTAT_RXFFL) >> 0) & 0x3f)
+
+/*******************************************************************************
+ * PISEL Register
+ ******************************************************************************/
+
+/* Slave Mode Clock Input Select (2) */
+#define SSC_PISEL_CIS (0x1 << 2)
+#define SSC_PISEL_CIS_VAL(val) (((val) & 0x1) << 2)
+#define SSC_PISEL_CIS_GET(val) ((((val) & SSC_PISEL_CIS) >> 2) & 0x1)
+#define SSC_PISEL_CIS_SET(reg,val) (reg) = ((reg & ~SSC_PISEL_CIS) | (((val) & 0x1) << 2))
+/* Slave Mode Receiver Input Select (1) */
+#define SSC_PISEL_SIS (0x1 << 1)
+#define SSC_PISEL_SIS_VAL(val) (((val) & 0x1) << 1)
+#define SSC_PISEL_SIS_GET(val) ((((val) & SSC_PISEL_SIS) >> 1) & 0x1)
+#define SSC_PISEL_SIS_SET(reg,val) (reg) = ((reg & ~SSC_PISEL_SIS) | (((val) & 0x1) << 1))
+/* Master Mode Receiver Input Select (0) */
+#define SSC_PISEL_MIS (0x1)
+#define SSC_PISEL_MIS_VAL(val) (((val) & 0x1) << 0)
+#define SSC_PISEL_MIS_GET(val) ((((val) & SSC_PISEL_MIS) >> 0) & 0x1)
+#define SSC_PISEL_MIS_SET(reg,val) (reg) = ((reg & ~SSC_PISEL_MIS) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * RXFCON Register
+ ******************************************************************************/
+
+/* Receive FIFO Interrupt Trigger Level (13:8) */
+#define SSC_RXFCON_RXFITL (0x3f << 8)
+#define SSC_RXFCON_RXFITL_VAL(val) (((val) & 0x3f) << 8)
+#define SSC_RXFCON_RXFITL_GET(val) ((((val) & SSC_RXFCON_RXFITL) >> 8) & 0x3f)
+#define SSC_RXFCON_RXFITL_SET(reg,val) (reg) = ((reg & ~SSC_RXFCON_RXFITL) | (((val) & 0x3f) << 8))
+/* Receive FIFO Flush (1) */
+#define SSC_RXFCON_RXFLU (0x1 << 1)
+#define SSC_RXFCON_RXFLU_VAL(val) (((val) & 0x1) << 1)
+#define SSC_RXFCON_RXFLU_SET(reg,val) (reg) = ((reg & ~SSC_RXFCON_RXFLU) | (((val) & 0x1) << 1))
+/* Receive FIFO Enable (0) */
+#define SSC_RXFCON_RXFEN (0x1)
+#define SSC_RXFCON_RXFEN_VAL(val) (((val) & 0x1) << 0)
+#define SSC_RXFCON_RXFEN_GET(val) ((((val) & SSC_RXFCON_RXFEN) >> 0) & 0x1)
+#define SSC_RXFCON_RXFEN_SET(reg,val) (reg) = ((reg & ~SSC_RXFCON_RXFEN) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * TXFCON Register
+ ******************************************************************************/
+
+/* Transmit FIFO Interrupt Trigger Level (13:8) */
+#define SSC_TXFCON_TXFITL (0x3f << 8)
+#define SSC_TXFCON_TXFITL_VAL(val) (((val) & 0x3f) << 8)
+#define SSC_TXFCON_TXFITL_GET(val) ((((val) & SSC_TXFCON_TXFITL) >> 8) & 0x3f)
+#define SSC_TXFCON_TXFITL_SET(reg,val) (reg) = ((reg & ~SSC_TXFCON_TXFITL) | (((val) & 0x3f) << 8))
+/* Transmit FIFO Flush (1) */
+#define SSC_TXFCON_TXFLU (0x1 << 1)
+#define SSC_TXFCON_TXFLU_VAL(val) (((val) & 0x1) << 1)
+#define SSC_TXFCON_TXFLU_SET(reg,val) (reg) = ((reg & ~SSC_TXFCON_TXFLU) | (((val) & 0x1) << 1))
+/* Transmit FIFO Enable (0) */
+#define SSC_TXFCON_TXFEN (0x1)
+#define SSC_TXFCON_TXFEN_VAL(val) (((val) & 0x1) << 0)
+#define SSC_TXFCON_TXFEN_GET(val) ((((val) & SSC_TXFCON_TXFEN) >> 0) & 0x1)
+#define SSC_TXFCON_TXFEN_SET(reg,val) (reg) = ((reg & ~SSC_TXFCON_TXFEN) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * BR Register
+ ******************************************************************************/
+
+/* Baudrate Timer Reload Register Value (15:0) */
+#define SSC_BR_BR_VAL (0xffff)
+#define SSC_BR_BR_VAL_VAL(val) (((val) & 0xffff) << 0)
+#define SSC_BR_BR_VAL_GET(val) ((((val) & SSC_BR_BR_VAL) >> 0) & 0xffff)
+#define SSC_BR_BR_VAL_SET(reg,val) (reg) = ((reg & ~SSC_BR_BR_VAL) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * BRSTAT Register
+ ******************************************************************************/
+
+/* Baudrate Timer Register Value (15:0) */
+#define SSC_BRSTAT_BT_VAL (0xffff)
+#define SSC_BRSTAT_BT_VAL_GET(val) ((((val) & SSC_BRSTAT_BT_VAL) >> 0) & 0xffff)
+
+/*******************************************************************************
+ * SFCON Register
+ ******************************************************************************/
+
+/* Pause Length (31:22) */
+#define SSC_SFCON_PLEN (0x3ff << 22)
+#define SSC_SFCON_PLEN_VAL(val) (((val) & 0x3ff) << 22)
+#define SSC_SFCON_PLEN_GET(val) ((((val) & SSC_SFCON_PLEN) >> 22) & 0x3ff)
+#define SSC_SFCON_PLEN_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_PLEN) | (((val) & 0x3ff) << 22))
+/* Stop After Pause (20) */
+#define SSC_SFCON_STOP (0x1 << 20)
+#define SSC_SFCON_STOP_VAL(val) (((val) & 0x1) << 20)
+#define SSC_SFCON_STOP_GET(val) ((((val) & SSC_SFCON_STOP) >> 20) & 0x1)
+#define SSC_SFCON_STOP_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_STOP) | (((val) & 0x1) << 20))
+/* Idle Clock Configuration (19:18) */
+#define SSC_SFCON_ICLK (0x3 << 18)
+#define SSC_SFCON_ICLK_VAL(val) (((val) & 0x3) << 18)
+#define SSC_SFCON_ICLK_GET(val) ((((val) & SSC_SFCON_ICLK) >> 18) & 0x3)
+#define SSC_SFCON_ICLK_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_ICLK) | (((val) & 0x3) << 18))
+/* Idle Data Configuration (17:16) */
+#define SSC_SFCON_IDAT (0x3 << 16)
+#define SSC_SFCON_IDAT_VAL(val) (((val) & 0x3) << 16)
+#define SSC_SFCON_IDAT_GET(val) ((((val) & SSC_SFCON_IDAT) >> 16) & 0x3)
+#define SSC_SFCON_IDAT_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_IDAT) | (((val) & 0x3) << 16))
+/* Data Length (15:4) */
+#define SSC_SFCON_DLEN (0xfff << 4)
+#define SSC_SFCON_DLEN_VAL(val) (((val) & 0xfff) << 4)
+#define SSC_SFCON_DLEN_GET(val) ((((val) & SSC_SFCON_DLEN) >> 4) & 0xfff)
+#define SSC_SFCON_DLEN_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_DLEN) | (((val) & 0xfff) << 4))
+/* Enable Interrupt After Pause (3) */
+#define SSC_SFCON_IAEN (0x1 << 3)
+#define SSC_SFCON_IAEN_VAL(val) (((val) & 0x1) << 3)
+#define SSC_SFCON_IAEN_GET(val) ((((val) & SSC_SFCON_IAEN) >> 3) & 0x1)
+#define SSC_SFCON_IAEN_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_IAEN) | (((val) & 0x1) << 3))
+/* Enable Interrupt Before Pause (2) */
+#define SSC_SFCON_IBEN (0x1 << 2)
+#define SSC_SFCON_IBEN_VAL(val) (((val) & 0x1) << 2)
+#define SSC_SFCON_IBEN_GET(val) ((((val) & SSC_SFCON_IBEN) >> 2) & 0x1)
+#define SSC_SFCON_IBEN_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_IBEN) | (((val) & 0x1) << 2))
+/* Serial Frame Enable (0) */
+#define SSC_SFCON_SFEN (0x1)
+#define SSC_SFCON_SFEN_VAL(val) (((val) & 0x1) << 0)
+#define SSC_SFCON_SFEN_GET(val) ((((val) & SSC_SFCON_SFEN) >> 0) & 0x1)
+#define SSC_SFCON_SFEN_SET(reg,val) (reg) = ((reg & ~SSC_SFCON_SFEN) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * SFSTAT Register
+ ******************************************************************************/
+
+/* Pause Count (31:22) */
+#define SSC_SFSTAT_PCNT (0x3ff << 22)
+#define SSC_SFSTAT_PCNT_GET(val) ((((val) & SSC_SFSTAT_PCNT) >> 22) & 0x3ff)
+/* Data Bit Count (15:4) */
+#define SSC_SFSTAT_DCNT (0xfff << 4)
+#define SSC_SFSTAT_DCNT_GET(val) ((((val) & SSC_SFSTAT_DCNT) >> 4) & 0xfff)
+/* Pause Busy (1) */
+#define SSC_SFSTAT_PBSY (0x1 << 1)
+#define SSC_SFSTAT_PBSY_GET(val) ((((val) & SSC_SFSTAT_PBSY) >> 1) & 0x1)
+/* Data Busy (0) */
+#define SSC_SFSTAT_DBSY (0x1)
+#define SSC_SFSTAT_DBSY_GET(val) ((((val) & SSC_SFSTAT_DBSY) >> 0) & 0x1)
+
+/*******************************************************************************
+ * GPOCON Register
+ ******************************************************************************/
+
+/* Output OUTn Is Chip Select (15:8) */
+#define SSC_GPOCON_ISCSBN (0xff << 8)
+#define SSC_GPOCON_ISCSBN_VAL(val) (((val) & 0xff) << 8)
+#define SSC_GPOCON_ISCSBN_GET(val) ((((val) & SSC_GPOCON_ISCSBN) >> 8) & 0xff)
+#define SSC_GPOCON_ISCSBN_SET(reg,val) (reg) = ((reg & ~SSC_GPOCON_ISCSBN) | (((val) & 0xff) << 8))
+/* Invert Output OUTn (7:0) */
+#define SSC_GPOCON_INVOUTN (0xff)
+#define SSC_GPOCON_INVOUTN_VAL(val) (((val) & 0xff) << 0)
+#define SSC_GPOCON_INVOUTN_GET(val) ((((val) & SSC_GPOCON_INVOUTN) >> 0) & 0xff)
+#define SSC_GPOCON_INVOUTN_SET(reg,val) (reg) = ((reg & ~SSC_GPOCON_INVOUTN) | (((val) & 0xff) << 0))
+
+/*******************************************************************************
+ * GPOSTAT Register
+ ******************************************************************************/
+
+/* Output Register Bit n (7:0) */
+#define SSC_GPOSTAT_OUTN (0xff)
+#define SSC_GPOSTAT_OUTN_GET(val) ((((val) & SSC_GPOSTAT_OUTN) >> 0) & 0xff)
+
+/*******************************************************************************
+ * WHBGPOSTAT Register
+ ******************************************************************************/
+
+/* Set Output Register Bit n (15:8) */
+#define SSC_WHBGPOSTAT_SETOUTN (0xff << 8)
+#define SSC_WHBGPOSTAT_SETOUTN_VAL(val) (((val) & 0xff) << 8)
+#define SSC_WHBGPOSTAT_SETOUTN_SET(reg,val) (reg) = ((reg & ~SSC_WHBGPOSTAT_SETOUTN) | (((val) & 0xff) << 8))
+/* Clear Output Register Bit n (7:0) */
+#define SSC_WHBGPOSTAT_CLROUTN (0xff)
+#define SSC_WHBGPOSTAT_CLROUTN_VAL(val) (((val) & 0xff) << 0)
+#define SSC_WHBGPOSTAT_CLROUTN_SET(reg,val) (reg) = ((reg & ~SSC_WHBGPOSTAT_CLROUTN) | (((val) & 0xff) << 0))
+
+/*******************************************************************************
+ * RXREQ Register
+ ******************************************************************************/
+
+/* Receive Count Value (15:0) */
+#define SSC_RXREQ_RXCNT (0xffff)
+#define SSC_RXREQ_RXCNT_VAL(val) (((val) & 0xffff) << 0)
+#define SSC_RXREQ_RXCNT_GET(val) ((((val) & SSC_RXREQ_RXCNT) >> 0) & 0xffff)
+#define SSC_RXREQ_RXCNT_SET(reg,val) (reg) = ((reg & ~SSC_RXREQ_RXCNT) | (((val) & 0xffff) << 0))
+
+/*******************************************************************************
+ * RXCNT Register
+ ******************************************************************************/
+
+/* Receive To Do Value (15:0) */
+#define SSC_RXCNT_TODO (0xffff)
+#define SSC_RXCNT_TODO_GET(val) ((((val) & SSC_RXCNT_TODO) >> 0) & 0xffff)
+
+/*******************************************************************************
+ * DMA_CON Register
+ ******************************************************************************/
+
+/* Receive Class (3:2) */
+#define SSC_DMA_CON_RXCLS (0x3 << 2)
+#define SSC_DMA_CON_RXCLS_VAL(val) (((val) & 0x3) << 2)
+#define SSC_DMA_CON_RXCLS_GET(val) ((((val) & SSC_DMA_CON_RXCLS) >> 2) & 0x3)
+#define SSC_DMA_CON_RXCLS_SET(reg,val) (reg) = ((reg & ~SSC_DMA_CON_RXCLS) | (((val) & 0x3) << 2))
+/* Transmit Path On (1) */
+#define SSC_DMA_CON_TXON (0x1 << 1)
+#define SSC_DMA_CON_TXON_VAL(val) (((val) & 0x1) << 1)
+#define SSC_DMA_CON_TXON_GET(val) ((((val) & SSC_DMA_CON_TXON) >> 1) & 0x1)
+#define SSC_DMA_CON_TXON_SET(reg,val) (reg) = ((reg & ~SSC_DMA_CON_TXON) | (((val) & 0x1) << 1))
+/* Receive Path On (0) */
+#define SSC_DMA_CON_RXON (0x1)
+#define SSC_DMA_CON_RXON_VAL(val) (((val) & 0x1) << 0)
+#define SSC_DMA_CON_RXON_GET(val) ((((val) & SSC_DMA_CON_RXON) >> 0) & 0x1)
+#define SSC_DMA_CON_RXON_SET(reg,val) (reg) = ((reg & ~SSC_DMA_CON_RXON) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * IRNEN Register
+ ******************************************************************************/
+
+/* Frame End Interrupt Request Enable (3) */
+#define SSC_IRNEN_F (0x1 << 3)
+#define SSC_IRNEN_F_VAL(val) (((val) & 0x1) << 3)
+#define SSC_IRNEN_F_GET(val) ((((val) & SSC_IRNEN_F) >> 3) & 0x1)
+#define SSC_IRNEN_F_SET(reg,val) (reg) = ((reg & ~SSC_IRNEN_F) | (((val) & 0x1) << 3))
+/* Error Interrupt Request Enable (2) */
+#define SSC_IRNEN_E (0x1 << 2)
+#define SSC_IRNEN_E_VAL(val) (((val) & 0x1) << 2)
+#define SSC_IRNEN_E_GET(val) ((((val) & SSC_IRNEN_E) >> 2) & 0x1)
+#define SSC_IRNEN_E_SET(reg,val) (reg) = ((reg & ~SSC_IRNEN_E) | (((val) & 0x1) << 2))
+/* Receive Interrupt Request Enable (1) */
+#define SSC_IRNEN_R (0x1 << 1)
+#define SSC_IRNEN_R_VAL(val) (((val) & 0x1) << 1)
+#define SSC_IRNEN_R_GET(val) ((((val) & SSC_IRNEN_R) >> 1) & 0x1)
+#define SSC_IRNEN_R_SET(reg,val) (reg) = ((reg & ~SSC_IRNEN_R) | (((val) & 0x1) << 1))
+/* Transmit Interrupt Request Enable (0) */
+#define SSC_IRNEN_T (0x1)
+#define SSC_IRNEN_T_VAL(val) (((val) & 0x1) << 0)
+#define SSC_IRNEN_T_GET(val) ((((val) & SSC_IRNEN_T) >> 0) & 0x1)
+#define SSC_IRNEN_T_SET(reg,val) (reg) = ((reg & ~SSC_IRNEN_T) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * IRNICR Register
+ ******************************************************************************/
+
+/* Frame End Interrupt Request (3) */
+#define SSC_IRNICR_F (0x1 << 3)
+#define SSC_IRNICR_F_GET(val) ((((val) & SSC_IRNICR_F) >> 3) & 0x1)
+/* Error Interrupt Request (2) */
+#define SSC_IRNICR_E (0x1 << 2)
+#define SSC_IRNICR_E_GET(val) ((((val) & SSC_IRNICR_E) >> 2) & 0x1)
+/* Receive Interrupt Request (1) */
+#define SSC_IRNICR_R (0x1 << 1)
+#define SSC_IRNICR_R_GET(val) ((((val) & SSC_IRNICR_R) >> 1) & 0x1)
+/* Transmit Interrupt Request (0) */
+#define SSC_IRNICR_T (0x1)
+#define SSC_IRNICR_T_GET(val) ((((val) & SSC_IRNICR_T) >> 0) & 0x1)
+
+/*******************************************************************************
+ * IRNCR Register
+ ******************************************************************************/
+
+/* Frame End Interrupt Request (3) */
+#define SSC_IRNCR_F (0x1 << 3)
+#define SSC_IRNCR_F_GET(val) ((((val) & SSC_IRNCR_F) >> 3) & 0x1)
+/* Error Interrupt Request (2) */
+#define SSC_IRNCR_E (0x1 << 2)
+#define SSC_IRNCR_E_GET(val) ((((val) & SSC_IRNCR_E) >> 2) & 0x1)
+/* Receive Interrupt Request (1) */
+#define SSC_IRNCR_R (0x1 << 1)
+#define SSC_IRNCR_R_GET(val) ((((val) & SSC_IRNCR_R) >> 1) & 0x1)
+/* Transmit Interrupt Request (0) */
+#define SSC_IRNCR_T (0x1)
+#define SSC_IRNCR_T_GET(val) ((((val) & SSC_IRNCR_T) >> 0) & 0x1)
+
+#endif /* __SSC_REG_H */
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/status_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/status_reg.h
new file mode 100644
index 0000000000..100230fa7c
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/status_reg.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __STATUS_REG_H
+#define __STATUS_REG_H
+
+#define status_r32(reg) ltq_r32(&status->reg)
+#define status_w32(val, reg) ltq_w32(val, &status->reg)
+#define status_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &status->reg)
+
+/** STATUS register structure */
+struct svip_reg_status {
+ unsigned long fuse_deu; /* 0x0000 */
+ unsigned long fuse_cpu; /* 0x0004 */
+ unsigned long fuse_pll; /* 0x0008 */
+ unsigned long chipid; /* 0x000C */
+ unsigned long config; /* 0x0010 */
+ unsigned long chip_loc; /* 0x0014 */
+ unsigned long fuse_spare; /* 0x0018 */
+};
+
+/*******************************************************************************
+ * Fuse for DEU Settings
+ ******************************************************************************/
+
+/* Fuse for Enabling the TRNG (6) */
+#define STATUS_FUSE_DEU_TRNG (0x1 << 6)
+#define STATUS_FUSE_DEU_TRNG_GET(val) ((((val) & STATUS_FUSE_DEU_TRNG) >> 6) & 0x1)
+/* Fuse for Enabling the DES Submodule (5) */
+#define STATUS_FUSE_DEU_DES (0x1 << 5)
+#define STATUS_FUSE_DEU_DES_GET(val) ((((val) & STATUS_FUSE_DEU_DES) >> 5) & 0x1)
+/* Fuse for Enabling the 3DES Submodule (4) */
+#define STATUS_FUSE_DEU_3DES (0x1 << 4)
+#define STATUS_FUSE_DEU_3DES_GET(val) ((((val) & STATUS_FUSE_DEU_3DES) >> 4) & 0x1)
+/* Fuse for Enabling the AES Submodule (3) */
+#define STATUS_FUSE_DEU_AES (0x1 << 3)
+#define STATUS_FUSE_DEU_AES_GET(val) ((((val) & STATUS_FUSE_DEU_AES) >> 3) & 0x1)
+/* Fuse for Enabling the HASH Submodule (2) */
+#define STATUS_FUSE_DEU_HASH (0x1 << 2)
+#define STATUS_FUSE_DEU_HASH_GET(val) ((((val) & STATUS_FUSE_DEU_HASH) >> 2) & 0x1)
+/* Fuse for Enabling the ARC4 Submodule (1) */
+#define STATUS_FUSE_DEU_ARC4 (0x1 << 1)
+#define STATUS_FUSE_DEU_ARC4_GET(val) ((((val) & STATUS_FUSE_DEU_ARC4) >> 1) & 0x1)
+/* Fuse for Enabling the DEU Module (0) */
+#define STATUS_FUSE_DEU_DEU (0x1)
+#define STATUS_FUSE_DEU_DEU_GET(val) ((((val) & STATUS_FUSE_DEU_DEU) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Fuse for CPU Settings
+ ******************************************************************************/
+
+/* Fuse for Enabling CPU5 (5) */
+#define STATUS_FUSE_CPU_CPU5 (0x1 << 5)
+#define STATUS_FUSE_CPU_CPU5_GET(val) ((((val) & STATUS_FUSE_CPU_CPU5) >> 5) & 0x1)
+/* Fuse for Enabling the CPU4 (4) */
+#define STATUS_FUSE_CPU_CPU4 (0x1 << 4)
+#define STATUS_FUSE_CPU_CPU4_GET(val) ((((val) & STATUS_FUSE_CPU_CPU4) >> 4) & 0x1)
+/* Fuse for Enabling the CPU3 (3) */
+#define STATUS_FUSE_CPU_CPU3 (0x1 << 3)
+#define STATUS_FUSE_CPU_CPU3_GET(val) ((((val) & STATUS_FUSE_CPU_CPU3) >> 3) & 0x1)
+/* Fuse for Enabling the CPU2 (2) */
+#define STATUS_FUSE_CPU_CPU2 (0x1 << 2)
+#define STATUS_FUSE_CPU_CPU2_GET(val) ((((val) & STATUS_FUSE_CPU_CPU2) >> 2) & 0x1)
+/* Fuse for Enabling the CPU1 (1) */
+#define STATUS_FUSE_CPU_CPU1 (0x1 << 1)
+#define STATUS_FUSE_CPU_CPU1_GET(val) ((((val) & STATUS_FUSE_CPU_CPU1) >> 1) & 0x1)
+/* Fuse for Enabling the CPU0 (0) */
+#define STATUS_FUSE_CPU_CPU0 (0x1)
+#define STATUS_FUSE_CPU_CPU0_GET(val) ((((val) & STATUS_FUSE_CPU_CPU0) >> 0) & 0x1)
+
+/*******************************************************************************
+ * Fuse for PLL Settings
+ ******************************************************************************/
+
+/* Fuse for Enabling PLL (7:0) */
+#define STATUS_FUSE_PLL_PLL (0xff)
+#define STATUS_FUSE_PLL_PLL_GET(val) ((((val) & STATUS_FUSE_PLL_PLL) >> 0) & 0xff)
+
+/*******************************************************************************
+ * Chip Identification Register
+ ******************************************************************************/
+
+/* Chip Version Number (31:28) */
+#define STATUS_CHIPID_VERSION (0xf << 28)
+#define STATUS_CHIPID_VERSION_GET(val) ((((val) & STATUS_CHIPID_VERSION) >> 28) & 0xf)
+/* Part Number (27:12) */
+#define STATUS_CHIPID_PART_NUMBER (0xffff << 12)
+#define STATUS_CHIPID_PART_NUMBER_GET(val) ((((val) & STATUS_CHIPID_PART_NUMBER) >> 12) & 0xffff)
+/* Manufacturer ID (11:1) */
+#define STATUS_CHIPID_MANID (0x7ff << 1)
+#define STATUS_CHIPID_MANID_GET(val) ((((val) & STATUS_CHIPID_MANID) >> 1) & 0x7ff)
+
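+/*
+ * Usage sketch (illustrative only): the chip version can be read through
+ * the status_r32() accessor defined above together with the CHIPID field
+ * macros. The global "status" pointer referenced by status_r32() is
+ * assumed to be set up elsewhere to map struct svip_reg_status.
+ */
+#if 0
+static unsigned int svip_example_chip_version(void)
+{
+	unsigned long chipid = status_r32(chipid);
+
+	return STATUS_CHIPID_VERSION_GET(chipid);
+}
+#endif
+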
+/*******************************************************************************
+ * Chip Configuration Register
+ ******************************************************************************/
+
+/* Number of Analog Channels (8:5) */
+#define STATUS_CONFIG_ANA_CHAN (0xf << 5)
+#define STATUS_CONFIG_ANA_CHAN_GET(val) ((((val) & STATUS_CONFIG_ANA_CHAN) >> 5) & 0xf)
+/* Clock Mode (4) */
+#define STATUS_CONFIG_CLK_MODE (0x1 << 4)
+#define STATUS_CONFIG_CLK_MODE_GET(val) ((((val) & STATUS_CONFIG_CLK_MODE) >> 4) & 0x1)
+/* Subversion Number (3:0) */
+#define STATUS_CONFIG_SUB_VERS (0xF)
+#define STATUS_CONFIG_SUB_VERS_GET(val) ((((val) & STATUS_CONFIG_SUB_VERS) >> 0) & 0xF)
+
+/*******************************************************************************
+ * Chip Location Register
+ ******************************************************************************/
+
+/* Chip Lot ID (31:16) */
+#define STATUS_CHIP_LOC_CHIP_LOT (0xffff << 16)
+#define STATUS_CHIP_LOC_CHIP_LOT_GET(val) ((((val) & STATUS_CHIP_LOC_CHIP_LOT) >> 16) & 0xffff)
+/* Chip X Coordinate (15:8) */
+#define STATUS_CHIP_LOC_CHIP_X (0xff << 8)
+#define STATUS_CHIP_LOC_CHIP_X_GET(val) ((((val) & STATUS_CHIP_LOC_CHIP_X) >> 8) & 0xff)
+/* Chip Y Coordinate (7:0) */
+#define STATUS_CHIP_LOC_CHIP_Y (0xff)
+#define STATUS_CHIP_LOC_CHIP_Y_GET(val) ((((val) & STATUS_CHIP_LOC_CHIP_Y) >> 0) & 0xff)
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_dma.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_dma.h
new file mode 100644
index 0000000000..5c34bb6902
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_dma.h
@@ -0,0 +1,245 @@
+/************************************************************************
+ *
+ * Copyright (c) 2007
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+
+#ifndef __SVIP_DMA_H
+#define __SVIP_DMA_H
+
+#define LTQ_DMA_CH_ON 1
+#define LTQ_DMA_CH_OFF 0
+#define LTQ_DMA_CH_DEFAULT_WEIGHT 100
+
+#define DMA_OWN 1
+#define CPU_OWN 0
+#define DMA_MAJOR 250
+
+/* Descriptors */
+#define DMA_DESC_OWN_CPU 0x0
+#define DMA_DESC_OWN_DMA 0x80000000
+#define DMA_DESC_CPT_SET 0x40000000
+#define DMA_DESC_SOP_SET 0x20000000
+#define DMA_DESC_EOP_SET 0x10000000
+
+struct rx_desc {
+ union {
+ struct {
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ volatile u32 data_length:16;
+ volatile u32 reserve2:7;
+ volatile u32 byte_offset:2;
+ volatile u32 reserve1:3;
+ volatile u32 eop:1;
+ volatile u32 sop:1;
+ volatile u32 c:1;
+ volatile u32 own:1;
+#else
+ volatile u32 own:1;
+ volatile u32 c:1;
+ volatile u32 sop:1;
+ volatile u32 eop:1;
+ volatile u32 reserve1:3;
+ volatile u32 byte_offset:2;
+ volatile u32 reserve2:7;
+ volatile u32 data_length:16;
+#endif
+ } field;
+
+ volatile u32 word;
+ } status;
+
+ volatile u32 data_pointer;
+};
+
+struct tx_desc {
+ union {
+ struct {
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ volatile u32 data_length:16;
+ volatile u32 reserved:7;
+ volatile u32 byte_offset:5;
+ volatile u32 eop:1;
+ volatile u32 sop:1;
+ volatile u32 c:1;
+ volatile u32 own:1;
+#else
+ volatile u32 own:1;
+ volatile u32 c:1;
+ volatile u32 sop:1;
+ volatile u32 eop:1;
+ volatile u32 byte_offset:5;
+ volatile u32 reserved:7;
+ volatile u32 data_length:16;
+#endif
+ } field;
+
+ volatile u32 word;
+ } status;
+
+ volatile u32 data_pointer;
+};
+
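+/*
+ * Usage sketch (illustrative only, not taken from the SVIP DMA driver):
+ * the CPU fills a transmit descriptor and then hands it over by setting
+ * the OWN bit; the descriptor and buffer management around this call are
+ * made-up assumptions for the example.
+ */
+#if 0
+static void dma_example_queue_tx(struct tx_desc *desc, u32 buf_phys, int len)
+{
+	desc->data_pointer = buf_phys;
+	/* start and end of packet in one descriptor, length in bits 15:0 */
+	desc->status.word = DMA_DESC_OWN_DMA | DMA_DESC_SOP_SET |
+			    DMA_DESC_EOP_SET | (len & 0xffff);
+}
+#endif
+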
+/* DMA pseudo interrupts notified to switch driver */
+#define RCV_INT 0x01
+#define TX_BUF_FULL_INT 0x02
+#define TRANSMIT_CPT_INT 0x04
+#define CHANNEL_CLOSED 0x10
+
+/* Parameters for switch DMA device */
+#define DEFAULT_SW_CHANNEL_WEIGHT 3
+#define DEFAULT_SW_PORT_WEIGHT 7
+
+#define DEFAULT_SW_TX_BURST_LEN 2 /* 2 words, 4 words, 8 words */
+#define DEFAULT_SW_RX_BURST_LEN 2 /* 2 words, 4 words, 8 words */
+
+#define DEFAULT_SW_TX_CHANNEL_NUM 4
+#define DEFAULT_SW_RX_CHANNEL_NUM 4
+
+#define DEFAULT_SW_TX_CHANNEL_DESCR_NUM 20
+#define DEFAULT_SW_RX_CHANNEL_DESCR_NUM 20
+
+/* Parameters for SSC DMA device */
+#define DEFAULT_SSC_CHANNEL_WEIGHT 3
+#define DEFAULT_SSC_PORT_WEIGHT 7
+
+#define DEFAULT_SSC_TX_CHANNEL_CLASS 3
+#define DEFAULT_SSC_RX_CHANNEL_CLASS 0
+
+#define DEFAULT_SSC_TX_BURST_LEN 2 /* 2 words, 4 words, 8 words */
+#define DEFAULT_SSC_RX_BURST_LEN 2 /* 2 words, 4 words, 8 words */
+
+#define DEFAULT_SSC0_TX_CHANNEL_NUM 1
+#define DEFAULT_SSC0_RX_CHANNEL_NUM 1
+#define DEFAULT_SSC1_TX_CHANNEL_NUM 1
+#define DEFAULT_SSC1_RX_CHANNEL_NUM 1
+
+#define DEFAULT_SSC_TX_CHANNEL_DESCR_NUM 10
+#define DEFAULT_SSC_RX_CHANNEL_DESCR_NUM 10
+
+/* Parameters for memory DMA device */
+#define DEFAULT_MEM_CHANNEL_WEIGHT 3
+#define DEFAULT_MEM_PORT_WEIGHT 7
+
+#define DEFAULT_MEM_TX_BURST_LEN 4 /* 2 words, 4 words, 8 words */
+#define DEFAULT_MEM_RX_BURST_LEN 4 /* 2 words, 4 words, 8 words */
+
+#define DEFAULT_MEM_TX_CHANNEL_NUM 1
+#define DEFAULT_MEM_RX_CHANNEL_NUM 1
+
+#define DEFAULT_MEM_TX_CHANNEL_DESCR_NUM 2
+#define DEFAULT_MEM_RX_CHANNEL_DESCR_NUM 2
+
+/* Parameters for DEU DMA device */
+#define DEFAULT_DEU_CHANNEL_WEIGHT 1
+#define DEFAULT_DEU_PORT_WEIGHT 1
+
+#define DEFAULT_DEU_TX_BURST_LEN 4 /* 2 words, 4 words, 8 words */
+#define DEFAULT_DEU_RX_BURST_LEN 4 /* 2 words, 4 words, 8 words */
+
+#define DEFAULT_DEU_TX_CHANNEL_DESCR_NUM 20
+#define DEFAULT_DEU_RX_CHANNEL_DESCR_NUM 20
+
+#define DMA_DESCR_NUM 30 /* number of descriptors per channel */
+
+enum dma_dir_t {
+ DIR_RX = 0,
+ DIR_TX = 1,
+};
+
+struct dma_device_info;
+
+struct dma_channel_info {
+ /*Pointer to the peripheral device that is using this channel*/
+ /*const*/ struct dma_device_info *dma_dev;
+ /*direction*/
+ const enum dma_dir_t dir; /*RX or TX*/
+ /*class for this channel for QoS*/
+ int pri;
+ /*irq number*/
+ const int irq;
+ /*relative channel number*/
+ const int rel_chan_no;
+ /*absolute channel number*/
+ int abs_chan_no;
+
+ /*specify byte_offset*/
+ int byte_offset;
+ int tx_weight;
+
+ /*descriptor parameter*/
+ int desc_base;
+ int desc_len;
+ int curr_desc;
+ int prev_desc;/*only used if it is a tx channel*/
+
+ /*weight setting for WFQ algorithm*/
+ int weight;
+ int default_weight;
+
+ int packet_size;
+
+ /*status of this channel*/
+ int control; /*on or off*/
+ int xfer_cnt;
+ int dur; /*descriptor underrun*/
+
+ /**optional information for the upper layer devices*/
+ void *opt[DMA_DESCR_NUM];
+
+ /*channel operations*/
+ int (*open)(struct dma_channel_info *ch);
+ int (*close)(struct dma_channel_info *ch);
+ int (*reset)(struct dma_channel_info *ch);
+ void (*enable_irq)(struct dma_channel_info *ch);
+ void (*disable_irq)(struct dma_channel_info *ch);
+};
+
+
+struct dma_device_info {
+ /*device name of this peripheral*/
+ const char device_name[16];
+ const int max_rx_chan_num;
+ const int max_tx_chan_num;
+ int drop_enable;
+
+ int reserved;
+
+ int tx_burst_len;
+ int rx_burst_len;
+ int tx_weight;
+
+ int current_tx_chan;
+ int current_rx_chan;
+ int num_tx_chan;
+ int num_rx_chan;
+ int tx_endianness_mode;
+ int rx_endianness_mode;
+ struct dma_channel_info *tx_chan[4];
+ struct dma_channel_info *rx_chan[4];
+
+ /*functions, optional*/
+ u8 *(*buffer_alloc)(int len, int *offset, void **opt);
+ void (*buffer_free)(u8 *dataptr, void *opt);
+ int (*intr_handler)(struct dma_device_info *dma_dev, int status);
+
+ /* used by peripheral driver only */
+ void *priv;
+};
+
+struct dma_device_info *dma_device_reserve(char *dev_name);
+int dma_device_release(struct dma_device_info *dma_dev);
+int dma_device_register(struct dma_device_info *dma_dev);
+int dma_device_unregister(struct dma_device_info *dma_dev);
+int dma_device_read(struct dma_device_info *dma_dev, u8 **dataptr, void **opt);
+int dma_device_write(struct dma_device_info *dma_dev, u8 *dataptr,
+ int len, void *opt);
+
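+/*
+ * Usage sketch (illustrative only): a peripheral driver reserves its DMA
+ * device by name, fills in channel counts, burst lengths and callbacks,
+ * and registers it. The device name "SSC0" and the example_* callbacks
+ * are placeholders, not names defined by this header.
+ */
+#if 0
+static int example_dma_setup(void)
+{
+	struct dma_device_info *dma_dev = dma_device_reserve("SSC0");
+
+	if (!dma_dev)
+		return -ENODEV;
+
+	dma_dev->num_tx_chan = DEFAULT_SSC0_TX_CHANNEL_NUM;
+	dma_dev->num_rx_chan = DEFAULT_SSC0_RX_CHANNEL_NUM;
+	dma_dev->tx_burst_len = DEFAULT_SSC_TX_BURST_LEN;
+	dma_dev->rx_burst_len = DEFAULT_SSC_RX_BURST_LEN;
+	dma_dev->intr_handler = example_intr_handler;	/* placeholder */
+	dma_dev->buffer_alloc = example_buffer_alloc;	/* placeholder */
+	dma_dev->buffer_free = example_buffer_free;	/* placeholder */
+
+	return dma_device_register(dma_dev);
+}
+#endif
+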
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_irq.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_irq.h
new file mode 100644
index 0000000000..bca8df9fd5
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_irq.h
@@ -0,0 +1,35 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2010 Lantiq
+ */
+#ifndef __SVIP_IRQ_H
+#define __SVIP_IRQ_H
+
+#define IM_NUM 6
+
+#define INT_NUM_IRQ0 8
+#define INT_NUM_IM0_IRL0 (INT_NUM_IRQ0 + 0)
+#define INT_NUM_IM1_IRL0 (INT_NUM_IM0_IRL0 + 32)
+#define INT_NUM_IM2_IRL0 (INT_NUM_IM1_IRL0 + 32)
+#define INT_NUM_IM3_IRL0 (INT_NUM_IM2_IRL0 + 32)
+#define INT_NUM_IM4_IRL0 (INT_NUM_IM3_IRL0 + 32)
+#define INT_NUM_EXTRA_START (INT_NUM_IM4_IRL0 + 32)
+#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
+
+#define INT_NUM_IM5_IRL0 (INT_NUM_IRQ0 + 160)
+#define MIPS_CPU_TIMER_IRQ (INT_NUM_IM5_IRL0 + 2)
+
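+/*
+ * Resulting layout, derived from the definitions above: the six interrupt
+ * modules each provide 32 lines starting at hardware IRQ 8, so IM0 covers
+ * IRQs 8..39, IM1 covers 40..71, and in general line n of module m maps to
+ * INT_NUM_IM0_IRL0 + m * INT_NUM_IM_OFFSET + n (IM5 line 2 is the MIPS CPU
+ * timer IRQ, 170).
+ */
+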
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_mux.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_mux.h
new file mode 100644
index 0000000000..8ca3285f2c
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_mux.h
@@ -0,0 +1,467 @@
+/************************************************************************
+ *
+ * Copyright (c) 2007
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+
+#ifndef __SVIP_MUX_H
+#define __SVIP_MUX_H
+
+#define LTQ_MUX_P0_PINS 20
+#define LTQ_MUX_P1_PINS 20
+#define LTQ_MUX_P2_PINS 19
+#define LTQ_MUX_P3_PINS 20
+#define LTQ_MUX_P4_PINS 24
+
+struct ltq_mux_pin {
+ int dirin;
+ int puen;
+ int altsel0;
+ int altsel1;
+};
+
+struct ltq_mux_settings {
+ const struct ltq_mux_pin *mux_p0;
+ const struct ltq_mux_pin *mux_p1;
+ const struct ltq_mux_pin *mux_p2;
+ const struct ltq_mux_pin *mux_p3;
+ const struct ltq_mux_pin *mux_p4;
+};
+
+#define LTQ_MUX_P0_19_EXINT16 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_19 { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P0_18_EJ_BRKIN { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_18 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_18_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P0_17_EXINT10 { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_17 { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_17_ASC1_RXD { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P0_16_EXINT9 { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_16 { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_16_ASC1_TXD { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P0_15_EXINT8 { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_15 { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_15_ASC0_RXD { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P0_14_EXINT7 { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_14 { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_14_ASC0_TXD { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P0_13_SSC0_CS7 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_13_EXINT6 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_13 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_13_SSC1_CS7 { 0, 0, 0, 1 }
+#define LTQ_MUX_P0_13_SSC1_INT { 0, 0, 1, 1 }
+
+#define LTQ_MUX_P0_12_SSC0_CS6 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_12_EXINT5 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_12 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_12_SSC1_CS6 { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_11_SSC0_CS5 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_11_EXINT4 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_11 { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_11_SSC1_CS5 { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_10_SSC0_CS4 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_10_EXINT3 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_10 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_10_SSC1_CS4 { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_9_SSC0_CS3 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_9_EXINT2 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_9 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_9_SSC1_CS3 { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_8_SSC0_CS2 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_8_EXINT1 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_8 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_8_SSC1_CS2 { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_7_SSC0_CS1 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_7_EXINT0 { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_7 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_7_SSC1_CS1 { 0, 0, 0, 1 }
+#define LTQ_MUX_P0_7_SSC1_CS0 { 1, 0, 0, 1 }
+#define LTQ_MUX_P0_7_SSC2_CS0 { 1, 0, 1, 1 }
+
+#define LTQ_MUX_P0_6_SSC0_CS0 { 0, 1, 0, 0 }
+#define LTQ_MUX_P0_6 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_6_IN { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_6_SSC1_CS0 { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_5_SSC1_SCLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_5 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_5_IN { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_5_SSC2_CLK { 1, 0, 0, 1 }
+
+#define LTQ_MUX_P0_4_SSC1_MRST { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_4 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_4_IN { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_4_SSC2_MRST { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_3_SSC1_MTSR { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_3 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_3_IN { 1, 0, 1, 0 }
+#define LTQ_MUX_P0_3_SSC2_MTSR { 0, 0, 0, 1 }
+
+#define LTQ_MUX_P0_2_SSC0_SCLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_2 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_2_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P0_1_SSC0_MRST { 1, 0, 0, 0 }
+#define LTQ_MUX_P0_1 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_1_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P0_0_SSC0_MTSR { 0, 0, 0, 0 }
+#define LTQ_MUX_P0_0 { 0, 0, 1, 0 }
+#define LTQ_MUX_P0_0_IN { 1, 0, 1, 0 }
+
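+/*
+ * Usage sketch (illustrative only): board support code is expected to build
+ * per-port pin tables from the definitions above and pass them to the mux
+ * setup code via struct ltq_mux_settings. The fragment below is a made-up
+ * example, not a real board configuration.
+ */
+#if 0
+static const struct ltq_mux_pin example_p0[LTQ_MUX_P0_PINS] = {
+	[0]  = LTQ_MUX_P0_0_SSC0_MTSR,
+	[1]  = LTQ_MUX_P0_1_SSC0_MRST,
+	[2]  = LTQ_MUX_P0_2_SSC0_SCLK,
+	[14] = LTQ_MUX_P0_14_ASC0_TXD,
+	[15] = LTQ_MUX_P0_15_ASC0_RXD,
+};
+
+static const struct ltq_mux_settings example_mux = {
+	.mux_p0 = example_p0,
+};
+#endif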
+
+#define LTQ_MUX_P1_19_PCM3_TC1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_19_EXINT15 { 1, 0, 1, 0 }
+#define LTQ_MUX_P1_19 { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P1_18_PCM3_FSC { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_18_EXINT11 { 1, 0, 1, 0 }
+#define LTQ_MUX_P1_18 { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P1_17_PCM3_PCL { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_17_EXINT12 { 1, 0, 1, 0 }
+#define LTQ_MUX_P1_17 { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P1_16_PCM3_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_16_EXINT13 { 1, 0, 1, 0 }
+#define LTQ_MUX_P1_16 { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P1_15_PCM3_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_15_EXINT14 { 1, 0, 1, 0 }
+#define LTQ_MUX_P1_15 { 0, 0, 1, 0 }
+
+#define LTQ_MUX_P1_14_PCM2_TC1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_14 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_14_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_13_PCM2_FSC { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_13 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_13_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_12_PCM2_PCL { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_12 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_12_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_11_PCM2_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_11 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_11_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_10_PCM2_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_10 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_10_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_9_PCM1_TC1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_9 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_9_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_8_PCM1_FSC { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_8 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_8_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_7_PCM1_PCL { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_7 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_7_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_6_PCM1_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_6 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_6_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_5_PCM1_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_5 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_5_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_4_PCM0_TC1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_4 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_4_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_3_PCM0_FSC { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_3 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_3_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_2_PCM0_PCL { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_2 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_2_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_1_PCM0_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_1 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_1_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P1_0_PCM0_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P1_0 { 0, 0, 1, 0 }
+#define LTQ_MUX_P1_0_IN { 1, 0, 1, 0 }
+
+
+#define LTQ_MUX_P2_18_EBU_BC1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_18 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_18_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_17_EBU_BC0 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_17 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_17_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_16_EBU_RDBY { 1, 0, 0, 0 }
+#define LTQ_MUX_P2_16 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_16_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_15_EBU_WAIT { 1, 0, 0, 0 }
+#define LTQ_MUX_P2_15 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_15_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_14_EBU_ALE { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_14 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_14_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_13_EBU_WR { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_13 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_13_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_12_EBU_RD { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_12 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_12_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_11_EBU_A11 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_11 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_11_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_10_EBU_A10 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_10 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_10_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_9_EBU_A9 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_9 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_9_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_8_EBU_A8 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_8 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_8_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_7_EBU_A7 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_7 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_7_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_6_EBU_A6 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_6 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_6_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_5_EBU_A5 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_5 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_5_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_4_EBU_A4 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_4 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_4_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_3_EBU_A3 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_3 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_3_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_2_EBU_A2 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_2 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_2_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_1_EBU_A1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_1 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_1_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P2_0_EBU_A0 { 0, 0, 0, 0 }
+#define LTQ_MUX_P2_0 { 0, 0, 1, 0 }
+#define LTQ_MUX_P2_0_IN { 1, 0, 1, 0 }
+
+
+#define LTQ_MUX_P3_19_EBU_CS3 { 0, 0, 0, 0 }
+#define LTQ_MUX_P3_19 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_19_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_18_EBU_CS2 { 0, 0, 0, 0 }
+#define LTQ_MUX_P3_18 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_18_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_17_EBU_CS1 { 0, 0, 0, 0 }
+#define LTQ_MUX_P3_17 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_17_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_16_EBU_CS0 { 0, 0, 0, 0 }
+#define LTQ_MUX_P3_16 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_16_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_15_EBU_AD15 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_15 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_15_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_14_EBU_AD14 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_14 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_14_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_13_EBU_AD13 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_13 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_13_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_12_EBU_AD12 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_12 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_12_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_11_EBU_AD11 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_11 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_11_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_10_EBU_AD10 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_10 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_10_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_9_EBU_AD9 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_9 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_9_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_8_EBU_AD8 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_8 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_8_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_7_EBU_AD7 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_7 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_7_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_6_EBU_AD6 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_6 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_6_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_5_EBU_AD5 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_5 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_5_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_4_EBU_AD4 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_4 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_4_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_3_EBU_AD3 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_3 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_3_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_2_EBU_AD2 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_2 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_2_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_1_EBU_AD1 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_1 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_1_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P3_0_EBU_AD0 { 1, 0, 0, 0 }
+#define LTQ_MUX_P3_0 { 0, 0, 1, 0 }
+#define LTQ_MUX_P3_0_IN { 1, 0, 1, 0 }
+
+
+#define LTQ_MUX_P4_23_SSLIC7_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_23 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_23_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_22_SSLIC7_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_22 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_22_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_21_SSLIC7_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_21 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_21_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_20_SSLIC6_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_20 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_20_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_19_SSLIC6_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_19 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_19_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_18_SSLIC6_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_18 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_18_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_17_SSLIC5_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_17 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_17_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_16_SSLIC5_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_16 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_16_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_15_SSLIC5_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_15 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_15_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_14_SSLIC4_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_14 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_14_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_13_SSLIC4_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_13 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_13_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_12_SSLIC4_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_12 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_12_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_11_SSLIC3_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_11 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_11_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_10_SSLIC3_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_10 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_10_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_9_SSLIC3_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_9 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_9_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_8_SSLIC2_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_8 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_8_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_7_SSLIC2_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_7 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_7_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_6_SSLIC2_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_6 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_6_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_5_SSLIC1_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_5 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_5_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_4_SSLIC1_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_4 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_4_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_3_SSLIC1_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_3 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_3_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_2_SSLIC0_CLK { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_2 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_2_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_1_SSLIC0_RX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_1 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_1_IN { 1, 0, 1, 0 }
+
+#define LTQ_MUX_P4_0_SSLIC0_TX { 0, 0, 0, 0 }
+#define LTQ_MUX_P4_0 { 0, 0, 1, 0 }
+#define LTQ_MUX_P4_0_IN { 1, 0, 1, 0 }
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_pms.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_pms.h
new file mode 100644
index 0000000000..7329711858
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/svip_pms.h
@@ -0,0 +1,23 @@
+/************************************************************************
+ *
+ * Copyright (c) 2007
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+
+#ifndef __SVIP_PMS_H
+#define __SVIP_PMS_H
+
+void svip_sys1_clk_enable(u32 mask);
+int svip_sys1_clk_is_enabled(u32 mask);
+
+void svip_sys2_clk_enable(u32 mask);
+int svip_sys2_clk_is_enabled(u32 mask);
+
+#endif
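
For context, the two prototype pairs above are meant to be driven with the SYS1/SYS2 clock-gate masks defined in the register headers added further down in this patch. A minimal, hypothetical caller might look like the sketch below; the chosen masks and the init function are illustrative only and are not part of this patch:

/* Illustrative sketch: gate on the GPTC and DMA clocks before their
 * drivers touch the hardware; the SYS1_CLKENR_* masks come from sys1_reg.h. */
static int __init example_svip_clk_setup(void)
{
	svip_sys1_clk_enable(SYS1_CLKENR_GPTC | SYS1_CLKENR_DMA);

	if (!svip_sys1_clk_is_enabled(SYS1_CLKENR_GPTC))
		return -ENODEV;

	return 0;
}
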
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys0_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys0_reg.h
new file mode 100644
index 0000000000..7428cccc87
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys0_reg.h
@@ -0,0 +1,165 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __SYS0_REG_H
+#define __SYS0_REG_H
+
+#define sys0_r32(reg) ltq_r32(&sys0->reg)
+#define sys0_w32(val, reg) ltq_w32(val, &sys0->reg)
+#define sys0_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &sys0->reg)
+
+/** SYS0 register structure */
+struct svip_reg_sys0 {
+ unsigned long sr; /* 0x0000 */
+ unsigned long bcr; /* 0x0004 */
+ unsigned long pll1cr; /* 0x0008 */
+ unsigned long pll2cr; /* 0x000c */
+ unsigned long tscr; /* 0x0010 */
+ unsigned long phyclkr; /* 0x0014 */
+};
+
+/*******************************************************************************
+ * SYS0 Status Register
+ ******************************************************************************/
+
+/* Endian select pin (31) */
+#define SYS0_SR_ESEL (0x1 << 31)
+#define SYS0_SR_ESEL_GET(val) ((((val) & SYS0_SR_ESEL) >> 31) & 0x1)
+/* Boot mode pins (27:24) */
+#define SYS0_SR_BMODE (0xf << 24)
+#define SYS0_SR_BMODE_GET(val) ((((val) & SYS0_SR_BMODE) >> 24) & 0xf)
+/* PLL2 Lock (18) */
+#define SYS0_SR_PLL2LOCK (0x1 << 18)
+#define SYS0_SR_PLL2LOCK_GET(val) ((((val) & SYS0_SR_PLL2LOCK) >> 18) & 0x1)
+/* PLL1 Lock (17) */
+#define SYS0_SR_PLL1LOCK (0x1 << 17)
+#define SYS0_SR_PLL1LOCK_GET(val) ((((val) & SYS0_SR_PLL1LOCK) >> 17) & 0x1)
+/* Discrete Timing Oscillator Lock (16) */
+#define SYS0_SR_DTOLOCK (0x1 << 16)
+#define SYS0_SR_DTOLOCK_GET(val) ((((val) & SYS0_SR_DTOLOCK) >> 16) & 0x1)
+/* Hardware Reset Indication (1) */
+#define SYS0_SR_HRSTIN (0x1 << 1)
+#define SYS0_SR_HRSTIN_VAL(val) (((val) & 0x1) << 1)
+#define SYS0_SR_HRSTIN_GET(val) ((((val) & SYS0_SR_HRSTIN) >> 1) & 0x1)
+#define SYS0_SR_HRSTIN_SET(reg,val) (reg) = ((reg & ~SYS0_SR_HRSTIN) | (((val) & 0x1) << 1))
+/* Power-on Reset Indication (0) */
+#define SYS0_SR_POR (0x1 << 0)
+#define SYS0_SR_POR_VAL(val) (((val) & 0x1) << 0)
+#define SYS0_SR_POR_GET(val) ((((val) & SYS0_SR_POR) >> 0) & 0x1)
+#define SYS0_SR_POR_SET(reg,val) (reg) = ((reg & ~SYS0_SR_POR) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * SYS0 Boot Control Register
+ ******************************************************************************/
+
+/* Configuration of Boot Source for CPU5 (25) */
+#define SYS0_BCR_BMODECPU5 (0x1 << 25)
+#define SYS0_BCR_BMODECPU5_VAL(val) (((val) & 0x1) << 25)
+#define SYS0_BCR_BMODECPU5_GET(val) ((((val) & SYS0_BCR_BMODECPU5) >> 25) & 0x1)
+#define SYS0_BCR_BMODECPU5_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_BMODECPU5) | (((val) & 0x1) << 25))
+/* Configuration of Boot Source for CPU4 (24) */
+#define SYS0_BCR_BMODECPU4 (0x1 << 24)
+#define SYS0_BCR_BMODECPU4_VAL(val) (((val) & 0x1) << 24)
+#define SYS0_BCR_BMODECPU4_GET(val) ((((val) & SYS0_BCR_BMODECPU4) >> 24) & 0x1)
+#define SYS0_BCR_BMODECPU4_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_BMODECPU4) | (((val) & 0x1) << 24))
+/* Configuration of Boot Source for CPU3 (23) */
+#define SYS0_BCR_BMODECPU3 (0x1 << 23)
+#define SYS0_BCR_BMODECPU3_VAL(val) (((val) & 0x1) << 23)
+#define SYS0_BCR_BMODECPU3_GET(val) ((((val) & SYS0_BCR_BMODECPU3) >> 23) & 0x1)
+#define SYS0_BCR_BMODECPU3_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_BMODECPU3) | (((val) & 0x1) << 23))
+/* Configuration of Boot Source for CPU2 (22) */
+#define SYS0_BCR_BMODECPU2 (0x1 << 22)
+#define SYS0_BCR_BMODECPU2_VAL(val) (((val) & 0x1) << 22)
+#define SYS0_BCR_BMODECPU2_GET(val) ((((val) & SYS0_BCR_BMODECPU2) >> 22) & 0x1)
+#define SYS0_BCR_BMODECPU2_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_BMODECPU2) | (((val) & 0x1) << 22))
+/* Configuration of Boot Source for CPU1 (21) */
+#define SYS0_BCR_BMODECPU1 (0x1 << 21)
+#define SYS0_BCR_BMODECPU1_VAL(val) (((val) & 0x1) << 21)
+#define SYS0_BCR_BMODECPU1_GET(val) ((((val) & SYS0_BCR_BMODECPU1) >> 21) & 0x1)
+#define SYS0_BCR_BMODECPU1_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_BMODECPU1) | (((val) & 0x1) << 21))
+/* Configuration of Boot Source for CPU0 (20:16) */
+#define SYS0_BCR_BMODECPU0 (0x1f << 16)
+#define SYS0_BCR_BMODECPU0_VAL(val) (((val) & 0x1f) << 16)
+#define SYS0_BCR_BMODECPU0_GET(val) ((((val) & SYS0_BCR_BMODECPU0) >> 16) & 0x1f)
+#define SYS0_BCR_BMODECPU0_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_BMODECPU0) | (((val) & 0x1f) << 16))
+/* Configuration of Endianess for CPU5 (5) */
+#define SYS0_BCR_ESELCPU5 (0x1 << 5)
+#define SYS0_BCR_ESELCPU5_VAL(val) (((val) & 0x1) << 5)
+#define SYS0_BCR_ESELCPU5_GET(val) ((((val) & SYS0_BCR_ESELCPU5) >> 5) & 0x1)
+#define SYS0_BCR_ESELCPU5_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_ESELCPU5) | (((val) & 0x1) << 5))
+/* Configuration of Endianess for CPU4 (4) */
+#define SYS0_BCR_ESELCPU4 (0x1 << 4)
+#define SYS0_BCR_ESELCPU4_VAL(val) (((val) & 0x1) << 4)
+#define SYS0_BCR_ESELCPU4_GET(val) ((((val) & SYS0_BCR_ESELCPU4) >> 4) & 0x1)
+#define SYS0_BCR_ESELCPU4_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_ESELCPU4) | (((val) & 0x1) << 4))
+/* Configuration of Endianess for CPU3 (3) */
+#define SYS0_BCR_ESELCPU3 (0x1 << 3)
+#define SYS0_BCR_ESELCPU3_VAL(val) (((val) & 0x1) << 3)
+#define SYS0_BCR_ESELCPU3_GET(val) ((((val) & SYS0_BCR_ESELCPU3) >> 3) & 0x1)
+#define SYS0_BCR_ESELCPU3_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_ESELCPU3) | (((val) & 0x1) << 3))
+/* Configuration of Endianess for CPU2 (2) */
+#define SYS0_BCR_ESELCPU2 (0x1 << 2)
+#define SYS0_BCR_ESELCPU2_VAL(val) (((val) & 0x1) << 2)
+#define SYS0_BCR_ESELCPU2_GET(val) ((((val) & SYS0_BCR_ESELCPU2) >> 2) & 0x1)
+#define SYS0_BCR_ESELCPU2_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_ESELCPU2) | (((val) & 0x1) << 2))
+/* Configuration of Endianess for CPU1 (1) */
+#define SYS0_BCR_ESELCPU1 (0x1 << 1)
+#define SYS0_BCR_ESELCPU1_VAL(val) (((val) & 0x1) << 1)
+#define SYS0_BCR_ESELCPU1_GET(val) ((((val) & SYS0_BCR_ESELCPU1) >> 1) & 0x1)
+#define SYS0_BCR_ESELCPU1_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_ESELCPU1) | (((val) & 0x1) << 1))
+/* Configuration of Endianess for CPU0 (0) */
+#define SYS0_BCR_ESELCPU0 (0x1)
+#define SYS0_BCR_ESELCPU0_VAL(val) (((val) & 0x1) << 0)
+#define SYS0_BCR_ESELCPU0_GET(val) ((((val) & SYS0_BCR_ESELCPU0) >> 0) & 0x1)
+#define SYS0_BCR_ESELCPU0_SET(reg,val) (reg) = ((reg & ~SYS0_BCR_ESELCPU0) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * PLL1 Control Register
+ ******************************************************************************/
+
+/* PLL1 Bypass Enable (31) */
+#define SYS0_PLL1CR_OSCBYP (0x1 << 31)
+#define SYS0_PLL1CR_OSCBYP_VAL(val) (((val) & 0x1) << 31)
+#define SYS0_PLL1CR_OSCBYP_GET(val) ((((val) & SYS0_PLL1CR_OSCBYP) >> 31) & 0x1)
+#define SYS0_PLL1CR_OSCBYP_SET(reg,val) (reg) = ((reg & ~SYS0_PLL1CR_OSCBYP) | (((val) & 0x1) << 31))
+/* PLL1 Divider Value (1:0) */
+#define SYS0_PLL1CR_PLLDIV (0x3)
+#define SYS0_PLL1CR_PLLDIV_VAL(val) (((val) & 0x3) << 0)
+#define SYS0_PLL1CR_PLLDIV_GET(val) ((((val) & SYS0_PLL1CR_PLLDIV) >> 0) & 0x3)
+#define SYS0_PLL1CR_PLLDIV_SET(reg,val) (reg) = ((reg & ~SYS0_PLL1CR_PLLDIV) | (((val) & 0x3) << 0))
+
+/*******************************************************************************
+ * PLL2 Control Register
+ ******************************************************************************/
+
+/* PLL2 clear deepsleep (31) */
+#define SYS0_PLL2CR_CLRDS (0x1 << 31)
+#define SYS0_PLL2CR_CLRDS_VAL(val) (((val) & 0x1) << 31)
+#define SYS0_PLL2CR_CLRDS_GET(val) ((((val) & SYS0_PLL2CR_CLRDS) >> 31) & 0x1)
+#define SYS0_PLL2CR_CLRDS_SET(reg,val) (reg) = ((reg & ~SYS0_PLL2CR_CLRDS) | (((val) & 0x1) << 31))
+/* PLL2 set deepsleep (30) */
+#define SYS0_PLL2CR_SETDS (0x1 << 30)
+#define SYS0_PLL2CR_SETDS_VAL(val) (((val) & 0x1) << 30)
+#define SYS0_PLL2CR_SETDS_GET(val) ((((val) & SYS0_PLL2CR_SETDS) >> 30) & 0x1)
+#define SYS0_PLL2CR_SETDS_SET(reg,val) (reg) = ((reg & ~SYS0_PLL2CR_SETDS) | (((val) & 0x1) << 30))
+/* PLL2 Fractional division enable (16) */
+#define SYS0_PLL2CR_FRACTEN (0x1 << 16)
+#define SYS0_PLL2CR_FRACTEN_VAL(val) (((val) & 0x1) << 16)
+#define SYS0_PLL2CR_FRACTEN_GET(val) ((((val) & SYS0_PLL2CR_FRACTEN) >> 16) & 0x1)
+#define SYS0_PLL2CR_FRACTEN_SET(reg,val) (reg) = ((reg & ~SYS0_PLL2CR_FRACTEN) | (((val) & 0x1) << 16))
+/* PLL2 Fractional division value (9:0) */
+#define SYS0_FRACTVAL (0x3f)
+#define SYS0_FRACTVAL_VAL(val) (((val) & 0x3f) << 0)
+#define SYS0_FRACTVAL_GET(val) ((((val) & SYS0_FRACTVAL) >> 0) & 0x3f)
+#define SYS0_FRACTVAL_SET(reg,val) (reg) = ((reg & ~SYS0_FRACTVAL) | (((val) & 0x3f) << 0))
+
+#endif
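
The accessor pattern above repeats across all SVIP register headers: a *_GET macro extracts a field from a value read with sys0_r32(), and a *_SET macro read-modify-writes a field in a local copy. A brief sketch of that usage follows; it assumes a global `struct svip_reg_sys0 *sys0` pointing at the mapped register block, which this header expects to be provided elsewhere:

/* Read the latched boot-mode pins and check PLL1 lock (sketch only). */
static void example_sys0_status(void)
{
	unsigned long sr = sys0_r32(sr);
	unsigned int bmode = SYS0_SR_BMODE_GET(sr);

	if (!SYS0_SR_PLL1LOCK_GET(sr))
		pr_warn("PLL1 not locked, boot mode %u\n", bmode);
}
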
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys1_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys1_reg.h
new file mode 100644
index 0000000000..e0c2e84b1a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys1_reg.h
@@ -0,0 +1,370 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __SYS1_REG_H
+#define __SYS1_REG_H
+
+#define sys1_r32(reg) ltq_r32(&sys1->reg)
+#define sys1_w32(val, reg) ltq_w32(val, &sys1->reg)
+#define sys1_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &sys1->reg)
+
+/** SYS1 register structure */
+struct svip_reg_sys1 {
+ unsigned long clksr; /* 0x0000 */
+ unsigned long clkenr; /* 0x0004 */
+ unsigned long clkclr; /* 0x0008 */
+ unsigned long reserved0[1];
+ unsigned long l2ccr; /* 0x0010 */
+ unsigned long fpicr; /* 0x0014 */
+ unsigned long wdtcr; /* 0x0018 */
+ unsigned long reserved1[1];
+ unsigned long cpucr[6]; /* 0x0020 */
+ unsigned long reserved2[2];
+ unsigned long rsr; /* 0x0040 */
+ unsigned long rreqr; /* 0x0044 */
+ unsigned long rrlsr; /* 0x0048 */
+ unsigned long rbtr; /* 0x004c */
+ unsigned long irncr; /* 0x0050 */
+ unsigned long irnicr; /* 0x0054 */
+ unsigned long irnen; /* 0x0058 */
+ unsigned long reserved3[1];
+ unsigned long cpursr[6]; /* 0x0060 */
+ unsigned long reserved4[2];
+ unsigned long cpusrssr[6]; /* 0x0080 */
+ unsigned long reserved5[2];
+ unsigned long cpuwrssr[6]; /* 0x00a0 */
+};
+
+/*******************************************************************************
+ * SYS1 Clock Status Register
+ ******************************************************************************/
+/* (r) Clock Enable for L2C */
+#define SYS1_CLKSR_L2C (0x1 << 31)
+/* (r) Clock Enable for DDR2 */
+#define SYS1_CLKSR_DDR2 (0x1 << 30)
+/* (r) Clock Enable for SMI2 */
+#define SYS1_CLKSR_SMI2 (0x1 << 29)
+/* (r) Clock Enable for SMI1 */
+#define SYS1_CLKSR_SMI1 (0x1 << 28)
+/* (r) Clock Enable for SMI0 */
+#define SYS1_CLKSR_SMI0 (0x1 << 27)
+/* (r) Clock Enable for FMI0 */
+#define SYS1_CLKSR_FMI0 (0x1 << 26)
+/* (r) Clock Enable for PORT0 */
+#define SYS1_CLKSR_PORT0 (0x1 << 0)
+/* (r) Clock Enable for PCM3 */
+#define SYS1_CLKSR_PCM3 (0x1 << 19)
+/* (r) Clock Enable for PCM2 */
+#define SYS1_CLKSR_PCM2 (0x1 << 18)
+/* (r) Clock Enable for PCM1 */
+#define SYS1_CLKSR_PCM1 (0x1 << 17)
+/* (r) Clock Enable for PCM0 */
+#define SYS1_CLKSR_PCM0 (0x1 << 16)
+/* (r) Clock Enable for ASC1 */
+#define SYS1_CLKSR_ASC1 (0x1 << 15)
+/* (r) Clock Enable for ASC0 */
+#define SYS1_CLKSR_ASC0 (0x1 << 14)
+/* (r) Clock Enable for SSC2 */
+#define SYS1_CLKSR_SSC2 (0x1 << 13)
+/* (r) Clock Enable for SSC1 */
+#define SYS1_CLKSR_SSC1 (0x1 << 12)
+/* (r) Clock Enable for SSC0 */
+#define SYS1_CLKSR_SSC0 (0x1 << 11)
+/* (r) Clock Enable for GPTC */
+#define SYS1_CLKSR_GPTC (0x1 << 10)
+/* (r) Clock Enable for DMA */
+#define SYS1_CLKSR_DMA (0x1 << 9)
+/* (r) Clock Enable for FSCT */
+#define SYS1_CLKSR_FSCT (0x1 << 8)
+/* (r) Clock Enable for ETHSW */
+#define SYS1_CLKSR_ETHSW (0x1 << 7)
+/* (r) Clock Enable for EBU */
+#define SYS1_CLKSR_EBU (0x1 << 6)
+/* (r) Clock Enable for TRNG */
+#define SYS1_CLKSR_TRNG (0x1 << 5)
+/* (r) Clock Enable for DEU */
+#define SYS1_CLKSR_DEU (0x1 << 4)
+/* (r) Clock Enable for PORT3 */
+#define SYS1_CLKSR_PORT3 (0x1 << 3)
+/* (r) Clock Enable for PORT2 */
+#define SYS1_CLKSR_PORT2 (0x1 << 2)
+/* (r) Clock Enable for PORT1 */
+#define SYS1_CLKSR_PORT1 (0x1 << 1)
+
+/*******************************************************************************
+ * SYS1 Clock Enable Register
+ ******************************************************************************/
+/* (w) Clock Enable Request for L2C */
+#define SYS1_CLKENR_L2C (0x1 << 31)
+/* (w) Clock Enable Request for DDR2 */
+#define SYS1_CLKENR_DDR2 (0x1 << 30)
+/* (w) Clock Enable Request for SMI2 */
+#define SYS1_CLKENR_SMI2 (0x1 << 29)
+/* (w) Clock Enable Request for SMI1 */
+#define SYS1_CLKENR_SMI1 (0x1 << 28)
+/* (w) Clock Enable Request for SMI0 */
+#define SYS1_CLKENR_SMI0 (0x1 << 27)
+/* (w) Clock Enable Request for FMI0 */
+#define SYS1_CLKENR_FMI0 (0x1 << 26)
+/* (w) Clock Enable Request for PORT0 */
+#define SYS1_CLKENR_PORT0 (0x1 << 0)
+/* (w) Clock Enable Request for PCM3 */
+#define SYS1_CLKENR_PCM3 (0x1 << 19)
+/* (w) Clock Enable Request for PCM2 */
+#define SYS1_CLKENR_PCM2 (0x1 << 18)
+/* (w) Clock Enable Request for PCM1 */
+#define SYS1_CLKENR_PCM1 (0x1 << 17)
+/* (w) Clock Enable Request for PCM0 */
+#define SYS1_CLKENR_PCM0 (0x1 << 16)
+/* (w) Clock Enable Request for ASC1 */
+#define SYS1_CLKENR_ASC1 (0x1 << 15)
+/* (w) Clock Enable Request for ASC0 */
+#define SYS1_CLKENR_ASC0 (0x1 << 14)
+/* (w) Clock Enable Request for SSC2 */
+#define SYS1_CLKENR_SSC2 (0x1 << 13)
+/* (w) Clock Enable Request for SSC1 */
+#define SYS1_CLKENR_SSC1 (0x1 << 12)
+/* (w) Clock Enable Request for SSC0 */
+#define SYS1_CLKENR_SSC0 (0x1 << 11)
+/* (w) Clock Enable Request for GPTC */
+#define SYS1_CLKENR_GPTC (0x1 << 10)
+/* (w) Clock Enable Request for DMA */
+#define SYS1_CLKENR_DMA (0x1 << 9)
+/* (w) Clock Enable Request for FSCT */
+#define SYS1_CLKENR_FSCT (0x1 << 8)
+/* (w) Clock Enable Request for ETHSW */
+#define SYS1_CLKENR_ETHSW (0x1 << 7)
+/* (w) Clock Enable Request for EBU */
+#define SYS1_CLKENR_EBU (0x1 << 6)
+/* (w) Clock Enable Request for TRNG */
+#define SYS1_CLKENR_TRNG (0x1 << 5)
+/* (w) Clock Enable Request for DEU */
+#define SYS1_CLKENR_DEU (0x1 << 4)
+/* (w) Clock Enable Request for PORT3 */
+#define SYS1_CLKENR_PORT3 (0x1 << 3)
+/* (w) Clock Enable Request for PORT2 */
+#define SYS1_CLKENR_PORT2 (0x1 << 2)
+/* (w) Clock Enable Request for PORT1 */
+#define SYS1_CLKENR_PORT1 (0x1 << 1)
+
+/*******************************************************************************
+ * SYS1 Clock Clear Register
+ ******************************************************************************/
+/* (w) Clock Disable Request for L2C */
+#define SYS1_CLKCLR_L2C (0x1 << 31)
+/* (w) Clock Disable Request for DDR2 */
+#define SYS1_CLKCLR_DDR2 (0x1 << 30)
+/* (w) Clock Disable Request for SMI2 */
+#define SYS1_CLKCLR_SMI2 (0x1 << 29)
+/* (w) Clock Disable Request for SMI1 */
+#define SYS1_CLKCLR_SMI1 (0x1 << 28)
+/* (w) Clock Disable Request for SMI0 */
+#define SYS1_CLKCLR_SMI0 (0x1 << 27)
+/* (w) Clock Disable Request for FMI0 */
+#define SYS1_CLKCLR_FMI0 (0x1 << 26)
+/* (w) Clock Disable Request for PORT0 */
+#define SYS1_CLKCLR_PORT0 (0x1 << 0)
+/* (w) Clock Disable Request for PCM3 */
+#define SYS1_CLKCLR_PCM3 (0x1 << 19)
+/* (w) Clock Disable Request for PCM2 */
+#define SYS1_CLKCLR_PCM2 (0x1 << 18)
+/* (w) Clock Disable Request for PCM1 */
+#define SYS1_CLKCLR_PCM1 (0x1 << 17)
+/* (w) Clock Disable Request for PCM0 */
+#define SYS1_CLKCLR_PCM0 (0x1 << 16)
+/* (w) Clock Disable Request for ASC1 */
+#define SYS1_CLKCLR_ASC1 (0x1 << 15)
+/* (w) Clock Disable Request for ASC0 */
+#define SYS1_CLKCLR_ASC0 (0x1 << 14)
+/* (w) Clock Disable Request for SSC2 */
+#define SYS1_CLKCLR_SSC2 (0x1 << 13)
+/* (w) Clock Disable Request for SSC1 */
+#define SYS1_CLKCLR_SSC1 (0x1 << 12)
+/* (w) Clock Disable Request for SSC0 */
+#define SYS1_CLKCLR_SSC0 (0x1 << 11)
+/* (w) Clock Disable Request for GPTC */
+#define SYS1_CLKCLR_GPTC (0x1 << 10)
+/* (w) Clock Disable Request for DMA */
+#define SYS1_CLKCLR_DMA (0x1 << 9)
+/* (w) Clock Disable Request for FSCT */
+#define SYS1_CLKCLR_FSCT (0x1 << 8)
+/* (w) Clock Disable Request for ETHSW */
+#define SYS1_CLKCLR_ETHSW (0x1 << 7)
+/* (w) Clock Disable Request for EBU */
+#define SYS1_CLKCLR_EBU (0x1 << 6)
+/* (w) Clock Disable Request for TRNG */
+#define SYS1_CLKCLR_TRNG (0x1 << 5)
+/* (w) Clock Disable Request for DEU */
+#define SYS1_CLKCLR_DEU (0x1 << 4)
+/* (w) Clock Disable Request for PORT3 */
+#define SYS1_CLKCLR_PORT3 (0x1 << 3)
+/* (w) Clock Disable Request for PORT2 */
+#define SYS1_CLKCLR_PORT2 (0x1 << 2)
+/* (w) Clock Disable Request for PORT1 */
+#define SYS1_CLKCLR_PORT1 (0x1 << 1)
+
+/*******************************************************************************
+ * SYS1 FPI Control Register
+ ******************************************************************************/
+
+/* FPI Bus Clock divider (0) */
+#define SYS1_FPICR_FPIDIV (0x1)
+#define SYS1_FPICR_FPIDIV_VAL(val) (((val) & 0x1) << 0)
+#define SYS1_FPICR_FPIDIV_GET(val) ((((val) & SYS1_FPICR_FPIDIV) >> 0) & 0x1)
+#define SYS1_FPICR_FPIDIV_SET(reg,val) (reg) = ((reg & ~SYS1_FPICR_FPIDIV) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * SYS1 Clock Control Register for CPUn
+ ******************************************************************************/
+
+/* Enable bit for clock of CPUn (1) */
+#define SYS1_CPUCR_CPUCLKEN (0x1 << 1)
+#define SYS1_CPUCR_CPUCLKEN_VAL(val) (((val) & 0x1) << 1)
+#define SYS1_CPUCR_CPUCLKEN_GET(val) ((((val) & SYS1_CPUCR_CPUCLKEN) >> 1) & 0x1)
+#define SYS1_CPUCR_CPUCLKEN_SET(reg,val) (reg) = ((reg & ~SYS1_CPUCR_CPUCLKEN) | (((val) & 0x1) << 1))
+/* Divider factor for clock of CPUn (0) */
+#define SYS1_CPUCR_CPUDIV (0x1)
+#define SYS1_CPUCR_CPUDIV_VAL(val) (((val) & 0x1) << 0)
+#define SYS1_CPUCR_CPUDIV_GET(val) ((((val) & SYS1_CPUCR_CPUDIV) >> 0) & 0x1)
+#define SYS1_CPUCR_CPUDIV_SET(reg,val) (reg) = ((reg & ~SYS1_CPUCR_CPUDIV) | (((val) & 0x1) << 0))
+
+/*******************************************************************************
+ * SYS1 Reset Request Register
+ ******************************************************************************/
+
+/* HRSTOUT Reset Request (18) */
+#define SYS1_RREQ_HRSTOUT (0x1 << 18)
+#define SYS1_RREQ_HRSTOUT_VAL(val) (((val) & 0x1) << 18)
+#define SYS1_RREQ_HRSTOUT_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_HRSTOUT) | (((val) & 1) << 18))
+/* FBS0 Reset Request (17) */
+#define SYS1_RREQ_FBS0 (0x1 << 17)
+#define SYS1_RREQ_FBS0_VAL(val) (((val) & 0x1) << 17)
+#define SYS1_RREQ_FBS0_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_FBS0) | (((val) & 1) << 17))
+/* SUBSYS Reset Request (16) */
+#define SYS1_RREQ_SUBSYS (0x1 << 16)
+#define SYS1_RREQ_SUBSYS_VAL(val) (((val) & 0x1) << 16)
+#define SYS1_RREQ_SUBSYS_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_SUBSYS) | (((val) & 1) << 16))
+/* Watchdog5 Reset Request (13) */
+#define SYS1_RREQ_WDT5 (0x1 << 13)
+#define SYS1_RREQ_WDT5_VAL(val) (((val) & 0x1) << 13)
+#define SYS1_RREQ_WDT5_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_WDT5) | (((val) & 1) << 13))
+/* Watchdog4 Reset Request (12) */
+#define SYS1_RREQ_WDT4 (0x1 << 12)
+#define SYS1_RREQ_WDT4_VAL(val) (((val) & 0x1) << 12)
+#define SYS1_RREQ_WDT4_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_WDT4) | (((val) & 1) << 12))
+/* Watchdog3 Reset Request (11) */
+#define SYS1_RREQ_WDT3 (0x1 << 11)
+#define SYS1_RREQ_WDT3_VAL(val) (((val) & 0x1) << 11)
+#define SYS1_RREQ_WDT3_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_WDT3) | (((val) & 1) << 11))
+/* Watchdog2 Reset Request (10) */
+#define SYS1_RREQ_WDT2 (0x1 << 10)
+#define SYS1_RREQ_WDT2_VAL(val) (((val) & 0x1) << 10)
+#define SYS1_RREQ_WDT2_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_WDT2) | (((val) & 1) << 10))
+/* Watchdog1 Reset Request (9) */
+#define SYS1_RREQ_WDT1 (0x1 << 9)
+#define SYS1_RREQ_WDT1_VAL(val) (((val) & 0x1) << 9)
+#define SYS1_RREQ_WDT1_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_WDT1) | (((val) & 1) << 9))
+/* Watchdog0 Reset Request (8) */
+#define SYS1_RREQ_WDT0 (0x1 << 8)
+#define SYS1_RREQ_WDT0_VAL(val) (((val) & 0x1) << 8)
+#define SYS1_RREQ_WDT0_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_WDT0) | (((val) & 1) << 8))
+/* CPU5 Reset Request (5) */
+#define SYS1_RREQ_CPU5 (0x1 << 5)
+#define SYS1_RREQ_CPU5_VAL(val) (((val) & 0x1) << 5)
+#define SYS1_RREQ_CPU5_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_CPU5) | (((val) & 1) << 5))
+/* CPU4 Reset Request (4) */
+#define SYS1_RREQ_CPU4 (0x1 << 4)
+#define SYS1_RREQ_CPU4_VAL(val) (((val) & 0x1) << 4)
+#define SYS1_RREQ_CPU4_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_CPU4) | (((val) & 1) << 4))
+/* CPU3 Reset Request (3) */
+#define SYS1_RREQ_CPU3 (0x1 << 3)
+#define SYS1_RREQ_CPU3_VAL(val) (((val) & 0x1) << 3)
+#define SYS1_RREQ_CPU3_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_CPU3) | (((val) & 1) << 3))
+/* CPU2 Reset Request (2) */
+#define SYS1_RREQ_CPU2 (0x1 << 2)
+#define SYS1_RREQ_CPU2_VAL(val) (((val) & 0x1) << 2)
+#define SYS1_RREQ_CPU2_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_CPU2) | (((val) & 1) << 2))
+/* CPU1 Reset Request (1) */
+#define SYS1_RREQ_CPU1 (0x1 << 1)
+#define SYS1_RREQ_CPU1_VAL(val) (((val) & 0x1) << 1)
+#define SYS1_RREQ_CPU1_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_CPU1) | (((val) & 1) << 1))
+/* CPU0 Reset Request (0) */
+#define SYS1_RREQ_CPU0 (0x1)
+#define SYS1_RREQ_CPU0_VAL(val) (((val) & 0x1) << 0)
+#define SYS1_RREQ_CPU0_SET(reg,val) (reg) = ((reg & ~SYS1_RREQ_CPU0) | (((val) & 1) << 0))
+
+/*******************************************************************************
+ * SYS1 Reset Release Register
+ ******************************************************************************/
+
+/* HRSTOUT Reset Release (18) */
+#define SYS1_RRLSR_HRSTOUT (0x1 << 18)
+#define SYS1_RRLSR_HRSTOUT_VAL(val) (((val) & 0x1) << 18)
+#define SYS1_RRLSR_HRSTOUT_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_HRSTOUT) | (((val) & 1) << 18))
+/* FBS0 Reset Release (17) */
+#define SYS1_RRLSR_FBS0 (0x1 << 17)
+#define SYS1_RRLSR_FBS0_VAL(val) (((val) & 0x1) << 17)
+#define SYS1_RRLSR_FBS0_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_FBS0) | (((val) & 1) << 17))
+/* SUBSYS Reset Release (16) */
+#define SYS1_RRLSR_SUBSYS (0x1 << 16)
+#define SYS1_RRLSR_SUBSYS_VAL(val) (((val) & 0x1) << 16)
+#define SYS1_RRLSR_SUBSYS_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_SUBSYS) | (((val) & 1) << 16))
+/* Watchdog5 Reset Release (13) */
+#define SYS1_RRLSR_WDT5 (0x1 << 13)
+#define SYS1_RRLSR_WDT5_VAL(val) (((val) & 0x1) << 13)
+#define SYS1_RRLSR_WDT5_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_WDT5) | (((val) & 1) << 13))
+/* Watchdog4 Reset Release (12) */
+#define SYS1_RRLSR_WDT4 (0x1 << 12)
+#define SYS1_RRLSR_WDT4_VAL(val) (((val) & 0x1) << 12)
+#define SYS1_RRLSR_WDT4_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_WDT4) | (((val) & 1) << 12))
+/* Watchdog3 Reset Release (11) */
+#define SYS1_RRLSR_WDT3 (0x1 << 11)
+#define SYS1_RRLSR_WDT3_VAL(val) (((val) & 0x1) << 11)
+#define SYS1_RRLSR_WDT3_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_WDT3) | (((val) & 1) << 11))
+/* Watchdog2 Reset Release (10) */
+#define SYS1_RRLSR_WDT2 (0x1 << 10)
+#define SYS1_RRLSR_WDT2_VAL(val) (((val) & 0x1) << 10)
+#define SYS1_RRLSR_WDT2_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_WDT2) | (((val) & 1) << 10))
+/* Watchdog1 Reset Release (9) */
+#define SYS1_RRLSR_WDT1 (0x1 << 9)
+#define SYS1_RRLSR_WDT1_VAL(val) (((val) & 0x1) << 9)
+#define SYS1_RRLSR_WDT1_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_WDT1) | (((val) & 1) << 9))
+/* Watchdog0 Reset Release (8) */
+#define SYS1_RRLSR_WDT0 (0x1 << 8)
+#define SYS1_RRLSR_WDT0_VAL(val) (((val) & 0x1) << 8)
+#define SYS1_RRLSR_WDT0_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_WDT0) | (((val) & 1) << 8))
+/* CPU5 Reset Release (5) */
+#define SYS1_RRLSR_CPU5 (0x1 << 5)
+#define SYS1_RRLSR_CPU5_VAL(val) (((val) & 0x1) << 5)
+#define SYS1_RRLSR_CPU5_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_CPU5) | (((val) & 1) << 5))
+/* CPU4 Reset Release (4) */
+#define SYS1_RRLSR_CPU4 (0x1 << 4)
+#define SYS1_RRLSR_CPU4_VAL(val) (((val) & 0x1) << 4)
+#define SYS1_RRLSR_CPU4_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_CPU4) | (((val) & 1) << 4))
+/* CPU3 Reset Release (3) */
+#define SYS1_RRLSR_CPU3 (0x1 << 3)
+#define SYS1_RRLSR_CPU3_VAL(val) (((val) & 0x1) << 3)
+#define SYS1_RRLSR_CPU3_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_CPU3) | (((val) & 1) << 3))
+/* CPU2 Reset Release (2) */
+#define SYS1_RRLSR_CPU2 (0x1 << 2)
+#define SYS1_RRLSR_CPU2_VAL(val) (((val) & 0x1) << 2)
+#define SYS1_RRLSR_CPU2_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_CPU2) | (((val) & 1) << 2))
+/* CPU1 Reset Release (1) */
+#define SYS1_RRLSR_CPU1 (0x1 << 1)
+#define SYS1_RRLSR_CPU1_VAL(val) (((val) & 0x1) << 1)
+#define SYS1_RRLSR_CPU1_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_CPU1) | (((val) & 1) << 1))
+/* CPU0 Reset Release (0) */
+#define SYS1_RRLSR_CPU0 (0x1)
+#define SYS1_RRLSR_CPU0_VAL(val) (((val) & 0x1) << 0)
+#define SYS1_RRLSR_CPU0_SET(reg,val) (reg) = ((reg & ~SYS1_RRLSR_CPU0) | (((val) & 1) << 0))
+
+#endif
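
As the (r)/(w) annotations above indicate, CLKSR is a read-only status register while CLKENR and CLKCLR are write-only request registers, so enabling a clock means writing the mask to CLKENR and then watching CLKSR until the gate reports it is open. A plausible sketch of how the svip_sys1_clk_enable() helper declared in svip_pms.h could be built on these macros is shown below; it is an illustration under the assumption of a global `struct svip_reg_sys1 *sys1`, not the implementation carried by this patch:

/* Request the clocks in 'mask' and spin until CLKSR reports them enabled. */
void svip_sys1_clk_enable(u32 mask)
{
	sys1_w32(mask, clkenr);
	while ((sys1_r32(clksr) & mask) != mask)
		cpu_relax();
}
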
diff --git a/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys2_reg.h b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys2_reg.h
new file mode 100644
index 0000000000..ff9f04b4d7
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/include/asm/mach-lantiq/svip/sys2_reg.h
@@ -0,0 +1,494 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ St. Martin Strasse 53; 81669 Munich, Germany
+
+ Any use of this Software is subject to the conclusion of a respective
+ License Agreement. Without such a License Agreement no rights to the
+ Software are granted.
+
+ ******************************************************************************/
+
+#ifndef __SYS2_REG_H
+#define __SYS2_REG_H
+
+#define sys2_r32(reg) ltq_r32(&sys2->reg)
+#define sys2_w32(val, reg) ltq_w32(val, &sys2->reg)
+#define sys2_w32_mask(clear, set, reg) ltq_w32_mask(clear, set, &sys2->reg)
+
+/** SYS2 register structure */
+struct svip_reg_sys2 {
+ volatile unsigned long clksr; /* 0x0000 */
+ volatile unsigned long clkenr; /* 0x0004 */
+ volatile unsigned long clkclr; /* 0x0008 */
+ volatile unsigned long reserved0[1];
+ volatile unsigned long rsr; /* 0x0010 */
+ volatile unsigned long rreqr; /* 0x0014 */
+ volatile unsigned long rrlsr; /* 0x0018 */
+};
+
+/*******************************************************************************
+ * SYS2 Clock Status Register
+ ******************************************************************************/
+
+/* Clock Enable for PORT4 */
+#define SYS2_CLKSR_PORT4 (0x1 << 27)
+#define SYS2_CLKSR_PORT4_VAL(val) (((val) & 0x1) << 27)
+#define SYS2_CLKSR_PORT4_GET(val) (((val) & SYS2_CLKSR_PORT4) >> 27)
+/* Clock Enable for HWSYNC */
+#define SYS2_CLKSR_HWSYNC (0x1 << 26)
+#define SYS2_CLKSR_HWSYNC_VAL(val) (((val) & 0x1) << 26)
+#define SYS2_CLKSR_HWSYNC_GET(val) (((val) & SYS2_CLKSR_HWSYNC) >> 26)
+/* Clock Enable for MBS */
+#define SYS2_CLKSR_MBS (0x1 << 25)
+#define SYS2_CLKSR_MBS_VAL(val) (((val) & 0x1) << 25)
+#define SYS2_CLKSR_MBS_GET(val) (((val) & SYS2_CLKSR_MBS) >> 25)
+/* Clock Enable for SWINT */
+#define SYS2_CLKSR_SWINT (0x1 << 24)
+#define SYS2_CLKSR_SWINT_VAL(val) (((val) & 0x1) << 24)
+#define SYS2_CLKSR_SWINT_GET(val) (((val) & SYS2_CLKSR_SWINT) >> 24)
+/* Clock Enable for HWACC3 */
+#define SYS2_CLKSR_HWACC3 (0x1 << 19)
+#define SYS2_CLKSR_HWACC3_VAL(val) (((val) & 0x1) << 19)
+#define SYS2_CLKSR_HWACC3_GET(val) (((val) & SYS2_CLKSR_HWACC3) >> 19)
+/* Clock Enable for HWACC2 */
+#define SYS2_CLKSR_HWACC2 (0x1 << 18)
+#define SYS2_CLKSR_HWACC2_VAL(val) (((val) & 0x1) << 18)
+#define SYS2_CLKSR_HWACC2_GET(val) (((val) & SYS2_CLKSR_HWACC2) >> 18)
+/* Clock Enable for HWACC1 */
+#define SYS2_CLKSR_HWACC1 (0x1 << 17)
+#define SYS2_CLKSR_HWACC1_VAL(val) (((val) & 0x1) << 17)
+#define SYS2_CLKSR_HWACC1_GET(val) (((val) & SYS2_CLKSR_HWACC1) >> 17)
+/* Clock Enable for HWACC0 */
+#define SYS2_CLKSR_HWACC0 (0x1 << 16)
+#define SYS2_CLKSR_HWACC0_VAL(val) (((val) & 0x1) << 16)
+#define SYS2_CLKSR_HWACC0_GET(val) (((val) & SYS2_CLKSR_HWACC0) >> 16)
+/* Clock Enable for SIF7 */
+#define SYS2_CLKSR_SIF7 (0x1 << 15)
+#define SYS2_CLKSR_SIF7_VAL(val) (((val) & 0x1) << 15)
+#define SYS2_CLKSR_SIF7_GET(val) (((val) & SYS2_CLKSR_SIF7) >> 15)
+/* Clock Enable for SIF6 */
+#define SYS2_CLKSR_SIF6 (0x1 << 14)
+#define SYS2_CLKSR_SIF6_VAL(val) (((val) & 0x1) << 14)
+#define SYS2_CLKSR_SIF6_GET(val) (((val) & SYS2_CLKSR_SIF6) >> 14)
+/* Clock Enable for SIF5 */
+#define SYS2_CLKSR_SIF5 (0x1 << 13)
+#define SYS2_CLKSR_SIF5_VAL(val) (((val) & 0x1) << 13)
+#define SYS2_CLKSR_SIF5_GET(val) (((val) & SYS2_CLKSR_SIF5) >> 13)
+/* Clock Enable for SIF4 */
+#define SYS2_CLKSR_SIF4 (0x1 << 12)
+#define SYS2_CLKSR_SIF4_VAL(val) (((val) & 0x1) << 12)
+#define SYS2_CLKSR_SIF4_GET(val) (((val) & SYS2_CLKSR_SIF4) >> 12)
+/* Clock Enable for SIF3 */
+#define SYS2_CLKSR_SIF3 (0x1 << 11)
+#define SYS2_CLKSR_SIF3_VAL(val) (((val) & 0x1) << 11)
+#define SYS2_CLKSR_SIF3_GET(val) (((val) & SYS2_CLKSR_SIF3) >> 11)
+/* Clock Enable for SIF2 */
+#define SYS2_CLKSR_SIF2 (0x1 << 10)
+#define SYS2_CLKSR_SIF2_VAL(val) (((val) & 0x1) << 10)
+#define SYS2_CLKSR_SIF2_GET(val) (((val) & SYS2_CLKSR_SIF2) >> 10)
+/* Clock Enable for SIF1 */
+#define SYS2_CLKSR_SIF1 (0x1 << 9)
+#define SYS2_CLKSR_SIF1_VAL(val) (((val) & 0x1) << 9)
+#define SYS2_CLKSR_SIF1_GET(val) (((val) & SYS2_CLKSR_SIF1) >> 9)
+/* Clock Enable for SIF0 */
+#define SYS2_CLKSR_SIF0 (0x1 << 8)
+#define SYS2_CLKSR_SIF0_VAL(val) (((val) & 0x1) << 8)
+#define SYS2_CLKSR_SIF0_GET(val) (((val) & SYS2_CLKSR_SIF0) >> 8)
+/* Clock Enable for DFEV7 */
+#define SYS2_CLKSR_DFEV7 (0x1 << 7)
+#define SYS2_CLKSR_DFEV7_VAL(val) (((val) & 0x1) << 7)
+#define SYS2_CLKSR_DFEV7_GET(val) (((val) & SYS2_CLKSR_DFEV7) >> 7)
+/* Clock Enable for DFEV6 */
+#define SYS2_CLKSR_DFEV6 (0x1 << 6)
+#define SYS2_CLKSR_DFEV6_VAL(val) (((val) & 0x1) << 6)
+#define SYS2_CLKSR_DFEV6_GET(val) (((val) & SYS2_CLKSR_DFEV6) >> 6)
+/* Clock Enable for DFEV5 */
+#define SYS2_CLKSR_DFEV5 (0x1 << 5)
+#define SYS2_CLKSR_DFEV5_VAL(val) (((val) & 0x1) << 5)
+#define SYS2_CLKSR_DFEV5_GET(val) (((val) & SYS2_CLKSR_DFEV5) >> 5)
+/* Clock Enable for DFEV4 */
+#define SYS2_CLKSR_DFEV4 (0x1 << 4)
+#define SYS2_CLKSR_DFEV4_VAL(val) (((val) & 0x1) << 4)
+#define SYS2_CLKSR_DFEV4_GET(val) (((val) & SYS2_CLKSR_DFEV4) >> 4)
+/* Clock Enable for DFEV3 */
+#define SYS2_CLKSR_DFEV3 (0x1 << 3)
+#define SYS2_CLKSR_DFEV3_VAL(val) (((val) & 0x1) << 3)
+#define SYS2_CLKSR_DFEV3_GET(val) (((val) & SYS2_CLKSR_DFEV3) >> 3)
+/* Clock Enable for DFEV2 */
+#define SYS2_CLKSR_DFEV2 (0x1 << 2)
+#define SYS2_CLKSR_DFEV2_VAL(val) (((val) & 0x1) << 2)
+#define SYS2_CLKSR_DFEV2_GET(val) (((val) & SYS2_CLKSR_DFEV2) >> 2)
+/* Clock Enable for DFEV1 */
+#define SYS2_CLKSR_DFEV1 (0x1 << 1)
+#define SYS2_CLKSR_DFEV1_VAL(val) (((val) & 0x1) << 1)
+#define SYS2_CLKSR_DFEV1_GET(val) (((val) & SYS2_CLKSR_DFEV1) >> 1)
+/* Clock Enable for DFEV0 */
+#define SYS2_CLKSR_DFEV0 (0x1)
+#define SYS2_CLKSR_DFEV0_VAL(val) (((val) & 0x1))
+#define SYS2_CLKSR_DFEV0_GET(val) ((val) & SYS2_CLKSR_DFEV0)
+
+/*******************************************************************************
+ * SYS2 Clock Enable Register
+ ******************************************************************************/
+
+/* Clock Enable Request for PORT4 */
+#define SYS2_CLKENR_PORT4 (0x1 << 27)
+#define SYS2_CLKENR_PORT4_VAL(val) (((val) & 0x1) << 27)
+#define SYS2_CLKENR_PORT4_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_PORT4) | ((val & 0x1) << 27))
+/* Clock Enable Request for HWSYNC */
+#define SYS2_CLKENR_HWSYNC (0x1 << 26)
+#define SYS2_CLKENR_HWSYNC_VAL(val) (((val) & 0x1) << 26)
+#define SYS2_CLKENR_HWSYNC_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_HWSYNC) | ((val & 0x1) << 26))
+/* Clock Enable Request for MBS */
+#define SYS2_CLKENR_MBS (0x1 << 25)
+#define SYS2_CLKENR_MBS_VAL(val) (((val) & 0x1) << 25)
+#define SYS2_CLKENR_MBS_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_MBS) | ((val & 0x1) << 25))
+/* Clock Enable Request for SWINT */
+#define SYS2_CLKENR_SWINT (0x1 << 24)
+#define SYS2_CLKENR_SWINT_VAL(val) (((val) & 0x1) << 24)
+#define SYS2_CLKENR_SWINT_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SWINT) | ((val & 0x1) << 24))
+/* Clock Enable Request for HWACC3 */
+#define SYS2_CLKENR_HWACC3 (0x1 << 19)
+#define SYS2_CLKENR_HWACC3_VAL(val) (((val) & 0x1) << 19)
+#define SYS2_CLKENR_HWACC3_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_HWACC3) | ((val & 0x1) << 19))
+/* Clock Enable Request for HWACC2 */
+#define SYS2_CLKENR_HWACC2 (0x1 << 18)
+#define SYS2_CLKENR_HWACC2_VAL(val) (((val) & 0x1) << 18)
+#define SYS2_CLKENR_HWACC2_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_HWACC2) | ((val & 0x1) << 18))
+/* Clock Enable Request for HWACC1 */
+#define SYS2_CLKENR_HWACC1 (0x1 << 17)
+#define SYS2_CLKENR_HWACC1_VAL(val) (((val) & 0x1) << 17)
+#define SYS2_CLKENR_HWACC1_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_HWACC1) | ((val & 0x1) << 17))
+/* Clock Enable Request for HWACC0 */
+#define SYS2_CLKENR_HWACC0 (0x1 << 16)
+#define SYS2_CLKENR_HWACC0_VAL(val) (((val) & 0x1) << 16)
+#define SYS2_CLKENR_HWACC0_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_HWACC0) | ((val & 0x1) << 16))
+/* Clock Enable Request for SIF7 */
+#define SYS2_CLKENR_SIF7 (0x1 << 15)
+#define SYS2_CLKENR_SIF7_VAL(val) (((val) & 0x1) << 15)
+#define SYS2_CLKENR_SIF7_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF7) | ((val & 0x1) << 15))
+/* Clock Enable Request for SIF6 */
+#define SYS2_CLKENR_SIF6 (0x1 << 14)
+#define SYS2_CLKENR_SIF6_VAL(val) (((val) & 0x1) << 14)
+#define SYS2_CLKENR_SIF6_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF6) | ((val & 0x1) << 14))
+/* Clock Enable Request for SIF5 */
+#define SYS2_CLKENR_SIF5 (0x1 << 13)
+#define SYS2_CLKENR_SIF5_VAL(val) (((val) & 0x1) << 13)
+#define SYS2_CLKENR_SIF5_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF5) | ((val & 0x1) << 13))
+/* Clock Enable Request for SIF4 */
+#define SYS2_CLKENR_SIF4 (0x1 << 12)
+#define SYS2_CLKENR_SIF4_VAL(val) (((val) & 0x1) << 12)
+#define SYS2_CLKENR_SIF4_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF4) | ((val & 0x1) << 12))
+/* Clock Enable Request for SIF3 */
+#define SYS2_CLKENR_SIF3 (0x1 << 11)
+#define SYS2_CLKENR_SIF3_VAL(val) (((val) & 0x1) << 11)
+#define SYS2_CLKENR_SIF3_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF3) | ((val & 0x1) << 11))
+/* Clock Enable Request for SIF2 */
+#define SYS2_CLKENR_SIF2 (0x1 << 10)
+#define SYS2_CLKENR_SIF2_VAL(val) (((val) & 0x1) << 10)
+#define SYS2_CLKENR_SIF2_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF2) | ((val & 0x1) << 10))
+/* Clock Enable Request for SIF1 */
+#define SYS2_CLKENR_SIF1 (0x1 << 9)
+#define SYS2_CLKENR_SIF1_VAL(val) (((val) & 0x1) << 9)
+#define SYS2_CLKENR_SIF1_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF1) | ((val & 0x1) << 9))
+/* Clock Enable Request for SIF0 */
+#define SYS2_CLKENR_SIF0 (0x1 << 8)
+#define SYS2_CLKENR_SIF0_VAL(val) (((val) & 0x1) << 8)
+#define SYS2_CLKENR_SIF0_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_SIF0) | ((val & 0x1) << 8))
+/* Clock Enable Request for DFEV7 */
+#define SYS2_CLKENR_DFEV7 (0x1 << 7)
+#define SYS2_CLKENR_DFEV7_VAL(val) (((val) & 0x1) << 7)
+#define SYS2_CLKENR_DFEV7_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV7) | ((val & 0x1) << 7))
+/* Clock Enable Request for DFEV6 */
+#define SYS2_CLKENR_DFEV6 (0x1 << 6)
+#define SYS2_CLKENR_DFEV6_VAL(val) (((val) & 0x1) << 6)
+#define SYS2_CLKENR_DFEV6_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV6) | ((val & 0x1) << 6))
+/* Clock Enable Request for DFEV5 */
+#define SYS2_CLKENR_DFEV5 (0x1 << 5)
+#define SYS2_CLKENR_DFEV5_VAL(val) (((val) & 0x1) << 5)
+#define SYS2_CLKENR_DFEV5_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV5) | ((val & 0x1) << 5))
+/* Clock Enable Request for DFEV4 */
+#define SYS2_CLKENR_DFEV4 (0x1 << 4)
+#define SYS2_CLKENR_DFEV4_VAL(val) (((val) & 0x1) << 4)
+#define SYS2_CLKENR_DFEV4_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV4) | ((val & 0x1) << 4))
+/* Clock Enable Request for DFEV3 */
+#define SYS2_CLKENR_DFEV3 (0x1 << 3)
+#define SYS2_CLKENR_DFEV3_VAL(val) (((val) & 0x1) << 3)
+#define SYS2_CLKENR_DFEV3_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV3) | ((val & 0x1) << 3))
+/* Clock Enable Request for DFEV2 */
+#define SYS2_CLKENR_DFEV2 (0x1 << 2)
+#define SYS2_CLKENR_DFEV2_VAL(val) (((val) & 0x1) << 2)
+#define SYS2_CLKENR_DFEV2_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV2) | ((val & 0x1) << 2))
+/* Clock Enable Request for DFEV1 */
+#define SYS2_CLKENR_DFEV1 (0x1 << 1)
+#define SYS2_CLKENR_DFEV1_VAL(val) (((val) & 0x1) << 1)
+#define SYS2_CLKENR_DFEV1_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV1) | ((val & 0x1) << 1))
+/* Clock Enable Request for DFEV0 */
+#define SYS2_CLKENR_DFEV0 (0x1)
+#define SYS2_CLKENR_DFEV0_VAL(val) (((val) & 0x1))
+#define SYS2_CLKENR_DFEV0_SET (reg,val) (reg) = ((reg & ~SYS2_CLKENR_DFEV0) | ((val & 0x1)))
+
+/*******************************************************************************
+ * SYS2 Clock Clear Register
+ ******************************************************************************/
+
+/* Clock Disable Request for PORT4 */
+#define SYS2_CLKCLR_PORT4 (0x1 << 27)
+#define SYS2_CLKCLR_PORT4_VAL(val) (((val) & 0x1) << 27)
+#define SYS2_CLKCLR_PORT4_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_PORT4) | ((val & 0x1) << 27))
+/* Clock Disable Request for HWSYNC */
+#define SYS2_CLKCLR_HWSYNC (0x1 << 26)
+#define SYS2_CLKCLR_HWSYNC_VAL(val) (((val) & 0x1) << 26)
+#define SYS2_CLKCLR_HWSYNC_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_HWSYNC) | ((val & 0x1) << 26))
+/* Clock Disable Request for MBS */
+#define SYS2_CLKCLR_MBS (0x1 << 25)
+#define SYS2_CLKCLR_MBS_VAL(val) (((val) & 0x1) << 25)
+#define SYS2_CLKCLR_MBS_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_MBS) | ((val & 0x1) << 25))
+/* Clock Disable Request for SWINT */
+#define SYS2_CLKCLR_SWINT (0x1 << 24)
+#define SYS2_CLKCLR_SWINT_VAL(val) (((val) & 0x1) << 24)
+#define SYS2_CLKCLR_SWINT_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SWINT) | ((val & 0x1) << 24))
+/* Clock Disable Request for HWACC3 */
+#define SYS2_CLKCLR_HWACC3 (0x1 << 19)
+#define SYS2_CLKCLR_HWACC3_VAL(val) (((val) & 0x1) << 19)
+#define SYS2_CLKCLR_HWACC3_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_HWACC3) | ((val & 0x1) << 19))
+/* Clock Disable Request for HWACC2 */
+#define SYS2_CLKCLR_HWACC2 (0x1 << 18)
+#define SYS2_CLKCLR_HWACC2_VAL(val) (((val) & 0x1) << 18)
+#define SYS2_CLKCLR_HWACC2_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_HWACC2) | ((val & 0x1) << 18))
+/* Clock Disable Request for HWACC1 */
+#define SYS2_CLKCLR_HWACC1 (0x1 << 17)
+#define SYS2_CLKCLR_HWACC1_VAL(val) (((val) & 0x1) << 17)
+#define SYS2_CLKCLR_HWACC1_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_HWACC1) | ((val & 0x1) << 17))
+/* Clock Disable Request for HWACC0 */
+#define SYS2_CLKCLR_HWACC0 (0x1 << 16)
+#define SYS2_CLKCLR_HWACC0_VAL(val) (((val) & 0x1) << 16)
+#define SYS2_CLKCLR_HWACC0_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_HWACC0) | ((val & 0x1) << 16))
+/* Clock Disable Request for SIF7 */
+#define SYS2_CLKCLR_SIF7 (0x1 << 15)
+#define SYS2_CLKCLR_SIF7_VAL(val) (((val) & 0x1) << 15)
+#define SYS2_CLKCLR_SIF7_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF7) | ((val & 0x1) << 15))
+/* Clock Disable Request for SIF6 */
+#define SYS2_CLKCLR_SIF6 (0x1 << 14)
+#define SYS2_CLKCLR_SIF6_VAL(val) (((val) & 0x1) << 14)
+#define SYS2_CLKCLR_SIF6_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF6) | ((val & 0x1) << 14))
+/* Clock Disable Request for SIF5 */
+#define SYS2_CLKCLR_SIF5 (0x1 << 13)
+#define SYS2_CLKCLR_SIF5_VAL(val) (((val) & 0x1) << 13)
+#define SYS2_CLKCLR_SIF5_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF5) | ((val & 0x1) << 13))
+/* Clock Disable Request for SIF4 */
+#define SYS2_CLKCLR_SIF4 (0x1 << 12)
+#define SYS2_CLKCLR_SIF4_VAL(val) (((val) & 0x1) << 12)
+#define SYS2_CLKCLR_SIF4_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF4) | ((val & 0x1) << 12))
+/* Clock Disable Request for SIF3 */
+#define SYS2_CLKCLR_SIF3 (0x1 << 11)
+#define SYS2_CLKCLR_SIF3_VAL(val) (((val) & 0x1) << 11)
+#define SYS2_CLKCLR_SIF3_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF3) | ((val & 0x1) << 11))
+/* Clock Disable Request for SIF2 */
+#define SYS2_CLKCLR_SIF2 (0x1 << 10)
+#define SYS2_CLKCLR_SIF2_VAL(val) (((val) & 0x1) << 10)
+#define SYS2_CLKCLR_SIF2_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF2) | ((val & 0x1) << 10))
+/* Clock Disable Request for SIF1 */
+#define SYS2_CLKCLR_SIF1 (0x1 << 9)
+#define SYS2_CLKCLR_SIF1_VAL(val) (((val) & 0x1) << 9)
+#define SYS2_CLKCLR_SIF1_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF1) | ((val & 0x1) << 9))
+/* Clock Disable Request for SIF0 */
+#define SYS2_CLKCLR_SIF0 (0x1 << 8)
+#define SYS2_CLKCLR_SIF0_VAL(val) (((val) & 0x1) << 8)
+#define SYS2_CLKCLR_SIF0_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_SIF0) | ((val & 0x1) << 8))
+/* Clock Disable Request for DFEV7 */
+#define SYS2_CLKCLR_DFEV7 (0x1 << 7)
+#define SYS2_CLKCLR_DFEV7_VAL(val) (((val) & 0x1) << 7)
+#define SYS2_CLKCLR_DFEV7_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV7) | ((val & 0x1) << 7))
+/* Clock Disable Request for DFEV6 */
+#define SYS2_CLKCLR_DFEV6 (0x1 << 6)
+#define SYS2_CLKCLR_DFEV6_VAL(val) (((val) & 0x1) << 6)
+#define SYS2_CLKCLR_DFEV6_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV6) | ((val & 0x1) << 6))
+/* Clock Disable Request for DFEV5 */
+#define SYS2_CLKCLR_DFEV5 (0x1 << 5)
+#define SYS2_CLKCLR_DFEV5_VAL(val) (((val) & 0x1) << 5)
+#define SYS2_CLKCLR_DFEV5_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV5) | ((val & 0x1) << 5))
+/* Clock Disable Request for DFEV4 */
+#define SYS2_CLKCLR_DFEV4 (0x1 << 4)
+#define SYS2_CLKCLR_DFEV4_VAL(val) (((val) & 0x1) << 4)
+#define SYS2_CLKCLR_DFEV4_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV4) | ((val & 0x1) << 4))
+/* Clock Disable Request for DFEV3 */
+#define SYS2_CLKCLR_DFEV3 (0x1 << 3)
+#define SYS2_CLKCLR_DFEV3_VAL(val) (((val) & 0x1) << 3)
+#define SYS2_CLKCLR_DFEV3_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV3) | ((val & 0x1) << 3))
+/* Clock Disable Request for DFEV2 */
+#define SYS2_CLKCLR_DFEV2 (0x1 << 2)
+#define SYS2_CLKCLR_DFEV2_VAL(val) (((val) & 0x1) << 2)
+#define SYS2_CLKCLR_DFEV2_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV2) | ((val & 0x1) << 2))
+/* Clock Disable Request for DFEV1 */
+#define SYS2_CLKCLR_DFEV1 (0x1 << 1)
+#define SYS2_CLKCLR_DFEV1_VAL(val) (((val) & 0x1) << 1)
+#define SYS2_CLKCLR_DFEV1_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV1) | ((val & 0x1) << 1))
+/* Clock Disable Request for DFEV0 */
+#define SYS2_CLKCLR_DFEV0 (0x1)
+#define SYS2_CLKCLR_DFEV0_VAL(val) (((val) & 0x1))
+#define SYS2_CLKCLR_DFEV0_SET (reg,val) (reg) = ((reg & ~SYS2_CLKCLR_DFEV0) | ((val & 0x1)))
+
+/*******************************************************************************
+ * SYS2 Reset Status Register
+ ******************************************************************************/
+
+/* HWACC3 Reset */
+#define SYS2_RSR_HWACC3 (0x1 << 11)
+#define SYS2_RSR_HWACC3_VAL(val) (((val) & 0x1) << 11)
+#define SYS2_RSR_HWACC3_GET(val) (((val) & SYS2_RSR_HWACC3) >> 11)
+/* HWACC2 Reset */
+#define SYS2_RSR_HWACC2 (0x1 << 10)
+#define SYS2_RSR_HWACC2_VAL(val) (((val) & 0x1) << 10)
+#define SYS2_RSR_HWACC2_GET(val) (((val) & SYS2_RSR_HWACC2) >> 10)
+/* HWACC1 Reset */
+#define SYS2_RSR_HWACC1 (0x1 << 9)
+#define SYS2_RSR_HWACC1_VAL(val) (((val) & 0x1) << 9)
+#define SYS2_RSR_HWACC1_GET(val) (((val) & SYS2_RSR_HWACC1) >> 9)
+/* HWACC0 Reset */
+#define SYS2_RSR_HWACC0 (0x1 << 8)
+#define SYS2_RSR_HWACC0_VAL(val) (((val) & 0x1) << 8)
+#define SYS2_RSR_HWACC0_GET(val) (((val) & SYS2_RSR_HWACC0) >> 8)
+/* DFEV7 Reset */
+#define SYS2_RSR_DFEV7 (0x1 << 7)
+#define SYS2_RSR_DFEV7_VAL(val) (((val) & 0x1) << 7)
+#define SYS2_RSR_DFEV7_GET(val) (((val) & SYS2_RSR_DFEV7) >> 7)
+/* DFEV6 Reset */
+#define SYS2_RSR_DFEV6 (0x1 << 6)
+#define SYS2_RSR_DFEV6_VAL(val) (((val) & 0x1) << 6)
+#define SYS2_RSR_DFEV6_GET(val) (((val) & SYS2_RSR_DFEV6) >> 6)
+/* DFEV5 Reset */
+#define SYS2_RSR_DFEV5 (0x1 << 5)
+#define SYS2_RSR_DFEV5_VAL(val) (((val) & 0x1) << 5)
+#define SYS2_RSR_DFEV5_GET(val) (((val) & SYS2_RSR_DFEV5) >> 5)
+/* DFEV4 Reset */
+#define SYS2_RSR_DFEV4 (0x1 << 4)
+#define SYS2_RSR_DFEV4_VAL(val) (((val) & 0x1) << 4)
+#define SYS2_RSR_DFEV4_GET(val) (((val) & SYS2_RSR_DFEV4) >> 4)
+/* DFEV3 Reset */
+#define SYS2_RSR_DFEV3 (0x1 << 3)
+#define SYS2_RSR_DFEV3_VAL(val) (((val) & 0x1) << 3)
+#define SYS2_RSR_DFEV3_GET(val) (((val) & SYS2_RSR_DFEV3) >> 3)
+/* DFEV2 Reset */
+#define SYS2_RSR_DFEV2 (0x1 << 2)
+#define SYS2_RSR_DFEV2_VAL(val) (((val) & 0x1) << 2)
+#define SYS2_RSR_DFEV2_GET(val) (((val) & SYS2_RSR_DFEV2) >> 2)
+/* DFEV1 Reset */
+#define SYS2_RSR_DFEV1 (0x1 << 1)
+#define SYS2_RSR_DFEV1_VAL(val) (((val) & 0x1) << 1)
+#define SYS2_RSR_DFEV1_GET(val) (((val) & SYS2_RSR_DFEV1) >> 1)
+/* DFEV0 Reset */
+#define SYS2_RSR_DFEV0 (0x1)
+#define SYS2_RSR_DFEV0_VAL(val) (((val) & 0x1))
+#define SYS2_RSR_DFEV0_GET(val) ((val) & SYS2_RSR_DFEV0)
+
+/******************************************************************************
+ * SYS2 Reset Request Register
+ ******************************************************************************/
+
+/* HWACC3 Reset Request */
+#define SYS2_RREQR_HWACC3 (0x1 << 11)
+#define SYS2_RREQR_HWACC3_VAL(val) (((val) & 0x1) << 11)
+#define SYS2_RREQR_HWACC3_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_HWACC3) | ((val & 0x1) << 11))
+/* HWACC2 Reset Request */
+#define SYS2_RREQR_HWACC2 (0x1 << 10)
+#define SYS2_RREQR_HWACC2_VAL(val) (((val) & 0x1) << 10)
+#define SYS2_RREQR_HWACC2_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_HWACC2) | ((val & 0x1) << 10))
+/* HWACC1 Reset Request */
+#define SYS2_RREQR_HWACC1 (0x1 << 9)
+#define SYS2_RREQR_HWACC1_VAL(val) (((val) & 0x1) << 9)
+#define SYS2_RREQR_HWACC1_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_HWACC1) | ((val & 0x1) << 9))
+/* HWACC0 Reset Request */
+#define SYS2_RREQR_HWACC0 (0x1 << 8)
+#define SYS2_RREQR_HWACC0_VAL(val) (((val) & 0x1) << 8)
+#define SYS2_RREQR_HWACC0_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_HWACC0) | ((val & 0x1) << 8))
+/* DFEV7 Reset Request */
+#define SYS2_RREQR_DFEV7 (0x1 << 7)
+#define SYS2_RREQR_DFEV7_VAL(val) (((val) & 0x1) << 7)
+#define SYS2_RREQR_DFEV7_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV7) | ((val & 0x1) << 7))
+/* DFEV6 Reset Request */
+#define SYS2_RREQR_DFEV6 (0x1 << 6)
+#define SYS2_RREQR_DFEV6_VAL(val) (((val) & 0x1) << 6)
+#define SYS2_RREQR_DFEV6_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV6) | ((val & 0x1) << 6))
+/* DFEV5 Reset Request */
+#define SYS2_RREQR_DFEV5 (0x1 << 5)
+#define SYS2_RREQR_DFEV5_VAL(val) (((val) & 0x1) << 5)
+#define SYS2_RREQR_DFEV5_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV5) | ((val & 0x1) << 5))
+/* DFEV4 Reset Request */
+#define SYS2_RREQR_DFEV4 (0x1 << 4)
+#define SYS2_RREQR_DFEV4_VAL(val) (((val) & 0x1) << 4)
+#define SYS2_RREQR_DFEV4_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV4) | ((val & 0x1) << 4))
+/* DFEV3 Reset Request */
+#define SYS2_RREQR_DFEV3 (0x1 << 3)
+#define SYS2_RREQR_DFEV3_VAL(val) (((val) & 0x1) << 3)
+#define SYS2_RREQR_DFEV3_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV3) | ((val & 0x1) << 3))
+/* DFEV2 Reset Request */
+#define SYS2_RREQR_DFEV2 (0x1 << 2)
+#define SYS2_RREQR_DFEV2_VAL(val) (((val) & 0x1) << 2)
+#define SYS2_RREQR_DFEV2_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV2) | ((val & 0x1) << 2))
+/* DFEV1 Reset Request */
+#define SYS2_RREQR_DFEV1 (0x1 << 1)
+#define SYS2_RREQR_DFEV1_VAL(val) (((val) & 0x1) << 1)
+#define SYS2_RREQR_DFEV1_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV1) | ((val & 0x1) << 1))
+/* DFEV0 Reset Request */
+#define SYS2_RREQR_DFEV0 (0x1)
+#define SYS2_RREQR_DFEV0_VAL(val) (((val) & 0x1))
+#define SYS2_RREQR_DFEV0_SET (reg,val) (reg) = ((reg & ~SYS2_RREQR_DFEV0) | ((val & 0x1)))
+
+/*******************************************************************************
+ * SYS2 Reset Release Register
+ ******************************************************************************/
+
+/* HWACC3 Reset Release */
+#define SYS2_RRLSR_HWACC3 (0x1 << 11)
+#define SYS2_RRLSR_HWACC3_VAL(val) (((val) & 0x1) << 11)
+#define SYS2_RRLSR_HWACC3_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_HWACC3) | ((val & 0x1) << 11))
+/* HWACC2 Reset Release */
+#define SYS2_RRLSR_HWACC2 (0x1 << 10)
+#define SYS2_RRLSR_HWACC2_VAL(val) (((val) & 0x1) << 10)
+#define SYS2_RRLSR_HWACC2_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_HWACC2) | ((val & 0x1) << 10))
+/* HWACC1 Reset Release */
+#define SYS2_RRLSR_HWACC1 (0x1 << 9)
+#define SYS2_RRLSR_HWACC1_VAL(val) (((val) & 0x1) << 9)
+#define SYS2_RRLSR_HWACC1_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_HWACC1) | ((val & 0x1) << 9))
+/* HWACC0 Reset Release */
+#define SYS2_RRLSR_HWACC0 (0x1 << 8)
+#define SYS2_RRLSR_HWACC0_VAL(val) (((val) & 0x1) << 8)
+#define SYS2_RRLSR_HWACC0_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_HWACC0) | ((val & 0x1) << 8))
+/* DFEV7 Reset Release */
+#define SYS2_RRLSR_DFEV7 (0x1 << 7)
+#define SYS2_RRLSR_DFEV7_VAL(val) (((val) & 0x1) << 7)
+#define SYS2_RRLSR_DFEV7_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV7) | ((val & 0x1) << 7))
+/* DFEV6 Reset Release */
+#define SYS2_RRLSR_DFEV6 (0x1 << 6)
+#define SYS2_RRLSR_DFEV6_VAL(val) (((val) & 0x1) << 6)
+#define SYS2_RRLSR_DFEV6_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV6) | ((val & 0x1) << 6))
+/* DFEV5 Reset Release */
+#define SYS2_RRLSR_DFEV5 (0x1 << 5)
+#define SYS2_RRLSR_DFEV5_VAL(val) (((val) & 0x1) << 5)
+#define SYS2_RRLSR_DFEV5_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV5) | ((val & 0x1) << 5))
+/* DFEV4 Reset Release */
+#define SYS2_RRLSR_DFEV4 (0x1 << 4)
+#define SYS2_RRLSR_DFEV4_VAL(val) (((val) & 0x1) << 4)
+#define SYS2_RRLSR_DFEV4_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV4) | ((val & 0x1) << 4))
+/* DFEV3 Reset Release */
+#define SYS2_RRLSR_DFEV3 (0x1 << 3)
+#define SYS2_RRLSR_DFEV3_VAL(val) (((val) & 0x1) << 3)
+#define SYS2_RRLSR_DFEV3_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV3) | ((val & 0x1) << 3))
+/* DFEV2 Reset Release */
+#define SYS2_RRLSR_DFEV2 (0x1 << 2)
+#define SYS2_RRLSR_DFEV2_VAL(val) (((val) & 0x1) << 2)
+#define SYS2_RRLSR_DFEV2_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV2) | ((val & 0x1) << 2))
+/* DFEV1 Reset Release */
+#define SYS2_RRLSR_DFEV1 (0x1 << 1)
+#define SYS2_RRLSR_DFEV1_VAL(val) (((val) & 0x1) << 1)
+#define SYS2_RRLSR_DFEV1_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV1) | ((val & 0x1) << 1))
+/* DFEV0 Reset Release */
+#define SYS2_RRLSR_DFEV0 (0x1)
+#define SYS2_RRLSR_DFEV0_VAL(val) (((val) & 0x1))
+#define SYS2_RRLSR_DFEV0_SET (reg,val) (reg) = ((reg & ~SYS2_RRLSR_DFEV0) | ((val & 0x1)))
+
+#endif /* __SYS2_REG_H */
+
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-buttons.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-buttons.c
new file mode 100644
index 0000000000..ad25cac797
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-buttons.c
@@ -0,0 +1,58 @@
+/*
+ * Lantiq GPIO button support
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <dev-gpio-buttons.h>
+
+void __init ltq_register_gpio_keys_polled(int id,
+ unsigned poll_interval,
+ unsigned nbuttons,
+ struct gpio_keys_button *buttons)
+{
+ struct platform_device *pdev;
+ struct gpio_keys_platform_data pdata;
+ struct gpio_keys_button *p;
+ int err;
+
+ p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
+
+ memcpy(p, buttons, nbuttons * sizeof(*p));
+
+ pdev = platform_device_alloc("gpio-keys-polled", id);
+ if (!pdev)
+ goto err_free_buttons;
+
+ memset(&pdata, 0, sizeof(pdata));
+ pdata.poll_interval = poll_interval;
+ pdata.nbuttons = nbuttons;
+ pdata.buttons = p;
+
+ err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+ if (err)
+ goto err_put_pdev;
+
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_put_pdev;
+
+ return;
+
+err_put_pdev:
+ platform_device_put(pdev);
+
+err_free_buttons:
+ kfree(p);
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-leds.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-leds.c
new file mode 100644
index 0000000000..89dc79de61
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/dev-gpio-leds.c
@@ -0,0 +1,57 @@
+/*
+ * Lantiq GPIO LED device support
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Parts of this file are based on Atheros' 2.6.15 BSP
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <dev-gpio-leds.h>
+
+void __init ltq_add_device_gpio_leds(int id, unsigned num_leds,
+ struct gpio_led *leds)
+{
+ struct platform_device *pdev;
+ struct gpio_led_platform_data pdata;
+ struct gpio_led *p;
+ int err;
+
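+	/* copy the caller's led table; board code typically passes __initdata */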
+ p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return;
+
+ memcpy(p, leds, num_leds * sizeof(*p));
+
+ pdev = platform_device_alloc("leds-gpio", id);
+ if (!pdev)
+ goto err_free_leds;
+
+ memset(&pdata, 0, sizeof(pdata));
+ pdata.num_leds = num_leds;
+ pdata.leds = p;
+
+ err = platform_device_add_data(pdev, &pdata, sizeof(pdata));
+ if (err)
+ goto err_put_pdev;
+
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_put_pdev;
+
+ return;
+
+err_put_pdev:
+ platform_device_put(pdev);
+
+err_free_leds:
+ kfree(p);
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Kconfig b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Kconfig
new file mode 100644
index 0000000000..03e999d92e
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Kconfig
@@ -0,0 +1,11 @@
+if SOC_FALCON
+
+menu "MIPS Machine"
+
+config LANTIQ_MACH_EASY98000
+ bool "Easy98000"
+ default y
+
+endmenu
+
+endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Makefile b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 0000000000..3634154fb9
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1,2 @@
+obj-y := prom.o reset.o sysctrl.o devices.o gpio.o
+obj-$(CONFIG_LANTIQ_MACH_EASY98000) += mach-easy98000.o
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/addon-easy98000.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/addon-easy98000.c
new file mode 100644
index 0000000000..317ee4001b
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/addon-easy98000.c
@@ -0,0 +1,213 @@
+/*
+ * EASY98000 CPLD Addon driver
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
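+/*
+ * Note: the offset comments show the byte-wide CPLD registers pair-swapped
+ * within each 16-bit word (0x1, 0x0, 0x3, 0x2, ...); the member order below
+ * follows that layout.
+ */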
+struct easy98000_reg_cpld {
+ u16 cmdreg1; /* 0x1 */
+ u16 cmdreg0; /* 0x0 */
+ u16 idreg0; /* 0x3 */
+ u16 resreg; /* 0x2 */
+ u16 intreg; /* 0x5 */
+ u16 idreg1; /* 0x4 */
+ u16 ledreg; /* 0x7 */
+ u16 pcmconconfig; /* 0x6 */
+ u16 res0; /* 0x9 */
+ u16 ethledreg; /* 0x8 */
+ u16 res1[4]; /* 0xa-0xd */
+ u16 cpld1v; /* 0xf */
+ u16 cpld2v; /* 0xe */
+};
+static struct easy98000_reg_cpld * const cpld =
+ (struct easy98000_reg_cpld *)(KSEG1 | 0x17c00000);
+#define cpld_r8(reg) (__raw_readw(&cpld->reg) & 0xFF)
+#define cpld_w8(val, reg) __raw_writew((val) & 0xFF, &cpld->reg)
+
+int easy98000_addon_has_dm9000(void)
+{
+ if ((cpld_r8(idreg0) & 0xF) == 1)
+ return 1;
+ return 0;
+}
+
+#if defined(CONFIG_PROC_FS)
+typedef void (*cpld_dump)(struct seq_file *s);
+struct proc_entry {
+ char *name;
+ void *callback;
+};
+
+static int cpld_proc_show(struct seq_file *s, void *p)
+{
+	cpld_dump dump = s->private;
+
+	if (dump != NULL)
+		dump(s);
+
+	return 0;
+}
+
+static int cpld_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, cpld_proc_show, PDE(inode)->data);
+}
+
+static void cpld_versions_get(struct seq_file *s)
+{
+ seq_printf(s, "CPLD1: V%d\n", cpld_r8(cpld1v));
+ seq_printf(s, "CPLD2: V%d\n", cpld_r8(cpld2v));
+}
+
+static void cpld_ebu_module_get(struct seq_file *s)
+{
+ u8 addon_id;
+
+ addon_id = cpld_r8(idreg0) & 0xF;
+ switch (addon_id) {
+ case 0xF: /* nothing connected */
+ break;
+ case 1:
+ seq_printf(s, "Ethernet Controller module (dm9000)\n");
+ break;
+ default:
+ seq_printf(s, "Unknown EBU module (EBU_ID=0x%02X)\n", addon_id);
+ break;
+ }
+}
+
+static void cpld_xmii_module_get(struct seq_file *s)
+{
+ u8 addon_id;
+ char *mod = NULL;
+
+ addon_id = cpld_r8(idreg1) & 0xF;
+ switch (addon_id) {
+ case 0xF:
+ mod = "no module";
+ break;
+ case 0x1:
+ mod = "RGMII module";
+ break;
+ case 0x4:
+ mod = "GMII MAC Mode (XWAY TANTOS-3G)";
+ break;
+ case 0x6:
+ mod = "TMII MAC Mode (XWAY TANTOS-3G)";
+ break;
+ case 0x8:
+ mod = "GMII PHY module";
+ break;
+ case 0x9:
+ mod = "MII PHY module";
+ break;
+ case 0xA:
+ mod = "RMII PHY module";
+ break;
+ default:
+ break;
+ }
+ if (mod)
+ seq_printf(s, "%s\n", mod);
+ else
+ seq_printf(s, "unknown xMII module (xMII_ID=0x%02X)\n", addon_id);
+}
+
+static struct proc_entry proc_entries[] = {
+ {"versions", cpld_versions_get},
+ {"ebu", cpld_ebu_module_get},
+ {"xmii", cpld_xmii_module_get},
+};
+
+static struct file_operations ops = {
+ .owner = THIS_MODULE,
+ .open = cpld_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void cpld_proc_entry_create(struct proc_dir_entry *parent_node,
+ struct proc_entry *proc_entry)
+{
+	proc_create_data(proc_entry->name, (S_IFREG | S_IRUGO), parent_node,
+		&ops, proc_entry->callback);
+}
+
+static int cpld_proc_install(void)
+{
+ struct proc_dir_entry *driver_proc_node;
+
+ driver_proc_node = proc_mkdir("cpld", NULL);
+ if (driver_proc_node != NULL) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(proc_entries); i++)
+ cpld_proc_entry_create(driver_proc_node,
+ &proc_entries[i]);
+ } else {
+		printk(KERN_ERR "cannot create proc entry\n");
+ return -1;
+ }
+ return 0;
+}
+#else
+static inline int cpld_proc_install(void) { return 0; }
+#endif
+
+static int easy98000_addon_probe(struct platform_device *pdev)
+{
+ return cpld_proc_install();
+}
+
+static int easy98000_addon_remove(struct platform_device *pdev)
+{
+#if defined(CONFIG_PROC_FS)
+ char buf[64];
+ int i;
+
+ for (i = 0; i < sizeof(proc_entries) / sizeof(proc_entries[0]); i++) {
+ sprintf(buf, "cpld/%s", proc_entries[i].name);
+ remove_proc_entry(buf, 0);
+ }
+ remove_proc_entry("cpld", 0);
+#endif
+ return 0;
+}
+
+static struct platform_driver easy98000_addon_driver = {
+ .probe = easy98000_addon_probe,
+ .remove = __devexit_p(easy98000_addon_remove),
+ .driver = {
+ .name = "easy98000_addon",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init easy98000_addon_init(void)
+{
+ return platform_driver_register(&easy98000_addon_driver);
+}
+
+void __exit easy98000_addon_exit(void)
+{
+ platform_driver_unregister(&easy98000_addon_driver);
+}
+
+module_init(easy98000_addon_init);
+module_exit(easy98000_addon_exit);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.c
new file mode 100644
index 0000000000..94622cfda8
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.c
@@ -0,0 +1,161 @@
+/*
+ * EASY98000 CPLD LED driver
+ *
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+
+#include "dev-leds-easy98000-cpld.h"
+
+const char *led_name[8] = {
+ "ge0_act",
+ "ge0_link",
+ "ge1_act",
+ "ge1_link",
+ "fe2_act",
+ "fe2_link",
+ "fe3_act",
+ "fe3_link"
+};
+
+#define cpld_base7 ((u16 *)(KSEG1 | 0x17c0000c))
+#define cpld_base8 ((u16 *)(KSEG1 | 0x17c00012))
+
+#define ltq_r16(reg) __raw_readw(reg)
+#define ltq_w16(val, reg) __raw_writew(val, reg)
+
+struct cpld_led_dev {
+ struct led_classdev cdev;
+ u8 mask;
+ u16 *base;
+};
+
+struct cpld_led_drvdata {
+ struct cpld_led_dev *led_devs;
+ int num_leds;
+};
+
+void led_set(u8 mask, u16 *base)
+{
+ ltq_w16(ltq_r16(base) | mask, base);
+}
+
+void led_clear(u8 mask, u16 *base)
+{
+ ltq_w16(ltq_r16(base) & (~mask), base);
+}
+
+void led_blink_clear(u8 mask, u16 *base)
+{
+ led_clear(mask, base);
+}
+
+static void led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct cpld_led_dev *led_dev =
+ container_of(led_cdev, struct cpld_led_dev, cdev);
+
+ if (value)
+ led_set(led_dev->mask, led_dev->base);
+ else
+ led_clear(led_dev->mask, led_dev->base);
+}
+
+static int led_probe(struct platform_device *pdev)
+{
+ int i;
+ char name[32];
+ struct cpld_led_drvdata *drvdata;
+ int ret = 0;
+
+ drvdata = kzalloc(sizeof(struct cpld_led_drvdata) +
+ sizeof(struct cpld_led_dev) * MAX_LED,
+ GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->led_devs = (struct cpld_led_dev *) &drvdata[1];
+
+ for (i = 0; i < MAX_LED; i++) {
+ struct cpld_led_dev *led_dev = &drvdata->led_devs[i];
+ led_dev->cdev.brightness_set = led_brightness;
+ led_dev->cdev.default_trigger = NULL;
+ led_dev->mask = 1 << (i % 8);
+		if (i < 8) {
+ sprintf(name, "easy98000-cpld:%s", led_name[i]);
+ led_dev->base = cpld_base8;
+ } else {
+ sprintf(name, "easy98000-cpld:red:%d", i-8);
+ led_dev->base = cpld_base7;
+ }
+ led_dev->cdev.name = name;
+ ret = led_classdev_register(&pdev->dev, &led_dev->cdev);
+ if (ret)
+ goto err;
+ }
+ platform_set_drvdata(pdev, drvdata);
+ return 0;
+
+err:
+	pr_err("led_probe: led_classdev_register failed\n");
+ for (i = i - 1; i >= 0; i--)
+ led_classdev_unregister(&drvdata->led_devs[i].cdev);
+
+ kfree(drvdata);
+ return ret;
+}
+
+static int led_remove(struct platform_device *pdev)
+{
+ int i;
+ struct cpld_led_drvdata *drvdata = platform_get_drvdata(pdev);
+ for (i = 0; i < MAX_LED; i++)
+ led_classdev_unregister(&drvdata->led_devs[i].cdev);
+ kfree(drvdata);
+ return 0;
+}
+
+static struct platform_driver led_driver = {
+ .probe = led_probe,
+ .remove = __devexit_p(led_remove),
+ .driver = {
+ .name = LED_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init easy98000_cpld_led_init(void)
+{
+ pr_info(LED_DESC ", Version " LED_VERSION
+ " (c) Copyright 2011, Lantiq Deutschland GmbH\n");
+ return platform_driver_register(&led_driver);
+}
+
+void __exit easy98000_cpld_led_exit(void)
+{
+ platform_driver_unregister(&led_driver);
+}
+
+module_init(easy98000_cpld_led_init);
+module_exit(easy98000_cpld_led_exit);
+
+MODULE_DESCRIPTION(LED_DESC);
+MODULE_VERSION(LED_VERSION);
+MODULE_AUTHOR("Ralph Hempel <ralph.hempel@lantiq.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.h
new file mode 100644
index 0000000000..3160189fe9
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/dev-leds-easy98000-cpld.h
@@ -0,0 +1,20 @@
+/*
+ * EASY98000 CPLD LED driver
+ *
+ * Copyright (C) 2010 Ralph Hempel <ralph.hempel@lantiq.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _INCLUDE_EASY98000_CPLD_LED_H_
+#define _INCLUDE_EASY98000_CPLD_LED_H_
+
+#define LED_NAME "easy98000_cpld_led"
+#define LED_DESC "EASY98000 LED driver"
+#define LED_VERSION "1.0.0"
+
+#define MAX_LED 16
+
+#endif /* _INCLUDE_EASY98000_CPLD_LED_H_ */
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.c
new file mode 100644
index 0000000000..e684ed470f
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.c
@@ -0,0 +1,152 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mtd/nand.h>
+#include <linux/gpio.h>
+
+#include <lantiq_soc.h>
+
+#include "devices.h"
+
+/* nand flash */
+/* address lines used for NAND control signals */
+#define NAND_ADDR_ALE 0x10000
+#define NAND_ADDR_CLE 0x20000
+/* Ready/Busy Status */
+#define MODCON_STS 0x0002
+/* Ready/Busy Status Edge */
+#define MODCON_STSEDGE 0x0004
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+
+static int
+falcon_nand_ready(struct mtd_info *mtd)
+{
+ u32 modcon = ltq_ebu_r32(LTQ_EBU_MODCON);
+
+ return (((modcon & (MODCON_STS | MODCON_STSEDGE)) ==
+ (MODCON_STS | MODCON_STSEDGE)));
+}
+
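+/*
+ * CLE and ALE are driven via the NAND window address: address bit 17
+ * asserts CLE and bit 16 asserts ALE (see NAND_ADDR_CLE/NAND_ADDR_ALE above).
+ */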
+static void
+falcon_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long nandaddr = (unsigned long) this->IO_ADDR_W;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ nandaddr &= ~(NAND_ADDR_ALE | NAND_ADDR_CLE);
+
+ if (ctrl & NAND_CLE)
+ nandaddr |= NAND_ADDR_CLE;
+ if (ctrl & NAND_ALE)
+ nandaddr |= NAND_ADDR_ALE;
+
+ this->IO_ADDR_W = (void __iomem *) nandaddr;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+static struct platform_nand_data falcon_flash_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_delay = 25,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .cmd_ctrl = falcon_hwcontrol,
+ .dev_ready = falcon_nand_ready,
+ }
+};
+
+static struct resource ltq_nand_res =
+ MEM_RES("nand", LTQ_FLASH_START, LTQ_FLASH_MAX);
+
+static struct platform_device ltq_flash_nand = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = 1,
+ .resource = &ltq_nand_res,
+ .dev = {
+ .platform_data = &falcon_flash_nand_data,
+ },
+};
+
+void __init
+falcon_register_nand(void)
+{
+ platform_device_register(&ltq_flash_nand);
+}
+
+/* gpio */
+#define DECLARE_GPIO_RES(port) \
+static struct resource falcon_gpio ## port ## _res[] = { \
+ MEM_RES("gpio"#port, LTQ_GPIO ## port ## _BASE_ADDR, \
+ LTQ_GPIO ## port ## _SIZE), \
+ MEM_RES("padctrl"#port, LTQ_PADCTRL ## port ## _BASE_ADDR, \
+ LTQ_PADCTRL ## port ## _SIZE), \
+ IRQ_RES("gpio_mux"#port, FALCON_IRQ_GPIO_P ## port) \
+}
+DECLARE_GPIO_RES(0);
+DECLARE_GPIO_RES(1);
+DECLARE_GPIO_RES(2);
+DECLARE_GPIO_RES(3);
+DECLARE_GPIO_RES(4);
+
+void __init
+falcon_register_gpio(void)
+{
+ platform_device_register_simple("falcon_gpio", 0,
+ falcon_gpio0_res, ARRAY_SIZE(falcon_gpio0_res));
+ platform_device_register_simple("falcon_gpio", 1,
+ falcon_gpio1_res, ARRAY_SIZE(falcon_gpio1_res));
+ platform_device_register_simple("falcon_gpio", 2,
+ falcon_gpio2_res, ARRAY_SIZE(falcon_gpio2_res));
+}
+
+void __init
+falcon_register_gpio_extra(void)
+{
+ platform_device_register_simple("falcon_gpio", 3,
+ falcon_gpio3_res, ARRAY_SIZE(falcon_gpio3_res));
+ platform_device_register_simple("falcon_gpio", 4,
+ falcon_gpio4_res, ARRAY_SIZE(falcon_gpio4_res));
+}
+
+/* spi flash */
+static struct platform_device ltq_spi = {
+ .name = "falcon_spi",
+ .num_resources = 0,
+};
+
+void __init
+falcon_register_spi_flash(struct spi_board_info *data)
+{
+ spi_register_board_info(data, 1);
+ platform_device_register(&ltq_spi);
+}
+
+/* i2c */
+static struct resource falcon_i2c_resources[] = {
+ MEM_RES("i2c", GPON_I2C_BASE, GPON_I2C_SIZE),
+ IRQ_RES(i2c_lb, FALCON_IRQ_I2C_LBREQ),
+ IRQ_RES(i2c_b, FALCON_IRQ_I2C_BREQ),
+ IRQ_RES(i2c_err, FALCON_IRQ_I2C_I2C_ERR),
+ IRQ_RES(i2c_p, FALCON_IRQ_I2C_I2C_P),
+};
+
+void __init
+falcon_register_i2c(void)
+{
+ platform_device_register_simple("i2c-falcon", 0,
+ falcon_i2c_resources, ARRAY_SIZE(falcon_i2c_resources));
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.h
new file mode 100644
index 0000000000..d81edbe74e
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/devices.h
@@ -0,0 +1,25 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _FALCON_DEVICES_H__
+#define _FALCON_DEVICES_H__
+
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include "../devices.h"
+
+extern void falcon_register_nand(void);
+extern void falcon_register_gpio(void);
+extern void falcon_register_gpio_extra(void);
+extern void falcon_register_spi_flash(struct spi_board_info *data);
+extern void falcon_register_i2c(void);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/gpio.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/gpio.c
new file mode 100644
index 0000000000..4147d61c7d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/gpio.c
@@ -0,0 +1,409 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#include <lantiq_soc.h>
+
+/* Multiplexer Control Register */
+#define LTQ_PADC_MUX(x) (x * 0x4)
+/* Pad Control Availability Register */
+#define LTQ_PADC_AVAIL 0x000000F0
+
+/* Data Output Register */
+#define LTQ_GPIO_OUT 0x00000000
+/* Data Input Register */
+#define LTQ_GPIO_IN 0x00000004
+/* Direction Register */
+#define LTQ_GPIO_DIR 0x00000008
+/* External Interrupt Control Register 0 */
+#define LTQ_GPIO_EXINTCR0 0x00000018
+/* External Interrupt Control Register 1 */
+#define LTQ_GPIO_EXINTCR1 0x0000001C
+/* IRN Capture Register */
+#define LTQ_GPIO_IRNCR 0x00000020
+/* IRN Interrupt Configuration Register */
+#define LTQ_GPIO_IRNCFG 0x0000002C
+/* IRN Interrupt Enable Set Register */
+#define LTQ_GPIO_IRNRNSET 0x00000030
+/* IRN Interrupt Enable Clear Register */
+#define LTQ_GPIO_IRNENCLR 0x00000034
+/* Output Set Register */
+#define LTQ_GPIO_OUTSET 0x00000040
+/* Output Clear Register */
+#define LTQ_GPIO_OUTCLR	0x00000044
+/* Direction Set Register */
+#define LTQ_GPIO_DIRSET	0x00000048
+/* Direction Clear Register */
+#define LTQ_GPIO_DIRCLR	0x0000004C
+
+/* turn a gpio_chip into a falcon_gpio_port */
+#define ctop(c) container_of(c, struct falcon_gpio_port, gpio_chip)
+/* turn a irq_data into a falcon_gpio_port */
+#define itop(i) ((struct falcon_gpio_port *) irq_get_chip_data(i->irq))
+
+#define ltq_pad_r32(p, reg) ltq_r32(p->pad + reg)
+#define ltq_pad_w32(p, val, reg) ltq_w32(val, p->pad + reg)
+#define ltq_pad_w32_mask(c, clear, set, reg) \
+ ltq_pad_w32(c, (ltq_pad_r32(c, reg) & ~(clear)) | (set), reg)
+
+#define ltq_port_r32(p, reg) ltq_r32(p->port + reg)
+#define ltq_port_w32(p, val, reg) ltq_w32(val, p->port + reg)
+#define ltq_port_w32_mask(p, clear, set, reg) \
+ ltq_port_w32(p, (ltq_port_r32(p, reg) & ~(clear)) | (set), reg)
+
+#define MAX_PORTS 5
+#define PINS_PER_PORT 32
+
+struct falcon_gpio_port {
+ struct gpio_chip gpio_chip;
+ void __iomem *pad;
+ void __iomem *port;
+ unsigned int irq_base;
+ unsigned int chained_irq;
+ struct clk *clk;
+};
+
+static struct falcon_gpio_port ltq_gpio_port[MAX_PORTS];
+
+int gpio_to_irq(unsigned int gpio)
+{
+ return __gpio_to_irq(gpio);
+}
+EXPORT_SYMBOL(gpio_to_irq);
+
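+/*
+ * Pins are numbered (port * 100 + offset), matching the gpio_chip.base of
+ * 100 * pdev->id assigned in falcon_gpio_probe() below.
+ */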
+int ltq_gpio_mux_set(unsigned int pin, unsigned int mux)
+{
+ int port = pin / 100;
+ int offset = pin % 100;
+ struct falcon_gpio_port *gpio_port;
+
+ if ((offset >= PINS_PER_PORT) || (port >= MAX_PORTS))
+ return -EINVAL;
+
+ gpio_port = &ltq_gpio_port[port];
+ ltq_pad_w32(gpio_port, mux & 0x3, LTQ_PADC_MUX(offset));
+
+ return 0;
+}
+EXPORT_SYMBOL(ltq_gpio_mux_set);
+
+int ltq_gpio_request(struct device *dev, unsigned int pin, unsigned int mux,
+ unsigned int dir, const char *name)
+{
+ int port = pin / 100;
+ int offset = pin % 100;
+
+ if (offset >= PINS_PER_PORT || port >= MAX_PORTS)
+ return -EINVAL;
+
+ if (devm_gpio_request(dev, pin, name)) {
+ pr_err("failed to setup lantiq gpio: %s\n", name);
+ return -EBUSY;
+ }
+
+ if (dir)
+ gpio_direction_output(pin, 1);
+ else
+ gpio_direction_input(pin);
+
+ return ltq_gpio_mux_set(pin, mux);
+}
+EXPORT_SYMBOL(ltq_gpio_request);
+
+static int
+falcon_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ ltq_port_w32(ctop(chip), 1 << offset, LTQ_GPIO_DIRCLR);
+
+ return 0;
+}
+
+static void
+falcon_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ if (value)
+ ltq_port_w32(ctop(chip), 1 << offset, LTQ_GPIO_OUTSET);
+ else
+ ltq_port_w32(ctop(chip), 1 << offset, LTQ_GPIO_OUTCLR);
+}
+
+static int
+falcon_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ falcon_gpio_set(chip, offset, value);
+ ltq_port_w32(ctop(chip), 1 << offset, LTQ_GPIO_DIRSET);
+
+ return 0;
+}
+
+static int
+falcon_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ if ((ltq_port_r32(ctop(chip), LTQ_GPIO_DIR) >> offset) & 1)
+ return (ltq_port_r32(ctop(chip), LTQ_GPIO_OUT) >> offset) & 1;
+ else
+ return (ltq_port_r32(ctop(chip), LTQ_GPIO_IN) >> offset) & 1;
+}
+
+static int
+falcon_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ if ((ltq_pad_r32(ctop(chip), LTQ_PADC_AVAIL) >> offset) & 1) {
+ if (ltq_pad_r32(ctop(chip), LTQ_PADC_MUX(offset)) > 1)
+ return -EBUSY;
+ /* switch on gpio function */
+ ltq_pad_w32(ctop(chip), 1, LTQ_PADC_MUX(offset));
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void
+falcon_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ if ((ltq_pad_r32(ctop(chip), LTQ_PADC_AVAIL) >> offset) & 1) {
+ if (ltq_pad_r32(ctop(chip), LTQ_PADC_MUX(offset)) > 1)
+ return;
+ /* switch off gpio function */
+ ltq_pad_w32(ctop(chip), 0, LTQ_PADC_MUX(offset));
+ }
+}
+
+static int
+falcon_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return ctop(chip)->irq_base + offset;
+}
+
+static void
+falcon_gpio_disable_irq(struct irq_data *d)
+{
+ unsigned int offset = d->irq - itop(d)->irq_base;
+
+ ltq_port_w32(itop(d), 1 << offset, LTQ_GPIO_IRNENCLR);
+}
+
+static void
+falcon_gpio_enable_irq(struct irq_data *d)
+{
+ unsigned int offset = d->irq - itop(d)->irq_base;
+
+	if (ltq_pad_r32(itop(d), LTQ_PADC_MUX(offset)) < 1)
+ /* switch on gpio function */
+ ltq_pad_w32(itop(d), 1, LTQ_PADC_MUX(offset));
+
+ ltq_port_w32(itop(d), 1 << offset, LTQ_GPIO_IRNRNSET);
+}
+
+static void
+falcon_gpio_ack_irq(struct irq_data *d)
+{
+ unsigned int offset = d->irq - itop(d)->irq_base;
+
+ ltq_port_w32(itop(d), 1 << offset, LTQ_GPIO_IRNCR);
+}
+
+static void
+falcon_gpio_mask_and_ack_irq(struct irq_data *d)
+{
+ unsigned int offset = d->irq - itop(d)->irq_base;
+
+ ltq_port_w32(itop(d), 1 << offset, LTQ_GPIO_IRNENCLR);
+ ltq_port_w32(itop(d), 1 << offset, LTQ_GPIO_IRNCR);
+}
+
+static struct irq_chip falcon_gpio_irq_chip;
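+/*
+ * As configured here, EXINTCR1 selects both-edge detection and EXINTCR0
+ * selects the active polarity (rising/high level vs. falling/low level).
+ */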
+static int
+falcon_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int offset = d->irq - itop(d)->irq_base;
+ unsigned int mask = 1 << offset;
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_NONE)
+ return 0;
+
+ if ((type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) != 0) {
+ /* level triggered */
+ ltq_port_w32_mask(itop(d), 0, mask, LTQ_GPIO_IRNCFG);
+ irq_set_chip_and_handler_name(d->irq,
+ &falcon_gpio_irq_chip, handle_level_irq, "mux");
+ } else {
+ /* edge triggered */
+ ltq_port_w32_mask(itop(d), mask, 0, LTQ_GPIO_IRNCFG);
+ irq_set_chip_and_handler_name(d->irq,
+ &falcon_gpio_irq_chip, handle_simple_irq, "mux");
+ }
+
+ if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ ltq_port_w32_mask(itop(d), mask, 0, LTQ_GPIO_EXINTCR0);
+ ltq_port_w32_mask(itop(d), 0, mask, LTQ_GPIO_EXINTCR1);
+ } else {
+ if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) != 0)
+ /* positive logic: rising edge, high level */
+ ltq_port_w32_mask(itop(d), mask, 0, LTQ_GPIO_EXINTCR0);
+ else
+ /* negative logic: falling edge, low level */
+ ltq_port_w32_mask(itop(d), 0, mask, LTQ_GPIO_EXINTCR0);
+ ltq_port_w32_mask(itop(d), mask, 0, LTQ_GPIO_EXINTCR1);
+ }
+
+ return gpio_direction_input(itop(d)->gpio_chip.base + offset);
+}
+
+static void
+falcon_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ struct falcon_gpio_port *gpio_port = irq_desc_get_handler_data(desc);
+ unsigned long irncr;
+ int offset;
+
+ /* acknowledge interrupt */
+ irncr = ltq_port_r32(gpio_port, LTQ_GPIO_IRNCR);
+ ltq_port_w32(gpio_port, irncr, LTQ_GPIO_IRNCR);
+
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ for_each_set_bit(offset, &irncr, gpio_port->gpio_chip.ngpio)
+ generic_handle_irq(gpio_port->irq_base + offset);
+}
+
+static struct irq_chip falcon_gpio_irq_chip = {
+ .name = "gpio_irq_mux",
+ .irq_mask = falcon_gpio_disable_irq,
+ .irq_unmask = falcon_gpio_enable_irq,
+ .irq_ack = falcon_gpio_ack_irq,
+ .irq_mask_ack = falcon_gpio_mask_and_ack_irq,
+ .irq_set_type = falcon_gpio_irq_type,
+};
+
+static struct irqaction gpio_cascade = {
+ .handler = no_action,
+ .flags = IRQF_DISABLED,
+ .name = "gpio_cascade",
+};
+
+static int
+falcon_gpio_probe(struct platform_device *pdev)
+{
+ struct falcon_gpio_port *gpio_port;
+ int ret, i;
+ struct resource *gpiores, *padres;
+ int irq;
+
+ if (pdev->id >= MAX_PORTS)
+ return -ENODEV;
+
+ gpiores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ padres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ irq = platform_get_irq(pdev, 0);
+ if (!gpiores || !padres)
+ return -ENODEV;
+
+ gpio_port = &ltq_gpio_port[pdev->id];
+ gpio_port->gpio_chip.label = "falcon-gpio";
+ gpio_port->gpio_chip.direction_input = falcon_gpio_direction_input;
+ gpio_port->gpio_chip.direction_output = falcon_gpio_direction_output;
+ gpio_port->gpio_chip.get = falcon_gpio_get;
+ gpio_port->gpio_chip.set = falcon_gpio_set;
+ gpio_port->gpio_chip.request = falcon_gpio_request;
+ gpio_port->gpio_chip.free = falcon_gpio_free;
+ gpio_port->gpio_chip.base = 100 * pdev->id;
+ gpio_port->gpio_chip.ngpio = 32;
+ gpio_port->gpio_chip.dev = &pdev->dev;
+
+ gpio_port->port = ltq_remap_resource(gpiores);
+ gpio_port->pad = ltq_remap_resource(padres);
+
+ if (!gpio_port->port || !gpio_port->pad) {
+ dev_err(&pdev->dev, "Could not map io ranges\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ gpio_port->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(gpio_port->clk)) {
+ dev_err(&pdev->dev, "Could not get clock\n");
+		ret = PTR_ERR(gpio_port->clk);
+ goto err;
+ }
+ clk_enable(gpio_port->clk);
+
+ if (irq > 0) {
+ /* irq_chip support */
+ gpio_port->gpio_chip.to_irq = falcon_gpio_to_irq;
+ gpio_port->irq_base = INT_NUM_EXTRA_START + (32 * pdev->id);
+
+ for (i = 0; i < 32; i++) {
+ irq_set_chip_and_handler_name(gpio_port->irq_base + i,
+ &falcon_gpio_irq_chip, handle_simple_irq,
+ "mux");
+ irq_set_chip_data(gpio_port->irq_base + i, gpio_port);
+ /* set to negative logic (falling edge, low level) */
+ ltq_port_w32_mask(gpio_port, 0, 1 << i,
+ LTQ_GPIO_EXINTCR0);
+ }
+
+ gpio_port->chained_irq = irq;
+ setup_irq(irq, &gpio_cascade);
+ irq_set_handler_data(irq, gpio_port);
+ irq_set_chained_handler(irq, falcon_gpio_irq_handler);
+ }
+
+ ret = gpiochip_add(&gpio_port->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip %d, %d\n",
+ pdev->id, ret);
+ goto err;
+ }
+ platform_set_drvdata(pdev, gpio_port);
+ return ret;
+
+err:
+ dev_err(&pdev->dev, "Error in gpio_probe %d, %d\n", pdev->id, ret);
+ if (gpiores)
+ release_resource(gpiores);
+ if (padres)
+ release_resource(padres);
+
+ if (gpio_port->port)
+ iounmap(gpio_port->port);
+ if (gpio_port->pad)
+ iounmap(gpio_port->pad);
+ return ret;
+}
+
+static struct platform_driver falcon_gpio_driver = {
+ .probe = falcon_gpio_probe,
+ .driver = {
+ .name = "falcon_gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init
+falcon_gpio_init(void)
+{
+ int ret;
+
+	pr_info("FALC(tm) ON GPIO Driver, (C) 2011 Lantiq Deutschland GmbH\n");
+	ret = platform_driver_register(&falcon_gpio_driver);
+	if (ret)
+		pr_err("falcon_gpio: Error registering platform driver!\n");
+ return ret;
+}
+
+postcore_initcall(falcon_gpio_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-95C3AM1.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-95C3AM1.c
new file mode 100644
index 0000000000..42a3344ae9
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-95C3AM1.c
@@ -0,0 +1,94 @@
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c-gpio.h>
+
+#include <dev-gpio-leds.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+
+#define BOARD_95C3AM1_GPIO_LED_0 10
+#define BOARD_95C3AM1_GPIO_LED_1 11
+#define BOARD_95C3AM1_GPIO_LED_2 12
+#define BOARD_95C3AM1_GPIO_LED_3 13
+
+static struct mtd_partition board_95C3AM1_partitions[] =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x40000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x40000,
+ .size = 0x40000, /* 2 sectors for redundant env. */
+ },
+ {
+ .name = "linux",
+ .offset = 0x80000,
+ .size = 0xF80000, /* map only 16 MiB */
+ },
+};
+
+static struct flash_platform_data board_95C3AM1_flash_platform_data = {
+ .name = "sflash",
+ .parts = board_95C3AM1_partitions,
+ .nr_parts = ARRAY_SIZE(board_95C3AM1_partitions)
+};
+
+static struct spi_board_info board_95C3AM1_flash_data __initdata = {
+ .modalias = "m25p80",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 10 * 1000 * 1000,
+ .mode = SPI_MODE_3,
+ .platform_data = &board_95C3AM1_flash_platform_data
+};
+
+static struct gpio_led board_95C3AM1_gpio_leds[] __initdata = {
+ {
+ .name = "power",
+ .gpio = BOARD_95C3AM1_GPIO_LED_0,
+ .active_low = 0,
+ }, {
+ .name = "optical",
+ .gpio = BOARD_95C3AM1_GPIO_LED_1,
+ .active_low = 0,
+ }, {
+ .name = "lan",
+ .gpio = BOARD_95C3AM1_GPIO_LED_2,
+ .active_low = 0,
+ }, {
+ .name = "update",
+ .gpio = BOARD_95C3AM1_GPIO_LED_3,
+ .active_low = 0,
+ }
+};
+
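+/* bit-banged I2C; in the (port * 100 + offset) pin numbering these are
+ * presumably pins 7 and 8 of GPIO port 1 */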
+static struct i2c_gpio_platform_data board_95C3AM1_i2c_gpio_data = {
+ .sda_pin = 107,
+ .scl_pin = 108,
+};
+
+static struct platform_device board_95C3AM1_i2c_gpio_device = {
+ .name = "i2c-gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &board_95C3AM1_i2c_gpio_data,
+ }
+};
+
+static void __init board_95C3AM1_init(void)
+{
+ falcon_register_i2c();
+ falcon_register_spi_flash(&board_95C3AM1_flash_data);
+ platform_device_register(&board_95C3AM1_i2c_gpio_device);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(board_95C3AM1_gpio_leds),
+ board_95C3AM1_gpio_leds);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_95C3AM1,
+ "95C3AM1",
+ "95C3AM1 Board",
+ board_95C3AM1_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98000.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98000.c
new file mode 100644
index 0000000000..fc5720d4ea
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98000.c
@@ -0,0 +1,138 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/spi/eeprom.h>
+
+#include "../machtypes.h"
+
+#include "devices.h"
+
+static struct mtd_partition easy98000_nor_partitions[] = {
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x40000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x40000,
+ .size = 0x40000, /* 2 sectors for redundant env. */
+ },
+ {
+ .name = "linux",
+ .offset = 0x80000,
+ .size = 0xF80000, /* map only 16 MiB */
+ },
+};
+
+struct physmap_flash_data easy98000_nor_flash_data = {
+ .nr_parts = ARRAY_SIZE(easy98000_nor_partitions),
+ .parts = easy98000_nor_partitions,
+};
+
+static struct flash_platform_data easy98000_spi_flash_platform_data = {
+ .name = "sflash",
+ .parts = easy98000_nor_partitions,
+ .nr_parts = ARRAY_SIZE(easy98000_nor_partitions)
+};
+
+static struct spi_board_info easy98000_spi_flash_data __initdata = {
+ .modalias = "m25p80",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 10 * 1000 * 1000,
+ .mode = SPI_MODE_3,
+ .platform_data = &easy98000_spi_flash_platform_data
+};
+
+/* setup gpio based spi bus/device for access to the eeprom on the board */
+#define SPI_GPIO_MRST 102
+#define SPI_GPIO_MTSR 103
+#define SPI_GPIO_CLK 104
+#define SPI_GPIO_CS0 105
+#define SPI_GPIO_CS1 106
+#define SPI_GPIO_BUS_NUM 1
+
+static struct spi_gpio_platform_data easy98000_spi_gpio_data = {
+ .sck = SPI_GPIO_CLK,
+ .mosi = SPI_GPIO_MTSR,
+ .miso = SPI_GPIO_MRST,
+ .num_chipselect = 2,
+};
+
+static struct platform_device easy98000_spi_gpio_device = {
+ .name = "spi_gpio",
+ .id = SPI_GPIO_BUS_NUM,
+ .dev.platform_data = &easy98000_spi_gpio_data,
+};
+
+static struct spi_eeprom at25160n = {
+ .byte_len = 16 * 1024 / 8,
+ .name = "at25160n",
+ .page_size = 32,
+ .flags = EE_ADDR2,
+};
+
+static struct spi_board_info easy98000_spi_gpio_devices __initdata = {
+ .modalias = "at25",
+ .bus_num = SPI_GPIO_BUS_NUM,
+ .max_speed_hz = 1000 * 1000,
+ .mode = SPI_MODE_3,
+ .chip_select = 1,
+ .controller_data = (void *) SPI_GPIO_CS1,
+ .platform_data = &at25160n,
+};
+
+static void __init
+easy98000_init_common(void)
+{
+ spi_register_board_info(&easy98000_spi_gpio_devices, 1);
+ platform_device_register(&easy98000_spi_gpio_device);
+ falcon_register_i2c();
+}
+
+static void __init
+easy98000_init(void)
+{
+ easy98000_init_common();
+ ltq_register_nor(&easy98000_nor_flash_data);
+}
+
+static void __init
+easy98000sf_init(void)
+{
+ easy98000_init_common();
+ falcon_register_spi_flash(&easy98000_spi_flash_data);
+}
+
+static void __init
+easy98000nand_init(void)
+{
+ easy98000_init_common();
+ falcon_register_nand();
+}
+
+MIPS_MACHINE(LANTIQ_MACH_EASY98000,
+ "EASY98000",
+ "EASY98000 Eval Board",
+ easy98000_init);
+
+MIPS_MACHINE(LANTIQ_MACH_EASY98000SF,
+ "EASY98000SF",
+ "EASY98000 Eval Board (Serial Flash)",
+ easy98000sf_init);
+
+MIPS_MACHINE(LANTIQ_MACH_EASY98000NAND,
+ "EASY98000NAND",
+ "EASY98000 Eval Board (NAND Flash)",
+ easy98000nand_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98020.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98020.c
new file mode 100644
index 0000000000..4cdfc199db
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/mach-easy98020.c
@@ -0,0 +1,118 @@
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <dev-gpio-leds.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+
+#define EASY98020_GPIO_LED_0 9
+#define EASY98020_GPIO_LED_1 10
+#define EASY98020_GPIO_LED_2 11
+#define EASY98020_GPIO_LED_3 12
+#define EASY98020_GPIO_LED_GE0_ACT 110
+#define EASY98020_GPIO_LED_GE0_LINK 109
+#define EASY98020_GPIO_LED_GE1_ACT 106
+#define EASY98020_GPIO_LED_GE1_LINK 105
+
+static struct mtd_partition easy98020_spi_partitions[] =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x40000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x40000,
+ .size = 0x40000, /* 2 sectors for redundant env. */
+ },
+ {
+ .name = "linux",
+ .offset = 0x80000,
+ .size = 0xF80000, /* map only 16 MiB */
+ },
+};
+
+static struct flash_platform_data easy98020_spi_flash_platform_data = {
+ .name = "sflash",
+ .parts = easy98020_spi_partitions,
+ .nr_parts = ARRAY_SIZE(easy98020_spi_partitions)
+};
+
+static struct spi_board_info easy98020_spi_flash_data __initdata = {
+ .modalias = "m25p80",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 10 * 1000 * 1000,
+ .mode = SPI_MODE_3,
+ .platform_data = &easy98020_spi_flash_platform_data
+};
+
+static struct gpio_led easy98020_gpio_leds[] __initdata = {
+ {
+ .name = "easy98020:green:0",
+ .gpio = EASY98020_GPIO_LED_0,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:green:1",
+ .gpio = EASY98020_GPIO_LED_1,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:green:2",
+ .gpio = EASY98020_GPIO_LED_2,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:green:3",
+ .gpio = EASY98020_GPIO_LED_3,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:ge0_act",
+ .gpio = EASY98020_GPIO_LED_GE0_ACT,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:ge0_link",
+ .gpio = EASY98020_GPIO_LED_GE0_LINK,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:ge1_act",
+ .gpio = EASY98020_GPIO_LED_GE1_ACT,
+ .active_low = 0,
+ }, {
+ .name = "easy98020:ge1_link",
+ .gpio = EASY98020_GPIO_LED_GE1_LINK,
+ .active_low = 0,
+ }
+};
+
+static void __init easy98020_init(void)
+{
+ falcon_register_i2c();
+ falcon_register_spi_flash(&easy98020_spi_flash_data);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(easy98020_gpio_leds),
+ easy98020_gpio_leds);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_EASY98020,
+ "EASY98020",
+ "EASY98020 Eval Board",
+ easy98020_init);
+
+MIPS_MACHINE(LANTIQ_MACH_EASY98020_1LAN,
+ "EASY98020_1LAN",
+ "EASY98020 Eval Board (1 LAN port)",
+ easy98020_init);
+
+MIPS_MACHINE(LANTIQ_MACH_EASY98020_2LAN,
+ "EASY98020_2LAN",
+ "EASY98020 Eval Board (2 LAN ports)",
+ easy98020_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/prom.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 0000000000..2a4eea1782
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <lantiq_soc.h>
+
+#include "devices.h"
+
+#include "../prom.h"
+
+#define SOC_FALCON "Falcon"
+#define SOC_FALCON_D "Falcon-D"
+#define SOC_FALCON_V "Falcon-V"
+#define SOC_FALCON_M "Falcon-M"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFF000
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+#define SREV_SHIFT 22
+#define SREV_MASK 0x03C00000
+#define TYPE_SHIFT 26
+#define TYPE_MASK 0x3C000000
+
+/* this parameter lets us enable asc1 via the command line */
+static int register_asc1;
+static int __init
+ltq_parse_asc1(char *p)
+{
+ register_asc1 = 1;
+ return 0;
+}
+__setup("use_asc1", ltq_parse_asc1);
+
+void __init
+ltq_soc_setup(void)
+{
+ ltq_register_asc(0);
+ ltq_register_wdt();
+ falcon_register_gpio();
+ if (register_asc1)
+ ltq_register_asc(1);
+}
+
+void __init
+ltq_soc_detect(struct ltq_soc_info *i)
+{
+ u32 type;
+ i->partnum = (ltq_r32(LTQ_FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+ i->srev = ((ltq_r32(LTQ_FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+ sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+ i->rev & 0x7, (i->srev & 0x3) + 1);
+
+ switch (i->partnum) {
+ case SOC_ID_FALCON:
+ type = (ltq_r32(LTQ_FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+ switch (type) {
+ case 0:
+ i->name = SOC_FALCON_D;
+ break;
+ case 1:
+ i->name = SOC_FALCON_V;
+ break;
+ case 2:
+ i->name = SOC_FALCON_M;
+ break;
+ default:
+ i->name = SOC_FALCON;
+ break;
+ }
+ i->type = SOC_TYPE_FALCON;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/reset.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 0000000000..cbcadc5c48
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,87 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/* CPU0 Reset Source Register */
+#define LTQ_SYS1_CPU0RS 0x0040
+/* reset cause mask */
+#define LTQ_CPU0RS_MASK 0x0003
+
+int
+ltq_reset_cause(void)
+{
+ return ltq_sys1_r32(LTQ_SYS1_CPU0RS) & LTQ_CPU0RS_MASK;
+}
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
+
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
+#define BOOT_PW1 0x4C545100
+#define BOOT_PW2 0x0051544C
+
+#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
+#define WDT_PW1 0x00BE0000
+#define WDT_PW2 0x00DC0000
+
+static void
+ltq_machine_restart(char *command)
+{
+ pr_notice("System restart\n");
+ local_irq_disable();
+
+ /* reboot magic */
+ ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+ ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+ ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+ /* watchdog magic */
+ ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+ ltq_w32(WDT_PW2 |
+ (0x3 << 26) | /* PWL */
+ (0x2 << 24) | /* CLKDIV */
+ (0x1 << 31) | /* enable */
+ (1), /* reload */
+ (void *)WDT_REG_BASE);
+ unreachable();
+}
+
+static void
+ltq_machine_halt(void)
+{
+ pr_notice("System halted.\n");
+ local_irq_disable();
+ unreachable();
+}
+
+static void
+ltq_machine_power_off(void)
+{
+ pr_notice("Please turn off the power now.\n");
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init
+mips_reboot_setup(void)
+{
+ _machine_restart = ltq_machine_restart;
+ _machine_halt = ltq_machine_halt;
+ pm_power_off = ltq_machine_power_off;
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/sysctrl.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 0000000000..f27d694826
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,211 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "devices.h"
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC 0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG 0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET 24
+#define GPEFREQ_MASK 0x00000C0
+#define GPEFREQ_OFFSET 10
+/* Clock status register */
+#define LTQ_SYSCTL_CLKS 0x0000
+/* Clock enable register */
+#define LTQ_SYSCTL_CLKEN 0x0004
+/* Clock clear register */
+#define LTQ_SYSCTL_CLKCLR 0x0008
+/* Activation Status Register */
+#define LTQ_SYSCTL_ACTS 0x0020
+/* Activation Register */
+#define LTQ_SYSCTL_ACT 0x0024
+/* Deactivation Register */
+#define LTQ_SYSCTL_DEACT 0x0028
+/* reboot Register */
+#define LTQ_SYSCTL_RBT 0x002c
+/* CPU0 Clock Control Register */
+#define LTQ_SYS1_CPU0CC 0x0040
+/* clock divider bit */
+#define LTQ_CPU0CC_CPUDIV 0x0001
+
+static struct resource ltq_sysctl_res[] = {
+ MEM_RES("sys1", LTQ_SYS1_BASE_ADDR, LTQ_SYS1_SIZE),
+ MEM_RES("syseth", LTQ_SYS_ETH_BASE_ADDR, LTQ_SYS_ETH_SIZE),
+ MEM_RES("sysgpe", LTQ_SYS_GPE_BASE_ADDR, LTQ_SYS_GPE_SIZE),
+};
+
+static struct resource ltq_status_res =
+ MEM_RES("status", LTQ_STATUS_BASE_ADDR, LTQ_STATUS_SIZE);
+static struct resource ltq_ebu_res =
+ MEM_RES("ebu", LTQ_EBU_BASE_ADDR, LTQ_EBU_SIZE);
+
+static void __iomem *ltq_sysctl[3];
+static void __iomem *ltq_status_membase;
+void __iomem *ltq_sys1_membase;
+void __iomem *ltq_ebu_membase;
+
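+/* the first argument selects one of the register blocks remapped from
+ * ltq_sysctl_res[] above: sys1, syseth or sysgpe */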
+#define ltq_reg_w32(m, x, y) ltq_w32((x), ltq_sysctl[m] + (y))
+#define ltq_reg_r32(m, x) ltq_r32(ltq_sysctl[m] + (x))
+#define ltq_reg_w32_mask(m, clear, set, reg) \
+ ltq_reg_w32(m, (ltq_reg_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define ltq_status_w32(x, y) ltq_w32((x), ltq_status_membase + (y))
+#define ltq_status_r32(x) ltq_r32(ltq_status_membase + (x))
+
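+/* busy-wait (bounded to 1000000 iterations) until the bits selected by
+ * clk->bits in 'reg' read back as 'test' */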
+static inline void
+ltq_sysctl_wait(struct clk *clk,
+ unsigned int test, unsigned int reg)
+{
+ int err = 1000000;
+
+ do {} while (--err && ((ltq_reg_r32(clk->module, reg)
+ & clk->bits) != test));
+ if (!err)
+ pr_err("module de/activation failed %d %08X %08X %08X\n",
+ clk->module, clk->bits, test,
+ ltq_reg_r32(clk->module, reg) & clk->bits);
+}
+
+static int
+ltq_sysctl_activate(struct clk *clk)
+{
+ ltq_reg_w32(clk->module, clk->bits, LTQ_SYSCTL_CLKEN);
+ ltq_reg_w32(clk->module, clk->bits, LTQ_SYSCTL_ACT);
+ ltq_sysctl_wait(clk, clk->bits, LTQ_SYSCTL_ACTS);
+ return 0;
+}
+
+static void
+ltq_sysctl_deactivate(struct clk *clk)
+{
+ ltq_reg_w32(clk->module, clk->bits, LTQ_SYSCTL_CLKCLR);
+ ltq_reg_w32(clk->module, clk->bits, LTQ_SYSCTL_DEACT);
+ ltq_sysctl_wait(clk, 0, LTQ_SYSCTL_ACTS);
+}
+
+static int
+ltq_sysctl_clken(struct clk *clk)
+{
+ ltq_reg_w32(clk->module, clk->bits, LTQ_SYSCTL_CLKEN);
+ ltq_sysctl_wait(clk, clk->bits, LTQ_SYSCTL_CLKS);
+ return 0;
+}
+
+static void
+ltq_sysctl_clkdis(struct clk *clk)
+{
+ ltq_reg_w32(clk->module, clk->bits, LTQ_SYSCTL_CLKCLR);
+ ltq_sysctl_wait(clk, 0, LTQ_SYSCTL_CLKS);
+}
+
+static void
+ltq_sysctl_reboot(struct clk *clk)
+{
+ unsigned int act;
+ unsigned int bits;
+
+ act = ltq_reg_r32(clk->module, LTQ_SYSCTL_ACT);
+ bits = ~act & clk->bits;
+ if (bits != 0) {
+ ltq_reg_w32(clk->module, bits, LTQ_SYSCTL_CLKEN);
+ ltq_reg_w32(clk->module, bits, LTQ_SYSCTL_ACT);
+ ltq_sysctl_wait(clk, bits, LTQ_SYSCTL_ACTS);
+ }
+ ltq_reg_w32(clk->module, act & clk->bits, LTQ_SYSCTL_RBT);
+ ltq_sysctl_wait(clk, clk->bits, LTQ_SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void
+ltq_gpe_enable(void)
+{
+ unsigned int freq;
+ unsigned int status;
+
+	/* nothing to do if the clock is already enabled */
+ status = ltq_reg_r32(SYSCTL_SYS1, SYS1_INFRAC);
+ if (status & (1 << (GPPC_OFFSET + 1)))
+ return;
+
+ if (ltq_status_r32(STATUS_CONFIG) == 0)
+ freq = 1; /* use 625MHz on unfused chip */
+ else
+ freq = (ltq_status_r32(STATUS_CONFIG) &
+ GPEFREQ_MASK) >>
+ GPEFREQ_OFFSET;
+
+ /* apply new frequency */
+ ltq_reg_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+			freq << (GPPC_OFFSET + 2), SYS1_INFRAC);
+ udelay(1);
+
+ /* enable new frequency */
+ ltq_reg_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+ udelay(1);
+}
+
+static inline void
+clkdev_add_sys(const char *dev, unsigned int module,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->module = module;
+ clk->bits = bits;
+ clk->activate = ltq_sysctl_activate;
+ clk->deactivate = ltq_sysctl_deactivate;
+ clk->enable = ltq_sysctl_clken;
+ clk->disable = ltq_sysctl_clkdis;
+ clk->reboot = ltq_sysctl_reboot;
+ clkdev_add(&clk->cl);
+}
+
+void __init
+ltq_soc_init(void)
+{
+ int i;
+
+ for (i = 0; i < 3; i++)
+ ltq_sysctl[i] = ltq_remap_resource(&ltq_sysctl_res[i]);
+
+ ltq_sys1_membase = ltq_sysctl[0];
+ ltq_status_membase = ltq_remap_resource(&ltq_status_res);
+ ltq_ebu_membase = ltq_remap_resource(&ltq_ebu_res);
+
+ ltq_gpe_enable();
+
+ /* get our 3 static rates for cpu, fpi and io clocks */
+ if (ltq_sys1_r32(LTQ_SYS1_CPU0CC) & LTQ_CPU0CC_CPUDIV)
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+ else
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+
+ /* add our clock domains */
+ clkdev_add_sys("falcon_gpio.0", SYSCTL_SYSETH, ACTS_PADCTRL0 | ACTS_P0);
+ clkdev_add_sys("falcon_gpio.1", SYSCTL_SYS1, ACTS_PADCTRL1 | ACTS_P1);
+ clkdev_add_sys("falcon_gpio.2", SYSCTL_SYSETH, ACTS_PADCTRL2 | ACTS_P2);
+ clkdev_add_sys("falcon_gpio.3", SYSCTL_SYS1, ACTS_PADCTRL3 | ACTS_P3);
+ clkdev_add_sys("falcon_gpio.4", SYSCTL_SYS1, ACTS_PADCTRL4 | ACTS_P4);
+ clkdev_add_sys("ltq_asc.1", SYSCTL_SYS1, ACTS_ASC1_ACT);
+ clkdev_add_sys("i2c-falcon.0", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Kconfig b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Kconfig
new file mode 100644
index 0000000000..f351a189a7
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Kconfig
@@ -0,0 +1,16 @@
+if SOC_SVIP
+
+menu "Mips Machine"
+
+config LANTIQ_MACH_EASY33016
+ bool "Easy33016"
+ default y
+
+config LANTIQ_MACH_EASY336
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ bool "Easy336"
+ default y
+
+endmenu
+
+endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Makefile b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Makefile
new file mode 100644
index 0000000000..405d652191
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/Makefile
@@ -0,0 +1,3 @@
+obj-y := devices.o prom.o reset.o clk-svip.o gpio.o dma.o switchip_setup.o pms.o mux.o
+obj-$(CONFIG_LANTIQ_MACH_EASY33016) += mach-easy33016.o
+obj-$(CONFIG_LANTIQ_MACH_EASY336) += mach-easy336.o
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/clk-svip.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/clk-svip.c
new file mode 100644
index 0000000000..4a14df509f
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/clk-svip.c
@@ -0,0 +1,100 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+#include <base_reg.h>
+#include <sys0_reg.h>
+#include <sys1_reg.h>
+#include <status_reg.h>
+
+static struct svip_reg_status *const status =
+(struct svip_reg_status *)LTQ_STATUS_BASE;
+static struct svip_reg_sys0 *const sys0 = (struct svip_reg_sys0 *)LTQ_SYS0_BASE;
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
+
+unsigned int ltq_svip_io_region_clock(void)
+{
+ return 200000000; /* 200 MHz */
+}
+EXPORT_SYMBOL(ltq_svip_io_region_clock);
+
+unsigned int ltq_svip_cpu_hz(void)
+{
+ /* Magic BootROM speed location... */
+ if ((*(u32 *)0x9fc07ff0) == 1)
+ return *(u32 *)0x9fc07ff4;
+
+ if (STATUS_CONFIG_CLK_MODE_GET(status_r32(config)) == 1) {
+ /* xT16 */
+ return 393216000;
+ } else {
+ switch (SYS0_PLL1CR_PLLDIV_GET(sys0_r32(pll1cr))) {
+ case 3:
+ return 475000000;
+ case 2:
+ return 450000000;
+ case 1:
+ return 425000000;
+ default:
+ return 400000000;
+ }
+ }
+}
+EXPORT_SYMBOL(ltq_svip_cpu_hz);
+
+unsigned int ltq_svip_fpi_hz(void)
+{
+ u32 fbs0_div[2] = {4, 8};
+ u32 div;
+
+ div = SYS1_FPICR_FPIDIV_GET(sys1_r32(fpicr));
+ return ltq_svip_cpu_hz()/fbs0_div[div];
+}
+EXPORT_SYMBOL(ltq_svip_fpi_hz);
+
+unsigned int ltq_get_ppl_hz(void)
+{
+ /* Magic BootROM speed location... */
+ if ((*(u32 *)0x9fc07ff0) == 1)
+ return *(u32 *)0x9fc07ff4;
+
+ if (STATUS_CONFIG_CLK_MODE_GET(status_r32(config)) == 1) {
+ /* xT16 */
+ return 393216000;
+ } else {
+ switch (SYS0_PLL1CR_PLLDIV_GET(sys0_r32(pll1cr))) {
+ case 3:
+ return 475000000;
+ case 2:
+ return 450000000;
+ case 1:
+ return 425000000;
+ default:
+ return 400000000;
+ }
+ }
+}
+
+unsigned int ltq_get_fbs0_hz(void)
+{
+ u32 fbs0_div[2] = {4, 8};
+ u32 div;
+
+ div = SYS1_FPICR_FPIDIV_GET(sys1_r32(fpicr));
+ return ltq_get_ppl_hz()/fbs0_div[div];
+}
+EXPORT_SYMBOL(ltq_get_fbs0_hz);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.c
new file mode 100644
index 0000000000..735b9419f7
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.c
@@ -0,0 +1,380 @@
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/mtd/physmap.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/etherdevice.h>
+#include <linux/reboot.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+#include <linux/spi/spi.h>
+#include <linux/mtd/nand.h>
+
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+
+#include <lantiq.h>
+
+#include <base_reg.h>
+#include <sys1_reg.h>
+#include <sys2_reg.h>
+#include <ebu_reg.h>
+
+#include "devices.h"
+
+#include <lantiq_soc.h>
+#include <svip_mux.h>
+#include <svip_pms.h>
+
+/* ASC */
+void __init svip_register_asc(int port)
+{
+ switch (port) {
+ case 0:
+ ltq_register_asc(0);
+ svip_sys1_clk_enable(SYS1_CLKENR_ASC0);
+ break;
+ case 1:
+ ltq_register_asc(1);
+ svip_sys1_clk_enable(SYS1_CLKENR_ASC1);
+ break;
+ default:
+ break;
+ };
+}
+
+/* Ethernet */
+static unsigned char svip_ethaddr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+static struct platform_device ltq_mii = {
+ .name = "ifxmips_mii0",
+ .dev = {
+ .platform_data = svip_ethaddr,
+ },
+};
+
+static int __init svip_set_ethaddr(char *str)
+{
+ sscanf(str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ &svip_ethaddr[0], &svip_ethaddr[1], &svip_ethaddr[2],
+ &svip_ethaddr[3], &svip_ethaddr[4], &svip_ethaddr[5]);
+ return 0;
+}
+__setup("ethaddr=", svip_set_ethaddr);
+
+void __init svip_register_eth(void)
+{
+ if (!is_valid_ether_addr(svip_ethaddr))
+ random_ether_addr(svip_ethaddr);
+
+ platform_device_register(&ltq_mii);
+ svip_sys1_clk_enable(SYS1_CLKENR_ETHSW);
+}
+
+/* Virtual Ethernet */
+static struct platform_device ltq_ve = {
+ .name = "ifxmips_svip_ve",
+};
+
+void __init svip_register_virtual_eth(void)
+{
+ platform_device_register(&ltq_ve);
+}
+
+/* SPI */
+static void __init ltq_register_ssc(int bus_num, unsigned long base, int irq_rx,
+ int irq_tx, int irq_err, int irq_frm)
+{
+ struct resource res[] = {
+ {
+ .name = "regs",
+ .start = base,
+ .end = base + 0x20 - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .name = "rx",
+ .start = irq_rx,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "tx",
+ .start = irq_tx,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "err",
+ .start = irq_err,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .name = "frm",
+ .start = irq_frm,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ platform_device_register_simple("ifx_ssc", bus_num, res,
+ ARRAY_SIZE(res));
+}
+
+static struct spi_board_info bdinfo[] __initdata = {
+ {
+ .modalias = "xt16",
+ .mode = SPI_MODE_3,
+ .irq = INT_NUM_IM5_IRL0 + 28,
+ .max_speed_hz = 1000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ },
+ {
+ .modalias = "xt16",
+ .mode = SPI_MODE_3,
+ .irq = INT_NUM_IM5_IRL0 + 19,
+ .max_speed_hz = 1000000,
+ .bus_num = 0,
+ .chip_select = 2,
+ },
+ {
+ .modalias = "loop",
+ .mode = SPI_MODE_0 | SPI_LOOP,
+ .irq = -1,
+ .max_speed_hz = 10000000,
+ .bus_num = 0,
+ .chip_select = 3,
+ },
+};
+
+void __init svip_register_spi(void)
+{
+
+ ltq_register_ssc(0, LTQ_SSC0_BASE, INT_NUM_IM1_IRL0 + 6,
+ INT_NUM_IM1_IRL0 + 7, INT_NUM_IM1_IRL0 + 8,
+ INT_NUM_IM1_IRL0 + 9);
+
+ ltq_register_ssc(1, LTQ_SSC1_BASE, INT_NUM_IM1_IRL0 + 10,
+ INT_NUM_IM1_IRL0 + 11, INT_NUM_IM1_IRL0 + 12,
+ INT_NUM_IM1_IRL0 + 13);
+
+ spi_register_board_info(bdinfo, ARRAY_SIZE(bdinfo));
+
+ svip_sys1_clk_enable(SYS1_CLKENR_SSC0 | SYS1_CLKENR_SSC1);
+}
+
+void __init svip_register_spi_flash(struct spi_board_info *bdinfo)
+{
+ spi_register_board_info(bdinfo, 1);
+}
+
+/* GPIO */
+static struct platform_device ltq_gpio = {
+ .name = "ifxmips_gpio",
+};
+
+void __init svip_register_gpio(void)
+{
+ platform_device_register(&ltq_gpio);
+}
+
+/* MUX */
+static struct ltq_mux_settings ltq_mux_settings;
+
+static struct platform_device ltq_mux = {
+ .name = "ltq_mux",
+ .dev = {
+ .platform_data = &ltq_mux_settings,
+ }
+};
+
+void __init svip_register_mux(const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS],
+ const struct ltq_mux_pin mux_p1[LTQ_MUX_P1_PINS],
+ const struct ltq_mux_pin mux_p2[LTQ_MUX_P2_PINS],
+ const struct ltq_mux_pin mux_p3[LTQ_MUX_P3_PINS],
+ const struct ltq_mux_pin mux_p4[LTQ_MUX_P4_PINS])
+{
+ ltq_mux_settings.mux_p0 = mux_p0;
+ ltq_mux_settings.mux_p1 = mux_p1;
+ ltq_mux_settings.mux_p2 = mux_p2;
+ ltq_mux_settings.mux_p3 = mux_p3;
+ ltq_mux_settings.mux_p4 = mux_p4;
+
+ if (mux_p0)
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT0);
+
+ if (mux_p1)
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT1);
+
+ if (mux_p2)
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT2);
+
+ if (mux_p3)
+ svip_sys1_clk_enable(SYS1_CLKENR_PORT3);
+
+ if (mux_p4)
+ svip_sys2_clk_enable(SYS2_CLKENR_PORT4);
+
+ platform_device_register(&ltq_mux);
+}
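+
+/*
+ * Board code passes one pin table per port, or NULL to leave that
+ * port's clock and pin setup untouched; see e.g. mach-easy33016.c:
+ *
+ *	svip_register_mux(mux_p0, NULL, NULL, NULL, NULL);
+ */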
+
+/* NAND */
+#define NAND_ADDR_REGION_BASE (LTQ_EBU_SEG1_BASE)
+#define NAND_CLE_BIT (1 << 3)
+#define NAND_ALE_BIT (1 << 2)
+
+static struct svip_reg_ebu *const ebu = (struct svip_reg_ebu *)LTQ_EBU_BASE;
+
+static int svip_nand_probe(struct platform_device *pdev)
+{
+ ebu_w32(LTQ_EBU_ADDR_SEL_0_BASE_VAL(CPHYSADDR(NAND_ADDR_REGION_BASE)
+ >> 12)
+ | LTQ_EBU_ADDR_SEL_0_MASK_VAL(15)
+ | LTQ_EBU_ADDR_SEL_0_MRME_VAL(0)
+ | LTQ_EBU_ADDR_SEL_0_REGEN_VAL(1),
+ addr_sel_0);
+
+ ebu_w32(LTQ_EBU_CON_0_WRDIS_VAL(0)
+ | LTQ_EBU_CON_0_ADSWP_VAL(1)
+ | LTQ_EBU_CON_0_AGEN_VAL(0x00)
+ | LTQ_EBU_CON_0_SETUP_VAL(1)
+ | LTQ_EBU_CON_0_WAIT_VAL(0x00)
+ | LTQ_EBU_CON_0_WINV_VAL(0)
+ | LTQ_EBU_CON_0_PW_VAL(0x00)
+ | LTQ_EBU_CON_0_ALEC_VAL(0)
+ | LTQ_EBU_CON_0_BCGEN_VAL(0x01)
+ | LTQ_EBU_CON_0_WAITWRC_VAL(1)
+ | LTQ_EBU_CON_0_WAITRDC_VAL(1)
+ | LTQ_EBU_CON_0_HOLDC_VAL(1)
+ | LTQ_EBU_CON_0_RECOVC_VAL(0)
+ | LTQ_EBU_CON_0_CMULT_VAL(0x01),
+ con_0);
+
+ /*
+ * ECC disabled
+	 * CLE, ALE and CS are pulsed; all other signals are latch based
+ * CLE and ALE are active high, PRE, WP, SE and CS/CE are active low
+ * OUT_CS_S is disabled
+ * NAND mode is disabled
+ */
+ ebu_w32(LTQ_EBU_NAND_CON_ECC_ON_VAL(0)
+ | LTQ_EBU_NAND_CON_LAT_EN_VAL(0x38)
+ | LTQ_EBU_NAND_CON_OUT_CS_S_VAL(0)
+ | LTQ_EBU_NAND_CON_IN_CS_S_VAL(0)
+ | LTQ_EBU_NAND_CON_PRE_P_VAL(1)
+ | LTQ_EBU_NAND_CON_WP_P_VAL(1)
+ | LTQ_EBU_NAND_CON_SE_P_VAL(1)
+ | LTQ_EBU_NAND_CON_CS_P_VAL(1)
+ | LTQ_EBU_NAND_CON_CLE_P_VAL(0)
+ | LTQ_EBU_NAND_CON_ALE_P_VAL(0)
+ | LTQ_EBU_NAND_CON_CSMUX_E_VAL(0)
+ | LTQ_EBU_NAND_CON_NANDMODE_VAL(0),
+ nand_con);
+
+ return 0;
+}
+
+static void svip_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ unsigned long adr;
+ /* Coming here means to change either the enable state or
+ * the address for controlling ALE or CLE */
+
+ /* NAND_NCE: Select the chip by setting nCE to low.
+ * This is done in CON register */
+ if (ctrl & NAND_NCE)
+ ebu_w32_mask(0, LTQ_EBU_NAND_CON_NANDMODE_VAL(1),
+ nand_con);
+ else
+ ebu_w32_mask(LTQ_EBU_NAND_CON_NANDMODE_VAL(1),
+ 0, nand_con);
+
+		/* CLE and ALE are selected via dedicated address bits, so
+		   adjust the write address according to the requested action:
+		   NAND_CLE (0x02) maps to NAND_CLE_BIT (1 << 3),
+		   NAND_ALE (0x04) maps to NAND_ALE_BIT (1 << 2) */
+ adr = (unsigned long)this->IO_ADDR_W;
+ adr &= ~(NAND_CLE_BIT | NAND_ALE_BIT);
+ adr |= (ctrl & NAND_CLE) << 2 | (ctrl & NAND_ALE);
+ this->IO_ADDR_W = (void __iomem *)adr;
+ }
+
+ if (cmd != NAND_CMD_NONE)
+ writeb(cmd, this->IO_ADDR_W);
+}
+
+static int svip_nand_ready(struct mtd_info *mtd)
+{
+ return (ebu_r32(nand_wait) & 0x01) == 0x01;
+}
+
+static inline void svip_nand_wait(void)
+{
+ static const int nops = 150;
+ int i;
+
+ for (i = 0; i < nops; i++)
+ asm("nop");
+}
+
+static void svip_nand_write_buf(struct mtd_info *mtd,
+ const u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ writeb(buf[i], this->IO_ADDR_W);
+ svip_nand_wait();
+ }
+}
+
+static void svip_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ int i;
+ struct nand_chip *this = mtd->priv;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = readb(this->IO_ADDR_R);
+ svip_nand_wait();
+ }
+}
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+
+static struct platform_nand_data svip_flash_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .probe = svip_nand_probe,
+ .cmd_ctrl = svip_nand_hwcontrol,
+ .dev_ready = svip_nand_ready,
+ .write_buf = svip_nand_write_buf,
+ .read_buf = svip_nand_read_buf,
+ }
+};
+
+static struct resource svip_nand_resources[] = {
+ MEM_RES("nand", LTQ_FLASH_START, LTQ_FLASH_MAX),
+};
+
+static struct platform_device svip_flash_nand = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(svip_nand_resources),
+ .resource = svip_nand_resources,
+ .dev = {
+ .platform_data = &svip_flash_nand_data,
+ },
+};
+
+void __init svip_register_nand(void)
+{
+ platform_device_register(&svip_flash_nand);
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.h
new file mode 100644
index 0000000000..a064e67ec1
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/devices.h
@@ -0,0 +1,23 @@
+#ifndef _SVIP_DEVICES_H__
+#define _SVIP_DEVICES_H__
+
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <svip_mux.h>
+#include "../devices.h"
+
+extern void __init svip_register_asc(int port);
+extern void __init svip_register_eth(void);
+extern void __init svip_register_virtual_eth(void);
+extern void __init svip_register_spi(void);
+extern void __init svip_register_spi_flash(struct spi_board_info *bdinfo);
+extern void __init svip_register_gpio(void);
+extern void __init svip_register_mux(const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS],
+ const struct ltq_mux_pin mux_p1[LTQ_MUX_P1_PINS],
+ const struct ltq_mux_pin mux_p2[LTQ_MUX_P2_PINS],
+ const struct ltq_mux_pin mux_p3[LTQ_MUX_P3_PINS],
+ const struct ltq_mux_pin mux_p4[LTQ_MUX_P4_PINS]);
+extern void __init svip_register_nand(void);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/dma.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/dma.c
new file mode 100644
index 0000000000..7464be1bd5
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/dma.c
@@ -0,0 +1,1206 @@
+/*
+ ** Copyright (C) 2005 Wu Qi Ming <Qi-Ming.Wu@infineon.com>
+ **
+ ** This program is free software; you can redistribute it and/or modify
+ ** it under the terms of the GNU General Public License as published by
+ ** the Free Software Foundation; either version 2 of the License, or
+ ** (at your option) any later version.
+ **
+ ** This program is distributed in the hope that it will be useful,
+ ** but WITHOUT ANY WARRANTY; without even the implied warranty of
+ ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ ** GNU General Public License for more details.
+ **
+ ** You should have received a copy of the GNU General Public License
+ ** along with this program; if not, write to the Free Software
+ ** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * Description:
+ * Driver for SVIP DMA
+ * Author: Wu Qi Ming[Qi-Ming.Wu@infineon.com]
+ * Created: 26-September-2005
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/tty.h>
+#include <linux/selection.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+
+#include <base_reg.h>
+#include <mps_reg.h>
+#include <dma_reg.h>
+#include <svip_dma.h>
+#include <lantiq_soc.h>
+#include <irq.h>
+#include <sys1_reg.h>
+
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
+static struct svip_reg_dma *const dma = (struct svip_reg_dma *)LTQ_DMA_BASE;
+static struct svip_reg_mbs *const mbs = (struct svip_reg_mbs *)LTQ_MBS_BASE;
+
+#define DRV_NAME "ltq_dma"
+extern void ltq_mask_and_ack_irq(struct irq_data *data);
+extern void ltq_enable_irq(struct irq_data *data);
+
+static inline void mask_and_ack_irq(unsigned int irq_nr)
+{
+ static int i = 0;
+ struct irq_data data;
+ data.irq = irq_nr;
+ if ((i < 2) && (irq_nr == 137)) {
+ printk("eth delay hack\n");
+ i++;
+ }
+ ltq_mask_and_ack_irq(&data);
+}
+
+static inline void svip_enable_irq(unsigned int irq_nr)
+{
+ struct irq_data data;
+ data.irq = irq_nr;
+ ltq_enable_irq(&data);
+}
+
+#define DMA_EMSG(fmt, args...) \
+ printk(KERN_ERR "%s: " fmt, __func__, ## args)
+
+static inline void mbs_grab(void)
+{
+ while (mbs_r32(mbsr0) != 0);
+}
+
+static inline void mbs_release(void)
+{
+ mbs_w32(0, mbsr0);
+ asm("sync");
+}
+
+/* max ports connecting to dma */
+#define LTQ_MAX_DMA_DEVICE_NUM ARRAY_SIZE(dma_devices)
+/* max dma channels */
+#define LTQ_MAX_DMA_CHANNEL_NUM ARRAY_SIZE(dma_chan)
+
+/* bytes per descriptor */
+#define DMA_DESCR_SIZE 8
+
+#define DMA_DESCR_CH_SIZE (DMA_DESCR_NUM * DMA_DESCR_SIZE)
+#define DMA_DESCR_TOTAL_SIZE (LTQ_MAX_DMA_CHANNEL_NUM * DMA_DESCR_CH_SIZE)
+#define DMA_DESCR_MEM_PAGES ((DMA_DESCR_TOTAL_SIZE / PAGE_SIZE) + \
+ (((DMA_DESCR_TOTAL_SIZE % PAGE_SIZE) > 0)))
+
+/* budget for interrupt handling */
+#define DMA_INT_BUDGET 100
+/* set the correct counter value here! */
+#define DMA_POLL_COUNTER 32
+
+struct proc_dir_entry *g_dma_dir;
+
+/* device_name | max_rx_chan_num | max_tx_chan_num | drop_enable */
+struct dma_device_info dma_devices[] = {
+ { "SW", 4, 4, 0 },
+ { "DEU", 1, 1, 0 },
+ { "SSC0", 1, 1, 0 },
+ { "SSC1", 1, 1, 0 },
+ { "MCTRL", 1, 1, 0 },
+ { "PCM0", 1, 1, 0 },
+ { "PCM1", 1, 1, 0 },
+ { "PCM2", 1, 1, 0 },
+ { "PCM3", 1, 1, 0 }
+};
+
+/* *dma_dev | dir | pri | irq | rel_chan_no */
+struct dma_channel_info dma_chan[] = {
+ { &dma_devices[0], DIR_RX, 0, INT_NUM_IM4_IRL0 + 0, 0 },
+ { &dma_devices[0], DIR_TX, 0, INT_NUM_IM4_IRL0 + 1, 0 },
+ { &dma_devices[0], DIR_RX, 1, INT_NUM_IM4_IRL0 + 2, 1 },
+ { &dma_devices[0], DIR_TX, 1, INT_NUM_IM4_IRL0 + 3, 1 },
+ { &dma_devices[0], DIR_RX, 2, INT_NUM_IM4_IRL0 + 4, 2 },
+ { &dma_devices[0], DIR_TX, 2, INT_NUM_IM4_IRL0 + 5, 2 },
+ { &dma_devices[0], DIR_RX, 3, INT_NUM_IM4_IRL0 + 6, 3 },
+ { &dma_devices[0], DIR_TX, 3, INT_NUM_IM4_IRL0 + 7, 3 },
+ { &dma_devices[1], DIR_RX, 0, INT_NUM_IM4_IRL0 + 8, 0 },
+ { &dma_devices[1], DIR_TX, 0, INT_NUM_IM4_IRL0 + 9, 0 },
+ { &dma_devices[2], DIR_RX, 0, INT_NUM_IM4_IRL0 + 10, 0 },
+ { &dma_devices[2], DIR_TX, 0, INT_NUM_IM4_IRL0 + 11, 0 },
+ { &dma_devices[3], DIR_RX, 0, INT_NUM_IM4_IRL0 + 12, 0 },
+ { &dma_devices[3], DIR_TX, 0, INT_NUM_IM4_IRL0 + 13, 0 },
+ { &dma_devices[4], DIR_RX, 0, INT_NUM_IM4_IRL0 + 14, 0 },
+ { &dma_devices[4], DIR_TX, 0, INT_NUM_IM4_IRL0 + 15, 0 },
+ { &dma_devices[5], DIR_RX, 0, INT_NUM_IM4_IRL0 + 16, 0 },
+ { &dma_devices[5], DIR_TX, 0, INT_NUM_IM4_IRL0 + 17, 0 },
+ { &dma_devices[6], DIR_RX, 1, INT_NUM_IM3_IRL0 + 18, 0 },
+ { &dma_devices[6], DIR_TX, 1, INT_NUM_IM3_IRL0 + 19, 0 },
+ { &dma_devices[7], DIR_RX, 2, INT_NUM_IM4_IRL0 + 20, 0 },
+ { &dma_devices[7], DIR_TX, 2, INT_NUM_IM4_IRL0 + 21, 0 },
+ { &dma_devices[8], DIR_RX, 3, INT_NUM_IM4_IRL0 + 22, 0 },
+ { &dma_devices[8], DIR_TX, 3, INT_NUM_IM4_IRL0 + 23, 0 }
+};
+
+u64 *g_desc_list[DMA_DESCR_MEM_PAGES];
+
+volatile u32 g_dma_int_status = 0;
+
+/* 0 - not in process, 1 - in process */
+volatile int g_dma_in_process;
+
+int ltq_dma_init(void);
+void do_dma_tasklet(unsigned long);
+DECLARE_TASKLET(dma_tasklet, do_dma_tasklet, 0);
+irqreturn_t dma_interrupt(int irq, void *dev_id);
+
+u8 *common_buffer_alloc(int len, int *byte_offset, void **opt)
+{
+ u8 *buffer = kmalloc(len * sizeof(u8), GFP_KERNEL);
+ *byte_offset = 0;
+ return buffer;
+}
+
+void common_buffer_free(u8 *dataptr, void *opt)
+{
+ kfree(dataptr);
+}
+
+void enable_ch_irq(struct dma_channel_info *ch)
+{
+ int chan_no = (int)(ch - dma_chan);
+ unsigned long flag;
+ u32 val;
+
+ if (ch->dir == DIR_RX)
+ val = DMA_CIE_DESCPT | DMA_CIE_DUR;
+ else
+ val = DMA_CIE_DESCPT;
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32(val, cie);
+ dma_w32_mask(0, 1 << chan_no, irnen);
+ mbs_release();
+ local_irq_restore(flag);
+
+ svip_enable_irq(ch->irq);
+}
+
+void disable_ch_irq(struct dma_channel_info *ch)
+{
+ unsigned long flag;
+ int chan_no = (int)(ch - dma_chan);
+
+ local_irq_save(flag);
+ g_dma_int_status &= ~(1 << chan_no);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32(0, cie);
+ mbs_release();
+ dma_w32_mask(1 << chan_no, 0, irnen);
+ local_irq_restore(flag);
+
+ mask_and_ack_irq(ch->irq);
+}
+
+int open_chan(struct dma_channel_info *ch)
+{
+ unsigned long flag;
+ int j;
+ int chan_no = (int)(ch - dma_chan);
+ u8 *buffer;
+ int byte_offset;
+ struct rx_desc *rx_desc_p;
+ struct tx_desc *tx_desc_p;
+
+ if (ch->control == LTQ_DMA_CH_ON)
+ return -1;
+
+ if (ch->dir == DIR_RX) {
+ for (j = 0; j < ch->desc_len; j++) {
+ rx_desc_p = (struct rx_desc *)ch->desc_base+j;
+ buffer = ch->dma_dev->buffer_alloc(ch->packet_size,
+ &byte_offset,
+ (void *)&ch->opt[j]);
+ if (!buffer)
+ return -ENOBUFS;
+
+ rx_desc_p->data_pointer = (u32)CPHYSADDR((u32)buffer);
+ rx_desc_p->status.word = 0;
+ rx_desc_p->status.field.byte_offset = byte_offset;
+ rx_desc_p->status.field.data_length = ch->packet_size;
+ rx_desc_p->status.field.own = DMA_OWN;
+ }
+ } else {
+ for (j = 0; j < ch->desc_len; j++) {
+ tx_desc_p = (struct tx_desc *)ch->desc_base + j;
+ tx_desc_p->data_pointer = 0;
+ tx_desc_p->status.word = 0;
+ }
+ }
+ ch->xfer_cnt = 0;
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32(ch->desc_len, cdlen);
+ dma_w32(0x7e, cis);
+ dma_w32(DMA_CCTRL_TXWGT_VAL(ch->tx_weight)
+ | DMA_CCTRL_CLASS_VAL(ch->pri)
+ | (ch->dir == DIR_RX ? DMA_CCTRL_ON_OFF : 0), cctrl);
+ mbs_release();
+ ch->control = LTQ_DMA_CH_ON;
+ local_irq_restore(flag);
+
+ if (request_irq(ch->irq, dma_interrupt,
+ IRQF_DISABLED, "dma-core", (void *)ch) != 0) {
+ printk(KERN_ERR "error, cannot get dma_irq!\n");
+ return -EFAULT;
+ }
+
+ enable_ch_irq(ch);
+ return 0;
+}
+
+int close_chan(struct dma_channel_info *ch)
+{
+ unsigned long flag;
+ int j;
+ int chan_no = (int)(ch - dma_chan);
+ struct rx_desc *desc_p;
+
+ if (ch->control == LTQ_DMA_CH_OFF)
+ return -1;
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32_mask(DMA_CCTRL_ON_OFF, 0, cctrl);
+ mbs_release();
+ disable_ch_irq(ch);
+ free_irq(ch->irq, (void *)ch);
+ ch->control = LTQ_DMA_CH_OFF;
+ local_irq_restore(flag);
+
+ /* free descriptors in use */
+ for (j = 0; j < ch->desc_len; j++) {
+ desc_p = (struct rx_desc *)ch->desc_base+j;
+ if ((desc_p->status.field.own == CPU_OWN &&
+ desc_p->status.field.c) ||
+ (desc_p->status.field.own == DMA_OWN)) {
+ if (desc_p->data_pointer) {
+ ch->dma_dev->buffer_free((u8 *)__va(desc_p->data_pointer),
+ (void *)ch->opt[j]);
+ desc_p->data_pointer = (u32)NULL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int reset_chan(struct dma_channel_info *ch)
+{
+ unsigned long flag;
+ int val;
+ int chan_no = (int)(ch - dma_chan);
+
+ close_chan(ch);
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32_mask(0, DMA_CCTRL_RST, cctrl);
+ mbs_release();
+ local_irq_restore(flag);
+
+ do {
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ val = dma_r32(cctrl);
+ mbs_release();
+ local_irq_restore(flag);
+ } while (val & DMA_CCTRL_RST);
+
+ return 0;
+}
+
+static inline void rx_chan_intr_handler(int chan_no)
+{
+ struct dma_device_info *dma_dev = (struct dma_device_info *)
+ dma_chan[chan_no].dma_dev;
+ struct dma_channel_info *ch = &dma_chan[chan_no];
+ struct rx_desc *rx_desc_p;
+ unsigned long flag;
+ u32 val;
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ val = dma_r32(cis);
+ dma_w32(DMA_CIS_DESCPT, cis);
+ mbs_release();
+
+ /* handle command complete interrupt */
+ rx_desc_p = (struct rx_desc *)ch->desc_base + ch->curr_desc;
+ if ((rx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) ==
+ DMA_DESC_CPT_SET) {
+ local_irq_restore(flag);
+		/* Everything is correct, so inform the upper layer */
+ dma_dev->current_rx_chan = ch->rel_chan_no;
+ if (dma_dev->intr_handler)
+ dma_dev->intr_handler(dma_dev, RCV_INT);
+ ch->weight--;
+ } else {
+ g_dma_int_status &= ~(1 << chan_no);
+ local_irq_restore(flag);
+ svip_enable_irq(dma_chan[chan_no].irq);
+ }
+}
+
+static inline void tx_chan_intr_handler(int chan_no)
+{
+ struct dma_device_info *dma_dev = (struct dma_device_info *)
+ dma_chan[chan_no].dma_dev;
+ struct dma_channel_info *ch = &dma_chan[chan_no];
+ struct tx_desc *tx_desc_p;
+ unsigned long flag;
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32(DMA_CIS_DESCPT, cis);
+ mbs_release();
+
+ tx_desc_p = (struct tx_desc *)ch->desc_base+ch->prev_desc;
+ if ((tx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) ==
+ DMA_DESC_CPT_SET) {
+ local_irq_restore(flag);
+
+ dma_dev->buffer_free((u8 *)__va(tx_desc_p->data_pointer),
+ ch->opt[ch->prev_desc]);
+ memset(tx_desc_p, 0, sizeof(struct tx_desc));
+ dma_dev->current_tx_chan = ch->rel_chan_no;
+ if (dma_dev->intr_handler)
+ dma_dev->intr_handler(dma_dev, TRANSMIT_CPT_INT);
+ ch->weight--;
+
+ ch->prev_desc = (ch->prev_desc + 1) % (ch->desc_len);
+ } else {
+ g_dma_int_status &= ~(1 << chan_no);
+ local_irq_restore(flag);
+ svip_enable_irq(dma_chan[chan_no].irq);
+ }
+}
+
+void do_dma_tasklet(unsigned long unused)
+{
+ int i;
+ int chan_no = 0;
+ int budget = DMA_INT_BUDGET;
+ int weight = 0;
+ unsigned long flag;
+
+ while (g_dma_int_status) {
+ if (budget-- < 0) {
+ tasklet_schedule(&dma_tasklet);
+ return;
+ }
+ chan_no = -1;
+ weight = 0;
+ /* WFQ algorithm to select the channel */
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
+ if (g_dma_int_status & (1 << i) &&
+ dma_chan[i].weight > 0) {
+ if (dma_chan[i].weight > weight) {
+ chan_no = i;
+ weight = dma_chan[chan_no].weight;
+ }
+ }
+ }
+ if (chan_no >= 0) {
+ if (dma_chan[chan_no].dir == DIR_RX)
+ rx_chan_intr_handler(chan_no);
+ else
+ tx_chan_intr_handler(chan_no);
+ } else {
+ /* reset all the channels */
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
+ dma_chan[i].weight = dma_chan[i].default_weight;
+ }
+ }
+
+ local_irq_save(flag);
+ g_dma_in_process = 0;
+ if (g_dma_int_status) {
+ g_dma_in_process = 1;
+ tasklet_schedule(&dma_tasklet);
+ }
+ local_irq_restore(flag);
+}
+
+irqreturn_t dma_interrupt(int irq, void *dev_id)
+{
+ struct dma_channel_info *ch;
+ int chan_no = 0;
+
+ ch = (struct dma_channel_info *)dev_id;
+ chan_no = (int)(ch - dma_chan);
+
+ if ((unsigned)chan_no >= LTQ_MAX_DMA_CHANNEL_NUM) {
+ printk(KERN_ERR "error: dma_interrupt irq=%d chan_no=%d\n",
+ irq, chan_no);
+ }
+
+ g_dma_int_status |= 1 << chan_no;
+ dma_w32(1 << chan_no, irncr);
+ mask_and_ack_irq(irq);
+
+ if (!g_dma_in_process) {
+ g_dma_in_process = 1;
+ tasklet_schedule(&dma_tasklet);
+ }
+
+ return IRQ_RETVAL(1);
+}
+
+struct dma_device_info *dma_device_reserve(char *dev_name)
+{
+ int i;
+
+ ltq_dma_init();
+ for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
+ if (strcmp(dev_name, dma_devices[i].device_name) == 0) {
+ if (dma_devices[i].reserved)
+ return NULL;
+ dma_devices[i].reserved = 1;
+ break;
+ }
+ }
+
+ if (i == LTQ_MAX_DMA_DEVICE_NUM)
+ return NULL;
+
+ return &dma_devices[i];
+}
+EXPORT_SYMBOL(dma_device_reserve);
+
+int dma_device_release(struct dma_device_info *dma_dev)
+{
+ dma_dev->reserved = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_device_release);
+
+int dma_device_register(struct dma_device_info *dma_dev)
+{
+ int port_no = (int)(dma_dev - dma_devices);
+ int txbl, rxbl;
+ unsigned long flag;
+
+ switch (dma_dev->tx_burst_len) {
+ case 8:
+ txbl = 3;
+ break;
+ case 4:
+ txbl = 2;
+ break;
+ default:
+ txbl = 1;
+ break;
+ }
+
+ switch (dma_dev->rx_burst_len) {
+ case 8:
+ rxbl = 3;
+ break;
+ case 4:
+ rxbl = 2;
+ break;
+ default:
+ rxbl = 1;
+ }
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(port_no, ps);
+ dma_w32(DMA_PCTRL_TXWGT_VAL(dma_dev->tx_weight)
+ | DMA_PCTRL_TXENDI_VAL(dma_dev->tx_endianness_mode)
+ | DMA_PCTRL_RXENDI_VAL(dma_dev->rx_endianness_mode)
+ | DMA_PCTRL_PDEN_VAL(dma_dev->drop_enable)
+ | DMA_PCTRL_TXBL_VAL(txbl)
+ | DMA_PCTRL_RXBL_VAL(rxbl), pctrl);
+ mbs_release();
+ local_irq_restore(flag);
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_device_register);
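+
+/*
+ * Minimal usage sketch for a client driver (helper names and the buffer
+ * size constant are hypothetical, not part of this file): reserve a DMA
+ * port by name, install the callbacks, then open the channels.
+ *
+ *	struct dma_device_info *dev = dma_device_reserve("SSC0");
+ *
+ *	if (!dev)
+ *		return -EBUSY;
+ *	dev->intr_handler = my_dma_intr_handler;
+ *	dev->buffer_alloc = my_buffer_alloc;
+ *	dev->buffer_free = my_buffer_free;
+ *	dma_device_register(dev);
+ *	dev->rx_chan[0]->packet_size = MY_RX_BUF_SIZE;
+ *	dev->rx_chan[0]->open(dev->rx_chan[0]);
+ *	dev->tx_chan[0]->open(dev->tx_chan[0]);
+ */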
+
+int dma_device_unregister(struct dma_device_info *dma_dev)
+{
+ int i;
+ int port_no = (int)(dma_dev - dma_devices);
+ unsigned long flag;
+
+ /* flush memcopy module; has no effect for other ports */
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(port_no, ps);
+ dma_w32_mask(0, DMA_PCTRL_GPC, pctrl);
+ mbs_release();
+ local_irq_restore(flag);
+
+ for (i = 0; i < dma_dev->max_tx_chan_num; i++)
+ reset_chan(dma_dev->tx_chan[i]);
+
+ for (i = 0; i < dma_dev->max_rx_chan_num; i++)
+ reset_chan(dma_dev->rx_chan[i]);
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_device_unregister);
+
+/**
+ * Read Packet from DMA Rx channel.
+ * The function gets the data from the current rx descriptor assigned
+ * to the passed DMA device and passes it back to the caller.
+ * The function is called in the context of the DMA interrupt.
+ * In detail the following actions are done:
+ * - get current receive descriptor
+ * - allocate a replacement buffer via the allocation callback
+ * - hand the filled buffer from the descriptor back to the caller
+ * - update channel weight
+ * - release descriptor
+ * - update current descriptor position
+ *
+ * \param *dma_dev - pointer to DMA device structure
+ * \param **dataptr - pointer to received data
+ * \param **opt
+ * \return packet length - length of received data
+ * \ingroup Internal
+ */
+int dma_device_read(struct dma_device_info *dma_dev, u8 **dataptr, void **opt)
+{
+ u8 *buf;
+ int len;
+ int byte_offset = 0;
+ void *p = NULL;
+
+ struct dma_channel_info *ch =
+ dma_dev->rx_chan[dma_dev->current_rx_chan];
+
+ struct rx_desc *rx_desc_p;
+
+ /* get the rx data first */
+ rx_desc_p = (struct rx_desc *)ch->desc_base+ch->curr_desc;
+ buf = (u8 *)__va(rx_desc_p->data_pointer);
+ *(u32 *)dataptr = (u32)buf;
+ len = rx_desc_p->status.field.data_length;
+#ifndef CONFIG_MIPS_UNCACHED
+ dma_cache_inv((unsigned long)buf, len);
+#endif
+ if (opt)
+ *(int *)opt = (int)ch->opt[ch->curr_desc];
+
+ /* replace with a new allocated buffer */
+ buf = dma_dev->buffer_alloc(ch->packet_size, &byte_offset, &p);
+ if (buf) {
+ ch->opt[ch->curr_desc] = p;
+
+ wmb();
+ rx_desc_p->data_pointer = (u32)CPHYSADDR((u32)buf);
+ rx_desc_p->status.word = (DMA_OWN << 31) \
+ |(byte_offset << 23) \
+ | ch->packet_size;
+
+ wmb();
+ } else {
+ *(u32 *)dataptr = 0;
+ if (opt)
+ *(int *)opt = 0;
+ }
+
+ ch->xfer_cnt++;
+ /* increase the curr_desc pointer */
+ ch->curr_desc++;
+ if (ch->curr_desc == ch->desc_len)
+ ch->curr_desc = 0;
+ /* return the length of the received packet */
+ return len;
+}
+EXPORT_SYMBOL(dma_device_read);
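+
+/*
+ * Sketch of a client interrupt callback consuming received data (the
+ * exact intr_handler prototype is defined in svip_dma.h; the handler
+ * name is hypothetical):
+ *
+ *	my_dma_intr_handler(struct dma_device_info *dev, int status)
+ *	{
+ *		u8 *data;
+ *		int len;
+ *
+ *		if (status == RCV_INT) {
+ *			len = dma_device_read(dev, &data, NULL);
+ *			... hand "data" (len bytes) to the upper layer ...
+ *		}
+ *	}
+ */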
+
+/**
+ * Write Packet through DMA Tx channel to peripheral.
+ *
+ * \param *dma_dev - pointer to DMA device structure
+ * \param *dataptr - pointer to data to be sent
+ * \param len - amount of data bytes to be sent
+ * \param *opt
+ * \return len - length of transmitted data
+ * \ingroup Internal
+ */
+int dma_device_write(struct dma_device_info *dma_dev, u8 *dataptr, int len,
+ void *opt)
+{
+ unsigned long flag;
+ u32 byte_offset;
+ struct dma_channel_info *ch;
+ int chan_no;
+ struct tx_desc *tx_desc_p;
+ local_irq_save(flag);
+
+ ch = dma_dev->tx_chan[dma_dev->current_tx_chan];
+ chan_no = (int)(ch - dma_chan);
+
+ if (ch->control == LTQ_DMA_CH_OFF) {
+ local_irq_restore(flag);
+ printk(KERN_ERR "%s: dma channel %d not enabled!\n",
+ __func__, chan_no);
+ return 0;
+ }
+
+ tx_desc_p = (struct tx_desc *)ch->desc_base+ch->curr_desc;
+ /* Check whether this descriptor is available */
+ if (tx_desc_p->status.word & (DMA_DESC_OWN_DMA | DMA_DESC_CPT_SET)) {
+		/* if not, tell the upper layer device */
+ dma_dev->intr_handler(dma_dev, TX_BUF_FULL_INT);
+ local_irq_restore(flag);
+ return 0;
+ }
+ ch->opt[ch->curr_desc] = opt;
+	/* byte offset: adjust the starting address of the data buffer;
+	 * it must be a multiple of the burst length. */
+ byte_offset = ((u32)CPHYSADDR((u32)dataptr)) %
+ (dma_dev->tx_burst_len * 4);
+#ifndef CONFIG_MIPS_UNCACHED
+ dma_cache_wback((unsigned long)dataptr, len);
+ wmb();
+#endif
+ tx_desc_p->data_pointer = (u32)CPHYSADDR((u32)dataptr) - byte_offset;
+ wmb();
+ tx_desc_p->status.word = (DMA_OWN << 31)
+ | DMA_DESC_SOP_SET
+ | DMA_DESC_EOP_SET
+ | (byte_offset << 23)
+ | len;
+ wmb();
+
+ if (ch->xfer_cnt == 0) {
+ mbs_grab();
+ dma_w32(chan_no, cs);
+ dma_w32_mask(0, DMA_CCTRL_ON_OFF, cctrl);
+ mbs_release();
+ }
+
+ ch->xfer_cnt++;
+ ch->curr_desc++;
+ if (ch->curr_desc == ch->desc_len)
+ ch->curr_desc = 0;
+
+ local_irq_restore(flag);
+ return len;
+}
+EXPORT_SYMBOL(dma_device_write);
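+
+/*
+ * Transmit sketch: select the tx channel, then queue one buffer. The
+ * buffer is handed back through buffer_free() once TRANSMIT_CPT_INT
+ * fires; names other than the API calls are hypothetical:
+ *
+ *	dev->current_tx_chan = 0;
+ *	sent = dma_device_write(dev, buf, len, opt);
+ */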
+
+/**
+ * Displays the descriptor list of one DMA channel per read via proc file;
+ * successive reads step through the channels.
+ *
+ * \ingroup Internal
+ */
+int desc_list_proc_read(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len = 0;
+ int i;
+ static int chan_no;
+ u32 *p;
+
+ if ((chan_no == 0) && (offset > count)) {
+ *eof = 1;
+ return 0;
+ }
+
+ if (chan_no != 0) {
+ *start = buf;
+ } else {
+ buf = buf + offset;
+ *start = buf;
+ }
+
+ p = (u32 *)dma_chan[chan_no].desc_base;
+
+ if (dma_chan[chan_no].dir == DIR_RX)
+ len += sprintf(buf + len,
+ "channel %d %s Rx descriptor list:\n",
+ chan_no, dma_chan[chan_no].dma_dev->device_name);
+ else
+ len += sprintf(buf + len,
+ "channel %d %s Tx descriptor list:\n",
+ chan_no, dma_chan[chan_no].dma_dev->device_name);
+ len += sprintf(buf + len,
+ " no address data pointer command bits "
+ "(Own, Complete, SoP, EoP, Offset) \n");
+ len += sprintf(buf + len,
+ "----------------------------------------------"
+ "-----------------------------------\n");
+ for (i = 0; i < dma_chan[chan_no].desc_len; i++) {
+ len += sprintf(buf + len, "%3d ", i);
+ len += sprintf(buf + len, "0x%08x ", (u32)(p + (i * 2)));
+ len += sprintf(buf + len, "%08x ", *(p + (i * 2 + 1)));
+ len += sprintf(buf + len, "%08x ", *(p + (i * 2)));
+
+ if (*(p + (i * 2)) & 0x80000000)
+ len += sprintf(buf + len, "D ");
+ else
+ len += sprintf(buf + len, "C ");
+ if (*(p + (i * 2)) & 0x40000000)
+ len += sprintf(buf + len, "C ");
+ else
+ len += sprintf(buf + len, "c ");
+ if (*(p + (i * 2)) & 0x20000000)
+ len += sprintf(buf + len, "S ");
+ else
+ len += sprintf(buf + len, "s ");
+ if (*(p + (i * 2)) & 0x10000000)
+ len += sprintf(buf + len, "E ");
+ else
+ len += sprintf(buf + len, "e ");
+
+ /* byte offset is different for rx and tx descriptors*/
+ if (dma_chan[chan_no].dir == DIR_RX) {
+ len += sprintf(buf + len, "%01x ",
+ (*(p + (i * 2)) & 0x01800000) >> 23);
+ } else {
+ len += sprintf(buf + len, "%02x ",
+ (*(p + (i * 2)) & 0x0F800000) >> 23);
+ }
+
+ if (dma_chan[chan_no].curr_desc == i)
+ len += sprintf(buf + len, "<- CURR");
+
+ if (dma_chan[chan_no].prev_desc == i)
+ len += sprintf(buf + len, "<- PREV");
+
+ len += sprintf(buf + len, "\n");
+
+ }
+
+ len += sprintf(buf + len, "\n");
+ chan_no++;
+ if (chan_no > LTQ_MAX_DMA_CHANNEL_NUM - 1)
+ chan_no = 0;
+
+ *eof = 1;
+ return len;
+}
+
+/**
+ * Displays the weight of all DMA channels via proc file
+ *
+ * \param *buf
+ * \param **start
+ * \param offset
+ * \param count
+ * \param *eof
+ * \param *data
+ * \return len - amount of bytes written to file
+ */
+int channel_weight_proc_read(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int i;
+ int len = 0;
+ len += sprintf(buf + len, "Qos dma channel weight list\n");
+ len += sprintf(buf + len, "channel_num default_weight "
+ "current_weight device Tx/Rx\n");
+ len += sprintf(buf + len, "---------------------------"
+ "---------------------------------\n");
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
+ struct dma_channel_info *ch = &dma_chan[i];
+
+ if (ch->dir == DIR_RX) {
+ len += sprintf(buf + len,
+ " %2d %08x "
+ "%08x %10s Rx\n",
+ i, ch->default_weight, ch->weight,
+ ch->dma_dev->device_name);
+ } else {
+ len += sprintf(buf + len,
+ " %2d %08x "
+ "%08x %10s Tx\n",
+ i, ch->default_weight, ch->weight,
+ ch->dma_dev->device_name);
+ }
+ }
+
+ return len;
+}
+
+/**
+ * Provides DMA Register Content to proc file
+ * This function reads the content of general DMA Registers, DMA Channel
+ * Registers and DMA Port Registers and writes a structured output to the
+ * DMA proc file.
+ *
+ * \param *buf
+ * \param **start
+ * \param offset
+ * \param count
+ * \param *eof
+ * \param *data
+ * \return len - amount of bytes written to file
+ */
+int dma_register_proc_read(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len = 0;
+ int i;
+ int limit = count;
+ unsigned long flags;
+ static int blockcount;
+ static int channel_no;
+
+ if ((blockcount == 0) && (offset > count)) {
+ *eof = 1;
+ return 0;
+ }
+
+ switch (blockcount) {
+ case 0:
+ len += sprintf(buf + len, "\nGeneral DMA Registers\n");
+ len += sprintf(buf + len, "-------------------------"
+ "----------------\n");
+ len += sprintf(buf + len, "CLC= %08x\n", dma_r32(clc));
+ len += sprintf(buf + len, "ID= %08x\n", dma_r32(id));
+ len += sprintf(buf + len, "DMA_CPOLL= %08x\n", dma_r32(cpoll));
+ len += sprintf(buf + len, "DMA_CS= %08x\n", dma_r32(cs));
+ len += sprintf(buf + len, "DMA_PS= %08x\n", dma_r32(ps));
+ len += sprintf(buf + len, "DMA_IRNEN= %08x\n", dma_r32(irnen));
+ len += sprintf(buf + len, "DMA_IRNCR= %08x\n", dma_r32(irncr));
+ len += sprintf(buf + len, "DMA_IRNICR= %08x\n",
+ dma_r32(irnicr));
+ len += sprintf(buf + len, "\nDMA Channel Registers\n");
+ blockcount = 1;
+ return len;
+ break;
+ case 1:
+ /* If we had an overflow start at beginning of buffer
+ * otherwise use offset */
+ if (channel_no != 0) {
+ *start = buf;
+ } else {
+ buf = buf + offset;
+ *start = buf;
+ }
+
+ local_irq_save(flags);
+ for (i = channel_no; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
+ struct dma_channel_info *ch = &dma_chan[i];
+
+ if (len + 300 > limit) {
+ local_irq_restore(flags);
+ channel_no = i;
+ blockcount = 1;
+ return len;
+ }
+ len += sprintf(buf + len, "----------------------"
+ "-------------------\n");
+ if (ch->dir == DIR_RX) {
+ len += sprintf(buf + len,
+ "Channel %d - Device %s Rx\n",
+ i, ch->dma_dev->device_name);
+ } else {
+ len += sprintf(buf + len,
+ "Channel %d - Device %s Tx\n",
+ i, ch->dma_dev->device_name);
+ }
+ dma_w32(i, cs);
+ len += sprintf(buf + len, "DMA_CCTRL= %08x\n",
+ dma_r32(cctrl));
+ len += sprintf(buf + len, "DMA_CDBA= %08x\n",
+ dma_r32(cdba));
+ len += sprintf(buf + len, "DMA_CIE= %08x\n",
+ dma_r32(cie));
+ len += sprintf(buf + len, "DMA_CIS= %08x\n",
+ dma_r32(cis));
+ len += sprintf(buf + len, "DMA_CDLEN= %08x\n",
+ dma_r32(cdlen));
+ }
+ local_irq_restore(flags);
+ blockcount = 2;
+ channel_no = 0;
+ return len;
+ break;
+ case 2:
+ *start = buf;
+ /*
+ * display port dependent registers
+ */
+ len += sprintf(buf + len, "\nDMA Port Registers\n");
+ len += sprintf(buf + len,
+ "-----------------------------------------\n");
+ local_irq_save(flags);
+ for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
+ dma_w32(i, ps);
+ len += sprintf(buf + len,
+ "Port %d DMA_PCTRL= %08x\n",
+ i, dma_r32(pctrl));
+ }
+ local_irq_restore(flags);
+ blockcount = 0;
+ *eof = 1;
+ return len;
+ break;
+ }
+
+ blockcount = 0;
+ *eof = 1;
+ return 0;
+}
+
+/**
+ * Open Method of DMA Device Driver
+ * Currently a no-op that only returns success.
+ *
+ *
+ * \param
+ * \return
+ */
+static int dma_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/**
+ * Release Method of DMA Device driver.
+ * Currently a no-op; there are no resources to release here.
+ *
+ *
+ * \param
+ * \return
+ */
+static int dma_release(struct inode *inode, struct file *file)
+{
+ /* release the resources */
+ return 0;
+}
+
+/**
+ * Ioctl Interface to DMA Module
+ *
+ * \param None
+ * \return 0 - success
+ *         <0 - failure
+ */
+static long dma_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int result = 0;
+	/* TODO: add some user-controlled functions here */
+ return result;
+}
+
+static const struct file_operations dma_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_open,
+ .release = dma_release,
+ .unlocked_ioctl = dma_ioctl,
+};
+
+void map_dma_chan(struct dma_channel_info *map)
+{
+ int i;
+
+ /* assign default values for channel settings */
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
+ dma_chan[i].byte_offset = 0;
+ dma_chan[i].open = &open_chan;
+ dma_chan[i].close = &close_chan;
+ dma_chan[i].reset = &reset_chan;
+ dma_chan[i].enable_irq = enable_ch_irq;
+ dma_chan[i].disable_irq = disable_ch_irq;
+ dma_chan[i].tx_weight = 1;
+ dma_chan[i].control = 0;
+ dma_chan[i].default_weight = LTQ_DMA_CH_DEFAULT_WEIGHT;
+ dma_chan[i].weight = dma_chan[i].default_weight;
+ dma_chan[i].curr_desc = 0;
+ dma_chan[i].prev_desc = 0;
+ }
+
+ /* assign default values for port settings */
+ for (i = 0; i < LTQ_MAX_DMA_DEVICE_NUM; i++) {
+ /*set default tx channel number to be one*/
+ dma_devices[i].num_tx_chan = 1;
+ /*set default rx channel number to be one*/
+ dma_devices[i].num_rx_chan = 1;
+ dma_devices[i].buffer_alloc = common_buffer_alloc;
+ dma_devices[i].buffer_free = common_buffer_free;
+ dma_devices[i].intr_handler = NULL;
+ dma_devices[i].tx_burst_len = 4;
+ dma_devices[i].rx_burst_len = 4;
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ dma_devices[i].tx_endianness_mode = 0;
+ dma_devices[i].rx_endianness_mode = 0;
+#else
+ dma_devices[i].tx_endianness_mode = 3;
+ dma_devices[i].rx_endianness_mode = 3;
+#endif
+ }
+}
+
+void dma_chip_init(void)
+{
+ int i;
+
+ sys1_w32(SYS1_CLKENR_DMA, clkenr);
+ wmb();
+ /* reset DMA */
+ dma_w32(DMA_CTRL_RST, ctrl);
+ wmb();
+ /* disable all the interrupts first */
+ dma_w32(0, irnen);
+
+ /* enable polling for all channels */
+ dma_w32(DMA_CPOLL_EN | DMA_CPOLL_CNT_VAL(DMA_POLL_COUNTER), cpoll);
+
+ /****************************************************/
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
+ disable_ch_irq(&dma_chan[i]);
+}
+
+int ltq_dma_init(void)
+{
+ int result = 0;
+ int i;
+ unsigned long flag;
+ static int dma_initialized;
+
+ if (dma_initialized == 1)
+ return 0;
+ dma_initialized = 1;
+
+ result = register_chrdev(DMA_MAJOR, "dma-core", &dma_fops);
+ if (result) {
+ DMA_EMSG("cannot register device dma-core!\n");
+ return result;
+ }
+
+ dma_chip_init();
+ map_dma_chan(dma_chan);
+
+ /* allocate DMA memory for buffer descriptors */
+ for (i = 0; i < DMA_DESCR_MEM_PAGES; i++) {
+ g_desc_list[i] = (u64 *)__get_free_page(GFP_DMA);
+ if (g_desc_list[i] == NULL) {
+			DMA_EMSG("no memory for descriptor\n");
+ return -ENOMEM;
+ }
+ g_desc_list[i] = (u64 *)KSEG1ADDR(g_desc_list[i]);
+ memset(g_desc_list[i], 0, PAGE_SIZE);
+ }
+
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++) {
+ int page_index, ch_per_page;
+ /* cross-link relative channels of a port to
+ * corresponding absolute channels */
+ if (dma_chan[i].dir == DIR_RX) {
+ ((struct dma_device_info *)(dma_chan[i].dma_dev))->
+ rx_chan[dma_chan[i].rel_chan_no] = &dma_chan[i];
+ } else {
+ ((struct dma_device_info *)(dma_chan[i].dma_dev))->
+ tx_chan[dma_chan[i].rel_chan_no] = &dma_chan[i];
+ }
+ dma_chan[i].abs_chan_no = i;
+
+ page_index = i * DMA_DESCR_CH_SIZE / PAGE_SIZE;
+ ch_per_page = PAGE_SIZE / DMA_DESCR_CH_SIZE +
+ ((PAGE_SIZE % DMA_DESCR_CH_SIZE) > 0);
+ dma_chan[i].desc_base =
+ (u32)g_desc_list[page_index] +
+ (i - page_index*ch_per_page) * DMA_DESCR_NUM*8;
+ dma_chan[i].curr_desc = 0;
+ dma_chan[i].desc_len = DMA_DESCR_NUM;
+
+ local_irq_save(flag);
+ mbs_grab();
+ dma_w32(i, cs);
+ dma_w32((u32)CPHYSADDR(dma_chan[i].desc_base), cdba);
+ mbs_release();
+ local_irq_restore(flag);
+ }
+
+ g_dma_dir = proc_mkdir("driver/" DRV_NAME, NULL);
+
+ create_proc_read_entry("dma_register",
+ 0,
+ g_dma_dir,
+ dma_register_proc_read,
+ NULL);
+
+ create_proc_read_entry("g_desc_list",
+ 0,
+ g_dma_dir,
+ desc_list_proc_read,
+ NULL);
+
+ create_proc_read_entry("channel_weight",
+ 0,
+ g_dma_dir,
+ channel_weight_proc_read,
+ NULL);
+
+ printk(KERN_NOTICE "SVIP DMA engine initialized\n");
+
+ return 0;
+}
+
+/**
+ * Cleanup DMA device
+ * This function releases all resources used by the DMA device driver on
+ * module removal.
+ *
+ *
+ * \param None
+ * \return Nothing
+ */
+void dma_cleanup(void)
+{
+ int i;
+ unregister_chrdev(DMA_MAJOR, "dma-core");
+
+ for (i = 0; i < DMA_DESCR_MEM_PAGES; i++)
+ free_page(KSEG0ADDR((unsigned long)g_desc_list[i]));
+ remove_proc_entry("channel_weight", g_dma_dir);
+ remove_proc_entry("g_desc_list", g_dma_dir);
+ remove_proc_entry("dma_register", g_dma_dir);
+ remove_proc_entry("driver/" DRV_NAME, NULL);
+ /* release the resources */
+ for (i = 0; i < LTQ_MAX_DMA_CHANNEL_NUM; i++)
+ free_irq(dma_chan[i].irq, (void *)&dma_chan[i]);
+}
+
+arch_initcall(ltq_dma_init);
+
+MODULE_LICENSE("GPL");
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/gpio.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/gpio.c
new file mode 100644
index 0000000000..398339201e
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/gpio.c
@@ -0,0 +1,553 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/kobject.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/platform_device.h>
+#include <net/sock.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/semaphore.h>
+
+#include <lantiq_soc.h>
+#include <svip_mux.h>
+#include <base_reg.h>
+#include <port_reg.h>
+
+#define DRV_NAME "ifxmips_gpio"
+
+int gpio_to_irq(unsigned int gpio)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(gpio_to_irq);
+
+int irq_to_gpio(unsigned int gpio)
+{
+ return -EINVAL;
+}
+EXPORT_SYMBOL(irq_to_gpio);
+
+struct ltq_port_base {
+ struct svip_reg_port *base;
+ u32 pins;
+};
+
+/* Base addresses for ports */
+static const struct ltq_port_base ltq_port_base[] = {
+ { (struct svip_reg_port *)LTQ_PORT_P0_BASE, 20 },
+ { (struct svip_reg_port *)LTQ_PORT_P1_BASE, 20 },
+ { (struct svip_reg_port *)LTQ_PORT_P2_BASE, 19 },
+ { (struct svip_reg_port *)LTQ_PORT_P3_BASE, 20 },
+ { (struct svip_reg_port *)LTQ_PORT_P4_BASE, 24 }
+};
+
+#define MAX_PORTS ARRAY_SIZE(ltq_port_base)
+#define PINS_PER_PORT(port) (ltq_port_base[port].pins)
+
+static inline
+void ltq_port_set_exintcr0(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->exintcr0) | (1 << pin),
+ ltq_port_base[port].base->exintcr0);
+}
+
+static inline
+void ltq_port_clear_exintcr0(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->exintcr0) & ~(1 << pin),
+ ltq_port_base[port].base->exintcr0);
+}
+
+static inline
+void ltq_port_set_exintcr1(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->exintcr1) | (1 << pin),
+ ltq_port_base[port].base->exintcr1);
+}
+
+static inline
+void ltq_port_clear_exintcr1(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->exintcr1) & ~(1 << pin),
+ ltq_port_base[port].base->exintcr1);
+}
+
+static inline
+void ltq_port_set_irncfg(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->irncfg) | (1 << pin),
+ ltq_port_base[port].base->irncfg);
+}
+
+static inline
+void ltq_port_clear_irncfg(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->irncfg) & ~(1 << pin),
+ ltq_port_base[port].base->irncfg);
+}
+
+static inline
+void ltq_port_set_irnen(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(1 << pin, ltq_port_base[port].base->irnenset);
+}
+
+static inline
+void ltq_port_clear_irnen(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(1 << pin, ltq_port_base[port].base->irnenclr);
+}
+
+static inline
+void ltq_port_set_dir_out(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->dir) | (1 << pin),
+ ltq_port_base[port].base->dir);
+}
+
+static inline
+void ltq_port_set_dir_in(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->dir) & ~(1 << pin),
+ ltq_port_base[port].base->dir);
+}
+
+static inline
+void ltq_port_set_output(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->out) | (1 << pin),
+ ltq_port_base[port].base->out);
+}
+
+static inline
+void ltq_port_clear_output(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->out) & ~(1 << pin),
+ ltq_port_base[port].base->out);
+}
+
+static inline
+int ltq_port_get_input(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return -EINVAL;
+
+ return (port_r32(ltq_port_base[port].base->in) & (1 << pin)) == 0;
+}
+
+static inline
+void ltq_port_set_puen(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->puen) | (1 << pin),
+ ltq_port_base[port].base->puen);
+}
+
+static inline
+void ltq_port_clear_puen(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->puen) & ~(1 << pin),
+ ltq_port_base[port].base->puen);
+}
+
+static inline
+void ltq_port_set_altsel0(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->altsel0) | (1 << pin),
+ ltq_port_base[port].base->altsel0);
+}
+
+static inline
+void ltq_port_clear_altsel0(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->altsel0) & ~(1 << pin),
+ ltq_port_base[port].base->altsel0);
+}
+
+static inline
+void ltq_port_set_altsel1(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->altsel1) | (1 << pin),
+ ltq_port_base[port].base->altsel1);
+}
+
+static inline
+void ltq_port_clear_altsel1(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return;
+
+ port_w32(port_r32(ltq_port_base[port].base->altsel1) & ~(1 << pin),
+ ltq_port_base[port].base->altsel1);
+}
+
+void ltq_gpio_configure(int port, int pin, bool dirin, bool puen,
+ bool altsel0, bool altsel1)
+{
+ if (dirin)
+ ltq_port_set_dir_in(port, pin);
+ else
+ ltq_port_set_dir_out(port, pin);
+
+ if (puen)
+ ltq_port_set_puen(port, pin);
+ else
+ ltq_port_clear_puen(port, pin);
+
+ if (altsel0)
+ ltq_port_set_altsel0(port, pin);
+ else
+ ltq_port_clear_altsel0(port, pin);
+
+ if (altsel1)
+ ltq_port_set_altsel1(port, pin);
+ else
+ ltq_port_clear_altsel1(port, pin);
+}
+
+int ltq_port_get_dir(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return -EINVAL;
+
+ return (port_r32(ltq_port_base[port].base->dir) & (1 << pin)) != 0;
+}
+
+int ltq_port_get_puden(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return -EINVAL;
+
+ return (port_r32(ltq_port_base[port].base->puen) & (1 << pin)) != 0;
+}
+
+int ltq_port_get_altsel0(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return -EINVAL;
+
+ return (port_r32(ltq_port_base[port].base->altsel0) & (1 << pin)) != 0;
+}
+
+int ltq_port_get_altsel1(unsigned int port, unsigned int pin)
+{
+ if (port >= MAX_PORTS || pin >= PINS_PER_PORT(port))
+ return -EINVAL;
+
+ return (port_r32(ltq_port_base[port].base->altsel1) & (1 << pin)) != 0;
+}
+
+struct ltq_gpio_port {
+ struct gpio_chip gpio_chip;
+ unsigned int irq_base;
+ unsigned int chained_irq;
+};
+
+static struct ltq_gpio_port ltq_gpio_port[MAX_PORTS];
+
+static int gpio_exported;
+static int __init gpio_export_setup(char *str)
+{
+ get_option(&str, &gpio_exported);
+ return 1;
+}
+__setup("gpio_exported=", gpio_export_setup);
+
+static inline unsigned int offset2port(unsigned int offset)
+{
+ unsigned int i;
+ unsigned int prev = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ltq_port_base); i++) {
+ if (offset >= prev &&
+ offset < prev + ltq_port_base[i].pins)
+ return i;
+
+		prev += ltq_port_base[i].pins;
+ }
+
+ return 0;
+}
+
+static inline unsigned int offset2pin(unsigned int offset)
+{
+ unsigned int i;
+ unsigned int prev = 0;
+
+ for (i = 0; i < ARRAY_SIZE(ltq_port_base); i++) {
+ if (offset >= prev &&
+ offset < prev + ltq_port_base[i].pins)
+ return offset - prev;
+
+		prev += ltq_port_base[i].pins;
+ }
+
+ return 0;
+}
+
+static int ltq_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ ltq_port_set_dir_in(offset2port(offset), offset2pin(offset));
+ return 0;
+}
+
+static int ltq_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ ltq_port_set_dir_out(offset2port(offset), offset2pin(offset));
+ return 0;
+}
+
+static int ltq_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return ltq_port_get_input(offset2port(offset), offset2pin(offset));
+}
+
+static void ltq_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ if (value)
+ ltq_port_set_output(offset2port(offset), offset2pin(offset));
+ else
+ ltq_port_clear_output(offset2port(offset), offset2pin(offset));
+}
+
+static int svip_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return 0;
+}
+
+static void ltq_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+}
+
+static int ltq_gpio_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct ltq_gpio_port *gpio_port;
+
+ if (pdev->id >= MAX_PORTS)
+ return -ENODEV;
+
+ gpio_port = &ltq_gpio_port[pdev->id];
+ gpio_port->gpio_chip.label = "ltq-gpio";
+
+ gpio_port->gpio_chip.direction_input = ltq_gpio_direction_input;
+ gpio_port->gpio_chip.direction_output = ltq_gpio_direction_output;
+ gpio_port->gpio_chip.get = ltq_gpio_get;
+ gpio_port->gpio_chip.set = ltq_gpio_set;
+ gpio_port->gpio_chip.request = svip_gpio_request;
+ gpio_port->gpio_chip.free = ltq_gpio_free;
+ gpio_port->gpio_chip.base = 100 * pdev->id;
+ gpio_port->gpio_chip.ngpio = 32;
+ gpio_port->gpio_chip.dev = &pdev->dev;
+ gpio_port->gpio_chip.exported = gpio_exported;
+
+ ret = gpiochip_add(&gpio_port->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip %d, %d\n",
+ pdev->id, ret);
+ goto err;
+ }
+ platform_set_drvdata(pdev, gpio_port);
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int ltq_gpio_remove(struct platform_device *pdev)
+{
+ struct ltq_gpio_port *gpio_port = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&gpio_port->gpio_chip);
+
+ return ret;
+}
+
+static struct platform_driver ltq_gpio_driver = {
+ .probe = ltq_gpio_probe,
+ .remove = __devexit_p(ltq_gpio_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init ltq_gpio_init(void)
+{
+ int ret = platform_driver_register(&ltq_gpio_driver);
+ if (ret)
+ printk(KERN_INFO DRV_NAME
+ ": Error registering platform driver!");
+ return ret;
+}
+
+postcore_initcall(ltq_gpio_init);
+
+/**
+ * Convert interrupt number to corresponding port/pin pair
+ * Returns the port/pin pair serving the selected external interrupt;
+ * needed since the mapping is not linear.
+ *
+ * \param exint External interrupt number
+ * \param port Pointer for resulting port
+ * \param pin Pointer for resulting pin
+ * \return -EINVAL Invalid exint
+ * \return 0 port/pin updated
+ * \ingroup API
+ */
+static int ltq_exint2port(u32 exint, int *port, int *pin)
+{
+ if ((exint >= 0) && (exint <= 10)) {
+ *port = 0;
+ *pin = exint + 7;
+ } else if ((exint >= 11) && (exint <= 14)) {
+ *port = 1;
+ *pin = 18 - (exint - 11) ;
+ } else if (exint == 15) {
+ *port = 1;
+ *pin = 19;
+ } else if (exint == 16) {
+ *port = 0;
+ *pin = 19;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Enable external interrupt.
+ * This function enables an external interrupt and sets the given mode.
+ * Valid values for mode are:
+ * - 0 = Interrupt generation disabled
+ * - 1 = Interrupt on rising edge
+ * - 2 = Interrupt on falling edge
+ * - 3 = Interrupt on rising and falling edge
+ * - 5 = Interrupt on high level detection
+ * - 6 = Interrupt on low level detection
+ *
+ * \param exint - Number of external interrupt
+ * \param mode - Trigger mode
+ * \return 0 on success
+ * \ingroup API
+ */
+int ifx_enable_external_int(u32 exint, u32 mode)
+{
+ int port;
+ int pin;
+
+ if ((mode < 0) || (mode > 6))
+ return -EINVAL;
+
+ if (ltq_exint2port(exint, &port, &pin))
+ return -EINVAL;
+
+ ltq_port_clear_exintcr0(port, pin);
+ ltq_port_clear_exintcr1(port, pin);
+ ltq_port_clear_irncfg(port, pin);
+
+ if (mode & 0x1)
+ ltq_port_set_exintcr0(port, pin);
+ if (mode & 0x2)
+ ltq_port_set_exintcr1(port, pin);
+ if (mode & 0x4)
+ ltq_port_set_irncfg(port, pin);
+
+ ltq_port_set_irnen(port, pin);
+ return 0;
+}
+EXPORT_SYMBOL(ifx_enable_external_int);
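+
+/*
+ * Example (sketch): EXINT4 is routed to port 0, pin 11 by
+ * ltq_exint2port(), so a rising-edge interrupt on that line is armed
+ * with:
+ *
+ *	ifx_enable_external_int(4, 1);
+ */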
+
+/**
+ * Disable external interrupt.
+ * This function disables an external interrupt by clearing its enable bit.
+ *
+ * \param exint - Number of external interrupt
+ * \return 0 on success
+ * \ingroup API
+ */
+int ifx_disable_external_int(u32 exint)
+{
+ int port;
+ int pin;
+
+ if (ltq_exint2port(exint, &port, &pin))
+ return -EINVAL;
+
+ ltq_port_clear_irnen(port, pin);
+ return 0;
+}
+EXPORT_SYMBOL(ifx_disable_external_int);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy33016.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy33016.c
new file mode 100644
index 0000000000..c5993efd6d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy33016.c
@@ -0,0 +1,73 @@
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include "../machtypes.h"
+
+#include <sys1_reg.h>
+#include <sys2_reg.h>
+#include <svip_pms.h>
+
+#include "devices.h"
+
+static const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS] = {
+ LTQ_MUX_P0_0_SSC0_MTSR,
+ LTQ_MUX_P0_1_SSC0_MRST,
+ LTQ_MUX_P0_2_SSC0_SCLK,
+ LTQ_MUX_P0_3_SSC1_MTSR,
+ LTQ_MUX_P0_4_SSC1_MRST,
+ LTQ_MUX_P0_5_SSC1_SCLK,
+ LTQ_MUX_P0_6_SSC0_CS0,
+ LTQ_MUX_P0_7_SSC0_CS1,
+ LTQ_MUX_P0_8_SSC0_CS2,
+ LTQ_MUX_P0_9,
+ LTQ_MUX_P0_10,
+ LTQ_MUX_P0_11_EXINT4,
+ LTQ_MUX_P0_12,
+ LTQ_MUX_P0_13,
+ LTQ_MUX_P0_14_ASC0_TXD,
+ LTQ_MUX_P0_15_ASC0_RXD,
+ LTQ_MUX_P0_16_EXINT9,
+ LTQ_MUX_P0_17_EXINT10,
+ LTQ_MUX_P0_18_EJ_BRKIN,
+ LTQ_MUX_P0_19_EXINT16
+};
+
+static void __init easy33016_init(void)
+{
+ svip_sys1_clk_enable(SYS1_CLKENR_L2C |
+ SYS1_CLKENR_DDR2 |
+ SYS1_CLKENR_SMI2 |
+ SYS1_CLKENR_SMI1 |
+ SYS1_CLKENR_SMI0 |
+ SYS1_CLKENR_FMI0 |
+ SYS1_CLKENR_DMA |
+ SYS1_CLKENR_SSC0 |
+ SYS1_CLKENR_SSC1 |
+ SYS1_CLKENR_EBU);
+
+ svip_sys2_clk_enable(SYS2_CLKENR_HWSYNC |
+ SYS2_CLKENR_MBS |
+ SYS2_CLKENR_SWINT);
+
+ svip_register_mux(mux_p0, NULL, NULL, NULL, NULL);
+ svip_register_asc(0);
+ svip_register_eth();
+ svip_register_virtual_eth();
+ ltq_register_wdt();
+ svip_register_gpio();
+ svip_register_spi();
+ svip_register_nand();
+}
+
+MIPS_MACHINE(LANTIQ_MACH_EASY33016,
+ "EASY33016",
+ "EASY33016",
+ easy33016_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy336.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy336.c
new file mode 100644
index 0000000000..460bb7d85d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mach-easy336.c
@@ -0,0 +1,221 @@
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include "../machtypes.h"
+
+#include <sys1_reg.h>
+#include <sys2_reg.h>
+#include <svip_pms.h>
+
+#include "devices.h"
+
+static struct mtd_partition easy336_sflash_partitions[] = {
+ {
+ .name = "SPI flash",
+ .size = MTDPART_SIZ_FULL,
+ .offset = 0,
+ },
+};
+
+static struct flash_platform_data easy336_sflash_data = {
+ .name = "m25p32",
+ .parts = (void *)&easy336_sflash_partitions,
+ .nr_parts = ARRAY_SIZE(easy336_sflash_partitions),
+ .type = "m25p32",
+};
+
+static struct spi_board_info bdinfo[] __initdata = {
+ {
+ .modalias = "m25p80",
+ .platform_data = &easy336_sflash_data,
+ .mode = SPI_MODE_0,
+ .irq = -1,
+ .max_speed_hz = 25000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ }
+};
+
+static struct mtd_partition easy336_partitions[] = {
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x40000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x40000,
+ .size = 0x20000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x60000,
+ .size = 0x1a0000,
+ },
+ {
+ .name = "rootfs",
+ .offset = 0x200000,
+ .size = 0x500000,
+ },
+};
+
+static struct physmap_flash_data easy336_flash_data = {
+ .nr_parts = ARRAY_SIZE(easy336_partitions),
+ .parts = easy336_partitions,
+};
+
+static const struct ltq_mux_pin mux_p0[LTQ_MUX_P0_PINS] = {
+ LTQ_MUX_P0_0_SSC0_MTSR,
+ LTQ_MUX_P0_1_SSC0_MRST,
+ LTQ_MUX_P0_2_SSC0_SCLK,
+ LTQ_MUX_P0_3_SSC1_MTSR,
+ LTQ_MUX_P0_4_SSC1_MRST,
+ LTQ_MUX_P0_5_SSC1_SCLK,
+ LTQ_MUX_P0_6_SSC0_CS0,
+ LTQ_MUX_P0_7_SSC0_CS1,
+ LTQ_MUX_P0_8_SSC0_CS2,
+ LTQ_MUX_P0_9_SSC0_CS3,
+ LTQ_MUX_P0_10_SSC0_CS4,
+ LTQ_MUX_P0_11_SSC0_CS5,
+ LTQ_MUX_P0_12_EXINT5,
+ LTQ_MUX_P0_13_EXINT6,
+ LTQ_MUX_P0_14_ASC0_TXD,
+ LTQ_MUX_P0_15_ASC0_RXD,
+ LTQ_MUX_P0_16_EXINT9,
+ LTQ_MUX_P0_17_EXINT10,
+ LTQ_MUX_P0_18_EJ_BRKIN,
+ LTQ_MUX_P0_19_EXINT16
+};
+
+static const struct ltq_mux_pin mux_p2[LTQ_MUX_P2_PINS] = {
+ LTQ_MUX_P2_0_EBU_A0,
+ LTQ_MUX_P2_1_EBU_A1,
+ LTQ_MUX_P2_2_EBU_A2,
+ LTQ_MUX_P2_3_EBU_A3,
+ LTQ_MUX_P2_4_EBU_A4,
+ LTQ_MUX_P2_5_EBU_A5,
+ LTQ_MUX_P2_6_EBU_A6,
+ LTQ_MUX_P2_7_EBU_A7,
+ LTQ_MUX_P2_8_EBU_A8,
+ LTQ_MUX_P2_9_EBU_A9,
+ LTQ_MUX_P2_10_EBU_A10,
+ LTQ_MUX_P2_11_EBU_A11,
+ LTQ_MUX_P2_12_EBU_RD,
+ LTQ_MUX_P2_13_EBU_WR,
+ LTQ_MUX_P2_14_EBU_ALE,
+ LTQ_MUX_P2_15_EBU_WAIT,
+ LTQ_MUX_P2_16_EBU_RDBY,
+ LTQ_MUX_P2_17_EBU_BC0,
+ LTQ_MUX_P2_18_EBU_BC1
+};
+
+static const struct ltq_mux_pin mux_p3[LTQ_MUX_P3_PINS] = {
+ LTQ_MUX_P3_0_EBU_AD0,
+ LTQ_MUX_P3_1_EBU_AD1,
+ LTQ_MUX_P3_2_EBU_AD2,
+ LTQ_MUX_P3_3_EBU_AD3,
+ LTQ_MUX_P3_4_EBU_AD4,
+ LTQ_MUX_P3_5_EBU_AD5,
+ LTQ_MUX_P3_6_EBU_AD6,
+ LTQ_MUX_P3_7_EBU_AD7,
+ LTQ_MUX_P3_8_EBU_AD8,
+ LTQ_MUX_P3_9_EBU_AD9,
+ LTQ_MUX_P3_10_EBU_AD10,
+ LTQ_MUX_P3_11_EBU_AD11,
+ LTQ_MUX_P3_12_EBU_AD12,
+ LTQ_MUX_P3_13_EBU_AD13,
+ LTQ_MUX_P3_14_EBU_AD14,
+ LTQ_MUX_P3_15_EBU_AD15,
+ LTQ_MUX_P3_16_EBU_CS0,
+ LTQ_MUX_P3_17_EBU_CS1,
+ LTQ_MUX_P3_18_EBU_CS2,
+ LTQ_MUX_P3_19_EBU_CS3
+};
+
+static void __init easy336_init_common(void)
+{
+ svip_sys1_clk_enable(SYS1_CLKENR_L2C |
+ SYS1_CLKENR_DDR2 |
+ SYS1_CLKENR_SMI2 |
+ SYS1_CLKENR_SMI1 |
+ SYS1_CLKENR_SMI0 |
+ SYS1_CLKENR_FMI0 |
+ SYS1_CLKENR_DMA |
+ SYS1_CLKENR_GPTC |
+ SYS1_CLKENR_EBU);
+
+ svip_sys2_clk_enable(SYS2_CLKENR_HWSYNC |
+ SYS2_CLKENR_MBS |
+ SYS2_CLKENR_SWINT |
+ SYS2_CLKENR_HWACC3 |
+ SYS2_CLKENR_HWACC2 |
+ SYS2_CLKENR_HWACC1 |
+ SYS2_CLKENR_HWACC0 |
+ SYS2_CLKENR_SIF7 |
+ SYS2_CLKENR_SIF6 |
+ SYS2_CLKENR_SIF5 |
+ SYS2_CLKENR_SIF4 |
+ SYS2_CLKENR_SIF3 |
+ SYS2_CLKENR_SIF2 |
+ SYS2_CLKENR_SIF1 |
+ SYS2_CLKENR_SIF0 |
+ SYS2_CLKENR_DFEV7 |
+ SYS2_CLKENR_DFEV6 |
+ SYS2_CLKENR_DFEV5 |
+ SYS2_CLKENR_DFEV4 |
+ SYS2_CLKENR_DFEV3 |
+ SYS2_CLKENR_DFEV2 |
+ SYS2_CLKENR_DFEV1 |
+ SYS2_CLKENR_DFEV0);
+
+ svip_register_mux(mux_p0, NULL, mux_p2, mux_p3, NULL);
+ svip_register_asc(0);
+ svip_register_eth();
+ svip_register_virtual_eth();
+ /* ltq_register_wdt(); - conflicts with lq_switch */
+ svip_register_gpio();
+ svip_register_spi();
+ ltq_register_tapi();
+}
+
+static void __init easy336_init(void)
+{
+ easy336_init_common();
+ ltq_register_nor(&easy336_flash_data);
+}
+
+static void __init easy336sf_init(void)
+{
+ easy336_init_common();
+ svip_register_spi_flash(bdinfo);
+}
+
+static void __init easy336nand_init(void)
+{
+ easy336_init_common();
+ svip_register_nand();
+}
+
+MIPS_MACHINE(LANTIQ_MACH_EASY336,
+ "EASY336",
+ "EASY336",
+ easy336_init);
+
+MIPS_MACHINE(LANTIQ_MACH_EASY336SF,
+ "EASY336SF",
+ "EASY336 (Serial Flash)",
+ easy336sf_init);
+
+MIPS_MACHINE(LANTIQ_MACH_EASY336NAND,
+ "EASY336NAND",
+ "EASY336 (NAND Flash)",
+ easy336nand_init);
+
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mux.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mux.c
new file mode 100644
index 0000000000..56805e55bb
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/mux.c
@@ -0,0 +1,187 @@
+/************************************************************************
+ *
+ * Copyright (c) 2007
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <asm/addrspace.h>
+#include <linux/platform_device.h>
+
+#include <lantiq_soc.h>
+#include <svip_mux.h>
+#include <sys1_reg.h>
+#include <sys2_reg.h>
+#include <svip_pms.h>
+
+#define DRV_NAME "ltq_mux"
+
+static void ltq_mux_port_init(const int port,
+ const struct ltq_mux_pin *pins,
+ const int pin_max)
+{
+ unsigned int i;
+
+ for (i = 0; i < pin_max; i++)
+ ltq_gpio_configure(port,
+ i,
+ pins[i].dirin,
+ pins[i].puen,
+ pins[i].altsel0,
+ pins[i].altsel1);
+}
+
+static int ltq_mux_probe(struct platform_device *pdev)
+{
+ struct ltq_mux_settings *mux_settings = dev_get_platdata(&pdev->dev);
+
+ if (mux_settings->mux_p0)
+ ltq_mux_port_init(0,
+ mux_settings->mux_p0,
+ LTQ_MUX_P0_PINS);
+
+ if (mux_settings->mux_p1)
+ ltq_mux_port_init(1,
+ mux_settings->mux_p1,
+ LTQ_MUX_P1_PINS);
+
+ if (mux_settings->mux_p2)
+ ltq_mux_port_init(2,
+ mux_settings->mux_p2,
+ LTQ_MUX_P2_PINS);
+
+ if (mux_settings->mux_p3)
+ ltq_mux_port_init(3,
+ mux_settings->mux_p3,
+ LTQ_MUX_P3_PINS);
+
+ if (mux_settings->mux_p4)
+ ltq_mux_port_init(4,
+ mux_settings->mux_p4,
+ LTQ_MUX_P4_PINS);
+
+ return 0;
+}
+
+int ltq_mux_read_procmem(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ int len = 0;
+ int t = 0, i = 0;
+ u32 port_clk[5] = {
+ SYS1_CLKENR_PORT0,
+ SYS1_CLKENR_PORT1,
+ SYS1_CLKENR_PORT2,
+ SYS1_CLKENR_PORT3,
+ SYS2_CLKENR_PORT4,
+ };
+
+#define PROC_PRINT(fmt, args...) \
+ do { \
+ int c_len = 0; \
+ c_len = snprintf(buf + len, count - len, fmt, ## args); \
+ if (c_len <= 0) \
+ goto out; \
+ if (c_len >= (count - len)) { \
+ len += (count - len); \
+ goto out; \
+ } \
+ len += c_len; \
+ if (offset > 0) { \
+ if (len > offset) { \
+ len -= offset; \
+ memmove(buf, buf + offset, len); \
+ offset = 0; \
+ } else { \
+ offset -= len; \
+ len = 0; \
+ } \
+ } \
+ } while (0)
+
+ PROC_PRINT("\nVINETIC-SVIP Multiplex Settings\n");
+ PROC_PRINT(" 3 2 1 0\n");
+ PROC_PRINT(" 10987654321098765432109876543210\n");
+ PROC_PRINT(" --------------------------------\n");
+
+ for (i = 0; i < ARRAY_SIZE(port_clk); i++) {
+ if (i < 4) {
+ if (!svip_sys1_clk_is_enabled(port_clk[i]))
+ continue;
+ } else {
+ if (!svip_sys2_clk_is_enabled(port_clk[i]))
+ continue;
+ }
+
+ PROC_PRINT("P%d.%-10s", i, "DIR:");
+
+ for (t = 31; t != -1; t--)
+ PROC_PRINT("%d", ltq_port_get_dir(i, t) == 1 ? 1 : 0);
+ PROC_PRINT("\n");
+
+ PROC_PRINT("P%d.%-10s", i, "PUEN:");
+ for (t = 31; t != -1; t--)
+ PROC_PRINT("%d", ltq_port_get_puden(i, t) == 1 ? 1 : 0);
+ PROC_PRINT("\n");
+
+ PROC_PRINT("P%d.%-10s", i, "ALTSEL0:");
+ for (t = 31; t != -1; t--)
+ PROC_PRINT("%d",
+ ltq_port_get_altsel0(i, t) == 1 ? 1 : 0);
+ PROC_PRINT("\n");
+
+ PROC_PRINT("P%d.%-10s", i, "ALTSEL1:");
+ for (t = 31; t != -1; t--)
+ PROC_PRINT("%d",
+ ltq_port_get_altsel1(i, t) == 1 ? 1 : 0);
+ PROC_PRINT("\n\n");
+ }
+
+out:
+ if (len < 0) {
+ len = 0;
+ *eof = 1;
+ } else if (len < count) {
+ *eof = 1;
+ } else {
+ len = count;
+ }
+
+ *start = buf;
+
+ return len;
+}
+
+static struct platform_driver ltq_mux_driver = {
+ .probe = ltq_mux_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init ltq_mux_init(void)
+{
+ int ret = platform_driver_register(&ltq_mux_driver);
+ if (ret) {
+ printk(KERN_ERR DRV_NAME
+ ": Error registering platform driver!\n");
+ return ret;
+ }
+
+ return create_proc_read_entry("driver/ltq_mux", 0, NULL,
+ ltq_mux_read_procmem, NULL) == NULL;
+}
+
+module_init(ltq_mux_init);
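
The probe above expects a struct ltq_mux_settings in the device's platform data and skips any port whose table is NULL. Below is a minimal sketch of how board code hands the per-port pin tables to this driver; it assumes this mirrors what svip_register_mux() in devices.c (not part of this excerpt) does, and all example_* names are placeholders.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <svip_mux.h>

/* Placeholder settings; the real tables come from the board file,
 * e.g. mux_p0/mux_p2/mux_p3 in mach-easy336.c above. */
static struct ltq_mux_settings example_mux_settings;

static struct platform_device example_mux_device = {
	.name = "ltq_mux",			/* must match DRV_NAME above */
	.dev.platform_data = &example_mux_settings,
};

int __init example_register_mux(const struct ltq_mux_pin *p0,
				const struct ltq_mux_pin *p1,
				const struct ltq_mux_pin *p2,
				const struct ltq_mux_pin *p3,
				const struct ltq_mux_pin *p4)
{
	example_mux_settings.mux_p0 = p0;
	example_mux_settings.mux_p1 = p1;
	example_mux_settings.mux_p2 = p2;
	example_mux_settings.mux_p3 = p3;
	example_mux_settings.mux_p4 = p4;

	return platform_device_register(&example_mux_device);
}
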
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/pms.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/pms.c
new file mode 100644
index 0000000000..5c0c808a04
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/pms.c
@@ -0,0 +1,101 @@
+/************************************************************************
+ *
+ * Copyright (c) 2007
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <asm/addrspace.h>
+
+#include <base_reg.h>
+#include <sys1_reg.h>
+#include <sys2_reg.h>
+#include <lantiq_soc.h>
+
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
+static struct svip_reg_sys2 *const sys2 = (struct svip_reg_sys2 *)LTQ_SYS2_BASE;
+
+void svip_sys1_clk_enable(u32 mask)
+{
+ sys1_w32(sys1_r32(clksr) | mask, clkenr);
+ asm("sync;");
+}
+EXPORT_SYMBOL(svip_sys1_clk_enable);
+
+int svip_sys1_clk_is_enabled(u32 mask)
+{
+ return (sys1_r32(clksr) & mask) != 0;
+}
+EXPORT_SYMBOL(svip_sys1_clk_is_enabled);
+
+void svip_sys2_clk_enable(u32 mask)
+{
+ sys2_w32(sys2_r32(clksr) | mask, clkenr);
+ asm("sync;");
+}
+EXPORT_SYMBOL(svip_sys2_clk_enable);
+
+int svip_sys2_clk_is_enabled(u32 mask)
+{
+ return (sys2_r32(clksr) & mask) != 0;
+}
+EXPORT_SYMBOL(svip_sys2_clk_is_enabled);
+
+int ltq_pms_read_procmem(char *buf, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ long len = 0;
+ int t = 0;
+ u32 bit = 0;
+ u32 reg_tmp, bits_tmp;
+
+ len = sprintf(buf, "\nSVIP PMS Settings\n");
+ len = len + sprintf(buf + len,
+ " 3 2 1 0\n");
+ len = len + sprintf(buf + len,
+ " 210987654321098765432109876543210\n");
+ len = len + sprintf(buf + len,
+ "---------------------------------------------\n");
+ len = len + sprintf(buf + len,
+ "SYS1_CLKSR: ");
+ reg_tmp = sys1_r32(clksr);
+ bit = 0x80000000;
+ for (t = 31; t != -1; t--) {
+ bits_tmp = (reg_tmp & bit) >> t;
+ len = len + sprintf(buf + len, "%d", bits_tmp);
+ bit = bit >> 1;
+ }
+ len = len + sprintf(buf + len, "\n\n");
+ len = len + sprintf(buf + len, "SYS2_CLKSR: ");
+ reg_tmp = sys2_r32(clksr);
+ bit = 0x80000000;
+ for (t = 31; t != -1; t--) {
+ bits_tmp = (reg_tmp & bit) >> t;
+ len = len + sprintf(buf + len, "%d", bits_tmp);
+ bit = bit >> 1;
+ }
+ len = len + sprintf(buf + len, "\n\n");
+
+ *eof = 1;
+
+ return len;
+}
+
+int __init ltq_pms_init_proc(void)
+{
+ return create_proc_read_entry("driver/ltq_pms", 0, NULL,
+ ltq_pms_read_procmem, NULL) == NULL;
+}
+
+module_init(ltq_pms_init_proc);
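
The svip_sys1/sys2 clock helpers exported here are what the board code above uses to gate peripheral clocks before touching a block's registers. A minimal sketch, assuming SYS1_CLKENR_EBU from sys1_reg.h as the example bit:

#include <sys1_reg.h>
#include <svip_pms.h>

/* Enable the EBU clock only if it is not already running. */
static void example_enable_ebu_clock(void)
{
	if (!svip_sys1_clk_is_enabled(SYS1_CLKENR_EBU))
		svip_sys1_clk_enable(SYS1_CLKENR_EBU);
}
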
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/prom.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/prom.c
new file mode 100644
index 0000000000..1c17531606
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/prom.c
@@ -0,0 +1,73 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/time.h>
+#include <asm/bootinfo.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+#include "../clk.h"
+#include "../machtypes.h"
+
+#include <base_reg.h>
+#include <ebu_reg.h>
+
+#define SOC_SVIP "SVIP"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFF000
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+
+static struct svip_reg_ebu *const ebu = (struct svip_reg_ebu *)LTQ_EBU_BASE;
+
+void __init ltq_soc_init(void)
+{
+ clkdev_add_static(ltq_svip_cpu_hz(), ltq_svip_fpi_hz(),
+ ltq_svip_io_region_clock());
+}
+
+void __init
+ltq_soc_setup(void)
+{
+ if (mips_machtype == LANTIQ_MACH_EASY33016 ||
+ mips_machtype == LANTIQ_MACH_EASY336) {
+ ebu_w32(0x120000f1, addr_sel_2);
+ ebu_w32(LTQ_EBU_CON_0_ADSWP |
+ LTQ_EBU_CON_0_SETUP |
+ LTQ_EBU_CON_0_BCGEN_VAL(0x02) |
+ LTQ_EBU_CON_0_WAITWRC_VAL(7) |
+ LTQ_EBU_CON_0_WAITRDC_VAL(3) |
+ LTQ_EBU_CON_0_HOLDC_VAL(3) |
+ LTQ_EBU_CON_0_RECOVC_VAL(3) |
+ LTQ_EBU_CON_0_CMULT_VAL(3), con_2);
+ }
+}
+
+void __init
+ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_STATUS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_STATUS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_SVIP:
+ i->name = SOC_SVIP;
+ i->type = SOC_TYPE_SVIP;
+ break;
+
+ default:
+ printk(KERN_ERR "unknown partnum : 0x%08X\n", i->partnum);
+ while (1);
+ break;
+ }
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/reset.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/reset.c
new file mode 100644
index 0000000000..5551875da6
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/reset.c
@@ -0,0 +1,95 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pm.h>
+#include <linux/module.h>
+#include <asm/reboot.h>
+
+#include <lantiq_soc.h>
+#include "../machtypes.h"
+#include <base_reg.h>
+#include <sys1_reg.h>
+#include <boot_reg.h>
+#include <ebu_reg.h>
+
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
+static struct svip_reg_ebu *const ebu = (struct svip_reg_ebu *)LTQ_EBU_BASE;
+
+#define CPLD_CMDREG3 ((volatile unsigned char*)(KSEG1 + 0x120000f3))
+extern void switchip_reset(void);
+
+static void ltq_machine_restart(char *command)
+{
+ printk(KERN_NOTICE "System restart\n");
+ local_irq_disable();
+
+ if (mips_machtype == LANTIQ_MACH_EASY33016 ||
+ mips_machtype == LANTIQ_MACH_EASY336) {
+ /* We just use the CPLD function to reset the entire system as a
+ workaround for the switch reset problem */
+ local_irq_disable();
+ ebu_w32(0x120000f1, addr_sel_2);
+ ebu_w32(0x404027ff, con_2);
+
+ if (mips_machtype == LANTIQ_MACH_EASY336)
+ /* set bit 0 to reset SVIP */
+ *CPLD_CMDREG3 = (1<<0);
+ else
+ /* set bit 7 to reset SVIP, set bit 3 to reset xT */
+ *CPLD_CMDREG3 = (1<<7) | (1<<3);
+ } else {
+ *LTQ_BOOT_RVEC(0) = 0;
+ /* reset all except PER, SUBSYS and CPU0 */
+ sys1_w32(0x00043F3E, rreqr);
+ /* release WDT0 reset */
+ sys1_w32(0x00000100, rrlsr);
+ /* restore reset value for clock enables */
+ sys1_w32(~0x0c000040, clkclr);
+ /* reset SUBSYS (incl. DDR2) and CPU0 */
+ sys1_w32(0x00030001, rbtr);
+ }
+
+ for (;;)
+ ;
+}
+
+static void ltq_machine_halt(void)
+{
+ printk(KERN_NOTICE "System halted.\n");
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+static void ltq_machine_power_off(void)
+{
+ printk(KERN_NOTICE "Please turn off the power now.\n");
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+/* This function is used by the watchdog driver */
+int ltq_reset_cause(void)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ltq_reset_cause);
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = ltq_machine_restart;
+ _machine_halt = ltq_machine_halt;
+ pm_power_off = ltq_machine_power_off;
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/switchip_setup.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/switchip_setup.c
new file mode 100644
index 0000000000..5da15327ea
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/svip/switchip_setup.c
@@ -0,0 +1,666 @@
+/******************************************************************************
+ Copyright (c) 2007, Infineon Technologies. All rights reserved.
+
+ No Warranty
+ Because the program is licensed free of charge, there is no warranty for
+ the program, to the extent permitted by applicable law. Except when
+ otherwise stated in writing the copyright holders and/or other parties
+ provide the program "as is" without warranty of any kind, either
+ expressed or implied, including, but not limited to, the implied
+ warranties of merchantability and fitness for a particular purpose. The
+ entire risk as to the quality and performance of the program is with
+ you. should the program prove defective, you assume the cost of all
+ necessary servicing, repair or correction.
+
+ In no event unless required by applicable law or agreed to in writing
+ will any copyright holder, or any other party who may modify and/or
+ redistribute the program as permitted above, be liable to you for
+ damages, including any general, special, incidental or consequential
+ damages arising out of the use or inability to use the program
+ (including but not limited to loss of data or data being rendered
+ inaccurate or losses sustained by you or third parties or a failure of
+ the program to operate with any other programs), even if such holder or
+ other party has been advised of the possibility of such damages.
+ ******************************************************************************
+ Module : switchip_setup.c
+ Date : 2007-11-09
+ Description : Basic setup of embedded ethernet switch "SwitchIP"
+ Remarks: andreas.schmidt@infineon.com
+
+ *****************************************************************************/
+
+/* TODO: get rid of #ifdef CONFIG_LANTIQ_MACH_EASY336 */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+
+#include <base_reg.h>
+#include <es_reg.h>
+#include <sys1_reg.h>
+#include <dma_reg.h>
+#include <lantiq_soc.h>
+
+static struct svip_reg_sys1 *const sys1 = (struct svip_reg_sys1 *)LTQ_SYS1_BASE;
+static struct svip_reg_es *const es = (struct svip_reg_es *)LTQ_ES_BASE;
+
+/* PHY Organizationally Unique Identifier (OUI) */
+#define PHY_OUI_PMC 0x00E004
+#define PHY_OUI_VITESSE 0x008083
+#define PHY_OUI_DEFAULT 0xFFFFFF
+
+unsigned short switchip_phy_read(unsigned int phyaddr, unsigned int regaddr);
+void switchip_phy_write(unsigned int phyaddr, unsigned int regaddr,
+ unsigned short data);
+
+static int phy_address[2] = {0, 1};
+static u32 phy_oui;
+static void switchip_mdio_poll_init(void);
+static void _switchip_mdio_poll(struct work_struct *work);
+
+/* struct workqueue_struct mdio_poll_task; */
+static struct workqueue_struct *mdio_poll_workqueue;
+DECLARE_DELAYED_WORK(mdio_poll_work, _switchip_mdio_poll);
+static int old_link_status[2] = {-1, -1};
+
+/**
+ * Autonegotiation check.
+ * This function checks for link changes. If a link change has occurred it will
+ * update certain switch registers.
+ */
+static void _switchip_check_phy_status(int port)
+{
+ int new_link_status;
+ unsigned short reg1;
+
+ reg1 = switchip_phy_read(phy_address[port], 1);
+ if ((reg1 == 0xFFFF) || (reg1 == 0x0000))
+ return; /* no PHY connected */
+
+ new_link_status = reg1 & 4;
+ if (old_link_status[port] ^ new_link_status) {
+ /* link status change */
+ if (!new_link_status) {
+ if (port == 0)
+ es_w32_mask(LTQ_ES_P0_CTL_REG_FLP, 0, p0_ctl);
+ else
+ es_w32_mask(LTQ_ES_P0_CTL_REG_FLP, 0, p1_ctl);
+
+ /* read again; link bit is latched low! */
+ reg1 = switchip_phy_read(phy_address[port], 1);
+ new_link_status = reg1 & 4;
+ }
+
+ if (new_link_status) {
+ unsigned short reg0, reg4, reg5, reg9, reg10;
+ int phy_pause, phy_speed, phy_duplex;
+ int aneg_enable, aneg_cmpt;
+
+ reg0 = switchip_phy_read(phy_address[port], 0);
+ reg4 = switchip_phy_read(phy_address[port], 4);
+ aneg_enable = reg0 & 0x1000;
+ aneg_cmpt = reg1 & 0x20;
+
+ if (aneg_enable && aneg_cmpt) {
+ reg5 = switchip_phy_read(phy_address[port], 5);
+ switch (phy_oui) {
+#ifdef CONFIG_LANTIQ_MACH_EASY336
+ case PHY_OUI_PMC:
+ /* PMC Sierra supports 1 Gigabit FD
+ * only. On successful
+ * auto-negotiation, we know this
+ * is what the link partner supports. */
+ phy_pause = ((reg4 & reg5) & 0x0080) >> 7;
+ phy_speed = 2;
+ phy_duplex = 1;
+ break;
+#endif
+ case PHY_OUI_VITESSE:
+ case PHY_OUI_DEFAULT:
+ reg9 = switchip_phy_read(phy_address[port], 9);
+ reg10 = switchip_phy_read(phy_address[port], 10);
+
+ /* Check if advertise and partner
+ * agree on pause */
+ phy_pause = ((reg4 & reg5) & 0x0400) >> 10;
+
+ /* Find the best mode both partners
+ * support
+ * Priority: 1GB-FD, 1GB-HD, 100MB-FD,
+ * 100MB-HD, 10MB-FD, 10MB-HD */
+ phy_speed = ((((reg9<<2) & reg10)
+ & 0x0c00) >> 6) |
+ (((reg4 & reg5) & 0x01e0) >> 5);
+
+ if (phy_speed >= 0x0020) {
+ phy_speed = 2;
+ phy_duplex = 1;
+ } else if (phy_speed >= 0x0010) {
+ phy_speed = 2;
+ phy_duplex = 0;
+ } else if (phy_speed >= 0x0008) {
+ phy_speed = 1;
+ phy_duplex = 1;
+ } else if (phy_speed >= 0x0004) {
+ phy_speed = 1;
+ phy_duplex = 0;
+ } else if (phy_speed >= 0x0002) {
+ phy_speed = 0;
+ phy_duplex = 1;
+ } else {
+ phy_speed = 0;
+ phy_duplex = 0;
+ }
+ break;
+ default:
+ phy_pause = (reg4 & 0x0400) >> 10;
+ phy_speed = (reg0 & 0x40 ? 2 : (reg0 >> 13)&1);
+ phy_duplex = (reg0 >> 8)&1;
+ break;
+ }
+ } else {
+ /* parallel detection or fixed speed */
+ phy_pause = (reg4 & 0x0400) >> 10;
+ phy_speed = (reg0 & 0x40 ? 2 : (reg0 >> 13)&1);
+ phy_duplex = (reg0 >> 8)&1;
+ }
+
+ if (port == 0) {
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0SPD,
+ LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(phy_speed),
+ rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0DUP,
+ LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(phy_duplex),
+ rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0FCE,
+ LTQ_ES_RGMII_CTL_REG_P0FCE_VAL(phy_pause),
+ rgmii_ctl);
+
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p0_ctl);
+ } else {
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1SPD,
+ LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(phy_speed),
+ rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1DUP,
+ LTQ_ES_RGMII_CTL_REG_P1DUP_VAL(phy_duplex),
+ rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1FCE,
+ LTQ_ES_RGMII_CTL_REG_P1FCE_VAL(phy_pause),
+ rgmii_ctl);
+
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p1_ctl);
+ }
+ }
+ }
+ old_link_status[port] = new_link_status;
+}
+
+static void _switchip_mdio_poll(struct work_struct *work)
+{
+ if (es_r32(sw_gctl0) & LTQ_ES_SW_GCTL0_REG_SE) {
+ _switchip_check_phy_status(0);
+ _switchip_check_phy_status(1);
+ }
+
+ queue_delayed_work(mdio_poll_workqueue, &mdio_poll_work, HZ/2);
+}
+
+static void switchip_mdio_poll_init(void)
+{
+ mdio_poll_workqueue = create_workqueue("SVIP MDIO poll");
+ INIT_DELAYED_WORK(&mdio_poll_work, _switchip_mdio_poll);
+
+ queue_delayed_work(mdio_poll_workqueue, &mdio_poll_work, HZ/2);
+
+}
+
+unsigned short switchip_phy_read(unsigned int phyaddr, unsigned int regaddr)
+{
+ /* TODO: protect MDIO access with semaphore */
+ es_w32(LTQ_ES_MDIO_CTL_REG_MBUSY
+ | LTQ_ES_MDIO_CTL_REG_OP_VAL(2) /* read operation */
+ | LTQ_ES_MDIO_CTL_REG_PHYAD_VAL(phyaddr)
+ | LTQ_ES_MDIO_CTL_REG_REGAD_VAL(regaddr), mdio_ctl);
+ while (es_r32(mdio_ctl) & LTQ_ES_MDIO_CTL_REG_MBUSY);
+
+ return es_r32(mdio_data) & 0xFFFF;
+}
+EXPORT_SYMBOL(switchip_phy_read);
+
+void switchip_phy_write(unsigned int phyaddr, unsigned int regaddr,
+ unsigned short data)
+{
+ /* TODO: protect MDIO access with semaphore */
+ es_w32(LTQ_ES_MDIO_CTL_REG_WD_VAL(data)
+ | LTQ_ES_MDIO_CTL_REG_MBUSY
+ | LTQ_ES_MDIO_CTL_REG_OP_VAL(1) /* write operation */
+ | LTQ_ES_MDIO_CTL_REG_PHYAD_VAL(phyaddr)
+ | LTQ_ES_MDIO_CTL_REG_REGAD_VAL(regaddr), mdio_ctl);
+ while (es_r32(mdio_ctl) & LTQ_ES_MDIO_CTL_REG_MBUSY);
+
+ return;
+}
+EXPORT_SYMBOL(switchip_phy_write);
+
+static const u32 switch_reset_offset_000[] = {
+ /*b8000000:*/ 0xffffffff, 0x00000001, 0x00000001, 0x00000003,
+ /*b8000010:*/ 0x04070001, 0x04070001, 0x04070001, 0xffffffff,
+ /*b8000020:*/ 0x00001be8, 0x00001be8, 0x00001be8, 0xffffffff,
+ /*b8000030:*/ 0x00000000, 0x00000000, 0x00080004, 0x00020001,
+ /*b8000040:*/ 0x00000000, 0x00000000, 0x00080004, 0x00020001,
+ /*b8000050:*/ 0x00000000, 0x00000000, 0x00080004, 0x00020001,
+ /*b8000060:*/ 0x00000000, 0x00000000, 0x00081000, 0x001f7777,
+ /*b8000070:*/ 0x00000000, 0x00000000, 0x0c00ac2b, 0x0000fa50,
+ /*b8000080:*/ 0x00001000, 0x00001800, 0x00000000, 0x00000000,
+ /*b8000090:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b80000a0:*/ 0x00000000, 0x00000050, 0x00000010, 0x00000000,
+ /*b80000b0:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b80000c0:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b80000d0:*/ 0xffffffff, 0x00000000, 0x00000000
+};
+static const u32 switch_reset_offset_100[] = {
+ /*b8000100:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000110:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000120:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000130:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000140:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000150:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000160:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000170:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000180:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b8000190:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b80001a0:*/ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ /*b80001b0:*/ 0x00000000, 0x00000000
+};
+
+/*
+ * Switch Reset.
+ */
+void switchip_reset(void)
+{
+ volatile unsigned int *reg;
+ volatile unsigned int rdreg;
+ int i;
+
+ sys1_w32(SYS1_CLKENR_ETHSW, clkenr);
+ asm("sync");
+
+ /* disable P0 */
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_SPS_VAL(1), p0_ctl);
+ /* disable P1 */
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_SPS_VAL(1), p1_ctl);
+ /* disable P2 */
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_SPS_VAL(1), p2_ctl);
+
+ /**************************************
+ * BEGIN: Procedure to clear MAC table
+ **************************************/
+ for (i = 0; i < 3; i++) {
+ int result;
+
+ /* check if access engine is available */
+ while (es_r32(adr_tb_st2) & LTQ_ES_ADR_TB_ST2_REG_BUSY);
+
+ /* initialise to first address */
+ es_w32(LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(3)
+ | LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(0), adr_tb_ctl2);
+
+ /* wait while busy */
+ while (es_r32(adr_tb_st2) & LTQ_ES_ADR_TB_ST2_REG_BUSY);
+
+ /* setup the portmap */
+ es_w32_mask(0, LTQ_ES_ADR_TB_CTL1_REG_PMAP_VAL(1 << i),
+ adr_tb_ctl1);
+
+ do {
+ /* search for addresses by port */
+ es_w32(LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(2)
+ | LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(9), adr_tb_ctl2);
+
+ /* wait while busy */
+ while (es_r32(adr_tb_st2) & LTQ_ES_ADR_TB_ST2_REG_BUSY);
+
+ result = LTQ_ES_ADR_TB_ST2_REG_RSLT_GET(es_r32(adr_tb_st2));
+ if (result == 0x101) {
+ printk(KERN_ERR "%s, cmd error\n", __func__);
+ return;
+ }
+ /* if Command OK, address found... */
+ if (result == 0) {
+ unsigned char mac[6];
+
+ mac[5] = (es_r32(adr_tb_st0) >> 0) & 0xff;
+ mac[4] = (es_r32(adr_tb_st0) >> 8) & 0xff;
+ mac[3] = (es_r32(adr_tb_st0) >> 16) & 0xff;
+ mac[2] = (es_r32(adr_tb_st0) >> 24) & 0xff;
+ mac[1] = (es_r32(adr_tb_st1) >> 0) & 0xff;
+ mac[0] = (es_r32(adr_tb_st1) >> 8) & 0xff;
+
+ /* setup address */
+ es_w32((mac[5] << 0) |
+ (mac[4] << 8) |
+ (mac[3] << 16) |
+ (mac[2] << 24), adr_tb_ctl0);
+ es_w32(LTQ_ES_ADR_TB_CTL1_REG_PMAP_VAL(1<<i) |
+ LTQ_ES_ADR_TB_CTL1_REG_FID_VAL(0) |
+ (mac[0] << 8) |
+ (mac[1] << 0), adr_tb_ctl1);
+ /* erase address */
+
+ es_w32(LTQ_ES_ADR_TB_CTL2_REG_CMD_VAL(1) |
+ LTQ_ES_ADR_TB_CTL2_REG_AC_VAL(15),
+ adr_tb_ctl2);
+
+ /* wait, while busy */
+ while (es_r32(adr_tb_st2) &
+ LTQ_ES_ADR_TB_ST2_REG_BUSY);
+ }
+ } while (result == 0);
+ }
+ /**************************************
+ * END: Procedure to clear MAC table
+ **************************************/
+
+ /* reset RMON counters */
+ es_w32(LTQ_ES_RMON_CTL_REG_BAS | LTQ_ES_RMON_CTL_REG_CAC_VAL(3),
+ rmon_ctl);
+
+ /* bring all registers to reset state */
+ reg = LTQ_ES_PS_REG;
+ for (i = 0; i < ARRAY_SIZE(switch_reset_offset_000); i++) {
+ if ((reg == LTQ_ES_PS_REG) ||
+ (reg >= LTQ_ES_ADR_TB_CTL0_REG &&
+ reg <= LTQ_ES_ADR_TB_ST2_REG))
+ continue;
+
+ if (switch_reset_offset_000[i] != 0xFFFFFFFF) {
+ /* write reset value to register */
+ *reg = switch_reset_offset_000[i];
+ /* read register value back */
+ rdreg = *reg;
+ if (reg == LTQ_ES_SW_GCTL1_REG)
+ rdreg &= ~LTQ_ES_SW_GCTL1_REG_BISTDN;
+ /* compare read value with written one */
+ if (rdreg != switch_reset_offset_000[i]) {
+ printk(KERN_ERR "%s,%d: reg %08x mismatch "
+ "[has:%08x, expect:%08x]\n",
+ __func__, __LINE__,
+ (unsigned int)reg, rdreg,
+ switch_reset_offset_000[i]);
+ }
+ }
+ reg++;
+ }
+
+ reg = LTQ_ES_VLAN_FLT0_REG;
+ for (i = 0; i < ARRAY_SIZE(switch_reset_offset_100); i++) {
+ *reg = switch_reset_offset_100[i];
+ rdreg = *reg;
+ if (rdreg != switch_reset_offset_100[i]) {
+ printk(KERN_ERR "%s,%d: reg %08x mismatch "
+ "[has:%08x, expect:%08x]\n", __func__, __LINE__,
+ (unsigned int)reg, rdreg,
+ switch_reset_offset_100[i]);
+ }
+ reg++;
+ }
+}
+EXPORT_SYMBOL(switchip_reset);
+
+static u32 get_phy_oui(unsigned char phy_addr)
+{
+ u32 oui;
+ int i, bit, byte, shift, w;
+ u16 reg_id[2];
+
+ /* read PHY identifier registers 1 and 2 */
+ reg_id[0] = switchip_phy_read(phy_addr, 2);
+ reg_id[1] = switchip_phy_read(phy_addr, 3);
+
+ oui = 0;
+ w = 1;
+ shift = 7;
+ byte = 1;
+ for (i = 0, bit = 10; i <= 21; i++, bit++) {
+ oui |= ((reg_id[w] & (1<<bit)) ? 1 : 0) << shift;
+ if (!(shift % 8)) {
+ byte++;
+ if (byte == 2)
+ shift = 15;
+ else
+ shift = 21;
+ } else {
+ shift--;
+ }
+ if (w == 1 && bit == 15) {
+ bit = -1;
+ w = 0;
+ }
+ }
+ return oui;
+}
+
+/*
+ * Switch Initialization.
+ */
+int switchip_init(void)
+{
+ int eth_port, phy_present = 0;
+ u16 reg, mode;
+
+ sys1_w32(SYS1_CLKENR_ETHSW, clkenr);
+ asm("sync");
+
+ /* Enable Switch, if not already done so */
+ if ((es_r32(sw_gctl0) & LTQ_ES_SW_GCTL0_REG_SE) == 0)
+ es_w32_mask(0, LTQ_ES_SW_GCTL0_REG_SE, sw_gctl0);
+ /* Wait for completion of MBIST */
+ while (LTQ_ES_SW_GCTL1_REG_BISTDN_GET(es_r32(sw_gctl1)) == 0);
+
+ switchip_reset();
+
+ mode = LTQ_ES_RGMII_CTL_REG_IS_GET(es_r32(rgmii_ctl));
+ eth_port = (mode == 2 ? 1 : 0);
+
+ /* Set the primary port (the port toward the backplane) as the sniffer
+ port, changing from P2, which is the reset setting */
+ es_w32_mask(LTQ_ES_SW_GCTL0_REG_SNIFFPN,
+ LTQ_ES_SW_GCTL0_REG_SNIFFPN_VAL(eth_port),
+ sw_gctl0);
+
+ /* Point MDIO state machine to invalid PHY addresses 8 and 9 */
+ es_w32_mask(0, LTQ_ES_SW_GCTL0_REG_PHYBA, sw_gctl0);
+
+ /* Add CRC for packets from DMA to PMAC.
+ Remove CRC for packets from PMAC to DMA. */
+ es_w32(LTQ_ES_PMAC_HD_CTL_RC | LTQ_ES_PMAC_HD_CTL_AC, pmac_hd_ctl);
+
+ phy_oui = get_phy_oui(0);
+ switch (phy_oui) {
+#ifdef CONFIG_LANTIQ_MACH_EASY336
+ case PHY_OUI_PMC:
+ phy_address[0] = (mode == 2 ? -1 : 2);
+ phy_address[1] = (mode == 2 ? 2 : -1);
+ break;
+#endif
+ case PHY_OUI_VITESSE:
+ default:
+ phy_oui = PHY_OUI_DEFAULT;
+ phy_address[0] = (mode == 2 ? 1 : 0);
+ phy_address[1] = (mode == 2 ? 0 : 1);
+ break;
+ }
+
+ /****** PORT 0 *****/
+ reg = switchip_phy_read(phy_address[0], 1);
+ if ((reg != 0x0000) && (reg != 0xffff)) {
+ /* PHY connected? */
+ phy_present |= 1;
+ /* Set Rx- and TxDelay in case of RGMII */
+ switch (mode) {
+ case 0: /* *RGMII,RGMII */
+ case 2: /* RGMII,*GMII */
+ /* program clock delay in PHY, not in SVIP */
+
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0RDLY, 0, rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0TDLY, 0, rgmii_ctl);
+ if (phy_oui == PHY_OUI_VITESSE ||
+ phy_oui == PHY_OUI_DEFAULT) {
+ switchip_phy_write(phy_address[0], 31, 0x0001);
+ switchip_phy_write(phy_address[0], 28, 0xA000);
+ switchip_phy_write(phy_address[0], 31, 0x0000);
+ }
+ default:
+ break;
+ }
+ if (phy_oui == PHY_OUI_VITESSE ||
+ phy_oui == PHY_OUI_DEFAULT) {
+ /* Program PHY advertisements and
+ * restart auto-negotiation */
+ switchip_phy_write(phy_address[0], 4, 0x05E1);
+ switchip_phy_write(phy_address[0], 9, 0x0300);
+ switchip_phy_write(phy_address[0], 0, 0x3300);
+ } else {
+ reg = switchip_phy_read(phy_address[1], 0);
+ reg |= 0x1000; /* auto-negotiation enable */
+ switchip_phy_write(phy_address[1], 0, reg);
+ reg |= 0x0200; /* auto-negotiation restart */
+ switchip_phy_write(phy_address[1], 0, reg);
+ }
+ } else {
+ /* Force SWITCH link with highest capability:
+ * 100M FD for MII
+ * 1G FD for GMII/RGMII
+ */
+ switch (mode) {
+ case 1: /* *MII,MII */
+ case 3: /* *MII,RGMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(1),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(1),
+ rgmii_ctl);
+ break;
+ case 0: /* *RGMII,RGMII */
+ case 2: /* RGMII,*GMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0DUP_VAL(1),
+ rgmii_ctl);
+
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P0RDLY, 0, rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P0TDLY_VAL(2),
+ rgmii_ctl);
+ break;
+ }
+
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p0_ctl);
+ }
+
+ /****** PORT 1 *****/
+ reg = switchip_phy_read(phy_address[1], 1);
+ if ((reg != 0x0000) && (reg != 0xffff)) {
+ /* PHY connected? */
+ phy_present |= 2;
+ /* Set Rx- and TxDelay in case of RGMII */
+ switch (mode) {
+ case 0: /* *RGMII,RGMII */
+ case 3: /* *MII,RGMII */
+ /* program clock delay in PHY, not in SVIP */
+
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1RDLY, 0, rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1TDLY, 0, rgmii_ctl);
+ if (phy_oui == PHY_OUI_VITESSE ||
+ phy_oui == PHY_OUI_DEFAULT) {
+ switchip_phy_write(phy_address[1], 31, 0x0001);
+ switchip_phy_write(phy_address[1], 28, 0xA000);
+ switchip_phy_write(phy_address[1], 31, 0x0000);
+ }
+ break;
+ case 2: /* RGMII,*GMII */
+
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+#ifdef CONFIG_LANTIQ_MACH_EASY336
+ if (phy_oui == PHY_OUI_PMC) {
+ switchip_phy_write(phy_address[1], 24, 0x0510);
+ switchip_phy_write(phy_address[1], 17, 0xA38C);
+ switchip_phy_write(phy_address[1], 17, 0xA384);
+ }
+#endif
+ break;
+ default:
+ break;
+ }
+ /* Program PHY advertisements and restart auto-negotiation */
+ if (phy_oui == PHY_OUI_VITESSE ||
+ phy_oui == PHY_OUI_DEFAULT) {
+ switchip_phy_write(phy_address[1], 4, 0x05E1);
+ switchip_phy_write(phy_address[1], 9, 0x0300);
+ switchip_phy_write(phy_address[1], 0, 0x3300);
+ } else {
+ reg = switchip_phy_read(phy_address[1], 0);
+ reg |= 0x1000; /* auto-negotiation enable */
+ switchip_phy_write(phy_address[1], 0, reg);
+ reg |= 0x0200; /* auto-negotiation restart */
+ switchip_phy_write(phy_address[1], 0, reg);
+ }
+ } else {
+ /* Force SWITCH link with highest capability:
+ * 100M FD for MII
+ * 1G FD for GMII/RGMII
+ */
+ switch (mode) {
+ case 1: /* *MII,MII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(1),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+ break;
+ case 0: /* *RGMII,RGMII */
+ case 3: /* *MII,RGMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+ es_w32_mask(LTQ_ES_RGMII_CTL_REG_P1RDLY, 0, rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1TDLY_VAL(2),
+ rgmii_ctl);
+ break;
+ case 2: /* RGMII,*GMII */
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1SPD_VAL(2),
+ rgmii_ctl);
+ es_w32_mask(0, LTQ_ES_RGMII_CTL_REG_P1DUP, rgmii_ctl);
+ break;
+ }
+ es_w32_mask(0, LTQ_ES_P0_CTL_REG_FLP, p0_ctl);
+ }
+
+ /*
+ * Allow unknown unicast/multicast and broadcasts
+ * on all ports.
+ */
+
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_UP_VAL(7), sw_gctl1);
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_BP_VAL(7), sw_gctl1);
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_MP_VAL(7), sw_gctl1);
+ es_w32_mask(0, LTQ_ES_SW_GCTL1_REG_RP_VAL(7), sw_gctl1);
+
+ /* Enable LAN port(s) */
+ if (eth_port == 0)
+ es_w32_mask(LTQ_ES_P0_CTL_REG_SPS, 0, p0_ctl);
+ else
+ es_w32_mask(LTQ_ES_P0_CTL_REG_SPS, 0, p1_ctl);
+ /* Enable CPU Port (Forwarding State) */
+ es_w32_mask(LTQ_ES_P0_CTL_REG_SPS, 0, p2_ctl);
+
+ if (phy_present)
+ switchip_mdio_poll_init();
+
+ return 0;
+}
+EXPORT_SYMBOL(switchip_init);
+
+device_initcall(switchip_init);
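
_switchip_check_phy_status() relies on the latched link bit in PHY register 1, which is why it reads the register twice on a down-to-up transition. A minimal sketch of the same idiom using the exported MDIO accessor; the PHY address is whatever the board wired up:

extern unsigned short switchip_phy_read(unsigned int phyaddr,
					unsigned int regaddr);

/* Returns non-zero if the PHY reports link up. The status register
 * latches link-down, so read it twice to get the current state. */
static int example_phy_link_up(unsigned int phyaddr)
{
	unsigned short bmsr;

	bmsr = switchip_phy_read(phyaddr, 1);	/* clears the latched value */
	bmsr = switchip_phy_read(phyaddr, 1);
	return (bmsr & 0x4) != 0;		/* bit 2: link status */
}
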
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/clk.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 0000000000..5d850dc7b3
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,329 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ltq_ram_clocks[] = {
+ CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
+
+#define BASIC_FREQUENCY_1 35328000
+#define BASIC_FREQUENCY_2 36000000
+#define BASIS_REQUENCY_USB 12000000
+
+#define GET_BITS(x, msb, lsb) \
+ (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
+
+/* legacy xway clock */
+#define LTQ_CGU_PLL0_CFG 0x0004
+#define LTQ_CGU_PLL1_CFG 0x0008
+#define LTQ_CGU_PLL2_CFG 0x000C
+#define LTQ_CGU_SYS 0x0010
+#define LTQ_CGU_UPDATE 0x0014
+#define LTQ_CGU_IF_CLK 0x0018
+#define LTQ_CGU_OSC_CON 0x001C
+#define LTQ_CGU_SMD 0x0020
+#define LTQ_CGU_CT1SR 0x0028
+#define LTQ_CGU_CT2SR 0x002C
+#define LTQ_CGU_PCMCR 0x0030
+#define LTQ_CGU_PCI_CR 0x0034
+#define LTQ_CGU_PD_PC 0x0038
+#define LTQ_CGU_FMR 0x003C
+
+#define CGU_PLL0_PHASE_DIVIDER_ENABLE \
+ (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
+#define CGU_PLL0_BYPASS \
+ (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
+#define CGU_PLL0_CFG_DSMSEL \
+ (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
+#define CGU_PLL0_CFG_FRAC_EN \
+ (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
+#define CGU_PLL1_SRC \
+ (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
+#define CGU_PLL2_PHASE_DIVIDER_ENABLE \
+ (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
+#define CGU_SYS_FPI_SEL (1 << 6)
+#define CGU_SYS_DDR_SEL 0x3
+#define CGU_PLL0_SRC (1 << 29)
+
+#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
+#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
+#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
+#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
+#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
+
+/* vr9 clock */
+#define LTQ_CGU_SYS_VR9 0x0c
+#define LTQ_CGU_IF_CLK_VR9 0x24
+
+
+static unsigned int ltq_get_pll0_fdiv(void);
+
+static inline unsigned int get_input_clock(int pll)
+{
+ switch (pll) {
+ case 0:
+ if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
+ return BASIS_REQUENCY_USB;
+ else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
+ return BASIC_FREQUENCY_1;
+ else
+ return BASIC_FREQUENCY_2;
+ case 1:
+ if (CGU_PLL1_SRC)
+ return BASIS_REQUENCY_USB;
+ else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
+ return BASIC_FREQUENCY_1;
+ else
+ return BASIC_FREQUENCY_2;
+ case 2:
+ switch (CGU_PLL2_SRC) {
+ case 0:
+ return ltq_get_pll0_fdiv();
+ case 1:
+ return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
+ BASIC_FREQUENCY_1 :
+ BASIC_FREQUENCY_2;
+ case 2:
+ return BASIS_REQUENCY_USB;
+ }
+ default:
+ return 0;
+ }
+}
+
+static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
+{
+ u64 res, clock = get_input_clock(pll);
+
+ res = num * clock;
+ do_div(res, den);
+ return res;
+}
+
+static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
+ unsigned int K)
+{
+ unsigned int num = ((N + 1) << 10) + K;
+ unsigned int den = (M + 1) << 10;
+
+ return cal_dsm(pll, num, den);
+}
+
+static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
+ unsigned int K)
+{
+ unsigned int num = ((N + 1) << 11) + K + 512;
+ unsigned int den = (M + 1) << 11;
+
+ return cal_dsm(pll, num, den);
+}
+
+static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
+ unsigned int K)
+{
+ unsigned int num = K >= 512 ?
+ ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
+ unsigned int den = (M + 1) << 12;
+
+ return cal_dsm(pll, num, den);
+}
+
+static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
+ unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
+{
+ if (!dsmsel)
+ return mash_dsm(pll, M, N, K);
+ else if (!phase_div_en)
+ return mash_dsm(pll, M, N, K);
+ else
+ return ssff_dsm_2(pll, M, N, K);
+}
+
+static inline unsigned int ltq_get_pll0_fosc(void)
+{
+ if (CGU_PLL0_BYPASS)
+ return get_input_clock(0);
+ else
+ return !CGU_PLL0_CFG_FRAC_EN
+ ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
+ CGU_PLL0_CFG_DSMSEL,
+ CGU_PLL0_PHASE_DIVIDER_ENABLE)
+ : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
+ CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
+ CGU_PLL0_PHASE_DIVIDER_ENABLE);
+}
+
+static unsigned int ltq_get_pll0_fdiv(void)
+{
+ unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
+
+ return (ltq_get_pll0_fosc() + (div >> 1)) / div;
+}
+
+unsigned long ltq_danube_io_region_clock(void)
+{
+ unsigned int ret = ltq_get_pll0_fosc();
+
+ switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0x3) {
+ default:
+ case 0:
+ return (ret + 1) / 2;
+ case 1:
+ return (ret * 2 + 2) / 5;
+ case 2:
+ return (ret + 1) / 3;
+ case 3:
+ return (ret + 2) / 4;
+ }
+}
+
+unsigned long ltq_danube_fpi_bus_clock(int fpi)
+{
+ unsigned long ret = ltq_danube_io_region_clock();
+
+ if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
+ ret >>= 1;
+ return ret;
+}
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+ unsigned long ddr_clock = DDR_HZ;
+
+ if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
+ return ddr_clock >> 1;
+ return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+ switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return DDR_HZ;
+ case 8:
+ return DDR_HZ << 1;
+ default:
+ return DDR_HZ >> 1;
+ }
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+ if (((ltq_cgu_r32(LTQ_CGU_SYS) >> 3) & 0x3) == 0x2)
+ return CLOCK_393M;
+ return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+ unsigned long sys = ltq_ar9_sys_hz();
+
+ if (ltq_cgu_r32(LTQ_CGU_SYS) & BIT(0))
+ return sys;
+ return sys >> 1;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+ if (ltq_cgu_r32(LTQ_CGU_SYS) & BIT(2))
+ return ltq_ar9_fpi_hz();
+ else
+ return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+ unsigned int cpu_sel;
+ unsigned long clk;
+
+ cpu_sel = (ltq_cgu_r32(LTQ_CGU_SYS_VR9) >> 4) & 0xf;
+
+ switch (cpu_sel) {
+ case 0:
+ clk = CLOCK_600M;
+ break;
+ case 1:
+ clk = CLOCK_500M;
+ break;
+ case 2:
+ clk = CLOCK_393M;
+ break;
+ case 3:
+ clk = CLOCK_333M;
+ break;
+ case 5:
+ case 6:
+ clk = CLOCK_196_608M;
+ break;
+ case 7:
+ clk = CLOCK_167M;
+ break;
+ case 4:
+ case 8:
+ case 9:
+ clk = CLOCK_125M;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+ unsigned int ocp_sel, cpu_clk;
+ unsigned long clk;
+
+ cpu_clk = ltq_vr9_cpu_hz();
+ ocp_sel = ltq_cgu_r32(LTQ_CGU_SYS_VR9) & 0x3;
+
+ switch (ocp_sel) {
+ case 0:
+ /* OCP ratio 1 */
+ clk = cpu_clk;
+ break;
+ case 2:
+ /* OCP ratio 2 */
+ clk = cpu_clk / 2;
+ break;
+ case 3:
+ /* OCP ratio 2.5 */
+ clk = (cpu_clk * 2) / 5;
+ break;
+ case 4:
+ /* OCP ratio 3 */
+ clk = cpu_clk / 3;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_bus_clock(int fpi)
+{
+ return ltq_vr9_fpi_hz();
+}
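
For the MASH case, mash_dsm() evaluates f_osc = f_in * (((N + 1) << 10) + K) / ((M + 1) << 10). A worked sketch with made-up divider values (the M/N/K below are illustrative, not read from hardware), assuming the 36 MHz input:

/* With M = 0, N = 9, K = 225 and a 36 MHz input:
 * num = (10 << 10) + 225 = 10465, den = 1 << 10 = 1024,
 * f_osc = 36 MHz * 10465 / 1024 ~= 367.9 MHz. */
static unsigned int example_mash_rate(void)
{
	unsigned long long num = ((9 + 1) << 10) + 225;
	unsigned long long den = (0 + 1) << 10;

	return (unsigned int)(36000000ULL * num / den);
}
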
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.c
new file mode 100644
index 0000000000..56086fa139
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.c
@@ -0,0 +1,70 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/mtd/physmap.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/etherdevice.h>
+#include <linux/reboot.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_irq.h>
+#include <lantiq_platform.h>
+
+#define LTQ_USB_IOMEM_BASE 0x1e101000
+#define LTQ_USB_IOMEM_SIZE 0x00001000
+
+static struct resource resources[] =
+{
+ [0] = {
+ .name = "dwc_otg_membase",
+ .start = LTQ_USB_IOMEM_BASE,
+ .end = LTQ_USB_IOMEM_BASE + LTQ_USB_IOMEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .name = "dwc_otg_irq",
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static u64 dwc_dmamask = (u32)0x1fffffff;
+
+static struct platform_device platform_dev = {
+ .name = "dwc_otg",
+ .dev = {
+ .dma_mask = &dwc_dmamask,
+ },
+ .resource = resources,
+ .num_resources = ARRAY_SIZE(resources),
+};
+
+int __init
+xway_register_dwc(int pin)
+{
+ struct irq_data d;
+ resources[1].start = ltq_is_ase() ? LTQ_USB_ASE_INT : LTQ_USB_INT;
+ d.irq = resources[1].start;
+ ltq_enable_irq(&d);
+ platform_dev.dev.platform_data = (void*) pin;
+ return platform_device_register(&platform_dev);
+}
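
Board code calls the helper with the GPIO used for USB port power; the pin ends up as the dwc_otg driver's platform data. A minimal sketch; GPIO 14 is a placeholder, not a value taken from a real board:

#include <linux/init.h>
#include "dev-dwc_otg.h"

static void __init example_board_usb_init(void)
{
	/* Placeholder power pin; real boards pass their own GPIO. */
	xway_register_dwc(14);
}
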
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.h
new file mode 100644
index 0000000000..521fad05ef
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-dwc_otg.h
@@ -0,0 +1,17 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_DEV_DWC_H__
+#define _LTQ_DEV_DWC_H__
+
+#include <lantiq_platform.h>
+
+extern void __init xway_register_dwc(int pin);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.c
new file mode 100644
index 0000000000..ea08a359ff
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.c
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/mtd/physmap.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/etherdevice.h>
+#include <linux/reboot.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/leds.h>
+
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_irq.h>
+#include <lantiq_platform.h>
+
+static u64 dmamask = (u32)0x1fffffff;
+
+static struct platform_device platform_dev = {
+ .name = "ifxusb_hcd",
+ .dev.dma_mask = &dmamask,
+};
+
+int __init
+xway_register_hcd(int *pins)
+{
+ platform_dev.dev.platform_data = pins;
+ return platform_device_register(&platform_dev);
+}
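
The ifxusb_hcd helper takes an array of power GPIOs and simply forwards it as platform data. A minimal sketch; the pin numbers and the -1 terminator are assumptions about how the driver consumes the array, not something shown in this diff:

#include <linux/init.h>
#include "dev-ifxhcd.h"

static int example_hcd_pins[] = { 14, 5, -1 };	/* placeholder GPIOs */

static void __init example_board_hcd_init(void)
{
	xway_register_hcd(example_hcd_pins);
}
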
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.h
new file mode 100644
index 0000000000..18b3d2dae4
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-ifxhcd.h
@@ -0,0 +1,17 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _LTQ_DEV_HCD_H__
+#define _LTQ_DEV_HCD_H__
+
+#include <lantiq_platform.h>
+
+extern void __init xway_register_hcd(int *pin);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.c
new file mode 100644
index 0000000000..a75abe3e00
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011 Andrej Vlašić <andrej.vlasic0@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/ath5k_platform.h>
+#include <linux/ath9k_platform.h>
+#include <linux/pci.h>
+
+#include "dev-wifi-athxk.h"
+
+extern int (*ltqpci_plat_dev_init)(struct pci_dev *dev);
+struct ath5k_platform_data ath5k_pdata;
+struct ath9k_platform_data ath9k_pdata = {
+ .led_pin = -1,
+ .endian_check = true,
+};
+
+static int
+ath5k_pci_plat_dev_init(struct pci_dev *dev)
+{
+ dev->dev.platform_data = &ath5k_pdata;
+ return 0;
+}
+
+static int
+ath9k_pci_plat_dev_init(struct pci_dev *dev)
+{
+ dev->dev.platform_data = &ath9k_pdata;
+ return 0;
+}
+
+void __init
+ltq_register_ath5k(u16 *eeprom_data, u8 *macaddr)
+{
+ ath5k_pdata.eeprom_data = eeprom_data;
+ ath5k_pdata.macaddr = macaddr;
+ ltqpci_plat_dev_init = ath5k_pci_plat_dev_init;
+}
+
+void __init
+ltq_register_ath9k(u16 *eeprom_data, u8 *macaddr)
+{
+ memcpy(ath9k_pdata.eeprom_data, eeprom_data, sizeof(ath9k_pdata.eeprom_data));
+ ath9k_pdata.macaddr = macaddr;
+ ltqpci_plat_dev_init = ath9k_pci_plat_dev_init;
+}
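
ltq_register_ath9k() copies the caller's EEPROM image into ath9k_pdata and installs the PCI fixup, so the buffers must be valid at registration time. A minimal sketch; on real boards the EEPROM words and MAC are read from a board_config flash partition, the static buffers here are placeholders:

#include <linux/init.h>
#include <linux/ath9k_platform.h>
#include "dev-wifi-athxk.h"

static u16 example_ath9k_eeprom[ATH9K_PLAT_EEP_MAX_WORDS];	/* placeholder */
static u8 example_ath9k_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

static void __init example_board_wifi_init(void)
{
	ltq_register_ath9k(example_ath9k_eeprom, example_ath9k_mac);
}
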
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.h
new file mode 100644
index 0000000000..5fdb46b612
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-athxk.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011 Andrej Vlašić <andrej.vlasic0@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _DEV_WIFI_ATHXK_H__
+#define _DEV_WIFI_ATHXK_H__
+
+extern void ltq_register_ath5k(u16 *eeprom_data, u8 *macaddr);
+extern void ltq_register_ath9k(u16 *eeprom_data, u8 *macaddr);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.c
new file mode 100644
index 0000000000..8e271f06ec
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/rt2x00_platform.h>
+#include <linux/pci.h>
+
+#include "dev-wifi-rt2x00.h"
+
+extern int (*ltqpci_plat_dev_init)(struct pci_dev *dev);
+struct rt2x00_platform_data rt2x00_pdata;
+
+static int
+rt2x00_pci_plat_dev_init(struct pci_dev *dev)
+{
+ dev->dev.platform_data = &rt2x00_pdata;
+ return 0;
+}
+
+void __init
+ltq_register_rt2x00(const char *firmware, const u8 *mac)
+{
+ rt2x00_pdata.eeprom_file_name = kstrdup(firmware, GFP_KERNEL);
+ rt2x00_pdata.mac_address = mac;
+ ltqpci_plat_dev_init = rt2x00_pci_plat_dev_init;
+}
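
The rt2x00 hook just records an EEPROM file name and a MAC pointer and installs the PCI fixup. A minimal sketch; the file name and address below are placeholders:

#include <linux/init.h>
#include "dev-wifi-rt2x00.h"

static const u8 example_rt2x00_mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

static void __init example_board_rt2x00_init(void)
{
	/* placeholder EEPROM file name */
	ltq_register_rt2x00("rt2x00.eeprom", example_rt2x00_mac);
}
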
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.h
new file mode 100644
index 0000000000..941c26535f
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/dev-wifi-rt2x00.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _DEV_WIFI_RT2X00_H__
+#define _DEV_WIFI_RT2X00_H__
+
+extern void ltq_register_rt2x00(const char *firmware, const u8 *mac);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/gptu.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/gptu.c
new file mode 100644
index 0000000000..ac82c37eba
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/gptu.c
@@ -0,0 +1,176 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/pm.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/reboot.h>
+
+#include <lantiq_soc.h>
+#include "../clk.h"
+
+#include "../devices.h"
+
+#define ltq_gptu_w32(x, y) ltq_w32((x), ltq_gptu_membase + (y))
+#define ltq_gptu_r32(x) ltq_r32(ltq_gptu_membase + (x))
+
+
+/* the magic ID byte of the core */
+#define GPTU_MAGIC 0x59
+/* clock control register */
+#define GPTU_CLC 0x00
+/* id register */
+#define GPTU_ID 0x08
+/* interrupt node enable */
+#define GPTU_IRNEN 0xf4
+/* interrupt control register */
+#define GPTU_IRCR 0xf8
+/* interrupt capture register */
+#define GPTU_IRNCR 0xfc
+/* there are 3 identical blocks of 2 timers. calculate register offsets */
+#define GPTU_SHIFT(x) (x % 2 ? 4 : 0)
+#define GPTU_BASE(x) (((x >> 1) * 0x20) + 0x10)
+/* timer control register */
+#define GPTU_CON(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x00)
+/* timer auto reload register */
+#define GPTU_RUN(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x08)
+/* timer manual reload register */
+#define GPTU_RLD(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x10)
+/* timer count register */
+#define GPTU_CNT(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x18)
+
+/* GPTU_CON(x) */
+#define CON_CNT BIT(2)
+#define CON_EDGE_FALL BIT(7)
+#define CON_SYNC BIT(8)
+#define CON_CLK_INT BIT(10)
+
+/* GPTU_RUN(x) */
+#define RUN_SEN BIT(0)
+#define RUN_RL BIT(2)
+
+/* set clock to runmode */
+#define CLC_RMC BIT(8)
+/* bring core out of suspend */
+#define CLC_SUSPEND BIT(4)
+/* the disable bit */
+#define CLC_DISABLE BIT(0)
+
+#define TIMER_INTERRUPT (INT_NUM_IM3_IRL0 + 22)
+
+enum gptu_timer {
+ TIMER1A = 0,
+ TIMER1B,
+ TIMER2A,
+ TIMER2B,
+ TIMER3A,
+ TIMER3B
+};
+
+static struct resource ltq_gptu_resource =
+ MEM_RES("GPTU", LTQ_GPTU_BASE_ADDR, LTQ_GPTU_SIZE);
+
+static void __iomem *ltq_gptu_membase;
+
+static irqreturn_t timer_irq_handler(int irq, void *priv)
+{
+ int timer = irq - TIMER_INTERRUPT;
+ ltq_gptu_w32(1 << timer, GPTU_IRNCR);
+ return IRQ_HANDLED;
+}
+
+static void gptu_hwinit(void)
+{
+ struct clk *clk = clk_get_sys("ltq_gptu", NULL);
+ clk_enable(clk);
+ ltq_gptu_w32(0x00, GPTU_IRNEN);
+ ltq_gptu_w32(0xff, GPTU_IRNCR);
+ ltq_gptu_w32(CLC_RMC | CLC_SUSPEND, GPTU_CLC);
+}
+
+static void gptu_hwexit(void)
+{
+ ltq_gptu_w32(0x00, GPTU_IRNEN);
+ ltq_gptu_w32(0xff, GPTU_IRNCR);
+ ltq_gptu_w32(CLC_DISABLE, GPTU_CLC);
+}
+
+static int ltq_gptu_enable(struct clk *clk)
+{
+ int ret = request_irq(TIMER_INTERRUPT + clk->bits, timer_irq_handler,
+ IRQF_TIMER, "timer", NULL);
+ if (ret) {
+ pr_err("gptu: failed to request irq\n");
+ return ret;
+ }
+
+ ltq_gptu_w32(CON_CNT | CON_EDGE_FALL | CON_SYNC | CON_CLK_INT,
+ GPTU_CON(clk->bits));
+ ltq_gptu_w32(1, GPTU_RLD(clk->bits));
+ ltq_gptu_w32(ltq_gptu_r32(GPTU_IRNEN) | clk->bits, GPTU_IRNEN);
+ ltq_gptu_w32(RUN_SEN | RUN_RL, GPTU_RUN(clk->bits));
+ return 0;
+}
+
+static void ltq_gptu_disable(struct clk *clk)
+{
+ ltq_gptu_w32(0, GPTU_RUN(clk->bits));
+ ltq_gptu_w32(0, GPTU_CON(clk->bits));
+ ltq_gptu_w32(0, GPTU_RLD(clk->bits));
+ ltq_gptu_w32(ltq_gptu_r32(GPTU_IRNEN) & ~clk->bits, GPTU_IRNEN);
+ free_irq(TIMER_INTERRUPT + clk->bits, NULL);
+}
+
+static inline void clkdev_add_gptu(const char *con, unsigned int timer)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = "ltq_gptu";
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = ltq_gptu_enable;
+ clk->disable = ltq_gptu_disable;
+ clk->bits = timer;
+ clkdev_add(&clk->cl);
+}
+
+static int __init gptu_setup(void)
+{
+ /* remap gptu register range */
+ ltq_gptu_membase = ltq_remap_resource(&ltq_gptu_resource);
+ if (!ltq_gptu_membase)
+ panic("Failed to remap gptu memory");
+
+ /* power up the core */
+ gptu_hwinit();
+
+ /* the gptu has a ID register */
+ if (((ltq_gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
+ pr_err("gptu: failed to find magic\n");
+ gptu_hwexit();
+ return -ENAVAIL;
+ }
+
+ /* register the clocks */
+ clkdev_add_gptu("timer1a", TIMER1A);
+ clkdev_add_gptu("timer1b", TIMER1B);
+ clkdev_add_gptu("timer2a", TIMER2A);
+ clkdev_add_gptu("timer2b", TIMER2B);
+ clkdev_add_gptu("timer3a", TIMER3A);
+ clkdev_add_gptu("timer3b", TIMER3B);
+
+ pr_info("gptu: 6 timers loaded\n");
+
+ return 0;
+}
+
+arch_initcall(gptu_setup);
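
Consumers obtain one of the six timers through the clock framework using the device name "ltq_gptu" and one of the con_ids registered above; clk_enable() then requests the IRQ and starts the timer. A minimal sketch:

#include <linux/clk.h>
#include <linux/err.h>

static int example_start_timer1a(void)
{
	struct clk *timer = clk_get_sys("ltq_gptu", "timer1a");

	if (IS_ERR(timer))
		return PTR_ERR(timer);

	return clk_enable(timer);	/* sets up GPTU_CON/RUN and the IRQ */
}
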
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-arv.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-arv.c
new file mode 100644
index 0000000000..6857e993d2
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-arv.c
@@ -0,0 +1,793 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/etherdevice.h>
+#include <linux/ath5k_platform.h>
+#include <linux/ath9k_platform.h>
+#include <linux/pci.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_platform.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+
+#include "../machtypes.h"
+#include "dev-wifi-rt2x00.h"
+#include "dev-wifi-athxk.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+#include "pci-ath-fixup.h"
+
+static struct mtd_partition arv45xx_brnboot_partitions[] =
+{
+ {
+ .name = "brn-boot",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "config",
+ .offset = 0x20000,
+ .size = 0x30000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x50000,
+ .size = 0x390000,
+ },
+ {
+ .name = "reserved", /* 12-byte signature at 0x3efff4 :/ */
+ .offset = 0x3e0000,
+ .size = 0x010000,
+ },
+ {
+ .name = "eeprom",
+ .offset = 0x3f0000,
+ .size = 0x10000,
+ },
+};
+
+static struct mtd_partition arv75xx_brnboot_partitions[] =
+{
+ {
+ .name = "brn-boot",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "config",
+ .offset = 0x20000,
+ .size = 0x40000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x440000,
+ .size = 0x3a0000,
+ },
+ {
+ .name = "reserved", /* 12-byte signature at 0x7efff4 :/ */
+ .offset = 0x7e0000,
+ .size = 0x010000,
+ },
+ {
+ .name = "board_config",
+ .offset = 0x7f0000,
+ .size = 0x10000,
+ },
+};
+
+/*
+ * this is the generic configuration for all arv based boards; note that it
+ * can be rewritten in arv_load_nor()
+ */
+static struct mtd_partition arv_partitions[] =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x20000,
+ .size = 0x10000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x30000,
+ .size = 0x3c0000,
+ },
+ {
+ .name = "board_config",
+ .offset = 0x3f0000,
+ .size = 0x10000,
+ },
+};
+
+static struct physmap_flash_data arv45xx_brnboot_flash_data = {
+ .nr_parts = ARRAY_SIZE(arv45xx_brnboot_partitions),
+ .parts = arv45xx_brnboot_partitions,
+};
+
+static struct physmap_flash_data arv75xx_brnboot_flash_data = {
+ .nr_parts = ARRAY_SIZE(arv75xx_brnboot_partitions),
+ .parts = arv75xx_brnboot_partitions,
+};
+
+static struct physmap_flash_data arv_flash_data = {
+ .nr_parts = ARRAY_SIZE(arv_partitions),
+ .parts = arv_partitions,
+};
+
+static struct ltq_pci_data ltq_pci_data = {
+ .clock = PCI_CLOCK_EXT,
+ .gpio = PCI_GNT1 | PCI_REQ1,
+ .irq = {
+ [14] = INT_NUM_IM0_IRL0 + 22,
+ },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_RMII,
+};
+
+static struct gpio_led
+arv4510pw_gpio_leds[] __initdata = {
+ { .name = "soc:green:foo", .gpio = 4, .active_low = 1, },
+};
+
+static struct gpio_led
+arv4518pw_gpio_leds[] __initdata = {
+ { .name = "soc:green:power", .gpio = 3, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:adsl", .gpio = 4, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:internet", .gpio = 5, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wifi", .gpio = 6, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:yellow:wps", .gpio = 7, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:fail", .gpio = 8, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:usb", .gpio = 19, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:voip", .gpio = 100, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:fxs1", .gpio = 101, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:fxs2", .gpio = 102, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:fxo", .gpio = 103, .active_low = 1, .default_trigger = "default-on" },
+};
+
+static struct gpio_keys_button
+arv4518pw_gpio_keys[] __initdata = {
+ {
+ .desc = "wifi",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 28,
+ .active_low = 1,
+ },
+ {
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 30,
+ .active_low = 1,
+ },
+ {
+ .desc = "wps",
+ .type = EV_KEY,
+ .code = BTN_2,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 29,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led
+arv4519pw_gpio_leds[] __initdata = {
+ { .name = "soc:red:power", .gpio = 7, .active_low = 1, },
+ { .name = "soc:green:power", .gpio = 2, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wifi", .gpio = 6, .active_low = 1, },
+ { .name = "soc:green:adsl", .gpio = 4, .active_low = 1, },
+ { .name = "soc:green:internet", .gpio = 5, .active_low = 1, },
+ { .name = "soc:red:internet", .gpio = 8, .active_low = 1, },
+ { .name = "soc:green:voip", .gpio = 100, .active_low = 1, },
+ { .name = "soc:green:phone1", .gpio = 101, .active_low = 1, },
+ { .name = "soc:green:phone2", .gpio = 102, .active_low = 1, },
+ { .name = "soc:green:fxo", .gpio = 103, .active_low = 1, },
+ { .name = "soc:green:usb", .gpio = 19, .active_low = 1, },
+ { .name = "soc:orange:wps", .gpio = 104, .active_low = 1, },
+ { .name = "soc:green:wps", .gpio = 105, .active_low = 1, },
+ { .name = "soc:red:wps", .gpio = 106, .active_low = 1, },
+
+};
+
+static struct gpio_keys_button
+arv4519pw_gpio_keys[] __initdata = {
+ {
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 30,
+ .active_low = 1,
+ },
+ {
+ .desc = "wlan",
+ .type = EV_KEY,
+ .code = BTN_2,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 28,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led
+arv4520pw_gpio_leds[] __initdata = {
+ { .name = "soc:blue:power", .gpio = 3, .active_low = 1, },
+ { .name = "soc:blue:adsl", .gpio = 4, .active_low = 1, },
+ { .name = "soc:blue:internet", .gpio = 5, .active_low = 1, },
+ { .name = "soc:red:power", .gpio = 6, .active_low = 1, },
+ { .name = "soc:yellow:wps", .gpio = 7, .active_low = 1, },
+ { .name = "soc:red:wps", .gpio = 9, .active_low = 1, },
+ { .name = "soc:blue:voip", .gpio = 100, .active_low = 1, },
+ { .name = "soc:blue:fxs1", .gpio = 101, .active_low = 1, },
+ { .name = "soc:blue:fxs2", .gpio = 102, .active_low = 1, },
+ { .name = "soc:blue:fxo", .gpio = 103, .active_low = 1, },
+ { .name = "soc:blue:voice", .gpio = 104, .active_low = 1, },
+ { .name = "soc:blue:usb", .gpio = 105, .active_low = 1, },
+ { .name = "soc:blue:wifi", .gpio = 106, .active_low = 1, },
+};
+
+static struct gpio_led
+arv452cpw_gpio_leds[] __initdata = {
+ { .name = "soc:blue:power", .gpio = 3, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:adsl", .gpio = 4, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:isdn", .gpio = 5, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:power", .gpio = 6, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:yellow:wps", .gpio = 7, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:wps", .gpio = 9, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:fxs1", .gpio = 100, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:fxs2", .gpio = 101, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:wps", .gpio = 102, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:fxo", .gpio = 103, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:voice", .gpio = 104, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:usb", .gpio = 105, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:wifi", .gpio = 106, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:internet", .gpio = 108, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:internet", .gpio = 109, .active_low = 1, .default_trigger = "default-on" },
+};
+
+static struct gpio_led
+arv4525pw_gpio_leds[] __initdata = {
+ { .name = "soc:green:dsl", .gpio = 6, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wifi", .gpio = 8, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:online", .gpio = 9, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:fxs-internet", .gpio = 5, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:fxs-festnetz", .gpio = 4, .active_low = 1, .default_trigger = "default-on" },
+};
+
+#define ARV4525PW_PHYRESET 13
+#define ARV4525PW_RELAY 31
+
+static struct gpio
+arv4525pw_gpios[] __initdata = {
+ { ARV4525PW_PHYRESET, GPIOF_OUT_INIT_HIGH, "phyreset" },
+ { ARV4525PW_RELAY, GPIOF_OUT_INIT_HIGH, "relay" },
+};
+
+
+static struct gpio_led
+arv752dpw22_gpio_leds[] __initdata = {
+ { .name = "soc:blue:power", .gpio = 3, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:internet", .gpio = 5, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:power", .gpio = 6, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:wps", .gpio = 8, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:fxo", .gpio = 103, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:voice", .gpio = 104, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:usb", .gpio = 105, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wifi", .gpio = 106, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wifi1", .gpio = 107, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:wifi", .gpio = 108, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:blue:wifi1", .gpio = 109, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:eth1", .gpio = 111, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:eth2", .gpio = 112, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:eth3", .gpio = 113, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:eth4", .gpio = 114, .active_low = 1, .default_trigger = "default-on", },
+};
+
+static struct gpio_keys_button
+arv752dpw22_gpio_keys[] __initdata = {
+ {
+ .desc = "btn0",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 12,
+ .active_low = 1,
+ },
+ {
+ .desc = "btn1",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 13,
+ .active_low = 1,
+ },
+ {
+ .desc = "btn2",
+ .type = EV_KEY,
+ .code = BTN_2,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 28,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led
+arv7518pw_gpio_leds[] __initdata = {
+ { .name = "soc:red:power", .gpio = 7, .active_low = 1, },
+ { .name = "soc:green:power", .gpio = 2, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wifi", .gpio = 6, .active_low = 1, },
+ { .name = "soc:green:adsl", .gpio = 4, .active_low = 1, },
+ { .name = "soc:green:internet", .gpio = 5, .active_low = 1, },
+ { .name = "soc:red:internet", .gpio = 8, .active_low = 1, },
+ { .name = "soc:green:voip", .gpio = 100, .active_low = 1, },
+ { .name = "soc:green:phone1", .gpio = 101, .active_low = 1, },
+ { .name = "soc:green:phone2", .gpio = 102, .active_low = 1, },
+ { .name = "soc:orange:fail", .gpio = 103, .active_low = 1, },
+ { .name = "soc:green:usb", .gpio = 19, .active_low = 1, },
+ { .name = "soc:orange:wps", .gpio = 104, .active_low = 1, },
+ { .name = "soc:green:wps", .gpio = 105, .active_low = 1, },
+ { .name = "soc:red:wps", .gpio = 106, .active_low = 1, },
+
+};
+
+static struct gpio_keys_button
+arv7518pw_gpio_keys[] __initdata = {
+ /*{
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 23,
+ .active_low = 1,
+ },*/
+ {
+ .desc = "wifi",
+ .type = EV_KEY,
+ .code = BTN_2,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 25,
+ .active_low = 1,
+ },
+};
+
+static struct gpio_keys_button
+arv7525pw_gpio_keys[] __initdata = {
+ {
+ .desc = "restart",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 29,
+ .active_low = 1,
+ },
+};
+
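+/*
+ * Pick the NOR layout at boot: brnboot devices get one of the fixed
+ * partition tables above, while on u-boot devices the first few 64k
+ * sectors are scanned for the uImage magic (0x27051956) and the generic
+ * arv_partitions table is resized around the sector that contains it.
+ */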
+static void __init
+arv_load_nor(unsigned int max)
+{
+#define UBOOT_MAGIC 0x27051956
+
+ int i;
+ int sector = -1;
+
+ if (ltq_brn_boot) {
+ if (max == 0x800000)
+ ltq_register_nor(&arv75xx_brnboot_flash_data);
+ else
+ ltq_register_nor(&arv45xx_brnboot_flash_data);
+ return;
+ }
+
+ for (i = 1; i < 4 && sector < 0; i++) {
+ unsigned int uboot_magic;
+ memcpy_fromio(&uboot_magic, (void *)KSEG1ADDR(LTQ_FLASH_START) + (i * 0x10000), 4);
+ if (uboot_magic == UBOOT_MAGIC)
+ sector = i;
+ }
+
+ if (sector < 0)
+ return;
+
+ arv_partitions[0].size = arv_partitions[1].offset = (sector - 1) * 0x10000;
+ arv_partitions[2].offset = arv_partitions[0].size + 0x10000;
+ arv_partitions[2].size = max - arv_partitions[2].offset - 0x10000;
+ arv_partitions[3].offset = max - 0x10000;
+ ltq_register_nor(&arv_flash_data);
+}
+
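+/*
+ * The board MAC lives at a fixed offset in NOR flash (inside the
+ * board_config/eeprom partition); copy it straight from the memory
+ * mapped flash into the etop platform data.
+ */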
+static void __init
+arv_register_ethernet(unsigned int mac_addr)
+{
+ memcpy_fromio(&ltq_eth_data.mac.sa_data,
+ (void *)KSEG1ADDR(LTQ_FLASH_START + mac_addr), 6);
+ ltq_register_etop(&ltq_eth_data);
+}
+
+static u16 arv_ath5k_eeprom_data[ATH5K_PLAT_EEP_MAX_WORDS];
+static u16 arv_ath9k_eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS];
+static u8 arv_athxk_eeprom_mac[6];
+
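+/*
+ * The Atheros calibration data also lives in NOR flash. For ath5k it is
+ * stored byteswapped and gets swapped back to host order; for both chips
+ * the regdomain is forced to 0x67. The wifi MAC is the board MAC with
+ * the last octet incremented by one.
+ */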
+static void __init
+arv_register_ath5k(unsigned int ath_addr, unsigned int mac_addr)
+{
+ int i;
+
+ memcpy_fromio(arv_athxk_eeprom_mac,
+ (void *)KSEG1ADDR(LTQ_FLASH_START + mac_addr), 6);
+ arv_athxk_eeprom_mac[5]++;
+ memcpy_fromio(arv_ath5k_eeprom_data,
+ (void *)KSEG1ADDR(LTQ_FLASH_START + ath_addr), ATH5K_PLAT_EEP_MAX_WORDS);
+ // swap eeprom bytes
+ for (i = 0; i < ATH5K_PLAT_EEP_MAX_WORDS>>1; i++) {
+ arv_ath5k_eeprom_data[i] = swab16(arv_ath5k_eeprom_data[i]);
+ if (i == 0x17e>>1) {
+			/*
+			 * the regdomain is invalid; it is unknown how the original
+			 * fw converted the value to 0x82d4, so force it to 0x67 for now
+			 */
+ arv_ath5k_eeprom_data[i] &= 0x0000;
+ arv_ath5k_eeprom_data[i] |= 0x67;
+ }
+ }
+}
+
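+/*
+ * The ath9k EEPROM image is valid when its length word, checksum word and
+ * all following payload words XOR to 0xffff, so after forcing the
+ * regdomain the checksum word has to be recomputed accordingly.
+ */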
+static void __init
+arv_register_ath9k(unsigned int ath_addr, unsigned int mac_addr)
+{
+ int i;
+ u16 *eepdata, sum, el;
+
+ memcpy_fromio(arv_athxk_eeprom_mac,
+ (void *)KSEG1ADDR(LTQ_FLASH_START + mac_addr), 6);
+ arv_athxk_eeprom_mac[5]++;
+ memcpy_fromio(arv_ath9k_eeprom_data,
+ (void *)KSEG1ADDR(LTQ_FLASH_START + ath_addr), ATH9K_PLAT_EEP_MAX_WORDS);
+
+ // force regdomain to 0x67
+ arv_ath9k_eeprom_data[0x208>>1] = 0x67;
+
+ // calculate new checksum
+ sum = arv_ath9k_eeprom_data[0x200>>1];
+ el = sum / sizeof(u16) - 2; /* skip length and (old) checksum */
+ eepdata = (u16 *) (&arv_ath9k_eeprom_data[0x204>>1]); /* after checksum */
+ for (i = 0; i < el; i++)
+ sum ^= *eepdata++;
+ sum ^= 0xffff;
+ arv_ath9k_eeprom_data[0x202>>1] = sum;
+}
+
+static void __init
+arv3527p_init(void)
+{
+#define ARV3527P_MAC_ADDR 0x3f0016
+
+ ltq_register_gpio_stp();
+ // ltq_add_device_gpio_leds(arv3527p_gpio_leds, ARRAY_SIZE(arv3527p_gpio_leds));
+ arv_load_nor(0x400000);
+ arv_register_ethernet(ARV3527P_MAC_ADDR);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV3527P,
+ "ARV3527P",
+ "ARV3527P - Arcor Easybox 401",
+ arv3527p_init);
+
+static void __init
+arv4510pw_init(void)
+{
+#define ARV4510PW_MAC_ADDR 0x3f0014
+
+ ltq_register_gpio_stp();
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv4510pw_gpio_leds), arv4510pw_gpio_leds);
+ arv_load_nor(0x400000);
+ ltq_pci_data.irq[12] = (INT_NUM_IM2_IRL0 + 31);
+ ltq_pci_data.irq[15] = (INT_NUM_IM0_IRL0 + 26);
+ ltq_pci_data.gpio |= PCI_EXIN2 | PCI_REQ2;
+ ltq_register_pci(&ltq_pci_data);
+ arv_register_ethernet(ARV4510PW_MAC_ADDR);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV4510PW,
+ "ARV4510PW",
+ "ARV4510PW - Wippies Homebox",
+ arv4510pw_init);
+
+static void __init
+arv4518pw_init(void)
+{
+#define ARV4518PW_EBU 0
+#define ARV4518PW_USB 14
+#define ARV4518PW_SWITCH_RESET 13
+#define ARV4518PW_ATH_ADDR 0x3f0400
+#define ARV4518PW_MAC_ADDR 0x3f0016
+
+ ltq_register_gpio_ebu(ARV4518PW_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv4518pw_gpio_leds), arv4518pw_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(arv4518pw_gpio_keys), arv4518pw_gpio_keys);
+ arv_load_nor(0x400000);
+ ltq_pci_data.gpio = PCI_GNT2 | PCI_REQ2;
+ ltq_register_pci(&ltq_pci_data);
+ xway_register_dwc(ARV4518PW_USB);
+ arv_register_ethernet(ARV4518PW_MAC_ADDR);
+ arv_register_ath5k(ARV4518PW_ATH_ADDR, ARV4518PW_MAC_ADDR);
+ ltq_register_ath5k(arv_ath5k_eeprom_data, arv_athxk_eeprom_mac);
+ ltq_register_tapi();
+
+ gpio_request(ARV4518PW_SWITCH_RESET, "switch");
+ gpio_direction_output(ARV4518PW_SWITCH_RESET, 1);
+ gpio_export(ARV4518PW_SWITCH_RESET, 0);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV4518PW,
+ "ARV4518PW",
+ "ARV4518PW - SMC7908A-ISP, Airties WAV-221",
+ arv4518pw_init);
+
+static void __init
+arv4519pw_init(void)
+{
+#define ARV4519PW_EBU 0
+#define ARV4519PW_USB 14
+#define ARV4519PW_RELAY 31
+#define ARV4519PW_SWITCH_RESET 13
+#define ARV4519PW_ATH_ADDR 0x3f0400
+#define ARV4519PW_MAC_ADDR 0x3f0016
+
+ arv_load_nor(0x400000);
+ ltq_register_gpio_ebu(ARV4519PW_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv4519pw_gpio_leds), arv4519pw_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(arv4519pw_gpio_keys), arv4519pw_gpio_keys);
+ ltq_pci_data.gpio = PCI_GNT2 | PCI_REQ1;
+ ltq_register_pci(&ltq_pci_data);
+ xway_register_dwc(ARV4519PW_USB);
+ arv_register_ethernet(ARV4519PW_MAC_ADDR);
+ arv_register_ath5k(ARV4519PW_ATH_ADDR, ARV4519PW_MAC_ADDR);
+ ltq_register_ath5k(arv_ath5k_eeprom_data, arv_athxk_eeprom_mac);
+ ltq_register_tapi();
+
+ gpio_request(ARV4519PW_RELAY, "relay");
+ gpio_direction_output(ARV4519PW_RELAY, 1);
+ gpio_export(ARV4519PW_RELAY, 0);
+
+ gpio_request(ARV4519PW_SWITCH_RESET, "switch");
+ gpio_set_value(ARV4519PW_SWITCH_RESET, 1);
+ gpio_export(ARV4519PW_SWITCH_RESET, 0);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV4519PW,
+ "ARV4519PW",
+ "ARV4519PW - Vodafone, Pirelli",
+ arv4519pw_init);
+
+static void __init
+arv4520pw_init(void)
+{
+#define ARV4520PW_EBU 0x400
+#define ARV4520PW_USB 28
+#define ARV4520PW_SWITCH_RESET 110
+#define ARV4520PW_MAC_ADDR 0x3f0016
+
+ ltq_register_gpio_ebu(ARV4520PW_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv4520pw_gpio_leds), arv4520pw_gpio_leds);
+ arv_load_nor(0x400000);
+ ltq_register_pci(&ltq_pci_data);
+ arv_register_ethernet(ARV4520PW_MAC_ADDR);
+ ltq_register_rt2x00(NULL, (const u8 *) ltq_eth_data.mac.sa_data);
+ xway_register_dwc(ARV4520PW_USB);
+ ltq_register_tapi();
+
+ gpio_request(ARV4520PW_SWITCH_RESET, "switch");
+ gpio_set_value(ARV4520PW_SWITCH_RESET, 1);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV4520PW,
+ "ARV4520PW",
+ "ARV4520PW - Airties WAV-281, Arcor A800",
+ arv4520pw_init);
+
+static void __init
+arv452Cpw_init(void)
+{
+#define ARV452CPW_EBU 0x77f
+#define ARV452CPW_USB 28
+#define ARV452CPW_RELAY1 31
+#define ARV452CPW_RELAY2 107
+#define ARV452CPW_SWITCH_RESET 110
+#define ARV452CPW_ATH_ADDR 0x3f0400
+#define ARV452CPW_MAC_ADDR 0x3f0016
+
+ ltq_register_gpio_ebu(ARV452CPW_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv452cpw_gpio_leds), arv452cpw_gpio_leds);
+ arv_load_nor(0x400000);
+ ltq_register_pci(&ltq_pci_data);
+ xway_register_dwc(ARV452CPW_USB);
+ arv_register_ethernet(ARV452CPW_MAC_ADDR);
+ arv_register_ath5k(ARV452CPW_ATH_ADDR, ARV452CPW_MAC_ADDR);
+ ltq_register_ath5k(arv_ath5k_eeprom_data, arv_athxk_eeprom_mac);
+ ltq_register_tapi();
+
+ gpio_request(ARV452CPW_SWITCH_RESET, "switch");
+ gpio_set_value(ARV452CPW_SWITCH_RESET, 1);
+ gpio_export(ARV452CPW_SWITCH_RESET, 0);
+
+ gpio_request(ARV452CPW_RELAY1, "relay1");
+ gpio_direction_output(ARV452CPW_RELAY1, 1);
+ gpio_export(ARV452CPW_RELAY1, 0);
+
+ gpio_request(ARV452CPW_RELAY2, "relay2");
+ gpio_set_value(ARV452CPW_RELAY2, 1);
+ gpio_export(ARV452CPW_RELAY2, 0);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV452CPW,
+ "ARV452CPW",
+ "ARV452CPW - Arcor A801",
+ arv452Cpw_init);
+
+static void __init
+arv4525pw_init(void)
+{
+#define ARV4525PW_ATH_ADDR 0x3f0400
+#define ARV4525PW_MAC_ADDR 0x3f0016
+
+ arv_load_nor(0x400000);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv4525pw_gpio_leds), arv4525pw_gpio_leds);
+ gpio_request_array(arv4525pw_gpios, ARRAY_SIZE(arv4525pw_gpios));
+ gpio_export(ARV4525PW_RELAY, false);
+ gpio_export(ARV4525PW_PHYRESET, false);
+ ltq_pci_data.clock = PCI_CLOCK_INT;
+ ltq_register_pci(&ltq_pci_data);
+ arv_register_ath5k(ARV4525PW_ATH_ADDR, ARV4525PW_MAC_ADDR);
+ ltq_register_ath5k(arv_ath5k_eeprom_data, arv_athxk_eeprom_mac);
+ ltq_eth_data.mii_mode = PHY_INTERFACE_MODE_MII;
+ arv_register_ethernet(ARV4525PW_MAC_ADDR);
+ ltq_register_tapi();
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV4525PW,
+ "ARV4525PW",
+ "ARV4525PW - Speedport W502V",
+ arv4525pw_init);
+
+static void __init
+arv7525pw_init(void)
+{
+#define ARV7525P_MAC_ADDR 0x3f0016
+
+ arv_load_nor(0x400000);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv4525pw_gpio_leds), arv4525pw_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(arv7525pw_gpio_keys), arv7525pw_gpio_keys);
+ ltq_pci_data.clock = PCI_CLOCK_INT;
+ ltq_pci_data.gpio = PCI_GNT1 | PCI_EXIN1;
+ ltq_pci_data.irq[14] = (INT_NUM_IM3_IRL0 + 31);
+ ltq_register_pci(&ltq_pci_data);
+ ltq_eth_data.mii_mode = PHY_INTERFACE_MODE_MII;
+ ltq_register_rt2x00("RT2860.eeprom", NULL);
+ ltq_register_tapi();
+ arv_register_ethernet(ARV7525P_MAC_ADDR);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV7525PW,
+ "ARV7525PW",
+ "ARV7525PW - Speedport W303V",
+ arv7525pw_init);
+
+static void __init
+arv7518pw_init(void)
+{
+#define ARV7518PW_EBU 0x2
+#define ARV7518PW_USB 14
+#define ARV7518PW_SWITCH_RESET 13
+#define ARV7518PW_ATH_ADDR 0x7f0400
+#define ARV7518PW_MAC_ADDR 0x7f0016
+
+ arv_load_nor(0x800000);
+ ltq_register_gpio_ebu(ARV7518PW_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv7518pw_gpio_leds), arv7518pw_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(arv7518pw_gpio_keys), arv7518pw_gpio_keys);
+ ltq_register_pci(&ltq_pci_data);
+ xway_register_dwc(ARV7518PW_USB);
+ arv_register_ethernet(ARV7518PW_MAC_ADDR);
+ arv_register_ath9k(ARV7518PW_ATH_ADDR, ARV7518PW_MAC_ADDR);
+ ltq_register_ath9k(arv_ath9k_eeprom_data, arv_athxk_eeprom_mac);
+ ltq_pci_ath_fixup(14, arv_ath9k_eeprom_data);
+ ltq_register_tapi();
+
+ gpio_request(ARV7518PW_SWITCH_RESET, "switch");
+ gpio_direction_output(ARV7518PW_SWITCH_RESET, 1);
+ gpio_export(ARV7518PW_SWITCH_RESET, 0);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV7518PW,
+ "ARV7518PW",
+ "ARV7518PW - ASTORIA",
+ arv7518pw_init);
+
+static void __init
+arv752dpw22_init(void)
+{
+#define ARV752DPW22_EBU 0x2
+#define ARV752DPW22_USB 100
+#define ARV752DPW22_RELAY 101
+#define ARV752DPW22_MAC_ADDR 0x7f0016
+
+ arv_load_nor(0x800000);
+ ltq_register_gpio_ebu(ARV752DPW22_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv752dpw22_gpio_leds), arv752dpw22_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(arv752dpw22_gpio_keys), arv752dpw22_gpio_keys);
+ ltq_pci_data.irq[15] = (INT_NUM_IM3_IRL0 + 31);
+ ltq_pci_data.gpio |= PCI_EXIN1 | PCI_REQ2;
+ ltq_register_pci(&ltq_pci_data);
+ xway_register_dwc(ARV752DPW22_USB);
+ arv_register_ethernet(ARV752DPW22_MAC_ADDR);
+ ltq_register_tapi();
+
+ gpio_request(ARV752DPW22_RELAY, "relay");
+ gpio_set_value(ARV752DPW22_RELAY, 1);
+ gpio_export(ARV752DPW22_RELAY, 0);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV752DPW22,
+ "ARV752DPW22",
+ "ARV752DPW22 - Arcor A803",
+ arv752dpw22_init);
+
+static void __init
+arv752dpw_init(void)
+{
+#define ARV752DPW22_EBU 0x2
+#define ARV752DPW22_USB 100
+#define ARV752DPW22_RELAY 101
+#define ARV752DPW22_MAC_ADDR 0x7f0016
+
+ arv_load_nor(0x800000);
+ ltq_register_gpio_ebu(ARV752DPW22_EBU);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(arv752dpw22_gpio_leds), arv752dpw22_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL, ARRAY_SIZE(arv752dpw22_gpio_keys), arv752dpw22_gpio_keys);
+ ltq_pci_data.irq[14] = (INT_NUM_IM3_IRL0 + 31);
+ ltq_pci_data.gpio |= PCI_EXIN1 | PCI_REQ2;
+ ltq_register_pci(&ltq_pci_data);
+ ltq_register_tapi();
+ xway_register_dwc(ARV752DPW22_USB);
+ ltq_register_rt2x00("RT2860.eeprom", NULL);
+ arv_register_ethernet(ARV752DPW22_MAC_ADDR);
+ gpio_request(ARV752DPW22_RELAY, "relay");
+ gpio_set_value(ARV752DPW22_RELAY, 1);
+ gpio_export(ARV752DPW22_RELAY, 0);
+
+}
+
+MIPS_MACHINE(LANTIQ_MACH_ARV752DPW,
+ "ARV752DPW",
+ "ARV752DPW - Arcor A802",
+ arv752dpw_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-bthomehubv2b.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-bthomehubv2b.c
new file mode 100644
index 0000000000..44fe2f4efe
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-bthomehubv2b.c
@@ -0,0 +1,542 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Andrej Vlašić
+ * Copyright (C) 2011 Luka Perkov
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/ath5k_platform.h>
+#include <linux/ath9k_platform.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+
+#include <irq.h>
+#include <lantiq_soc.h>
+#include <lantiq_platform.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+
+#include "../machtypes.h"
+//#include "dev-wifi-ath9k.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+
+#undef USE_BTHH_GPIO_INIT
+
+// read the MAC address and board id from the u-boot environment if present
+#define USE_UBOOT_ENV_DATA
+
+#define UBOOT_ENV_OFFSET 0x040000
+#define UBOOT_ENV_SIZE 0x010000
+
+#ifdef NAND_ORGLAYOUT
+// this is only here for reference
+// definition of NAND flash area
+static struct mtd_partition bthomehubv2b_nand_partitions[] =
+{
+ {
+ .name = "ART",
+ .offset = 0x0000000,
+ .size = 0x0004000,
+ },
+ {
+ .name = "image1",
+ .offset = 0x0004000,
+ .size = 0x0E00000,
+ },
+ {
+ .name = "unknown1",
+ .offset = 0x0E04000,
+ .size = 0x00FC000,
+ },
+ {
+ .name = "image2",
+ .offset = 0x0F00000,
+ .size = 0x0E00000,
+ },
+ {
+ .name = "unknown2",
+ .offset = 0x1D00000,
+ .size = 0x0300000,
+ },
+
+};
+#endif
+
+#ifdef NAND_KEEPOPENRG
+// this allows both firmwares to co-exist
+
+static struct mtd_partition bthomehubv2b_nand_partitions[] =
+{
+ {
+ .name = "art",
+ .offset = 0x0000000,
+ .size = 0x0004000,
+ },
+ {
+ .name = "Image1",
+ .offset = 0x0004000,
+ .size = 0x0E00000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x0E04000,
+ .size = 0x11fC000,
+ },
+ {
+ .name = "wholeflash",
+ .offset = 0x0000000,
+ .size = 0x2000000,
+ },
+
+};
+#endif
+
+// this gives more jffs2 by overwriting openrg
+
+static struct mtd_partition bthomehubv2b_nand_partitions[] =
+{
+ {
+ .name = "art",
+ .offset = 0x0000000,
+ .size = 0x0004000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x0004000,
+ .size = 0x1ffC000,
+ },
+ {
+ .name = "wholeflash",
+ .offset = 0x0000000,
+ .size = 0x2000000,
+ },
+
+};
+
+extern void __init xway_register_nand(struct mtd_partition *parts, int count);
+
+// end BTHH_USE_NAND
+
+static struct gpio_led
+bthomehubv2b_gpio_leds[] __initdata = {
+
+ { .name = "soc:orange:upgrading", .gpio = 213, },
+ { .name = "soc:orange:phone", .gpio = 214, },
+ { .name = "soc:blue:phone", .gpio = 215, },
+ { .name = "soc:orange:wireless", .gpio = 216, },
+ { .name = "soc:blue:wireless", .gpio = 217, },
+ { .name = "soc:red:broadband", .gpio = 218, },
+ { .name = "soc:orange:broadband", .gpio = 219, },
+ { .name = "soc:blue:broadband", .gpio = 220, },
+ { .name = "soc:red:power", .gpio = 221, },
+ { .name = "soc:orange:power", .gpio = 222, },
+ { .name = "soc:blue:power", .gpio = 223, },
+};
+
+static struct gpio_keys_button
+bthomehubv2b_gpio_keys[] __initdata = {
+ {
+ .desc = "restart",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 2,
+ .active_low = 1,
+ },
+ {
+ .desc = "findhandset",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 15,
+ .active_low = 1,
+ },
+ {
+ .desc = "wps",
+ .type = EV_KEY,
+ .code = BTN_2,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 22,
+ .active_low = 1,
+ },
+};
+
+// definition of NOR flash area - as per original.
+static struct mtd_partition bthomehubv2b_partitions[] =
+{
+ {
+ .name = "uboot",
+ .offset = 0x000000,
+ .size = 0x040000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = UBOOT_ENV_OFFSET,
+ .size = UBOOT_ENV_SIZE,
+ },
+ {
+ .name = "rg_conf_1",
+ .offset = 0x050000,
+ .size = 0x010000,
+ },
+ {
+ .name = "rg_conf_2",
+ .offset = 0x060000,
+ .size = 0x010000,
+ },
+ {
+ .name = "rg_conf_factory",
+ .offset = 0x070000,
+ .size = 0x010000,
+ },
+};
+
+
+/*
+ * nor flash
+ * the bt homehubv2b has a very small nor flash, so only probe a small
+ * window; otherwise a lot of alias chips get identified - not a big
+ * problem, but it fills the logs.
+ */
+static struct resource bthhv2b_nor_resource =
+ MEM_RES("nor", LTQ_FLASH_START, 0x80000);
+
+static struct platform_device ltq_nor = {
+ .name = "ltq_nor",
+ .resource = &bthhv2b_nor_resource,
+ .num_resources = 1,
+};
+
+static void __init bthhv2b_register_nor(struct physmap_flash_data *data)
+{
+ ltq_nor.dev.platform_data = data;
+ platform_device_register(&ltq_nor);
+}
+
+static struct physmap_flash_data bthomehubv2b_flash_data = {
+ .nr_parts = ARRAY_SIZE(bthomehubv2b_partitions),
+ .parts = bthomehubv2b_partitions,
+};
+
+
+
+
+static struct ltq_pci_data ltq_pci_data = {
+ .clock = PCI_CLOCK_INT,
+ .gpio = PCI_GNT1 | PCI_REQ1,
+ .irq = { [14] = INT_NUM_IM0_IRL0 + 22, },
+};
+
+
+
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_MII,
+};
+
+
+
+
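+/*
+ * Minimal u-boot environment lookup: scan the raw env sector for
+ * "\0<name>=" and return a pointer just past the '='. The value is then
+ * read in place from the ioremap()ed flash; u-boot env entries are NUL
+ * separated, so the string ends at the following variable.
+ */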
+static char __init *get_uboot_env_var(char *haystack, int haystack_len, char *needle, int needle_len) {
+ int i;
+ for (i = 0; i <= haystack_len - needle_len; i++) {
+ if (memcmp(haystack + i, needle, needle_len) == 0) {
+ return haystack + i + needle_len;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * bthomehubv2b_parse_hex_* are not unique; arm/orion carries duplicates as
+ * well (dns323_parse_hex_*).
+ * TODO: one day write a patch to share this code :)
+ */
+static int __init bthomehubv2b_parse_hex_nibble(char n) {
+ if (n >= '0' && n <= '9')
+ return n - '0';
+
+ if (n >= 'A' && n <= 'F')
+ return n - 'A' + 10;
+
+ if (n >= 'a' && n <= 'f')
+ return n - 'a' + 10;
+
+ return -1;
+}
+
+static int __init bthomehubv2b_parse_hex_byte(const char *b) {
+ int hi;
+ int lo;
+
+ hi = bthomehubv2b_parse_hex_nibble(b[0]);
+ lo = bthomehubv2b_parse_hex_nibble(b[1]);
+
+ if (hi < 0 || lo < 0)
+ return -1;
+
+ return (hi << 4) | lo;
+}
+
+static int __init bthomehubv2b_register_ethernet(void) {
+ u_int8_t addr[6];
+ int i;
+ char *mac = "01:02:03:04:05:06";
+ int gotmac = 0;
+ char *boardid = "BTHHV2B";
+ int gotboardid = 0;
+
+ char *uboot_env_page;
+ uboot_env_page = ioremap(LTQ_FLASH_START + UBOOT_ENV_OFFSET, UBOOT_ENV_SIZE);
+ if (uboot_env_page)
+ {
+ char *Data = NULL;
+ Data = get_uboot_env_var(uboot_env_page, UBOOT_ENV_SIZE, "\0ethaddr=", 9);
+		if (Data)
+		{
+			mac = Data;
+			gotmac = 1;
+		}
+
+		Data = get_uboot_env_var(uboot_env_page, UBOOT_ENV_SIZE, "\0boardid=", 9);
+
+		if (Data)
+		{
+			boardid = Data;
+			gotboardid = 1;
+		}
+ }
+ else
+ {
+		printk("bthomehubv2b: Failed to get uboot_env_page\n");
+ }
+
+ if (!mac) {
+ goto error_fail;
+ }
+
+ if (!boardid) {
+ goto error_fail;
+ }
+
+ /* Sanity check the string we're looking at */
+ for (i = 0; i < 5; i++) {
+ if (*(mac + (i * 3) + 2) != ':') {
+ goto error_fail;
+ }
+ }
+
+ for (i = 0; i < 6; i++) {
+ int byte;
+ byte = bthomehubv2b_parse_hex_byte(mac + (i * 3));
+ if (byte < 0) {
+ goto error_fail;
+ }
+ addr[i] = byte;
+ }
+
+ if (gotmac)
+ printk("bthomehubv2b: Found ethernet MAC address: ");
+ else
+ printk("bthomehubv2b: using default MAC (pls set ethaddr in u-boot): ");
+
+ for (i = 0; i < 6; i++)
+ printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n");
+
+ memcpy(&ltq_eth_data.mac.sa_data, addr, 6);
+ ltq_register_etop(&ltq_eth_data);
+
+ //memcpy(&bthomehubv2b_ath5k_eeprom_mac, addr, 6);
+ //bthomehubv2b_ath5k_eeprom_mac[5]++;
+
+	if (gotboardid)
+		printk("bthomehubv2b: Board id is %s.\n", boardid);
+	else
+		printk("bthomehubv2b: Default Board id is %s.\n", boardid);
+
+	if (strncmp(boardid, "BTHHV2B", 7) == 0) {
+		// read in dev-wifi-ath9k
+		//memcpy(&bthomehubv2b_ath5k_eeprom_data, sx763_eeprom_data, ATH5K_PLAT_EEP_MAX_WORDS);
+	}
+	else {
+		printk("bthomehubv2b: Board id is unknown, fix uboot_env data.\n");
+	}
+
+
+ // should not unmap while we are using the ram?
+ if (uboot_env_page)
+ iounmap(uboot_env_page);
+
+ return 0;
+
+error_fail:
+ if (uboot_env_page)
+ iounmap(uboot_env_page);
+ return -EINVAL;
+}
+
+
+#define PORTA2_HW_PASS1 0
+#define PORTA2_HW_PASS1_SC14480 1
+#define PORTA2_HW_PASS2 2
+
+int porta2_hw_revision = -1;
+EXPORT_SYMBOL(porta2_hw_revision);
+
+#define LTQ_GPIO_OUT 0x00
+#define LTQ_GPIO_IN 0x04
+#define LTQ_GPIO_DIR 0x08
+#define LTQ_GPIO_ALTSEL0 0x0C
+#define LTQ_GPIO_ALTSEL1 0x10
+#define LTQ_GPIO_OD 0x14
+#define LTQ_GPIO_PUDSEL 0x1C
+#define LTQ_GPIO_PUDEN 0x20
+
+#ifdef USE_BTHH_GPIO_INIT
+static void bthomehubv2b_board_prom_init(void)
+{
+ int revision = 0;
+ unsigned int gpio = 0;
+ void __iomem *mem = ioremap(LTQ_GPIO0_BASE_ADDR, LTQ_GPIO_SIZE*2);
+
+#define DANUBE_GPIO_P0_OUT (unsigned short *)(mem + LTQ_GPIO_OUT)
+#define DANUBE_GPIO_P0_IN (unsigned short *)(mem + LTQ_GPIO_IN)
+#define DANUBE_GPIO_P0_DIR (unsigned short *)(mem + LTQ_GPIO_DIR)
+#define DANUBE_GPIO_P0_ALTSEL0 (unsigned short *)(mem + LTQ_GPIO_ALTSEL0)
+#define DANUBE_GPIO_P0_ALTSEL1 (unsigned short *)(mem + LTQ_GPIO_ALTSEL1)
+
+#define DANUBE_GPIO_P1_OUT (unsigned short *)(mem + LTQ_GPIO_SIZE + LTQ_GPIO_OUT)
+#define DANUBE_GPIO_P1_IN (unsigned short *)(mem + LTQ_GPIO_SIZE + LTQ_GPIO_IN)
+#define DANUBE_GPIO_P1_DIR (unsigned short *)(mem + LTQ_GPIO_SIZE + LTQ_GPIO_DIR)
+#define DANUBE_GPIO_P1_ALTSEL0 (unsigned short *)(mem + LTQ_GPIO_SIZE + LTQ_GPIO_ALTSEL0)
+#define DANUBE_GPIO_P1_ALTSEL1 (unsigned short *)(mem + LTQ_GPIO_SIZE + LTQ_GPIO_ALTSEL1)
+#define DANUBE_GPIO_P1_OD (unsigned short *)(mem + LTQ_GPIO_SIZE + LTQ_GPIO_OD)
+
+ printk("About to sense board using GPIOs at %8.8X\n", (unsigned int)mem);
+
+
+ /* First detect HW revision of the board. For that we need to set the GPIO
+ * lines according to table 7.2.1/7.2.2 in HSI */
+ *DANUBE_GPIO_P0_OUT = 0x0200;
+ *DANUBE_GPIO_P0_DIR = 0x2610;
+ *DANUBE_GPIO_P0_ALTSEL0 = 0x7812;
+ *DANUBE_GPIO_P0_ALTSEL1 = 0x0000;
+
+ *DANUBE_GPIO_P1_OUT = 0x7008;
+ *DANUBE_GPIO_P1_DIR = 0xF3AE;
+ *DANUBE_GPIO_P1_ALTSEL0 = 0x83A7;
+ *DANUBE_GPIO_P1_ALTSEL1 = 0x0400;
+
+ gpio = (*DANUBE_GPIO_P0_IN & 0xFFFF) |
+ ((*DANUBE_GPIO_P1_IN & 0xFFFF) << 16);
+
+ revision |= (gpio & (1 << 27)) ? (1 << 0) : 0;
+ revision |= (gpio & (1 << 20)) ? (1 << 1) : 0;
+ revision |= (gpio & (1 << 8)) ? (1 << 2) : 0;
+ revision |= (gpio & (1 << 6)) ? (1 << 3) : 0;
+ revision |= (gpio & (1 << 5)) ? (1 << 4) : 0;
+ revision |= (gpio & (1 << 0)) ? (1 << 5) : 0;
+
+ porta2_hw_revision = revision;
+ printk("PORTA2: detected HW revision %d\n", revision);
+
+ /* Set GPIO lines according to HW revision. */
+ /* !!! Note that we are setting SPI_CS5 (GPIO 9) to be GPIO out with value
+ * of HIGH since the FXO does not use the SPI CS mechanism, it does it
+ * manually by controlling the GPIO line. We need the CS line to be disabled
+	 * (HIGH) until needed since it will interfere with other devices on the SPI
+ * bus. */
+ *DANUBE_GPIO_P0_OUT = 0x0200;
+ /*
+ * During the manufacturing process a different machine takes over uart0
+ * so set it as input (so it wouldn't drive the line)
+ */
+#define cCONFIG_SHC_BT_MFG_TEST 0
+ *DANUBE_GPIO_P0_DIR = 0x2671 | (cCONFIG_SHC_BT_MFG_TEST ? 0 : (1 << 12));
+
+ if (revision == PORTA2_HW_PASS1_SC14480 || revision == PORTA2_HW_PASS2)
+ *DANUBE_GPIO_P0_ALTSEL0 = 0x7873;
+ else
+ *DANUBE_GPIO_P0_ALTSEL0 = 0x3873;
+
+ *DANUBE_GPIO_P0_ALTSEL1 = 0x0001;
+
+
+ //###################################################################################
+ // Register values before patch
+ // P1_ALTSEL0 = 0x83A7
+ // P1_ALTSEL1 = 0x0400
+ // P1_OU T = 0x7008
+ // P1_DIR = 0xF3AE
+ // P1_OD = 0xE3Fc
+ printk("\nApplying Patch for CPU1 IRQ Issue\n");
+ *DANUBE_GPIO_P1_ALTSEL0 &= ~(1<<12); // switch P1.12 (GPIO28) to GPIO functionality
+ *DANUBE_GPIO_P1_ALTSEL1 &= ~(1<<12); // switch P1.12 (GPIO28) to GPIO functionality
+ *DANUBE_GPIO_P1_OUT &= ~(1<<12); // set P1.12 (GPIO28) to 0
+ *DANUBE_GPIO_P1_DIR |= (1<<12); // configure P1.12 (GPIO28) as output
+ *DANUBE_GPIO_P1_OD |= (1<<12); // activate Push/Pull mode
+ udelay(100); // wait a little bit (100us)
+ *DANUBE_GPIO_P1_OD &= ~(1<<12); // switch back from Push/Pull to Open Drain
+	// important: before(!) setting the output to 1 (3.3V) the mode must be
+	// switched back to Open Drain because the reset pin of the SC14488 is
+	// internally pulled to 1.8V
+ *DANUBE_GPIO_P1_OUT |= (1<<12); // set output P1.12 (GPIO28) to 1
+ // Register values after patch, should be the same as before
+ // P1_ALTSEL0 = 0x83A7
+ // P1_ALTSEL1 = 0x0400
+ // P1_OUT = 0x7008
+ // P1_DIR = 0xF3AE
+ // P1_OD = 0xE3Fc
+ //###################################################################################
+
+
+ *DANUBE_GPIO_P1_OUT = 0x7008;
+ *DANUBE_GPIO_P1_DIR = 0xEBAE | (revision == PORTA2_HW_PASS2 ? 0x1000 : 0);
+ *DANUBE_GPIO_P1_ALTSEL0 = 0x8BA7;
+ *DANUBE_GPIO_P1_ALTSEL1 = 0x0400;
+
+ iounmap(mem);
+}
+#endif
+static void __init bthomehubv2b_init(void) {
+#define bthomehubv2b_USB 13
+
+ // read the board version
+#ifdef USE_BTHH_GPIO_INIT
+ bthomehubv2b_board_prom_init();
+#endif
+
+	// register the extra GPOs used by the LEDs (STP outputs, GPIOs 200+)
+ ltq_register_gpio_stp();
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(bthomehubv2b_gpio_leds), bthomehubv2b_gpio_leds);
+ bthhv2b_register_nor(&bthomehubv2b_flash_data);
+ xway_register_nand(bthomehubv2b_nand_partitions, ARRAY_SIZE(bthomehubv2b_nand_partitions));
+ ltq_register_pci(&ltq_pci_data);
+ ltq_register_tapi();
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL, ARRAY_SIZE(bthomehubv2b_gpio_keys), bthomehubv2b_gpio_keys);
+// ltq_register_ath9k();
+ xway_register_dwc(bthomehubv2b_USB);
+ bthomehubv2b_register_ethernet();
+
+}
+
+MIPS_MACHINE(LANTIQ_MACH_BTHOMEHUBV2BOPENRG,
+ "BTHOMEHUBV2BOPENRG",
+ "BTHOMEHUBV2B - BT Homehub V2.0 Type B with OpenRG image retained",
+ bthomehubv2b_init);
+
+MIPS_MACHINE(LANTIQ_MACH_BTHOMEHUBV2B,
+ "BTHOMEHUBV2B",
+ "BTHOMEHUBV2B - BT Homehub V2.0 Type B",
+ bthomehubv2b_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_ar9.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_ar9.c
new file mode 100644
index 0000000000..5bd6341809
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_ar9.c
@@ -0,0 +1,115 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/phy.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/spi/flash.h>
+
+#include <lantiq_soc.h>
+#include <irq.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+#include "dev-ifxhcd.h"
+#include "dev-gpio-leds.h"
+#include "dev-gpio-buttons.h"
+
+static struct mtd_partition fritz7320_partitions[] = {
+ {
+ .name = "urlader",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x20000,
+ .size = 0xf60000,
+ },
+ {
+ .name = "tffs (1)",
+ .offset = 0xf80000,
+ .size = 0x40000,
+ },
+ {
+ .name = "tffs (2)",
+ .offset = 0xfc0000,
+ .size = 0x40000,
+ },
+};
+
+static struct physmap_flash_data fritz7320_flash_data = {
+ .nr_parts = ARRAY_SIZE(fritz7320_partitions),
+ .parts = fritz7320_partitions,
+};
+
+static struct gpio_led
+fritz7320_gpio_leds[] __initdata = {
+ { .name = "soc:green:power", .gpio = 44, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:internet", .gpio = 47, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:dect", .gpio = 38, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:wlan", .gpio = 37, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:dual1", .gpio = 35, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:dual2", .gpio = 45, .active_low = 1, .default_trigger = "default-on" },
+};
+
+static struct gpio_keys_button
+fritz7320_gpio_keys[] __initdata = {
+ {
+ .desc = "wifi",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 1,
+ .active_low = 1,
+ },
+ {
+ .desc = "dect",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 2,
+ .active_low = 1,
+ },
+};
+
+static struct ltq_pci_data ltq_pci_data = {
+ .clock = PCI_CLOCK_INT,
+ .gpio = PCI_GNT1 | PCI_REQ1,
+ .irq = {
+ [14] = INT_NUM_IM0_IRL0 + 22,
+ },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_RMII,
+};
+
+static int usb_pins[2] = { 50, 51 };
+
+static void __init
+fritz7320_init(void)
+{
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(fritz7320_gpio_keys), fritz7320_gpio_keys);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(fritz7320_gpio_leds), fritz7320_gpio_leds);
+ ltq_register_pci(&ltq_pci_data);
+ ltq_register_etop(&ltq_eth_data);
+ ltq_register_nor(&fritz7320_flash_data);
+ xway_register_hcd(usb_pins);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_FRITZ7320,
+ "FRITZ7320",
+ "FRITZ!BOX 7320",
+ fritz7320_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_vr9.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_vr9.c
new file mode 100644
index 0000000000..293c7b750a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-fritz_vr9.c
@@ -0,0 +1,164 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/phy.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/spi/flash.h>
+
+#include <lantiq_soc.h>
+#include <irq.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+#include "dev-ifxhcd.h"
+#include "dev-gpio-leds.h"
+#include "dev-gpio-buttons.h"
+
+static struct mtd_partition fritz3370_partitions[] = {
+ {
+ .name = "linux",
+ .offset = 0x0,
+ .size = 0x400000,
+ },
+ {
+ .name = "filesystem",
+ .offset = 0x400000,
+ .size = 0x3000000,
+ },
+ {
+ .name = "reserved-kernel",
+ .offset = 0x3400000,
+ .size = 0x400000,
+ },
+ {
+ .name = "reserved",
+ .offset = 0x3800000,
+ .size = 0x3000000,
+ },
+ {
+ .name = "config",
+ .offset = 0x6800000,
+ .size = 0x200000,
+ },
+ {
+ .name = "nand-filesystem",
+ .offset = 0x6a00000,
+ .size = 0x1600000,
+ },
+};
+
+static struct mtd_partition spi_flash_partitions[] = {
+ {
+ .name = "urlader",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+		.name = "tffs (1)",
+ .offset = 0x20000,
+ .size = 0x10000,
+ },
+ {
+		.name = "tffs (2)",
+ .offset = 0x30000,
+ .size = 0x10000,
+ },
+};
+
+static struct gpio_led
+fritz3370_gpio_leds[] __initdata = {
+ { .name = "soc:green:1", .gpio = 32, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:2", .gpio = 33, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:red:3", .gpio = 34, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:4", .gpio = 35, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:5", .gpio = 36, .active_low = 1, .default_trigger = "default-on" },
+ { .name = "soc:green:6", .gpio = 47, .active_low = 1, .default_trigger = "default-on" },
+};
+
+static struct gpio_keys_button
+fritz3370_gpio_keys[] __initdata = {
+ {
+ .desc = "wifi",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 29,
+ .active_low = 1,
+ },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_RMII,
+};
+
+static int usb_pins[2] = { 5, 14 };
+
+#define SPI_GPIO_MRST 16
+#define SPI_GPIO_MTSR 17
+#define SPI_GPIO_CLK 18
+#define SPI_GPIO_CS0 10
+
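+/*
+ * The boot flash (m25p80) sits on a bit-banged SPI bus: the generic
+ * spi_gpio master drives the CLK/MOSI/MISO lines defined above and the
+ * chip select GPIO is passed via controller_data.
+ */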
+static struct spi_gpio_platform_data spi_gpio_data = {
+ .sck = SPI_GPIO_CLK,
+ .mosi = SPI_GPIO_MTSR,
+ .miso = SPI_GPIO_MRST,
+ .num_chipselect = 2,
+};
+
+static struct platform_device spi_gpio_device = {
+ .name = "spi_gpio",
+ .dev.platform_data = &spi_gpio_data,
+};
+
+static struct flash_platform_data spi_flash_data = {
+ .name = "SPL",
+ .parts = spi_flash_partitions,
+ .nr_parts = ARRAY_SIZE(spi_flash_partitions),
+};
+
+static struct spi_board_info spi_flash __initdata = {
+ .modalias = "m25p80",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 10 * 1000 * 1000,
+ .mode = SPI_MODE_3,
+ .controller_data = (void *) SPI_GPIO_CS0,
+ .platform_data = &spi_flash_data
+};
+
+static void __init
+spi_gpio_init(void)
+{
+ spi_register_board_info(&spi_flash, 1);
+ platform_device_register(&spi_gpio_device);
+}
+
+static void __init
+fritz3370_init(void)
+{
+ spi_gpio_init();
+ platform_device_register_simple("pcie-xway", 0, NULL, 0);
+ xway_register_nand(fritz3370_partitions, ARRAY_SIZE(fritz3370_partitions));
+ xway_register_hcd(usb_pins);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(fritz3370_gpio_leds), fritz3370_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(fritz3370_gpio_keys), fritz3370_gpio_keys);
+ ltq_register_vrx200(&ltq_eth_data);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_FRITZ3370,
+ "FRITZ3370",
+ "FRITZ!BOX 3370",
+ fritz3370_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.c
new file mode 100644
index 0000000000..af27825cbb
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.c
@@ -0,0 +1,168 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Andrej Vlašić
+ * Copyright (C) 2011 Luka Perkov
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/string.h>
+
+#include <irq.h>
+#include <lantiq_soc.h>
+#include <lantiq_platform.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+
+#include "../machtypes.h"
+#include "dev-wifi-athxk.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+
+#include "mach-gigasx76x.h"
+
+static u8 ltq_ethaddr[6] = { 0 };
+
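+/*
+ * The MAC address and board variant are passed on the kernel command line
+ * (e.g. "ethaddr=00:11:22:33:44:55 board=sx763"), typically by the
+ * bootloader, and parsed via the __setup() hooks below.
+ */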
+static int __init
+setup_ethaddr(char *str)
+{
+ if (!mac_pton(str, ltq_ethaddr))
+ memset(ltq_ethaddr, 0, 6);
+ return 0;
+}
+__setup("ethaddr=", setup_ethaddr);
+
+
+enum {
+ UNKNOWN = 0,
+ SX761,
+ SX762,
+ SX763,
+};
+static u8 board __initdata = SX763;
+
+static int __init
+setup_board(char *str)
+{
+ if (!strcmp(str, "sx761"))
+ board = SX761;
+ else if (!strcmp(str, "sx762"))
+ board = SX762;
+ else if (!strcmp(str, "sx763"))
+ board = SX763;
+ else
+ board = UNKNOWN;
+ return 0;
+}
+__setup("board=", setup_board);
+
+static struct mtd_partition gigasx76x_partitions[] =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x10000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x10000,
+ .size = 0x10000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x20000,
+ .size = 0x7e0000,
+ },
+};
+
+static struct gpio_led
+gigasx76x_gpio_leds[] __initdata = {
+ { .name = "soc:green:voip", .gpio = 216, },
+ { .name = "soc:green:adsl", .gpio = 217, },
+ { .name = "soc:green:usb", .gpio = 218, },
+ { .name = "soc:green:wifi", .gpio = 219, },
+ { .name = "soc:green:phone2", .gpio = 220, },
+ { .name = "soc:green:phone1", .gpio = 221, },
+ { .name = "soc:green:line", .gpio = 222, },
+ { .name = "soc:green:online", .gpio = 223, },
+};
+
+static struct gpio_keys_button
+gigasx76x_gpio_keys[] __initdata = {
+ {
+ .desc = "wps",
+ .type = EV_KEY,
+ .code = KEY_WPS_BUTTON,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 22,
+ .active_low = 1,
+ },
+ {
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 14,
+ .active_low = 0,
+ },
+};
+
+static struct physmap_flash_data gigasx76x_flash_data = {
+ .nr_parts = ARRAY_SIZE(gigasx76x_partitions),
+ .parts = gigasx76x_partitions,
+};
+
+static struct ltq_pci_data ltq_pci_data = {
+ .clock = PCI_CLOCK_INT,
+ .gpio = PCI_GNT1 | PCI_REQ1,
+ .irq = { [14] = INT_NUM_IM0_IRL0 + 22, },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_MII,
+};
+
+static void __init
+gigasx76x_init(void)
+{
+#define GIGASX76X_USB 29
+
+ ltq_register_gpio_stp();
+ ltq_register_nor(&gigasx76x_flash_data);
+ ltq_register_pci(&ltq_pci_data);
+ ltq_register_tapi();
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(gigasx76x_gpio_leds), gigasx76x_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL, ARRAY_SIZE(gigasx76x_gpio_keys), gigasx76x_gpio_keys);
+ xway_register_dwc(GIGASX76X_USB);
+
+ if (!is_valid_ether_addr(ltq_ethaddr))
+ random_ether_addr(ltq_ethaddr);
+
+ memcpy(&ltq_eth_data.mac.sa_data, ltq_ethaddr, 6);
+ ltq_register_etop(&ltq_eth_data);
+ if (board == SX762)
+ ltq_register_ath5k(sx762_eeprom_data, ltq_ethaddr);
+ else
+ ltq_register_ath5k(sx763_eeprom_data, ltq_ethaddr);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_GIGASX76X,
+ "GIGASX76X",
+ "GIGASX76X - Gigaset SX761,SX762,SX763",
+ gigasx76x_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.h
new file mode 100644
index 0000000000..74e5ba28fe
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-gigasx76x.h
@@ -0,0 +1,210 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Andrej Vlašić
+ * Copyright (C) 2011 Luka Perkov
+ *
+ */
+
+#ifndef _MACH_GIGASX76X_H__
+#define _MACH_GIGASX76X_H__
+
+#include <linux/ath5k_platform.h>
+
+static u16 sx763_eeprom_data[ATH5K_PLAT_EEP_MAX_WORDS] =
+{
+0x0013,0x168c,0x0200,0x0001,0x0000,0x5001,0x0000,0x2051,0x2051,0x1c0a,0x0100,
+0x0000,0x01c2,0x0002,0xc606,0x0001,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0xf165,0x7fbe,0x0003,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x5aa5,0x0000,0x0000,0x0313,0x4943,
+0x2053,0x7104,0x1202,0x0400,0x0306,0x0001,0x0000,0x0500,0x410e,0x39b1,0x1eb5,
+0x4e2d,0x3056,0xffff,0xe902,0x0700,0x0106,0x0000,0x0100,0x1500,0x0752,0x4101,
+0x6874,0x7265,0x736f,0x4320,0x6d6f,0x756d,0x696e,0x6163,0x6974,0x6e6f,0x2c73,
+0x4920,0x636e,0x002e,0x5241,0x3035,0x3130,0x302d,0x3030,0x2d30,0x3030,0x3030,
+0x5700,0x7269,0x6c65,0x7365,0x2073,0x414c,0x204e,0x6552,0x6566,0x6572,0x636e,
+0x2065,0x6143,0x6472,0x3000,0x0030,0x00ff,0x2100,0x0602,0x2201,0x0205,0x8d80,
+0x005b,0x0522,0x4002,0x8954,0x2200,0x0205,0x1b00,0x00b7,0x0522,0x8002,0x12a8,
+0x2201,0x0205,0x3600,0x016e,0x0522,0x0002,0x2551,0x2202,0x0205,0x6c00,0x02dc,
+0x0522,0x8002,0x37f9,0x2203,0x0205,0xa200,0x044a,0x0222,0x0803,0x0822,0x0604,
+0x0300,0xbe7f,0x65f1,0x0222,0x0105,0x00ff,0x0000,0x0000,0x0000,0x0000,0x0000,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0x0037,0x971f,0x5003,0x9a66,0x0001,0x81c4,0x016a,
+0x02ff,0x84ff,0x15a3,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x2d2c,0x0000,0x0000,0x0000,0x0000,0xe028,0xa492,0x1c00,
+0x000e,0xb8ca,0x0013,0x0000,0x0000,0x6b4b,0xc059,0x1571,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x2370,0x00a5,0x9618,0x419a,0x68a2,0xda35,0x001c,0x0007,0xb0ff,0x01b5,0x0000,
+0x0000,0xff70,0x19ff,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x3170,0x00a5,0x9618,0x419a,0x68a2,0xda35,
+0x001c,0x000e,0xb0ff,0x21b5,0x0000,0x2fd8,0xff70,0x1226,0x19ff,0x07be,0x6201,
+0x032e,0x0587,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x1112,
+0x1441,0x4231,0x3234,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x8000,0x0000,0x0000,0x0000,0x0000,0x8000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x4d31,0x7f54,0x3c93,0x1205,0x1931,
+0x492d,0x7f50,0x3c93,0x0e01,0x192d,0x0070,0x0000,0x8140,0x724b,0x2ba9,0x3a09,
+0x99d9,0x1949,0x0070,0x0000,0x80e0,0x624a,0x2af8,0x35c7,0x9d47,0x1938,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x7082,0x0820,0xb882,0x0820,0x7092,0x28a0,0x8992,
+0x28a0,0xa292,0x28a0,0x70a2,0xa7ac,0x0000,0x0000,0x2464,0x6424,0x0000,0x0000,
+0x70a2,0xa7ac,0x0000,0x0000,0x2464,0x6424,0x0000,0x0000,0x8989,0x0000,0x0000,
+0x0000,0x2424,0x0000,0x0000,0x0000,0x7075,0xa2ac,0xb800,0x0000,0x2464,0x2424,
+0x2400,0x0000,0x7075,0xa2ac,0x0000,0x0000,0x2464,0x2424,0x0000,0x0000,0x7075,
+0xa7ac,0x0000,0x0000,0x2464,0x6424,0x0000,0x0000,0x7075,0xa7ac,0x0000,0x0000,
+0x2464,0x6424,0x0000,0x0000,0x8989,0x0000,0x0000,0x0000,0x2424,0x0000,0x0000,
+0x0000,0x0000,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff};
+
+static u16 sx762_eeprom_data[ATH5K_PLAT_EEP_MAX_WORDS] =
+{
+0x001a,0x168c,0x0200,0x0001,0x0000,0x5001,0x0000,0x2051,0x2051,0x1c0a,0x0100,
+0x0000,0x01c2,0x0002,0xc606,0x0001,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0xf165,0x7fbe,0x0003,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x5aa5,0x0000,0x0000,0x0313,0x4943,
+0x2053,0x7104,0x1202,0x0400,0x0306,0x0001,0x0000,0x0500,0x410e,0x39b1,0x1eb5,
+0x4e2d,0x3056,0xffff,0xe902,0x0700,0x0106,0x0000,0x0100,0x1500,0x0752,0x4101,
+0x6874,0x7265,0x736f,0x4320,0x6d6f,0x756d,0x696e,0x6163,0x6974,0x6e6f,0x2c73,
+0x4920,0x636e,0x002e,0x5241,0x3035,0x3130,0x302d,0x3030,0x2d30,0x3030,0x3030,
+0x5700,0x7269,0x6c65,0x7365,0x2073,0x414c,0x204e,0x6552,0x6566,0x6572,0x636e,
+0x2065,0x6143,0x6472,0x3000,0x0030,0x00ff,0x2100,0x0602,0x2201,0x0205,0x8d80,
+0x005b,0x0522,0x4002,0x8954,0x2200,0x0205,0x1b00,0x00b7,0x0522,0x8002,0x12a8,
+0x2201,0x0205,0x3600,0x016e,0x0522,0x0002,0x2551,0x2202,0x0205,0x6c00,0x02dc,
+0x0522,0x8002,0x37f9,0x2203,0x0205,0xa200,0x044a,0x0222,0x0803,0x0822,0x0604,
+0x0300,0xbe7f,0x65f1,0x0222,0x0105,0x00ff,0x0000,0x0000,0x0000,0x0000,0x0000,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0x0037,0x6aaa,0x5003,0x9a66,0x0001,0x81c4,0x016a,
+0x02ff,0x84ff,0x15a3,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x2d2c,0x0000,0x0000,0x0000,0x0000,0xe028,0xa492,0x1c00,
+0x000e,0xb8ca,0x0013,0x0000,0x0000,0x6b4b,0xc059,0x1571,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x2370,0x00a5,0x9618,0x419a,0x68a2,0xda35,0x001c,0x0007,0xb0ff,0x01b5,0x0000,
+0x0000,0xff70,0x19ff,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x3170,0x00a5,0x9618,0x419a,0x68a2,0xda35,
+0x001c,0x000e,0xb0ff,0x21b5,0x0000,0x2fd8,0xff70,0x1226,0x19ff,0x07fa,0x6201,
+0x032e,0x0587,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x1112,
+0x1441,0x4231,0x3234,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x8000,0x0000,0x0000,0x0000,0x0000,0x8000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x4d31,0x7f54,0x3c93,0x1205,0x1931,
+0x492d,0x7f50,0x3c93,0x0e01,0x192d,0x0070,0x0000,0x8180,0x724d,0xab59,0x3a08,
+0xdd79,0x2559,0x0070,0x0000,0x81a0,0x6e4d,0x2b99,0x3a09,0x9989,0x2157,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,0x0000,
+0x0000,0x0000,0x0000,0x0000,0x7092,0x4924,0xb892,0x4924,0x7092,0x289e,0x8992,
+0x289e,0xa292,0x289e,0x70a2,0xa7ac,0x0000,0x0000,0x2462,0x5e13,0x0000,0x0000,
+0x70a2,0xa7ac,0x0000,0x0000,0x1e5c,0x5713,0x0000,0x0000,0x8989,0x0000,0x0000,
+0x0000,0x2424,0x0000,0x0000,0x0000,0x7075,0xa2ac,0xb800,0x0000,0x2868,0x2828,
+0x2800,0x0000,0x7075,0xa2ac,0x0000,0x0000,0x2868,0x2828,0x0000,0x0000,0x7075,
+0xac00,0x0000,0x0000,0x2161,0x2100,0x0000,0x0000,0x7075,0xac00,0x0000,0x0000,
+0x1b5b,0x1b00,0x0000,0x0000,0x8989,0x0000,0x0000,0x0000,0x2121,0x0000,0x0000,
+0x0000,0x0000,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,0xffff,
+0xffff,0xffff};
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-h201l.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-h201l.c
new file mode 100644
index 0000000000..86101f5888
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-h201l.c
@@ -0,0 +1,100 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Luka Perkov <openwrt@lukaperkov.net>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/string.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_platform.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+
+static u8 ltq_ethaddr[6] = { 0 };
+
+static int __init
+setup_ethaddr(char *str)
+{
+ if (!mac_pton(str, ltq_ethaddr))
+ memset(ltq_ethaddr, 0, 6);
+ return 0;
+}
+__setup("ethaddr=", setup_ethaddr);
+
+static struct mtd_partition h201l_partitions[] __initdata =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x20000,
+ .size = 0x10000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x30000,
+ .size = 0x7d0000,
+ },
+};
+
+static struct physmap_flash_data h201l_flash_data __initdata = {
+ .nr_parts = ARRAY_SIZE(h201l_partitions),
+ .parts = h201l_partitions,
+};
+
+static struct gpio_led
+h201l_leds_gpio[] __initdata = {
+};
+
+static struct gpio_keys_button
+h201l_gpio_keys[] __initdata = {
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_RMII,
+};
+
+static void __init
+h201l_init(void)
+{
+ ltq_register_gpio_stp();
+ ltq_register_nor(&h201l_flash_data);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(h201l_leds_gpio), h201l_leds_gpio);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL, ARRAY_SIZE(h201l_gpio_keys), h201l_gpio_keys);
+
+ if (!is_valid_ether_addr(ltq_ethaddr))
+ random_ether_addr(ltq_ethaddr);
+
+ memcpy(&ltq_eth_data.mac.sa_data, ltq_ethaddr, 6);
+ ltq_register_etop(&ltq_eth_data);
+
+ xway_register_dwc(-1);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_H201L,
+ "H201L",
+ "ZTE ZXV10 H201L",
+ h201l_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-netgear.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-netgear.c
new file mode 100644
index 0000000000..29b0728102
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-netgear.c
@@ -0,0 +1,239 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 Pieter Voorthuijsen <p.voorthuijsen@gmail.com>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/phy.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/ath9k_platform.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include <lantiq_soc.h>
+#include <irq.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+#include "dev-wifi-athxk.h"
+
+#include "../machtypes.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+#include "pci-ath-fixup.h"
+#include <mtd/mtd-abi.h>
+#include <asm-generic/sizes.h>
+
+static struct mtd_partition dgn3500_partitions[] = {
+ {
+ .name = "u-boot",
+ .offset = 0,
+ .size = 0x10000,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "environment",
+ .offset = 0x10000,
+ .size = 0x10000,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "calibration",
+ .offset = 0x20000,
+ .size = 0x10000,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "linux",
+ .offset = 0x50000,
+ .size = 0xfa0000,
+ },
+};
+
+static struct ltq_pci_data ltq_pci_data = {
+ .clock = PCI_CLOCK_INT,
+ .gpio = PCI_GNT1 | PCI_REQ1,
+ .irq = {
+ [14] = INT_NUM_IM0_IRL0 + 22,
+ },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_MII,
+};
+
+static struct gpio_led
+dgn3500_gpio_leds[] __initdata = {
+ { .name = "soc:green:power", .gpio = 34, .active_low = 1, },
+ { .name = "soc:red:power", .gpio = 39, .active_low = 1, },
+ { .name = "soc:orange:wlan", .gpio = 51, .active_low = 1, },
+ { .name = "soc:green:wps", .gpio = 52, .active_low = 1, },
+ { .name = "soc:green:usb", .gpio = 22, .active_low = 1, },
+ { .name = "soc:green:dsl", .gpio = 4, .active_low = 1, },
+ { .name = "soc:green:internet", .gpio = 2, .active_low = 1, },
+};
+
+static struct gpio_keys_button
+dgn3500_gpio_keys[] __initdata = {
+ {
+ .desc = "wps",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 54,
+ .active_low = 1,
+ },
+ {
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 36,
+ .active_low = 1,
+ },
+};
+
+#define SPI_GPIO_MRST 16
+#define SPI_GPIO_MTSR 17
+#define SPI_GPIO_CLK 18
+#define SPI_GPIO_CS0 10
+
+static struct spi_gpio_platform_data spi_gpio_data = {
+ .sck = SPI_GPIO_CLK,
+ .mosi = SPI_GPIO_MTSR,
+ .miso = SPI_GPIO_MRST,
+ .num_chipselect = 2,
+};
+
+static struct platform_device spi_gpio_device = {
+ .name = "spi_gpio",
+ .dev.platform_data = &spi_gpio_data,
+};
+
+static struct flash_platform_data spi_flash_data = {
+ .name = "sflash",
+ .parts = dgn3500_partitions,
+ .nr_parts = ARRAY_SIZE(dgn3500_partitions),
+};
+
+static struct spi_board_info spi_flash __initdata = {
+ .modalias = "m25p80",
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 10 * 1000 * 1000,
+ .mode = SPI_MODE_3,
+ .controller_data = (void *) SPI_GPIO_CS0,
+ .platform_data = &spi_flash_data
+};
+
+static u8 ltq_ethaddr[6] = { 0 };
+
+static int __init setup_ethaddr(char *str)
+{
+ if (!mac_pton(str, ltq_ethaddr))
+ memset(ltq_ethaddr, 0, 6);
+ return 0;
+}
+__setup("ethaddr=", setup_ethaddr);
+
+static u16 dgn3500_eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS] = {0};
+
+static ssize_t ath_eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ if (unlikely(offset >= sizeof(dgn3500_eeprom_data)))
+ return 0;
+ if ((offset + count) > sizeof(dgn3500_eeprom_data))
+ count = sizeof(dgn3500_eeprom_data) - offset;
+ if (unlikely(!count))
+ return count;
+
+ memcpy(buf, (char *)(dgn3500_eeprom_data) + offset, count);
+
+ return count;
+}
+
+extern struct ath9k_platform_data ath9k_pdata;
+
+static ssize_t ath_eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ int i;
+ char *eeprom_bytes = (char *)dgn3500_eeprom_data;
+
+ if (unlikely(offset >= sizeof(dgn3500_eeprom_data)))
+ return -EFBIG;
+ if ((offset + count) > sizeof(dgn3500_eeprom_data))
+ count = sizeof(dgn3500_eeprom_data) - offset;
+ if (unlikely(!count))
+ return count;
+ if (count % 2)
+ return 0;
+
+ /* The PCI fixup routine requires an endian swap of the calibration data
+ * stored in flash */
+ for (i = 0; i < count; i += 2) {
+ eeprom_bytes[offset + i + 1] = buf[i];
+ eeprom_bytes[offset + i] = buf[i+1];
+ }
+
+ /* The original data does not contain a checksum. Set the country and
+ * calculate new checksum when all data is received */
+ if ((count + offset) == sizeof(dgn3500_eeprom_data))
+ memcpy(ath9k_pdata.eeprom_data, dgn3500_eeprom_data,
+ sizeof(ath9k_pdata.eeprom_data));
+
+ return count;
+}
+
+static struct bin_attribute dev_attr_ath_eeprom = {
+ .attr = {
+ .name = "ath_eeprom",
+ .mode = S_IRUGO|S_IWUSR,
+ },
+ .read = ath_eeprom_read,
+ .write = ath_eeprom_write,
+};
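+
+/*
+ * Userspace is expected to copy the ART calibration blob from the
+ * "calibration" flash partition into /sys/firmware/ath_eeprom, e.g. with
+ * dd; the exact offset and length within that partition are board
+ * specific.
+ */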
+
+static void __init dgn3500_init(void)
+{
+ if (sysfs_create_bin_file(firmware_kobj, &dev_attr_ath_eeprom))
+ printk(KERN_INFO "Failed to create ath eeprom sysfs entry\n");
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(dgn3500_gpio_leds),
+ dgn3500_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(dgn3500_gpio_keys), dgn3500_gpio_keys);
+ platform_device_register(&spi_gpio_device);
+ ltq_register_pci(&ltq_pci_data);
+ spi_register_board_info(&spi_flash, 1);
+ if (!is_valid_ether_addr(ltq_ethaddr)) {
+ printk(KERN_INFO "MAC address invalid, using a random one\n");
+ random_ether_addr(ltq_ethaddr);
+ }
+ memcpy(&ltq_eth_data.mac.sa_data, ltq_ethaddr, 6);
+ ltq_register_etop(&ltq_eth_data);
+ ltq_register_ath9k(dgn3500_eeprom_data, ltq_ethaddr);
+ ltq_pci_ath_fixup(14, dgn3500_eeprom_data);
+ /* The usb power is always enabled, protected by a fuse */
+ xway_register_dwc(-1);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_DGN3500B,
+ "DGN3500B",
+ "Netgear DGN3500B",
+ dgn3500_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-p2601hnfx.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-p2601hnfx.c
new file mode 100644
index 0000000000..247dfb572c
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-p2601hnfx.c
@@ -0,0 +1,113 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+#include <linux/etherdevice.h>
+#include <linux/mdio-gpio.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_platform.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+
+static struct mtd_partition p2601hnfx_partitions[] __initdata =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x20000,
+ },
+ {
+ .name = "uboot_env",
+ .offset = 0x20000,
+ .size = 0x20000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x40000,
+ .size = 0xfc0000,
+ },
+};
+
+static struct physmap_flash_data p2601hnfx_flash_data __initdata = {
+ .nr_parts = ARRAY_SIZE(p2601hnfx_partitions),
+ .parts = p2601hnfx_partitions,
+};
+
+static struct gpio_led
+p2601hnfx_leds_gpio[] __initdata = {
+ { .name = "soc:yellow:phone", .gpio = 216, .active_low = 1 },
+ { .name = "soc:green:phone", .gpio = 217, .active_low = 1 },
+ { .name = "soc:yellow:wifi", .gpio = 218, .active_low = 1 },
+ { .name = "soc:green:power", .gpio = 219, .active_low = 1 },
+ { .name = "soc:red:internet", .gpio = 220, .active_low = 1 },
+ { .name = "soc:green:internet", .gpio = 221, .active_low = 1 },
+ { .name = "soc:green:dsl", .gpio = 222, .active_low = 1 },
+ { .name = "soc:green:wifi", .gpio = 223, .active_low = 1 },
+};
+
+static struct gpio_keys_button
+p2601hnfx_gpio_keys[] __initdata = {
+ {
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 53,
+ .active_low = 1,
+ },
+ {
+ .desc = "wifi",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 54,
+ .active_low = 1,
+ },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_RMII,
+};
+
+static void __init
+p2601hnfx_init(void)
+{
+#define P2601HNFX_USB 9
+
+ ltq_register_gpio_stp();
+ ltq_register_nor(&p2601hnfx_flash_data);
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(p2601hnfx_leds_gpio), p2601hnfx_leds_gpio);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL, ARRAY_SIZE(p2601hnfx_gpio_keys), p2601hnfx_gpio_keys);
+ ltq_register_etop(&ltq_eth_data);
+ xway_register_dwc(P2601HNFX_USB);
+
+ // enable the ethernet ports on the SoC
+// ltq_w32((ltq_r32(LTQ_GPORT_P0_CTL) & ~(1 << 17)) | (1 << 18), LTQ_GPORT_P0_CTL);
+// ltq_w32((ltq_r32(LTQ_GPORT_P1_CTL) & ~(1 << 17)) | (1 << 18), LTQ_GPORT_P1_CTL);
+// ltq_w32((ltq_r32(LTQ_GPORT_P2_CTL) & ~(1 << 17)) | (1 << 18), LTQ_GPORT_P2_CTL);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_P2601HNFX,
+ "P2601HNFX",
+ "ZyXEL P-2601HN-Fx",
+ p2601hnfx_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-wbmr.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-wbmr.c
new file mode 100644
index 0000000000..a57e092123
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/mach-wbmr.c
@@ -0,0 +1,120 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/gpio.h>
+#include <linux/gpio_buttons.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/input.h>
+
+#include <lantiq_soc.h>
+#include <irq.h>
+#include <dev-gpio-leds.h>
+#include <dev-gpio-buttons.h>
+
+#include "../machtypes.h"
+#include "devices.h"
+#include "dev-dwc_otg.h"
+
+static struct mtd_partition wbmr_partitions[] =
+{
+ {
+ .name = "uboot",
+ .offset = 0x0,
+ .size = 0x40000,
+ },
+ {
+ .name = "uboot-env",
+ .offset = 0x40000,
+ .size = 0x20000,
+ },
+ {
+ .name = "linux",
+ .offset = 0x60000,
+ .size = 0x1f20000,
+ },
+ {
+ .name = "calibration",
+ .offset = 0x1fe0000,
+ .size = 0x20000,
+ },
+};
+
+static struct physmap_flash_data wbmr_flash_data = {
+ .nr_parts = ARRAY_SIZE(wbmr_partitions),
+ .parts = wbmr_partitions,
+};
+
+static struct gpio_led
+wbmr_gpio_leds[] __initdata = {
+ { .name = "soc:blue:movie", .gpio = 20, .active_low = 1, },
+ { .name = "soc:red:internet", .gpio = 18, .active_low = 1, },
+ { .name = "soc:green:internet", .gpio = 17, .active_low = 1, },
+ { .name = "soc:green:adsl", .gpio = 16, .active_low = 1, },
+ { .name = "soc:green:wlan", .gpio = 15, .active_low = 1, },
+ { .name = "soc:red:security", .gpio = 14, .active_low = 1, },
+ { .name = "soc:green:power", .gpio = 1, .active_low = 1, },
+ { .name = "soc:red:power", .gpio = 5, .active_low = 1, },
+ { .name = "soc:green:usb", .gpio = 28, .active_low = 1, },
+};
+
+static struct gpio_keys_button
+wbmr_gpio_keys[] __initdata = {
+ {
+ .desc = "aoss",
+ .type = EV_KEY,
+ .code = BTN_0,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 0,
+ .active_low = 1,
+ },
+ {
+ .desc = "reset",
+ .type = EV_KEY,
+ .code = BTN_1,
+ .debounce_interval = LTQ_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = 37,
+ .active_low = 1,
+ },
+};
+
+static struct ltq_pci_data ltq_pci_data = {
+ .clock = PCI_CLOCK_INT,
+ .gpio = PCI_GNT1 | PCI_REQ1,
+ .irq = {
+ [14] = INT_NUM_IM0_IRL0 + 22,
+ },
+};
+
+static struct ltq_eth_data ltq_eth_data = {
+ .mii_mode = PHY_INTERFACE_MODE_RGMII,
+};
+
+static void __init
+wbmr_init(void)
+{
+#define WMBR_BRN_MAC 0x1fd0024
+
+ ltq_add_device_gpio_leds(-1, ARRAY_SIZE(wbmr_gpio_leds), wbmr_gpio_leds);
+ ltq_register_gpio_keys_polled(-1, LTQ_KEYS_POLL_INTERVAL, ARRAY_SIZE(wbmr_gpio_keys), wbmr_gpio_keys);
+ ltq_register_nor(&wbmr_flash_data);
+ ltq_register_pci(&ltq_pci_data);
+ memcpy_fromio(&ltq_eth_data.mac.sa_data,
+ (void *)KSEG1ADDR(LTQ_FLASH_START + WMBR_BRN_MAC), 6);
+ ltq_register_etop(&ltq_eth_data);
+ xway_register_dwc(36);
+}
+
+MIPS_MACHINE(LANTIQ_MACH_WBMR,
+ "WBMR",
+ "WBMR",
+ wbmr_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/nand.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/nand.c
new file mode 100644
index 0000000000..9ab91d8819
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/nand.c
@@ -0,0 +1,216 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_irq.h>
+#include <lantiq_platform.h>
+
+#include "devices.h"
+
+/* nand registers */
+#define LTQ_EBU_NAND_WAIT 0xB4
+#define LTQ_EBU_NAND_ECC0 0xB8
+#define LTQ_EBU_NAND_ECC_AC 0xBC
+#define LTQ_EBU_NAND_CON 0xB0
+#define LTQ_EBU_ADDSEL1 0x24
+
+/* gpio definitions */
+#define PIN_ALE 13
+#define PIN_CLE 24
+#define PIN_CS1 23
+#define PIN_RDY 48 /* NFLASH_READY */
+#define PIN_RD 49 /* NFLASH_READ_N */
+
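+/*
+ * The EBU NAND controller drives the CLE/ALE/CS latch signals from the low
+ * address bits of the access, so writing a byte to NAND_BASE_ADDRESS ORed
+ * with one of the NAND_WRITE_* offsets below issues it as a command, an
+ * address cycle or plain data.
+ */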
+#define NAND_CMD_ALE (1 << 2)
+#define NAND_CMD_CLE (1 << 3)
+#define NAND_CMD_CS (1 << 4)
+#define NAND_WRITE_CMD_RESET 0xff
+#define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA (NAND_CMD_CS)
+#define NAND_READ_DATA (NAND_CMD_CS)
+#define NAND_WAIT_WR_C (1 << 3)
+#define NAND_WAIT_RD (0x1)
+
+#define ADDSEL1_MASK(x) (x << 4)
+#define ADDSEL1_REGEN 1
+#define BUSCON1_SETUP (1 << 22)
+#define BUSCON1_BCGEN_RES (0x3 << 12)
+#define BUSCON1_WAITWRC2 (2 << 8)
+#define BUSCON1_WAITRDC2 (2 << 6)
+#define BUSCON1_HOLDC1 (1 << 4)
+#define BUSCON1_RECOVC1 (1 << 2)
+#define BUSCON1_CMULT4 1
+#define NAND_CON_NANDM 1
+#define NAND_CON_CSMUX (1 << 1)
+#define NAND_CON_CS_P (1 << 4)
+#define NAND_CON_SE_P (1 << 5)
+#define NAND_CON_WP_P (1 << 6)
+#define NAND_CON_PRE_P (1 << 7)
+#define NAND_CON_IN_CS0 0
+#define NAND_CON_OUT_CS0 0
+#define NAND_CON_IN_CS1 (1 << 8)
+#define NAND_CON_OUT_CS1 (1 << 10)
+#define NAND_CON_CE (1 << 20)
+
+#define NAND_BASE_ADDRESS (KSEG1 | 0x14000000)
+
+static const char *part_probes[] = { "cmdlinepart", NULL };
+
+static void xway_select_chip(struct mtd_info *mtd, int chip)
+{
+ switch (chip) {
+ case -1:
+ ltq_ebu_w32_mask(NAND_CON_CE, 0, LTQ_EBU_NAND_CON);
+ ltq_ebu_w32_mask(NAND_CON_NANDM, 0, LTQ_EBU_NAND_CON);
+ break;
+ case 0:
+ ltq_ebu_w32_mask(0, NAND_CON_NANDM, LTQ_EBU_NAND_CON);
+ ltq_ebu_w32_mask(0, NAND_CON_CE, LTQ_EBU_NAND_CON);
+ /* reset the nand chip */
+ while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ ltq_w32(NAND_WRITE_CMD_RESET,
+ ((u32 *) (NAND_BASE_ADDRESS | NAND_WRITE_CMD)));
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void xway_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ if (ctrl & NAND_CLE)
+ this->IO_ADDR_W = (void __iomem *)
+ (NAND_BASE_ADDRESS | NAND_WRITE_CMD);
+ else if (ctrl & NAND_ALE)
+ this->IO_ADDR_W = (void __iomem *)
+ (NAND_BASE_ADDRESS | NAND_WRITE_ADDR);
+ }
+
+ if (data != NAND_CMD_NONE) {
+ *(volatile u8*) ((u32) this->IO_ADDR_W) = data;
+ while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ }
+}
+
+static int xway_dev_ready(struct mtd_info *mtd)
+{
+ return ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+void nand_write(unsigned int addr, unsigned int val)
+{
+ ltq_w32(val, ((u32 *) (NAND_BASE_ADDRESS | addr)));
+ while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+}
+
+unsigned char xway_read_byte(struct mtd_info *mtd)
+{
+ return ltq_r8((void __iomem *)(NAND_BASE_ADDRESS | (NAND_READ_DATA)));
+}
+
+static void xway_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ {
+ unsigned char res8 = ltq_r8((void __iomem *)(NAND_BASE_ADDRESS | (NAND_READ_DATA)));
+ buf[i] = res8;
+ }
+}
+
+static void xway_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ {
+ ltq_w8(buf[i], ((u32*)(NAND_BASE_ADDRESS | (NAND_WRITE_DATA))));
+ while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+ }
+}
+
+int xway_probe(struct platform_device *pdev)
+{
+ /* might need this later ?
+ ltq_gpio_request(PIN_CS1, 2, 1, "NAND_CS1");
+ */
+ ltq_gpio_request(&pdev->dev, PIN_CLE, 2, 1, "NAND_CLE");
+ ltq_gpio_request(&pdev->dev, PIN_ALE, 2, 1, "NAND_ALE");
+ if (ltq_is_ar9() || ltq_is_vr9()) {
+ ltq_gpio_request(&pdev->dev, PIN_RDY, 2, 0, "NAND_BSY");
+ ltq_gpio_request(&pdev->dev, PIN_RD, 2, 1, "NAND_RD");
+ }
+
+ ltq_ebu_w32((NAND_BASE_ADDRESS & 0x1fffff00)
+ | ADDSEL1_MASK(3) | ADDSEL1_REGEN, LTQ_EBU_ADDSEL1);
+
+ ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+ | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+ | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+ ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+ | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+ | NAND_CON_IN_CS0 | NAND_CON_OUT_CS0, LTQ_EBU_NAND_CON);
+
+ ltq_w32(NAND_WRITE_CMD_RESET,
+ ((u32 *) (NAND_BASE_ADDRESS | NAND_WRITE_CMD)));
+ while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+ ;
+
+ return 0;
+}
+
+static struct platform_nand_data falcon_flash_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_delay = 30,
+ .part_probe_types = part_probes,
+ },
+ .ctrl = {
+ .probe = xway_probe,
+ .cmd_ctrl = xway_cmd_ctrl,
+ .dev_ready = xway_dev_ready,
+ .select_chip = xway_select_chip,
+ .read_byte = xway_read_byte,
+ .read_buf = xway_read_buf,
+ .write_buf = xway_write_buf,
+ }
+};
+
+static struct resource ltq_nand_res =
+ MEM_RES("nand", 0x14000000, 0x7ffffff);
+
+static struct platform_device ltq_flash_nand = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = 1,
+ .resource = &ltq_nand_res,
+ .dev = {
+ .platform_data = &falcon_flash_nand_data,
+ },
+};
+
+void __init xway_register_nand(struct mtd_partition *parts, int count)
+{
+ falcon_flash_nand_data.chip.partitions = parts;
+ falcon_flash_nand_data.chip.nr_partitions = count;
+ platform_device_register(&ltq_flash_nand);
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.c
new file mode 100644
index 0000000000..c87ffb2091
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.c
@@ -0,0 +1,109 @@
+/*
+ * Atheros AP94 reference board PCI initialization
+ *
+ * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <lantiq_soc.h>
+
+#define LTQ_PCI_MEM_BASE 0x18000000
+
+struct ath_fixup {
+ u16 *cal_data;
+ unsigned slot;
+};
+
+static int ath_num_fixups;
+static struct ath_fixup ath_fixups[2];
+
+static void ath_pci_fixup(struct pci_dev *dev)
+{
+ void __iomem *mem;
+ u16 *cal_data = NULL;
+ u16 cmd;
+ u32 bar0;
+ u32 val;
+ unsigned i;
+
+ for (i = 0; i < ath_num_fixups; i++) {
+ if (ath_fixups[i].cal_data == NULL)
+ continue;
+
+ if (ath_fixups[i].slot != PCI_SLOT(dev->devfn))
+ continue;
+
+ cal_data = ath_fixups[i].cal_data;
+ break;
+ }
+
+ if (cal_data == NULL)
+ return;
+
+ if (*cal_data != 0xa55a) {
+ pr_err("pci %s: invalid calibration data\n", pci_name(dev));
+ return;
+ }
+
+ pr_info("pci %s: fixup device configuration\n", pci_name(dev));
+
+ mem = ioremap(LTQ_PCI_MEM_BASE, 0x10000);
+ if (!mem) {
+ pr_err("pci %s: ioremap error\n", pci_name(dev));
+ return;
+ }
+
+ pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar0);
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, LTQ_PCI_MEM_BASE);
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ /* set pointer to first reg address */
+ cal_data += 3;
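+ /*
+ * Each record in the blob is a 16-bit register offset followed by two
+ * 16-bit words that form the 32-bit value (low word first); the list is
+ * terminated by 0xffff.
+ */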
+ while (*cal_data != 0xffff) {
+ u32 reg;
+ reg = *cal_data++;
+ val = *cal_data++;
+ val |= (*cal_data++) << 16;
+
+ ltq_w32(swab32(val), mem + reg);
+ udelay(100);
+ }
+
+ pci_read_config_dword(dev, PCI_VENDOR_ID, &val);
+ dev->vendor = val & 0xffff;
+ dev->device = (val >> 16) & 0xffff;
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &val);
+ dev->revision = val & 0xff;
+ dev->class = val >> 8; /* upper 3 bytes */
+
+ pr_info("pci %s: fixup info: [%04x:%04x] revision %02x class %#08x\n",
+ pci_name(dev), dev->vendor, dev->device, dev->revision, dev->class);
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ cmd &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
+
+ iounmap(mem);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATHEROS, PCI_ANY_ID, ath_pci_fixup);
+
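+/*
+ * Board code (e.g. mach-netgear.c) registers its calibration blob and PCI
+ * slot here before the bus is scanned, so that the early fixup above can
+ * find a matching entry.
+ */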
+void __init ltq_pci_ath_fixup(unsigned slot, u16 *cal_data)
+{
+ if (ath_num_fixups >= ARRAY_SIZE(ath_fixups))
+ return;
+
+ ath_fixups[ath_num_fixups].slot = slot;
+ ath_fixups[ath_num_fixups].cal_data = cal_data;
+ ath_num_fixups++;
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.h b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.h
new file mode 100644
index 0000000000..095d2619ce
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/pci-ath-fixup.h
@@ -0,0 +1,6 @@
+#ifndef _PCI_ATH_FIXUP
+#define _PCI_ATH_FIXUP
+
+void ltq_pci_ath_fixup(unsigned slot, u16 *cal_data) __init;
+
+#endif /* _PCI_ATH_FIXUP */
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/prom.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 0000000000..f776d5a914
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,110 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+#include "devices.h"
+
+#define SOC_DANUBE "Danube"
+#define SOC_TWINPASS "Twinpass"
+#define SOC_AMAZON_SE "Amazon_SE"
+#define SOC_AR9 "AR9"
+#define SOC_GR9 "GR9"
+#define SOC_VR9 "VR9"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFFFFF
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_DANUBE1:
+ case SOC_ID_DANUBE2:
+ i->name = SOC_DANUBE;
+ i->type = SOC_TYPE_DANUBE;
+ break;
+
+ case SOC_ID_TWINPASS:
+ i->name = SOC_TWINPASS;
+ i->type = SOC_TYPE_DANUBE;
+ break;
+
+ case SOC_ID_ARX188:
+ case SOC_ID_ARX168_1:
+ case SOC_ID_ARX168_2:
+ case SOC_ID_ARX182:
+ i->name = SOC_AR9;
+ i->type = SOC_TYPE_AR9;
+ break;
+
+ case SOC_ID_GRX188:
+ case SOC_ID_GRX168:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_AR9;
+ break;
+
+ case SOC_ID_AMAZON_SE_1:
+ case SOC_ID_AMAZON_SE_2:
+ i->name = SOC_AMAZON_SE;
+ i->type = SOC_TYPE_AMAZON_SE;
+#ifdef CONFIG_PCI
+ panic("ase is only supported for non pci kernels");
+#endif
+ break;
+
+ case SOC_ID_VRX282:
+ case SOC_ID_VRX268:
+ case SOC_ID_VRX288:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_1;
+ break;
+
+ case SOC_ID_GRX268:
+ case SOC_ID_GRX288:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_1;
+ break;
+
+ case SOC_ID_VRX268_2:
+ case SOC_ID_VRX288_2:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_2;
+ break;
+
+ case SOC_ID_GRX282_2:
+ case SOC_ID_GRX288_2:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_2;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
+
+void __init ltq_soc_setup(void)
+{
+ if (ltq_is_ase())
+ ltq_register_ase_asc();
+ else
+ ltq_register_asc(1);
+ ltq_register_gpio();
+ ltq_register_wdt();
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/sysctrl.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 0000000000..de4ce8f1cb
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,283 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../devices.h"
+
+/* clock control register */
+#define CGU_IFCCR 0x0018
+/* system clock register */
+#define CGU_SYS 0x0010
+/* pci control register */
+#define CGU_PCICR 0x0034
+/* ephy configuration register */
+#define CGU_EPHY 0x10
+/* power control register */
+#define PMU_PWDCR 0x1C
+/* power status register */
+#define PMU_PWDSR 0x20
+/* power control register */
+#define PMU_PWDCR1 0x24
+/* power status register */
+#define PMU_PWDSR1 0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
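+/*
+ * clk->module selects the register pair: 0 uses PWDCR/PWDSR, 1 uses
+ * PWDCR1/PWDSR1 (the second power domain, used for the PCIe bits on VR9).
+ */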
+
+/* PMU - power management unit */
+#define PMU_USB0_P BIT(0)
+#define PMU_PCI BIT(4)
+#define PMU_DMA BIT(5)
+#define PMU_USB0 BIT(6)
+#define PMU_EPHY BIT(7) /* ase */
+#define PMU_SPI BIT(8)
+#define PMU_DFE BIT(9)
+#define PMU_EBU BIT(10)
+#define PMU_STP BIT(11)
+#define PMU_GPT BIT(12)
+#define PMU_PPE BIT(13)
+#define PMU_AHBS BIT(13) /* vr9 */
+#define PMU_FPI BIT(14)
+#define PMU_AHBM BIT(15)
+#define PMU_PPE_QSB BIT(18)
+#define PMU_PPE_SLL01 BIT(19)
+#define PMU_PPE_TC BIT(21)
+#define PMU_PPE_EMA BIT(22)
+#define PMU_PPE_DPLUM BIT(23)
+#define PMU_PPE_DPLUS BIT(24)
+#define PMU_USB1_P BIT(26)
+#define PMU_USB1 BIT(27)
+#define PMU_SWITCH BIT(28)
+#define PMU_PPE_TOP BIT(29)
+#define PMU_GPHY BIT(30)
+#define PMU_PCIE_CLK BIT(31)
+
+#define PMU1_PCIE_PHY BIT(0)
+#define PMU1_PCIE_CTL BIT(1)
+#define PMU1_PCIE_PDI BIT(4)
+#define PMU1_PCIE_MSI BIT(5)
+
+#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y))
+#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x))
+
+static struct resource ltq_cgu_resource =
+ MEM_RES("cgu", LTQ_CGU_BASE_ADDR, LTQ_CGU_SIZE);
+
+static struct resource ltq_pmu_resource =
+ MEM_RES("pmu", LTQ_PMU_BASE_ADDR, LTQ_PMU_SIZE);
+
+static struct resource ltq_ebu_resource =
+ MEM_RES("ebu", LTQ_EBU_BASE_ADDR, LTQ_EBU_SIZE);
+
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+static void __iomem *ltq_pmu_membase;
+
+static int ltq_cgu_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
+ return 0;
+}
+
+static void ltq_cgu_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
+}
+
+static int ltq_pmu_enable(struct clk *clk)
+{
+ int err = 1000000;
+
+ ltq_pmu_w32(ltq_pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+ PWDCR(clk->module));
+ do {} while (--err && (ltq_pmu_r32(PWDSR(clk->module)) & clk->bits));
+
+ if (!err)
+ panic("activating PMU module failed!\n");
+
+ return 0;
+}
+
+static void ltq_pmu_disable(struct clk *clk)
+{
+ ltq_pmu_w32(ltq_pmu_r32(PWDCR(clk->module)) | clk->bits,
+ PWDCR(clk->module));
+}
+
+static int ltq_pci_enable(struct clk *clk)
+{
+ unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
+ /* set clock bus speed */
+ if (ltq_is_ar9()) {
+ ifccr &= ~0x1f00000;
+ if (clk->rate == CLOCK_33M)
+ ifccr |= 0xe00000;
+ else
+ ifccr |= 0x700000; /* 62.5M */
+ } else {
+ ifccr &= ~0xf00000;
+ if (clk->rate == CLOCK_33M)
+ ifccr |= 0x800000;
+ else
+ ifccr |= 0x400000; /* 62.5M */
+ }
+ ltq_cgu_w32(ifccr, CGU_IFCCR);
+ return 0;
+}
+
+static int ltq_pci_ext_enable(struct clk *clk)
+{
+ /* enable external pci clock */
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
+ CGU_IFCCR);
+ ltq_cgu_w32((1 << 30), CGU_PCICR);
+ return 0;
+}
+
+static void ltq_pci_ext_disable(struct clk *clk)
+{
+ /* disable external pci clock (internal) */
+ ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
+ CGU_IFCCR);
+ ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
+}
+
+/* manage the clock gates via PMU */
+static inline void clkdev_add_pmu(const char *dev, const char *con,
+ unsigned int module, unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = ltq_pmu_enable;
+ clk->disable = ltq_pmu_disable;
+ clk->module = module;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static inline void clkdev_add_cgu(const char *dev, const char *con,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = ltq_cgu_enable;
+ clk->disable = ltq_cgu_disable;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function */
+static inline void clkdev_add_pci(void)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+ clk->cl.dev_id = "ltq_pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->enable = ltq_pci_enable;
+ clk->disable = ltq_pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+
+ /* use internal/external bus clock */
+ clk_ext->cl.dev_id = "ltq_pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = ltq_pci_ext_enable;
+ clk_ext->disable = ltq_pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+
+}
+
+void __init ltq_soc_init(void)
+{
+ ltq_pmu_membase = ltq_remap_resource(&ltq_pmu_resource);
+ if (!ltq_pmu_membase)
+ panic("Failed to remap pmu memory\n");
+
+ ltq_cgu_membase = ltq_remap_resource(&ltq_cgu_resource);
+ if (!ltq_cgu_membase)
+ panic("Failed to remap cgu memory\n");
+
+ ltq_ebu_membase = ltq_remap_resource(&ltq_ebu_resource);
+ if (!ltq_ebu_membase)
+ panic("Failed to remap ebu memory\n");
+
+ /* make sure to unprotect the memory region where flash is located */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+ /* add our clocks */
+ clkdev_add_pmu("ltq_fpi", NULL, 0, PMU_FPI);
+ clkdev_add_pmu("ltq_dma", NULL, 0, PMU_DMA);
+ clkdev_add_pmu("ltq_stp", NULL, 0, PMU_STP);
+ clkdev_add_pmu("ltq_spi.0", NULL, 0, PMU_SPI);
+ clkdev_add_pmu("ltq_gptu", NULL, 0, PMU_GPT);
+ clkdev_add_pmu("ltq_ebu", NULL, 0, PMU_EBU);
+ if (!ltq_is_vr9())
+ clkdev_add_pmu("ltq_etop", NULL, 0, PMU_PPE);
+ if (!ltq_is_ase())
+ clkdev_add_pci();
+ if (ltq_is_ase()) {
+ if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+ clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+ else
+ clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_cgu("ltq_etop", "ephycgu", CGU_EPHY);
+ clkdev_add_pmu("ltq_etop", "ephy", 0, PMU_EPHY);
+ clkdev_add_pmu("ltq_dsl", NULL, 0,
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_AHBS | PMU_DFE);
+ } else if (ltq_is_vr9()) {
+ clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+ ltq_vr9_fpi_hz());
+ clkdev_add_pmu("ltq_pcie", "phy", 1, PMU1_PCIE_PHY);
+ clkdev_add_pmu("ltq_pcie", "bus", 0, PMU_PCIE_CLK);
+ clkdev_add_pmu("ltq_pcie", "msi", 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("ltq_pcie", "pdi", 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("ltq_pcie", "ctl", 1, PMU1_PCIE_CTL);
+ clkdev_add_pmu("ltq_pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
+ clkdev_add_pmu("usb0", NULL, 0, PMU_USB0 | PMU_USB0_P);
+ clkdev_add_pmu("usb1", NULL, 0, PMU_USB1 | PMU_USB1_P);
+ clkdev_add_pmu("ltq_vrx200", NULL, 0,
+ PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB);
+ clkdev_add_pmu("ltq_dsl", NULL, 0, PMU_DFE | PMU_AHBS);
+ } else if (ltq_is_ar9()) {
+ clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+ ltq_ar9_fpi_hz());
+ clkdev_add_pmu("ltq_etop", "switch", 0, PMU_SWITCH);
+ clkdev_add_pmu("ltq_dsl", NULL, 0,
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB | PMU_AHBS | PMU_DFE);
+ } else {
+ clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+ ltq_danube_io_region_clock());
+ clkdev_add_pmu("ltq_dsl", NULL, 0,
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB | PMU_AHBS | PMU_DFE);
+ }
+}
diff --git a/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/timer.c b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/timer.c
new file mode 100644
index 0000000000..9794c87c73
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/lantiq/xway/timer.c
@@ -0,0 +1,846 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+
+#include <asm/irq.h>
+#include <asm/div64.h>
+#include "../clk.h"
+
+#include <lantiq_soc.h>
+#include <lantiq_irq.h>
+#include <lantiq_timer.h>
+
+#define MAX_NUM_OF_32BIT_TIMER_BLOCKS 6
+
+#ifdef TIMER1A
+#define FIRST_TIMER TIMER1A
+#else
+#define FIRST_TIMER 2
+#endif
+
+/*
+ * GPTC divider is set or not.
+ */
+#define GPTU_CLC_RMC_IS_SET 0
+
+/*
+ * Timer Interrupt (IRQ)
+ */
+/* Must be adjusted when ICU driver is available */
+#define TIMER_INTERRUPT (INT_NUM_IM3_IRL0 + 22)
+
+/*
+ * Bits Operation
+ */
+#define GET_BITS(x, msb, lsb) \
+ (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
+#define SET_BITS(x, msb, lsb, value) \
+ (((x) & ~(((1 << ((msb) + 1)) - 1) ^ ((1 << (lsb)) - 1))) | \
+ (((value) & ((1 << (1 + (msb) - (lsb))) - 1)) << (lsb)))
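+/* e.g. SET_BITS(0, 23, 16, 0x5a) == 0x005a0000 */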
+
+/*
+ * GPTU Register Mapping
+ */
+#define LQ_GPTU (KSEG1 + 0x1E100A00)
+#define LQ_GPTU_CLC ((volatile u32 *)(LQ_GPTU + 0x0000))
+#define LQ_GPTU_ID ((volatile u32 *)(LQ_GPTU + 0x0008))
+#define LQ_GPTU_CON(n, X) ((volatile u32 *)(LQ_GPTU + 0x0010 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */
+#define LQ_GPTU_RUN(n, X) ((volatile u32 *)(LQ_GPTU + 0x0018 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */
+#define LQ_GPTU_RELOAD(n, X) ((volatile u32 *)(LQ_GPTU + 0x0020 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */
+#define LQ_GPTU_COUNT(n, X) ((volatile u32 *)(LQ_GPTU + 0x0028 + ((X) * 4) + ((n) - 1) * 0x0020)) /* X must be either A or B */
+#define LQ_GPTU_IRNEN ((volatile u32 *)(LQ_GPTU + 0x00F4))
+#define LQ_GPTU_IRNICR ((volatile u32 *)(LQ_GPTU + 0x00F8))
+#define LQ_GPTU_IRNCR ((volatile u32 *)(LQ_GPTU + 0x00FC))
+
+/*
+ * Clock Control Register
+ */
+#define GPTU_CLC_SMC GET_BITS(*LQ_GPTU_CLC, 23, 16)
+#define GPTU_CLC_RMC GET_BITS(*LQ_GPTU_CLC, 15, 8)
+#define GPTU_CLC_FSOE (*LQ_GPTU_CLC & (1 << 5))
+#define GPTU_CLC_EDIS (*LQ_GPTU_CLC & (1 << 3))
+#define GPTU_CLC_SPEN (*LQ_GPTU_CLC & (1 << 2))
+#define GPTU_CLC_DISS (*LQ_GPTU_CLC & (1 << 1))
+#define GPTU_CLC_DISR (*LQ_GPTU_CLC & (1 << 0))
+
+#define GPTU_CLC_SMC_SET(value) SET_BITS(0, 23, 16, (value))
+#define GPTU_CLC_RMC_SET(value) SET_BITS(0, 15, 8, (value))
+#define GPTU_CLC_FSOE_SET(value) ((value) ? (1 << 5) : 0)
+#define GPTU_CLC_SBWE_SET(value) ((value) ? (1 << 4) : 0)
+#define GPTU_CLC_EDIS_SET(value) ((value) ? (1 << 3) : 0)
+#define GPTU_CLC_SPEN_SET(value) ((value) ? (1 << 2) : 0)
+#define GPTU_CLC_DISR_SET(value) ((value) ? (1 << 0) : 0)
+
+/*
+ * ID Register
+ */
+#define GPTU_ID_ID GET_BITS(*LQ_GPTU_ID, 15, 8)
+#define GPTU_ID_CFG GET_BITS(*LQ_GPTU_ID, 7, 5)
+#define GPTU_ID_REV GET_BITS(*LQ_GPTU_ID, 4, 0)
+
+/*
+ * Control Register of Timer/Counter nX
+ * n is the index of block (1 based index)
+ * X is either A or B
+ */
+#define GPTU_CON_SRC_EG(n, X) (*LQ_GPTU_CON(n, X) & (1 << 10))
+#define GPTU_CON_SRC_EXT(n, X) (*LQ_GPTU_CON(n, X) & (1 << 9))
+#define GPTU_CON_SYNC(n, X) (*LQ_GPTU_CON(n, X) & (1 << 8))
+#define GPTU_CON_EDGE(n, X) GET_BITS(*LQ_GPTU_CON(n, X), 7, 6)
+#define GPTU_CON_INV(n, X) (*LQ_GPTU_CON(n, X) & (1 << 5))
+#define GPTU_CON_EXT(n, X) (*LQ_GPTU_CON(n, A) & (1 << 4)) /* Timer/Counter B does not have this bit */
+#define GPTU_CON_STP(n, X) (*LQ_GPTU_CON(n, X) & (1 << 3))
+#define GPTU_CON_CNT(n, X) (*LQ_GPTU_CON(n, X) & (1 << 2))
+#define GPTU_CON_DIR(n, X) (*LQ_GPTU_CON(n, X) & (1 << 1))
+#define GPTU_CON_EN(n, X) (*LQ_GPTU_CON(n, X) & (1 << 0))
+
+#define GPTU_CON_SRC_EG_SET(value) ((value) ? 0 : (1 << 10))
+#define GPTU_CON_SRC_EXT_SET(value) ((value) ? (1 << 9) : 0)
+#define GPTU_CON_SYNC_SET(value) ((value) ? (1 << 8) : 0)
+#define GPTU_CON_EDGE_SET(value) SET_BITS(0, 7, 6, (value))
+#define GPTU_CON_INV_SET(value) ((value) ? (1 << 5) : 0)
+#define GPTU_CON_EXT_SET(value) ((value) ? (1 << 4) : 0)
+#define GPTU_CON_STP_SET(value) ((value) ? (1 << 3) : 0)
+#define GPTU_CON_CNT_SET(value) ((value) ? (1 << 2) : 0)
+#define GPTU_CON_DIR_SET(value) ((value) ? (1 << 1) : 0)
+
+#define GPTU_RUN_RL_SET(value) ((value) ? (1 << 2) : 0)
+#define GPTU_RUN_CEN_SET(value) ((value) ? (1 << 1) : 0)
+#define GPTU_RUN_SEN_SET(value) ((value) ? (1 << 0) : 0)
+
+#define GPTU_IRNEN_TC_SET(n, X, value) ((value) ? (1 << (((n) - 1) * 2 + (X))) : 0)
+#define GPTU_IRNCR_TC_SET(n, X, value) ((value) ? (1 << (((n) - 1) * 2 + (X))) : 0)
+
+#define TIMER_FLAG_MASK_SIZE(x) (x & 0x0001)
+#define TIMER_FLAG_MASK_TYPE(x) (x & 0x0002)
+#define TIMER_FLAG_MASK_STOP(x) (x & 0x0004)
+#define TIMER_FLAG_MASK_DIR(x) (x & 0x0008)
+#define TIMER_FLAG_NONE_EDGE 0x0000
+#define TIMER_FLAG_MASK_EDGE(x) (x & 0x0030)
+#define TIMER_FLAG_REAL 0x0000
+#define TIMER_FLAG_INVERT 0x0040
+#define TIMER_FLAG_MASK_INVERT(x) (x & 0x0040)
+#define TIMER_FLAG_MASK_TRIGGER(x) (x & 0x0070)
+#define TIMER_FLAG_MASK_SYNC(x) (x & 0x0080)
+#define TIMER_FLAG_CALLBACK_IN_HB 0x0200
+#define TIMER_FLAG_MASK_HANDLE(x) (x & 0x0300)
+#define TIMER_FLAG_MASK_SRC(x) (x & 0x1000)
+
+struct timer_dev_timer {
+ unsigned int f_irq_on;
+ unsigned int irq;
+ unsigned int flag;
+ unsigned long arg1;
+ unsigned long arg2;
+};
+
+struct timer_dev {
+ struct mutex gptu_mutex;
+ unsigned int number_of_timers;
+ unsigned int occupation;
+ unsigned int f_gptu_on;
+ struct timer_dev_timer timer[MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2];
+};
+
+unsigned long ltq_danube_fpi_bus_clock(int fpi);
+unsigned long ltq_vr9_fpi_bus_clock(int fpi);
+
+unsigned int ltq_get_fpi_bus_clock(int fpi)
+{
+ if (ltq_is_ase())
+ return CLOCK_133M;
+ else if (ltq_is_vr9())
+ return ltq_vr9_fpi_bus_clock(fpi);
+
+ return ltq_danube_fpi_bus_clock(fpi);
+}
+
+
+static long gptu_ioctl(struct file *, unsigned int, unsigned long);
+static int gptu_open(struct inode *, struct file *);
+static int gptu_release(struct inode *, struct file *);
+
+static struct file_operations gptu_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = gptu_ioctl,
+ .open = gptu_open,
+ .release = gptu_release
+};
+
+static struct miscdevice gptu_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "gptu",
+ .fops = &gptu_fops,
+};
+
+static struct timer_dev timer_dev;
+
+static irqreturn_t timer_irq_handler(int irq, void *p)
+{
+ unsigned int timer;
+ unsigned int flag;
+ struct timer_dev_timer *dev_timer = (struct timer_dev_timer *)p;
+
+ timer = irq - TIMER_INTERRUPT;
+ if (timer < timer_dev.number_of_timers
+ && dev_timer == &timer_dev.timer[timer]) {
+ /* Clear interrupt. */
+ ltq_w32(1 << timer, LQ_GPTU_IRNCR);
+
+ /* Call user handler or signal. */
+ flag = dev_timer->flag;
+ if (!(timer & 0x01)
+ || TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) {
+ /* 16-bit timer or timer A of 32-bit timer */
+ switch (TIMER_FLAG_MASK_HANDLE(flag)) {
+ case TIMER_FLAG_CALLBACK_IN_IRQ:
+ case TIMER_FLAG_CALLBACK_IN_HB:
+ if (dev_timer->arg1)
+ (*(timer_callback)dev_timer->arg1)(dev_timer->arg2);
+ break;
+ case TIMER_FLAG_SIGNAL:
+ send_sig((int)dev_timer->arg2, (struct task_struct *)dev_timer->arg1, 0);
+ break;
+ }
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static inline void lq_enable_gptu(void)
+{
+ struct clk *clk = clk_get_sys("ltq_gptu", NULL);
+ clk_enable(clk);
+
+ //ltq_pmu_enable(PMU_GPT);
+
+ /* Set divider as 1, disable write protection for SPEN, enable module. */
+ *LQ_GPTU_CLC =
+ GPTU_CLC_SMC_SET(0x00) |
+ GPTU_CLC_RMC_SET(0x01) |
+ GPTU_CLC_FSOE_SET(0) |
+ GPTU_CLC_SBWE_SET(1) |
+ GPTU_CLC_EDIS_SET(0) |
+ GPTU_CLC_SPEN_SET(0) |
+ GPTU_CLC_DISR_SET(0);
+}
+
+static inline void lq_disable_gptu(void)
+{
+ struct clk *clk = clk_get_sys("ltq_gptu", NULL);
+ ltq_w32(0x00, LQ_GPTU_IRNEN);
+ ltq_w32(0xfff, LQ_GPTU_IRNCR);
+
+ /* Set divider as 0, enable write protection for SPEN, disable module. */
+ *LQ_GPTU_CLC =
+ GPTU_CLC_SMC_SET(0x00) |
+ GPTU_CLC_RMC_SET(0x00) |
+ GPTU_CLC_FSOE_SET(0) |
+ GPTU_CLC_SBWE_SET(0) |
+ GPTU_CLC_EDIS_SET(0) |
+ GPTU_CLC_SPEN_SET(0) |
+ GPTU_CLC_DISR_SET(1);
+
+ clk_disable(clk);
+}
+
+int lq_request_timer(unsigned int timer, unsigned int flag,
+ unsigned long value, unsigned long arg1, unsigned long arg2)
+{
+ int ret = 0;
+ unsigned int con_reg, irnen_reg;
+ int n, X;
+
+ if (timer >= FIRST_TIMER + timer_dev.number_of_timers)
+ return -EINVAL;
+
+ printk(KERN_INFO "request_timer(%d, 0x%08X, %lu)...",
+ timer, flag, value);
+
+ if (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT)
+ value &= 0xFFFF;
+ else
+ timer &= ~0x01;
+
+ mutex_lock(&timer_dev.gptu_mutex);
+
+ /*
+ * Allocate timer.
+ */
+ if (timer < FIRST_TIMER) {
+ unsigned int mask;
+ unsigned int shift;
+ /* This takes care of TIMER1B which is the only choice for Voice TAPI system */
+ unsigned int offset = TIMER2A;
+
+ /*
+ * Pick up a free timer.
+ */
+ if (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT) {
+ mask = 1 << offset;
+ shift = 1;
+ } else {
+ mask = 3 << offset;
+ shift = 2;
+ }
+ for (timer = offset;
+ timer < offset + timer_dev.number_of_timers;
+ timer += shift, mask <<= shift)
+ if (!(timer_dev.occupation & mask)) {
+ timer_dev.occupation |= mask;
+ break;
+ }
+ if (timer >= offset + timer_dev.number_of_timers) {
+ printk("failed![%d]\n", __LINE__);
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EINVAL;
+ } else
+ ret = timer;
+ } else {
+ register unsigned int mask;
+
+ /*
+ * Check if the requested timer is free.
+ */
+ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
+ if ((timer_dev.occupation & mask)) {
+ printk("failed![%d] mask %#x, timer_dev.occupation %#x\n",
+ __LINE__, mask, timer_dev.occupation);
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EBUSY;
+ } else {
+ timer_dev.occupation |= mask;
+ ret = 0;
+ }
+ }
+
+ /*
+ * Prepare control register value.
+ */
+ switch (TIMER_FLAG_MASK_EDGE(flag)) {
+ default:
+ case TIMER_FLAG_NONE_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x00);
+ break;
+ case TIMER_FLAG_RISE_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x01);
+ break;
+ case TIMER_FLAG_FALL_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x02);
+ break;
+ case TIMER_FLAG_ANY_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x03);
+ break;
+ }
+ if (TIMER_FLAG_MASK_TYPE(flag) == TIMER_FLAG_TIMER)
+ con_reg |=
+ TIMER_FLAG_MASK_SRC(flag) ==
+ TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EXT_SET(1) :
+ GPTU_CON_SRC_EXT_SET(0);
+ else
+ con_reg |=
+ TIMER_FLAG_MASK_SRC(flag) ==
+ TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EG_SET(1) :
+ GPTU_CON_SRC_EG_SET(0);
+ con_reg |=
+ TIMER_FLAG_MASK_SYNC(flag) ==
+ TIMER_FLAG_UNSYNC ? GPTU_CON_SYNC_SET(0) :
+ GPTU_CON_SYNC_SET(1);
+ con_reg |=
+ TIMER_FLAG_MASK_INVERT(flag) ==
+ TIMER_FLAG_REAL ? GPTU_CON_INV_SET(0) : GPTU_CON_INV_SET(1);
+ con_reg |=
+ TIMER_FLAG_MASK_SIZE(flag) ==
+ TIMER_FLAG_16BIT ? GPTU_CON_EXT_SET(0) :
+ GPTU_CON_EXT_SET(1);
+ con_reg |=
+ TIMER_FLAG_MASK_STOP(flag) ==
+ TIMER_FLAG_ONCE ? GPTU_CON_STP_SET(1) : GPTU_CON_STP_SET(0);
+ con_reg |=
+ TIMER_FLAG_MASK_TYPE(flag) ==
+ TIMER_FLAG_TIMER ? GPTU_CON_CNT_SET(0) :
+ GPTU_CON_CNT_SET(1);
+ con_reg |=
+ TIMER_FLAG_MASK_DIR(flag) ==
+ TIMER_FLAG_UP ? GPTU_CON_DIR_SET(1) : GPTU_CON_DIR_SET(0);
+
+ /*
+ * Fill up running data.
+ */
+ timer_dev.timer[timer - FIRST_TIMER].flag = flag;
+ timer_dev.timer[timer - FIRST_TIMER].arg1 = arg1;
+ timer_dev.timer[timer - FIRST_TIMER].arg2 = arg2;
+ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
+ timer_dev.timer[timer - FIRST_TIMER + 1].flag = flag;
+
+ /*
+ * Enable GPTU module.
+ */
+ if (!timer_dev.f_gptu_on) {
+ lq_enable_gptu();
+ timer_dev.f_gptu_on = 1;
+ }
+
+ /*
+ * Enable IRQ.
+ */
+ if (TIMER_FLAG_MASK_HANDLE(flag) != TIMER_FLAG_NO_HANDLE) {
+ if (TIMER_FLAG_MASK_HANDLE(flag) == TIMER_FLAG_SIGNAL)
+ timer_dev.timer[timer - FIRST_TIMER].arg1 =
+ (unsigned long) find_task_by_vpid((int) arg1);
+
+ irnen_reg = 1 << (timer - FIRST_TIMER);
+
+ if (TIMER_FLAG_MASK_HANDLE(flag) == TIMER_FLAG_SIGNAL
+ || (TIMER_FLAG_MASK_HANDLE(flag) ==
+ TIMER_FLAG_CALLBACK_IN_IRQ
+ && timer_dev.timer[timer - FIRST_TIMER].arg1)) {
+ enable_irq(timer_dev.timer[timer - FIRST_TIMER].irq);
+ timer_dev.timer[timer - FIRST_TIMER].f_irq_on = 1;
+ }
+ } else
+ irnen_reg = 0;
+
+ /*
+ * Write config register, reload value and enable interrupt.
+ */
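+ /* n selects the timer block, X the A/B half within it (a pair forms one 32-bit timer). */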
+ n = timer >> 1;
+ X = timer & 0x01;
+ *LQ_GPTU_CON(n, X) = con_reg;
+ *LQ_GPTU_RELOAD(n, X) = value;
+ /* printk("reload value = %d\n", (u32)value); */
+ *LQ_GPTU_IRNEN |= irnen_reg;
+
+ mutex_unlock(&timer_dev.gptu_mutex);
+ printk("successful!\n");
+ return ret;
+}
+EXPORT_SYMBOL(lq_request_timer);
+
+int lq_free_timer(unsigned int timer)
+{
+ unsigned int flag;
+ unsigned int mask;
+ int n, X;
+
+ if (!timer_dev.f_gptu_on)
+ return -EINVAL;
+
+ if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers)
+ return -EINVAL;
+
+ mutex_lock(&timer_dev.gptu_mutex);
+
+ flag = timer_dev.timer[timer - FIRST_TIMER].flag;
+ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
+ timer &= ~0x01;
+
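+ /* Every bit covered by this timer's mask must already be marked as occupied. */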
+ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
+ if (((timer_dev.occupation & mask) ^ mask)) {
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EINVAL;
+ }
+
+ n = timer >> 1;
+ X = timer & 0x01;
+
+ if (GPTU_CON_EN(n, X))
+ *LQ_GPTU_RUN(n, X) = GPTU_RUN_CEN_SET(1);
+
+ *LQ_GPTU_IRNEN &= ~GPTU_IRNEN_TC_SET(n, X, 1);
+ *LQ_GPTU_IRNCR |= GPTU_IRNCR_TC_SET(n, X, 1);
+
+ if (timer_dev.timer[timer - FIRST_TIMER].f_irq_on) {
+ disable_irq(timer_dev.timer[timer - FIRST_TIMER].irq);
+ timer_dev.timer[timer - FIRST_TIMER].f_irq_on = 0;
+ }
+
+ timer_dev.occupation &= ~mask;
+ if (!timer_dev.occupation && timer_dev.f_gptu_on) {
+ lq_disable_gptu();
+ timer_dev.f_gptu_on = 0;
+ }
+
+ mutex_unlock(&timer_dev.gptu_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(lq_free_timer);
+
+int lq_start_timer(unsigned int timer, int is_resume)
+{
+ unsigned int flag;
+ unsigned int mask;
+ int n, X;
+
+ if (!timer_dev.f_gptu_on)
+ return -EINVAL;
+
+ if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers)
+ return -EINVAL;
+
+ mutex_lock(&timer_dev.gptu_mutex);
+
+ flag = timer_dev.timer[timer - FIRST_TIMER].flag;
+ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
+ timer &= ~0x01;
+
+ mask = (TIMER_FLAG_MASK_SIZE(flag) ==
+ TIMER_FLAG_16BIT ? 1 : 3) << timer;
+ if (((timer_dev.occupation & mask) ^ mask)) {
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EINVAL;
+ }
+
+ n = timer >> 1;
+ X = timer & 0x01;
+
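+ /* SEN starts the timer; RL forces a reload of the count unless resuming. */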
+ *LQ_GPTU_RUN(n, X) = GPTU_RUN_RL_SET(!is_resume) | GPTU_RUN_SEN_SET(1);
+
+ mutex_unlock(&timer_dev.gptu_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(lq_start_timer);
+
+int lq_stop_timer(unsigned int timer)
+{
+ unsigned int flag;
+ unsigned int mask;
+ int n, X;
+
+ if (!timer_dev.f_gptu_on)
+ return -EINVAL;
+
+ if (timer < FIRST_TIMER
+ || timer >= FIRST_TIMER + timer_dev.number_of_timers)
+ return -EINVAL;
+
+ mutex_lock(&timer_dev.gptu_mutex);
+
+ flag = timer_dev.timer[timer - FIRST_TIMER].flag;
+ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
+ timer &= ~0x01;
+
+ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
+ if (((timer_dev.occupation & mask) ^ mask)) {
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EINVAL;
+ }
+
+ n = timer >> 1;
+ X = timer & 0x01;
+
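+ /* Writing CEN halts the timer/counter. */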
+ *LQ_GPTU_RUN(n, X) = GPTU_RUN_CEN_SET(1);
+
+ mutex_unlock(&timer_dev.gptu_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(lq_stop_timer);
+
+int lq_reset_counter_flags(u32 timer, u32 flags)
+{
+ unsigned int oflag;
+ unsigned int mask, con_reg;
+ int n, X;
+
+ if (!timer_dev.f_gptu_on)
+ return -EINVAL;
+
+ if (timer < FIRST_TIMER || timer >= FIRST_TIMER + timer_dev.number_of_timers)
+ return -EINVAL;
+
+ mutex_lock(&timer_dev.gptu_mutex);
+
+ oflag = timer_dev.timer[timer - FIRST_TIMER].flag;
+ if (TIMER_FLAG_MASK_SIZE(oflag) != TIMER_FLAG_16BIT)
+ timer &= ~0x01;
+
+ mask = (TIMER_FLAG_MASK_SIZE(oflag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
+ if (((timer_dev.occupation & mask) ^ mask)) {
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EINVAL;
+ }
+
+ switch (TIMER_FLAG_MASK_EDGE(flags)) {
+ default:
+ case TIMER_FLAG_NONE_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x00);
+ break;
+ case TIMER_FLAG_RISE_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x01);
+ break;
+ case TIMER_FLAG_FALL_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x02);
+ break;
+ case TIMER_FLAG_ANY_EDGE:
+ con_reg = GPTU_CON_EDGE_SET(0x03);
+ break;
+ }
+ if (TIMER_FLAG_MASK_TYPE(flags) == TIMER_FLAG_TIMER)
+ con_reg |= TIMER_FLAG_MASK_SRC(flags) == TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EXT_SET(1) : GPTU_CON_SRC_EXT_SET(0);
+ else
+ con_reg |= TIMER_FLAG_MASK_SRC(flags) == TIMER_FLAG_EXT_SRC ? GPTU_CON_SRC_EG_SET(1) : GPTU_CON_SRC_EG_SET(0);
+ con_reg |= TIMER_FLAG_MASK_SYNC(flags) == TIMER_FLAG_UNSYNC ? GPTU_CON_SYNC_SET(0) : GPTU_CON_SYNC_SET(1);
+ con_reg |= TIMER_FLAG_MASK_INVERT(flags) == TIMER_FLAG_REAL ? GPTU_CON_INV_SET(0) : GPTU_CON_INV_SET(1);
+ con_reg |= TIMER_FLAG_MASK_SIZE(flags) == TIMER_FLAG_16BIT ? GPTU_CON_EXT_SET(0) : GPTU_CON_EXT_SET(1);
+ con_reg |= TIMER_FLAG_MASK_STOP(flags) == TIMER_FLAG_ONCE ? GPTU_CON_STP_SET(1) : GPTU_CON_STP_SET(0);
+ con_reg |= TIMER_FLAG_MASK_TYPE(flags) == TIMER_FLAG_TIMER ? GPTU_CON_CNT_SET(0) : GPTU_CON_CNT_SET(1);
+ con_reg |= TIMER_FLAG_MASK_DIR(flags) == TIMER_FLAG_UP ? GPTU_CON_DIR_SET(1) : GPTU_CON_DIR_SET(0);
+
+ timer_dev.timer[timer - FIRST_TIMER].flag = flags;
+ if (TIMER_FLAG_MASK_SIZE(flags) != TIMER_FLAG_16BIT)
+ timer_dev.timer[timer - FIRST_TIMER + 1].flag = flags;
+
+ n = timer >> 1;
+ X = timer & 0x01;
+
+ *LQ_GPTU_CON(n, X) = con_reg;
+ smp_wmb();
+ printk(KERN_INFO "[%s]: counter%d oflags %#x, nflags %#x, GPTU_CON %#x\n", __func__, timer, oflag, flags, *LQ_GPTU_CON(n, X));
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(lq_reset_counter_flags);
+
+int lq_get_count_value(unsigned int timer, unsigned long *value)
+{
+ unsigned int flag;
+ unsigned int mask;
+ int n, X;
+
+ if (!timer_dev.f_gptu_on)
+ return -EINVAL;
+
+ if (timer < FIRST_TIMER
+ || timer >= FIRST_TIMER + timer_dev.number_of_timers)
+ return -EINVAL;
+
+ mutex_lock(&timer_dev.gptu_mutex);
+
+ flag = timer_dev.timer[timer - FIRST_TIMER].flag;
+ if (TIMER_FLAG_MASK_SIZE(flag) != TIMER_FLAG_16BIT)
+ timer &= ~0x01;
+
+ mask = (TIMER_FLAG_MASK_SIZE(flag) == TIMER_FLAG_16BIT ? 1 : 3) << timer;
+ if (((timer_dev.occupation & mask) ^ mask)) {
+ mutex_unlock(&timer_dev.gptu_mutex);
+ return -EINVAL;
+ }
+
+ n = timer >> 1;
+ X = timer & 0x01;
+
+ *value = *LQ_GPTU_COUNT(n, X);
+
+ mutex_unlock(&timer_dev.gptu_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(lq_get_count_value);
+
+u32 lq_cal_divider(unsigned long freq)
+{
+ u64 module_freq, fpi = ltq_get_fpi_bus_clock(2);
+ u32 clock_divider = 1;
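+ /* Derive the reload/divider value for the requested frequency from the FPI bus clock. */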
+ module_freq = fpi * 1000;
+ do_div(module_freq, clock_divider * freq);
+ return module_freq;
+}
+EXPORT_SYMBOL(lq_cal_divider);
+
+int lq_set_timer(unsigned int timer, unsigned int freq, int is_cyclic,
+ int is_ext_src, unsigned int handle_flag, unsigned long arg1,
+ unsigned long arg2)
+{
+ unsigned long divider;
+ unsigned int flag;
+
+ divider = lq_cal_divider(freq);
+ if (divider == 0)
+ return -EINVAL;
+ flag = ((divider & ~0xFFFF) ? TIMER_FLAG_32BIT : TIMER_FLAG_16BIT)
+ | (is_cyclic ? TIMER_FLAG_CYCLIC : TIMER_FLAG_ONCE)
+ | (is_ext_src ? TIMER_FLAG_EXT_SRC : TIMER_FLAG_INT_SRC)
+ | TIMER_FLAG_TIMER | TIMER_FLAG_DOWN
+ | TIMER_FLAG_MASK_HANDLE(handle_flag);
+
+ printk(KERN_INFO "lq_set_timer(%d, %d), divider = %lu\n",
+ timer, freq, divider);
+ return lq_request_timer(timer, flag, divider, arg1, arg2);
+}
+EXPORT_SYMBOL(lq_set_timer);
+
+int lq_set_counter(unsigned int timer, unsigned int flag, u32 reload,
+ unsigned long arg1, unsigned long arg2)
+{
+ printk(KERN_INFO "lq_set_counter(%d, %#x, %d)\n", timer, flag, reload);
+ return lq_request_timer(timer, flag, reload, arg1, arg2);
+}
+EXPORT_SYMBOL(lq_set_counter);
+
+static long gptu_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct gptu_ioctl_param param;
+
+ if (!access_ok(VERIFY_READ, arg, sizeof(struct gptu_ioctl_param)))
+ return -EFAULT;
+ if (copy_from_user(&param, (void *) arg, sizeof(param)))
+ return -EFAULT;
+
+ if ((((cmd == GPTU_REQUEST_TIMER || cmd == GPTU_SET_TIMER
+ || cmd == GPTU_SET_COUNTER) && param.timer < 2)
+ || cmd == GPTU_GET_COUNT_VALUE || cmd == GPTU_CALCULATE_DIVIDER)
+ && !access_ok(VERIFY_WRITE, arg,
+ sizeof(struct gptu_ioctl_param)))
+ return -EFAULT;
+
+ switch (cmd) {
+ case GPTU_REQUEST_TIMER:
+ ret = lq_request_timer(param.timer, param.flag, param.value,
+ (unsigned long) param.pid,
+ (unsigned long) param.sig);
+ if (ret > 0) {
+ copy_to_user(&((struct gptu_ioctl_param *) arg)->
+ timer, &ret, sizeof(ret));
+ ret = 0;
+ }
+ break;
+ case GPTU_FREE_TIMER:
+ ret = lq_free_timer(param.timer);
+ break;
+ case GPTU_START_TIMER:
+ ret = lq_start_timer(param.timer, param.flag);
+ break;
+ case GPTU_STOP_TIMER:
+ ret = lq_stop_timer(param.timer);
+ break;
+ case GPTU_GET_COUNT_VALUE:
+ ret = lq_get_count_value(param.timer, &param.value);
+ if (!ret)
+ copy_to_user(&((struct gptu_ioctl_param *) arg)->
+ value, &param.value,
+ sizeof(param.value));
+ break;
+ case GPTU_CALCULATE_DIVIDER:
+ param.value = lq_cal_divider(param.value);
+ if (param.value == 0)
+ ret = -EINVAL;
+ else {
+ copy_to_user(&((struct gptu_ioctl_param *) arg)->
+ value, &param.value,
+ sizeof(param.value));
+ ret = 0;
+ }
+ break;
+ case GPTU_SET_TIMER:
+ ret = lq_set_timer(param.timer, param.value,
+ TIMER_FLAG_MASK_STOP(param.flag) !=
+ TIMER_FLAG_ONCE ? 1 : 0,
+ TIMER_FLAG_MASK_SRC(param.flag) ==
+ TIMER_FLAG_EXT_SRC ? 1 : 0,
+ TIMER_FLAG_MASK_HANDLE(param.flag) ==
+ TIMER_FLAG_SIGNAL ? TIMER_FLAG_SIGNAL :
+ TIMER_FLAG_NO_HANDLE,
+ (unsigned long) param.pid,
+ (unsigned long) param.sig);
+ if (ret > 0) {
+ copy_to_user(&((struct gptu_ioctl_param *) arg)->
+ timer, &ret, sizeof(ret));
+ ret = 0;
+ }
+ break;
+ case GPTU_SET_COUNTER:
+ ret = lq_set_counter(param.timer, param.flag, param.value, 0, 0);
+ if (ret > 0) {
+ copy_to_user(&((struct gptu_ioctl_param *) arg)->
+ timer, &ret, sizeof(ret));
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static int gptu_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static int gptu_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+int __init lq_gptu_init(void)
+{
+ int ret;
+ unsigned int i;
+
+ ltq_w32(0, LQ_GPTU_IRNEN);
+ ltq_w32(0xfff, LQ_GPTU_IRNCR);
+
+ memset(&timer_dev, 0, sizeof(timer_dev));
+ mutex_init(&timer_dev.gptu_mutex);
+
+ lq_enable_gptu();
+ timer_dev.number_of_timers = GPTU_ID_CFG * 2;
+ lq_disable_gptu();
+ if (timer_dev.number_of_timers > MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2)
+ timer_dev.number_of_timers = MAX_NUM_OF_32BIT_TIMER_BLOCKS * 2;
+ printk(KERN_INFO "gptu: totally %d 16-bit timers/counters\n", timer_dev.number_of_timers);
+
+ ret = misc_register(&gptu_miscdev);
+ if (ret) {
+ printk(KERN_ERR "gptu: can't misc_register, get error %d\n", -ret);
+ return ret;
+ } else {
+ printk(KERN_INFO "gptu: misc_register on minor %d\n", gptu_miscdev.minor);
+ }
+
+ for (i = 0; i < timer_dev.number_of_timers; i++) {
+ ret = request_irq(TIMER_INTERRUPT + i, timer_irq_handler, IRQF_TIMER, gptu_miscdev.name, &timer_dev.timer[i]);
+ if (ret) {
+ while (i-- > 0)
+ free_irq(TIMER_INTERRUPT + i, &timer_dev.timer[i]);
+ misc_deregister(&gptu_miscdev);
+ printk(KERN_ERR "gptu: failed in requesting irq (%d), get error %d\n", i, -ret);
+ return ret;
+ } else {
+ timer_dev.timer[i].irq = TIMER_INTERRUPT + i;
+ disable_irq(timer_dev.timer[i].irq);
+ printk(KERN_INFO "gptu: succeeded to request irq %d\n", timer_dev.timer[i].irq);
+ }
+ }
+
+ return 0;
+}
+
+void __exit lq_gptu_exit(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < timer_dev.number_of_timers; i++) {
+ if (timer_dev.timer[i].f_irq_on)
+ disable_irq(timer_dev.timer[i].irq);
+ free_irq(timer_dev.timer[i].irq, &timer_dev.timer[i]);
+ }
+ lq_disable_gptu();
+ misc_deregister(&gptu_miscdev);
+}
+
+module_init(lq_gptu_init);
+module_exit(lq_gptu_exit);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq-pcie.c b/target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq-pcie.c
new file mode 100644
index 0000000000..84517dfbc3
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq-pcie.c
@@ -0,0 +1,81 @@
+/******************************************************************************
+**
+** FILE NAME : ifxmips_fixup_pcie.c
+** PROJECT : IFX UEIP for VRX200
+** MODULES : PCIe
+**
+** DATE : 02 Mar 2009
+** AUTHOR : Lei Chuanhua
+** DESCRIPTION : PCIe Root Complex Driver
+** COPYRIGHT : Copyright (c) 2009
+** Infineon Technologies AG
+** Am Campeon 1-12, 85579 Neubiberg, Germany
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+** HISTORY
+** $Version $Date $Author $Comment
+** 0.0.1 17 Mar,2009 Lei Chuanhua Initial version
+*******************************************************************************/
+/*!
+ \file ifxmips_fixup_pcie.c
+ \ingroup IFX_PCIE
+ \brief PCIe Fixup functions source file
+*/
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/pci_ids.h>
+
+#include <lantiq_soc.h>
+
+#include "pcie-lantiq.h"
+
+#define PCI_VENDOR_ID_INFINEON 0x15D1
+#define PCI_DEVICE_ID_INFINEON_DANUBE 0x000F
+#define PCI_DEVICE_ID_INFINEON_PCIE 0x0011
+#define PCI_VENDOR_ID_LANTIQ 0x1BEF
+#define PCI_DEVICE_ID_LANTIQ_PCIE 0x0011
+
+
+
+static void __devinit
+ifx_pcie_fixup_resource(struct pci_dev *dev)
+{
+ u32 reg;
+
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: enter\n", __func__, pci_name(dev));
+
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s: fixup host controller %s (%04x:%04x)\n",
+ __func__, pci_name(dev), dev->vendor, dev->device);
+
+ /* Setup COMMAND register */
+ reg = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER /* |
+ PCI_COMMAND_INTX_DISABLE */| PCI_COMMAND_SERR;
+ pci_write_config_word(dev, PCI_COMMAND, reg);
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: exit\n", __func__, pci_name(dev));
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INFINEON, PCI_DEVICE_ID_INFINEON_PCIE, ifx_pcie_fixup_resource);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LANTIQ, PCI_DEVICE_ID_LANTIQ_PCIE, ifx_pcie_fixup_resource);
+
+static void __devinit
+ifx_pcie_rc_class_early_fixup(struct pci_dev *dev)
+{
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: enter\n", __func__, pci_name(dev));
+
+ if (dev->devfn == PCI_DEVFN(0, 0) &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
+
+ dev->class = (PCI_CLASS_BRIDGE_PCI << 8) | (dev->class & 0xff);
+
+ printk(KERN_INFO "%s: fixed pcie host bridge to pci-pci bridge\n", __func__);
+ }
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s: exit\n", __func__, pci_name(dev));
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INFINEON, PCI_DEVICE_ID_INFINEON_PCIE,
+ ifx_pcie_rc_class_early_fixup);
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LANTIQ, PCI_DEVICE_ID_LANTIQ_PCIE,
+ ifx_pcie_rc_class_early_fixup);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq.c b/target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq.c
new file mode 100644
index 0000000000..daf5ae9e42
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/pci/fixup-lantiq.c
@@ -0,0 +1,42 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+
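+/* Hook pointers and the slot-indexed IRQ map below are expected to be filled in by the platform PCI/PCIe bus driver. */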
+int (*ltqpci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin) = NULL;
+int (*ltqpci_plat_arch_init)(struct pci_dev *dev) = NULL;
+int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL;
+int *ltq_pci_irq_map;
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+ if (ltqpci_plat_arch_init)
+ return ltqpci_plat_arch_init(dev);
+
+ if (ltqpci_plat_dev_init)
+ return ltqpci_plat_dev_init(dev);
+
+ return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ if (ltqpci_map_irq)
+ return ltqpci_map_irq(dev, slot, pin);
+ if (ltq_pci_irq_map[slot]) {
+ dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, ltq_pci_irq_map[slot]);
+ return ltq_pci_irq_map[slot];
+ }
+ printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n",
+ slot);
+
+ return 0;
+}
+
+
diff --git a/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-msi.c b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-msi.c
new file mode 100644
index 0000000000..9cbf639bb1
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-msi.c
@@ -0,0 +1,399 @@
+/******************************************************************************
+**
+** FILE NAME : ifxmips_pcie_msi.c
+** PROJECT : IFX UEIP for VRX200
+** MODULES : PCI MSI sub module
+**
+** DATE : 02 Mar 2009
+** AUTHOR : Lei Chuanhua
+** DESCRIPTION : PCIe MSI Driver
+** COPYRIGHT : Copyright (c) 2009
+** Infineon Technologies AG
+** Am Campeon 1-12, 85579 Neubiberg, Germany
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+** HISTORY
+** $Date $Author $Comment
+** 02 Mar,2009 Lei Chuanhua Initial version
+*******************************************************************************/
+/*!
+ \defgroup IFX_PCIE_MSI MSI OS APIs
+ \ingroup IFX_PCIE
+ \brief PCIe bus driver OS interface functions
+*/
+
+/*!
+ \file ifxmips_pcie_msi.c
+ \ingroup IFX_PCIE
+ \brief PCIe MSI OS interface file
+*/
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/module.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+
+#include "pcie-lantiq.h"
+
+#define IFX_MSI_IRQ_NUM 16
+#define SM(_v, _f) (((_v) << _f##_S) & (_f))
+
+#define IFX_MSI_PIC_REG_BASE (KSEG1 | 0x1F700000)
+#define IFX_PCIE_MSI_IR0 (INT_NUM_IM4_IRL0 + 27)
+#define IFX_PCIE_MSI_IR1 (INT_NUM_IM4_IRL0 + 28)
+#define IFX_PCIE_MSI_IR2 (INT_NUM_IM4_IRL0 + 29)
+#define IFX_PCIE_MSI_IR3 (INT_NUM_IM0_IRL0 + 30)
+
+#define IFX_MSI_PCI_INT_DISABLE 0x80000000
+#define IFX_MSI_PIC_INT_LINE 0x30000000
+#define IFX_MSI_PIC_MSG_ADDR 0x0FFF0000
+#define IFX_MSI_PIC_MSG_DATA 0x0000FFFF
+#define IFX_MSI_PIC_BIG_ENDIAN 1
+#define IFX_MSI_PIC_INT_LINE_S 28
+#define IFX_MSI_PIC_MSG_ADDR_S 16
+#define IFX_MSI_PIC_MSG_DATA_S 0x0
+
+enum {
+ IFX_PCIE_MSI_IDX0 = 0,
+ IFX_PCIE_MSI_IDX1,
+ IFX_PCIE_MSI_IDX2,
+ IFX_PCIE_MSI_IDX3,
+};
+
+typedef struct ifx_msi_irq_idx {
+ const int irq;
+ const int idx;
+}ifx_msi_irq_idx_t;
+
+struct ifx_msi_pic {
+ volatile u32 pic_table[IFX_MSI_IRQ_NUM];
+ volatile u32 pic_endian; /* 0x40 */
+};
+typedef struct ifx_msi_pic *ifx_msi_pic_t;
+
+typedef struct ifx_msi_irq {
+ const volatile ifx_msi_pic_t msi_pic_p;
+ const u32 msi_phy_base;
+ const ifx_msi_irq_idx_t msi_irq_idx[IFX_MSI_IRQ_NUM];
+ /*
+ * Each bit in msi_free_irq_bitmask represents a MSI interrupt that is
+ * in use.
+ */
+ u16 msi_free_irq_bitmask;
+
+ /*
+ * Each bit in msi_multiple_irq_bitmask tells that the device using
+ * this bit in msi_free_irq_bitmask is also using the next bit. This
+ * is used so we can disable all of the MSI interrupts when a device
+ * uses multiple.
+ */
+ u16 msi_multiple_irq_bitmask;
+}ifx_msi_irq_t;
+
+static ifx_msi_irq_t msi_irqs[IFX_PCIE_CORE_NR] = {
+ {
+ .msi_pic_p = (const volatile ifx_msi_pic_t)IFX_MSI_PIC_REG_BASE,
+ .msi_phy_base = PCIE_MSI_PHY_BASE,
+ .msi_irq_idx = {
+ {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ {IFX_PCIE_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ },
+ .msi_free_irq_bitmask = 0,
+ .msi_multiple_irq_bitmask= 0,
+ },
+#ifdef CONFIG_IFX_PCIE_2ND_CORE
+ {
+ .msi_pic_p = (const volatile ifx_msi_pic_t)IFX_MSI1_PIC_REG_BASE,
+ .msi_phy_base = PCIE1_MSI_PHY_BASE,
+ .msi_irq_idx = {
+ {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ {IFX_PCIE1_MSI_IR0, IFX_PCIE_MSI_IDX0}, {IFX_PCIE1_MSI_IR1, IFX_PCIE_MSI_IDX1},
+ {IFX_PCIE1_MSI_IR2, IFX_PCIE_MSI_IDX2}, {IFX_PCIE1_MSI_IR3, IFX_PCIE_MSI_IDX3},
+ },
+ .msi_free_irq_bitmask = 0,
+ .msi_multiple_irq_bitmask= 0,
+
+ },
+#endif /* CONFIG_IFX_PCIE_2ND_CORE */
+};
+
+/*
+ * This lock controls updates to msi_free_irq_bitmask,
+ * msi_multiple_irq_bitmask and PIC register settings
+ */
+static DEFINE_SPINLOCK(ifx_pcie_msi_lock);
+
+void pcie_msi_pic_init(int pcie_port)
+{
+ spin_lock(&ifx_pcie_msi_lock);
+ msi_irqs[pcie_port].msi_pic_p->pic_endian = IFX_MSI_PIC_BIG_ENDIAN;
+ spin_unlock(&ifx_pcie_msi_lock);
+}
+
+/**
+ * \fn int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+ * \brief Called when a driver request MSI interrupts instead of the
+ * legacy INT A-D. This routine will allocate multiple interrupts
+ * for MSI devices that support them. A device can override this by
+ * programming the MSI control bits [6:4] before calling
+ * pci_enable_msi().
+ *
+ * \param[in] pdev Device requesting MSI interrupts
+ * \param[in] desc MSI descriptor
+ *
+ * \return -EINVAL Invalid pcie root port or invalid msi bit
+ * \return 0 OK
+ * \ingroup IFX_PCIE_MSI
+ */
+int
+arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+ int irq, pos;
+ u16 control;
+ int irq_idx;
+ int irq_step;
+ int configured_private_bits;
+ int request_private_bits;
+ struct msi_msg msg;
+ u16 search_mask;
+ struct ifx_pci_controller *ctrl = pdev->bus->sysdata;
+ int pcie_port = ctrl->port;
+
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s %s enter\n", __func__, pci_name(pdev));
+
+ /* XXX, skip RC MSI itself */
+ if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) {
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s RC itself doesn't use MSI interrupt\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Read the MSI config to figure out how many IRQs this device
+ * wants. Most devices only want 1, which will give
+ * configured_private_bits and request_private_bits equal 0.
+ */
+ pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, &control);
+
+ /*
+ * If the number of private bits has been configured then use
+ * that value instead of the requested number. This gives the
+ * driver the chance to override the number of interrupts
+ * before calling pci_enable_msi().
+ */
+ configured_private_bits = (control & PCI_MSI_FLAGS_QSIZE) >> 4;
+ if (configured_private_bits == 0) {
+ /* Nothing is configured, so use the hardware requested size */
+ request_private_bits = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ }
+ else {
+ /*
+ * Use the number of configured bits, assuming the
+ * driver wanted to override the hardware request
+ * value.
+ */
+ request_private_bits = configured_private_bits;
+ }
+
+ /*
+ * The PCI 2.3 spec mandates that there are at most 32
+ * interrupts. If this device asks for more, only give it one.
+ */
+ if (request_private_bits > 5) {
+ request_private_bits = 0;
+ }
+again:
+ /*
+ * The IRQs have to be aligned on a power of two based on the
+ * number being requested.
+ */
+ irq_step = (1 << request_private_bits);
+
+ /* Mask with one bit for each IRQ */
+ search_mask = (1 << irq_step) - 1;
+
+ /*
+ * We're going to search msi_free_irq_bitmask for zero
+ * bits. This represents an MSI interrupt number that isn't in
+ * use.
+ */
+ spin_lock(&ifx_pcie_msi_lock);
+ for (pos = 0; pos < IFX_MSI_IRQ_NUM; pos += irq_step) {
+ if ((msi_irqs[pcie_port].msi_free_irq_bitmask & (search_mask << pos)) == 0) {
+ msi_irqs[pcie_port].msi_free_irq_bitmask |= search_mask << pos;
+ msi_irqs[pcie_port].msi_multiple_irq_bitmask |= (search_mask >> 1) << pos;
+ break;
+ }
+ }
+ spin_unlock(&ifx_pcie_msi_lock);
+
+ /* Make sure the search for available interrupts didn't fail */
+ if (pos >= IFX_MSI_IRQ_NUM) {
+ if (request_private_bits) {
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s: Unable to find %d free "
+ "interrupts, trying just one", __func__, 1 << request_private_bits);
+ request_private_bits = 0;
+ goto again;
+ }
+ else {
+ printk(KERN_ERR "%s: Unable to find a free MSI interrupt\n", __func__);
+ return -EINVAL;
+ }
+ }
+ irq = msi_irqs[pcie_port].msi_irq_idx[pos].irq;
+ irq_idx = msi_irqs[pcie_port].msi_irq_idx[pos].idx;
+
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "pos %d, irq %d irq_idx %d\n", pos, irq, irq_idx);
+
+ /*
+ * Initialize MSI. This has to match the memory-write endianness of the device
+ * Address bits [23:12]
+ */
+ spin_lock(&ifx_pcie_msi_lock);
+ msi_irqs[pcie_port].msi_pic_p->pic_table[pos] = SM(irq_idx, IFX_MSI_PIC_INT_LINE) |
+ SM((msi_irqs[pcie_port].msi_phy_base >> 12), IFX_MSI_PIC_MSG_ADDR) |
+ SM((1 << pos), IFX_MSI_PIC_MSG_DATA);
+
+ /* Enable this entry */
+ msi_irqs[pcie_port].msi_pic_p->pic_table[pos] &= ~IFX_MSI_PCI_INT_DISABLE;
+ spin_unlock(&ifx_pcie_msi_lock);
+
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "pic_table[%d]: 0x%08x\n",
+ pos, msi_irqs[pcie_port].msi_pic_p->pic_table[pos]);
+
+ /* Update the number of IRQs the device has available to it */
+ control &= ~PCI_MSI_FLAGS_QSIZE;
+ control |= (request_private_bits << 4);
+ pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS, control);
+
+ irq_set_msi_desc(irq, desc);
+ msg.address_hi = 0x0;
+ msg.address_lo = msi_irqs[pcie_port].msi_phy_base;
+ msg.data = SM((1 << pos), IFX_MSI_PIC_MSG_DATA);
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "msi_data: pos %d 0x%08x\n", pos, msg.data);
+
+ write_msi_msg(irq, &msg);
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s exit\n", __func__);
+ return 0;
+}
+
+static int
+pcie_msi_irq_to_port(unsigned int irq, int *port)
+{
+ int ret = 0;
+
+ if (irq == IFX_PCIE_MSI_IR0 || irq == IFX_PCIE_MSI_IR1 ||
+ irq == IFX_PCIE_MSI_IR2 || irq == IFX_PCIE_MSI_IR3) {
+ *port = IFX_PCIE_PORT0;
+ }
+#ifdef CONFIG_IFX_PCIE_2ND_CORE
+ else if (irq == IFX_PCIE1_MSI_IR0 || irq == IFX_PCIE1_MSI_IR1 ||
+ irq == IFX_PCIE1_MSI_IR2 || irq == IFX_PCIE1_MSI_IR3) {
+ *port = IFX_PCIE_PORT1;
+ }
+#endif /* CONFIG_IFX_PCIE_2ND_CORE */
+ else {
+ printk(KERN_ERR "%s: Attempted to teardown illegal "
+ "MSI interrupt (%d)\n", __func__, irq);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * \fn void arch_teardown_msi_irq(unsigned int irq)
+ * \brief Called when a device no longer needs its MSI interrupts. All
+ * MSI interrupts for the device are freed.
+ *
+ * \param irq The device's first irq number. There may be multiple in sequence.
+ * \return none
+ * \ingroup IFX_PCIE_MSI
+ */
+void
+arch_teardown_msi_irq(unsigned int irq)
+{
+ int pos;
+ int number_irqs;
+ u16 bitmask;
+ int pcie_port;
+
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s enter\n", __func__);
+
+ BUG_ON(irq > (INT_NUM_IM4_IRL0 + 31));
+
+ if (pcie_msi_irq_to_port(irq, &pcie_port) != 0) {
+ return;
+ }
+
+ /*
+ * Find the PIC table entry belonging to this IRQ; if the IRQ maps to
+ * several entries, the first allocated match is chosen.
+ */
+ for (pos = 0; pos < IFX_MSI_IRQ_NUM; pos++) {
+ if ((msi_irqs[pcie_port].msi_irq_idx[pos].irq == irq)
+ && (msi_irqs[pcie_port].msi_free_irq_bitmask & ( 1 << pos))) {
+ break;
+ }
+ }
+ if (pos >= IFX_MSI_IRQ_NUM) {
+ printk(KERN_ERR "%s: Unable to find a matched MSI interrupt\n", __func__);
+ return;
+ }
+ spin_lock(&ifx_pcie_msi_lock);
+ /* Disable this entry */
+ msi_irqs[pcie_port].msi_pic_p->pic_table[pos] |= IFX_MSI_PCI_INT_DISABLE;
+ msi_irqs[pcie_port].msi_pic_p->pic_table[pos] &= ~(IFX_MSI_PIC_INT_LINE | IFX_MSI_PIC_MSG_ADDR | IFX_MSI_PIC_MSG_DATA);
+ spin_unlock(&ifx_pcie_msi_lock);
+ /*
+ * Count the number of IRQs we need to free by looking at the
+ * msi_multiple_irq_bitmask. Each bit set means that the next
+ * IRQ is also owned by this device.
+ */
+ number_irqs = 0;
+ while (((pos + number_irqs) < IFX_MSI_IRQ_NUM) &&
+ (msi_irqs[pcie_port].msi_multiple_irq_bitmask & (1 << (pos + number_irqs)))) {
+ number_irqs++;
+ }
+ number_irqs++;
+
+ /* Mask with one bit for each IRQ */
+ bitmask = (1 << number_irqs) - 1;
+
+ bitmask <<= pos;
+ if ((msi_irqs[pcie_port].msi_free_irq_bitmask & bitmask) != bitmask) {
+ printk(KERN_ERR "%s: Attempted to teardown MSI "
+ "interrupt (%d) not in use\n", __func__, irq);
+ return;
+ }
+ /* Checks are done, update the in use bitmask */
+ spin_lock(&ifx_pcie_msi_lock);
+ msi_irqs[pcie_port].msi_free_irq_bitmask &= ~bitmask;
+ msi_irqs[pcie_port].msi_multiple_irq_bitmask &= ~(bitmask >> 1);
+ spin_unlock(&ifx_pcie_msi_lock);
+ IFX_PCIE_PRINT(PCIE_MSG_MSI, "%s exit\n", __func__);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chuanhua.Lei@infineon.com");
+MODULE_SUPPORTED_DEVICE("Infineon PCIe IP builtin MSI PIC module");
+MODULE_DESCRIPTION("Infineon PCIe IP builtin MSI PIC driver");
+
diff --git a/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-phy.c b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-phy.c
new file mode 100644
index 0000000000..9f5027d7cc
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq-phy.c
@@ -0,0 +1,408 @@
+/******************************************************************************
+**
+** FILE NAME : ifxmips_pcie_phy.c
+** PROJECT : IFX UEIP for VRX200
+** MODULES : PCIe PHY sub module
+**
+** DATE : 14 May 2009
+** AUTHOR : Lei Chuanhua
+** DESCRIPTION : PCIe Root Complex Driver
+** COPYRIGHT : Copyright (c) 2009
+** Infineon Technologies AG
+** Am Campeon 1-12, 85579 Neubiberg, Germany
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+** HISTORY
+** $Version $Date $Author $Comment
+** 0.0.1 14 May,2009 Lei Chuanhua Initial version
+*******************************************************************************/
+/*!
+ \file ifxmips_pcie_phy.c
+ \ingroup IFX_PCIE
+ \brief PCIe PHY PLL register programming source file
+*/
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <asm/paccess.h>
+#include <linux/delay.h>
+
+#include "pcie-lantiq.h"
+
+/* PCIe PDI only supports 16 bit operation */
+
+#define IFX_PCIE_PHY_REG_WRITE16(__addr, __data) \
+ ((*(volatile u16 *) (__addr)) = (__data))
+
+#define IFX_PCIE_PHY_REG_READ16(__addr) \
+ (*(volatile u16 *) (__addr))
+
+#define IFX_PCIE_PHY_REG16(__addr) \
+ (*(volatile u16 *) (__addr))
+
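+/* Read-modify-write of a 16-bit PHY register: only the bits selected by __mask are updated with __value. */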
+#define IFX_PCIE_PHY_REG(__reg, __value, __mask) do { \
+ u16 read_data; \
+ u16 write_data; \
+ read_data = IFX_PCIE_PHY_REG_READ16((__reg)); \
+ write_data = (read_data & ((u16)~(__mask))) | (((u16)(__value)) & ((u16)(__mask)));\
+ IFX_PCIE_PHY_REG_WRITE16((__reg), write_data); \
+} while (0)
+
+#define IFX_PCIE_PLL_TIMEOUT 1000 /* Tunable */
+
+static void
+pcie_phy_comm_setup(int pcie_port)
+{
+ /* PLL Setting */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL1(pcie_port), 0x120e, 0xFFFF);
+
+ /* increase the bias reference voltage */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x39D7, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x0900, 0xFFFF);
+
+ /* Endcnt */
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_EI(pcie_port), 0x0004, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_A_CTRL(pcie_port), 0x6803, 0xFFFF);
+
+ /* force */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0008, 0x0008);
+
+ /* predrv_ser_en */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL2(pcie_port), 0x0706, 0xFFFF);
+
+ /* ctrl_lim */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL3(pcie_port), 0x1FFF, 0xFFFF);
+
+ /* ctrl */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL1(pcie_port), 0x0800, 0xFF00);
+
+ /* predrv_ser_en */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4702, 0x7F00);
+
+ /* RTERM*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL2(pcie_port), 0x2e00, 0xFFFF);
+
+ /* Improved 100MHz clock output */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL2(pcie_port), 0x3096, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4707, 0xFFFF);
+
+ /* Reduced CDR BW to avoid glitches */
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CDR(pcie_port), 0x0235, 0xFFFF);
+}
+
+#ifdef CONFIG_IFX_PCIE_PHY_36MHZ_MODE
+static void
+pcie_phy_36mhz_mode_setup(int pcie_port)
+{
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
+
+ /* en_ext_mmd_div_ratio */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0002);
+
+ /* ext_mmd_div_ratio*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0070);
+
+ /* pll_ensdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0200, 0x0200);
+
+ /* en_const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0100, 0x0100);
+
+ /* mmd */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x2000, 0xe000);
+
+ /* lf_mode */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x0000, 0x4000);
+
+ /* const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL1(pcie_port), 0x38e4, 0xFFFF);
+
+ /* const sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x00ee, 0x00FF);
+
+ /* pllmod */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL7(pcie_port), 0x0002, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL6(pcie_port), 0x3a04, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL5(pcie_port), 0xfae3, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL4(pcie_port), 0x1b72, 0xFFFF);
+
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
+}
+#endif /* CONFIG_IFX_PCIE_PHY_36MHZ_MODE */
+
+#ifdef CONFIG_IFX_PCIE_PHY_36MHZ_SSC_MODE
+static void
+pcie_phy_36mhz_ssc_mode_setup(int pcie_port)
+{
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
+
+ /* PLL Setting */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL1(pcie_port), 0x120e, 0xFFFF);
+
+ /* Increase the bias reference voltage */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x39D7, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x0900, 0xFFFF);
+
+ /* Endcnt */
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_EI(pcie_port), 0x0004, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_A_CTRL(pcie_port), 0x6803, 0xFFFF);
+
+ /* Force */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0008, 0x0008);
+
+ /* Predrv_ser_en */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL2(pcie_port), 0x0706, 0xFFFF);
+
+ /* ctrl_lim */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL3(pcie_port), 0x1FFF, 0xFFFF);
+
+ /* ctrl */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_A_CTRL1(pcie_port), 0x0800, 0xFF00);
+
+ /* predrv_ser_en */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4702, 0x7F00);
+
+ /* RTERM*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL2(pcie_port), 0x2e00, 0xFFFF);
+
+ /* en_ext_mmd_div_ratio */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0002);
+
+ /* ext_mmd_div_ratio*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0070);
+
+ /* pll_ensdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0400, 0x0400);
+
+ /* en_const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0200, 0x0200);
+
+ /* mmd */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x2000, 0xe000);
+
+ /* lf_mode */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x0000, 0x4000);
+
+ /* const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL1(pcie_port), 0x38e4, 0xFFFF);
+
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0000, 0x0100);
+ /* const sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x00ee, 0x00FF);
+
+ /* pllmod */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL7(pcie_port), 0x0002, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL6(pcie_port), 0x3a04, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL5(pcie_port), 0xfae3, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL4(pcie_port), 0x1c72, 0xFFFF);
+
+ /* improved 100MHz clock output */
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL2(pcie_port), 0x3096, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_A_CTRL2(pcie_port), 0x4707, 0xFFFF);
+
+ /* reduced CDR BW to avoid glitches */
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CDR(pcie_port), 0x0235, 0xFFFF);
+
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
+}
+#endif /* CONFIG_IFX_PCIE_PHY_36MHZ_SSC_MODE */
+
+#ifdef CONFIG_IFX_PCIE_PHY_25MHZ_MODE
+static void
+pcie_phy_25mhz_mode_setup(int pcie_port)
+{
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
+ /* en_const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0100, 0x0100);
+
+ /* pll_ensdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0000, 0x0200);
+
+ /* en_ext_mmd_div_ratio*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0002, 0x0002);
+
+ /* ext_mmd_div_ratio*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0040, 0x0070);
+
+ /* mmd */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x6000, 0xe000);
+
+ /* lf_mode */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x4000, 0x4000);
+
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
+}
+#endif /* CONFIG_IFX_PCIE_PHY_25MHZ_MODE */
+
+#ifdef CONFIG_IFX_PCIE_PHY_100MHZ_MODE
+static void
+pcie_phy_100mhz_mode_setup(int pcie_port)
+{
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d enter\n", __func__, pcie_port);
+ /* en_ext_mmd_div_ratio */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0002);
+
+ /* ext_mmd_div_ratio*/
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL3(pcie_port), 0x0000, 0x0070);
+
+ /* pll_ensdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0200, 0x0200);
+
+ /* en_const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x0100, 0x0100);
+
+ /* mmd */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL3(pcie_port), 0x2000, 0xe000);
+
+ /* lf_mode */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_A_CTRL2(pcie_port), 0x0000, 0x4000);
+
+ /* const_sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL1(pcie_port), 0x38e4, 0xFFFF);
+
+ /* const sdm */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL2(pcie_port), 0x00ee, 0x00FF);
+
+ /* pllmod */
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL7(pcie_port), 0x0002, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL6(pcie_port), 0x3a04, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL5(pcie_port), 0xfae3, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_PLL_CTRL4(pcie_port), 0x1b72, 0xFFFF);
+
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "%s pcie_port %d exit\n", __func__, pcie_port);
+}
+#endif /* CONFIG_IFX_PCIE_PHY_100MHZ_MODE */
+
+static int
+pcie_phy_wait_startup_ready(int pcie_port)
+{
+ int i;
+
+ for (i = 0; i < IFX_PCIE_PLL_TIMEOUT; i++) {
+ if ((IFX_PCIE_PHY_REG16(PCIE_PHY_PLL_STATUS(pcie_port)) & 0x0040) != 0) {
+ break;
+ }
+ udelay(10);
+ }
+ if (i >= IFX_PCIE_PLL_TIMEOUT) {
+ printk(KERN_ERR "%s PLL Link timeout\n", __func__);
+ return -1;
+ }
+ return 0;
+}
+
+static void
+pcie_phy_load_enable(int pcie_port, int slice)
+{
+ /* Set the load_en of tx/rx slice to '1' */
+ switch (slice) {
+ case 1:
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0010, 0x0010);
+ break;
+ case 2:
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL1(pcie_port), 0x0010, 0x0010);
+ break;
+ case 3:
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CTRL1(pcie_port), 0x0002, 0x0002);
+ break;
+ }
+}
+
+static void
+pcie_phy_load_disable(int pcie_port, int slice)
+{
+ /* set the load_en of tx/rx slice to '0' */
+ switch (slice) {
+ case 1:
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_CTRL1(pcie_port), 0x0000, 0x0010);
+ break;
+ case 2:
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_CTRL1(pcie_port), 0x0000, 0x0010);
+ break;
+ case 3:
+ IFX_PCIE_PHY_REG(PCIE_PHY_RX1_CTRL1(pcie_port), 0x0000, 0x0002);
+ break;
+ }
+}
+
+static void pcie_phy_load_war(int pcie_port)
+{
+ int slice;
+
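+ /* Workaround: briefly pulse load_en on each TX/RX slice, presumably to latch the newly programmed PHY settings. */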
+ for (slice = 1; slice < 4; slice++) {
+ pcie_phy_load_enable(pcie_port, slice);
+ udelay(1);
+ pcie_phy_load_disable(pcie_port, slice);
+ }
+}
+
+static void pcie_phy_tx2_modulation(int pcie_port)
+{
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD1(pcie_port), 0x1FFE, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD2(pcie_port), 0xFFFE, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD3(pcie_port), 0x0601, 0xFFFF);
+ mdelay(1);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX2_MOD3(pcie_port), 0x0001, 0xFFFF);
+}
+
+static void pcie_phy_tx1_modulation(int pcie_port)
+{
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD1(pcie_port), 0x1FFE, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD2(pcie_port), 0xFFFE, 0xFFFF);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD3(pcie_port), 0x0601, 0xFFFF);
+ mdelay(1);
+ IFX_PCIE_PHY_REG(PCIE_PHY_TX1_MOD3(pcie_port), 0x0001, 0xFFFF);
+}
+
+static void pcie_phy_tx_modulation_war(int pcie_port)
+{
+ int i;
+#define PCIE_PHY_MODULATION_NUM 5
+ for (i = 0; i < PCIE_PHY_MODULATION_NUM; i++) {
+ pcie_phy_tx2_modulation(pcie_port);
+ pcie_phy_tx1_modulation(pcie_port);
+ }
+#undef PCIE_PHY_MODULATION_NUM
+}
+
+void pcie_phy_clock_mode_setup(int pcie_port)
+{
+ pcie_pdi_big_endian(pcie_port);
+
+ /* Enable PDI to access PCIe PHY register */
+ pcie_pdi_pmu_enable(pcie_port);
+
+ /* Configure PLL and PHY clock */
+ pcie_phy_comm_setup(pcie_port);
+
+#ifdef CONFIG_IFX_PCIE_PHY_36MHZ_MODE
+ pcie_phy_36mhz_mode_setup(pcie_port);
+#elif defined(CONFIG_IFX_PCIE_PHY_36MHZ_SSC_MODE)
+ pcie_phy_36mhz_ssc_mode_setup(pcie_port);
+#elif defined(CONFIG_IFX_PCIE_PHY_25MHZ_MODE)
+ pcie_phy_25mhz_mode_setup(pcie_port);
+#elif defined (CONFIG_IFX_PCIE_PHY_100MHZ_MODE)
+ pcie_phy_100mhz_mode_setup(pcie_port);
+#else
+ #error "PCIE PHY Clock Mode must be chosen first!!!!"
+#endif /* CONFIG_IFX_PCIE_PHY_36MHZ_MODE */
+
+ /* Enable PCIe PHY and make PLL setting take effect */
+ pcie_phy_pmu_enable(pcie_port);
+
+ /* Check if we are in startup_ready status */
+ pcie_phy_wait_startup_ready(pcie_port);
+
+ pcie_phy_load_war(pcie_port);
+
+ /* Apply TX modulation workarounds */
+ pcie_phy_tx_modulation_war(pcie_port);
+
+#ifdef IFX_PCI_PHY_REG_DUMP
+ IFX_PCIE_PRINT(PCIE_MSG_PHY, "Modified PHY register dump\n");
+ pcie_phy_reg_dump(pcie_port);
+#endif
+}
+
diff --git a/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.c b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.c
new file mode 100644
index 0000000000..1df55b5c02
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.c
@@ -0,0 +1,1146 @@
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <asm/paccess.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+
+#define CONFIG_IFX_PCIE_1ST_CORE
+
+#include "pcie-lantiq.h"
+
+#define IFX_PCIE_IR (INT_NUM_IM4_IRL0 + 25)
+#define IFX_PCIE_INTA (INT_NUM_IM4_IRL0 + 8)
+#define IFX_PCIE_INTB (INT_NUM_IM4_IRL0 + 9)
+#define IFX_PCIE_INTC (INT_NUM_IM4_IRL0 + 10)
+#define IFX_PCIE_INTD (INT_NUM_IM4_IRL0 + 11)
+#define MS(_v, _f) (((_v) & (_f)) >> _f##_S)
+#define SM(_v, _f) (((_v) << _f##_S) & (_f))
+#define IFX_REG_SET_BIT(_f, _r) \
+ IFX_REG_W32((IFX_REG_R32((_r)) &~ (_f)) | (_f), (_r))
+#define IFX_PCIE_LTSSM_ENABLE_TIMEOUT 10
+#define IFX_PCIE_PHY_LINK_UP_TIMEOUT 1000
+#define IFX_PCIE_PHY_LOOP_CNT 5
+
+static DEFINE_SPINLOCK(ifx_pcie_lock);
+
+int pcibios_1st_host_bus_nr(void);
+
+unsigned int g_pcie_debug_flag = PCIE_MSG_ANY & (~PCIE_MSG_CFG);
+
+static ifx_pcie_irq_t pcie_irqs[IFX_PCIE_CORE_NR] = {
+ {
+ .ir_irq = {
+ .irq = IFX_PCIE_IR,
+ .name = "ifx_pcie_rc0",
+ },
+
+ .legacy_irq = {
+ {
+ .irq_bit = PCIE_IRN_INTA,
+ .irq = IFX_PCIE_INTA,
+ },
+ {
+ .irq_bit = PCIE_IRN_INTB,
+ .irq = IFX_PCIE_INTB,
+ },
+ {
+ .irq_bit = PCIE_IRN_INTC,
+ .irq = IFX_PCIE_INTC,
+ },
+ {
+ .irq_bit = PCIE_IRN_INTD,
+ .irq = IFX_PCIE_INTD,
+ },
+ },
+ },
+};
+
+static inline int pcie_ltssm_enable(int pcie_port)
+{
+ int i;
+
+ IFX_REG_W32(PCIE_RC_CCR_LTSSM_ENABLE, PCIE_RC_CCR(pcie_port)); /* Enable LTSSM */
+
+ /* Wait for the link to come up */
+ for (i = 0; i < IFX_PCIE_LTSSM_ENABLE_TIMEOUT; i++) {
+ if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port)) & PCIE_LCTLSTS_RETRAIN_PENDING)) {
+ break;
+ }
+ udelay(10);
+ }
+ if (i >= IFX_PCIE_LTSSM_ENABLE_TIMEOUT) {
+ IFX_PCIE_PRINT(PCIE_MSG_INIT, "%s link timeout!!!!!\n", __func__);
+ return -1;
+ }
+ return 0;
+}
+
+static inline void pcie_status_register_clear(int pcie_port)
+{
+ IFX_REG_W32(0, PCIE_RC_DR(pcie_port));
+ IFX_REG_W32(0, PCIE_PCICMDSTS(pcie_port));
+ IFX_REG_W32(0, PCIE_DCTLSTS(pcie_port));
+ IFX_REG_W32(0, PCIE_LCTLSTS(pcie_port));
+ IFX_REG_W32(0, PCIE_SLCTLSTS(pcie_port));
+ IFX_REG_W32(0, PCIE_RSTS(pcie_port));
+ IFX_REG_W32(0, PCIE_UES_R(pcie_port));
+ IFX_REG_W32(0, PCIE_UEMR(pcie_port));
+ IFX_REG_W32(0, PCIE_UESR(pcie_port));
+ IFX_REG_W32(0, PCIE_CESR(pcie_port));
+ IFX_REG_W32(0, PCIE_CEMR(pcie_port));
+ IFX_REG_W32(0, PCIE_RESR(pcie_port));
+ IFX_REG_W32(0, PCIE_PVCCRSR(pcie_port));
+ IFX_REG_W32(0, PCIE_VC0_RSR0(pcie_port));
+ IFX_REG_W32(0, PCIE_TPFCS(pcie_port));
+ IFX_REG_W32(0, PCIE_TNPFCS(pcie_port));
+ IFX_REG_W32(0, PCIE_TCFCS(pcie_port));
+ IFX_REG_W32(0, PCIE_QSR(pcie_port));
+ IFX_REG_W32(0, PCIE_IOBLSECS(pcie_port));
+}
+
+static inline int ifx_pcie_link_up(int pcie_port)
+{
+ return (IFX_REG_R32(PCIE_PHY_SR(pcie_port)) & PCIE_PHY_SR_PHY_LINK_UP) ? 1 : 0;
+}
+
+static inline void pcie_mem_io_setup(int pcie_port)
+{
+ unsigned int reg;
+ /*
+ * BAR[0:1] readonly register
+ * RC contains only minimal BARs for packets mapped to this device
+ * Mem/IO filters define the range of memory occupied by memory-mapped IO devices
+ * that reside on the downstream side of the bridge.
+ */
+ reg = SM((PCIE_MEM_PHY_PORT_TO_END(pcie_port) >> 20), PCIE_MBML_MEM_LIMIT_ADDR)
+ | SM((PCIE_MEM_PHY_PORT_TO_BASE(pcie_port) >> 20), PCIE_MBML_MEM_BASE_ADDR);
+ IFX_REG_W32(reg, PCIE_MBML(pcie_port));
+
+ /* PCIe_PBML, same as MBML */
+ IFX_REG_W32(IFX_REG_R32(PCIE_MBML(pcie_port)), PCIE_PMBL(pcie_port));
+
+ /* IO Address Range */
+ reg = SM((PCIE_IO_PHY_PORT_TO_END(pcie_port) >> 12), PCIE_IOBLSECS_IO_LIMIT_ADDR)
+ | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port) >> 12), PCIE_IOBLSECS_IO_BASE_ADDR);
+ reg |= PCIE_IOBLSECS_32BIT_IO_ADDR;
+ IFX_REG_W32(reg, PCIE_IOBLSECS(pcie_port));
+
+ reg = SM((PCIE_IO_PHY_PORT_TO_END(pcie_port) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT)
+ | SM((PCIE_IO_PHY_PORT_TO_BASE(pcie_port) >> 16), PCIE_IO_BANDL_UPPER_16BIT_IO_BASE);
+ IFX_REG_W32(reg, PCIE_IO_BANDL(pcie_port));
+}
+
+static inline void pcie_msi_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /* XXX, MSI stuff should only apply to EP */
+ /* MSI Capability: Only enable 32-bit addresses */
+ reg = IFX_REG_R32(PCIE_MCAPR(pcie_port));
+ reg &= ~PCIE_MCAPR_ADDR64_CAP;
+ reg |= PCIE_MCAPR_MSI_ENABLE;
+
+ /* Disable multiple message */
+ reg &= ~(PCIE_MCAPR_MULTI_MSG_CAP | PCIE_MCAPR_MULTI_MSG_ENABLE);
+ IFX_REG_W32(reg, PCIE_MCAPR(pcie_port));
+}
+
+static inline void pcie_pm_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /* Enable PME, Soft reset enabled */
+ reg = IFX_REG_R32(PCIE_PM_CSR(pcie_port));
+ reg |= PCIE_PM_CSR_PME_ENABLE | PCIE_PM_CSR_SW_RST;
+ IFX_REG_W32(reg, PCIE_PM_CSR(pcie_port));
+}
+
+static inline void pcie_bus_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ reg = SM(0, PCIE_BNR_PRIMARY_BUS_NUM) | SM(1, PCIE_PNR_SECONDARY_BUS_NUM) | SM(0xFF, PCIE_PNR_SUB_BUS_NUM);
+ IFX_REG_W32(reg, PCIE_BNR(pcie_port));
+}
+
+static inline void pcie_device_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /* Device capability register, set up Maximum payload size */
+ reg = IFX_REG_R32(PCIE_DCAP(pcie_port));
+ reg |= PCIE_DCAP_ROLE_BASE_ERR_REPORT;
+ reg |= SM(PCIE_MAX_PAYLOAD_128, PCIE_DCAP_MAX_PAYLOAD_SIZE);
+
+ /* Only available for EP */
+ reg &= ~(PCIE_DCAP_EP_L0S_LATENCY | PCIE_DCAP_EP_L1_LATENCY);
+ IFX_REG_W32(reg, PCIE_DCAP(pcie_port));
+
+ /* Device control and status register */
+ /* Set Maximum Read Request size for the device as a Requestor */
+ reg = IFX_REG_R32(PCIE_DCTLSTS(pcie_port));
+
+ /*
+ * Request size can be larger than the MPS used, but the completions returned
+ * for the read will be bounded by the MPS size.
+ * In our system the maximum request size depends on the AHB burst size, which is
+ * 64 bytes, but we program 128 bytes since that is the minimum allowed value.
+ */
+ reg |= SM(PCIE_MAX_PAYLOAD_128, PCIE_DCTLSTS_MAX_READ_SIZE)
+ | SM(PCIE_MAX_PAYLOAD_128, PCIE_DCTLSTS_MAX_PAYLOAD_SIZE);
+
+ /* Enable relaxed ordering, no snoop, and all kinds of errors */
+ reg |= PCIE_DCTLSTS_RELAXED_ORDERING_EN | PCIE_DCTLSTS_ERR_EN | PCIE_DCTLSTS_NO_SNOOP_EN;
+
+ IFX_REG_W32(reg, PCIE_DCTLSTS(pcie_port));
+}
+
+static inline void pcie_link_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /*
+ * XXX, Link capability register, bit 18 for EP CLKREQ# dynamic clock management for L1, L2/3 CPM
+ * L0s exit latency is reported during link training via the TS1 ordered sets (N_FTS)
+ */
+ reg = IFX_REG_R32(PCIE_LCAP(pcie_port));
+ reg &= ~PCIE_LCAP_L0S_EIXT_LATENCY;
+ reg |= SM(3, PCIE_LCAP_L0S_EIXT_LATENCY);
+ IFX_REG_W32(reg, PCIE_LCAP(pcie_port));
+
+ /* Link control and status register */
+ reg = IFX_REG_R32(PCIE_LCTLSTS(pcie_port));
+
+ /* Link Enable, ASPM enabled */
+ reg &= ~PCIE_LCTLSTS_LINK_DISABLE;
+
+#ifdef CONFIG_PCIEASPM
+ /*
+ * We use the same physical reference clock that the platform provides on the connector
+ * This allows ASPM to calculate the new exit latency
+ */
+ reg |= PCIE_LCTLSTS_SLOT_CLK_CFG;
+ reg |= PCIE_LCTLSTS_COM_CLK_CFG;
+ /*
+ * ASPM should stay disabled by default unless dedicated power management support exists;
+ * enabling it can cause system hangs, instability and performance degradation
+ */
+ reg |= PCIE_LCTLSTS_ASPM_ENABLE;
+#else
+ reg &= ~PCIE_LCTLSTS_ASPM_ENABLE;
+#endif /* CONFIG_PCIEASPM */
+
+ /*
+ * The maximum size of any completion with data packet is bounded by the MPS setting
+ * in device control register
+ */
+ /* RCB may cause multiple split transactions, two options available, we use 64 byte RCB */
+ reg &= ~PCIE_LCTLSTS_RCB128;
+ IFX_REG_W32(reg, PCIE_LCTLSTS(pcie_port));
+}
+
+static inline void pcie_error_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /*
+ * Forward ERR_COR, ERR_NONFATAL, ERR_FATAL to the backbone
+ * Poisoned write TLPs and completions indicating poisoned TLPs will set the PCIe_PCICMDSTS.MDPE
+ */
+ reg = IFX_REG_R32(PCIE_INTRBCTRL(pcie_port));
+ reg |= PCIE_INTRBCTRL_SERR_ENABLE | PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE;
+
+ IFX_REG_W32(reg, PCIE_INTRBCTRL(pcie_port));
+
+ /* Uncorrectable Error Mask Register, Unmask <enable> all bits in PCIE_UESR */
+ reg = IFX_REG_R32(PCIE_UEMR(pcie_port));
+ reg &= ~PCIE_ALL_UNCORRECTABLE_ERR;
+ IFX_REG_W32(reg, PCIE_UEMR(pcie_port));
+
+ /* Uncorrectable Error Severity Register, ALL errors are FATAL */
+ IFX_REG_W32(PCIE_ALL_UNCORRECTABLE_ERR, PCIE_UESR(pcie_port));
+
+ /* Correctable Error Mask Register, unmask <enable> all bits */
+ reg = IFX_REG_R32(PCIE_CEMR(pcie_port));
+ reg &= ~PCIE_CORRECTABLE_ERR;
+ IFX_REG_W32(reg, PCIE_CEMR(pcie_port));
+
+ /* Advanced Error Capabilities and Control Register */
+ reg = IFX_REG_R32(PCIE_AECCR(pcie_port));
+ reg |= PCIE_AECCR_ECRC_CHECK_EN | PCIE_AECCR_ECRC_GEN_EN;
+ IFX_REG_W32(reg, PCIE_AECCR(pcie_port));
+
+ /* Root Error Command Register, Report all types of errors */
+ reg = IFX_REG_R32(PCIE_RECR(pcie_port));
+ reg |= PCIE_RECR_ERR_REPORT_EN;
+ IFX_REG_W32(reg, PCIE_RECR(pcie_port));
+
+ /* Clear the Root status register */
+ reg = IFX_REG_R32(PCIE_RESR(pcie_port));
+ IFX_REG_W32(reg, PCIE_RESR(pcie_port));
+}
+
+static inline void pcie_root_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /* Root control and capabilities register */
+ reg = IFX_REG_R32(PCIE_RCTLCAP(pcie_port));
+ reg |= PCIE_RCTLCAP_SERR_ENABLE | PCIE_RCTLCAP_PME_INT_EN;
+ IFX_REG_W32(reg, PCIE_RCTLCAP(pcie_port));
+}
+
+static inline void pcie_vc_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /* Port VC Capability Register 2 */
+ reg = IFX_REG_R32(PCIE_PVC2(pcie_port));
+ reg &= ~PCIE_PVC2_VC_ARB_WRR;
+ reg |= PCIE_PVC2_VC_ARB_16P_FIXED_WRR;
+ IFX_REG_W32(reg, PCIE_PVC2(pcie_port));
+
+ /* VC0 Resource Capability Register */
+ reg = IFX_REG_R32(PCIE_VC0_RC(pcie_port));
+ reg &= ~PCIE_VC0_RC_REJECT_SNOOP;
+ IFX_REG_W32(reg, PCIE_VC0_RC(pcie_port));
+}
+
+static inline void pcie_port_logic_setup(int pcie_port)
+{
+ unsigned int reg;
+
+ /* FTS number, default 12, increase to 63; may increase the L0s to L0 exit time */
+ reg = IFX_REG_R32(PCIE_AFR(pcie_port));
+ reg &= ~(PCIE_AFR_FTS_NUM | PCIE_AFR_COM_FTS_NUM);
+ reg |= SM(PCIE_AFR_FTS_NUM_DEFAULT, PCIE_AFR_FTS_NUM)
+ | SM(PCIE_AFR_FTS_NUM_DEFAULT, PCIE_AFR_COM_FTS_NUM);
+ /* L0s and L1 entry latency */
+ reg &= ~(PCIE_AFR_L0S_ENTRY_LATENCY | PCIE_AFR_L1_ENTRY_LATENCY);
+ reg |= SM(PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT, PCIE_AFR_L0S_ENTRY_LATENCY)
+ | SM(PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT, PCIE_AFR_L1_ENTRY_LATENCY);
+ IFX_REG_W32(reg, PCIE_AFR(pcie_port));
+
+ /* Port Link Control Register */
+ reg = IFX_REG_R32(PCIE_PLCR(pcie_port));
+ reg |= PCIE_PLCR_DLL_LINK_EN; /* Enable the DLL link */
+ IFX_REG_W32(reg, PCIE_PLCR(pcie_port));
+
+ /* Lane Skew Register */
+ reg = IFX_REG_R32(PCIE_LSR(pcie_port));
+ /* Enable ACK/NACK and FC */
+ reg &= ~(PCIE_LSR_ACKNAK_DISABLE | PCIE_LSR_FC_DISABLE);
+ IFX_REG_W32(reg, PCIE_LSR(pcie_port));
+
+ /* Symbol Timer Register and Filter Mask Register 1 */
+ reg = IFX_REG_R32(PCIE_STRFMR(pcie_port));
+
+ /* Default SKP interval is very accurate already, 5us */
+ /* Enable IO/CFG transaction */
+ reg |= PCIE_STRFMR_RX_CFG_TRANS_ENABLE | PCIE_STRFMR_RX_IO_TRANS_ENABLE;
+ /* Disable FC WDT */
+ reg &= ~PCIE_STRFMR_FC_WDT_DISABLE;
+ IFX_REG_W32(reg, PCIE_STRFMR(pcie_port));
+
+ /* Filter Masker Register 2 */
+ reg = IFX_REG_R32(PCIE_FMR2(pcie_port));
+ reg |= PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1 | PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1;
+ IFX_REG_W32(reg, PCIE_FMR2(pcie_port));
+
+ /* VC0 Completion Receive Queue Control Register */
+ reg = IFX_REG_R32(PCIE_VC0_CRQCR(pcie_port));
+ reg &= ~PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE;
+ reg |= SM(PCIE_VC0_TLP_QUEUE_MODE_BYPASS, PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE);
+ IFX_REG_W32(reg, PCIE_VC0_CRQCR(pcie_port));
+}
+
+static inline void pcie_rc_cfg_reg_setup(int pcie_port)
+{
+ /* Disable LTSSM */
+ IFX_REG_W32(0, PCIE_RC_CCR(pcie_port));
+
+ pcie_mem_io_setup(pcie_port);
+ pcie_msi_setup(pcie_port);
+ pcie_pm_setup(pcie_port);
+ pcie_bus_setup(pcie_port);
+ pcie_device_setup(pcie_port);
+ pcie_link_setup(pcie_port);
+ pcie_error_setup(pcie_port);
+ pcie_root_setup(pcie_port);
+ pcie_vc_setup(pcie_port);
+ pcie_port_logic_setup(pcie_port);
+}
+
+static int ifx_pcie_wait_phy_link_up(int pcie_port)
+{
+ int i;
+
+ /* Wait for the PHY link to come up */
+ for (i = 0; i < IFX_PCIE_PHY_LINK_UP_TIMEOUT; i++) {
+ if (ifx_pcie_link_up(pcie_port)) {
+ break;
+ }
+ udelay(100);
+ }
+ if (i >= IFX_PCIE_PHY_LINK_UP_TIMEOUT) {
+ printk(KERN_ERR "%s timeout\n", __func__);
+ return -1;
+ }
+
+ /* Check data link up or not */
+ if (!(IFX_REG_R32(PCIE_RC_DR(pcie_port)) & PCIE_RC_DR_DLL_UP)) {
+ printk(KERN_ERR "%s DLL link is still down\n", __func__);
+ return -1;
+ }
+
+ /* Check Data link active or not */
+ if (!(IFX_REG_R32(PCIE_LCTLSTS(pcie_port)) & PCIE_LCTLSTS_DLL_ACTIVE)) {
+ printk(KERN_ERR "%s DLL is not active\n", __func__);
+ return -1;
+ }
+ return 0;
+}
+
+static inline int pcie_app_loigc_setup(int pcie_port)
+{
+ IFX_REG_W32(PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS, PCIE_AHB_CTRL(pcie_port));
+
+ /* Pull PCIe EP out of reset */
+ pcie_device_rst_deassert(pcie_port);
+
+ /* Start LTSSM training between RC and EP */
+ pcie_ltssm_enable(pcie_port);
+
+ /* Check PHY status after enabling LTSSM */
+ if (ifx_pcie_wait_phy_link_up(pcie_port) != 0) {
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Must be done after LTSSM because it depends on the negotiated link
+ * width and payload size.
+ * Update the Replay Time Limit. Empirically, some PCIe
+ * devices take a little longer to respond than expected under
+ * load. As a workaround for this we configure the Replay Time
+ * Limit to the value expected for a 512 byte MPS instead of
+ * our actual 128 byte MPS. The numbers below are directly
+ * from the PCIe spec table 3-4/5.
+ */
+static inline void pcie_replay_time_update(int pcie_port)
+{
+ unsigned int reg;
+ int nlw;
+ int rtl;
+
+ reg = IFX_REG_R32(PCIE_LCTLSTS(pcie_port));
+
+ nlw = MS(reg, PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH);
+ switch (nlw) {
+ case PCIE_MAX_LENGTH_WIDTH_X1:
+ rtl = 1677;
+ break;
+ case PCIE_MAX_LENGTH_WIDTH_X2:
+ rtl = 867;
+ break;
+ case PCIE_MAX_LENGTH_WIDTH_X4:
+ rtl = 462;
+ break;
+ case PCIE_MAX_LENGTH_WIDTH_X8:
+ rtl = 258;
+ break;
+ default:
+ rtl = 1677;
+ break;
+ }
+ reg = IFX_REG_R32(PCIE_ALTRT(pcie_port));
+ reg &= ~PCIE_ALTRT_REPLAY_TIME_LIMIT;
+ reg |= SM(rtl, PCIE_ALTRT_REPLAY_TIME_LIMIT);
+ IFX_REG_W32(reg, PCIE_ALTRT(pcie_port));
+
+ IFX_PCIE_PRINT(PCIE_MSG_REG, "%s PCIE_ALTRT 0x%08x\n",
+ __func__, IFX_REG_R32(PCIE_ALTRT(pcie_port)));
+}
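+
+/*
+ * Example (illustrative, assuming the values above follow the PCIe spec
+ * table for a 512 byte MPS): on the usual x1 link, nlw equals
+ * PCIE_MAX_LENGTH_WIDTH_X1, so a Replay Time Limit of 1677 is written
+ * into PCIE_ALTRT even though the RC actually runs with a 128 byte MPS.
+ */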
+
+/*
+ * Table 359 Enhanced Configuration Address Mapping
+ * (defined in Table 7-1, page 341, PCI Express Base Specification v1.1)
+ * Memory Address PCI Express Configuration Space
+ * A[(20+n-1):20] Bus Number 1 < n < 8
+ * A[19:15] Device Number
+ * A[14:12] Function Number
+ * A[11:8] Extended Register Number
+ * A[7:2] Register Number
+ * A[1:0] Along with size of the access, used to generate Byte Enables
+ * For VR9, only the address bits [22:0] are mapped to the configuration space:
+ * . Address bits [22:20] select the target bus (1-of-8)
+ * . Address bits [19:15] select the target device (1-of-32) on the bus
+ * . Address bits [14:12] select the target function (1-of-8) within the device
+ * . Address bits [11:2] select the target dword (1-of-1024) within the selected function's configuration space
+ * . Address bits [1:0] define the start byte location within the selected dword.
+ */
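+/*
+ * Worked example (illustrative values only): for bus 1, device 0,
+ * function 0 and where = 0x10, the type 1 encoding below gives
+ * addr = (1 << 20) | (0 << 15) | (0 << 12) | (0x10 & ~3) = 0x00100010,
+ * which ifx_pcie_cfg_rd()/ifx_pcie_cfg_wr() then add to the port's
+ * configuration space base.
+ */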
+static inline unsigned int pcie_bus_addr(u8 bus_num, u16 devfn, int where)
+{
+ unsigned int addr;
+ u8 bus;
+
+ if (!bus_num) {
+ /* type 0 */
+ addr = ((PCI_SLOT(devfn) & 0x1F) << 15) | ((PCI_FUNC(devfn) & 0x7) << 12) | ((where & 0xFFF)& ~3);
+ } else {
+ bus = bus_num;
+ /* type 1, only support 8 buses */
+ addr = ((bus & 0x7) << 20) | ((PCI_SLOT(devfn) & 0x1F) << 15) |
+ ((PCI_FUNC(devfn) & 0x7) << 12) | ((where & 0xFFF) & ~3);
+ }
+ IFX_PCIE_PRINT(PCIE_MSG_CFG, "%s: bus addr : %02x:%02x.%01x/%02x, addr=%08x\n",
+ __func__, bus_num, PCI_SLOT(devfn), PCI_FUNC(devfn), where, addr);
+ return addr;
+}
+
+static int pcie_valid_config(int pcie_port, int bus, int dev)
+{
+ /* RC itself */
+ if ((bus == 0) && (dev == 0))
+ return 1;
+
+ /* No physical link */
+ if (!ifx_pcie_link_up(pcie_port))
+ return 0;
+
+ /* Bus zero only has RC itself
+ * XXX, check if EP will be integrated
+ */
+ if ((bus == 0) && (dev != 0))
+ return 0;
+
+ /* Maximum 8 buses supported for VRX */
+ if (bus > 9)
+ return 0;
+
+ /*
+ * PCIe is a point-to-point link, so one bus supports only one device,
+ * except for bus zero and a PCIe switch, which presents virtual bus devices.
+ * The following two conditions really depend on the system design
+ * and the attached device.
+ * XXX, how about more new switches?
+ */
+ if ((bus == 1) && (dev != 0))
+ return 0;
+
+ if ((bus >= 3) && (dev != 0))
+ return 0;
+ return 1;
+}
+
+static inline unsigned int ifx_pcie_cfg_rd(int pcie_port, unsigned int reg)
+{
+ return IFX_REG_R32((volatile unsigned int *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
+}
+
+static inline void ifx_pcie_cfg_wr(int pcie_port, unsigned int reg, unsigned int val)
+{
+ IFX_REG_W32( val, (volatile unsigned int *)(PCIE_CFG_PORT_TO_BASE(pcie_port) + reg));
+}
+
+static inline unsigned int ifx_pcie_rc_cfg_rd(int pcie_port, unsigned int reg)
+{
+ return IFX_REG_R32((volatile unsigned int *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
+}
+
+static inline void ifx_pcie_rc_cfg_wr(int pcie_port, unsigned int reg, unsigned int val)
+{
+ IFX_REG_W32(val, (volatile unsigned int *)(PCIE_RC_PORT_TO_BASE(pcie_port) + reg));
+}
+
+unsigned int ifx_pcie_bus_enum_read_hack(int where, unsigned int value)
+{
+ unsigned int tvalue = value;
+
+ if (where == PCI_PRIMARY_BUS) {
+ u8 primary, secondary, subordinate;
+
+ primary = tvalue & 0xFF;
+ secondary = (tvalue >> 8) & 0xFF;
+ subordinate = (tvalue >> 16) & 0xFF;
+ primary += pcibios_1st_host_bus_nr();
+ secondary += pcibios_1st_host_bus_nr();
+ subordinate += pcibios_1st_host_bus_nr();
+ tvalue = (tvalue & 0xFF000000) | (unsigned int)primary | (unsigned int)(secondary << 8) | (unsigned int)(subordinate << 16);
+ }
+ return tvalue;
+}
+
+unsigned int ifx_pcie_bus_enum_write_hack(int where, unsigned int value)
+{
+ unsigned int tvalue = value;
+
+ if (where == PCI_PRIMARY_BUS) {
+ u8 primary, secondary, subordinate;
+
+ primary = tvalue & 0xFF;
+ secondary = (tvalue >> 8) & 0xFF;
+ subordinate = (tvalue >> 16) & 0xFF;
+ if (primary > 0 && primary != 0xFF)
+ primary -= pcibios_1st_host_bus_nr();
+ if (secondary > 0 && secondary != 0xFF)
+ secondary -= pcibios_1st_host_bus_nr();
+ if (subordinate > 0 && subordinate != 0xFF)
+ subordinate -= pcibios_1st_host_bus_nr();
+ tvalue = (tvalue & 0xFF000000) | (unsigned int)primary | (unsigned int)(secondary << 8) | (unsigned int)(subordinate << 16);
+ } else if (where == PCI_SUBORDINATE_BUS) {
+ u8 subordinate = tvalue & 0xFF;
+ subordinate = subordinate > 0 ? subordinate - pcibios_1st_host_bus_nr() : 0;
+ tvalue = subordinate;
+ }
+ return tvalue;
+}
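+
+/*
+ * Illustration of the two enumeration hacks above (hypothetical numbers):
+ * if pcibios_1st_host_bus_nr() returned 1 and the OS wrote primary = 1,
+ * secondary = 2, subordinate = 2 to a bridge behind this RC, the write
+ * hack hands 0/1/1 to the hardware; the read hack applies the reverse
+ * offset, so the OS keeps seeing its own globally unique bus numbers.
+ */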
+
+/**
+ * \fn static int ifx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+ * int where, int size, unsigned int *value)
+ * \brief Read a value from configuration space
+ *
+ * \param[in] bus Pointer to pci bus
+ * \param[in] devfn PCI device function number
+ * \param[in] where PCI register number
+ * \param[in] size Register read size
+ * \param[out] value Pointer to return value
+ * \return PCIBIOS_BAD_REGISTER_NUMBER Invalid register number
+ * \return PCIBIOS_FUNC_NOT_SUPPORTED PCI function not supported
+ * \return PCIBIOS_DEVICE_NOT_FOUND PCI device not found
+ * \return PCIBIOS_SUCCESSFUL OK
+ * \ingroup IFX_PCIE_OS
+ */
+static int ifx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, unsigned int *value)
+{
+ unsigned int data = 0;
+ int bus_number = bus->number;
+ static const unsigned int mask[8] = {0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0};
+ int ret = PCIBIOS_SUCCESSFUL;
+ struct ifx_pci_controller *ctrl = bus->sysdata;
+ int pcie_port = ctrl->port;
+
+ if (unlikely(size != 1 && size != 2 && size != 4)){
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ goto out;
+ }
+
+ /* Make sure the address is aligned to natural boundary */
+ if (unlikely(((size - 1) & where))) {
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ goto out;
+ }
+
+ /*
+ * If we are the second controller, we have to cheat the OS so that it
+ * assumes its bus numbering starts from 0 on this host controller
+ */
+ bus_number = ifx_pcie_bus_nr_deduct(bus_number, pcie_port);
+
+ /*
+ * We need to force the bus number to be zero on the root
+ * bus. Linux numbers the 2nd root bus to start after all
+ * busses on root 0.
+ */
+ if (bus->parent == NULL)
+ bus_number = 0;
+
+ /*
+ * PCIe only has a single device connected to it. It is
+ * always device ID 0. Don't bother doing reads for other
+ * device IDs on the first segment.
+ */
+ if ((bus_number == 0) && (PCI_SLOT(devfn) != 0)) {
+ ret = PCIBIOS_FUNC_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (pcie_valid_config(pcie_port, bus_number, PCI_SLOT(devfn)) == 0) {
+ *value = 0xffffffff;
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ goto out;
+ }
+
+ IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "%s: %02x:%02x.%01x/%02x:%01d\n", __func__, bus_number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
+
+ PCIE_IRQ_LOCK(ifx_pcie_lock);
+ if (bus_number == 0) { /* RC itself */
+ unsigned int t;
+
+ t = (where & ~3);
+ data = ifx_pcie_rc_cfg_rd(pcie_port, t);
+ IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "%s: rd local cfg, offset:%08x, data:%08x\n",
+ __func__, t, data);
+ } else {
+ unsigned int addr = pcie_bus_addr(bus_number, devfn, where);
+
+ data = ifx_pcie_cfg_rd(pcie_port, addr);
+ if (pcie_port == IFX_PCIE_PORT0) {
+#ifdef CONFIG_IFX_PCIE_HW_SWAP
+ data = le32_to_cpu(data);
+#endif /* CONFIG_IFX_PCIE_HW_SWAP */
+ } else {
+#ifdef CONFIG_IFX_PCIE1_HW_SWAP
+ data = le32_to_cpu(data);
+#endif /* CONFIG_IFX_PCIE1_HW_SWAP */
+ }
+ }
+ /* To get a correct PCI topology, we have to restore the bus number to the OS */
+ data = ifx_pcie_bus_enum_hack(bus, devfn, where, data, pcie_port, 1);
+
+ PCIE_IRQ_UNLOCK(ifx_pcie_lock);
+ IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "%s: read config: data=%08x raw=%08x\n",
+ __func__, (data >> (8 * (where & 3))) & mask[size & 7], data);
+
+ *value = (data >> (8 * (where & 3))) & mask[size & 7];
+out:
+ return ret;
+}
+
+static unsigned int ifx_pcie_size_to_value(int where, int size, unsigned int data, unsigned int value)
+{
+ unsigned int shift;
+ unsigned int tdata = data;
+
+ switch (size) {
+ case 1:
+ shift = (where & 0x3) << 3;
+ tdata &= ~(0xffU << shift);
+ tdata |= ((value & 0xffU) << shift);
+ break;
+ case 2:
+ shift = (where & 3) << 3;
+ tdata &= ~(0xffffU << shift);
+ tdata |= ((value & 0xffffU) << shift);
+ break;
+ case 4:
+ tdata = value;
+ break;
+ }
+ return tdata;
+}
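+
+/*
+ * Example of the read-modify-write merge above (values picked purely for
+ * illustration): a 1-byte write of 0xAB at where = 0x41 into an existing
+ * dword 0x11223344 uses shift = 8 and yields 0x1122AB44; only the
+ * addressed byte lane changes.
+ */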
+
+/**
+ * \fn static int ifx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ * int where, int size, unsigned int value)
+ * \brief Write a value to PCI configuration space
+ *
+ * \param[in] bus Pointer to pci bus
+ * \param[in] devfn PCI device function number
+ * \param[in] where PCI register number
+ * \param[in] size The register size to be written
+ * \param[in] value The value to be written
+ * \return PCIBIOS_BAD_REGISTER_NUMBER Invalid register number
+ * \return PCIBIOS_DEVICE_NOT_FOUND PCI device not found
+ * \return PCIBIOS_SUCCESSFUL OK
+ * \ingroup IFX_PCIE_OS
+ */
+static int ifx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, unsigned int value)
+{
+ int bus_number = bus->number;
+ int ret = PCIBIOS_SUCCESSFUL;
+ struct ifx_pci_controller *ctrl = bus->sysdata;
+ int pcie_port = ctrl->port;
+ unsigned int tvalue = value;
+ unsigned int data;
+
+ /* Make sure the address is aligned to natural boundary */
+ if (unlikely(((size - 1) & where))) {
+ ret = PCIBIOS_BAD_REGISTER_NUMBER;
+ goto out;
+ }
+ /*
+ * If we are the second controller, we have to cheat the OS so that it
+ * assumes its bus numbering starts from 0 on this host controller
+ */
+ bus_number = ifx_pcie_bus_nr_deduct(bus_number, pcie_port);
+
+ /*
+ * We need to force the bus number to be zero on the root
+ * bus. Linux numbers the 2nd root bus to start after all
+ * busses on root 0.
+ */
+ if (bus->parent == NULL)
+ bus_number = 0;
+
+ if (pcie_valid_config(pcie_port, bus_number, PCI_SLOT(devfn)) == 0) {
+ ret = PCIBIOS_DEVICE_NOT_FOUND;
+ goto out;
+ }
+
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG, "%s: %02x:%02x.%01x/%02x:%01d value=%08x\n", __func__,
+ bus_number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, value);
+
+ /* XXX, some PCIe devices may need some delay */
+ PCIE_IRQ_LOCK(ifx_pcie_lock);
+
+ /*
+ * To configure the correct bus topology in the native way, we have to cheat the OS so that
+ * it can configure the PCIe hardware correctly.
+ */
+ tvalue = ifx_pcie_bus_enum_hack(bus, devfn, where, value, pcie_port, 0);
+
+ if (bus_number == 0) { /* RC itself */
+ unsigned int t;
+
+ t = (where & ~3);
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: wr local cfg, offset:%08x, fill:%08x\n", __func__, t, value);
+ data = ifx_pcie_rc_cfg_rd(pcie_port, t);
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: rd local cfg, offset:%08x, data:%08x\n", __func__, t, data);
+
+ data = ifx_pcie_size_to_value(where, size, data, tvalue);
+
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: wr local cfg, offset:%08x, value:%08x\n", __func__, t, data);
+ ifx_pcie_rc_cfg_wr(pcie_port, t, data);
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: rd local cfg, offset:%08x, value:%08x\n",
+ __func__, t, ifx_pcie_rc_cfg_rd(pcie_port, t));
+ } else {
+ unsigned int addr = pcie_bus_addr(bus_number, devfn, where);
+
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: wr cfg, offset:%08x, fill:%08x\n", __func__, addr, value);
+ data = ifx_pcie_cfg_rd(pcie_port, addr);
+ if (pcie_port == IFX_PCIE_PORT0) {
+#ifdef CONFIG_IFX_PCIE_HW_SWAP
+ data = le32_to_cpu(data);
+#endif /* CONFIG_IFX_PCIE_HW_SWAP */
+ } else {
+#ifdef CONFIG_IFX_PCIE1_HW_SWAP
+ data = le32_to_cpu(data);
+#endif /* CONFIG_IFX_PCIE1_HW_SWAP */
+ }
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG,"%s: rd cfg, offset:%08x, data:%08x\n", __func__, addr, data);
+
+ data = ifx_pcie_size_to_value(where, size, data, tvalue);
+ if (pcie_port == IFX_PCIE_PORT0) {
+#ifdef CONFIG_IFX_PCIE_HW_SWAP
+ data = cpu_to_le32(data);
+#endif /* CONFIG_IFX_PCIE_HW_SWAP */
+ } else {
+#ifdef CONFIG_IFX_PCIE1_HW_SWAP
+ data = cpu_to_le32(data);
+#endif /* CONFIG_IFX_PCIE1_HW_SWAP */
+ }
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG, "%s: wr cfg, offset:%08x, value:%08x\n", __func__, addr, data);
+ ifx_pcie_cfg_wr(pcie_port, addr, data);
+ IFX_PCIE_PRINT(PCIE_MSG_WRITE_CFG, "%s: rd cfg, offset:%08x, value:%08x\n",
+ __func__, addr, ifx_pcie_cfg_rd(pcie_port, addr));
+ }
+ PCIE_IRQ_UNLOCK(ifx_pcie_lock);
+out:
+ return ret;
+}
+
+static struct resource ifx_pcie_io_resource = {
+ .name = "PCIe0 I/O space",
+ .start = PCIE_IO_PHY_BASE,
+ .end = PCIE_IO_PHY_END,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource ifx_pcie_mem_resource = {
+ .name = "PCIe0 Memory space",
+ .start = PCIE_MEM_PHY_BASE,
+ .end = PCIE_MEM_PHY_END,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct pci_ops ifx_pcie_ops = {
+ .read = ifx_pcie_read_config,
+ .write = ifx_pcie_write_config,
+};
+
+static struct ifx_pci_controller ifx_pcie_controller[IFX_PCIE_CORE_NR] = {
+ {
+ .pcic = {
+ .pci_ops = &ifx_pcie_ops,
+ .mem_resource = &ifx_pcie_mem_resource,
+ .io_resource = &ifx_pcie_io_resource,
+ },
+ .port = IFX_PCIE_PORT0,
+ },
+};
+
+static inline void pcie_core_int_clear_all(int pcie_port)
+{
+ unsigned int reg;
+ reg = IFX_REG_R32(PCIE_IRNCR(pcie_port));
+ reg &= PCIE_RC_CORE_COMBINED_INT;
+ IFX_REG_W32(reg, PCIE_IRNCR(pcie_port));
+}
+
+static irqreturn_t pcie_rc_core_isr(int irq, void *dev_id)
+{
+ struct ifx_pci_controller *ctrl = (struct ifx_pci_controller *)dev_id;
+ int pcie_port = ctrl->port;
+
+ IFX_PCIE_PRINT(PCIE_MSG_ISR, "PCIe RC error intr %d\n", irq);
+ pcie_core_int_clear_all(pcie_port);
+ return IRQ_HANDLED;
+}
+
+static int pcie_rc_core_int_init(int pcie_port)
+{
+ int ret;
+
+ /* Enable core interrupt */
+ IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNEN(pcie_port));
+
+ /* Clear it first */
+ IFX_REG_SET_BIT(PCIE_RC_CORE_COMBINED_INT, PCIE_IRNCR(pcie_port));
+ ret = request_irq(pcie_irqs[pcie_port].ir_irq.irq, pcie_rc_core_isr, IRQF_DISABLED,
+ pcie_irqs[pcie_port].ir_irq.name, &ifx_pcie_controller[pcie_port]);
+ if (ret)
+ printk(KERN_ERR "%s request irq %d failed\n", __func__, IFX_PCIE_IR);
+
+ return ret;
+}
+
+int ifx_pcie_bios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ unsigned int irq_bit = 0;
+ int irq = 0;
+ struct ifx_pci_controller *ctrl = dev->bus->sysdata;
+ int pcie_port = ctrl->port;
+
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s port %d dev %s slot %d pin %d \n", __func__, pcie_port, pci_name(dev), slot, pin);
+
+ if ((pin == PCIE_LEGACY_DISABLE) || (pin > PCIE_LEGACY_INT_MAX)) {
+ printk(KERN_WARNING "WARNING: dev %s: invalid interrupt pin %d\n", pci_name(dev), pin);
+ return -1;
+ }
+ /* Pin index so minus one */
+ irq_bit = pcie_irqs[pcie_port].legacy_irq[pin - 1].irq_bit;
+ irq = pcie_irqs[pcie_port].legacy_irq[pin - 1].irq;
+ IFX_REG_SET_BIT(irq_bit, PCIE_IRNEN(pcie_port));
+ IFX_REG_SET_BIT(irq_bit, PCIE_IRNCR(pcie_port));
+ IFX_PCIE_PRINT(PCIE_MSG_FIXUP, "%s dev %s irq %d assigned\n", __func__, pci_name(dev), irq);
+ return irq;
+}
+
+/**
+ * \fn int ifx_pcie_bios_plat_dev_init(struct pci_dev *dev)
+ * \brief Called to perform platform specific PCI setup
+ *
+ * \param[in] dev The Linux PCI device structure for the device to map
+ * \return OK
+ * \ingroup IFX_PCIE_OS
+ */
+int ifx_pcie_bios_plat_dev_init(struct pci_dev *dev)
+{
+ u16 config;
+ unsigned int dconfig;
+ int pos;
+ /* Enable reporting System errors and parity errors on all devices */
+ /* Enable parity checking and error reporting */
+ pci_read_config_word(dev, PCI_COMMAND, &config);
+ config |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR /*| PCI_COMMAND_INVALIDATE |
+ PCI_COMMAND_FAST_BACK*/;
+ pci_write_config_word(dev, PCI_COMMAND, config);
+
+ if (dev->subordinate) {
+ /* Set latency timers on sub bridges */
+ pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 0x40); /* XXX, */
+ /* More bridge error detection */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &config);
+ config |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, config);
+ }
+ /* Enable the PCIe normal error reporting */
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pos) {
+ /* Disable system error generation in response to error messages */
+ pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &config);
+ config &= ~(PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE | PCI_EXP_RTCTL_SEFEE);
+ pci_write_config_word(dev, pos + PCI_EXP_RTCTL, config);
+
+ /* Clear PCIE Capability's Device Status */
+ pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &config);
+ pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, config);
+
+ /* Update Device Control */
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &config);
+ /* Correctable Error Reporting */
+ config |= PCI_EXP_DEVCTL_CERE;
+ /* Non-Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_NFERE;
+ /* Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_FERE;
+ /* Unsupported Request */
+ config |= PCI_EXP_DEVCTL_URRE;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, config);
+ }
+
+ /* Find the Advanced Error Reporting capability */
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ if (pos) {
+ /* Clear Uncorrectable Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, dconfig);
+ /* Enable reporting of all uncorrectable errors */
+ /* Uncorrectable Error Mask - turned on bits disable errors */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, 0);
+ /*
+ * Leave severity at HW default. This only controls if
+ * errors are reported as uncorrectable or
+ * correctable, not if the error is reported.
+ */
+ /* PCI_ERR_UNCOR_SEVER - Uncorrectable Error Severity */
+ /* Clear Correctable Error Status */
+ pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, dconfig);
+ /* Enable reporting of all correctable errors */
+ /* Correctable Error Mask - turned on bits disable errors */
+ pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, 0);
+ /* Advanced Error Capabilities */
+ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &dconfig);
+ /* ECRC Generation Enable */
+ if (dconfig & PCI_ERR_CAP_ECRC_GENC)
+ dconfig |= PCI_ERR_CAP_ECRC_GENE;
+ /* ECRC Check Enable */
+ if (dconfig & PCI_ERR_CAP_ECRC_CHKC)
+ dconfig |= PCI_ERR_CAP_ECRC_CHKE;
+ pci_write_config_dword(dev, pos + PCI_ERR_CAP, dconfig);
+
+ /* PCI_ERR_HEADER_LOG - Header Log Register (16 bytes) */
+ /* Enable Root Port's interrupt in response to error messages */
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND,
+ PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ /* Clear the Root status register */
+ pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &dconfig);
+ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
+ }
+ /* WAR, only 128 MRRS is supported, force all EPs to support this value */
+ pcie_set_readrq(dev, 128);
+ return 0;
+}
+
+static void pcie_phy_rst(int pcie_port)
+{
+ pcie_phy_rst_assert(pcie_port);
+ pcie_phy_rst_deassert(pcie_port);
+ /* Make sure PHY PLL is stable */
+ udelay(20);
+}
+
+static int pcie_rc_initialize(int pcie_port)
+{
+ int i;
+
+ pcie_rcu_endian_setup(pcie_port);
+
+ pcie_ep_gpio_rst_init(pcie_port);
+
+ /*
+ * XXX, the PCIe elastic buffer bug can cause the device not to be detected.
+ * One more PCIe PHY reset solves this issue.
+ */
+ for (i = 0; i < IFX_PCIE_PHY_LOOP_CNT; i++) {
+ /* Disable PCIe PHY Analog part for sanity check */
+ pcie_phy_pmu_disable(pcie_port);
+ pcie_phy_rst(pcie_port);
+ /* PCIe Core reset enabled, low active, sw programmed */
+ pcie_core_rst_assert(pcie_port);
+ /* Put PCIe EP in reset status */
+ pcie_device_rst_assert(pcie_port);
+ /* PCI PHY & Core reset disabled, high active, sw programmed */
+ pcie_core_rst_deassert(pcie_port);
+ /* Already in a quiet state, program PLL, enable PHY, check ready bit */
+ pcie_phy_clock_mode_setup(pcie_port);
+ /* Enable PCIe PHY and Clock */
+ pcie_core_pmu_setup(pcie_port);
+ /* Clear status registers */
+ pcie_status_register_clear(pcie_port);
+#ifdef CONFIG_PCI_MSI
+ pcie_msi_init(pcie_port);
+#endif /* CONFIG_PCI_MSI */
+ pcie_rc_cfg_reg_setup(pcie_port);
+
+ /* Once link is up, break out */
+ if (pcie_app_loigc_setup(pcie_port) == 0)
+ break;
+ }
+ if (i >= IFX_PCIE_PHY_LOOP_CNT) {
+ printk(KERN_ERR "%s link up failed!!!!!\n", __func__);
+ return -EIO;
+ }
+ /* NB, don't increase the ACK/NACK timer timeout value; doing so will cause a lot of COR errors */
+ pcie_replay_time_update(pcie_port);
+ return 0;
+}
+
+static inline int ifx_pcie_startup_port_nr(void)
+{
+ return IFX_PCIE_PORT0;
+}
+
+/**
+ * \fn static int __devinit ltq_pcie_probe(struct platform_device *pdev)
+ * \brief Initialize the IFX PCIe controllers
+ *
+ * \return -EIO PCIe PHY link is not up
+ * \return -ENOMEM Configuration/IO space failed to map
+ * \return 0 OK
+ * \ingroup IFX_PCIE_OS
+ */
+extern int (*ltqpci_plat_arch_init)(struct pci_dev *dev);
+extern int (*ltqpci_map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
+static int __devinit ltq_pcie_probe(struct platform_device *pdev)
+{
+ char ver_str[128] = {0};
+ void __iomem *io_map_base;
+ int pcie_port;
+ int startup_port;
+ ltqpci_map_irq = ifx_pcie_bios_map_irq;
+ ltqpci_plat_arch_init = ifx_pcie_bios_plat_dev_init;
+ /* Enable AHB Master/ Slave */
+ pcie_ahb_pmu_setup();
+
+ startup_port = ifx_pcie_startup_port_nr();
+
+ ltq_gpio_request(&pdev->dev, IFX_PCIE_GPIO_RESET, 0, 1, "pcie-reset");
+
+ for (pcie_port = startup_port; pcie_port < IFX_PCIE_CORE_NR; pcie_port++){
+ if (pcie_rc_initialize(pcie_port) == 0) {
+ /* Otherwise, a warning will pop up */
+ io_map_base = ioremap(PCIE_IO_PHY_PORT_TO_BASE(pcie_port), PCIE_IO_SIZE);
+ if (io_map_base == NULL)
+ return -ENOMEM;
+ ifx_pcie_controller[pcie_port].pcic.io_map_base = (unsigned long)io_map_base;
+ register_pci_controller(&ifx_pcie_controller[pcie_port].pcic);
+ /* XXX, clear error status */
+ pcie_rc_core_int_init(pcie_port);
+ }
+ }
+
+ printk(KERN_INFO "%s", ver_str);
+ return 0;
+}
+
+static struct platform_driver ltq_pcie_driver = {
+ .probe = ltq_pcie_probe,
+ .driver = {
+ .name = "pcie-xway",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init pciebios_init(void)
+{
+ return platform_driver_register(&ltq_pcie_driver);
+}
+
+arch_initcall(pciebios_init);
diff --git a/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.h b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.h
new file mode 100644
index 0000000000..d877c23a8a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/arch/mips/pci/pcie-lantiq.h
@@ -0,0 +1,1305 @@
+/******************************************************************************
+**
+** FILE NAME : ifxmips_pcie_reg.h
+** PROJECT : IFX UEIP for VRX200
+** MODULES : PCIe module
+**
+** DATE : 02 Mar 2009
+** AUTHOR : Lei Chuanhua
+** DESCRIPTION : PCIe Root Complex Driver
+** COPYRIGHT : Copyright (c) 2009
+** Infineon Technologies AG
+** Am Campeon 1-12, 85579 Neubiberg, Germany
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+** HISTORY
+** $Version $Date $Author $Comment
+** 0.0.1 17 Mar,2009 Lei Chuanhua Initial version
+*******************************************************************************/
+#ifndef IFXMIPS_PCIE_REG_H
+#define IFXMIPS_PCIE_REG_H
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+/*!
+ \file ifxmips_pcie_reg.h
+ \ingroup IFX_PCIE
+ \brief header file for PCIe module register definition
+*/
+/* PCIe Address Mapping Base */
+#define PCIE_CFG_PHY_BASE 0x1D000000UL
+#define PCIE_CFG_BASE (KSEG1 + PCIE_CFG_PHY_BASE)
+#define PCIE_CFG_SIZE (8 * 1024 * 1024)
+
+#define PCIE_MEM_PHY_BASE 0x1C000000UL
+#define PCIE_MEM_BASE (KSEG1 + PCIE_MEM_PHY_BASE)
+#define PCIE_MEM_SIZE (16 * 1024 * 1024)
+#define PCIE_MEM_PHY_END (PCIE_MEM_PHY_BASE + PCIE_MEM_SIZE - 1)
+
+#define PCIE_IO_PHY_BASE 0x1D800000UL
+#define PCIE_IO_BASE (KSEG1 + PCIE_IO_PHY_BASE)
+#define PCIE_IO_SIZE (1 * 1024 * 1024)
+#define PCIE_IO_PHY_END (PCIE_IO_PHY_BASE + PCIE_IO_SIZE - 1)
+
+#define PCIE_RC_CFG_BASE (KSEG1 + 0x1D900000)
+#define PCIE_APP_LOGIC_REG (KSEG1 + 0x1E100900)
+#define PCIE_MSI_PHY_BASE 0x1F600000UL
+
+#define PCIE_PDI_PHY_BASE 0x1F106800UL
+#define PCIE_PDI_BASE (KSEG1 + PCIE_PDI_PHY_BASE)
+#define PCIE_PDI_SIZE 0x400
+
+#define PCIE1_CFG_PHY_BASE 0x19000000UL
+#define PCIE1_CFG_BASE (KSEG1 + PCIE1_CFG_PHY_BASE)
+#define PCIE1_CFG_SIZE (8 * 1024 * 1024)
+
+#define PCIE1_MEM_PHY_BASE 0x18000000UL
+#define PCIE1_MEM_BASE (KSEG1 + PCIE1_MEM_PHY_BASE)
+#define PCIE1_MEM_SIZE (16 * 1024 * 1024)
+#define PCIE1_MEM_PHY_END (PCIE1_MEM_PHY_BASE + PCIE1_MEM_SIZE - 1)
+
+#define PCIE1_IO_PHY_BASE 0x19800000UL
+#define PCIE1_IO_BASE (KSEG1 + PCIE1_IO_PHY_BASE)
+#define PCIE1_IO_SIZE (1 * 1024 * 1024)
+#define PCIE1_IO_PHY_END (PCIE1_IO_PHY_BASE + PCIE1_IO_SIZE - 1)
+
+#define PCIE1_RC_CFG_BASE (KSEG1 + 0x19900000)
+#define PCIE1_APP_LOGIC_REG (KSEG1 + 0x1E100700)
+#define PCIE1_MSI_PHY_BASE 0x1F400000UL
+
+#define PCIE1_PDI_PHY_BASE 0x1F700400UL
+#define PCIE1_PDI_BASE (KSEG1 + PCIE1_PDI_PHY_BASE)
+#define PCIE1_PDI_SIZE 0x400
+
+#define PCIE_CFG_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_CFG_BASE) : (PCIE_CFG_BASE))
+#define PCIE_MEM_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_MEM_BASE) : (PCIE_MEM_BASE))
+#define PCIE_IO_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_IO_BASE) : (PCIE_IO_BASE))
+#define PCIE_MEM_PHY_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_MEM_PHY_BASE) : (PCIE_MEM_PHY_BASE))
+#define PCIE_MEM_PHY_PORT_TO_END(X) ((X) > 0 ? (PCIE1_MEM_PHY_END) : (PCIE_MEM_PHY_END))
+#define PCIE_IO_PHY_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_IO_PHY_BASE) : (PCIE_IO_PHY_BASE))
+#define PCIE_IO_PHY_PORT_TO_END(X) ((X) > 0 ? (PCIE1_IO_PHY_END) : (PCIE_IO_PHY_END))
+#define PCIE_APP_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_APP_LOGIC_REG) : (PCIE_APP_LOGIC_REG))
+#define PCIE_RC_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_RC_CFG_BASE) : (PCIE_RC_CFG_BASE))
+#define PCIE_PHY_PORT_TO_BASE(X) ((X) > 0 ? (PCIE1_PDI_BASE) : (PCIE_PDI_BASE))
+
+/* PCIe Application Logic Register */
+/* RC Core Control Register */
+#define PCIE_RC_CCR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x10)
+/* This should be enabled after initializing the configuration registers.
+ * Also should check the link status retraining bit.
+ */
+#define PCIE_RC_CCR_LTSSM_ENABLE 0x00000001 /* Enable LTSSM to continue link establishment */
+
+/* RC Core Debug Register */
+#define PCIE_RC_DR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x14)
+#define PCIE_RC_DR_DLL_UP 0x00000001 /* Data Link Layer Up */
+#define PCIE_RC_DR_CURRENT_POWER_STATE 0x0000000E /* Current Power State */
+#define PCIE_RC_DR_CURRENT_POWER_STATE_S 1
+#define PCIE_RC_DR_CURRENT_LTSSM_STATE 0x000001F0 /* Current LTSSM State */
+#define PCIE_RC_DR_CURRENT_LTSSM_STATE_S 4
+
+#define PCIE_RC_DR_PM_DEV_STATE 0x00000E00 /* Power Management D-State */
+#define PCIE_RC_DR_PM_DEV_STATE_S 9
+
+#define PCIE_RC_DR_PM_ENABLED 0x00001000 /* Power Management State from PMU */
+#define PCIE_RC_DR_PME_EVENT_ENABLED 0x00002000 /* Power Management Event Enable State */
+#define PCIE_RC_DR_AUX_POWER_ENABLED 0x00004000 /* Auxiliary Power Enable */
+
+/* Current Power State Definition */
+enum {
+ PCIE_RC_DR_D0 = 0,
+ PCIE_RC_DR_D1, /* Not supported */
+ PCIE_RC_DR_D2, /* Not supported */
+ PCIE_RC_DR_D3,
+ PCIE_RC_DR_UN,
+};
+
+/* PHY Link Status Register */
+#define PCIE_PHY_SR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x18)
+#define PCIE_PHY_SR_PHY_LINK_UP 0x00000001 /* PHY Link Up/Down Indicator */
+
+/* Electromechanical Control Register */
+#define PCIE_EM_CR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x1C)
+#define PCIE_EM_CR_CARD_IS_PRESENT 0x00000001 /* Card Presence Detect State */
+#define PCIE_EM_CR_MRL_OPEN 0x00000002 /* MRL Sensor State */
+#define PCIE_EM_CR_POWER_FAULT_SET 0x00000004 /* Power Fault Detected */
+#define PCIE_EM_CR_MRL_SENSOR_SET 0x00000008 /* MRL Sensor Changed */
+#define PCIE_EM_CR_PRESENT_DETECT_SET 0x00000010 /* Card Presence Detect Changed */
+#define PCIE_EM_CR_CMD_CPL_INT_SET 0x00000020 /* Command Complete Interrupt */
+#define PCIE_EM_CR_SYS_INTERLOCK_SET 0x00000040 /* System Electromechanical Interlock Engaged */
+#define PCIE_EM_CR_ATTENTION_BUTTON_SET 0x00000080 /* Attention Button Pressed */
+
+/* Interrupt Status Register */
+#define PCIE_IR_SR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x20)
+#define PCIE_IR_SR_PME_CAUSE_MSI 0x00000002 /* MSI caused by PME */
+#define PCIE_IR_SR_HP_PME_WAKE_GEN 0x00000004 /* Hotplug PME Wake Generation */
+#define PCIE_IR_SR_HP_MSI 0x00000008 /* Hotplug MSI */
+#define PCIE_IR_SR_AHB_LU_ERR 0x00000030 /* AHB Bridge Lookup Error Signals */
+#define PCIE_IR_SR_AHB_LU_ERR_S 4
+#define PCIE_IR_SR_INT_MSG_NUM 0x00003E00 /* Interrupt Message Number */
+#define PCIE_IR_SR_INT_MSG_NUM_S 9
+#define PCIE_IR_SR_AER_INT_MSG_NUM 0xF8000000 /* Advanced Error Interrupt Message Number */
+#define PCIE_IR_SR_AER_INT_MSG_NUM_S 27
+
+/* Message Control Register */
+#define PCIE_MSG_CR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x30)
+#define PCIE_MSG_CR_GEN_PME_TURN_OFF_MSG 0x00000001 /* Generate PME Turn Off Message */
+#define PCIE_MSG_CR_GEN_UNLOCK_MSG 0x00000002 /* Generate Unlock Message */
+
+#define PCIE_VDM_DR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x34)
+
+/* Vendor-Defined Message Requester ID Register */
+#define PCIE_VDM_RID(X) (PCIE_APP_PORT_TO_BASE (X) + 0x38)
+#define PCIE_VDM_RID_VENROR_MSG_REQ_ID 0x0000FFFF
+#define PCIE_VDM_RID_VDMRID_S 0
+
+/* ASPM Control Register */
+#define PCIE_ASPM_CR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x40)
+#define PCIE_ASPM_CR_HOT_RST 0x00000001 /* Hot Reset Request to the downstream device */
+#define PCIE_ASPM_CR_REQ_EXIT_L1 0x00000002 /* Request to Exit L1 */
+#define PCIE_ASPM_CR_REQ_ENTER_L1 0x00000004 /* Request to Enter L1 */
+
+/* Vendor Message DW0 Register */
+#define PCIE_VM_MSG_DW0(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x50)
+#define PCIE_VM_MSG_DW0_TYPE 0x0000001F /* Message type */
+#define PCIE_VM_MSG_DW0_TYPE_S 0
+#define PCIE_VM_MSG_DW0_FORMAT 0x00000060 /* Format */
+#define PCIE_VM_MSG_DW0_FORMAT_S 5
+#define PCIE_VM_MSG_DW0_TC 0x00007000 /* Traffic Class */
+#define PCIE_VM_MSG_DW0_TC_S 12
+#define PCIE_VM_MSG_DW0_ATTR 0x000C0000 /* Attributes */
+#define PCIE_VM_MSG_DW0_ATTR_S 18
+#define PCIE_VM_MSG_DW0_EP_TLP 0x00100000 /* Poisoned TLP */
+#define PCIE_VM_MSG_DW0_TD 0x00200000 /* TLP Digest */
+#define PCIE_VM_MSG_DW0_LEN 0xFFC00000 /* Length */
+#define PCIE_VM_MSG_DW0_LEN_S 22
+
+/* Format Definition */
+enum {
+ PCIE_VM_MSG_FORMAT_00 = 0, /* 3DW Hdr, no data*/
+ PCIE_VM_MSG_FORMAT_01, /* 4DW Hdr, no data */
+ PCIE_VM_MSG_FORMAT_10, /* 3DW Hdr, with data */
+ PCIE_VM_MSG_FORMAT_11, /* 4DW Hdr, with data */
+};
+
+/* Traffic Class Definition */
+enum {
+ PCIE_VM_MSG_TC0 = 0,
+ PCIE_VM_MSG_TC1,
+ PCIE_VM_MSG_TC2,
+ PCIE_VM_MSG_TC3,
+ PCIE_VM_MSG_TC4,
+ PCIE_VM_MSG_TC5,
+ PCIE_VM_MSG_TC6,
+ PCIE_VM_MSG_TC7,
+};
+
+/* Attributes Definition */
+enum {
+ PCIE_VM_MSG_ATTR_00 = 0, /* RO and No Snoop cleared */
+ PCIE_VM_MSG_ATTR_01, /* RO cleared , No Snoop set */
+ PCIE_VM_MSG_ATTR_10, /* RO set, No Snoop cleared*/
+ PCIE_VM_MSG_ATTR_11, /* RO and No Snoop set */
+};
+
+/* Payload Size Definition */
+#define PCIE_VM_MSG_LEN_MIN 0
+#define PCIE_VM_MSG_LEN_MAX 1024
+
+/* Vendor Message DW1 Register */
+#define PCIE_VM_MSG_DW1(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x54)
+#define PCIE_VM_MSG_DW1_FUNC_NUM 0x00000070 /* Function Number */
+#define PCIE_VM_MSG_DW1_FUNC_NUM_S 8
+#define PCIE_VM_MSG_DW1_CODE 0x00FF0000 /* Message Code */
+#define PCIE_VM_MSG_DW1_CODE_S 16
+#define PCIE_VM_MSG_DW1_TAG 0xFF000000 /* Tag */
+#define PCIE_VM_MSG_DW1_TAG_S 24
+
+#define PCIE_VM_MSG_DW2(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x58)
+#define PCIE_VM_MSG_DW3(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x5C)
+
+/* Vendor Message Request Register */
+#define PCIE_VM_MSG_REQR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x60)
+#define PCIE_VM_MSG_REQR_REQ 0x00000001 /* Vendor Message Request */
+
+
+/* AHB Slave Side Band Control Register */
+#define PCIE_AHB_SSB(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x70)
+#define PCIE_AHB_SSB_REQ_BCM 0x00000001 /* Slave Request BCM field */
+#define PCIE_AHB_SSB_REQ_EP 0x00000002 /* Slave Request EP field */
+#define PCIE_AHB_SSB_REQ_TD 0x00000004 /* Slave Request TD field */
+#define PCIE_AHB_SSB_REQ_ATTR 0x00000018 /* Slave Request Attribute number */
+#define PCIE_AHB_SSB_REQ_ATTR_S 3
+#define PCIE_AHB_SSB_REQ_TC 0x000000E0 /* Slave Request TC Field */
+#define PCIE_AHB_SSB_REQ_TC_S 5
+
+/* AHB Master SideBand Ctrl Register */
+#define PCIE_AHB_MSB(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x74)
+#define PCIE_AHB_MSB_RESP_ATTR 0x00000003 /* Master Response Attribute number */
+#define PCIE_AHB_MSB_RESP_ATTR_S 0
+#define PCIE_AHB_MSB_RESP_BAD_EOT 0x00000004 /* Master Response bad EOT field */
+#define PCIE_AHB_MSB_RESP_BCM 0x00000008 /* Master Response BCM field */
+#define PCIE_AHB_MSB_RESP_EP 0x00000010 /* Master Response EP field */
+#define PCIE_AHB_MSB_RESP_TD 0x00000020 /* Master Response TD field */
+#define PCIE_AHB_MSB_RESP_FUN_NUM 0x000003C0 /* Master Response Function number */
+#define PCIE_AHB_MSB_RESP_FUN_NUM_S 6
+
+/* AHB Control Register, fixed bus enumeration exception */
+#define PCIE_AHB_CTRL(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0x78)
+#define PCIE_AHB_CTRL_BUS_ERROR_SUPPRESS 0x00000001
+
+/* Interrupt Enable Register */
+#define PCIE_IRNEN(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xF4)
+#define PCIE_IRNCR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xF8)
+#define PCIE_IRNICR(X) (volatile u32*)(PCIE_APP_PORT_TO_BASE(X) + 0xFC)
+
+/* PCIe interrupt enable/control/capture register definition */
+#define PCIE_IRN_AER_REPORT 0x00000001 /* AER Interrupt */
+#define PCIE_IRN_AER_MSIX 0x00000002 /* Advanced Error MSI-X Interrupt */
+#define PCIE_IRN_PME 0x00000004 /* PME Interrupt */
+#define PCIE_IRN_HOTPLUG 0x00000008 /* Hotplug Interrupt */
+#define PCIE_IRN_RX_VDM_MSG 0x00000010 /* Vendor-Defined Message Interrupt */
+#define PCIE_IRN_RX_CORRECTABLE_ERR_MSG 0x00000020 /* Correctable Error Message Interrupt */
+#define PCIE_IRN_RX_NON_FATAL_ERR_MSG 0x00000040 /* Non-fatal Error Message */
+#define PCIE_IRN_RX_FATAL_ERR_MSG 0x00000080 /* Fatal Error Message */
+#define PCIE_IRN_RX_PME_MSG 0x00000100 /* PME Message Interrupt */
+#define PCIE_IRN_RX_PME_TURNOFF_ACK 0x00000200 /* PME Turnoff Ack Message Interrupt */
+#define PCIE_IRN_AHB_BR_FATAL_ERR 0x00000400 /* AHB Fatal Error Interrupt */
+#define PCIE_IRN_LINK_AUTO_BW_STATUS 0x00000800 /* Link Auto Bandwidth Status Interrupt */
+#define PCIE_IRN_BW_MGT 0x00001000 /* Bandwidth Management Interrupt */
+#define PCIE_IRN_INTA 0x00002000 /* INTA */
+#define PCIE_IRN_INTB 0x00004000 /* INTB */
+#define PCIE_IRN_INTC 0x00008000 /* INTC */
+#define PCIE_IRN_INTD 0x00010000 /* INTD */
+#define PCIE_IRN_WAKEUP 0x00020000 /* Wake up Interrupt */
+
+#define PCIE_RC_CORE_COMBINED_INT (PCIE_IRN_AER_REPORT | PCIE_IRN_AER_MSIX | PCIE_IRN_PME | \
+ PCIE_IRN_HOTPLUG | PCIE_IRN_RX_VDM_MSG | PCIE_IRN_RX_CORRECTABLE_ERR_MSG |\
+ PCIE_IRN_RX_NON_FATAL_ERR_MSG | PCIE_IRN_RX_FATAL_ERR_MSG | \
+ PCIE_IRN_RX_PME_MSG | PCIE_IRN_RX_PME_TURNOFF_ACK | PCIE_IRN_AHB_BR_FATAL_ERR | \
+ PCIE_IRN_LINK_AUTO_BW_STATUS | PCIE_IRN_BW_MGT)
+/* PCIe RC Configuration Register */
+#define PCIE_VDID(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x00)
+
+/* Bit definition from pci_reg.h */
+#define PCIE_PCICMDSTS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x04)
+#define PCIE_CCRID(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x08)
+#define PCIE_CLSLTHTBR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x0C) /* EP only */
+/* BAR0, BAR1: only necessary if the bridge implements a device-specific register set or memory buffer */
+#define PCIE_BAR0(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x10) /* Not used*/
+#define PCIE_BAR1(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x14) /* Not used */
+
+#define PCIE_BNR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x18) /* Mandatory */
+/* Bus Number Register bits */
+#define PCIE_BNR_PRIMARY_BUS_NUM 0x000000FF
+#define PCIE_BNR_PRIMARY_BUS_NUM_S 0
+#define PCIE_PNR_SECONDARY_BUS_NUM 0x0000FF00
+#define PCIE_PNR_SECONDARY_BUS_NUM_S 8
+#define PCIE_PNR_SUB_BUS_NUM 0x00FF0000
+#define PCIE_PNR_SUB_BUS_NUM_S 16
+
+/* IO Base/Limit Register bits */
+#define PCIE_IOBLSECS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x1C) /* RC only */
+#define PCIE_IOBLSECS_32BIT_IO_ADDR 0x00000001
+#define PCIE_IOBLSECS_IO_BASE_ADDR 0x000000F0
+#define PCIE_IOBLSECS_IO_BASE_ADDR_S 4
+#define PCIE_IOBLSECS_32BIT_IOLIMT 0x00000100
+#define PCIE_IOBLSECS_IO_LIMIT_ADDR 0x0000F000
+#define PCIE_IOBLSECS_IO_LIMIT_ADDR_S 12
+
+/* Non-prefetchable Memory Base/Limit Register bit */
+#define PCIE_MBML(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x20) /* RC only */
+#define PCIE_MBML_MEM_BASE_ADDR 0x0000FFF0
+#define PCIE_MBML_MEM_BASE_ADDR_S 4
+#define PCIE_MBML_MEM_LIMIT_ADDR 0xFFF00000
+#define PCIE_MBML_MEM_LIMIT_ADDR_S 20
+
+/* Prefetchable Memory Base/Limit Register bit */
+#define PCIE_PMBL(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x24) /* RC only */
+#define PCIE_PMBL_64BIT_ADDR 0x00000001
+#define PCIE_PMBL_UPPER_12BIT 0x0000FFF0
+#define PCIE_PMBL_UPPER_12BIT_S 4
+#define PCIE_PMBL_E64MA 0x00010000
+#define PCIE_PMBL_END_ADDR 0xFFF00000
+#define PCIE_PMBL_END_ADDR_S 20
+#define PCIE_PMBU32(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x28) /* RC only */
+#define PCIE_PMLU32(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x2C) /* RC only */
+
+/* I/O Base/Limit Upper 16 bits register */
+#define PCIE_IO_BANDL(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x30) /* RC only */
+#define PCIE_IO_BANDL_UPPER_16BIT_IO_BASE 0x0000FFFF
+#define PCIE_IO_BANDL_UPPER_16BIT_IO_BASE_S 0
+#define PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT 0xFFFF0000
+#define PCIE_IO_BANDL_UPPER_16BIT_IO_LIMIT_S 16
+
+#define PCIE_CPR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x34)
+#define PCIE_EBBAR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x38)
+
+/* Interrupt and Secondary Bridge Control Register */
+#define PCIE_INTRBCTRL(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x3C)
+
+#define PCIE_INTRBCTRL_INT_LINE 0x000000FF
+#define PCIE_INTRBCTRL_INT_LINE_S 0
+#define PCIE_INTRBCTRL_INT_PIN 0x0000FF00
+#define PCIE_INTRBCTRL_INT_PIN_S 8
+#define PCIE_INTRBCTRL_PARITY_ERR_RESP_ENABLE 0x00010000 /* #PERR */
+#define PCIE_INTRBCTRL_SERR_ENABLE 0x00020000 /* #SERR */
+#define PCIE_INTRBCTRL_ISA_ENABLE 0x00040000 /* ISA enable, IO 64KB only */
+#define PCIE_INTRBCTRL_VGA_ENABLE 0x00080000 /* VGA enable */
+#define PCIE_INTRBCTRL_VGA_16BIT_DECODE 0x00100000 /* VGA 16bit decode */
+#define PCIE_INTRBCTRL_RST_SECONDARY_BUS 0x00400000 /* Secondary bus reset, hot reset, 1ms */
+/* Others are read only */
+enum {
+ PCIE_INTRBCTRL_INT_NON = 0,
+ PCIE_INTRBCTRL_INTA,
+ PCIE_INTRBCTRL_INTB,
+ PCIE_INTRBCTRL_INTC,
+ PCIE_INTRBCTRL_INTD,
+};
+
+#define PCIE_PM_CAPR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x40)
+
+/* Power Management Control and Status Register */
+#define PCIE_PM_CSR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x44)
+
+#define PCIE_PM_CSR_POWER_STATE 0x00000003 /* Power State */
+#define PCIE_PM_CSR_POWER_STATE_S 0
+#define PCIE_PM_CSR_SW_RST 0x00000008 /* Soft Reset Enabled */
+#define PCIE_PM_CSR_PME_ENABLE 0x00000100 /* PME Enable */
+#define PCIE_PM_CSR_PME_STATUS 0x00008000 /* PME status */
+
+/* MSI Capability Register for EP */
+#define PCIE_MCAPR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x50)
+
+#define PCIE_MCAPR_MSI_CAP_ID 0x000000FF /* MSI Capability ID */
+#define PCIE_MCAPR_MSI_CAP_ID_S 0
+#define PCIE_MCAPR_MSI_NEXT_CAP_PTR 0x0000FF00 /* Next Capability Pointer */
+#define PCIE_MCAPR_MSI_NEXT_CAP_PTR_S 8
+#define PCIE_MCAPR_MSI_ENABLE 0x00010000 /* MSI Enable */
+#define PCIE_MCAPR_MULTI_MSG_CAP 0x000E0000 /* Multiple Message Capable */
+#define PCIE_MCAPR_MULTI_MSG_CAP_S 17
+#define PCIE_MCAPR_MULTI_MSG_ENABLE 0x00700000 /* Multiple Message Enable */
+#define PCIE_MCAPR_MULTI_MSG_ENABLE_S 20
+#define PCIE_MCAPR_ADDR64_CAP 0X00800000 /* 64-bit Address Capable */
+
+/* MSI Message Address Register */
+#define PCIE_MA(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x54)
+
+#define PCIE_MA_ADDR_MASK 0xFFFFFFFC /* Message Address */
+
+/* MSI Message Upper Address Register */
+#define PCIE_MUA(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x58)
+
+/* MSI Message Data Register */
+#define PCIE_MD(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x5C)
+
+#define PCIE_MD_DATA 0x0000FFFF /* Message Data */
+#define PCIE_MD_DATA_S 0
+
+/* PCI Express Capability Register */
+#define PCIE_XCAP(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x70)
+
+#define PCIE_XCAP_ID 0x000000FF /* PCI Express Capability ID */
+#define PCIE_XCAP_ID_S 0
+#define PCIE_XCAP_NEXT_CAP 0x0000FF00 /* Next Capability Pointer */
+#define PCIE_XCAP_NEXT_CAP_S 8
+#define PCIE_XCAP_VER 0x000F0000 /* PCI Express Capability Version */
+#define PCIE_XCAP_VER_S 16
+#define PCIE_XCAP_DEV_PORT_TYPE 0x00F00000 /* Device Port Type */
+#define PCIE_XCAP_DEV_PORT_TYPE_S 20
+#define PCIE_XCAP_SLOT_IMPLEMENTED 0x01000000 /* Slot Implemented */
+#define PCIE_XCAP_MSG_INT_NUM 0x3E000000 /* Interrupt Message Number */
+#define PCIE_XCAP_MSG_INT_NUM_S 25
+
+/* Device Capability Register */
+#define PCIE_DCAP(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x74)
+
+#define PCIE_DCAP_MAX_PAYLOAD_SIZE 0x00000007 /* Max Payload size */
+#define PCIE_DCAP_MAX_PAYLOAD_SIZE_S 0
+#define PCIE_DCAP_PHANTOM_FUNC 0x00000018 /* Phantom Function, not supported */
+#define PCIE_DCAP_PHANTOM_FUNC_S 3
+#define PCIE_DCAP_EXT_TAG 0x00000020 /* Extended Tag Field */
+#define PCIE_DCAP_EP_L0S_LATENCY 0x000001C0 /* EP L0s latency only */
+#define PCIE_DCAP_EP_L0S_LATENCY_S 6
+#define PCIE_DCAP_EP_L1_LATENCY 0x00000E00 /* EP L1 latency only */
+#define PCIE_DCAP_EP_L1_LATENCY_S 9
+#define PCIE_DCAP_ROLE_BASE_ERR_REPORT 0x00008000 /* Role Based ERR */
+
+/* Maximum payload size supported */
+enum {
+ PCIE_MAX_PAYLOAD_128 = 0,
+ PCIE_MAX_PAYLOAD_256,
+ PCIE_MAX_PAYLOAD_512,
+ PCIE_MAX_PAYLOAD_1024,
+ PCIE_MAX_PAYLOAD_2048,
+ PCIE_MAX_PAYLOAD_4096,
+};
+
+/* Device Control and Status Register */
+#define PCIE_DCTLSTS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x78)
+
+#define PCIE_DCTLSTS_CORRECTABLE_ERR_EN 0x00000001 /* COR-ERR */
+#define PCIE_DCTLSTS_NONFATAL_ERR_EN 0x00000002 /* Non-fatal ERR */
+#define PCIE_DCTLSTS_FATAL_ERR_EN 0x00000004 /* Fatal ERR */
+#define PCIE_DCTLSYS_UR_REQ_EN 0x00000008 /* UR ERR */
+#define PCIE_DCTLSTS_RELAXED_ORDERING_EN 0x00000010 /* Enable relaxing ordering */
+#define PCIE_DCTLSTS_MAX_PAYLOAD_SIZE 0x000000E0 /* Max payload mask */
+#define PCIE_DCTLSTS_MAX_PAYLOAD_SIZE_S 5
+#define PCIE_DCTLSTS_EXT_TAG_EN 0x00000100 /* Extended tag field */
+#define PCIE_DCTLSTS_PHANTOM_FUNC_EN 0x00000200 /* Phantom Function Enable */
+#define PCIE_DCTLSTS_AUX_PM_EN 0x00000400 /* AUX Power PM Enable */
+#define PCIE_DCTLSTS_NO_SNOOP_EN 0x00000800 /* Enable no snoop, except root port*/
+#define PCIE_DCTLSTS_MAX_READ_SIZE 0x00007000 /* Max Read Request size*/
+#define PCIE_DCTLSTS_MAX_READ_SIZE_S 12
+#define PCIE_DCTLSTS_CORRECTABLE_ERR 0x00010000 /* COR-ERR Detected */
+#define PCIE_DCTLSTS_NONFATAL_ERR 0x00020000 /* Non-Fatal ERR Detected */
+#define PCIE_DCTLSTS_FATAL_ER 0x00040000 /* Fatal ERR Detected */
+#define PCIE_DCTLSTS_UNSUPPORTED_REQ 0x00080000 /* UR Detected */
+#define PCIE_DCTLSTS_AUX_POWER 0x00100000 /* Aux Power Detected */
+#define PCIE_DCTLSTS_TRANSACT_PENDING 0x00200000 /* Transaction pending */
+
+#define PCIE_DCTLSTS_ERR_EN (PCIE_DCTLSTS_CORRECTABLE_ERR_EN | \
+ PCIE_DCTLSTS_NONFATAL_ERR_EN | PCIE_DCTLSTS_FATAL_ERR_EN | \
+ PCIE_DCTLSYS_UR_REQ_EN)
+
+/* Link Capability Register */
+#define PCIE_LCAP(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7C)
+#define PCIE_LCAP_MAX_LINK_SPEED 0x0000000F /* Max link speed, 0x1 by default */
+#define PCIE_LCAP_MAX_LINK_SPEED_S 0
+#define PCIE_LCAP_MAX_LENGTH_WIDTH 0x000003F0 /* Maximum Link Width */
+#define PCIE_LCAP_MAX_LENGTH_WIDTH_S 4
+#define PCIE_LCAP_ASPM_LEVEL 0x00000C00 /* Active State Link PM Support */
+#define PCIE_LCAP_ASPM_LEVEL_S 10
+#define PCIE_LCAP_L0S_EIXT_LATENCY 0x00007000 /* L0s Exit Latency */
+#define PCIE_LCAP_L0S_EIXT_LATENCY_S 12
+#define PCIE_LCAP_L1_EXIT_LATENCY 0x00038000 /* L1 Exit Latency */
+#define PCIE_LCAP_L1_EXIT_LATENCY_S 15
+#define PCIE_LCAP_CLK_PM 0x00040000 /* Clock Power Management */
+#define PCIE_LCAP_SDER 0x00080000 /* Surprise Down Error Reporting */
+#define PCIE_LCAP_DLL_ACTIVE_REPROT 0x00100000 /* Data Link Layer Active Reporting Capable */
+#define PCIE_LCAP_PORT_NUM 0xFF000000 /* Port number */
+#define PCIE_LCAP_PORT_NUM_S 24
+
+/* Maximum Link Width definition */
+#define PCIE_MAX_LENGTH_WIDTH_RES 0x00
+#define PCIE_MAX_LENGTH_WIDTH_X1 0x01 /* Default */
+#define PCIE_MAX_LENGTH_WIDTH_X2 0x02
+#define PCIE_MAX_LENGTH_WIDTH_X4 0x04
+#define PCIE_MAX_LENGTH_WIDTH_X8 0x08
+#define PCIE_MAX_LENGTH_WIDTH_X12 0x0C
+#define PCIE_MAX_LENGTH_WIDTH_X16 0x10
+#define PCIE_MAX_LENGTH_WIDTH_X32 0x20
+
+/* Active State Link PM definition */
+enum {
+ PCIE_ASPM_RES0 = 0,
+ PCIE_ASPM_L0S_ENTRY_SUPPORT, /* L0s */
+ PCIE_ASPM_RES1,
+ PCIE_ASPM_L0S_L1_ENTRY_SUPPORT, /* L0s and L1, default */
+};
+
+/* L0s Exit Latency definition */
+enum {
+ PCIE_L0S_EIXT_LATENCY_L64NS = 0, /* < 64 ns */
+ PCIE_L0S_EIXT_LATENCY_B64A128, /* > 64 ns < 128 ns */
+ PCIE_L0S_EIXT_LATENCY_B128A256, /* > 128 ns < 256 ns */
+ PCIE_L0S_EIXT_LATENCY_B256A512, /* > 256 ns < 512 ns */
+ PCIE_L0S_EIXT_LATENCY_B512TO1U, /* > 512 ns < 1 us */
+ PCIE_L0S_EIXT_LATENCY_B1A2U, /* > 1 us < 2 us */
+ PCIE_L0S_EIXT_LATENCY_B2A4U, /* > 2 us < 4 us */
+ PCIE_L0S_EIXT_LATENCY_M4US, /* > 4 us */
+};
+
+/* L1 Exit Latency definition */
+enum {
+ PCIE_L1_EXIT_LATENCY_L1US = 0, /* < 1 us */
+ PCIE_L1_EXIT_LATENCY_B1A2, /* > 1 us < 2 us */
+ PCIE_L1_EXIT_LATENCY_B2A4, /* > 2 us < 4 us */
+ PCIE_L1_EXIT_LATENCY_B4A8, /* > 4 us < 8 us */
+ PCIE_L1_EXIT_LATENCY_B8A16, /* > 8 us < 16 us */
+ PCIE_L1_EXIT_LATENCY_B16A32, /* > 16 us < 32 us */
+ PCIE_L1_EXIT_LATENCY_B32A64, /* > 32 us < 64 us */
+ PCIE_L1_EXIT_LATENCY_M64US, /* > 64 us */
+};
+
+/* Link Control and Status Register */
+#define PCIE_LCTLSTS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x80)
+#define PCIE_LCTLSTS_ASPM_ENABLE 0x00000003 /* Active State Link PM Control */
+#define PCIE_LCTLSTS_ASPM_ENABLE_S 0
+#define PCIE_LCTLSTS_RCB128 0x00000008 /* Read Completion Boundary 128*/
+#define PCIE_LCTLSTS_LINK_DISABLE 0x00000010 /* Link Disable */
+#define PCIE_LCTLSTS_RETRIAN_LINK 0x00000020 /* Retrain Link */
+#define PCIE_LCTLSTS_COM_CLK_CFG 0x00000040 /* Common Clock Configuration */
+#define PCIE_LCTLSTS_EXT_SYNC 0x00000080 /* Extended Synch */
+#define PCIE_LCTLSTS_CLK_PM_EN 0x00000100 /* Enable Clock Power Management */
+#define PCIE_LCTLSTS_LINK_SPEED 0x000F0000 /* Link Speed */
+#define PCIE_LCTLSTS_LINK_SPEED_S 16
+#define PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH 0x03F00000 /* Negotiated Link Width */
+#define PCIE_LCTLSTS_NEGOTIATED_LINK_WIDTH_S 20
+#define PCIE_LCTLSTS_RETRAIN_PENDING 0x08000000 /* Link training is ongoing */
+#define PCIE_LCTLSTS_SLOT_CLK_CFG 0x10000000 /* Slot Clock Configuration */
+#define PCIE_LCTLSTS_DLL_ACTIVE 0x20000000 /* Data Link Layer Active */
+
+/* Slot Capabilities Register */
+#define PCIE_SLCAP(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x84)
+
+/* Slot Capabilities */
+#define PCIE_SLCTLSTS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x88)
+
+/* Root Control and Capability Register */
+#define PCIE_RCTLCAP(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x8C)
+#define PCIE_RCTLCAP_SERR_ON_CORRECTABLE_ERR 0x00000001 /* #SERR on COR-ERR */
+#define PCIE_RCTLCAP_SERR_ON_NONFATAL_ERR 0x00000002 /* #SERR on Non-Fatal ERR */
+#define PCIE_RCTLCAP_SERR_ON_FATAL_ERR 0x00000004 /* #SERR on Fatal ERR */
+#define PCIE_RCTLCAP_PME_INT_EN 0x00000008 /* PME Interrupt Enable */
+#define PCIE_RCTLCAP_SERR_ENABLE (PCIE_RCTLCAP_SERR_ON_CORRECTABLE_ERR | \
+ PCIE_RCTLCAP_SERR_ON_NONFATAL_ERR | PCIE_RCTLCAP_SERR_ON_FATAL_ERR)
+/* Root Status Register */
+#define PCIE_RSTS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x90)
+#define PCIE_RSTS_PME_REQ_ID 0x0000FFFF /* PME Request ID */
+#define PCIE_RSTS_PME_REQ_ID_S 0
+#define PCIE_RSTS_PME_STATUS 0x00010000 /* PME Status */
+#define PCIE_RSTS_PME_PENDING 0x00020000 /* PME Pending */
+
+/* PCI Express Enhanced Capability Header */
+#define PCIE_ENHANCED_CAP(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x100)
+#define PCIE_ENHANCED_CAP_ID 0x0000FFFF /* PCI Express Extended Capability ID */
+#define PCIE_ENHANCED_CAP_ID_S 0
+#define PCIE_ENHANCED_CAP_VER 0x000F0000 /* Capability Version */
+#define PCIE_ENHANCED_CAP_VER_S 16
+#define PCIE_ENHANCED_CAP_NEXT_OFFSET 0xFFF00000 /* Next Capability Offset */
+#define PCIE_ENHANCED_CAP_NEXT_OFFSET_S 20
+
+/* Uncorrectable Error Status Register */
+#define PCIE_UES_R(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x104)
+#define PCIE_DATA_LINK_PROTOCOL_ERR 0x00000010 /* Data Link Protocol Error Status */
+#define PCIE_SURPRISE_DOWN_ERROR 0x00000020 /* Surprise Down Error Status */
+#define PCIE_POISONED_TLP 0x00001000 /* Poisoned TLP Status */
+#define PCIE_FC_PROTOCOL_ERR 0x00002000 /* Flow Control Protocol Error Status */
+#define PCIE_COMPLETION_TIMEOUT 0x00004000 /* Completion Timeout Status */
+#define PCIE_COMPLETOR_ABORT 0x00008000 /* Completer Abort Error */
+#define PCIE_UNEXPECTED_COMPLETION 0x00010000 /* Unexpected Completion Status */
+#define PCIE_RECEIVER_OVERFLOW 0x00020000 /* Receive Overflow Status */
+#define PCIE_MALFORNED_TLP 0x00040000 /* Malformed TLP Status */
+#define PCIE_ECRC_ERR 0x00080000 /* ECRC Error Status */
+#define PCIE_UR_REQ 0x00100000 /* Unsupported Request Error Status */
+#define PCIE_ALL_UNCORRECTABLE_ERR (PCIE_DATA_LINK_PROTOCOL_ERR | PCIE_SURPRISE_DOWN_ERROR | \
+ PCIE_POISONED_TLP | PCIE_FC_PROTOCOL_ERR | PCIE_COMPLETION_TIMEOUT | \
+ PCIE_COMPLETOR_ABORT | PCIE_UNEXPECTED_COMPLETION | PCIE_RECEIVER_OVERFLOW |\
+ PCIE_MALFORNED_TLP | PCIE_ECRC_ERR | PCIE_UR_REQ)
+
+/* Uncorrectable Error Mask Register, Mask means no report */
+#define PCIE_UEMR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x108)
+
+/* Uncorrectable Error Severity Register */
+#define PCIE_UESR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x10C)
+
+/* Correctable Error Status Register */
+#define PCIE_CESR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x110)
+#define PCIE_RX_ERR 0x00000001 /* Receive Error Status */
+#define PCIE_BAD_TLP 0x00000040 /* Bad TLP Status */
+#define PCIE_BAD_DLLP 0x00000080 /* Bad DLLP Status */
+#define PCIE_REPLAY_NUM_ROLLOVER 0x00000100 /* Replay Number Rollover Status */
+#define PCIE_REPLAY_TIMER_TIMEOUT_ERR 0x00001000 /* Replay Timer Timeout Status */
+#define PCIE_ADVISORY_NONFTAL_ERR 0x00002000 /* Advisory Non-Fatal Error Status */
+#define PCIE_CORRECTABLE_ERR (PCIE_RX_ERR | PCIE_BAD_TLP | PCIE_BAD_DLLP | PCIE_REPLAY_NUM_ROLLOVER |\
+ PCIE_REPLAY_TIMER_TIMEOUT_ERR | PCIE_ADVISORY_NONFTAL_ERR)
+
+/* Correctable Error Mask Register */
+#define PCIE_CEMR(X) (volatile u32*)(PCIE_RC_CFG_BASE + 0x114)
+
+/* Advanced Error Capabilities and Control Register */
+#define PCIE_AECCR(X) (volatile u32*)(PCIE_RC_CFG_BASE + 0x118)
+#define PCIE_AECCR_FIRST_ERR_PTR 0x0000001F /* First Error Pointer */
+#define PCIE_AECCR_FIRST_ERR_PTR_S 0
+#define PCIE_AECCR_ECRC_GEN_CAP 0x00000020 /* ECRC Generation Capable */
+#define PCIE_AECCR_ECRC_GEN_EN 0x00000040 /* ECRC Generation Enable */
+#define PCIE_AECCR_ECRC_CHECK_CAP 0x00000080 /* ECRC Check Capable */
+#define PCIE_AECCR_ECRC_CHECK_EN 0x00000100 /* ECRC Check Enable */
+
+/* Header Log Register 1 */
+#define PCIE_HLR1(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x11C)
+
+/* Header Log Register 2 */
+#define PCIE_HLR2(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x120)
+
+/* Header Log Register 3 */
+#define PCIE_HLR3(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x124)
+
+/* Header Log Register 4 */
+#define PCIE_HLR4(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x128)
+
+/* Root Error Command Register */
+#define PCIE_RECR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x12C)
+#define PCIE_RECR_CORRECTABLE_ERR_REPORT_EN 0x00000001 /* COR-ERR */
+#define PCIE_RECR_NONFATAL_ERR_REPORT_EN 0x00000002 /* Non-Fatal ERR */
+#define PCIE_RECR_FATAL_ERR_REPORT_EN 0x00000004 /* Fatal ERR */
+#define PCIE_RECR_ERR_REPORT_EN (PCIE_RECR_CORRECTABLE_ERR_REPORT_EN | \
+ PCIE_RECR_NONFATAL_ERR_REPORT_EN | PCIE_RECR_FATAL_ERR_REPORT_EN)
+
+/* Root Error Status Register */
+#define PCIE_RESR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x130)
+#define PCIE_RESR_CORRECTABLE_ERR 0x00000001 /* COR-ERR Received */
+#define PCIE_RESR_MULTI_CORRECTABLE_ERR 0x00000002 /* Multiple COR-ERR Received */
+#define PCIE_RESR_FATAL_NOFATAL_ERR 0x00000004 /* ERR Fatal/Non-Fatal Received */
+#define PCIE_RESR_MULTI_FATAL_NOFATAL_ERR 0x00000008 /* Multiple ERR Fatal/Non-Fatal Received */
+#define PCIE_RESR_FIRST_UNCORRECTABLE_FATAL_ERR 0x00000010 /* First UN-COR Fatal */
+#define PCIR_RESR_NON_FATAL_ERR 0x00000020 /* Non-Fatal Error Message Received */
+#define PCIE_RESR_FATAL_ERR 0x00000040 /* Fatal Message Received */
+#define PCIE_RESR_AER_INT_MSG_NUM 0xF8000000 /* Advanced Error Interrupt Message Number */
+#define PCIE_RESR_AER_INT_MSG_NUM_S 27
+
+/* Error Source Identification Register */
+#define PCIE_ESIR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x134)
+#define PCIE_ESIR_CORRECTABLE_ERR_SRC_ID 0x0000FFFF
+#define PCIE_ESIR_CORRECTABLE_ERR_SRC_ID_S 0
+#define PCIE_ESIR_FATAL_NON_FATAL_SRC_ID 0xFFFF0000
+#define PCIE_ESIR_FATAL_NON_FATAL_SRC_ID_S 16
+
+/* VC Enhanced Capability Header */
+#define PCIE_VC_ECH(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x140)
+
+/* Port VC Capability Register */
+#define PCIE_PVC1(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x144)
+#define PCIE_PVC1_EXT_VC_CNT 0x00000007 /* Extended VC Count */
+#define PCIE_PVC1_EXT_VC_CNT_S 0
+#define PCIE_PVC1_LOW_PRI_EXT_VC_CNT 0x00000070 /* Low Priority Extended VC Count */
+#define PCIE_PVC1_LOW_PRI_EXT_VC_CNT_S 4
+#define PCIE_PVC1_REF_CLK 0x00000300 /* Reference Clock */
+#define PCIE_PVC1_REF_CLK_S 8
+#define PCIE_PVC1_PORT_ARB_TAB_ENTRY_SIZE 0x00000C00 /* Port Arbitration Table Entry Size */
+#define PCIE_PVC1_PORT_ARB_TAB_ENTRY_SIZE_S 10
+
+/* Extended Virtual Channel Count Definition */
+#define PCIE_EXT_VC_CNT_MIN 0
+#define PCIE_EXT_VC_CNT_MAX 7
+
+/* Port Arbitration Table Entry Size Definition */
+enum {
+ PCIE_PORT_ARB_TAB_ENTRY_SIZE_S1BIT = 0,
+ PCIE_PORT_ARB_TAB_ENTRY_SIZE_S2BIT,
+ PCIE_PORT_ARB_TAB_ENTRY_SIZE_S4BIT,
+ PCIE_PORT_ARB_TAB_ENTRY_SIZE_S8BIT,
+};
+
+/* Port VC Capability Register 2 */
+#define PCIE_PVC2(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x148)
+#define PCIE_PVC2_VC_ARB_16P_FIXED_WRR 0x00000001 /* HW Fixed arbitration, 16 phase WRR */
+#define PCIE_PVC2_VC_ARB_32P_WRR 0x00000002 /* 32 phase WRR */
+#define PCIE_PVC2_VC_ARB_64P_WRR 0x00000004 /* 64 phase WRR */
+#define PCIE_PVC2_VC_ARB_128P_WRR 0x00000008 /* 128 phase WRR */
+#define PCIE_PVC2_VC_ARB_WRR 0x0000000F
+#define PCIE_PVC2_VC_ARB_TAB_OFFSET 0xFF000000 /* VC arbitration table offset, not supported */
+#define PCIE_PVC2_VC_ARB_TAB_OFFSET_S 24
+
+/* Port VC Control and Status Register */
+#define PCIE_PVCCRSR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x14C)
+#define PCIE_PVCCRSR_LOAD_VC_ARB_TAB 0x00000001 /* Load VC Arbitration Table */
+#define PCIE_PVCCRSR_VC_ARB_SEL 0x0000000E /* VC Arbitration Select */
+#define PCIE_PVCCRSR_VC_ARB_SEL_S 1
+#define PCIE_PVCCRSR_VC_ARB_TAB_STATUS 0x00010000 /* Arbitration Status */
+
+/* VC0 Resource Capability Register */
+#define PCIE_VC0_RC(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x150)
+#define PCIE_VC0_RC_PORT_ARB_HW_FIXED 0x00000001 /* HW Fixed arbitration */
+#define PCIE_VC0_RC_PORT_ARB_32P_WRR 0x00000002 /* 32 phase WRR */
+#define PCIE_VC0_RC_PORT_ARB_64P_WRR 0x00000004 /* 64 phase WRR */
+#define PCIE_VC0_RC_PORT_ARB_128P_WRR 0x00000008 /* 128 phase WRR */
+#define PCIE_VC0_RC_PORT_ARB_TM_128P_WRR 0x00000010 /* Time-based 128 phase WRR */
+#define PCIE_VC0_RC_PORT_ARB_TM_256P_WRR 0x00000020 /* Time-based 256 phase WRR */
+#define PCIE_VC0_RC_PORT_ARB (PCIE_VC0_RC_PORT_ARB_HW_FIXED | PCIE_VC0_RC_PORT_ARB_32P_WRR |\
+ PCIE_VC0_RC_PORT_ARB_64P_WRR | PCIE_VC0_RC_PORT_ARB_128P_WRR | \
+ PCIE_VC0_RC_PORT_ARB_TM_128P_WRR | PCIE_VC0_RC_PORT_ARB_TM_256P_WRR)
+
+#define PCIE_VC0_RC_REJECT_SNOOP 0x00008000 /* Reject Snoop Transactions */
+#define PCIE_VC0_RC_MAX_TIMESLOTS 0x007F0000 /* Maximum Time Slots */
+#define PCIE_VC0_RC_MAX_TIMESLOTS_S 16
+#define PCIE_VC0_RC_PORT_ARB_TAB_OFFSET 0xFF000000 /* Port Arbitration Table Offset */
+#define PCIE_VC0_RC_PORT_ARB_TAB_OFFSET_S 24
+
+/* VC0 Resource Control Register */
+#define PCIE_VC0_RC0(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x154)
+#define PCIE_VC0_RC0_TVM0 0x00000001 /* TC0 and VC0 */
+#define PCIE_VC0_RC0_TVM1 0x00000002 /* TC1 and VC1 */
+#define PCIE_VC0_RC0_TVM2 0x00000004 /* TC2 and VC2 */
+#define PCIE_VC0_RC0_TVM3 0x00000008 /* TC3 and VC3 */
+#define PCIE_VC0_RC0_TVM4 0x00000010 /* TC4 and VC4 */
+#define PCIE_VC0_RC0_TVM5 0x00000020 /* TC5 and VC5 */
+#define PCIE_VC0_RC0_TVM6 0x00000040 /* TC6 and VC6 */
+#define PCIE_VC0_RC0_TVM7 0x00000080 /* TC7 and VC7 */
+#define PCIE_VC0_RC0_TC_VC 0x000000FF /* TC/VC mask */
+
+#define PCIE_VC0_RC0_LOAD_PORT_ARB_TAB 0x00010000 /* Load Port Arbitration Table */
+#define PCIE_VC0_RC0_PORT_ARB_SEL 0x000E0000 /* Port Arbitration Select */
+#define PCIE_VC0_RC0_PORT_ARB_SEL_S 17
+#define PCIE_VC0_RC0_VC_ID 0x07000000 /* VC ID */
+#define PCIE_VC0_RC0_VC_ID_S 24
+#define PCIE_VC0_RC0_VC_EN 0x80000000 /* VC Enable */
+
+/* VC0 Resource Status Register */
+#define PCIE_VC0_RSR0(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x158)
+#define PCIE_VC0_RSR0_PORT_ARB_TAB_STATUS 0x00010000 /* Port Arbitration Table Status, not used */
+#define PCIE_VC0_RSR0_VC_NEG_PENDING 0x00020000 /* VC Negotiation Pending */
+
+/* Ack Latency Timer and Replay Timer Register */
+#define PCIE_ALTRT(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x700)
+#define PCIE_ALTRT_ROUND_TRIP_LATENCY_LIMIT 0x0000FFFF /* Round Trip Latency Time Limit */
+#define PCIE_ALTRT_ROUND_TRIP_LATENCY_LIMIT_S 0
+#define PCIE_ALTRT_REPLAY_TIME_LIMIT 0xFFFF0000 /* Replay Time Limit */
+#define PCIE_ALTRT_REPLAY_TIME_LIMIT_S 16
+
+/* Other Message Register */
+#define PCIE_OMR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x704)
+
+/* Port Force Link Register */
+#define PCIE_PFLR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x708)
+#define PCIE_PFLR_LINK_NUM 0x000000FF /* Link Number */
+#define PCIE_PFLR_LINK_NUM_S 0
+#define PCIE_PFLR_FORCE_LINK 0x00008000 /* Force link */
+#define PCIE_PFLR_LINK_STATE 0x003F0000 /* Link State */
+#define PCIE_PFLR_LINK_STATE_S 16
+#define PCIE_PFLR_LOW_POWER_ENTRY_CNT 0xFF000000 /* Low Power Entrance Count, only for EP */
+#define PCIE_PFLR_LOW_POWER_ENTRY_CNT_S 24
+
+/* Ack Frequency Register */
+#define PCIE_AFR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x70C)
+#define PCIE_AFR_AF 0x000000FF /* Ack Frequency */
+#define PCIE_AFR_AF_S 0
+#define PCIE_AFR_FTS_NUM 0x0000FF00 /* The number of Fast Training Sequence from L0S to L0 */
+#define PCIE_AFR_FTS_NUM_S 8
+#define PCIE_AFR_COM_FTS_NUM 0x00FF0000 /* N_FTS when common clock is used */
+#define PCIE_AFR_COM_FTS_NUM_S 16
+#define PCIE_AFR_L0S_ENTRY_LATENCY 0x07000000 /* L0s Entrance Latency */
+#define PCIE_AFR_L0S_ENTRY_LATENCY_S 24
+#define PCIE_AFR_L1_ENTRY_LATENCY 0x38000000 /* L1 Entrance Latency */
+#define PCIE_AFR_L1_ENTRY_LATENCY_S 27
+#define PCIE_AFR_FTS_NUM_DEFAULT 32
+#define PCIE_AFR_L0S_ENTRY_LATENCY_DEFAULT 7
+#define PCIE_AFR_L1_ENTRY_LATENCY_DEFAULT 5
+
+/* Port Link Control Register */
+#define PCIE_PLCR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x710)
+#define PCIE_PLCR_OTHER_MSG_REQ 0x00000001 /* Other Message Request */
+#define PCIE_PLCR_SCRAMBLE_DISABLE 0x00000002 /* Scramble Disable */
+#define PCIE_PLCR_LOOPBACK_EN 0x00000004 /* Loopback Enable */
+#define PCIE_PLCR_LTSSM_HOT_RST 0x00000008 /* Force LTSSM to the hot reset */
+#define PCIE_PLCR_DLL_LINK_EN 0x00000020 /* Enable Link initialization */
+#define PCIE_PLCR_FAST_LINK_SIM_EN 0x00000080 /* Sets all internal timers to fast mode for simulation purposes */
+#define PCIE_PLCR_LINK_MODE 0x003F0000 /* Link Mode Enable Mask */
+#define PCIE_PLCR_LINK_MODE_S 16
+#define PCIE_PLCR_CORRUPTED_CRC_EN 0x02000000 /* Enabled Corrupt CRC */
+
+/* Lane Skew Register */
+#define PCIE_LSR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x714)
+#define PCIE_LSR_LANE_SKEW_NUM 0x00FFFFFF /* Insert Lane Skew for Transmit, not applicable */
+#define PCIE_LSR_LANE_SKEW_NUM_S 0
+#define PCIE_LSR_FC_DISABLE 0x01000000 /* Disable of Flow Control */
+#define PCIE_LSR_ACKNAK_DISABLE 0x02000000 /* Disable of Ack/Nak */
+#define PCIE_LSR_LANE_DESKEW_DISABLE 0x80000000 /* Disable of Lane-to-Lane Skew */
+
+/* Symbol Number Register */
+#define PCIE_SNR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x718)
+#define PCIE_SNR_TS 0x0000000F /* Number of TS Symbol */
+#define PCIE_SNR_TS_S 0
+#define PCIE_SNR_SKP 0x00000700 /* Number of SKP Symbol */
+#define PCIE_SNR_SKP_S 8
+#define PCIE_SNR_REPLAY_TIMER 0x0007C000 /* Timer Modifier for Replay Timer */
+#define PCIE_SNR_REPLAY_TIMER_S 14
+#define PCIE_SNR_ACKNAK_LATENCY_TIMER 0x00F80000 /* Timer Modifier for Ack/Nak Latency Timer */
+#define PCIE_SNR_ACKNAK_LATENCY_TIMER_S 19
+#define PCIE_SNR_FC_TIMER 0x1F000000 /* Timer Modifier for Flow Control Watchdog Timer */
+#define PCIE_SNR_FC_TIMER_S 28
+
+/* Symbol Timer Register and Filter Mask Register 1 */
+#define PCIE_STRFMR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x71C)
+#define PCIE_STRFMR_SKP_INTERVAL 0x000007FF /* SKP Interval Value */
+#define PCIE_STRFMR_SKP_INTERVAL_S 0
+#define PCIE_STRFMR_FC_WDT_DISABLE 0x00008000 /* Disable of FC Watchdog Timer */
+#define PCIE_STRFMR_TLP_FUNC_MISMATCH_OK 0x00010000 /* Mask Function Mismatch Filtering for Incoming Requests */
+#define PCIE_STRFMR_POISONED_TLP_OK 0x00020000 /* Mask Poisoned TLP Filtering */
+#define PCIE_STRFMR_BAR_MATCH_OK 0x00040000 /* Mask BAR Match Filtering */
+#define PCIE_STRFMR_TYPE1_CFG_REQ_OK 0x00080000 /* Mask Type 1 Configuration Request Filtering */
+#define PCIE_STRFMR_LOCKED_REQ_OK 0x00100000 /* Mask Locked Request Filtering */
+#define PCIE_STRFMR_CPL_TAG_ERR_RULES_OK 0x00200000 /* Mask Tag Error Rules for Received Completions */
+#define PCIE_STRFMR_CPL_REQUESTOR_ID_MISMATCH_OK 0x00400000 /* Mask Requester ID Mismatch Error for Received Completions */
+#define PCIE_STRFMR_CPL_FUNC_MISMATCH_OK 0x00800000 /* Mask Function Mismatch Error for Received Completions */
+#define PCIE_STRFMR_CPL_TC_MISMATCH_OK 0x01000000 /* Mask Traffic Class Mismatch Error for Received Completions */
+#define PCIE_STRFMR_CPL_ATTR_MISMATCH_OK 0x02000000 /* Mask Attribute Mismatch Error for Received Completions */
+#define PCIE_STRFMR_CPL_LENGTH_MISMATCH_OK 0x04000000 /* Mask Length Mismatch Error for Received Completions */
+#define PCIE_STRFMR_TLP_ECRC_ERR_OK 0x08000000 /* Mask ECRC Error Filtering */
+#define PCIE_STRFMR_CPL_TLP_ECRC_OK 0x10000000 /* Mask ECRC Error Filtering for Completions */
+#define PCIE_STRFMR_RX_TLP_MSG_NO_DROP 0x20000000 /* Send Message TLPs */
+#define PCIE_STRFMR_RX_IO_TRANS_ENABLE 0x40000000 /* Mask Filtering of received I/O Requests */
+#define PCIE_STRFMR_RX_CFG_TRANS_ENABLE 0x80000000 /* Mask Filtering of Received Configuration Requests */
+
+#define PCIE_DEF_SKP_INTERVAL 700 /* 1180 ~ 1538, 125 MHz * 2, 250 MHz * 1 */
+
+/* Filter Masker Register 2 */
+#define PCIE_FMR2(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x720)
+#define PCIE_FMR2_VENDOR_MSG0_PASSED_TO_TRGT1 0x00000001 /* Mask RADM Filtering and Error Handling Rules */
+#define PCIE_FMR2_VENDOR_MSG1_PASSED_TO_TRGT1 0x00000002 /* Mask RADM Filtering and Error Handling Rules */
+
+/* Debug Register 0 */
+#define PCIE_DBR0(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x728)
+
+/* Debug Register 1 */
+#define PCIE_DBR1(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x72C)
+
+/* Transmit Posted FC Credit Status Register */
+#define PCIE_TPFCS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x730)
+#define PCIE_TPFCS_TX_P_DATA_FC_CREDITS 0x00000FFF /* Transmit Posted Data FC Credits */
+#define PCIE_TPFCS_TX_P_DATA_FC_CREDITS_S 0
+#define PCIE_TPFCS_TX_P_HDR_FC_CREDITS 0x000FF000 /* Transmit Posted Header FC Credits */
+#define PCIE_TPFCS_TX_P_HDR_FC_CREDITS_S 12
+
+/* Transmit Non-Posted FC Credit Status */
+#define PCIE_TNPFCS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x734)
+#define PCIE_TNPFCS_TX_NP_DATA_FC_CREDITS 0x00000FFF /* Transmit Non-Posted Data FC Credits */
+#define PCIE_TNPFCS_TX_NP_DATA_FC_CREDITS_S 0
+#define PCIE_TNPFCS_TX_NP_HDR_FC_CREDITS 0x000FF000 /* Transmit Non-Posted Header FC Credits */
+#define PCIE_TNPFCS_TX_NP_HDR_FC_CREDITS_S 12
+
+/* Transmit Complete FC Credit Status Register */
+#define PCIE_TCFCS(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x738)
+#define PCIE_TCFCS_TX_CPL_DATA_FC_CREDITS 0x00000FFF /* Transmit Completion Data FC Credits */
+#define PCIE_TCFCS_TX_CPL_DATA_FC_CREDITS_S 0
+#define PCIE_TCFCS_TX_CPL_HDR_FC_CREDITS 0x000FF000 /* Transmit Completion Header FC Credits */
+#define PCIE_TCFCS_TX_CPL_HDR_FC_CREDITS_S 12
+
+/* Queue Status Register */
+#define PCIE_QSR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x73C)
+#define PCIE_QSR_WAIT_UPDATE_FC_DLL 0x00000001 /* Received TLP FC Credits Not Returned */
+#define PCIE_QSR_TX_RETRY_BUF_NOT_EMPTY 0x00000002 /* Transmit Retry Buffer Not Empty */
+#define PCIE_QSR_RX_QUEUE_NOT_EMPTY 0x00000004 /* Received Queue Not Empty */
+
+/* VC Transmit Arbitration Register 1 */
+#define PCIE_VCTAR1(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x740)
+#define PCIE_VCTAR1_WRR_WEIGHT_VC0 0x000000FF /* WRR Weight for VC0 */
+#define PCIE_VCTAR1_WRR_WEIGHT_VC1 0x0000FF00 /* WRR Weight for VC1 */
+#define PCIE_VCTAR1_WRR_WEIGHT_VC2 0x00FF0000 /* WRR Weight for VC2 */
+#define PCIE_VCTAR1_WRR_WEIGHT_VC3 0xFF000000 /* WRR Weight for VC3 */
+
+/* VC Transmit Arbitration Register 2 */
+#define PCIE_VCTAR2(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x744)
+#define PCIE_VCTAR2_WRR_WEIGHT_VC4 0x000000FF /* WRR Weight for VC4 */
+#define PCIE_VCTAR2_WRR_WEIGHT_VC5 0x0000FF00 /* WRR Weight for VC5 */
+#define PCIE_VCTAR2_WRR_WEIGHT_VC6 0x00FF0000 /* WRR Weight for VC6 */
+#define PCIE_VCTAR2_WRR_WEIGHT_VC7 0xFF000000 /* WRR Weight for VC7 */
+
+/* VC0 Posted Receive Queue Control Register */
+#define PCIE_VC0_PRQCR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x748)
+#define PCIE_VC0_PRQCR_P_DATA_CREDITS 0x00000FFF /* VC0 Posted Data Credits */
+#define PCIE_VC0_PRQCR_P_DATA_CREDITS_S 0
+#define PCIE_VC0_PRQCR_P_HDR_CREDITS 0x000FF000 /* VC0 Posted Header Credits */
+#define PCIE_VC0_PRQCR_P_HDR_CREDITS_S 12
+#define PCIE_VC0_PRQCR_P_TLP_QUEUE_MODE 0x00E00000 /* VC0 Posted TLP Queue Mode */
+#define PCIE_VC0_PRQCR_P_TLP_QUEUE_MODE_S 20
+#define PCIE_VC0_PRQCR_TLP_RELAX_ORDER 0x40000000 /* TLP Type Ordering for VC0 */
+#define PCIE_VC0_PRQCR_VC_STRICT_ORDER 0x80000000 /* VC0 Ordering for Receive Queues */
+
+/* VC0 Non-Posted Receive Queue Control */
+#define PCIE_VC0_NPRQCR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x74C)
+#define PCIE_VC0_NPRQCR_NP_DATA_CREDITS 0x00000FFF /* VC0 Non-Posted Data Credits */
+#define PCIE_VC0_NPRQCR_NP_DATA_CREDITS_S 0
+#define PCIE_VC0_NPRQCR_NP_HDR_CREDITS 0x000FF000 /* VC0 Non-Posted Header Credits */
+#define PCIE_VC0_NPRQCR_NP_HDR_CREDITS_S 12
+#define PCIE_VC0_NPRQCR_NP_TLP_QUEUE_MODE 0x00E00000 /* VC0 Non-Posted TLP Queue Mode */
+#define PCIE_VC0_NPRQCR_NP_TLP_QUEUE_MODE_S 20
+
+/* VC0 Completion Receive Queue Control */
+#define PCIE_VC0_CRQCR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x750)
+#define PCIE_VC0_CRQCR_CPL_DATA_CREDITS 0x00000FFF /* VC0 Completion Data Credits */
+#define PCIE_VC0_CRQCR_CPL_DATA_CREDITS_S 0
+#define PCIE_VC0_CRQCR_CPL_HDR_CREDITS 0x000FF000 /* VC0 Completion Header Credits */
+#define PCIE_VC0_CRQCR_CPL_HDR_CREDITS_S 12
+#define PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE 0x00E00000 /* VC0 Completion TLP Queue Mode */
+#define PCIE_VC0_CRQCR_CPL_TLP_QUEUE_MODE_S 21
+
+/* Applicable to the above three registers */
+enum {
+ PCIE_VC0_TLP_QUEUE_MODE_STORE_FORWARD = 1,
+ PCIE_VC0_TLP_QUEUE_MODE_CUT_THROUGH = 2,
+ PCIE_VC0_TLP_QUEUE_MODE_BYPASS = 4,
+};
+
+/* VC0 Posted Buffer Depth Register */
+#define PCIE_VC0_PBD(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7A8)
+#define PCIE_VC0_PBD_P_DATA_QUEUE_ENTRIES 0x00003FFF /* VC0 Posted Data Queue Depth */
+#define PCIE_VC0_PBD_P_DATA_QUEUE_ENTRIES_S 0
+#define PCIE_VC0_PBD_P_HDR_QUEUE_ENTRIES 0x03FF0000 /* VC0 Posted Header Queue Depth */
+#define PCIE_VC0_PBD_P_HDR_QUEUE_ENTRIES_S 16
+
+/* VC0 Non-Posted Buffer Depth Register */
+#define PCIE_VC0_NPBD(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7AC)
+#define PCIE_VC0_NPBD_NP_DATA_QUEUE_ENTRIES 0x00003FFF /* VC0 Non-Posted Data Queue Depth */
+#define PCIE_VC0_NPBD_NP_DATA_QUEUE_ENTRIES_S 0
+#define PCIE_VC0_NPBD_NP_HDR_QUEUE_ENTRIES 0x03FF0000 /* VC0 Non-Posted Header Queue Depth */
+#define PCIE_VC0_NPBD_NP_HDR_QUEUE_ENTRIES_S 16
+
+/* VC0 Completion Buffer Depth Register */
+#define PCIE_VC0_CBD(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x7B0)
+#define PCIE_VC0_CBD_CPL_DATA_QUEUE_ENTRIES 0x00003FFF /* VC0 Completion Data Queue Depth */
+#define PCIE_VC0_CBD_CPL_DATA_QUEUE_ENTRIES_S 0
+#define PCIE_VC0_CBD_CPL_HDR_QUEUE_ENTRIES 0x03FF0000 /* VC0 Completion Header Queue Depth */
+#define PCIE_VC0_CBD_CPL_HDR_QUEUE_ENTRIES_S 16
+
+/* PHY Status Register, all zeros in VR9 */
+#define PCIE_PHYSR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x810)
+
+/* PHY Control Register, all zeros in VR9 */
+#define PCIE_PHYCR(X) (volatile u32*)(PCIE_RC_PORT_TO_BASE(X) + 0x814)
+
+/*
+ * PCIe PDI PHY register definitions. All of the following material is
+ * assumed to be confidential.
+ * XXX: add detailed bit definitions
+ */
+#define PCIE_PHY_PLL_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x22 << 1))
+#define PCIE_PHY_PLL_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x23 << 1))
+#define PCIE_PHY_PLL_CTRL3(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x24 << 1))
+#define PCIE_PHY_PLL_CTRL4(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x25 << 1))
+#define PCIE_PHY_PLL_CTRL5(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x26 << 1))
+#define PCIE_PHY_PLL_CTRL6(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x27 << 1))
+#define PCIE_PHY_PLL_CTRL7(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x28 << 1))
+#define PCIE_PHY_PLL_A_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x29 << 1))
+#define PCIE_PHY_PLL_A_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x2A << 1))
+#define PCIE_PHY_PLL_A_CTRL3(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x2B << 1))
+#define PCIE_PHY_PLL_STATUS(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x2C << 1))
+
+#define PCIE_PHY_TX1_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x30 << 1))
+#define PCIE_PHY_TX1_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x31 << 1))
+#define PCIE_PHY_TX1_CTRL3(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x32 << 1))
+#define PCIE_PHY_TX1_A_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x33 << 1))
+#define PCIE_PHY_TX1_A_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x34 << 1))
+#define PCIE_PHY_TX1_MOD1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x35 << 1))
+#define PCIE_PHY_TX1_MOD2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x36 << 1))
+#define PCIE_PHY_TX1_MOD3(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x37 << 1))
+
+#define PCIE_PHY_TX2_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x38 << 1))
+#define PCIE_PHY_TX2_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x39 << 1))
+#define PCIE_PHY_TX2_A_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x3B << 1))
+#define PCIE_PHY_TX2_A_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x3C << 1))
+#define PCIE_PHY_TX2_MOD1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x3D << 1))
+#define PCIE_PHY_TX2_MOD2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x3E << 1))
+#define PCIE_PHY_TX2_MOD3(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x3F << 1))
+
+#define PCIE_PHY_RX1_CTRL1(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x50 << 1))
+#define PCIE_PHY_RX1_CTRL2(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x51 << 1))
+#define PCIE_PHY_RX1_CDR(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x52 << 1))
+#define PCIE_PHY_RX1_EI(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x53 << 1))
+#define PCIE_PHY_RX1_A_CTRL(X) (PCIE_PHY_PORT_TO_BASE(X) + (0x55 << 1))
+
+/* Interrupt related stuff */
+#define PCIE_LEGACY_DISABLE 0
+#define PCIE_LEGACY_INTA 1
+#define PCIE_LEGACY_INTB 2
+#define PCIE_LEGACY_INTC 3
+#define PCIE_LEGACY_INTD 4
+#define PCIE_LEGACY_INT_MAX PCIE_LEGACY_INTD
+
+#define PCIE_IRQ_LOCK(lock) do { \
+ unsigned long flags; \
+ spin_lock_irqsave(&(lock), flags);
+#define PCIE_IRQ_UNLOCK(lock) \
+ spin_unlock_irqrestore(&(lock), flags); \
+} while (0)
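+
+/*
+ * Usage sketch (illustrative only): the two macros form a matched pair and
+ * must appear together in the same function so that 'flags' stays in scope,
+ * e.g.
+ *
+ *	PCIE_IRQ_LOCK(ifx_pcie_lock);
+ *	... access shared PCIe state ...
+ *	PCIE_IRQ_UNLOCK(ifx_pcie_lock);
+ *
+ * 'ifx_pcie_lock' is a hypothetical spinlock name used for illustration.
+ */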
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#define PCIE_MSG_MSI 0x00000001
+#define PCIE_MSG_ISR 0x00000002
+#define PCIE_MSG_FIXUP 0x00000004
+#define PCIE_MSG_READ_CFG 0x00000008
+#define PCIE_MSG_WRITE_CFG 0x00000010
+#define PCIE_MSG_CFG (PCIE_MSG_READ_CFG | PCIE_MSG_WRITE_CFG)
+#define PCIE_MSG_REG 0x00000020
+#define PCIE_MSG_INIT 0x00000040
+#define PCIE_MSG_ERR 0x00000080
+#define PCIE_MSG_PHY 0x00000100
+#define PCIE_MSG_ANY 0x000001ff
+
+#define IFX_PCIE_PORT0 0
+#define IFX_PCIE_PORT1 1
+
+#ifdef CONFIG_IFX_PCIE_2ND_CORE
+#define IFX_PCIE_CORE_NR 2
+#else
+#define IFX_PCIE_CORE_NR 1
+#endif
+
+//#define IFX_PCIE_ERROR_INT
+
+//#define IFX_PCIE_DBG
+
+#if defined(IFX_PCIE_DBG)
+#define IFX_PCIE_PRINT(_m, _fmt, args...) do { \
+ if (g_pcie_debug_flag & (_m)) { \
+ ifx_pcie_debug((_fmt), ##args); \
+ } \
+} while (0)
+
+#define INLINE
+#else
+#define IFX_PCIE_PRINT(_m, _fmt, args...) \
+ do {} while (0)
+#define INLINE inline
+#endif
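+
+/*
+ * Example call (illustrative, not from the original sources): trace output is
+ * filtered against g_pcie_debug_flag, so a trace in the configuration read
+ * path could look like
+ *	IFX_PCIE_PRINT(PCIE_MSG_READ_CFG, "cfg read on bus %d\n", bus_nr);
+ * where 'bus_nr' is a hypothetical local variable.
+ */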
+
+struct ifx_pci_controller {
+ struct pci_controller pcic;
+
+ /* RC specific, per host bus information */
+ u32 port; /* Port index, 0 -- 1st core, 1 -- 2nd core */
+};
+
+typedef struct ifx_pcie_ir_irq {
+ const unsigned int irq;
+ const char name[16];
+} ifx_pcie_ir_irq_t;
+
+typedef struct ifx_pcie_legacy_irq {
+ const u32 irq_bit;
+ const int irq;
+} ifx_pcie_legacy_irq_t;
+
+typedef struct ifx_pcie_irq {
+ ifx_pcie_ir_irq_t ir_irq;
+ ifx_pcie_legacy_irq_t legacy_irq[PCIE_LEGACY_INT_MAX];
+} ifx_pcie_irq_t;
+
+extern u32 g_pcie_debug_flag;
+extern void ifx_pcie_debug(const char *fmt, ...);
+extern void pcie_phy_clock_mode_setup(int pcie_port);
+extern void pcie_msi_pic_init(int pcie_port);
+extern u32 ifx_pcie_bus_enum_read_hack(int where, u32 value);
+extern u32 ifx_pcie_bus_enum_write_hack(int where, u32 value);
+
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+
+#include <lantiq_soc.h>
+
+#define IFX_PCIE_GPIO_RESET 38
+#define IFX_REG_R32 ltq_r32
+#define IFX_REG_W32 ltq_w32
+#define CONFIG_IFX_PCIE_HW_SWAP
+#define IFX_RCU_AHB_ENDIAN ((volatile u32*)(IFX_RCU + 0x004C))
+#define IFX_RCU_RST_REQ ((volatile u32*)(IFX_RCU + 0x0010))
+#define IFX_RCU_AHB_BE_PCIE_PDI 0x00000080 /* Configure PCIE PDI module in big endian */
+
+#define IFX_RCU (KSEG1 | 0x1F203000)
+#define IFX_RCU_AHB_BE_PCIE_M 0x00000001 /* Configure AHB master port that connects to PCIe RC in big endian */
+#define IFX_RCU_AHB_BE_PCIE_S 0x00000010 /* Configure AHB slave port that connects to PCIe RC in little endian */
+#define IFX_RCU_AHB_BE_XBAR_M 0x00000002 /* Configure AHB master port that connects to XBAR in big endian */
+#define CONFIG_IFX_PCIE_PHY_36MHZ_MODE
+
+#define IFX_PMU1_MODULE_PCIE_PHY (0)
+#define IFX_PMU1_MODULE_PCIE_CTRL (1)
+#define IFX_PMU1_MODULE_PDI (4)
+#define IFX_PMU1_MODULE_MSI (5)
+
+#define IFX_PMU_MODULE_PCIE_L0_CLK (31)
+
+
+static inline void pcie_ep_gpio_rst_init(int pcie_port)
+{
+}
+
+static inline void pcie_ahb_pmu_setup(void)
+{
+ struct clk *clk;
+ clk = clk_get_sys("ltq_pcie", "ahb");
+ clk_enable(clk);
+ //ltq_pmu_enable(PMU_AHBM | PMU_AHBS);
+}
+
+static inline void pcie_rcu_endian_setup(int pcie_port)
+{
+ u32 reg;
+
+ reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
+#ifdef CONFIG_IFX_PCIE_HW_SWAP
+ reg |= IFX_RCU_AHB_BE_PCIE_M;
+ reg |= IFX_RCU_AHB_BE_PCIE_S;
+ reg &= ~IFX_RCU_AHB_BE_XBAR_M;
+#else
+ reg |= IFX_RCU_AHB_BE_PCIE_M;
+ reg &= ~IFX_RCU_AHB_BE_PCIE_S;
+ reg &= ~IFX_RCU_AHB_BE_XBAR_M;
+#endif /* CONFIG_IFX_PCIE_HW_SWAP */
+ IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
+ IFX_PCIE_PRINT(PCIE_MSG_REG, "%s IFX_RCU_AHB_ENDIAN: 0x%08x\n", __func__, IFX_REG_R32(IFX_RCU_AHB_ENDIAN));
+}
+
+static inline void pcie_phy_pmu_enable(int pcie_port)
+{
+ struct clk *clk;
+ clk = clk_get_sys("ltq_pcie", "phy");
+ clk_enable(clk);
+ //ltq_pmu1_enable(1<<IFX_PMU1_MODULE_PCIE_PHY);
+}
+
+static inline void pcie_phy_pmu_disable(int pcie_port)
+{
+ struct clk *clk;
+ clk = clk_get_sys("ltq_pcie", "phy");
+ clk_disable(clk);
+ //ltq_pmu1_disable(1<<IFX_PMU1_MODULE_PCIE_PHY);
+}
+
+static inline void pcie_pdi_big_endian(int pcie_port)
+{
+ u32 reg;
+
+ /* SRAM2PDI endianness control. */
+ reg = IFX_REG_R32(IFX_RCU_AHB_ENDIAN);
+ /* Config AHB->PCIe and PDI endianness */
+ reg |= IFX_RCU_AHB_BE_PCIE_PDI;
+ IFX_REG_W32(reg, IFX_RCU_AHB_ENDIAN);
+}
+
+static inline void pcie_pdi_pmu_enable(int pcie_port)
+{
+ struct clk *clk;
+ clk = clk_get_sys("ltq_pcie", "pdi");
+ clk_enable(clk);
+ //ltq_pmu1_enable(1<<IFX_PMU1_MODULE_PDI);
+}
+
+static inline void pcie_core_rst_assert(int pcie_port)
+{
+ u32 reg;
+
+ reg = IFX_REG_R32(IFX_RCU_RST_REQ);
+
+ /* Reset PCIe PHY & core (bit 22); bit 26 may be affected if written directly */
+ reg |= 0x00400000;
+ IFX_REG_W32(reg, IFX_RCU_RST_REQ);
+}
+
+static inline void pcie_core_rst_deassert(int pcie_port)
+{
+ u32 reg;
+
+ /* Ensure at least a one microsecond delay */
+ udelay(1);
+
+ /* Reset PCIe PHY & Core, bit 22 */
+ reg = IFX_REG_R32(IFX_RCU_RST_REQ);
+ reg &= ~0x00400000;
+ IFX_REG_W32(reg, IFX_RCU_RST_REQ);
+}
+
+static inline void pcie_phy_rst_assert(int pcie_port)
+{
+ u32 reg;
+
+ reg = IFX_REG_R32(IFX_RCU_RST_REQ);
+ reg |= 0x00001000; /* Bit 12 */
+ IFX_REG_W32(reg, IFX_RCU_RST_REQ);
+}
+
+static inline void pcie_phy_rst_deassert(int pcie_port)
+{
+ u32 reg;
+
+ /* Ensure at least a one microsecond delay */
+ udelay(1);
+
+ reg = IFX_REG_R32(IFX_RCU_RST_REQ);
+ reg &= ~0x00001000; /* Bit 12 */
+ IFX_REG_W32(reg, IFX_RCU_RST_REQ);
+}
+
+static inline void pcie_device_rst_assert(int pcie_port)
+{
+ gpio_set_value(IFX_PCIE_GPIO_RESET, 0);
+ // ifx_gpio_output_clear(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
+}
+
+static inline void pcie_device_rst_deassert(int pcie_port)
+{
+ mdelay(100);
+ gpio_set_value(IFX_PCIE_GPIO_RESET, 1);
+// ifx_gpio_output_set(IFX_PCIE_GPIO_RESET, ifx_pcie_gpio_module_id);
+}
+
+static inline void pcie_core_pmu_setup(int pcie_port)
+{
+ struct clk *clk;
+ clk = clk_get_sys("ltq_pcie", "ctl");
+ clk_enable(clk);
+ clk = clk_get_sys("ltq_pcie", "bus");
+ clk_enable(clk);
+
+ //ltq_pmu1_enable(1 << IFX_PMU1_MODULE_PCIE_CTRL);
+ //ltq_pmu_enable(1 << IFX_PMU_MODULE_PCIE_L0_CLK);
+}
+
+static inline void pcie_msi_init(int pcie_port)
+{
+ struct clk *clk;
+ pcie_msi_pic_init(pcie_port);
+ clk = clk_get_sys("ltq_pcie", "msi");
+ clk_enable(clk);
+ //ltq_pmu1_enable(1 << IFX_PMU1_MODULE_MSI);
+}
+
+static inline u32
+ifx_pcie_bus_nr_deduct(u32 bus_number, int pcie_port)
+{
+ u32 tbus_number = bus_number;
+
+#ifdef CONFIG_IFX_PCI
+ if (pcibios_host_nr() > 1) {
+ tbus_number -= pcibios_1st_host_bus_nr();
+ }
+#endif /* CONFIG_IFX_PCI */
+ return tbus_number;
+}
+
+static inline u32
+ifx_pcie_bus_enum_hack(struct pci_bus *bus, u32 devfn, int where, u32 value, int pcie_port, int read)
+{
+ struct pci_dev *pdev;
+ u32 tvalue = value;
+
+ /* Sanity check */
+ pdev = pci_get_slot(bus, devfn);
+ if (pdev == NULL) {
+ return tvalue;
+ }
+
+ /* Only care about PCI bridge */
+ if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ return tvalue;
+ }
+
+ if (read) { /* Read hack */
+ #ifdef CONFIG_IFX_PCI
+ if (pcibios_host_nr() > 1) {
+ tvalue = ifx_pcie_bus_enum_read_hack(where, tvalue);
+ }
+ #endif /* CONFIG_IFX_PCI */
+ }
+ else { /* Write hack */
+ #ifdef CONFIG_IFX_PCI
+ if (pcibios_host_nr() > 1) {
+ tvalue = ifx_pcie_bus_enum_write_hack(where, tvalue);
+ }
+ #endif
+ }
+ return tvalue;
+}
+
+#endif /* IFXMIPS_PCIE_VR9_H */
+
diff --git a/target/linux/lantiq/files-3.3/drivers/i2c/busses/i2c-falcon.c b/target/linux/lantiq/files-3.3/drivers/i2c/busses/i2c-falcon.c
new file mode 100644
index 0000000000..f97ddb601a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/i2c/busses/i2c-falcon.c
@@ -0,0 +1,1040 @@
+/*
+ * Lantiq FALC(tm) ON - I2C bus adapter
+ *
+ * Parts based on i2c-designware.c and other i2c drivers from Linux 2.6.33
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+/*
+ * CURRENT ISSUES:
+ * - no high speed support
+ * - supports only master mode
+ * - ten bit mode is not tested (no slave devices)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include <lantiq_soc.h>
+
+/* I2C Identification Register */
+/* Module ID */
+#define I2C_ID_ID_MASK 0x0000FF00
+/* field offset */
+#define I2C_ID_ID_OFFSET 8
+/* Revision */
+#define I2C_ID_REV_MASK 0x000000FF
+/* field offset */
+#define I2C_ID_REV_OFFSET 0
+
+/* I2C Error Interrupt Request Source Status Register */
+/* TXF_OFL */
+#define I2C_ERR_IRQSS_TXF_OFL 0x00000008
+/* TXF_UFL */
+#define I2C_ERR_IRQSS_TXF_UFL 0x00000004
+/* RXF_OFL */
+#define I2C_ERR_IRQSS_RXF_OFL 0x00000002
+/* RXF_UFL */
+#define I2C_ERR_IRQSS_RXF_UFL 0x00000001
+
+/* I2C Bus Status Register */
+/* Bus Status */
+#define I2C_BUS_STAT_BS_MASK 0x00000003
+/* I2C Bus is free. */
+#define I2C_BUS_STAT_BS_FREE 0x00000000
+/*
+ * The device is working as master and has claimed the control
+ * on the I2C-bus (busy master).
+ */
+#define I2C_BUS_STAT_BS_BM 0x00000002
+
+/* I2C Interrupt Clear Register */
+/* Clear */
+#define I2C_ICR_BREQ_INT_CLR 0x00000008
+/* Clear */
+#define I2C_ICR_LBREQ_INT_CLR 0x00000004
+
+/* I2C RUN Control Register */
+/* Enable */
+#define I2C_RUN_CTRL_RUN_EN 0x00000001
+
+/* I2C Kernel Clock Control Register */
+/* field offset */
+#define I2C_CLC_RMC_OFFSET 8
+/* Enable */
+#define I2C_IMSC_I2C_P_INT_EN 0x00000020
+/* Enable */
+#define I2C_IMSC_I2C_ERR_INT_EN 0x00000010
+/* Enable */
+#define I2C_IMSC_BREQ_INT_EN 0x00000008
+/* Enable */
+#define I2C_IMSC_LBREQ_INT_EN 0x00000004
+
+/* I2C Fractional Divider Configuration Register */
+/* field offset */
+#define I2C_FDIV_CFG_INC_OFFSET 16
+/* field offset */
+#define I2C_FDIV_CFG_DEC_OFFSET 0
+
+/* I2C Fractional Divider (highspeed mode) Configuration Register */
+/* field offset */
+#define I2C_FDIV_HIGH_CFG_INC_OFFSET 16
+/* field offset */
+#define I2C_FDIV_HIGH_CFG_DEC_OFFSET 0
+
+/* I2C Address Register */
+/* Enable */
+#define I2C_ADDR_CFG_SOPE_EN 0x00200000
+/* Enable */
+#define I2C_ADDR_CFG_SONA_EN 0x00100000
+/* Enable */
+#define I2C_ADDR_CFG_MnS_EN 0x00080000
+
+/* I2C Protocol Interrupt Request Source Status Register */
+/* RX */
+#define I2C_P_IRQSS_RX 0x00000040
+/* TX_END */
+#define I2C_P_IRQSS_TX_END 0x00000020
+/* NACK */
+#define I2C_P_IRQSS_NACK 0x00000010
+/* AL */
+#define I2C_P_IRQSS_AL 0x00000008
+
+/* I2C Raw Interrupt Status Register */
+/* Read: Interrupt occurred. */
+#define I2C_RIS_I2C_P_INT_INTOCC 0x00000020
+/* Read: Interrupt occurred. */
+#define I2C_RIS_I2C_ERR_INT_INTOCC 0x00000010
+
+/* I2C End Data Control Register */
+/*
+ * Set End of Transmission - Note: Do not write '1' to this bit when bus is
+ * free. This will cause an abort after the first byte when a new transfer
+ * is started.
+ */
+#define I2C_ENDD_CTRL_SETEND 0x00000002
+/* TX FIFO Flow Control */
+#define I2C_FIFO_CFG_TXFC 0x00020000
+/* RX FIFO Flow Control */
+#define I2C_FIFO_CFG_RXFC 0x00010000
+/* Word aligned (character alignment of four characters) */
+#define I2C_FIFO_CFG_TXFA_TXFA2 0x00002000
+/* Word aligned (character alignment of four characters) */
+#define I2C_FIFO_CFG_RXFA_RXFA2 0x00000200
+/* 1 word */
+#define I2C_FIFO_CFG_TXBS_TXBS0 0x00000000
+/* 1 word */
+#define I2C_FIFO_CFG_RXBS_RXBS0 0x00000000
+
+
+/* I2C register structure */
+struct gpon_reg_i2c {
+ /* I2C Kernel Clock Control Register */
+ unsigned int clc; /* 0x00000000 */
+ /* Reserved */
+ unsigned int res_0; /* 0x00000004 */
+ /* I2C Identification Register */
+ unsigned int id; /* 0x00000008 */
+ /* Reserved */
+ unsigned int res_1; /* 0x0000000C */
+ /*
+ * I2C RUN Control Register - This register enables and disables the I2C
+ * peripheral. Before enabling, the I2C has to be configured properly.
+ * After enabling, no configuration is possible.
+ */
+ unsigned int run_ctrl; /* 0x00000010 */
+ /*
+ * I2C End Data Control Register - This register is used to either turn
+ * around the data transmission direction or to address another slave
+ * without sending a stop condition. The software can also stop the
+ * slave-transmitter by sending a not-acknowledge when working as
+ * master-receiver, or even stop data transmission immediately when
+ * operating as master-transmitter. Writing to the bits of this
+ * control register is only effective when in MASTER RECEIVES BYTES,
+ * MASTER TRANSMITS BYTES, MASTER RESTART or SLAVE RECEIVE BYTES state
+ */
+ unsigned int endd_ctrl; /* 0x00000014 */
+ /*
+ * I2C Fractional Divider Configuration Register - This register is
+ * used to program the fractional divider of the I2C bus. Before the
+ * peripheral is switched on by setting the RUN bit, the two (fixed)
+ * values for the two operating frequencies are programmed into these
+ * (configuration) registers. The register FDIV_HIGH_CFG has the same
+ * layout as I2C_FDIV_CFG.
+ */
+ unsigned int fdiv_cfg; /* 0x00000018 */
+ /*
+ * I2C Fractional Divider (high-speed mode) Configuration Register -
+ * This register is used to program the fractional divider of the I2C
+ * bus. Before the peripheral is switched on by setting the RUN bit,
+ * the two (fixed) values for the two operating frequencies are
+ * programmed into these (configuration) registers. The register
+ * FDIV_CFG has the same layout as I2C_FDIV_CFG.
+ */
+ unsigned int fdiv_high_cfg; /* 0x0000001C */
+ /* I2C Address Configuration Register */
+ unsigned int addr_cfg; /* 0x00000020 */
+ /*
+ * I2C Bus Status Register - This register gives status information
+ * about the I2C bus. This additional information can be used by the
+ * software to start proper actions.
+ */
+ unsigned int bus_stat; /* 0x00000024 */
+ /* I2C FIFO Configuration Register */
+ unsigned int fifo_cfg; /* 0x00000028 */
+ /* I2C Maximum Received Packet Size Register */
+ unsigned int mrps_ctrl; /* 0x0000002C */
+ /* I2C Received Packet Size Status Register */
+ unsigned int rps_stat; /* 0x00000030 */
+ /* I2C Transmit Packet Size Register */
+ unsigned int tps_ctrl; /* 0x00000034 */
+ /* I2C Filled FIFO Stages Status Register */
+ unsigned int ffs_stat; /* 0x00000038 */
+ /* Reserved */
+ unsigned int res_2; /* 0x0000003C */
+ /* I2C Timing Configuration Register */
+ unsigned int tim_cfg; /* 0x00000040 */
+ /* Reserved */
+ unsigned int res_3[7]; /* 0x00000044 */
+ /* I2C Error Interrupt Request Source Mask Register */
+ unsigned int err_irqsm; /* 0x00000060 */
+ /* I2C Error Interrupt Request Source Status Register */
+ unsigned int err_irqss; /* 0x00000064 */
+ /* I2C Error Interrupt Request Source Clear Register */
+ unsigned int err_irqsc; /* 0x00000068 */
+ /* Reserved */
+ unsigned int res_4; /* 0x0000006C */
+ /* I2C Protocol Interrupt Request Source Mask Register */
+ unsigned int p_irqsm; /* 0x00000070 */
+ /* I2C Protocol Interrupt Request Source Status Register */
+ unsigned int p_irqss; /* 0x00000074 */
+ /* I2C Protocol Interrupt Request Source Clear Register */
+ unsigned int p_irqsc; /* 0x00000078 */
+ /* Reserved */
+ unsigned int res_5; /* 0x0000007C */
+ /* I2C Raw Interrupt Status Register */
+ unsigned int ris; /* 0x00000080 */
+ /* I2C Interrupt Mask Control Register */
+ unsigned int imsc; /* 0x00000084 */
+ /* I2C Masked Interrupt Status Register */
+ unsigned int mis; /* 0x00000088 */
+ /* I2C Interrupt Clear Register */
+ unsigned int icr; /* 0x0000008C */
+ /* I2C Interrupt Set Register */
+ unsigned int isr; /* 0x00000090 */
+ /* I2C DMA Enable Register */
+ unsigned int dmae; /* 0x00000094 */
+ /* Reserved */
+ unsigned int res_6[8154]; /* 0x00000098 */
+ /* I2C Transmit Data Register */
+ unsigned int txd; /* 0x00008000 */
+ /* Reserved */
+ unsigned int res_7[4095]; /* 0x00008004 */
+ /* I2C Receive Data Register */
+ unsigned int rxd; /* 0x0000C000 */
+ /* Reserved */
+ unsigned int res_8[4095]; /* 0x0000C004 */
+};
+
+/* mapping for access macros */
+#define i2c ((struct gpon_reg_i2c *)priv->membase)
+#define reg_r32(reg) __raw_readl(reg)
+#define reg_w32(val, reg) __raw_writel(val, reg)
+#define reg_w32_mask(clear, set, reg) \
+ reg_w32((reg_r32(reg) & ~(clear)) | (set), reg)
+#define reg_r32_table(reg, idx) reg_r32(&((uint32_t *)&reg)[idx])
+#define reg_w32_table(val, reg, idx) reg_w32(val, &((uint32_t *)&reg)[idx])
+
+#define i2c_r32(reg) reg_r32(&i2c->reg)
+#define i2c_w32(val, reg) reg_w32(val, &i2c->reg)
+#define i2c_w32_mask(clear, set, reg) reg_w32_mask(clear, set, &i2c->reg)
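+
+/*
+ * Usage sketch (illustrative): the i2c_* accessors expand against a local
+ * 'priv' pointer, so they may only be used inside functions that have a
+ * 'struct falcon_i2c *priv' in scope; e.g. i2c_r32(bus_stat) reads
+ * priv->membase->bus_stat.
+ */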
+
+#define DRV_NAME "i2c-falcon"
+#define DRV_VERSION "1.01"
+
+#define FALCON_I2C_BUSY_TIMEOUT 20 /* ms */
+
+#ifdef DEBUG
+#define FALCON_I2C_XFER_TIMEOUT (25 * HZ)
+#else
+#define FALCON_I2C_XFER_TIMEOUT HZ
+#endif
+#if defined(DEBUG) && 0
+#define PRINTK(arg...) pr_info(arg)
+#else
+#define PRINTK(arg...) do {} while (0)
+#endif
+
+#define FALCON_I2C_IMSC_DEFAULT_MASK (I2C_IMSC_I2C_P_INT_EN | \
+ I2C_IMSC_I2C_ERR_INT_EN)
+
+#define FALCON_I2C_ARB_LOST (1 << 0)
+#define FALCON_I2C_NACK (1 << 1)
+#define FALCON_I2C_RX_UFL (1 << 2)
+#define FALCON_I2C_RX_OFL (1 << 3)
+#define FALCON_I2C_TX_UFL (1 << 4)
+#define FALCON_I2C_TX_OFL (1 << 5)
+
+struct falcon_i2c {
+ struct mutex mutex;
+
+ enum {
+ FALCON_I2C_MODE_100 = 1,
+ FALCON_I2C_MODE_400 = 2,
+ FALCON_I2C_MODE_3400 = 3
+ } mode; /* current speed mode */
+
+ struct clk *clk; /* clock input for i2c hardware block */
+ struct gpon_reg_i2c __iomem *membase; /* base of mapped registers */
+ int irq_lb, irq_b, irq_err, irq_p; /* last burst, burst, error,
+ protocol IRQs */
+
+ struct i2c_adapter adap;
+ struct device *dev;
+
+ struct completion cmd_complete;
+
+ /* message transfer data */
+ /* current message */
+ struct i2c_msg *current_msg;
+ /* number of messages to handle */
+ int msgs_num;
+ /* current buffer */
+ u8 *msg_buf;
+ /* remaining length of current buffer */
+ u32 msg_buf_len;
+ /* error status of the current transfer */
+ int msg_err;
+
+ /* master status codes */
+ enum {
+ STATUS_IDLE,
+ STATUS_ADDR, /* address phase */
+ STATUS_WRITE,
+ STATUS_READ,
+ STATUS_READ_END,
+ STATUS_STOP
+ } status;
+};
+
+static irqreturn_t falcon_i2c_isr(int irq, void *dev_id);
+
+static inline void enable_burst_irq(struct falcon_i2c *priv)
+{
+ i2c_w32_mask(0, I2C_IMSC_LBREQ_INT_EN | I2C_IMSC_BREQ_INT_EN, imsc);
+}
+static inline void disable_burst_irq(struct falcon_i2c *priv)
+{
+ i2c_w32_mask(I2C_IMSC_LBREQ_INT_EN | I2C_IMSC_BREQ_INT_EN, 0, imsc);
+}
+
+static void prepare_msg_send_addr(struct falcon_i2c *priv)
+{
+ struct i2c_msg *msg = priv->current_msg;
+ int rd = !!(msg->flags & I2C_M_RD);
+ u16 addr = msg->addr;
+
+ /* new i2c_msg */
+ priv->msg_buf = msg->buf;
+ priv->msg_buf_len = msg->len;
+ if (rd)
+ priv->status = STATUS_READ;
+ else
+ priv->status = STATUS_WRITE;
+
+ /* send slave address */
+ if (msg->flags & I2C_M_TEN) {
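+ /*
+ * 10-bit addressing: the first byte carries the reserved 11110xx
+ * pattern plus address bits 9:8 and the R/W bit, the second byte
+ * carries address bits 7:0.
+ */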
+ i2c_w32(0xf0 | ((addr & 0x300) >> 7) | rd, txd);
+ i2c_w32(addr & 0xff, txd);
+ } else
+ i2c_w32((addr & 0x7f) << 1 | rd, txd);
+}
+
+static void set_tx_len(struct falcon_i2c *priv)
+{
+ struct i2c_msg *msg = priv->current_msg;
+ int len = (msg->flags & I2C_M_TEN) ? 2 : 1;
+
+ PRINTK("set_tx_len %cX\n", (msg->flags & I2C_M_RD) ? ('R') : ('T'));
+
+ priv->status = STATUS_ADDR;
+
+ if (!(msg->flags & I2C_M_RD)) {
+ len += msg->len;
+ } else {
+ /* set maximum received packet size (before rx int!) */
+ i2c_w32(msg->len, mrps_ctrl);
+ }
+ i2c_w32(len, tps_ctrl);
+ enable_burst_irq(priv);
+}
+
+static int falcon_i2c_hw_init(struct i2c_adapter *adap)
+{
+ struct falcon_i2c *priv = i2c_get_adapdata(adap);
+
+ /* disable bus */
+ i2c_w32_mask(I2C_RUN_CTRL_RUN_EN, 0, run_ctrl);
+
+#ifndef DEBUG
+ /* set normal operation clock divider */
+ i2c_w32(1 << I2C_CLC_RMC_OFFSET, clc);
+#else
+ /* for debugging a higher divider value! */
+ i2c_w32(0xF0 << I2C_CLC_RMC_OFFSET, clc);
+#endif
+
+ /* set frequency */
+ if (priv->mode == FALCON_I2C_MODE_100) {
+ dev_dbg(priv->dev, "set standard mode (100 kHz)\n");
+ i2c_w32(0, fdiv_high_cfg);
+ i2c_w32((1 << I2C_FDIV_CFG_INC_OFFSET) |
+ (499 << I2C_FDIV_CFG_DEC_OFFSET),
+ fdiv_cfg);
+ } else if (priv->mode == FALCON_I2C_MODE_400) {
+ dev_dbg(priv->dev, "set fast mode (400 kHz)\n");
+ i2c_w32(0, fdiv_high_cfg);
+ i2c_w32((1 << I2C_FDIV_CFG_INC_OFFSET) |
+ (124 << I2C_FDIV_CFG_DEC_OFFSET),
+ fdiv_cfg);
+ } else if (priv->mode == FALCON_I2C_MODE_3400) {
+ dev_dbg(priv->dev, "set high mode (3.4 MHz)\n");
+ i2c_w32(0, fdiv_cfg);
+ /* TODO recalculate value for 100MHz input */
+ i2c_w32((41 << I2C_FDIV_HIGH_CFG_INC_OFFSET) |
+ (152 << I2C_FDIV_HIGH_CFG_DEC_OFFSET),
+ fdiv_high_cfg);
+ } else {
+ dev_warn(priv->dev, "unknown mode\n");
+ return -ENODEV;
+ }
+
+ /* configure fifo */
+ i2c_w32(I2C_FIFO_CFG_TXFC | /* tx fifo as flow controller */
+ I2C_FIFO_CFG_RXFC | /* rx fifo as flow controller */
+ I2C_FIFO_CFG_TXFA_TXFA2 | /* tx fifo 4-byte aligned */
+ I2C_FIFO_CFG_RXFA_RXFA2 | /* rx fifo 4-byte aligned */
+ I2C_FIFO_CFG_TXBS_TXBS0 | /* tx fifo burst size is 1 word */
+ I2C_FIFO_CFG_RXBS_RXBS0, /* rx fifo burst size is 1 word */
+ fifo_cfg);
+
+ /* configure address */
+ i2c_w32(I2C_ADDR_CFG_SOPE_EN | /* generate stop when no more data
+ in the fifo */
+ I2C_ADDR_CFG_SONA_EN | /* generate stop when NA received */
+ I2C_ADDR_CFG_MnS_EN | /* we are master device */
+ 0, /* our slave address (not used!) */
+ addr_cfg);
+
+ /* enable bus */
+ i2c_w32_mask(0, I2C_RUN_CTRL_RUN_EN, run_ctrl);
+
+ return 0;
+}
+
+static int falcon_i2c_wait_bus_not_busy(struct falcon_i2c *priv)
+{
+ int timeout = FALCON_I2C_BUSY_TIMEOUT;
+
+ while ((i2c_r32(bus_stat) & I2C_BUS_STAT_BS_MASK)
+ != I2C_BUS_STAT_BS_FREE) {
+ if (timeout <= 0) {
+ dev_warn(priv->dev, "timeout waiting for bus ready\n");
+ return -ETIMEDOUT;
+ }
+ timeout--;
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static void falcon_i2c_tx(struct falcon_i2c *priv, int last)
+{
+ if (priv->msg_buf_len && priv->msg_buf) {
+ i2c_w32(*priv->msg_buf, txd);
+
+ if (--priv->msg_buf_len)
+ priv->msg_buf++;
+ else
+ priv->msg_buf = NULL;
+ } else
+ last = 1;
+
+ if (last)
+ disable_burst_irq(priv);
+}
+
+static void falcon_i2c_rx(struct falcon_i2c *priv, int last)
+{
+ u32 fifo_stat, timeout;
+ if (priv->msg_buf_len && priv->msg_buf) {
+ timeout = 5000000;
+ do {
+ fifo_stat = i2c_r32(ffs_stat);
+ } while (!fifo_stat && --timeout);
+ if (!timeout) {
+ last = 1;
+ PRINTK("\nrx timeout\n");
+ goto err;
+ }
+ while (fifo_stat) {
+ *priv->msg_buf = i2c_r32(rxd);
+ if (--priv->msg_buf_len)
+ priv->msg_buf++;
+ else {
+ priv->msg_buf = NULL;
+ last = 1;
+ break;
+ }
+ #if 0
+ fifo_stat = i2c_r32(ffs_stat);
+ #else
+ /* do not read more than burst size, otherwise no "last
+ burst" is generated and the transaction is blocked! */
+ fifo_stat = 0;
+ #endif
+ }
+ } else {
+ last = 1;
+ }
+err:
+ if (last) {
+ disable_burst_irq(priv);
+
+ if (priv->status == STATUS_READ_END) {
+ /* do the STATUS_STOP and complete() here, as sometimes
+ the tx_end is already seen before this is finished */
+ priv->status = STATUS_STOP;
+ complete(&priv->cmd_complete);
+ } else {
+ i2c_w32(I2C_ENDD_CTRL_SETEND, endd_ctrl);
+ priv->status = STATUS_READ_END;
+ }
+ }
+}
+
+static void falcon_i2c_xfer_init(struct falcon_i2c *priv)
+{
+ /* enable interrupts */
+ i2c_w32(FALCON_I2C_IMSC_DEFAULT_MASK, imsc);
+
+ /* trigger transfer of first msg */
+ set_tx_len(priv);
+}
+
+static void dump_msgs(struct i2c_msg msgs[], int num, int rx)
+{
+#if defined(DEBUG)
+ int i, j;
+ pr_info("Messages %d %s\n", num, rx ? "out" : "in");
+ for (i = 0; i < num; i++) {
+ pr_info("%2d %cX Msg(%d) addr=0x%X: ", i,
+ (msgs[i].flags & I2C_M_RD) ? ('R') : ('T'),
+ msgs[i].len, msgs[i].addr);
+ if (!(msgs[i].flags & I2C_M_RD) || rx) {
+ for (j = 0; j < msgs[i].len; j++)
+ printk("%02X ", msgs[i].buf[j]);
+ }
+ printk("\n");
+ }
+#endif
+}
+
+static void falcon_i2c_release_bus(struct falcon_i2c *priv)
+{
+ if ((i2c_r32(bus_stat) & I2C_BUS_STAT_BS_MASK) == I2C_BUS_STAT_BS_BM)
+ i2c_w32(I2C_ENDD_CTRL_SETEND, endd_ctrl);
+}
+
+static int falcon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
+ int num)
+{
+ struct falcon_i2c *priv = i2c_get_adapdata(adap);
+ int ret;
+
+ dev_dbg(priv->dev, "xfer %u messages\n", num);
+ dump_msgs(msgs, num, 0);
+
+ mutex_lock(&priv->mutex);
+
+ INIT_COMPLETION(priv->cmd_complete);
+ priv->current_msg = msgs;
+ priv->msgs_num = num;
+ priv->msg_err = 0;
+ priv->status = STATUS_IDLE;
+
+ /* wait for the bus to become ready */
+ ret = falcon_i2c_wait_bus_not_busy(priv);
+ if (ret)
+ goto done;
+
+ while (priv->msgs_num) {
+ /* start the transfers */
+ falcon_i2c_xfer_init(priv);
+
+ /* wait for transfers to complete */
+ ret = wait_for_completion_interruptible_timeout(
+ &priv->cmd_complete, FALCON_I2C_XFER_TIMEOUT);
+ if (ret == 0) {
+ dev_err(priv->dev, "controller timed out\n");
+ falcon_i2c_hw_init(adap);
+ ret = -ETIMEDOUT;
+ goto done;
+ } else if (ret < 0)
+ goto done;
+
+ if (priv->msg_err) {
+ if (priv->msg_err & FALCON_I2C_NACK)
+ ret = -ENXIO;
+ else
+ ret = -EREMOTEIO;
+ goto done;
+ }
+ if (--priv->msgs_num)
+ priv->current_msg++;
+ }
+ /* no error? */
+ ret = num;
+
+done:
+ falcon_i2c_release_bus(priv);
+
+ mutex_unlock(&priv->mutex);
+
+ if (ret >= 0)
+ dump_msgs(msgs, num, 1);
+
+ PRINTK("XFER ret %d\n", ret);
+ return ret;
+}
+
+static irqreturn_t falcon_i2c_isr_burst(int irq, void *dev_id)
+{
+ struct falcon_i2c *priv = dev_id;
+ struct i2c_msg *msg = priv->current_msg;
+ int last = (irq == priv->irq_lb);
+
+ if (last)
+ PRINTK("LB ");
+ else
+ PRINTK("B ");
+
+ if (msg->flags & I2C_M_RD) {
+ switch (priv->status) {
+ case STATUS_ADDR:
+ PRINTK("X");
+ prepare_msg_send_addr(priv);
+ disable_burst_irq(priv);
+ break;
+ case STATUS_READ:
+ case STATUS_READ_END:
+ PRINTK("R");
+ falcon_i2c_rx(priv, last);
+ break;
+ default:
+ disable_burst_irq(priv);
+ PRINTK("Status R %d\n", priv->status);
+ break;
+ }
+ } else {
+ switch (priv->status) {
+ case STATUS_ADDR:
+ PRINTK("x");
+ prepare_msg_send_addr(priv);
+ break;
+ case STATUS_WRITE:
+ PRINTK("w");
+ falcon_i2c_tx(priv, last);
+ break;
+ default:
+ disable_burst_irq(priv);
+ PRINTK("Status W %d\n", priv->status);
+ break;
+ }
+ }
+
+ i2c_w32(I2C_ICR_BREQ_INT_CLR | I2C_ICR_LBREQ_INT_CLR, icr);
+ return IRQ_HANDLED;
+}
+
+static void falcon_i2c_isr_prot(struct falcon_i2c *priv)
+{
+ u32 i_pro = i2c_r32(p_irqss);
+
+ PRINTK("i2c-p");
+
+ /* not acknowledge */
+ if (i_pro & I2C_P_IRQSS_NACK) {
+ priv->msg_err |= FALCON_I2C_NACK;
+ PRINTK(" nack");
+ }
+
+ /* arbitration lost */
+ if (i_pro & I2C_P_IRQSS_AL) {
+ priv->msg_err |= FALCON_I2C_ARB_LOST;
+ PRINTK(" arb-lost");
+ }
+ /* tx -> rx switch */
+ if (i_pro & I2C_P_IRQSS_RX)
+ PRINTK(" rx");
+
+ /* tx end */
+ if (i_pro & I2C_P_IRQSS_TX_END)
+ PRINTK(" txend");
+ PRINTK("\n");
+
+ if (!priv->msg_err) {
+ /* tx -> rx switch */
+ if (i_pro & I2C_P_IRQSS_RX) {
+ priv->status = STATUS_READ;
+ enable_burst_irq(priv);
+ }
+ if (i_pro & I2C_P_IRQSS_TX_END) {
+ if (priv->status == STATUS_READ)
+ priv->status = STATUS_READ_END;
+ else {
+ disable_burst_irq(priv);
+ priv->status = STATUS_STOP;
+ }
+ }
+ }
+
+ i2c_w32(i_pro, p_irqsc);
+}
+
+static irqreturn_t falcon_i2c_isr(int irq, void *dev_id)
+{
+ u32 i_raw, i_err = 0;
+ struct falcon_i2c *priv = dev_id;
+
+ i_raw = i2c_r32(mis);
+ PRINTK("i_raw 0x%08X\n", i_raw);
+
+ /* error interrupt */
+ if (i_raw & I2C_RIS_I2C_ERR_INT_INTOCC) {
+ i_err = i2c_r32(err_irqss);
+ PRINTK("i_err 0x%08X bus_stat 0x%04X\n",
+ i_err, i2c_r32(bus_stat));
+
+ /* tx fifo overflow (8) */
+ if (i_err & I2C_ERR_IRQSS_TXF_OFL)
+ priv->msg_err |= FALCON_I2C_TX_OFL;
+
+ /* tx fifo underflow (4) */
+ if (i_err & I2C_ERR_IRQSS_TXF_UFL)
+ priv->msg_err |= FALCON_I2C_TX_UFL;
+
+ /* rx fifo overflow (2) */
+ if (i_err & I2C_ERR_IRQSS_RXF_OFL)
+ priv->msg_err |= FALCON_I2C_RX_OFL;
+
+ /* rx fifo underflow (1) */
+ if (i_err & I2C_ERR_IRQSS_RXF_UFL)
+ priv->msg_err |= FALCON_I2C_RX_UFL;
+
+ i2c_w32(i_err, err_irqsc);
+ }
+
+ /* protocol interrupt */
+ if (i_raw & I2C_RIS_I2C_P_INT_INTOCC)
+ falcon_i2c_isr_prot(priv);
+
+ if ((priv->msg_err) || (priv->status == STATUS_STOP))
+ complete(&priv->cmd_complete);
+
+ return IRQ_HANDLED;
+}
+
+static u32 falcon_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C |
+ I2C_FUNC_10BIT_ADDR |
+ I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm falcon_i2c_algorithm = {
+ .master_xfer = falcon_i2c_xfer,
+ .functionality = falcon_i2c_functionality,
+};
+
+static int __devinit falcon_i2c_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct falcon_i2c *priv;
+ struct i2c_adapter *adap;
+ struct resource *mmres, *ioarea,
+ *irqres_lb, *irqres_b, *irqres_err, *irqres_p;
+ struct clk *clk;
+
+ dev_dbg(&pdev->dev, "probing\n");
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irqres_lb = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "i2c_lb");
+ irqres_b = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "i2c_b");
+ irqres_err = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+ "i2c_err");
+ irqres_p = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "i2c_p");
+
+ if (!mmres || !irqres_lb || !irqres_b || !irqres_err || !irqres_p) {
+ dev_err(&pdev->dev, "no resources\n");
+ return -ENODEV;
+ }
+
+ clk = clk_get_fpi();
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get fpi clk\n");
+ return -ENOENT;
+ }
+
+ if (clk_get_rate(clk) != 100000000) {
+ dev_err(&pdev->dev, "input clock is not 100MHz\n");
+ return -ENOENT;
+ }
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get i2c clk\n");
+ return -ENOENT;
+ }
+ clk_activate(clk);
+ /* allocate private data */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "can't allocate private data\n");
+ return -ENOMEM;
+ }
+
+ adap = &priv->adap;
+ i2c_set_adapdata(adap, priv);
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ strlcpy(adap->name, DRV_NAME "-adapter", sizeof(adap->name));
+ adap->algo = &falcon_i2c_algorithm;
+
+ priv->mode = FALCON_I2C_MODE_100;
+ priv->clk = clk;
+ priv->dev = &pdev->dev;
+
+ init_completion(&priv->cmd_complete);
+ mutex_init(&priv->mutex);
+
+ if (ltq_gpio_request(&pdev->dev, 107, 0, 0, DRV_NAME":sda") ||
+ ltq_gpio_request(&pdev->dev, 108, 0, 0, DRV_NAME":scl"))
+ {
+ dev_err(&pdev->dev, "I2C gpios not available\n");
+ ret = -ENXIO;
+ goto err_free_priv;
+ }
+
+ ioarea = request_mem_region(mmres->start, resource_size(mmres),
+ pdev->name);
+
+ if (ioarea == NULL) {
+ dev_err(&pdev->dev, "I2C region already claimed\n");
+ ret = -ENXIO;
+ goto err_free_gpio;
+ }
+
+ /* map memory */
+ priv->membase = ioremap_nocache(mmres->start & ~KSEG1,
+ resource_size(mmres));
+ if (priv->membase == NULL) {
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ priv->irq_lb = irqres_lb->start;
+ ret = request_irq(priv->irq_lb, falcon_i2c_isr_burst, IRQF_DISABLED,
+ irqres_lb->name, priv);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get last burst IRQ %d\n",
+ irqres_lb->start);
+ ret = -ENODEV;
+ goto err_unmap_mem;
+ }
+
+ priv->irq_b = irqres_b->start;
+ ret = request_irq(priv->irq_b, falcon_i2c_isr_burst, IRQF_DISABLED,
+ irqres_b->name, priv);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get burst IRQ %d\n",
+ irqres_b->start);
+ ret = -ENODEV;
+ goto err_free_lb_irq;
+ }
+
+ priv->irq_err = irqres_err->start;
+ ret = request_irq(priv->irq_err, falcon_i2c_isr, IRQF_DISABLED,
+ irqres_err->name, priv);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get error IRQ %d\n",
+ irqres_err->start);
+ ret = -ENODEV;
+ goto err_free_b_irq;
+ }
+
+ priv->irq_p = irqres_p->start;
+ ret = request_irq(priv->irq_p, falcon_i2c_isr, IRQF_DISABLED,
+ irqres_p->name, priv);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get protocol IRQ %d\n",
+ irqres_p->start);
+ ret = -ENODEV;
+ goto err_free_err_irq;
+ }
+
+ dev_dbg(&pdev->dev, "mapped io-space to %p\n", priv->membase);
+ dev_dbg(&pdev->dev, "use IRQs %d, %d, %d, %d\n", irqres_lb->start,
+ irqres_b->start, irqres_err->start, irqres_p->start);
+
+ /* add our adapter to the i2c stack */
+ ret = i2c_add_numbered_adapter(adap);
+ if (ret) {
+ dev_err(&pdev->dev, "can't register I2C adapter\n");
+ goto err_free_p_irq;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ i2c_set_adapdata(adap, priv);
+
+ /* print module version information */
+ dev_dbg(&pdev->dev, "module id=%u revision=%u\n",
+ (i2c_r32(id) & I2C_ID_ID_MASK) >> I2C_ID_ID_OFFSET,
+ (i2c_r32(id) & I2C_ID_REV_MASK) >> I2C_ID_REV_OFFSET);
+
+ /* initialize HW */
+ ret = falcon_i2c_hw_init(adap);
+ if (ret) {
+ dev_err(&pdev->dev, "can't configure adapter\n");
+ goto err_remove_adapter;
+ }
+
+ dev_info(&pdev->dev, "version %s\n", DRV_VERSION);
+
+ return 0;
+
+err_remove_adapter:
+ i2c_del_adapter(adap);
+ platform_set_drvdata(pdev, NULL);
+
+err_free_p_irq:
+ free_irq(priv->irq_p, priv);
+
+err_free_err_irq:
+ free_irq(priv->irq_err, priv);
+
+err_free_b_irq:
+ free_irq(priv->irq_b, priv);
+
+err_free_lb_irq:
+ free_irq(priv->irq_lb, priv);
+
+err_unmap_mem:
+ iounmap(priv->membase);
+
+err_release_region:
+ release_mem_region(mmres->start, resource_size(mmres));
+
+err_free_gpio:
+ gpio_free(108);
+ gpio_free(107);
+
+err_free_priv:
+ kfree(priv);
+
+ return ret;
+}
+
+static int __devexit falcon_i2c_remove(struct platform_device *pdev)
+{
+ struct falcon_i2c *priv = platform_get_drvdata(pdev);
+ struct resource *mmres;
+
+ /* disable bus */
+ i2c_w32_mask(I2C_RUN_CTRL_RUN_EN, 0, run_ctrl);
+
+ /* remove driver */
+ platform_set_drvdata(pdev, NULL);
+ i2c_del_adapter(&priv->adap);
+
+ free_irq(priv->irq_lb, priv);
+ free_irq(priv->irq_b, priv);
+ free_irq(priv->irq_err, priv);
+ free_irq(priv->irq_p, priv);
+
+ iounmap(priv->membase);
+
+ gpio_free(108);
+ gpio_free(107);
+
+ kfree(priv);
+
+ mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mmres->start, resource_size(mmres));
+
+ dev_dbg(&pdev->dev, "removed\n");
+
+ return 0;
+}
+
+static struct platform_driver falcon_i2c_driver = {
+ .probe = falcon_i2c_probe,
+ .remove = __devexit_p(falcon_i2c_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init falcon_i2c_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&falcon_i2c_driver);
+
+ if (ret)
+ pr_debug(DRV_NAME ": can't register platform driver\n");
+
+ return ret;
+}
+
+static void __exit falcon_i2c_exit(void)
+{
+ platform_driver_unregister(&falcon_i2c_driver);
+}
+
+module_init(falcon_i2c_init);
+module_exit(falcon_i2c_exit);
+
+MODULE_DESCRIPTION("Lantiq FALC(tm) ON - I2C bus adapter");
+MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
diff --git a/target/linux/lantiq/files-3.3/drivers/net/ethernet/lantiq_vrx200.c b/target/linux/lantiq/files-3.3/drivers/net/ethernet/lantiq_vrx200.c
new file mode 100644
index 0000000000..d79d3803cc
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/net/ethernet/lantiq_vrx200.c
@@ -0,0 +1,1358 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+
+#include <asm/checksum.h>
+
+#include <lantiq_soc.h>
+#include <xway_dma.h>
+#include <lantiq_platform.h>
+
+#define LTQ_SWITCH_BASE 0x1E108000
+#define LTQ_SWITCH_CORE_BASE LTQ_SWITCH_BASE
+#define LTQ_SWITCH_TOP_PDI_BASE LTQ_SWITCH_CORE_BASE
+#define LTQ_SWITCH_BM_PDI_BASE (LTQ_SWITCH_CORE_BASE + 4 * 0x40)
+#define LTQ_SWITCH_MAC_PDI_0_BASE (LTQ_SWITCH_CORE_BASE + 4 * 0x900)
+#define LTQ_SWITCH_MAC_PDI_X_BASE(x) (LTQ_SWITCH_MAC_PDI_0_BASE + (x) * 0x30)
+#define LTQ_SWITCH_TOPLEVEL_BASE (LTQ_SWITCH_BASE + 4 * 0xC40)
+#define LTQ_SWITCH_MDIO_PDI_BASE (LTQ_SWITCH_TOPLEVEL_BASE)
+#define LTQ_SWITCH_MII_PDI_BASE (LTQ_SWITCH_TOPLEVEL_BASE + 4 * 0x36)
+#define LTQ_SWITCH_PMAC_PDI_BASE (LTQ_SWITCH_TOPLEVEL_BASE + 4 * 0x82)
+
+#define LTQ_ETHSW_MAC_CTRL0_PADEN (1 << 8)
+#define LTQ_ETHSW_MAC_CTRL0_FCS (1 << 7)
+#define LTQ_ETHSW_MAC_CTRL1_SHORTPRE (1 << 8)
+#define LTQ_ETHSW_MAC_CTRL2_MLEN (1 << 3)
+#define LTQ_ETHSW_MAC_CTRL2_LCHKL (1 << 2)
+#define LTQ_ETHSW_MAC_CTRL2_LCHKS_DIS 0
+#define LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG 1
+#define LTQ_ETHSW_MAC_CTRL2_LCHKS_TAG 2
+#define LTQ_ETHSW_MAC_CTRL6_RBUF_DLY_WP_SHIFT 9
+#define LTQ_ETHSW_MAC_CTRL6_RXBUF_BYPASS (1 << 6)
+#define LTQ_ETHSW_GLOB_CTRL_SE (1 << 15)
+#define LTQ_ETHSW_MDC_CFG1_MCEN (1 << 8)
+#define LTQ_ETHSW_PMAC_HD_CTL_FC (1 << 10)
+#define LTQ_ETHSW_PMAC_HD_CTL_RC (1 << 4)
+#define LTQ_ETHSW_PMAC_HD_CTL_AC (1 << 2)
+#define ADVERTIZE_MPD (1 << 10)
+
+#define MDIO_DEVAD_NONE (-1)
+
+#define LTQ_ETH_RX_BUFFER_CNT PKTBUFSRX
+
+#define LTQ_MDIO_DRV_NAME "ltq-mdio"
+#define LTQ_ETH_DRV_NAME "ltq-eth"
+
+#define LTQ_ETHSW_MAX_GMAC 1
+#define LTQ_ETHSW_PMAC 1
+
+#define ltq_setbits(a, set) \
+ ltq_w32(ltq_r32(a) | (set), a)
+
+enum ltq_reset_modules {
+ LTQ_RESET_CORE,
+ LTQ_RESET_DMA,
+ LTQ_RESET_ETH,
+ LTQ_RESET_PHY,
+ LTQ_RESET_HARD,
+ LTQ_RESET_SOFT,
+};
+
+static inline void
+dbg_ltq_writel(void *a, unsigned int b)
+{
+ ltq_w32(b, a);
+}
+
+int ltq_reset_once(enum ltq_reset_modules module, ulong usec);
+
+struct ltq_ethsw_mac_pdi_x_regs {
+ u32 pstat; /* Port status */
+ u32 pisr; /* Interrupt status */
+ u32 pier; /* Interrupt enable */
+ u32 ctrl_0; /* Control 0 */
+ u32 ctrl_1; /* Control 1 */
+ u32 ctrl_2; /* Control 2 */
+ u32 ctrl_3; /* Control 3 */
+ u32 ctrl_4; /* Control 4 */
+ u32 ctrl_5; /* Control 5 */
+ u32 ctrl_6; /* Control 6 */
+ u32 bufst; /* TX/RX buffer control */
+ u32 testen; /* Test enable */
+};
+
+struct ltq_ethsw_mac_pdi_regs {
+ struct ltq_ethsw_mac_pdi_x_regs mac[12];
+};
+
+struct ltq_ethsw_mdio_pdi_regs {
+ u32 glob_ctrl; /* Global control 0 */
+ u32 rsvd0[7];
+ u32 mdio_ctrl; /* MDIO control */
+ u32 mdio_read; /* MDIO read data */
+ u32 mdio_write; /* MDIO write data */
+ u32 mdc_cfg_0; /* MDC clock configuration 0 */
+ u32 mdc_cfg_1; /* MDC clock configuration 1 */
+ u32 rsvd[3];
+ u32 phy_addr_5; /* PHY address port 5 */
+ u32 phy_addr_4; /* PHY address port 4 */
+ u32 phy_addr_3; /* PHY address port 3 */
+ u32 phy_addr_2; /* PHY address port 2 */
+ u32 phy_addr_1; /* PHY address port 1 */
+ u32 phy_addr_0; /* PHY address port 0 */
+ u32 mdio_stat_0; /* MDIO PHY polling status port 0 */
+ u32 mdio_stat_1; /* MDIO PHY polling status port 1 */
+ u32 mdio_stat_2; /* MDIO PHY polling status port 2 */
+ u32 mdio_stat_3; /* MDIO PHY polling status port 3 */
+ u32 mdio_stat_4; /* MDIO PHY polling status port 4 */
+ u32 mdio_stat_5; /* MDIO PHY polling status port 5 */
+};
+
+struct ltq_ethsw_mii_pdi_regs {
+ u32 mii_cfg0; /* xMII port 0 configuration */
+ u32 pcdu0; /* Port 0 clock delay configuration */
+ u32 mii_cfg1; /* xMII port 1 configuration */
+ u32 pcdu1; /* Port 1 clock delay configuration */
+ u32 mii_cfg2; /* xMII port 2 configuration */
+ u32 rsvd0;
+ u32 mii_cfg3; /* xMII port 3 configuration */
+ u32 rsvd1;
+ u32 mii_cfg4; /* xMII port 4 configuration */
+ u32 rsvd2;
+ u32 mii_cfg5; /* xMII port 5 configuration */
+ u32 pcdu5; /* Port 5 clock delay configuration */
+};
+
+struct ltq_ethsw_pmac_pdi_regs {
+ u32 hd_ctl; /* PMAC header control */
+ u32 tl; /* PMAC type/length */
+ u32 sa1; /* PMAC source address 1 */
+ u32 sa2; /* PMAC source address 2 */
+ u32 sa3; /* PMAC source address 3 */
+ u32 da1; /* PMAC destination address 1 */
+ u32 da2; /* PMAC destination address 2 */
+ u32 da3; /* PMAC destination address 3 */
+ u32 vlan; /* PMAC VLAN */
+ u32 rx_ipg; /* PMAC interpacket gap in RX direction */
+ u32 st_etype; /* PMAC special tag ethertype */
+ u32 ewan; /* PMAC ethernet WAN group */
+};
+
+struct ltq_mdio_phy_addr_reg {
+ union {
+ struct {
+ unsigned rsvd:1;
+ unsigned lnkst:2; /* Link status control */
+ unsigned speed:2; /* Speed control */
+ unsigned fdup:2; /* Full duplex control */
+ unsigned fcontx:2; /* Flow control mode TX */
+ unsigned fconrx:2; /* Flow control mode RX */
+ unsigned addr:5; /* PHY address */
+ } bits;
+ u16 val;
+ };
+};
+
+enum ltq_mdio_phy_addr_lnkst {
+ LTQ_MDIO_PHY_ADDR_LNKST_AUTO = 0,
+ LTQ_MDIO_PHY_ADDR_LNKST_UP = 1,
+ LTQ_MDIO_PHY_ADDR_LNKST_DOWN = 2,
+};
+
+enum ltq_mdio_phy_addr_speed {
+ LTQ_MDIO_PHY_ADDR_SPEED_M10 = 0,
+ LTQ_MDIO_PHY_ADDR_SPEED_M100 = 1,
+ LTQ_MDIO_PHY_ADDR_SPEED_G1 = 2,
+ LTQ_MDIO_PHY_ADDR_SPEED_AUTO = 3,
+};
+
+enum ltq_mdio_phy_addr_fdup {
+ LTQ_MDIO_PHY_ADDR_FDUP_AUTO = 0,
+ LTQ_MDIO_PHY_ADDR_FDUP_ENABLE = 1,
+ LTQ_MDIO_PHY_ADDR_FDUP_DISABLE = 3,
+};
+
+enum ltq_mdio_phy_addr_fcon {
+ LTQ_MDIO_PHY_ADDR_FCON_AUTO = 0,
+ LTQ_MDIO_PHY_ADDR_FCON_ENABLE = 1,
+ LTQ_MDIO_PHY_ADDR_FCON_DISABLE = 3,
+};
+
+struct ltq_mii_mii_cfg_reg {
+ union {
+ struct {
+ unsigned res:1; /* Hardware reset */
+ unsigned en:1; /* xMII interface enable */
+ unsigned isol:1; /* xMII interface isolate */
+ unsigned ldclkdis:1; /* Link down clock disable */
+ unsigned rsvd:1;
+ unsigned crs:2; /* CRS sensitivity config */
+ unsigned rgmii_ibs:1; /* RGMII In Band status */
+ unsigned rmii:1; /* RMII ref clock direction */
+ unsigned miirate:3; /* xMII interface clock rate */
+ unsigned miimode:4; /* xMII interface mode */
+ } bits;
+ u16 val;
+ };
+};
+
+enum ltq_mii_mii_cfg_miirate {
+ LTQ_MII_MII_CFG_MIIRATE_M2P5 = 0,
+ LTQ_MII_MII_CFG_MIIRATE_M25 = 1,
+ LTQ_MII_MII_CFG_MIIRATE_M125 = 2,
+ LTQ_MII_MII_CFG_MIIRATE_M50 = 3,
+ LTQ_MII_MII_CFG_MIIRATE_AUTO = 4,
+};
+
+enum ltq_mii_mii_cfg_miimode {
+ LTQ_MII_MII_CFG_MIIMODE_MIIP = 0,
+ LTQ_MII_MII_CFG_MIIMODE_MIIM = 1,
+ LTQ_MII_MII_CFG_MIIMODE_RMIIP = 2,
+ LTQ_MII_MII_CFG_MIIMODE_RMIIM = 3,
+ LTQ_MII_MII_CFG_MIIMODE_RGMII = 4,
+};
+
+struct ltq_eth_priv {
+ struct ltq_dma_device *dma_dev;
+ struct mii_dev *bus;
+ struct eth_device *dev;
+ struct phy_device *phymap[LTQ_ETHSW_MAX_GMAC];
+ int rx_num;
+};
+
+enum ltq_mdio_mbusy {
+ LTQ_MDIO_MBUSY_IDLE = 0,
+ LTQ_MDIO_MBUSY_BUSY = 1,
+};
+
+enum ltq_mdio_op {
+ LTQ_MDIO_OP_WRITE = 1,
+ LTQ_MDIO_OP_READ = 2,
+};
+
+struct ltq_mdio_access {
+ union {
+ struct {
+ unsigned rsvd:3;
+ unsigned mbusy:1;
+ unsigned op:2;
+ unsigned phyad:5;
+ unsigned regad:5;
+ } bits;
+ u16 val;
+ };
+};
+
+enum LTQ_ETH_PORT_FLAGS {
+ LTQ_ETH_PORT_NONE = 0,
+ LTQ_ETH_PORT_PHY = 1,
+ LTQ_ETH_PORT_SWITCH = (1 << 1),
+ LTQ_ETH_PORT_MAC = (1 << 2),
+};
+
+struct ltq_eth_port_config {
+ u8 num;
+ u8 phy_addr;
+ u16 flags;
+ phy_interface_t phy_if;
+};
+
+struct ltq_eth_board_config {
+ const struct ltq_eth_port_config *ports;
+ int num_ports;
+};
+
+static const struct ltq_eth_port_config eth_port_config[] = {
+ /* GMAC0: external Lantiq PEF7071 10/100/1000 PHY for LAN port 0 */
+ { 0, 0x0, LTQ_ETH_PORT_PHY, PHY_INTERFACE_MODE_RGMII },
+ /* GMAC1: external Lantiq PEF7071 10/100/1000 PHY for LAN port 1 */
+ { 1, 0x1, LTQ_ETH_PORT_PHY, PHY_INTERFACE_MODE_RGMII },
+};
+
+static const struct ltq_eth_board_config board_config = {
+ .ports = eth_port_config,
+ .num_ports = ARRAY_SIZE(eth_port_config),
+};
+
+static struct ltq_ethsw_mac_pdi_regs *ltq_ethsw_mac_pdi_regs =
+ (struct ltq_ethsw_mac_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MAC_PDI_0_BASE);
+
+static struct ltq_ethsw_mdio_pdi_regs *ltq_ethsw_mdio_pdi_regs =
+ (struct ltq_ethsw_mdio_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MDIO_PDI_BASE);
+
+static struct ltq_ethsw_mii_pdi_regs *ltq_ethsw_mii_pdi_regs =
+ (struct ltq_ethsw_mii_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_MII_PDI_BASE);
+
+static struct ltq_ethsw_pmac_pdi_regs *ltq_ethsw_pmac_pdi_regs =
+ (struct ltq_ethsw_pmac_pdi_regs *) CKSEG1ADDR(LTQ_SWITCH_PMAC_PDI_BASE);
+
+
+#define MAX_DMA_CHAN 0x8
+#define MAX_DMA_CRC_LEN 0x4
+#define MAX_DMA_DATA_LEN 0x600
+
+/*
+ * Use 2 static channels for TX/RX; depending on the SoC we need to use
+ * different DMA channels for ethernet.
+ */
+#define LTQ_ETOP_TX_CHANNEL 1
+#define LTQ_ETOP_RX_CHANNEL 0
+
+#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
+#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
+
+#define DRV_VERSION "1.0"
+
+static void __iomem *ltq_vrx200_membase;
+
+struct ltq_vrx200_chan {
+ int idx;
+ int tx_free;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ struct ltq_dma_channel dma;
+ struct sk_buff *skb[LTQ_DESC_NUM];
+};
+
+struct ltq_vrx200_priv {
+ struct net_device *netdev;
+ struct ltq_eth_data *pldata;
+ struct resource *res;
+
+ struct mii_bus *mii_bus;
+ struct phy_device *phydev;
+
+ struct ltq_vrx200_chan ch[MAX_DMA_CHAN];
+ int tx_free[MAX_DMA_CHAN >> 1];
+
+ spinlock_t lock;
+
+ struct clk *clk_ppe;
+};
+
+static int ltq_vrx200_mdio_wr(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 phy_data);
+
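+/*
+ * Allocate a fresh rx skb for the current descriptor, map its data for DMA
+ * and hand the descriptor back to the DMA engine (OWN bit set).
+ */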
+static int
+ltq_vrx200_alloc_skb(struct ltq_vrx200_chan *ch)
+{
+ ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
+ if (!ch->skb[ch->dma.desc])
+ return -ENOMEM;
+ ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
+ ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
+ DMA_FROM_DEVICE);
+ ch->dma.desc_base[ch->dma.desc].addr =
+ CPHYSADDR(ch->skb[ch->dma.desc]->data);
+ ch->dma.desc_base[ch->dma.desc].ctl =
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
+ MAX_DMA_DATA_LEN;
+ skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
+ return 0;
+}
+
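+/*
+ * Take one received frame out of the rx ring, refill the descriptor with a
+ * fresh buffer and pass the skb up the stack.
+ */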
+static void
+ltq_vrx200_hw_receive(struct ltq_vrx200_chan *ch)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ struct sk_buff *skb = ch->skb[ch->dma.desc];
+ int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (ltq_vrx200_alloc_skb(ch)) {
+ netdev_err(ch->netdev,
+ "failed to allocate new rx buffer, stopping DMA\n");
+ ltq_dma_close(&ch->dma);
+ }
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ skb_put(skb, len);
+ skb->dev = ch->netdev;
+ skb->protocol = eth_type_trans(skb, ch->netdev);
+ netif_receive_skb(skb);
+}
+
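+/*
+ * NAPI rx poll: process completed rx descriptors up to the budget and
+ * re-arm the channel interrupt once the ring is drained.
+ */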
+static int
+ltq_vrx200_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct ltq_vrx200_chan *ch = container_of(napi,
+ struct ltq_vrx200_chan, napi);
+ struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
+ int rx = 0;
+ int complete = 0;
+ unsigned long flags;
+
+ while ((rx < budget) && !complete) {
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ ltq_vrx200_hw_receive(ch);
+ rx++;
+ } else {
+ complete = 1;
+ }
+ }
+ if (complete || !rx) {
+ napi_complete(&ch->napi);
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_dma_ack_irq(&ch->dma);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ return rx;
+}
+
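+/*
+ * NAPI tx poll: reclaim completed tx descriptors, free the transmitted skbs
+ * and restart the tx queue if it was stopped.
+ */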
+static int
+ltq_vrx200_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct ltq_vrx200_chan *ch =
+ container_of(napi, struct ltq_vrx200_chan, napi);
+ struct ltq_vrx200_priv *priv = netdev_priv(ch->netdev);
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ while ((ch->dma.desc_base[ch->tx_free].ctl &
+ (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+ dev_kfree_skb_any(ch->skb[ch->tx_free]);
+ ch->skb[ch->tx_free] = NULL;
+ memset(&ch->dma.desc_base[ch->tx_free], 0,
+ sizeof(struct ltq_dma_desc));
+ ch->tx_free++;
+ ch->tx_free %= LTQ_DESC_NUM;
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_start_queue(txq);
+ napi_complete(&ch->napi);
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_dma_ack_irq(&ch->dma);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 1;
+}
+
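+/*
+ * DMA interrupt handler: the channel index is derived from the irq number
+ * (irq - LTQ_DMA_ETOP) and the matching NAPI context is scheduled.
+ */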
+static irqreturn_t
+ltq_vrx200_dma_irq(int irq, void *_priv)
+{
+ struct ltq_vrx200_priv *priv = _priv;
+ int ch = irq - LTQ_DMA_ETOP;
+
+ napi_schedule(&priv->ch[ch].napi);
+ return IRQ_HANDLED;
+}
+
+static void
+ltq_vrx200_free_channel(struct net_device *dev, struct ltq_vrx200_chan *ch)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+
+ ltq_dma_free(&ch->dma);
+ if (ch->dma.irq)
+ free_irq(ch->dma.irq, priv);
+ if (IS_RX(ch->idx)) {
+ int desc;
+ for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+			dev_kfree_skb_any(ch->skb[desc]);
+ }
+}
+
+static void
+ltq_vrx200_hw_exit(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ int i;
+
+ clk_disable(priv->clk_ppe);
+
+ for (i = 0; i < MAX_DMA_CHAN; i++)
+ if (IS_TX(i) || IS_RX(i))
+ ltq_vrx200_free_channel(dev, &priv->ch[i]);
+}
+
+static void *ltq_eth_phy_addr_reg(int num)
+{
+ switch (num) {
+ case 0:
+ return &ltq_ethsw_mdio_pdi_regs->phy_addr_0;
+ case 1:
+ return &ltq_ethsw_mdio_pdi_regs->phy_addr_1;
+ case 2:
+ return &ltq_ethsw_mdio_pdi_regs->phy_addr_2;
+ case 3:
+ return &ltq_ethsw_mdio_pdi_regs->phy_addr_3;
+ case 4:
+ return &ltq_ethsw_mdio_pdi_regs->phy_addr_4;
+ case 5:
+ return &ltq_ethsw_mdio_pdi_regs->phy_addr_5;
+ }
+
+ return NULL;
+}
+
+static void *ltq_eth_mii_cfg_reg(int num)
+{
+ switch (num) {
+ case 0:
+ return &ltq_ethsw_mii_pdi_regs->mii_cfg0;
+ case 1:
+ return &ltq_ethsw_mii_pdi_regs->mii_cfg1;
+ case 2:
+ return &ltq_ethsw_mii_pdi_regs->mii_cfg2;
+ case 3:
+ return &ltq_ethsw_mii_pdi_regs->mii_cfg3;
+ case 4:
+ return &ltq_ethsw_mii_pdi_regs->mii_cfg4;
+ case 5:
+ return &ltq_ethsw_mii_pdi_regs->mii_cfg5;
+ }
+
+ return NULL;
+}
+
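+/*
+ * Mirror the PHY state reported by phylib (link, speed, duplex) into the
+ * switch MDIO address and xMII configuration registers.
+ */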
+static void ltq_eth_gmac_update(struct phy_device *phydev, int num)
+{
+ struct ltq_mdio_phy_addr_reg phy_addr_reg;
+ struct ltq_mii_mii_cfg_reg mii_cfg_reg;
+ void *phy_addr = ltq_eth_phy_addr_reg(num);
+ void *mii_cfg = ltq_eth_mii_cfg_reg(num);
+
+ phy_addr_reg.val = ltq_r32(phy_addr);
+ mii_cfg_reg.val = ltq_r32(mii_cfg);
+
+ phy_addr_reg.bits.addr = phydev->addr;
+
+ if (phydev->link)
+ phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_UP;
+ else
+ phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_DOWN;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_G1;
+ mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M125;
+ break;
+ case SPEED_100:
+ phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M100;
+ switch (mii_cfg_reg.bits.miimode) {
+ case LTQ_MII_MII_CFG_MIIMODE_RMIIM:
+ case LTQ_MII_MII_CFG_MIIMODE_RMIIP:
+ mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M50;
+ break;
+ default:
+ mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M25;
+ break;
+ }
+ break;
+ default:
+ phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M10;
+ mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M2P5;
+ break;
+ }
+
+ if (phydev->duplex == DUPLEX_FULL)
+ phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_ENABLE;
+ else
+ phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_DISABLE;
+
+ dbg_ltq_writel(phy_addr, phy_addr_reg.val);
+ dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
+ udelay(1);
+}
+
+
+static void ltq_eth_port_config(struct ltq_vrx200_priv *priv,
+ const struct ltq_eth_port_config *port)
+{
+ struct ltq_mii_mii_cfg_reg mii_cfg_reg;
+ void *mii_cfg = ltq_eth_mii_cfg_reg(port->num);
+ int setup_gpio = 0;
+
+ mii_cfg_reg.val = ltq_r32(mii_cfg);
+
+
+ switch (port->num) {
+ case 0: /* xMII0 */
+ case 1: /* xMII1 */
+ switch (port->phy_if) {
+ case PHY_INTERFACE_MODE_MII:
+ if (port->flags & LTQ_ETH_PORT_PHY)
+ /* MII MAC mode, connected to external PHY */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_MIIM;
+ else
+ /* MII PHY mode, connected to external MAC */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_MIIP;
+ setup_gpio = 1;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (port->flags & LTQ_ETH_PORT_PHY)
+ /* RMII MAC mode, connected to external PHY */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_RMIIM;
+ else
+ /* RMII PHY mode, connected to external MAC */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_RMIIP;
+ setup_gpio = 1;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII MAC mode, connected to external PHY */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_RGMII;
+ setup_gpio = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 2: /* internal GPHY0 */
+ case 3: /* internal GPHY0 */
+ case 4: /* internal GPHY1 */
+ switch (port->phy_if) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ /* MII MAC mode, connected to internal GPHY */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_MIIM;
+ setup_gpio = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ case 5: /* internal GPHY1 or xMII2 */
+ switch (port->phy_if) {
+ case PHY_INTERFACE_MODE_MII:
+ /* MII MAC mode, connected to internal GPHY */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_MIIM;
+ setup_gpio = 1;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII MAC mode, connected to external PHY */
+ mii_cfg_reg.bits.miimode =
+ LTQ_MII_MII_CFG_MIIMODE_RGMII;
+ setup_gpio = 1;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Enable MII interface */
+ mii_cfg_reg.bits.en = port->flags ? 1 : 0;
+ dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
+
+}
+
+static void ltq_eth_gmac_init(int num)
+{
+ struct ltq_mdio_phy_addr_reg phy_addr_reg;
+ struct ltq_mii_mii_cfg_reg mii_cfg_reg;
+ void *phy_addr = ltq_eth_phy_addr_reg(num);
+ void *mii_cfg = ltq_eth_mii_cfg_reg(num);
+ struct ltq_ethsw_mac_pdi_x_regs *mac_pdi_regs;
+
+ mac_pdi_regs = &ltq_ethsw_mac_pdi_regs->mac[num];
+
+ /* Reset PHY status to link down */
+ phy_addr_reg.val = ltq_r32(phy_addr);
+ phy_addr_reg.bits.addr = num;
+ phy_addr_reg.bits.lnkst = LTQ_MDIO_PHY_ADDR_LNKST_DOWN;
+ phy_addr_reg.bits.speed = LTQ_MDIO_PHY_ADDR_SPEED_M10;
+ phy_addr_reg.bits.fdup = LTQ_MDIO_PHY_ADDR_FDUP_DISABLE;
+ dbg_ltq_writel(phy_addr, phy_addr_reg.val);
+
+ /* Reset and disable MII interface */
+ mii_cfg_reg.val = ltq_r32(mii_cfg);
+ mii_cfg_reg.bits.en = 0;
+ mii_cfg_reg.bits.res = 1;
+ mii_cfg_reg.bits.miirate = LTQ_MII_MII_CFG_MIIRATE_M2P5;
+ dbg_ltq_writel(mii_cfg, mii_cfg_reg.val);
+
+ /*
+ * Enable padding of short frames, enable frame checksum generation
+ * in transmit direction
+ */
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_0, LTQ_ETHSW_MAC_CTRL0_PADEN |
+ LTQ_ETHSW_MAC_CTRL0_FCS);
+
+ /* Set inter packet gap size to 12 bytes */
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_1, 12);
+
+ /*
+ * Configure frame length checks:
+ * - allow jumbo frames
+ * - enable long length check
+ * - enable short length without VLAN tags
+ */
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_2, LTQ_ETHSW_MAC_CTRL2_MLEN |
+ LTQ_ETHSW_MAC_CTRL2_LCHKL |
+ LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG);
+}
+
+
+static void ltq_eth_pmac_init(void)
+{
+ struct ltq_ethsw_mac_pdi_x_regs *mac_pdi_regs;
+
+ mac_pdi_regs = &ltq_ethsw_mac_pdi_regs->mac[LTQ_ETHSW_PMAC];
+
+ /*
+ * Enable padding of short frames, enable frame checksum generation
+ * in transmit direction
+ */
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_0, LTQ_ETHSW_MAC_CTRL0_PADEN |
+ LTQ_ETHSW_MAC_CTRL0_FCS);
+
+ /*
+ * Configure frame length checks:
+ * - allow jumbo frames
+ * - enable long length check
+ * - enable short length without VLAN tags
+ */
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_2, LTQ_ETHSW_MAC_CTRL2_MLEN |
+ LTQ_ETHSW_MAC_CTRL2_LCHKL |
+ LTQ_ETHSW_MAC_CTRL2_LCHKS_UNTAG);
+
+ /*
+ * Apply workaround for buffer congestion:
+	 * - shorten preamble to 1 byte
+ * - set minimum inter packet gap size to 7 bytes
+ * - enable receive buffer bypass mode
+ */
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_1, LTQ_ETHSW_MAC_CTRL1_SHORTPRE | 7);
+ dbg_ltq_writel(&mac_pdi_regs->ctrl_6,
+ (6 << LTQ_ETHSW_MAC_CTRL6_RBUF_DLY_WP_SHIFT) |
+ LTQ_ETHSW_MAC_CTRL6_RXBUF_BYPASS);
+
+ /* Set request assertion threshold to 8, IPG counter to 11 */
+ dbg_ltq_writel(&ltq_ethsw_pmac_pdi_regs->rx_ipg, 0x8B);
+
+ /*
+ * Configure frame header control:
+ * - enable reaction on pause frames (flow control)
+ * - remove CRC for packets from PMAC to DMA
+ * - add CRC for packets from DMA to PMAC
+ */
+ dbg_ltq_writel(&ltq_ethsw_pmac_pdi_regs->hd_ctl, LTQ_ETHSW_PMAC_HD_CTL_FC |
+ /*LTQ_ETHSW_PMAC_HD_CTL_RC | */LTQ_ETHSW_PMAC_HD_CTL_AC);
+}
+
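+/*
+ * Bring up the ethernet hardware: DMA port, PPE clock, switch core reset,
+ * MDIO clock, PMAC/GMAC initialisation, DMA channel/IRQ setup and port
+ * configuration.
+ */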
+static int
+ltq_vrx200_hw_init(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ int err = 0;
+ int i;
+
+ netdev_info(dev, "setting up dma\n");
+ ltq_dma_init_port(DMA_PORT_ETOP);
+
+ netdev_info(dev, "setting up pmu\n");
+ clk_enable(priv->clk_ppe);
+
+ /* Reset ethernet and switch subsystems */
+ netdev_info(dev, "reset core\n");
+ ltq_reset_once(BIT(8), 10);
+
+ /* Enable switch macro */
+ ltq_setbits(&ltq_ethsw_mdio_pdi_regs->glob_ctrl,
+ LTQ_ETHSW_GLOB_CTRL_SE);
+
+ /* Disable MDIO auto-polling for all ports */
+ dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdc_cfg_0, 0);
+
+ /*
+ * Enable and set MDIO management clock to 2.5 MHz. This is the
+ * maximum clock for FE PHYs.
+ * Formula for clock is:
+ *
+ * 50 MHz
+ * x = ----------- - 1
+ * 2 * f_MDC
+ */
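+	/* e.g. for f_MDC = 2.5 MHz: x = 50 / (2 * 2.5) - 1 = 9 */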
+ dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdc_cfg_1,
+ LTQ_ETHSW_MDC_CFG1_MCEN | 9);
+
+ /* Init MAC connected to CPU */
+ ltq_eth_pmac_init();
+
+ /* Init MACs connected to external MII interfaces */
+ for (i = 0; i < LTQ_ETHSW_MAX_GMAC; i++)
+ ltq_eth_gmac_init(i);
+
+ for (i = 0; i < MAX_DMA_CHAN && !err; i++) {
+ int irq = LTQ_DMA_ETOP + i;
+ struct ltq_vrx200_chan *ch = &priv->ch[i];
+
+ ch->idx = ch->dma.nr = i;
+
+ if (IS_TX(i)) {
+ ltq_dma_alloc_tx(&ch->dma);
+ err = request_irq(irq, ltq_vrx200_dma_irq, IRQF_DISABLED,
+ "vrx200_tx", priv);
+ } else if (IS_RX(i)) {
+ ltq_dma_alloc_rx(&ch->dma);
+ for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
+ ch->dma.desc++)
+ if (ltq_vrx200_alloc_skb(ch))
+ err = -ENOMEM;
+ ch->dma.desc = 0;
+ err = request_irq(irq, ltq_vrx200_dma_irq, IRQF_DISABLED,
+ "vrx200_rx", priv);
+ }
+ if (!err)
+ ch->dma.irq = irq;
+ }
+ for (i = 0; i < board_config.num_ports; i++)
+ ltq_eth_port_config(priv, &board_config.ports[i]);
+ return err;
+}
+
+static void
+ltq_vrx200_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strcpy(info->driver, "Lantiq ETOP");
+ strcpy(info->bus_info, "internal");
+ strcpy(info->version, DRV_VERSION);
+}
+
+static int
+ltq_vrx200_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int
+ltq_vrx200_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int
+ltq_vrx200_nway_reset(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+
+ return phy_start_aneg(priv->phydev);
+}
+
+static const struct ethtool_ops ltq_vrx200_ethtool_ops = {
+ .get_drvinfo = ltq_vrx200_get_drvinfo,
+ .get_settings = ltq_vrx200_get_settings,
+ .set_settings = ltq_vrx200_set_settings,
+ .nway_reset = ltq_vrx200_nway_reset,
+};
+
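+/*
+ * Busy-wait (bounded) until the MDIO controller clears its busy flag;
+ * returns 0 on success, 1 on timeout.
+ */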
+static inline int ltq_mdio_poll(struct mii_bus *bus)
+{
+ struct ltq_mdio_access acc;
+ unsigned cnt = 10000;
+
+ while (likely(cnt--)) {
+ acc.val = ltq_r32(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl);
+ if (!acc.bits.mbusy)
+ return 0;
+ }
+
+ return 1;
+}
+
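+/* Write a PHY register via the switch-internal MDIO controller. */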
+static int
+ltq_vrx200_mdio_wr(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct ltq_mdio_access acc;
+ int ret;
+
+ acc.val = 0;
+ acc.bits.mbusy = LTQ_MDIO_MBUSY_BUSY;
+ acc.bits.op = LTQ_MDIO_OP_WRITE;
+ acc.bits.phyad = addr;
+ acc.bits.regad = regnum;
+
+ ret = ltq_mdio_poll(bus);
+ if (ret)
+ return ret;
+
+ dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_write, val);
+ dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl, acc.val);
+
+ return 0;
+}
+
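+/*
+ * Read a PHY register via the switch-internal MDIO controller;
+ * returns the register value or -1 on timeout.
+ */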
+static int
+ltq_vrx200_mdio_rd(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ltq_mdio_access acc;
+ int ret;
+
+ acc.val = 0;
+ acc.bits.mbusy = LTQ_MDIO_MBUSY_BUSY;
+ acc.bits.op = LTQ_MDIO_OP_READ;
+ acc.bits.phyad = addr;
+ acc.bits.regad = regnum;
+
+ ret = ltq_mdio_poll(bus);
+ if (ret)
+ goto timeout;
+
+ dbg_ltq_writel(&ltq_ethsw_mdio_pdi_regs->mdio_ctrl, acc.val);
+
+ ret = ltq_mdio_poll(bus);
+ if (ret)
+ goto timeout;
+
+ ret = ltq_r32(&ltq_ethsw_mdio_pdi_regs->mdio_read);
+
+ return ret;
+timeout:
+ return -1;
+}
+
+static void
+ltq_vrx200_mdio_link(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ ltq_eth_gmac_update(priv->phydev, 0);
+}
+
+static int
+ltq_vrx200_mdio_probe(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int val;
+
+ phydev = priv->mii_bus->phy_map[0];
+
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -ENODEV;
+ }
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_vrx200_mdio_link,
+ 0, 0);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_1000baseT_Half
+ | SUPPORTED_1000baseT_Full
+ | SUPPORTED_Autoneg
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+ phydev->advertising = phydev->supported;
+ priv->phydev = phydev;
+
+ pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
+ dev->name, phydev->drv->name,
+ dev_name(&phydev->dev), phydev->irq);
+
+ val = ltq_vrx200_mdio_rd(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000);
+ val |= ADVERTIZE_MPD;
+ ltq_vrx200_mdio_wr(priv->mii_bus, MDIO_DEVAD_NONE, MII_CTRL1000, val);
+ ltq_vrx200_mdio_wr(priv->mii_bus, 0, 0, 0x1040);
+
+ phy_start_aneg(phydev);
+
+ return 0;
+}
+
+static int
+ltq_vrx200_mdio_init(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ int i;
+ int err;
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ netdev_err(dev, "failed to allocate mii bus\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ priv->mii_bus->priv = dev;
+ priv->mii_bus->read = ltq_vrx200_mdio_rd;
+ priv->mii_bus->write = ltq_vrx200_mdio_wr;
+ priv->mii_bus->name = "ltq_mii";
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!priv->mii_bus->irq) {
+ err = -ENOMEM;
+ goto err_out_free_mdiobus;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ priv->mii_bus->irq[i] = PHY_POLL;
+
+ if (mdiobus_register(priv->mii_bus)) {
+ err = -ENXIO;
+ goto err_out_free_mdio_irq;
+ }
+
+ if (ltq_vrx200_mdio_probe(dev)) {
+ err = -ENXIO;
+ goto err_out_unregister_bus;
+ }
+ return 0;
+
+err_out_unregister_bus:
+ mdiobus_unregister(priv->mii_bus);
+err_out_free_mdio_irq:
+ kfree(priv->mii_bus->irq);
+err_out_free_mdiobus:
+ mdiobus_free(priv->mii_bus);
+err_out:
+ return err;
+}
+
+static void
+ltq_vrx200_mdio_cleanup(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+
+ phy_disconnect(priv->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+}
+
+void phy_dump(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ int i;
+ for (i = 0; i < 0x1F; i++) {
+ unsigned int val = ltq_vrx200_mdio_rd(priv->mii_bus, 0, i);
+ printk("%d %4X\n", i, val);
+ }
+}
+
+static int
+ltq_vrx200_open(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ struct ltq_vrx200_chan *ch = &priv->ch[i];
+
+ if (!IS_TX(i) && (!IS_RX(i)))
+ continue;
+ napi_enable(&ch->napi);
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_dma_open(&ch->dma);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ if (priv->phydev) {
+ phy_start(priv->phydev);
+ phy_dump(dev);
+ }
+ netif_tx_start_all_queues(dev);
+ return 0;
+}
+
+static int
+ltq_vrx200_stop(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ int i;
+ unsigned long flags;
+
+ netif_tx_stop_all_queues(dev);
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ struct ltq_vrx200_chan *ch = &priv->ch[i];
+
+ if (!IS_RX(i) && !IS_TX(i))
+ continue;
+ napi_disable(&ch->napi);
+ spin_lock_irqsave(&priv->lock, flags);
+ ltq_dma_close(&ch->dma);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+ return 0;
+}
+
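+/*
+ * hard_start_xmit: map the skb for DMA, fill the next free tx descriptor and
+ * stop the queue when the ring runs full.
+ */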
+static int
+ltq_vrx200_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ struct ltq_vrx200_chan *ch = &priv->ch[(queue << 1) | 1];
+ struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+ unsigned long flags;
+ u32 byte_offset;
+ int len;
+
+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+ netdev_err(dev, "tx ring full\n");
+ netif_tx_stop_queue(txq);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* dma needs to start on a 16 byte aligned address */
+ byte_offset = CPHYSADDR(skb->data) % 16;
+ ch->skb[ch->dma.desc] = skb;
+
+ dev->trans_start = jiffies;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
+ DMA_TO_DEVICE)) - byte_offset;
+ wmb();
+ desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+ LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
+ ch->dma.desc++;
+ ch->dma.desc %= LTQ_DESC_NUM;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
+ netif_tx_stop_queue(txq);
+
+ return NETDEV_TX_OK;
+}
+
+static int
+ltq_vrx200_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+
+	/* TODO: mii-tool reports "No MII transceiver present!" ?! */
+ return phy_mii_ioctl(priv->phydev, rq, cmd);
+}
+
+static u16
+ltq_vrx200_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /* we are currently only using the first queue */
+ return 0;
+}
+
+static int
+ltq_vrx200_init(struct net_device *dev)
+{
+ struct ltq_vrx200_priv *priv = netdev_priv(dev);
+ struct sockaddr mac;
+ int err;
+
+ ether_setup(dev);
+ dev->watchdog_timeo = 10 * HZ;
+
+ err = ltq_vrx200_hw_init(dev);
+ if (err)
+ goto err_hw;
+
+ memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+ if (!is_valid_ether_addr(mac.sa_data)) {
+ pr_warn("vrx200: invalid MAC, using random\n");
+ random_ether_addr(mac.sa_data);
+ }
+ eth_mac_addr(dev, &mac);
+
+ if (!ltq_vrx200_mdio_init(dev))
+ dev->ethtool_ops = &ltq_vrx200_ethtool_ops;
+ else
+		pr_warn("vrx200: mdio probe failed\n");
+ return 0;
+
+err_hw:
+ ltq_vrx200_hw_exit(dev);
+ return err;
+}
+
+static void
+ltq_vrx200_tx_timeout(struct net_device *dev)
+{
+ int err;
+
+ ltq_vrx200_hw_exit(dev);
+ err = ltq_vrx200_hw_init(dev);
+ if (err)
+ goto err_hw;
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+ return;
+
+err_hw:
+ ltq_vrx200_hw_exit(dev);
+ netdev_err(dev, "failed to restart vrx200 after TX timeout\n");
+}
+
+static const struct net_device_ops ltq_eth_netdev_ops = {
+ .ndo_open = ltq_vrx200_open,
+ .ndo_stop = ltq_vrx200_stop,
+ .ndo_start_xmit = ltq_vrx200_tx,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_do_ioctl = ltq_vrx200_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_select_queue = ltq_vrx200_select_queue,
+ .ndo_init = ltq_vrx200_init,
+ .ndo_tx_timeout = ltq_vrx200_tx_timeout,
+};
+
+static int __devinit
+ltq_vrx200_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ltq_vrx200_priv *priv;
+ struct resource *res;
+ int err;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get vrx200 resource\n");
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ res = devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), dev_name(&pdev->dev));
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request vrx200 resource\n");
+ err = -EBUSY;
+ goto err_out;
+ }
+
+ ltq_vrx200_membase = devm_ioremap_nocache(&pdev->dev,
+ res->start, resource_size(res));
+ if (!ltq_vrx200_membase) {
+ dev_err(&pdev->dev, "failed to remap vrx200 engine %d\n",
+ pdev->id);
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ if (ltq_gpio_request(&pdev->dev, 42, 2, 1, "MDIO") ||
+ ltq_gpio_request(&pdev->dev, 43, 2, 1, "MDC")) {
+ dev_err(&pdev->dev, "failed to request MDIO gpios\n");
+ err = -EBUSY;
+ goto err_out;
+ }
+
+	dev = alloc_etherdev_mq(sizeof(struct ltq_vrx200_priv), 4);
+	if (!dev) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+	strcpy(dev->name, "eth%d");
+ dev->netdev_ops = &ltq_eth_netdev_ops;
+ priv = netdev_priv(dev);
+ priv->res = res;
+ priv->pldata = dev_get_platdata(&pdev->dev);
+ priv->netdev = dev;
+
+ priv->clk_ppe = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk_ppe))
+ return PTR_ERR(priv->clk_ppe);
+
+ spin_lock_init(&priv->lock);
+
+ for (i = 0; i < MAX_DMA_CHAN; i++) {
+ if (IS_TX(i))
+ netif_napi_add(dev, &priv->ch[i].napi,
+ ltq_vrx200_poll_tx, 8);
+ else if (IS_RX(i))
+ netif_napi_add(dev, &priv->ch[i].napi,
+ ltq_vrx200_poll_rx, 32);
+ priv->ch[i].netdev = dev;
+ }
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_free;
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
+
+err_free:
+	free_netdev(dev);
+err_out:
+ return err;
+}
+
+static int __devexit
+ltq_vrx200_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (dev) {
+ netif_tx_stop_all_queues(dev);
+ ltq_vrx200_hw_exit(dev);
+ ltq_vrx200_mdio_cleanup(dev);
+ unregister_netdev(dev);
+ }
+ return 0;
+}
+
+static struct platform_driver ltq_mii_driver = {
+ .probe = ltq_vrx200_probe,
+ .remove = __devexit_p(ltq_vrx200_remove),
+ .driver = {
+ .name = "ltq_vrx200",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ltq_mii_driver);
+
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Lantiq SoC ETOP");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_eth.c b/target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_eth.c
new file mode 100644
index 0000000000..1e25795a2a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_eth.c
@@ -0,0 +1,636 @@
+/************************************************************************
+ *
+ * Copyright (c) 2005
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <asm/checksum.h>
+
+#if 1 /** TODO: MOVE TO APPROPRIATE PLACE */
+
+#define ETHERNET_PACKET_DMA_BUFFER_SIZE 0x600
+#define REV_MII_MODE 2
+
+#endif
+
+#define DRV_NAME "ifxmips_mii0"
+
+#include <lantiq_soc.h>
+#include <svip_dma.h>
+
+#ifdef CONFIG_DEBUG_MINI_BOOT
+#define IKOS_MINI_BOOT
+#endif
+
+/* debugging */
+#undef INCAIP2_SW_DUMP
+
+#define INCAIP2_SW_EMSG(fmt,args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+
+#define INCAIP2_SW_CHIP_NO 1
+#define INCAIP2_SW_CHIP_ID 0
+#define INCAIP2_SW_DEVICE_NO 1
+
+#ifdef INCAIP2_SW_DEBUG_MSG
+#define INCAIP2_SW_DMSG(fmt,args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+#else
+#define INCAIP2_SW_DMSG(fmt,args...)
+#endif
+
+/************************** Module Parameters *****************************/
+static char *mode = "bridge";
+module_param(mode, charp, 0000);
+MODULE_PARM_DESC(mode, "<description>");
+
+#ifdef HAVE_TX_TIMEOUT
+static int timeout = 10*HZ;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Transmission watchdog timeout in seconds");
+#endif
+
+#ifdef IKOS_MINI_BOOT
+#ifdef CONFIG_INCAIP2
+extern s32 incaip2_sw_to_mbx(struct sk_buff* skb);
+#endif
+extern s32 svip_sw_to_mbx(struct sk_buff* skb);
+#endif
+
+struct svip_mii_priv {
+ struct net_device_stats stats;
+ struct dma_device_info *dma_device;
+ struct sk_buff *skb;
+};
+
+static struct net_device *svip_mii0_dev;
+static unsigned char mac_addr[MAX_ADDR_LEN];
+static unsigned char my_ethaddr[MAX_ADDR_LEN];
+
+/**
+ * Initialize MAC address.
+ * This function copies the ethernet address from kernel command line.
+ *
+ * \param line Pointer to parameter
+ * \return 0 OK
+ * \ingroup Internal
+ */
+static int __init svip_eth_ethaddr_setup(char *line)
+{
+ char *ep;
+ int i;
+
+ memset(my_ethaddr, 0, MAX_ADDR_LEN);
+ /* there should really be routines to do this stuff */
+ for (i = 0; i < 6; i++)
+ {
+ my_ethaddr[i] = line ? simple_strtoul(line, &ep, 16) : 0;
+ if (line)
+ line = (*ep) ? ep+1 : ep;
+ }
+	INCAIP2_SW_DMSG("mac address %02x-%02x-%02x-%02x-%02x-%02x\n",
+			my_ethaddr[0], my_ethaddr[1], my_ethaddr[2],
+			my_ethaddr[3], my_ethaddr[4], my_ethaddr[5]);
+ return 0;
+}
+__setup("ethaddr=", svip_eth_ethaddr_setup);
+
+
+/**
+ * Open RX DMA channels.
+ * This function opens all DMA rx channels.
+ *
+ * \param dma_dev pointer to DMA device information
+ * \ingroup Internal
+ */
+static void svip_eth_open_rx_dma(struct dma_device_info *dma_dev)
+{
+ int i;
+
+ for(i=0; i<dma_dev->num_rx_chan; i++)
+ {
+ dma_dev->rx_chan[i]->open(dma_dev->rx_chan[i]);
+ }
+}
+
+
+/**
+ * Open TX DMA channels.
+ * This function opens all DMA tx channels.
+ *
+ * \param dma_dev pointer to DMA device information
+ * \ingroup Internal
+ */
+static void svip_eth_open_tx_dma(struct dma_device_info *dma_dev)
+{
+ int i;
+
+ for (i=0; i<dma_dev->num_tx_chan; i++)
+ {
+ dma_dev->tx_chan[i]->open(dma_dev->tx_chan[i]);
+ }
+}
+
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+/**
+ * Enable receiving DMA.
+ * This function enables the receiving DMA channel.
+ *
+ * \param dev pointer to net device structure that comprises
+ * DMA device information pointed to by its priv field.
+ * \ingroup Internal
+ */
+void svip_eth_xon(struct net_device *dev)
+{
+ struct switch_priv *sw_dev = (struct switch_priv *)dev->priv;
+ struct dma_device_info* dma_dev =
+ (struct dma_device_info *)sw_dev->dma_device;
+ unsigned long flag;
+
+ local_irq_save(flag);
+
+ INCAIP2_SW_DMSG("wakeup\n");
+ svip_eth_open_rx_dma(dma_dev);
+
+ local_irq_restore(flag);
+}
+#endif /* CONFIG_NET_HW_FLOWCONTROL */
+
+
+/**
+ * Open network device.
+ * This function opens the network device and starts the interface queue.
+ *
+ * \param dev Device structure for Ethernet device
+ * \return 0 OK, device opened
+ * \return -1 Error, registering DMA device
+ * \ingroup API
+ */
+int svip_mii_open(struct net_device *dev)
+{
+ struct svip_mii_priv *priv = netdev_priv(dev);
+ struct dma_device_info *dma_dev = priv->dma_device;
+
+ svip_eth_open_rx_dma(dma_dev);
+ svip_eth_open_tx_dma(dma_dev);
+
+ netif_start_queue(dev);
+ return 0;
+}
+
+
+/**
+ * Close network device.
+ * This function closes the network device, which will also stop the interface
+ * queue.
+ *
+ * \param dev Device structure for Ethernet device
+ * \return 0 OK, device closed (cannot fail)
+ * \ingroup API
+ */
+int svip_mii_release(struct net_device *dev)
+{
+ struct svip_mii_priv *priv = netdev_priv(dev);
+ struct dma_device_info *dma_dev = priv->dma_device;
+ int i;
+
+ for (i = 0; i < dma_dev->max_rx_chan_num; i++)
+ dma_dev->rx_chan[i]->close(dma_dev->rx_chan[i]);
+ netif_stop_queue(dev);
+ return 0;
+}
+
+
+/**
+ * Read data from DMA device.
+ * This function reads data from the DMA device. The function is called by
+ * the switch/DMA pseudo interrupt handler dma_intr_handler on occurrence of
+ * a DMA receive interrupt.
+ *
+ * \param dev Pointer to network device structure
+ * \param dma_dev Pointer to dma device structure
+ * \return OK In case of successful data reception from dma
+ * -EIO Incorrect opt pointer provided by device
+ * \ingroup Internal
+ */
+int svip_mii_hw_receive(struct net_device *dev, struct dma_device_info *dma_dev)
+{
+ struct svip_mii_priv *priv = netdev_priv(dev);
+ unsigned char *buf = NULL;
+ struct sk_buff *skb = NULL;
+ int len = 0;
+
+ len = dma_device_read(dma_dev, &buf, (void **)&skb);
+
+ if (len >= ETHERNET_PACKET_DMA_BUFFER_SIZE) {
+ printk(KERN_INFO DRV_NAME ": packet too large %d\n", len);
+ goto mii_hw_receive_err_exit;
+ }
+
+ if (skb == NULL) {
+ printk(KERN_INFO DRV_NAME ": cannot restore pointer\n");
+ goto mii_hw_receive_err_exit;
+ }
+
+ if (len > (skb->end - skb->tail)) {
+ printk(KERN_INFO DRV_NAME ": BUG, len:%d end:%p tail:%p\n",
+ len, skb->end, skb->tail);
+ goto mii_hw_receive_err_exit;
+ }
+
+ skb_put(skb, len);
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+ return 0;
+
+mii_hw_receive_err_exit:
+ if (len == 0) {
+ if (skb)
+ dev_kfree_skb_any(skb);
+ priv->stats.rx_errors++;
+ priv->stats.rx_dropped++;
+ return -EIO;
+ } else {
+ return len;
+ }
+}
+
+
+/**
+ * Write data to Ethernet switch.
+ * This function writes the data buffer via DMA to the Ethernet switch. It is
+ * called from the driver's hard_start_xmit method (svip_mii_tx).
+ *
+ * \param buf Pointer to the data to be sent
+ * \param len Length of the data in bytes
+ * \param dev Pointer to network device structure which is used for
+ * data transmission
+ * \return Number of bytes handed to the DMA device
+ * \ingroup API
+ */
+static int svip_mii_hw_tx(char *buf, int len, struct net_device *dev)
+{
+ int ret = 0;
+ struct svip_mii_priv *priv = netdev_priv(dev);
+ struct dma_device_info *dma_dev = priv->dma_device;
+ ret = dma_device_write(dma_dev, buf, len, priv->skb);
+ return ret;
+}
+
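+/*
+ * hard_start_xmit method: selects DMA channel 0 and hands the frame
+ * (at least ETH_ZLEN bytes) to the DMA engine via svip_mii_hw_tx().
+ */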
+static int svip_mii_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int len;
+ char *data;
+ struct svip_mii_priv *priv = netdev_priv(dev);
+ struct dma_device_info *dma_dev = priv->dma_device;
+
+ len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ data = skb->data;
+ priv->skb = skb;
+ dev->trans_start = jiffies;
+ /* TODO: we got more than 1 dma channel,
+ so we should do something intelligent here to select one */
+ dma_dev->current_tx_chan = 0;
+
+ wmb();
+
+ if (svip_mii_hw_tx(data, len, dev) != len) {
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_errors++;
+ priv->stats.tx_dropped++;
+ } else {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Transmission timeout callback.
+ * This function is called when a transmission timeout occurs. It will wake up
+ * the interface queue again.
+ *
+ * \param dev Device structure for Ethernet device
+ * \ingroup API
+ */
+void svip_mii_tx_timeout(struct net_device *dev)
+{
+ int i;
+ struct svip_mii_priv *priv = netdev_priv(dev);
+
+ priv->stats.tx_errors++;
+ for (i = 0; i < priv->dma_device->max_tx_chan_num; i++)
+ priv->dma_device->tx_chan[i]->disable_irq(priv->dma_device->tx_chan[i]);
+ netif_wake_queue(dev);
+ return;
+}
+
+
+/**
+ * Get device statistics.
+ * This function returns the device statistics stored in the device structure.
+ *
+ * \param dev Device structure for Ethernet device
+ * \return stats Pointer to statistics structure
+ * \ingroup API
+ */
+static struct net_device_stats *svip_get_stats(struct net_device *dev)
+{
+ struct svip_mii_priv *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+
+/**
+ * Pseudo Interrupt handler for DMA.
+ * This function processes DMA interrupts notified to the switch device driver.
+ * The function is installed at the DMA core as interrupt handler for the
+ * switch dma device.
+ * It handles the following DMA interrupts:
+ * In case of a DMA receive interrupt the received data is passed to the
+ * upper layer.
+ * In case of a transmit buffer full interrupt the transmit queue is stopped.
+ * In case of a transmission complete interrupt the transmit queue is restarted.
+ *
+ * \param dma_dev pointer to dma device structure
+ * \param status type of interrupt being notified (RCV_INT: dma receive
+ * interrupt, TX_BUF_FULL_INT: transmit buffer full interrupt,
+ * TRANSMIT_CPT_INT: transmission complete interrupt)
+ * \return OK In case of successful data reception from dma
+ * \ingroup Internal
+ */
+int dma_intr_handler(struct dma_device_info *dma_dev, int status)
+{
+ int i;
+
+ switch (status) {
+ case RCV_INT:
+ svip_mii_hw_receive(svip_mii0_dev, dma_dev);
+ break;
+
+ case TX_BUF_FULL_INT:
+ printk(KERN_INFO DRV_NAME ": tx buffer full\n");
+ netif_stop_queue(svip_mii0_dev);
+ for (i = 0; i < dma_dev->max_tx_chan_num; i++) {
+ if ((dma_dev->tx_chan[i])->control == LTQ_DMA_CH_ON)
+ dma_dev->tx_chan[i]->enable_irq(dma_dev->tx_chan[i]);
+ }
+ break;
+
+ case TRANSMIT_CPT_INT:
+
+#if 0
+ for (i = 0; i < dma_dev->max_tx_chan_num; i++)
+#if 0
+ dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i]);
+#else
+ dma_dev->tx_chan[i]->disable_irq(dma_dev->tx_chan[i], (char *)__FUNCTION__);
+#endif
+ netif_wake_queue(svip_mii0_dev);
+#endif
+ break;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Allocates buffer sufficient for Ethernet Frame.
+ * This function is installed as DMA callback function to be called on DMA
+ * receive interrupt.
+ *
+ * \param len Unused
+ * \param *byte_offset Pointer to byte offset
+ * \param **opt pointer to skb structure
+ * \return NULL In case of buffer allocation fails
+ * buffer Pointer to allocated memory
+ * \ingroup Internal
+ */
+unsigned char *svip_etop_dma_buffer_alloc(int len, int *byte_offset, void **opt)
+{
+ unsigned char *buffer = NULL;
+ struct sk_buff *skb = NULL;
+
+ skb = dev_alloc_skb(ETHERNET_PACKET_DMA_BUFFER_SIZE);
+ if (skb == NULL)
+ return NULL;
+
+ buffer = (unsigned char *)(skb->data);
+ skb_reserve(skb, 2);
+ *(int *)opt = (int)skb;
+ *byte_offset = 2;
+
+ return buffer;
+}
+
+
+/**
+ * Free DMA buffer.
+ * This function frees a buffer, which can be either a data buffer or an
+ * skb structure.
+ *
+ * \param *dataptr Pointer to data buffer
+ * \param *opt Pointer to skb structure
+ * \return 0 OK
+ * \ingroup Internal
+ */
+void svip_etop_dma_buffer_free(unsigned char *dataptr, void *opt)
+{
+ struct sk_buff *skb = NULL;
+
+ if (opt == NULL) {
+ kfree(dataptr);
+ } else {
+ skb = (struct sk_buff *)opt;
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static int svip_mii_dev_init(struct net_device *dev);
+
+static const struct net_device_ops svip_eth_netdev_ops = {
+ .ndo_init = svip_mii_dev_init,
+ .ndo_open = svip_mii_open,
+ .ndo_stop = svip_mii_release,
+ .ndo_start_xmit = svip_mii_tx,
+ .ndo_get_stats = svip_get_stats,
+ .ndo_tx_timeout = svip_mii_tx_timeout,
+};
+
+//#include <linux/device.h>
+
+/**
+ * Initialize switch driver.
+ * This function initializes the switch driver structures and registers the
+ * Ethernet device.
+ *
+ * \param dev Device structure for Ethernet device
+ * \return 0 OK
+ * \return ENOMEM No memory for structures available
+ * \return -1 Error during DMA init or Ethernet address configuration.
+ * \ingroup API
+ */
+static int svip_mii_dev_init(struct net_device *dev)
+{
+ int i;
+ struct svip_mii_priv *priv = netdev_priv(dev);
+
+
+ ether_setup(dev);
+ printk(KERN_INFO DRV_NAME ": %s is up\n", dev->name);
+ dev->watchdog_timeo = 10 * HZ;
+ memset(priv, 0, sizeof(*priv));
+ priv->dma_device = dma_device_reserve("SW");
+ if (!priv->dma_device) {
+ BUG();
+ return -ENODEV;
+ }
+ priv->dma_device->buffer_alloc = svip_etop_dma_buffer_alloc;
+ priv->dma_device->buffer_free = svip_etop_dma_buffer_free;
+ priv->dma_device->intr_handler = dma_intr_handler;
+
+ for (i = 0; i < priv->dma_device->max_rx_chan_num; i++)
+ priv->dma_device->rx_chan[i]->packet_size =
+ ETHERNET_PACKET_DMA_BUFFER_SIZE;
+
+ for (i = 0; i < priv->dma_device->max_tx_chan_num; i++) {
+ priv->dma_device->tx_chan[i]->tx_weight=DEFAULT_SW_CHANNEL_WEIGHT;
+ priv->dma_device->tx_chan[i]->packet_size =
+ ETHERNET_PACKET_DMA_BUFFER_SIZE;
+ }
+
+ dma_device_register(priv->dma_device);
+
+ printk(KERN_INFO DRV_NAME ": using mac=");
+
+ for (i = 0; i < 6; i++) {
+ dev->dev_addr[i] = mac_addr[i];
+ printk("%02X%c", dev->dev_addr[i], (i == 5) ? ('\n') : (':'));
+ }
+
+ return 0;
+}
+
+static void svip_mii_chip_init(int mode)
+{
+}
+
+static int svip_mii_probe(struct platform_device *dev)
+{
+ int result = 0;
+ unsigned char *mac = (unsigned char *)dev->dev.platform_data;
+ svip_mii0_dev = alloc_etherdev(sizeof(struct svip_mii_priv));
+ svip_mii0_dev->netdev_ops = &svip_eth_netdev_ops;
+ memcpy(mac_addr, mac, 6);
+ strcpy(svip_mii0_dev->name, "eth%d");
+ svip_mii_chip_init(REV_MII_MODE);
+ result = register_netdev(svip_mii0_dev);
+ if (result) {
+ printk(KERN_INFO DRV_NAME
+ ": error %i registering device \"%s\"\n",
+ result, svip_mii0_dev->name);
+ goto out;
+ }
+ printk(KERN_INFO DRV_NAME ": driver loaded!\n");
+
+out:
+ return result;
+}
+
+static int svip_mii_remove(struct platform_device *dev)
+{
+ struct svip_mii_priv *priv = netdev_priv(svip_mii0_dev);
+
+ printk(KERN_INFO DRV_NAME ": cleanup\n");
+
+ dma_device_unregister(priv->dma_device);
+ dma_device_release(priv->dma_device);
+ kfree(priv->dma_device);
+ unregister_netdev(svip_mii0_dev);
+ free_netdev(svip_mii0_dev);
+ return 0;
+}
+
+
+static struct platform_driver svip_mii_driver = {
+ .probe = svip_mii_probe,
+ .remove = svip_mii_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+
+/**
+ * Initialize switch driver as module.
+ * This function initializes the switch driver structures and registers the
+ * Ethernet device for module usage.
+ *
+ * \return 0 OK
+ * \return ENODEV An error occurred during initialization
+ * \ingroup API
+ */
+int __init svip_mii_init(void)
+{
+ int ret = platform_driver_register(&svip_mii_driver);
+ if (ret)
+ printk(KERN_INFO DRV_NAME
+		       ": Error registering platform driver!\n");
+ return ret;
+}
+
+
+/**
+ * Remove driver module.
+ * This function removes the driver and unregisters all devices.
+ *
+ * \ingroup API
+ */
+static void __exit svip_mii_cleanup(void)
+{
+ platform_driver_unregister(&svip_mii_driver);
+}
+
+module_init(svip_mii_init);
+module_exit(svip_mii_cleanup);
+
+MODULE_LICENSE("GPL");
diff --git a/target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_virtual_eth.c b/target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_virtual_eth.c
new file mode 100644
index 0000000000..6de0cfab8b
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/net/ethernet/svip_virtual_eth.c
@@ -0,0 +1,346 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ Am Campeon 1-12; 81726 Munich, Germany
+
+ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE,
+ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS
+ SOFTWARE IS FREE OF CHARGE.
+
+ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS
+ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING
+ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP,
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE
+ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD
+ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL
+ PROPERTY INFRINGEMENT.
+
+ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT
+ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM
+ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ****************************************************************************
+Module : svip_virtual_eth.c
+
+Description : This file contains network driver implementation for a
+Virtual Ethernet interface. The Virtual Ethernet interface
+is part of Infineon's VINETIC-SVIP Linux BSP.
+ *******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+
+#define SVIP_VETH_VER_STR "3.0"
+#define SVIP_VETH_INFO_STR \
+ "@(#)SVIP virtual ethernet interface, version " SVIP_VETH_VER_STR
+
+/******************************************************************************
+ * Local define/macro definitions
+ ******************************************************************************/
+struct svip_ve_priv
+{
+ struct net_device_stats stats;
+};
+
+/******************************************************************************
+ * Global function declarations
+ ******************************************************************************/
+int svip_ve_rx(struct sk_buff *skb);
+
+/******************************************************************************
+ * Local variable declarations
+ ******************************************************************************/
+static struct net_device *svip_ve_dev;
+static int watchdog_timeout = 10*HZ;
+static int (*svip_ve_mps_xmit)(struct sk_buff *skb) = NULL;
+
+
+/******************************************************************************
+ * Global function declarations
+ ******************************************************************************/
+
+/**
+ * Called by MPS driver to register a transmit routine called for each outgoing
+ * VoFW0 message.
+ *
+ * \param mps_xmit pointer to transmit routine
+ *
+ * \return none
+ *
+ * \ingroup Internal
+ */
+void register_mps_xmit_routine(int (*mps_xmit)(struct sk_buff *skb))
+{
+ svip_ve_mps_xmit = mps_xmit;
+}
+EXPORT_SYMBOL(register_mps_xmit_routine);
+
+/**
+ * Returns a pointer to the routine used to deliver an incoming packet/message
+ * from the MPS mailbox to the networking layer. This routine is called by MPS
+ * driver during initialisation time.
+ *
+ * \param skb pointer to incoming socket buffer
+ *
+ * \return svip_ve_rx pointer to incoming messages delivering routine
+ *
+ * \ingroup Internal
+ */
+int (*register_mps_recv_routine(void)) (struct sk_buff *skb)
+{
+ return svip_ve_rx;
+}
+
+/**
+ * Used to deliver outgoing packets to VoFW0 module through the MPS driver.
+ * Upon loading/initialisation the MPS driver registers a transmit routine,
+ * which is called here to deliver the packet to the VoFW0 module.
+ *
+ * \param skb pointer to skb containing outgoing data
+ * \param dev pointer to this networking device's data
+ *
+ * \return 0 on success
+ * \return non-zero on error
+ *
+ * \ingroup Internal
+ */
+static int svip_ve_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct svip_ve_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+
+ if (svip_ve_mps_xmit)
+ {
+ err = svip_ve_mps_xmit(skb);
+ if (err)
+ stats->tx_errors++;
+ dev->trans_start = jiffies;
+ return err;
+ }
+ else
+ printk(KERN_ERR "%s: MPS driver not registered, outgoing packet not delivered\n", dev->name);
+
+ dev_kfree_skb(skb);
+
+ return -1;
+}
+
+/**
+ * Called by MPS driver upon receipt of a new message from VoFW0 module in
+ * the data inbox. The packet is pushed up to the IP layer for further processing.
+ *
+ * \param skb pointer to skb containing the incoming message
+ *
+ * \return 0 on success
+ * \return non-zero on error
+ *
+ * \ingroup Internal
+ */
+int svip_ve_rx(struct sk_buff *skb)
+{
+ int err;
+ struct svip_ve_priv *priv = netdev_priv(svip_ve_dev);
+ struct net_device_stats *stats = &priv->stats;
+
+ skb->dev = svip_ve_dev;
+ skb->protocol = eth_type_trans(skb, svip_ve_dev);
+
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ err = netif_rx(skb);
+ switch (err)
+ {
+ case NET_RX_SUCCESS:
+ return 0;
+ break;
+ case NET_RX_DROP:
+ default:
+ stats->rx_dropped++;
+ break;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(svip_ve_rx);
+
+/**
+ * Returns a pointer to the device's networking statistics data
+ *
+ * \param dev pointer to this networking device's data
+ *
+ * \return stats pointer to this network device's statistics data
+ *
+ * \ingroup Internal
+ */
+static struct net_device_stats *svip_ve_get_stats(struct net_device *dev)
+{
+ struct svip_ve_priv *priv = netdev_priv(dev);
+
+ return &priv->stats;
+}
+
+static void svip_ve_tx_timeout(struct net_device *dev)
+{
+ struct svip_ve_priv *priv = netdev_priv(dev);
+
+ priv->stats.tx_errors++;
+ netif_wake_queue(dev);
+}
+
+/**
+ * Device open routine. Called e.g. upon setting of an IP address using
+ * 'ifconfig veth0 YYY.YYY.YYY.YYY netmask ZZZ.ZZZ.ZZZ.ZZZ' or
+ * 'ifconfig veth0 up'
+ *
+ * \param dev pointer to this network device's data
+ *
+ * \return 0 on success
+ * \return non-zero on error
+ *
+ * \ingroup Internal
+ */
+int svip_ve_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+/**
+ * Device close routine. Called e.g. upon calling
+ * 'ifconfig veth0 down'
+ *
+ * \param dev pointer to this network device's data
+ *
+ * \return 0 on success
+ * \return non-zero on error
+ *
+ * \ingroup Internal
+ */
+
+int svip_ve_release(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int svip_ve_dev_init(struct net_device *dev);
+
+static const struct net_device_ops svip_virtual_eth_netdev_ops = {
+ .ndo_init = svip_ve_dev_init,
+ .ndo_open = svip_ve_open,
+ .ndo_stop = svip_ve_release,
+ .ndo_start_xmit = svip_ve_xmit,
+ .ndo_get_stats = svip_ve_get_stats,
+ .ndo_tx_timeout = svip_ve_tx_timeout,
+};
+
+
+/**
+ * Device initialisation routine which registers device interface routines.
+ * It is called upon execution of 'register_netdev' routine.
+ *
+ * \param dev pointer to this network device's data
+ *
+ * \return 0 on success
+ * \return non-zero on error
+ *
+ * \ingroup Internal
+ */
+static int svip_ve_dev_init(struct net_device *dev)
+{
+ ether_setup(dev); /* assign some of the fields */
+
+ dev->watchdog_timeo = watchdog_timeout;
+ memset(netdev_priv(dev), 0, sizeof(struct svip_ve_priv));
+ dev->flags |= IFF_NOARP|IFF_PROMISC;
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* dedicated MAC address to veth0, 00:03:19:00:15:80 */
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x03;
+ dev->dev_addr[2] = 0x19;
+ dev->dev_addr[3] = 0x00;
+ dev->dev_addr[4] = 0x15;
+ dev->dev_addr[5] = 0x80;
+
+ return 0;
+}
+
+static int svip_ve_probe(struct platform_device *dev)
+{
+ int result = 0;
+
+ svip_ve_dev = alloc_etherdev(sizeof(struct svip_ve_priv));
+ svip_ve_dev->netdev_ops = &svip_virtual_eth_netdev_ops;
+
+ strcpy(svip_ve_dev->name, "veth%d");
+
+ result = register_netdev(svip_ve_dev);
+ if (result)
+ {
+ printk(KERN_INFO "error %i registering device \"%s\"\n", result, svip_ve_dev->name);
+ goto out;
+ }
+
+ printk (KERN_INFO "%s, (c) 2009, Lantiq Deutschland GmbH\n", &SVIP_VETH_INFO_STR[4]);
+
+out:
+ return result;
+}
+
+static int svip_ve_remove(struct platform_device *dev)
+{
+ unregister_netdev(svip_ve_dev);
+ printk(KERN_INFO "%s removed\n", svip_ve_dev->name);
+ free_netdev(svip_ve_dev);
+ return 0;
+}
+
+static struct platform_driver svip_ve_driver = {
+ .probe = svip_ve_probe,
+ .remove = svip_ve_remove,
+ .driver = {
+ .name = "ifxmips_svip_ve",
+ .owner = THIS_MODULE,
+ },
+};
+
+/**
+ * Module/driver entry routine
+ */
+static int __init svip_ve_init_module(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&svip_ve_driver);
+ if (ret)
+ printk(KERN_INFO "SVIP: error(%d) registering virtual Ethernet driver!\n", ret);
+ return ret;
+}
+
+/**
+ * Module exit routine (never called for statically linked driver)
+ */
+static void __exit svip_ve_cleanup_module(void)
+{
+ platform_driver_unregister(&svip_ve_driver);
+}
+
+module_init(svip_ve_init_module);
+module_exit(svip_ve_cleanup_module);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("virtual ethernet driver for LANTIQ SVIP system");
+
+EXPORT_SYMBOL(register_mps_recv_routine);
diff --git a/target/linux/lantiq/files-3.3/drivers/spi/spi-falcon.c b/target/linux/lantiq/files-3.3/drivers/spi/spi-falcon.c
new file mode 100644
index 0000000000..447bbaaa34
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/spi/spi-falcon.c
@@ -0,0 +1,483 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include <lantiq_soc.h>
+
+#define DRV_NAME "falcon_spi"
+
+#define FALCON_SPI_XFER_BEGIN (1 << 0)
+#define FALCON_SPI_XFER_END (1 << 1)
+
+/* Bus Read Configuration Register0 */
+#define LTQ_BUSRCON0 0x00000010
+/* Bus Write Configuration Register0 */
+#define LTQ_BUSWCON0 0x00000018
+/* Serial Flash Configuration Register */
+#define LTQ_SFCON 0x00000080
+/* Serial Flash Time Register */
+#define LTQ_SFTIME 0x00000084
+/* Serial Flash Status Register */
+#define LTQ_SFSTAT 0x00000088
+/* Serial Flash Command Register */
+#define LTQ_SFCMD 0x0000008C
+/* Serial Flash Address Register */
+#define LTQ_SFADDR 0x00000090
+/* Serial Flash Data Register */
+#define LTQ_SFDATA 0x00000094
+/* Serial Flash I/O Control Register */
+#define LTQ_SFIO 0x00000098
+/* EBU Clock Control Register */
+#define LTQ_EBUCC 0x000000C4
+
+/* Dummy Phase Length */
+#define SFCMD_DUMLEN_OFFSET 16
+#define SFCMD_DUMLEN_MASK 0x000F0000
+/* Chip Select */
+#define SFCMD_CS_OFFSET 24
+#define SFCMD_CS_MASK 0x07000000
+/* field offset */
+#define SFCMD_ALEN_OFFSET 20
+#define SFCMD_ALEN_MASK 0x00700000
+/* SCK Rise-edge Position */
+#define SFTIME_SCKR_POS_OFFSET 8
+#define SFTIME_SCKR_POS_MASK 0x00000F00
+/* SCK Period */
+#define SFTIME_SCK_PER_OFFSET 0
+#define SFTIME_SCK_PER_MASK 0x0000000F
+/* SCK Fall-edge Position */
+#define SFTIME_SCKF_POS_OFFSET 12
+#define SFTIME_SCKF_POS_MASK 0x0000F000
+/* Device Size */
+#define SFCON_DEV_SIZE_A23_0 0x03000000
+#define SFCON_DEV_SIZE_MASK 0x0F000000
+/* Read Data Position */
+#define SFTIME_RD_POS_MASK 0x000F0000
+/* Data Output */
+#define SFIO_UNUSED_WD_MASK 0x0000000F
+/* Command Opcode mask */
+#define SFCMD_OPC_MASK 0x000000FF
+/* dlen bytes of data to write */
+#define SFCMD_DIR_WRITE 0x00000100
+/* Data Length offset */
+#define SFCMD_DLEN_OFFSET 9
+/* Command Error */
+#define SFSTAT_CMD_ERR 0x20000000
+/* Access Command Pending */
+#define SFSTAT_CMD_PEND 0x00400000
+/* Frequency set to 100MHz. */
+#define EBUCC_EBUDIV_SELF100 0x00000001
+/* Serial Flash */
+#define BUSRCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* 8-bit multiplexed */
+#define BUSRCON0_PORTW_8_BIT_MUX 0x00000000
+/* Serial Flash */
+#define BUSWCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* Chip Select after opcode */
+#define SFCMD_KEEP_CS_KEEP_SELECTED 0x00008000
+
+struct falcon_spi {
+ u32 sfcmd; /* for caching of opcode, direction, ... */
+ struct spi_master *master;
+};
+
+int
+falcon_spi_xfer(struct spi_device *spi,
+ struct spi_transfer *t,
+ unsigned long flags)
+{
+ struct device *dev = &spi->dev;
+ struct falcon_spi *priv = spi_master_get_devdata(spi->master);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int bytelen = ((8 * t->len + 7) / 8);
+ unsigned int len, alen, dumlen;
+ u32 val;
+ enum {
+ state_init,
+ state_command_prepare,
+ state_write,
+ state_read,
+ state_disable_cs,
+ state_end
+ } state = state_init;
+
+ do {
+ switch (state) {
+ case state_init: /* detect phase of upper layer sequence */
+ {
+ /* initial write ? */
+ if (flags & FALCON_SPI_XFER_BEGIN) {
+ if (!txp) {
+ dev_err(dev,
+ "BEGIN without tx data!\n");
+ return -1;
+ }
+ /*
+ * Prepare the parts of the sfcmd register,
+ * which should not
+ * change during a sequence!
+ * Only exception are the length fields,
+ * especially alen and dumlen.
+ */
+
+ priv->sfcmd = ((spi->chip_select
+ << SFCMD_CS_OFFSET)
+ & SFCMD_CS_MASK);
+ priv->sfcmd |= SFCMD_KEEP_CS_KEEP_SELECTED;
+ priv->sfcmd |= *txp;
+ txp++;
+ bytelen--;
+ if (bytelen) {
+ /*
+ * more data:
+ * maybe address and/or dummy
+ */
+ state = state_command_prepare;
+ break;
+ } else {
+ dev_dbg(dev, "write cmd %02X\n",
+ priv->sfcmd & SFCMD_OPC_MASK);
+ }
+ }
+ /* continued write ? */
+ if (txp && bytelen) {
+ state = state_write;
+ break;
+ }
+ /* read data? */
+ if (rxp && bytelen) {
+ state = state_read;
+ break;
+ }
+ /* end of sequence? */
+ if (flags & FALCON_SPI_XFER_END)
+ state = state_disable_cs;
+ else
+ state = state_end;
+ break;
+ }
+ /* collect tx data for address and dummy phase */
+ case state_command_prepare:
+ {
+ /* txp is valid, already checked */
+ val = 0;
+ alen = 0;
+ dumlen = 0;
+ while (bytelen > 0) {
+ if (alen < 3) {
+ val = (val<<8)|(*txp++);
+ alen++;
+ } else if ((dumlen < 15) && (*txp == 0)) {
+ /*
+ * assume dummy bytes are set to 0
+ * from upper layer
+ */
+ dumlen++;
+ txp++;
+ } else
+ break;
+ bytelen--;
+ }
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK | SFCMD_DUMLEN_MASK);
+ priv->sfcmd |= (alen << SFCMD_ALEN_OFFSET) |
+ (dumlen << SFCMD_DUMLEN_OFFSET);
+ if (alen > 0)
+ ltq_ebu_w32(val, LTQ_SFADDR);
+
+ dev_dbg(dev, "write cmd %02X, alen=%d "
+ "(addr=%06X) dumlen=%d\n",
+ priv->sfcmd & SFCMD_OPC_MASK,
+ alen, val, dumlen);
+
+ if (bytelen > 0) {
+ /* continue with write */
+ state = state_write;
+ } else if (flags & FALCON_SPI_XFER_END) {
+ /* end of sequence? */
+ state = state_disable_cs;
+ } else {
+ /*
+ * go to end and expect another
+ * call (read or write)
+ */
+ state = state_end;
+ }
+ break;
+ }
+ case state_write:
+ {
+ /* txp still valid */
+ priv->sfcmd |= SFCMD_DIR_WRITE;
+ len = 0;
+ val = 0;
+ do {
+ if (bytelen--)
+ val |= (*txp++) << (8 * len++);
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen == 0)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ if ((len == 4) || (bytelen == 0)) {
+ ltq_ebu_w32(val, LTQ_SFDATA);
+ ltq_ebu_w32(priv->sfcmd
+ | (len<<SFCMD_DLEN_OFFSET),
+ LTQ_SFCMD);
+ len = 0;
+ val = 0;
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ }
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_read:
+ {
+ /* read data */
+ priv->sfcmd &= ~SFCMD_DIR_WRITE;
+ do {
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen <= 4)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ len = (bytelen > 4) ? 4 : bytelen;
+ bytelen -= len;
+ ltq_ebu_w32(priv->sfcmd
+ |(len<<SFCMD_DLEN_OFFSET), LTQ_SFCMD);
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ do {
+ val = ltq_ebu_r32(LTQ_SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR "
+ "(%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR,
+ LTQ_SFSTAT);
+ return -1;
+ }
+ } while (val & SFSTAT_CMD_PEND);
+ val = ltq_ebu_r32(LTQ_SFDATA);
+ do {
+ *rxp = (val & 0xFF);
+ rxp++;
+ val >>= 8;
+ len--;
+ } while (len);
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_disable_cs:
+ {
+ priv->sfcmd &= ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ ltq_ebu_w32(priv->sfcmd | (0 << SFCMD_DLEN_OFFSET),
+ LTQ_SFCMD);
+ val = ltq_ebu_r32(LTQ_SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR, LTQ_SFSTAT);
+ return -1;
+ }
+ state = state_end;
+ break;
+ }
+ case state_end:
+ break;
+ }
+ } while (state != state_end);
+
+ return 0;
+}
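+
+/*
+ * Illustrative walk through the state machine above (a sketch only, not
+ * part of the driver): assuming an upper layer such as a serial-flash
+ * driver splits a read into two transfers, the first transfer arrives with
+ * FALCON_SPI_XFER_BEGIN and a tx_buf holding the opcode, up to three
+ * address bytes and optional zero dummy bytes, which state_init and
+ * state_command_prepare pack into SFCMD/SFADDR. The second transfer
+ * carries only an rx_buf plus FALCON_SPI_XFER_END, so state_read clocks
+ * the data out of SFDATA in chunks of up to four bytes and drops
+ * SFCMD_KEEP_CS_KEEP_SELECTED on the final chunk to release the chip
+ * select.
+ */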
+
+static int
+falcon_spi_setup(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ const u32 ebuclk = 100000000;
+ unsigned int i;
+ unsigned long flags;
+
+ dev_dbg(dev, "setup\n");
+
+ if (spi->master->bus_num > 0 || spi->chip_select > 0)
+ return -ENODEV;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+
+ if (ebuclk < spi->max_speed_hz) {
+ /* set EBU clock to 100 MHz */
+ ltq_sys1_w32_mask(0, EBUCC_EBUDIV_SELF100, LTQ_EBUCC);
+ i = 1; /* divider */
+ } else {
+ /* set EBU clock to 50 MHz */
+ ltq_sys1_w32_mask(EBUCC_EBUDIV_SELF100, 0, LTQ_EBUCC);
+
+ /* search for suitable divider */
+ for (i = 1; i < 7; i++) {
+ if (ebuclk / i <= spi->max_speed_hz)
+ break;
+ }
+ }
+
+ /* setup period of serial clock */
+ ltq_ebu_w32_mask(SFTIME_SCKF_POS_MASK
+ | SFTIME_SCKR_POS_MASK
+ | SFTIME_SCK_PER_MASK,
+ (i << SFTIME_SCKR_POS_OFFSET)
+ | (i << (SFTIME_SCK_PER_OFFSET + 1)),
+ LTQ_SFTIME);
+
+ /*
+ * set some bits of unused_wd, to not trigger HOLD/WP
+ * signals on non QUAD flashes
+ */
+ ltq_ebu_w32((SFIO_UNUSED_WD_MASK & (0x8 | 0x4)), LTQ_SFIO);
+
+ ltq_ebu_w32(BUSRCON0_AGEN_SERIAL_FLASH | BUSRCON0_PORTW_8_BIT_MUX,
+ LTQ_BUSRCON0);
+ ltq_ebu_w32(BUSWCON0_AGEN_SERIAL_FLASH, LTQ_BUSWCON0);
+ /* set address wrap around to maximum for 24-bit addresses */
+ ltq_ebu_w32_mask(SFCON_DEV_SIZE_MASK, SFCON_DEV_SIZE_A23_0, LTQ_SFCON);
+
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return 0;
+}
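+
+/*
+ * Worked example for the divider search above, assuming (for illustration
+ * only) that the requested max_speed_hz is 25000000: since ebuclk is not
+ * below the request, the else branch runs and the loop stops at i = 4
+ * because ebuclk / 4 = 25 MHz <= max_speed_hz; this i is then written into
+ * the SFTIME rise/fall-edge position fields in the register write above.
+ */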
+
+static int
+falcon_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct falcon_spi *priv = spi_master_get_devdata(spi->master);
+ struct spi_transfer *t;
+ unsigned long spi_flags;
+ unsigned long flags;
+ int ret = 0;
+
+ priv->sfcmd = 0;
+ m->actual_length = 0;
+
+ spi_flags = FALCON_SPI_XFER_BEGIN;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (list_is_last(&t->transfer_list, &m->transfers))
+ spi_flags |= FALCON_SPI_XFER_END;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = falcon_spi_xfer(spi, t, spi_flags);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ if (ret)
+ break;
+
+ m->actual_length += t->len;
+
+ if (t->delay_usecs || t->cs_change)
+ BUG();
+
+ spi_flags = 0;
+ }
+
+ m->status = ret;
+ m->complete(m->context);
+
+ return 0;
+}
+
+static void
+falcon_spi_cleanup(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+
+ dev_dbg(dev, "cleanup\n");
+}
+
+static int __devinit
+falcon_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct falcon_spi *priv;
+ struct spi_master *master;
+ int ret;
+
+ dev_dbg(dev, "probing\n");
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master) {
+ dev_err(dev, "no memory for spi_master\n");
+ return -ENOMEM;
+ }
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+
+ master->mode_bits = SPI_MODE_3;
+ master->num_chipselect = 1;
+ master->bus_num = 0;
+
+ master->setup = falcon_spi_setup;
+ master->transfer = falcon_spi_transfer;
+ master->cleanup = falcon_spi_cleanup;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = spi_register_master(master);
+ if (ret)
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int __devexit
+falcon_spi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct falcon_spi *priv = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "removed\n");
+
+ spi_unregister_master(priv->master);
+
+ return 0;
+}
+
+static struct platform_driver falcon_spi_driver = {
+ .probe = falcon_spi_probe,
+ .remove = __devexit_p(falcon_spi_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE
+ }
+};
+
+static int __init
+falcon_spi_init(void)
+{
+ return platform_driver_register(&falcon_spi_driver);
+}
+
+static void __exit
+falcon_spi_exit(void)
+{
+ platform_driver_unregister(&falcon_spi_driver);
+}
+
+module_init(falcon_spi_init);
+module_exit(falcon_spi_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Lantiq Falcon SPI controller driver");
diff --git a/target/linux/lantiq/files-3.3/drivers/spi/spi-xway.c b/target/linux/lantiq/files-3.3/drivers/spi/spi-xway.c
new file mode 100644
index 0000000000..be5c25b1fe
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/spi/spi-xway.c
@@ -0,0 +1,1070 @@
+/*
+ * Lantiq SoC SPI controller
+ *
+ * Copyright (C) 2011 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <lantiq_soc.h>
+#include <lantiq_platform.h>
+
+#define LTQ_SPI_CLC 0x00 /* Clock control */
+#define LTQ_SPI_PISEL 0x04 /* Port input select */
+#define LTQ_SPI_ID 0x08 /* Identification */
+#define LTQ_SPI_CON 0x10 /* Control */
+#define LTQ_SPI_STAT 0x14 /* Status */
+#define LTQ_SPI_WHBSTATE 0x18 /* Write HW modified state */
+#define LTQ_SPI_TB 0x20 /* Transmit buffer */
+#define LTQ_SPI_RB 0x24 /* Receive buffer */
+#define LTQ_SPI_RXFCON 0x30 /* Receive FIFO control */
+#define LTQ_SPI_TXFCON 0x34 /* Transmit FIFO control */
+#define LTQ_SPI_FSTAT 0x38 /* FIFO status */
+#define LTQ_SPI_BRT 0x40 /* Baudrate timer */
+#define LTQ_SPI_BRSTAT 0x44 /* Baudrate timer status */
+#define LTQ_SPI_SFCON 0x60 /* Serial frame control */
+#define LTQ_SPI_SFSTAT 0x64 /* Serial frame status */
+#define LTQ_SPI_GPOCON 0x70 /* General purpose output control */
+#define LTQ_SPI_GPOSTAT 0x74 /* General purpose output status */
+#define LTQ_SPI_FGPO 0x78 /* Forced general purpose output */
+#define LTQ_SPI_RXREQ 0x80 /* Receive request */
+#define LTQ_SPI_RXCNT 0x84 /* Receive count */
+#define LTQ_SPI_DMACON 0xEC /* DMA control */
+#define LTQ_SPI_IRNEN 0xF4 /* Interrupt node enable */
+#define LTQ_SPI_IRNICR 0xF8 /* Interrupt node interrupt capture */
+#define LTQ_SPI_IRNCR 0xFC /* Interrupt node control */
+
+#define LTQ_SPI_CLC_SMC_SHIFT 16 /* Clock divider for sleep mode */
+#define LTQ_SPI_CLC_SMC_MASK 0xFF
+#define LTQ_SPI_CLC_RMC_SHIFT 8 /* Clock divider for normal run mode */
+#define LTQ_SPI_CLC_RMC_MASK 0xFF
+#define LTQ_SPI_CLC_DISS BIT(1) /* Disable status bit */
+#define LTQ_SPI_CLC_DISR BIT(0) /* Disable request bit */
+
+#define LTQ_SPI_ID_TXFS_SHIFT 24 /* Implemented TX FIFO size */
+#define LTQ_SPI_ID_TXFS_MASK 0x3F
+#define LTQ_SPI_ID_RXFS_SHIFT 16 /* Implemented RX FIFO size */
+#define LTQ_SPI_ID_RXFS_MASK 0x3F
+#define LTQ_SPI_ID_REV_MASK 0x1F /* Hardware revision number */
+#define LTQ_SPI_ID_CFG BIT(5) /* DMA interface support */
+
+#define LTQ_SPI_CON_BM_SHIFT 16 /* Data width selection */
+#define LTQ_SPI_CON_BM_MASK 0x1F
+#define LTQ_SPI_CON_EM BIT(24) /* Echo mode */
+#define LTQ_SPI_CON_IDLE BIT(23) /* Idle bit value */
+#define LTQ_SPI_CON_ENBV BIT(22) /* Enable byte valid control */
+#define LTQ_SPI_CON_RUEN BIT(12) /* Receive underflow error enable */
+#define LTQ_SPI_CON_TUEN BIT(11) /* Transmit underflow error enable */
+#define LTQ_SPI_CON_AEN BIT(10) /* Abort error enable */
+#define LTQ_SPI_CON_REN BIT(9) /* Receive overflow error enable */
+#define LTQ_SPI_CON_TEN BIT(8) /* Transmit overflow error enable */
+#define LTQ_SPI_CON_LB BIT(7) /* Loopback control */
+#define LTQ_SPI_CON_PO BIT(6) /* Clock polarity control */
+#define LTQ_SPI_CON_PH BIT(5) /* Clock phase control */
+#define LTQ_SPI_CON_HB BIT(4) /* Heading control */
+#define LTQ_SPI_CON_RXOFF BIT(1) /* Switch receiver off */
+#define LTQ_SPI_CON_TXOFF BIT(0) /* Switch transmitter off */
+
+#define LTQ_SPI_STAT_RXBV_MASK 0x7
+#define LTQ_SPI_STAT_RXBV_SHIFT 28
+#define LTQ_SPI_STAT_BSY BIT(13) /* Busy flag */
+#define LTQ_SPI_STAT_RUE BIT(12) /* Receive underflow error flag */
+#define LTQ_SPI_STAT_TUE BIT(11) /* Transmit underflow error flag */
+#define LTQ_SPI_STAT_AE BIT(10) /* Abort error flag */
+#define LTQ_SPI_STAT_RE BIT(9) /* Receive error flag */
+#define LTQ_SPI_STAT_TE BIT(8) /* Transmit error flag */
+#define LTQ_SPI_STAT_MS BIT(1) /* Master/slave select bit */
+#define LTQ_SPI_STAT_EN BIT(0) /* Enable bit */
+
+#define LTQ_SPI_WHBSTATE_SETTUE BIT(15) /* Set transmit underflow error flag */
+#define LTQ_SPI_WHBSTATE_SETAE BIT(14) /* Set abort error flag */
+#define LTQ_SPI_WHBSTATE_SETRE BIT(13) /* Set receive error flag */
+#define LTQ_SPI_WHBSTATE_SETTE BIT(12) /* Set transmit error flag */
+#define LTQ_SPI_WHBSTATE_CLRTUE BIT(11) /* Clear transmit underflow error flag */
+#define LTQ_SPI_WHBSTATE_CLRAE BIT(10) /* Clear abort error flag */
+#define LTQ_SPI_WHBSTATE_CLRRE BIT(9) /* Clear receive error flag */
+#define LTQ_SPI_WHBSTATE_CLRTE BIT(8) /* Clear transmit error flag */
+#define LTQ_SPI_WHBSTATE_SETME BIT(7) /* Set mode error flag */
+#define LTQ_SPI_WHBSTATE_CLRME BIT(6) /* Clear mode error flag */
+#define LTQ_SPI_WHBSTATE_SETRUE BIT(5) /* Set receive underflow error flag */
+#define LTQ_SPI_WHBSTATE_CLRRUE BIT(4) /* Clear receive underflow error flag */
+#define LTQ_SPI_WHBSTATE_SETMS BIT(3) /* Set master select bit */
+#define LTQ_SPI_WHBSTATE_CLRMS BIT(2) /* Clear master select bit */
+#define LTQ_SPI_WHBSTATE_SETEN BIT(1) /* Set enable bit (operational mode) */
+#define LTQ_SPI_WHBSTATE_CLREN BIT(0) /* Clear enable bit (config mode) */
+#define LTQ_SPI_WHBSTATE_CLR_ERRORS 0x0F50
+
+#define LTQ_SPI_RXFCON_RXFITL_SHIFT 8 /* FIFO interrupt trigger level */
+#define LTQ_SPI_RXFCON_RXFITL_MASK 0x3F
+#define LTQ_SPI_RXFCON_RXFLU BIT(1) /* FIFO flush */
+#define LTQ_SPI_RXFCON_RXFEN BIT(0) /* FIFO enable */
+
+#define LTQ_SPI_TXFCON_TXFITL_SHIFT 8 /* FIFO interrupt trigger level */
+#define LTQ_SPI_TXFCON_TXFITL_MASK 0x3F
+#define LTQ_SPI_TXFCON_TXFLU BIT(1) /* FIFO flush */
+#define LTQ_SPI_TXFCON_TXFEN BIT(0) /* FIFO enable */
+
+#define LTQ_SPI_FSTAT_RXFFL_MASK 0x3f
+#define LTQ_SPI_FSTAT_RXFFL_SHIFT 0
+#define LTQ_SPI_FSTAT_TXFFL_MASK 0x3f
+#define LTQ_SPI_FSTAT_TXFFL_SHIFT 8
+
+#define LTQ_SPI_GPOCON_ISCSBN_SHIFT 8
+#define LTQ_SPI_GPOCON_INVOUTN_SHIFT 0
+
+#define LTQ_SPI_FGPO_SETOUTN_SHIFT 8
+#define LTQ_SPI_FGPO_CLROUTN_SHIFT 0
+
+#define LTQ_SPI_RXREQ_RXCNT_MASK 0xFFFF /* Receive count value */
+#define LTQ_SPI_RXCNT_TODO_MASK 0xFFFF /* Receive to-do value */
+
+#define LTQ_SPI_IRNEN_F BIT(3) /* Frame end interrupt request */
+#define LTQ_SPI_IRNEN_E BIT(2) /* Error end interrupt request */
+#define LTQ_SPI_IRNEN_T BIT(1) /* Transmit end interrupt request */
+#define LTQ_SPI_IRNEN_R BIT(0) /* Receive end interrupt request */
+#define LTQ_SPI_IRNEN_ALL 0xF
+
+/* Hard-wired GPIOs used by SPI controller */
+#define LTQ_SPI_GPIO_DI (ltq_is_ase()? 8 : 16)
+#define LTQ_SPI_GPIO_DO (ltq_is_ase()? 9 : 17)
+#define LTQ_SPI_GPIO_CLK (ltq_is_ase()? 10 : 18)
+
+struct ltq_spi {
+ struct spi_bitbang bitbang;
+ struct completion done;
+ spinlock_t lock;
+
+ struct device *dev;
+ void __iomem *base;
+ struct clk *fpiclk;
+ struct clk *spiclk;
+
+ int status;
+ int irq[3];
+
+ const u8 *tx;
+ u8 *rx;
+ u32 tx_cnt;
+ u32 rx_cnt;
+ u32 len;
+ struct spi_transfer *curr_transfer;
+
+ u32 (*get_tx) (struct ltq_spi *);
+
+ u16 txfs;
+ u16 rxfs;
+ unsigned dma_support:1;
+ unsigned cfg_mode:1;
+
+};
+
+struct ltq_spi_controller_state {
+ void (*cs_activate) (struct spi_device *);
+ void (*cs_deactivate) (struct spi_device *);
+};
+
+struct ltq_spi_irq_map {
+ char *name;
+ irq_handler_t handler;
+};
+
+struct ltq_spi_cs_gpio_map {
+ unsigned gpio;
+ unsigned mux;
+};
+
+static inline struct ltq_spi *ltq_spi_to_hw(struct spi_device *spi)
+{
+ return spi_master_get_devdata(spi->master);
+}
+
+static inline u32 ltq_spi_reg_read(struct ltq_spi *hw, u32 reg)
+{
+ return ioread32be(hw->base + reg);
+}
+
+static inline void ltq_spi_reg_write(struct ltq_spi *hw, u32 val, u32 reg)
+{
+ iowrite32be(val, hw->base + reg);
+}
+
+static inline void ltq_spi_reg_setbit(struct ltq_spi *hw, u32 bits, u32 reg)
+{
+ u32 val;
+
+ val = ltq_spi_reg_read(hw, reg);
+ val |= bits;
+ ltq_spi_reg_write(hw, val, reg);
+}
+
+static inline void ltq_spi_reg_clearbit(struct ltq_spi *hw, u32 bits, u32 reg)
+{
+ u32 val;
+
+ val = ltq_spi_reg_read(hw, reg);
+ val &= ~bits;
+ ltq_spi_reg_write(hw, val, reg);
+}
+
+static void ltq_spi_hw_enable(struct ltq_spi *hw)
+{
+ u32 clc;
+
+ /* Power-up module */
+ clk_enable(hw->spiclk);
+
+ /*
+ * Set clock divider for run mode to 1 to
+ * run at same frequency as FPI bus
+ */
+ clc = (1 << LTQ_SPI_CLC_RMC_SHIFT);
+ ltq_spi_reg_write(hw, clc, LTQ_SPI_CLC);
+}
+
+static void ltq_spi_hw_disable(struct ltq_spi *hw)
+{
+ /* Set clock divider to 0 and set module disable bit */
+ ltq_spi_reg_write(hw, LTQ_SPI_CLC_DISS, LTQ_SPI_CLC);
+
+ /* Power-down module */
+ clk_disable(hw->spiclk);
+}
+
+static void ltq_spi_reset_fifos(struct ltq_spi *hw)
+{
+ u32 val;
+
+ /*
+ * Enable and flush FIFOs. Set interrupt trigger level to
+ * half of FIFO count implemented in hardware.
+ */
+ if (hw->txfs > 1) {
+ val = hw->txfs << (LTQ_SPI_TXFCON_TXFITL_SHIFT - 1);
+ val |= LTQ_SPI_TXFCON_TXFEN | LTQ_SPI_TXFCON_TXFLU;
+ ltq_spi_reg_write(hw, val, LTQ_SPI_TXFCON);
+ }
+
+ if (hw->rxfs > 1) {
+ val = hw->rxfs << (LTQ_SPI_RXFCON_RXFITL_SHIFT - 1);
+ val |= LTQ_SPI_RXFCON_RXFEN | LTQ_SPI_RXFCON_RXFLU;
+ ltq_spi_reg_write(hw, val, LTQ_SPI_RXFCON);
+ }
+}
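+
+/*
+ * Example of the trigger-level arithmetic above (FIFO sizes are only
+ * illustrative; the real values are read from the ID register in the probe
+ * routine): with a 16-entry TX FIFO, 16 << (TXFITL_SHIFT - 1) equals
+ * 8 << TXFITL_SHIFT, i.e. the interrupt trigger level is programmed to
+ * 8 entries, half the implemented FIFO size.
+ */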
+
+static inline int ltq_spi_wait_ready(struct ltq_spi *hw)
+{
+ u32 stat;
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(200);
+
+ do {
+ stat = ltq_spi_reg_read(hw, LTQ_SPI_STAT);
+ if (!(stat & LTQ_SPI_STAT_BSY))
+ return 0;
+
+ cond_resched();
+ } while (!time_after_eq(jiffies, timeout));
+
+ dev_err(hw->dev, "SPI wait ready timed out stat: %x\n", stat);
+
+ return -ETIMEDOUT;
+}
+
+static void ltq_spi_config_mode_set(struct ltq_spi *hw)
+{
+ if (hw->cfg_mode)
+ return;
+
+ /*
+ * Putting the SPI module in config mode is only safe if no
+ * transfer is in progress as indicated by busy flag STATE.BSY.
+ */
+ if (ltq_spi_wait_ready(hw)) {
+ ltq_spi_reset_fifos(hw);
+ hw->status = -ETIMEDOUT;
+ }
+ ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLREN, LTQ_SPI_WHBSTATE);
+
+ hw->cfg_mode = 1;
+}
+
+static void ltq_spi_run_mode_set(struct ltq_spi *hw)
+{
+ if (!hw->cfg_mode)
+ return;
+
+ ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETEN, LTQ_SPI_WHBSTATE);
+
+ hw->cfg_mode = 0;
+}
+
+static u32 ltq_spi_tx_word_u8(struct ltq_spi *hw)
+{
+ const u8 *tx = hw->tx;
+ u32 data = *tx++;
+
+ hw->tx_cnt++;
+ hw->tx++;
+
+ return data;
+}
+
+static u32 ltq_spi_tx_word_u16(struct ltq_spi *hw)
+{
+ const u16 *tx = (u16 *) hw->tx;
+ u32 data = *tx++;
+
+ hw->tx_cnt += 2;
+ hw->tx += 2;
+
+ return data;
+}
+
+static u32 ltq_spi_tx_word_u32(struct ltq_spi *hw)
+{
+ const u32 *tx = (u32 *) hw->tx;
+ u32 data = *tx++;
+
+ hw->tx_cnt += 4;
+ hw->tx += 4;
+
+ return data;
+}
+
+static void ltq_spi_bits_per_word_set(struct spi_device *spi)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u32 bm;
+ u8 bits_per_word = spi->bits_per_word;
+
+ /*
+ * Use either default value of SPI device or value
+ * from current transfer.
+ */
+ if (hw->curr_transfer && hw->curr_transfer->bits_per_word)
+ bits_per_word = hw->curr_transfer->bits_per_word;
+
+ if (bits_per_word <= 8)
+ hw->get_tx = ltq_spi_tx_word_u8;
+ else if (bits_per_word <= 16)
+ hw->get_tx = ltq_spi_tx_word_u16;
+ else if (bits_per_word <= 32)
+ hw->get_tx = ltq_spi_tx_word_u32;
+
+ /* CON.BM value = bits_per_word - 1 */
+ bm = (bits_per_word - 1) << LTQ_SPI_CON_BM_SHIFT;
+
+ ltq_spi_reg_clearbit(hw, LTQ_SPI_CON_BM_MASK <<
+ LTQ_SPI_CON_BM_SHIFT, LTQ_SPI_CON);
+ ltq_spi_reg_setbit(hw, bm, LTQ_SPI_CON);
+}
+
+static void ltq_spi_speed_set(struct spi_device *spi)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u32 br, max_speed_hz, spi_clk;
+ u32 speed_hz = spi->max_speed_hz;
+
+ /*
+ * Use either default value of SPI device or value
+ * from current transfer.
+ */
+ if (hw->curr_transfer && hw->curr_transfer->speed_hz)
+ speed_hz = hw->curr_transfer->speed_hz;
+
+ /*
+ * The SPI module clock is derived from the FPI bus clock, depending on
+ * the divider value in CLC.RMC, which is always set to 1.
+ */
+ spi_clk = clk_get_rate(hw->fpiclk);
+
+ /*
+ * Maximum SPI clock frequency in master mode is half of
+ * SPI module clock frequency. Maximum reload value of
+ * baudrate generator BR is 2^16.
+ */
+ max_speed_hz = spi_clk / 2;
+ if (speed_hz >= max_speed_hz)
+ br = 0;
+ else
+ br = (max_speed_hz / speed_hz) - 1;
+
+ if (br > 0xFFFF)
+ br = 0xFFFF;
+
+ ltq_spi_reg_write(hw, br, LTQ_SPI_BRT);
+}
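+
+/*
+ * Worked example for the baud-rate reload value above, assuming (purely
+ * for illustration) a 100 MHz FPI clock: max_speed_hz becomes 50 MHz, so a
+ * device requesting 10 MHz gets br = (50 MHz / 10 MHz) - 1 = 4, while any
+ * request of 50 MHz or more gets br = 0, i.e. the fastest clock the module
+ * can generate.
+ */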
+
+static void ltq_spi_clockmode_set(struct spi_device *spi)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u32 con;
+
+ con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
+
+ /*
+ * SPI mode mapping in CON register:
+ * Mode CPOL CPHA CON.PO CON.PH
+ * 0 0 0 0 1
+ * 1 0 1 0 0
+ * 2 1 0 1 1
+ * 3 1 1 1 0
+ */
+ if (spi->mode & SPI_CPHA)
+ con &= ~LTQ_SPI_CON_PH;
+ else
+ con |= LTQ_SPI_CON_PH;
+
+ if (spi->mode & SPI_CPOL)
+ con |= LTQ_SPI_CON_PO;
+ else
+ con &= ~LTQ_SPI_CON_PO;
+
+ /* Set heading control */
+ if (spi->mode & SPI_LSB_FIRST)
+ con &= ~LTQ_SPI_CON_HB;
+ else
+ con |= LTQ_SPI_CON_HB;
+
+ ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
+}
+
+static void ltq_spi_xmit_set(struct ltq_spi *hw, struct spi_transfer *t)
+{
+ u32 con;
+
+ con = ltq_spi_reg_read(hw, LTQ_SPI_CON);
+
+ if (t) {
+ if (t->tx_buf && t->rx_buf) {
+ con &= ~(LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
+ } else if (t->rx_buf) {
+ con &= ~LTQ_SPI_CON_RXOFF;
+ con |= LTQ_SPI_CON_TXOFF;
+ } else if (t->tx_buf) {
+ con &= ~LTQ_SPI_CON_TXOFF;
+ con |= LTQ_SPI_CON_RXOFF;
+ }
+ } else
+ con |= (LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF);
+
+ ltq_spi_reg_write(hw, con, LTQ_SPI_CON);
+}
+
+static void ltq_spi_gpio_cs_activate(struct spi_device *spi)
+{
+ struct ltq_spi_controller_data *cdata = spi->controller_data;
+ int val = spi->mode & SPI_CS_HIGH ? 1 : 0;
+
+ gpio_set_value(cdata->gpio, val);
+}
+
+static void ltq_spi_gpio_cs_deactivate(struct spi_device *spi)
+{
+ struct ltq_spi_controller_data *cdata = spi->controller_data;
+ int val = spi->mode & SPI_CS_HIGH ? 0 : 1;
+
+ gpio_set_value(cdata->gpio, val);
+}
+
+static void ltq_spi_internal_cs_activate(struct spi_device *spi)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u32 fgpo;
+
+ fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_CLROUTN_SHIFT));
+ ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
+}
+
+static void ltq_spi_internal_cs_deactivate(struct spi_device *spi)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u32 fgpo;
+
+ fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
+ ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
+}
+
+static void ltq_spi_chipselect(struct spi_device *spi, int cs)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ struct ltq_spi_controller_state *cstate = spi->controller_state;
+
+ switch (cs) {
+ case BITBANG_CS_ACTIVE:
+ ltq_spi_bits_per_word_set(spi);
+ ltq_spi_speed_set(spi);
+ ltq_spi_clockmode_set(spi);
+ ltq_spi_run_mode_set(hw);
+
+ cstate->cs_activate(spi);
+ break;
+
+ case BITBANG_CS_INACTIVE:
+ cstate->cs_deactivate(spi);
+
+ ltq_spi_config_mode_set(hw);
+
+ break;
+ }
+}
+
+static int ltq_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u8 bits_per_word = spi->bits_per_word;
+
+ hw->curr_transfer = t;
+
+ if (t && t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ if (bits_per_word > 32)
+ return -EINVAL;
+
+ ltq_spi_config_mode_set(hw);
+
+ return 0;
+}
+
+static const struct ltq_spi_cs_gpio_map ltq_spi_cs[] = {
+ { 15, 2 },
+ { 22, 2 },
+ { 13, 1 },
+ { 10, 1 },
+ { 9, 1 },
+ { 11, 3 },
+};
+
+static const struct ltq_spi_cs_gpio_map ltq_spi_cs_ase[] = {
+ { 7, 2 },
+ { 15, 1 },
+ { 14, 1 },
+};
+
+static int ltq_spi_setup(struct spi_device *spi)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ struct ltq_spi_controller_data *cdata = spi->controller_data;
+ struct ltq_spi_controller_state *cstate;
+ u32 gpocon, fgpo;
+ int ret;
+
+ /* Set default word length to 8 if not set */
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+
+ if (!spi->controller_state) {
+ cstate = kzalloc(sizeof(struct ltq_spi_controller_state),
+ GFP_KERNEL);
+ if (!cstate)
+ return -ENOMEM;
+
+ spi->controller_state = cstate;
+ } else
+ return 0;
+
+ /*
+ * Up to six GPIOs can be connected to the SPI module
+ * via GPIO alternate function to control the chip select lines.
+ * For more flexibility in board layout this driver can also control
+ * the CS lines via GPIO API. If GPIOs should be used, board setup code
+ * has to register the SPI device with struct ltq_spi_controller_data
+ * attached.
+ */
+ if (cdata && cdata->gpio) {
+ ret = gpio_request(cdata->gpio, "spi-cs");
+ if (ret)
+ return -EBUSY;
+
+ ret = spi->mode & SPI_CS_HIGH ? 0 : 1;
+ gpio_direction_output(cdata->gpio, ret);
+
+ cstate->cs_activate = ltq_spi_gpio_cs_activate;
+ cstate->cs_deactivate = ltq_spi_gpio_cs_deactivate;
+ } else {
+ struct ltq_spi_cs_gpio_map *cs_map =
+ ltq_is_ase() ? ltq_spi_cs_ase : ltq_spi_cs;
+ ret = ltq_gpio_request(&spi->dev, cs_map[spi->chip_select].gpio,
+ cs_map[spi->chip_select].mux,
+ 1, "spi-cs");
+ if (ret)
+ return -EBUSY;
+
+ gpocon = (1 << (spi->chip_select +
+ LTQ_SPI_GPOCON_ISCSBN_SHIFT));
+
+ if (spi->mode & SPI_CS_HIGH)
+ gpocon |= (1 << spi->chip_select);
+
+ fgpo = (1 << (spi->chip_select + LTQ_SPI_FGPO_SETOUTN_SHIFT));
+
+ ltq_spi_reg_setbit(hw, gpocon, LTQ_SPI_GPOCON);
+ ltq_spi_reg_setbit(hw, fgpo, LTQ_SPI_FGPO);
+
+ cstate->cs_activate = ltq_spi_internal_cs_activate;
+ cstate->cs_deactivate = ltq_spi_internal_cs_deactivate;
+ }
+
+ return 0;
+}
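+
+/*
+ * Sketch of how board setup code might opt into the GPIO chip-select path
+ * described in ltq_spi_setup() above. The GPIO number and the SPI device
+ * details are made up for the example; the only assumption about struct
+ * ltq_spi_controller_data is the .gpio member used above.
+ *
+ *   static struct ltq_spi_controller_data board_spi_cs = {
+ *           .gpio = 21,
+ *   };
+ *
+ *   static struct spi_board_info board_spi_devs[] __initdata = {
+ *           {
+ *                   .modalias        = "m25p80",
+ *                   .bus_num         = 0,
+ *                   .chip_select     = 0,
+ *                   .max_speed_hz    = 10000000,
+ *                   .controller_data = &board_spi_cs,
+ *           },
+ *   };
+ *
+ *   spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
+ */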
+
+static void ltq_spi_cleanup(struct spi_device *spi)
+{
+ struct ltq_spi_controller_data *cdata = spi->controller_data;
+ struct ltq_spi_controller_state *cstate = spi->controller_state;
+ unsigned gpio;
+
+ if (cdata && cdata->gpio)
+ gpio = cdata->gpio;
+ else
+ gpio = ltq_is_ase() ? ltq_spi_cs_ase[spi->chip_select].gpio :
+ ltq_spi_cs[spi->chip_select].gpio;
+
+ gpio_free(gpio);
+ kfree(cstate);
+}
+
+static void ltq_spi_txfifo_write(struct ltq_spi *hw)
+{
+ u32 fstat, data;
+ u16 fifo_space;
+
+ /* Determine how many FIFO entries are free for TX data */
+ fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
+ fifo_space = hw->txfs - ((fstat >> LTQ_SPI_FSTAT_TXFFL_SHIFT) &
+ LTQ_SPI_FSTAT_TXFFL_MASK);
+
+ if (!fifo_space)
+ return;
+
+ while (hw->tx_cnt < hw->len && fifo_space) {
+ data = hw->get_tx(hw);
+ ltq_spi_reg_write(hw, data, LTQ_SPI_TB);
+ fifo_space--;
+ }
+}
+
+static void ltq_spi_rxfifo_read(struct ltq_spi *hw)
+{
+ u32 fstat, data, *rx32;
+ u16 fifo_fill;
+ u8 rxbv, shift, *rx8;
+
+ /* Determine how many FIFO entries are filled with RX data */
+ fstat = ltq_spi_reg_read(hw, LTQ_SPI_FSTAT);
+ fifo_fill = ((fstat >> LTQ_SPI_FSTAT_RXFFL_SHIFT)
+ & LTQ_SPI_FSTAT_RXFFL_MASK);
+
+ if (!fifo_fill)
+ return;
+
+ /*
+ * The 32 bit FIFO is always used completely, independent of the
+ * bits_per_word value. Thus four bytes have to be read at once
+ * per FIFO entry.
+ */
+ rx32 = (u32 *) hw->rx;
+ while (hw->len - hw->rx_cnt >= 4 && fifo_fill) {
+ *rx32++ = ltq_spi_reg_read(hw, LTQ_SPI_RB);
+ hw->rx_cnt += 4;
+ hw->rx += 4;
+ fifo_fill--;
+ }
+
+ /*
+ * If there are remaining bytes, read byte count from STAT.RXBV
+ * register and read the data byte-wise.
+ */
+ while (fifo_fill && hw->rx_cnt < hw->len) {
+ rxbv = (ltq_spi_reg_read(hw, LTQ_SPI_STAT) >>
+ LTQ_SPI_STAT_RXBV_SHIFT) & LTQ_SPI_STAT_RXBV_MASK;
+ data = ltq_spi_reg_read(hw, LTQ_SPI_RB);
+
+ shift = (rxbv - 1) * 8;
+ rx8 = hw->rx;
+
+ while (rxbv) {
+ *rx8++ = (data >> shift) & 0xFF;
+ rxbv--;
+ shift -= 8;
+ hw->rx_cnt++;
+ hw->rx++;
+ }
+
+ fifo_fill--;
+ }
+}
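+
+/*
+ * Example of the two read paths above: for a 6-byte transfer the first
+ * loop drains one full FIFO entry (four bytes), then the byte-wise loop
+ * reads STAT.RXBV (here 2) and extracts the remaining two bytes from the
+ * last 32-bit word, most significant byte first.
+ */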
+
+static void ltq_spi_rxreq_set(struct ltq_spi *hw)
+{
+ u32 rxreq, rxreq_max, rxtodo;
+
+ rxtodo = ltq_spi_reg_read(hw, LTQ_SPI_RXCNT) & LTQ_SPI_RXCNT_TODO_MASK;
+
+ /*
+ * In RX-only mode the serial clock is activated only after writing
+ * the expected amount of RX bytes into RXREQ register.
+ * To avoid receive overflows at high clocks it is better to request
+ * only the amount of bytes that fits into all FIFOs. This value
+ * depends on the FIFO size implemented in hardware.
+ */
+ rxreq = hw->len - hw->rx_cnt;
+ rxreq_max = hw->rxfs << 2;
+ rxreq = min(rxreq_max, rxreq);
+
+ if (!rxtodo && rxreq)
+ ltq_spi_reg_write(hw, rxreq, LTQ_SPI_RXREQ);
+}
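+
+/*
+ * Example of the chunking above: with an 8-entry RX FIFO (hw->rxfs = 8)
+ * rxreq_max is 32 bytes, so a 100-byte RX-only transfer is clocked in as
+ * requests of 32, 32, 32 and finally 4 bytes; the first request is issued
+ * when the transfer starts and the following ones from the RX interrupt
+ * handler once the previous one has been drained.
+ */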
+
+static inline void ltq_spi_complete(struct ltq_spi *hw)
+{
+ complete(&hw->done);
+}
+
+irqreturn_t ltq_spi_tx_irq(int irq, void *data)
+{
+ struct ltq_spi *hw = data;
+ unsigned long flags;
+ int completed = 0;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ if (hw->tx_cnt < hw->len)
+ ltq_spi_txfifo_write(hw);
+
+ if (hw->tx_cnt == hw->len)
+ completed = 1;
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (completed)
+ ltq_spi_complete(hw);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ltq_spi_rx_irq(int irq, void *data)
+{
+ struct ltq_spi *hw = data;
+ unsigned long flags;
+ int completed = 0;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ if (hw->rx_cnt < hw->len) {
+ ltq_spi_rxfifo_read(hw);
+
+ if (hw->tx && hw->tx_cnt < hw->len)
+ ltq_spi_txfifo_write(hw);
+ }
+
+ if (hw->rx_cnt == hw->len)
+ completed = 1;
+ else if (!hw->tx)
+ ltq_spi_rxreq_set(hw);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (completed)
+ ltq_spi_complete(hw);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t ltq_spi_err_irq(int irq, void *data)
+{
+ struct ltq_spi *hw = data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ /* Disable all interrupts */
+ ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
+
+ /* Clear all error flags */
+ ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
+
+ /* Flush FIFOs */
+ ltq_spi_reg_setbit(hw, LTQ_SPI_RXFCON_RXFLU, LTQ_SPI_RXFCON);
+ ltq_spi_reg_setbit(hw, LTQ_SPI_TXFCON_TXFLU, LTQ_SPI_TXFCON);
+
+ hw->status = -EIO;
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ ltq_spi_complete(hw);
+
+ return IRQ_HANDLED;
+}
+
+static int ltq_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ltq_spi *hw = ltq_spi_to_hw(spi);
+ u32 irq_flags = 0;
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->tx_cnt = 0;
+ hw->rx_cnt = 0;
+ hw->status = 0;
+ INIT_COMPLETION(hw->done);
+
+ ltq_spi_xmit_set(hw, t);
+
+ /* Enable error interrupts */
+ ltq_spi_reg_setbit(hw, LTQ_SPI_IRNEN_E, LTQ_SPI_IRNEN);
+
+ if (hw->tx) {
+ /* Initially fill TX FIFO with as much data as possible */
+ ltq_spi_txfifo_write(hw);
+ irq_flags |= LTQ_SPI_IRNEN_T;
+
+ /* Always enable RX interrupt in Full Duplex mode */
+ if (hw->rx)
+ irq_flags |= LTQ_SPI_IRNEN_R;
+ } else if (hw->rx) {
+ /* Start RX clock */
+ ltq_spi_rxreq_set(hw);
+
+ /* Enable RX interrupt to receive data from RX FIFOs */
+ irq_flags |= LTQ_SPI_IRNEN_R;
+ }
+
+ /* Enable TX or RX interrupts */
+ ltq_spi_reg_setbit(hw, irq_flags, LTQ_SPI_IRNEN);
+ wait_for_completion_interruptible(&hw->done);
+
+ /* Disable all interrupts */
+ ltq_spi_reg_clearbit(hw, LTQ_SPI_IRNEN_ALL, LTQ_SPI_IRNEN);
+
+ /*
+ * Return length of current transfer for bitbang utility code if
+ * no errors occurred during transmission.
+ */
+ if (!hw->status)
+ hw->status = hw->len;
+
+ return hw->status;
+}
+
+static const struct ltq_spi_irq_map ltq_spi_irqs[] = {
+ { "spi_tx", ltq_spi_tx_irq },
+ { "spi_rx", ltq_spi_rx_irq },
+ { "spi_err", ltq_spi_err_irq },
+};
+
+static int __devinit
+ltq_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct resource *r;
+ struct ltq_spi *hw;
+ struct ltq_spi_platform_data *pdata = pdev->dev.platform_data;
+ int ret, i;
+ u32 data, id;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct ltq_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ hw = spi_master_get_devdata(master);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "platform_get_resource\n");
+ ret = -ENOENT;
+ goto err_master;
+ }
+
+ r = devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
+ pdev->name);
+ if (!r) {
+ dev_err(&pdev->dev, "devm_request_mem_region\n");
+ ret = -ENXIO;
+ goto err_master;
+ }
+
+ hw->base = devm_ioremap_nocache(&pdev->dev, r->start, resource_size(r));
+ if (!hw->base) {
+ dev_err(&pdev->dev, "devm_ioremap_nocache\n");
+ ret = -ENXIO;
+ goto err_master;
+ }
+
+ hw->fpiclk = clk_get_fpi();
+ if (IS_ERR(hw->fpiclk)) {
+ dev_err(&pdev->dev, "fpi clk\n");
+ ret = PTR_ERR(hw->fpiclk);
+ goto err_master;
+ }
+
+ hw->spiclk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hw->spiclk)) {
+ dev_err(&pdev->dev, "spi clk\n");
+ ret = PTR_ERR(hw->spiclk);
+ goto err_master;
+ }
+
+ memset(hw->irq, 0, sizeof(hw->irq));
+ for (i = 0; i < ARRAY_SIZE(ltq_spi_irqs); i++) {
+ ret = platform_get_irq_byname(pdev, ltq_spi_irqs[i].name);
+ if (0 > ret) {
+ dev_err(&pdev->dev, "platform_get_irq_byname\n");
+ goto err_irq;
+ }
+
+ hw->irq[i] = ret;
+ ret = request_irq(hw->irq[i], ltq_spi_irqs[i].handler,
+ 0, ltq_spi_irqs[i].name, hw);
+ if (ret) {
+ dev_err(&pdev->dev, "request_irq\n");
+ goto err_irq;
+ }
+ }
+
+ hw->bitbang.master = spi_master_get(master);
+ hw->bitbang.chipselect = ltq_spi_chipselect;
+ hw->bitbang.setup_transfer = ltq_spi_setup_transfer;
+ hw->bitbang.txrx_bufs = ltq_spi_txrx_bufs;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = ltq_spi_setup;
+ master->cleanup = ltq_spi_cleanup;
+
+ hw->dev = &pdev->dev;
+ init_completion(&hw->done);
+ spin_lock_init(&hw->lock);
+
+ /* Set GPIO alternate functions to SPI */
+ ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_DI, 2, 0, "spi-di");
+ ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_DO, 2, 1, "spi-do");
+ ltq_gpio_request(&pdev->dev, LTQ_SPI_GPIO_CLK, 2, 1, "spi-clk");
+
+ ltq_spi_hw_enable(hw);
+
+ /* Read module capabilities */
+ id = ltq_spi_reg_read(hw, LTQ_SPI_ID);
+ hw->txfs = (id >> LTQ_SPI_ID_TXFS_SHIFT) & LTQ_SPI_ID_TXFS_MASK;
+ hw->rxfs = (id >> LTQ_SPI_ID_RXFS_SHIFT) & LTQ_SPI_ID_RXFS_MASK;
+ hw->dma_support = (id & LTQ_SPI_ID_CFG) ? 1 : 0;
+
+ ltq_spi_config_mode_set(hw);
+
+ /* Enable error checking, disable TX/RX, set idle value high */
+ data = LTQ_SPI_CON_RUEN | LTQ_SPI_CON_AEN |
+ LTQ_SPI_CON_TEN | LTQ_SPI_CON_REN |
+ LTQ_SPI_CON_TXOFF | LTQ_SPI_CON_RXOFF | LTQ_SPI_CON_IDLE;
+ ltq_spi_reg_write(hw, data, LTQ_SPI_CON);
+
+ /* Enable master mode and clear error flags */
+ ltq_spi_reg_write(hw, LTQ_SPI_WHBSTATE_SETMS |
+ LTQ_SPI_WHBSTATE_CLR_ERRORS, LTQ_SPI_WHBSTATE);
+
+ /* Reset GPIO/CS registers */
+ ltq_spi_reg_write(hw, 0x0, LTQ_SPI_GPOCON);
+ ltq_spi_reg_write(hw, 0xFF00, LTQ_SPI_FGPO);
+
+ /* Enable and flush FIFOs */
+ ltq_spi_reset_fifos(hw);
+
+ ret = spi_bitbang_start(&hw->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_bitbang_start\n");
+ goto err_bitbang;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ pr_info("Lantiq SoC SPI controller rev %u (TXFS %u, RXFS %u, DMA %u)\n",
+ id & LTQ_SPI_ID_REV_MASK, hw->txfs, hw->rxfs, hw->dma_support);
+
+ return 0;
+
+err_bitbang:
+ ltq_spi_hw_disable(hw);
+
+err_irq:
+ clk_put(hw->fpiclk);
+
+ for (; i > 0; i--)
+ free_irq(hw->irq[i - 1], hw);
+
+err_master:
+ spi_master_put(master);
+
+err:
+ return ret;
+}
+
+static int __devexit
+ltq_spi_remove(struct platform_device *pdev)
+{
+ struct ltq_spi *hw = platform_get_drvdata(pdev);
+ int ret, i;
+
+ ret = spi_bitbang_stop(&hw->bitbang);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, NULL);
+
+ ltq_spi_config_mode_set(hw);
+ ltq_spi_hw_disable(hw);
+
+ for (i = 0; i < ARRAY_SIZE(hw->irq); i++)
+ if (0 < hw->irq[i])
+ free_irq(hw->irq[i], hw);
+
+ gpio_free(LTQ_SPI_GPIO_DI);
+ gpio_free(LTQ_SPI_GPIO_DO);
+ gpio_free(LTQ_SPI_GPIO_CLK);
+
+ clk_put(hw->fpiclk);
+ spi_master_put(hw->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver ltq_spi_driver = {
+ .probe = ltq_spi_probe,
+ .remove = __devexit_p(ltq_spi_remove),
+ .driver = {
+ .name = "ltq_spi",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ltq_spi_driver);
+
+MODULE_DESCRIPTION("Lantiq SoC SPI controller driver");
+MODULE_AUTHOR("Daniel Schwierzeck <daniel.schwierzeck@googlemail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ltq-spi");
diff --git a/target/linux/lantiq/files-3.3/drivers/spi/spi_svip.c b/target/linux/lantiq/files-3.3/drivers/spi/spi_svip.c
new file mode 100644
index 0000000000..ae25c20b3a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/spi/spi_svip.c
@@ -0,0 +1,955 @@
+/************************************************************************
+ *
+ * Copyright (c) 2008
+ * Infineon Technologies AG
+ * St. Martin Strasse 53; 81669 Muenchen; Germany
+ *
+ * Inspired by Atmel AT32/AT91 SPI Controller driver
+ * Copyright (c) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ ************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#include <asm/io.h>
+
+#include <status_reg.h>
+#include <base_reg.h>
+#include <ssc_reg.h>
+#include <sys0_reg.h>
+#include <sys1_reg.h>
+
+#define SFRAME_SIZE 512 /* bytes */
+#define FIFO_HEADROOM 2 /* words */
+
+#define SVIP_SSC_RFIFO_WORDS 8
+
+enum svip_ssc_dir {
+ SSC_RXTX,
+ SSC_RX,
+ SSC_TX,
+ SSC_UNDEF
+};
+
+/*
+ * The core SPI transfer engine just talks to a register bank to set up
+ * DMA transfers; transfer queue progress is driven by IRQs. The clock
+ * framework provides the base clock, subdivided for each spi_device.
+ */
+struct svip_ssc_device {
+ struct svip_reg_ssc *regs;
+ enum svip_ssc_dir bus_dir;
+ struct spi_device *stay;
+
+ u8 stopping;
+ struct list_head queue;
+ struct spi_transfer *current_transfer;
+ int remaining_bytes;
+ int rx_bytes;
+ int tx_bytes;
+
+ char intname[4][16];
+
+ spinlock_t lock;
+};
+
+static int svip_ssc_setup(struct spi_device *spi);
+
+extern unsigned int ltq_get_fbs0_hz(void);
+
+static void cs_activate(struct svip_ssc_device *ssc_dev, struct spi_device *spi)
+{
+ ssc_dev->regs->whbgpostat = 0x0001 << spi->chip_select; /* activate the chip select */
+}
+
+static void cs_deactivate(struct svip_ssc_device *ssc_dev, struct spi_device *spi)
+{
+ ssc_dev->regs->whbgpostat = 0x0100 << spi->chip_select; /* deactivate the chip select */
+}
+
+/*
+ * "Normally" returns Byte Valid = 4.
+ * If the unaligned remainder of the packet is 3 bytes, these have to be
+ * transferred as a combination of a 16-bit and a 8-bit FPI transfer. For
+ * 2 or 1 remaining bytes a single 16-bit or 8-bit transfer will do.
+ */
+static inline int _estimate_bv(int byte_pos, int bytelen)
+{
+ int remainder = bytelen % 4;
+
+ if (byte_pos < (bytelen - remainder))
+ return 4;
+
+ if (remainder == 3)
+ {
+ if (byte_pos == (bytelen - remainder))
+ return 2;
+ else
+ return 1;
+ }
+ return remainder;
+}
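+
+/*
+ * Worked example for the helper above: a 7-byte packet has a remainder of
+ * 3, so it is transferred as a 4-byte, a 2-byte and a 1-byte FPI access:
+ * _estimate_bv(0, 7) = 4, _estimate_bv(4, 7) = 2, _estimate_bv(6, 7) = 1.
+ */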
+
+/*
+ * Submit next transfer.
+ * lock is held, spi irq is blocked
+ */
+static void svip_ssc_next_xfer(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ unsigned char *buf_ptr;
+
+ xfer = ssc_dev->current_transfer;
+ if (!xfer || ssc_dev->remaining_bytes == 0) {
+ if (xfer)
+ xfer = list_entry(xfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ else
+ xfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ ssc_dev->remaining_bytes = xfer->len;
+ ssc_dev->rx_bytes = 0;
+ ssc_dev->tx_bytes = 0;
+ ssc_dev->current_transfer = xfer;
+ ssc_dev->regs->sfcon = 0; /* reset Serial Framing */
+
+ /* enable and flush RX/TX FIFO */
+ ssc_dev->regs->rxfcon =
+ SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS-FIFO_HEADROOM) |
+ SSC_RXFCON_RXFLU | /* Receive FIFO Flush */
+ SSC_RXFCON_RXFEN; /* Receive FIFO Enable */
+
+ ssc_dev->regs->txfcon =
+ SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM) |
+ SSC_TXFCON_TXFLU | /* Transmit FIFO Flush */
+ SSC_TXFCON_TXFEN; /* Transmit FIFO Enable */
+
+ asm("sync");
+
+ /* select mode RXTX, RX or TX */
+ if (xfer->rx_buf && xfer->tx_buf) /* RX and TX */
+ {
+ if (ssc_dev->bus_dir != SSC_RXTX)
+ {
+ ssc_dev->regs->mcon &= ~(SSC_MCON_RXOFF | SSC_MCON_TXOFF);
+ ssc_dev->bus_dir = SSC_RXTX;
+ ssc_dev->regs->irnen = SSC_IRNEN_T | SSC_IRNEN_F | SSC_IRNEN_E;
+ }
+ ssc_dev->regs->sfcon =
+ SSC_SFCON_PLEN_VAL(0) |
+ SSC_SFCON_DLEN_VAL(((xfer->len-1)%SFRAME_SIZE)*8+7) |
+ SSC_SFCON_STOP |
+ SSC_SFCON_ICLK_VAL(2) |
+ SSC_SFCON_IDAT_VAL(2) |
+ SSC_SFCON_IAEN |
+ SSC_SFCON_SFEN;
+
+ }
+ else if (xfer->rx_buf) /* RX only */
+ {
+ if (ssc_dev->bus_dir != SSC_RX)
+ {
+ ssc_dev->regs->mcon =
+ (ssc_dev->regs->mcon | SSC_MCON_TXOFF) & ~SSC_MCON_RXOFF;
+
+ ssc_dev->bus_dir = SSC_RX;
+
+ ssc_dev->regs->irnen = SSC_IRNEN_R | SSC_IRNEN_E;
+ }
+ /* Initiate clock generation for Rx-Only Transfer. In case of RX-only transfer,
+ * rx_bytes represents the number of already requested bytes.
+ */
+ ssc_dev->rx_bytes = min(xfer->len, (unsigned)(SVIP_SSC_RFIFO_WORDS*4));
+ ssc_dev->regs->rxreq = ssc_dev->rx_bytes;
+ }
+ else /* TX only */
+ {
+ if (ssc_dev->bus_dir != SSC_TX)
+ {
+ ssc_dev->regs->mcon =
+ (ssc_dev->regs->mcon | SSC_MCON_RXOFF) & ~SSC_MCON_TXOFF;
+
+ ssc_dev->bus_dir = SSC_TX;
+
+ ssc_dev->regs->irnen =
+ SSC_IRNEN_T | SSC_IRNEN_F | SSC_IRNEN_E;
+ }
+ ssc_dev->regs->sfcon =
+ SSC_SFCON_PLEN_VAL(0) |
+ SSC_SFCON_DLEN_VAL(((xfer->len-1)%SFRAME_SIZE)*8+7) |
+ SSC_SFCON_STOP |
+ SSC_SFCON_ICLK_VAL(2) |
+ SSC_SFCON_IDAT_VAL(2) |
+ SSC_SFCON_IAEN |
+ SSC_SFCON_SFEN;
+ }
+ }
+
+ if (xfer->tx_buf)
+ {
+ int outstanding;
+ int i;
+ int fstat = ssc_dev->regs->fstat;
+ int txffl = SSC_FSTAT_TXFFL_GET(fstat);
+ int rxffl = SSC_FSTAT_RXFFL_GET(fstat);
+
+ outstanding = txffl;
+
+ if (xfer->rx_buf)
+ {
+ outstanding += rxffl;
+ if (SSC_STATE_BSY_GET(ssc_dev->regs->state))
+ outstanding++;
+
+ while (rxffl) /* is 0 in TX-Only mode */
+ {
+ unsigned int rb;
+ int rxbv = _estimate_bv(ssc_dev->rx_bytes, xfer->len);
+ rb = ssc_dev->regs->rb;
+ for (i=0; i<rxbv; i++)
+ {
+ ((unsigned char*)xfer->rx_buf)[ssc_dev->rx_bytes] =
+ (rb >> ((rxbv-i-1)*8)) & 0xFF;
+
+ ssc_dev->rx_bytes++;
+ }
+ rxffl--;
+ outstanding--;
+ }
+ ssc_dev->remaining_bytes = xfer->len - ssc_dev->rx_bytes;
+ }
+
+ /* for last Tx cycle set TxFifo threshold to 0 */
+ if ((xfer->len - ssc_dev->tx_bytes) <=
+ (4*(SVIP_SSC_RFIFO_WORDS-1-outstanding)))
+ {
+ ssc_dev->regs->txfcon = SSC_TXFCON_TXFITL_VAL(0) |
+ SSC_TXFCON_TXFEN;
+ }
+
+ while ((ssc_dev->tx_bytes < xfer->len) &&
+ (outstanding < (SVIP_SSC_RFIFO_WORDS-1)))
+ {
+ unsigned int tb = 0;
+ int txbv = _estimate_bv(ssc_dev->tx_bytes, xfer->len);
+
+ for (i=0; i<txbv; i++)
+ {
+ tb |= ((unsigned char*)xfer->tx_buf)[ssc_dev->tx_bytes] <<
+ ((txbv-i-1)*8);
+
+ ssc_dev->tx_bytes++;
+ }
+ switch(txbv)
+ {
+#ifdef __BIG_ENDIAN
+ case 1:
+ *((unsigned char *)(&(ssc_dev->regs->tb))+3) = tb & 0xFF;
+ break;
+ case 2:
+ *((unsigned short *)(&(ssc_dev->regs->tb))+1) = tb & 0xFFFF;
+ break;
+#else /* __LITTLE_ENDIAN */
+ case 1:
+ *((unsigned char *)(&(ssc_dev->regs->tb))) = tb & 0xFF;
+ break;
+ case 2:
+ *((unsigned short *)(&(ssc_dev->regs->tb))) = tb & 0xFFFF;
+ break;
+#endif
+ default:
+ ssc_dev->regs->tb = tb;
+ }
+ outstanding++;
+ }
+ }
+ else /* xfer->tx_buf == NULL -> RX only! */
+ {
+ int j;
+ int rxffl = SSC_FSTAT_RXFFL_GET(ssc_dev->regs->fstat);
+ int rxbv = 0;
+ unsigned int rbuf;
+
+ buf_ptr = (unsigned char*)xfer->rx_buf +
+ (xfer->len - ssc_dev->remaining_bytes);
+
+ for (j = 0; j < rxffl; j++)
+ {
+ rxbv = SSC_STATE_RXBV_GET(ssc_dev->regs->state);
+ rbuf = ssc_dev->regs->rb;
+
+ if (rxbv == 4)
+ {
+ *((unsigned int*)buf_ptr+j) = ntohl(rbuf);
+ }
+ else
+ {
+ int b;
+#ifdef __BIG_ENDIAN
+ for (b = 0; b < rxbv; b++)
+ {
+ buf_ptr[4*j+b] = ((unsigned char*)(&rbuf))[4-rxbv+b];
+ }
+#else /* __LITTLE_ENDIAN */
+ for (b = 0; b < rxbv; b++)
+ {
+ buf_ptr[4*j+b] = ((unsigned char*)(&rbuf))[rxbv-1-b];
+ }
+#endif
+ }
+ ssc_dev->remaining_bytes -= rxbv;
+ }
+ if ((ssc_dev->rx_bytes < xfer->len) &&
+ !SSC_STATE_BSY_GET(ssc_dev->regs->state))
+ {
+ int rxreq = min(xfer->len - ssc_dev->rx_bytes,
+ (unsigned)(SVIP_SSC_RFIFO_WORDS*4));
+
+ ssc_dev->rx_bytes += rxreq;
+ ssc_dev->regs->rxreq = rxreq;
+ }
+
+ if (ssc_dev->remaining_bytes < 0)
+ {
+ printk("ssc_dev->remaining_bytes = %d! xfer->len = %d, "
+ "rxffl=%d, rxbv=%d\n", ssc_dev->remaining_bytes, xfer->len,
+ rxffl, rxbv);
+
+ ssc_dev->remaining_bytes = 0;
+ }
+ }
+}
+
+/*
+ * Submit next message.
+ * lock is held
+ */
+static void svip_ssc_next_message(struct spi_master *master)
+{
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_message *msg;
+ struct spi_device *spi;
+
+ BUG_ON(ssc_dev->current_transfer);
+
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
+ spi = msg->spi;
+
+ dev_dbg(master->dev.parent, "start message %p on %p\n", msg, spi);
+
+ /* select chip if it's not still active */
+ if (ssc_dev->stay) {
+ if (ssc_dev->stay != spi) {
+ cs_deactivate(ssc_dev, ssc_dev->stay);
+ svip_ssc_setup(spi);
+ cs_activate(ssc_dev, spi);
+ }
+ ssc_dev->stay = NULL;
+ }
+ else {
+ svip_ssc_setup(spi);
+ cs_activate(ssc_dev, spi);
+ }
+
+ svip_ssc_next_xfer(master, msg);
+}
+
+/*
+ * Report message completion.
+ * lock is held
+ */
+static void
+svip_ssc_msg_done(struct spi_master *master, struct svip_ssc_device *ssc_dev,
+ struct spi_message *msg, int status, int stay)
+{
+ if (!stay || status < 0)
+ cs_deactivate(ssc_dev, msg->spi);
+ else
+ ssc_dev->stay = msg->spi;
+
+ list_del(&msg->queue);
+ msg->status = status;
+
+ dev_dbg(master->dev.parent,
+ "xfer complete: %u bytes transferred\n",
+ msg->actual_length);
+
+ spin_unlock(&ssc_dev->lock);
+ msg->complete(msg->context);
+ spin_lock(&ssc_dev->lock);
+
+ ssc_dev->current_transfer = NULL;
+
+ /* continue if needed */
+ if (list_empty(&ssc_dev->queue) || ssc_dev->stopping)
+ ; /* TODO: disable hardware */
+ else
+ svip_ssc_next_message(master);
+}
+
+static irqreturn_t svip_ssc_eir_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = (struct platform_device*)dev_id;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+
+ dev_err (&pdev->dev, "ERROR: errirq. STATE = 0x%0lx\n",
+ ssc_dev->regs->state);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t svip_ssc_rir_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = (struct platform_device*)dev_id;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_message *msg;
+ struct spi_transfer *xfer;
+
+ xfer = ssc_dev->current_transfer;
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
+
+ /* Tx and Rx interrupts are fairly unpredictable, so just bail out of the
+ * handler on spurious interrupts.
+ */
+ if (!xfer) {
+ dev_dbg(master->dev.parent,
+ "%s(%d): xfer = NULL\n", __FUNCTION__, irq);
+ goto out;
+ }
+ if ( !(xfer->rx_buf) ) {
+ dev_dbg(master->dev.parent,
+ "%s(%d): xfer->rx_buf = NULL\n", __FUNCTION__, irq);
+ goto out;
+ }
+ if (ssc_dev->remaining_bytes > 0)
+ {
+ /*
+ * Keep going, we still have data to send in
+ * the current transfer.
+ */
+ svip_ssc_next_xfer(master, msg);
+ }
+
+ if (ssc_dev->remaining_bytes == 0)
+ {
+ msg->actual_length += xfer->len;
+
+ if (msg->transfers.prev == &xfer->transfer_list) {
+ /* report completed message */
+ svip_ssc_msg_done(master, ssc_dev, msg, 0,
+ xfer->cs_change);
+ }
+ else {
+ if (xfer->cs_change) {
+ cs_deactivate(ssc_dev, msg->spi);
+ udelay(1); /* not nice in interrupt context */
+ cs_activate(ssc_dev, msg->spi);
+ }
+
+ /* Not done yet. Submit the next transfer. */
+ svip_ssc_next_xfer(master, msg);
+ }
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t svip_ssc_tir_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = (struct platform_device*)dev_id;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_message *msg;
+ struct spi_transfer *xfer;
+ int tx_remain;
+
+ xfer = ssc_dev->current_transfer;
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
+
+ /* Tx and Rx interrupts are fairly unpredictable, so just bail out of the
+ * handler on spurious interrupts.
+ */
+ if (!xfer) {
+ dev_dbg(master->dev.parent,
+ "%s(%d): xfer = NULL\n", __FUNCTION__, irq);
+ goto out;
+ }
+ if ( !(xfer->tx_buf) ) {
+ dev_dbg(master->dev.parent,
+ "%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__, irq);
+ goto out;
+ }
+
+ if (ssc_dev->remaining_bytes > 0)
+ {
+ tx_remain = xfer->len - ssc_dev->tx_bytes;
+ if ( tx_remain == 0 )
+ {
+ dev_dbg(master->dev.parent,
+ "%s(%d): tx_remain = 0\n", __FUNCTION__, irq);
+ }
+ else
+ /*
+ * Keep going, we still have data to send in
+ * the current transfer.
+ */
+ svip_ssc_next_xfer(master, msg);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t svip_ssc_fir_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = (struct platform_device*)dev_id;
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_message *msg;
+ struct spi_transfer *xfer;
+
+ xfer = ssc_dev->current_transfer;
+ msg = list_entry(ssc_dev->queue.next, struct spi_message, queue);
+
+ /* Tx and Rx interrupts are fairly unpredictable, so just bail out of the
+ * handler on spurious interrupts.
+ */
+ if (!xfer) {
+ dev_dbg(master->dev.parent,
+ "%s(%d): xfer = NULL\n", __FUNCTION__, irq);
+ goto out;
+ }
+ if ( !(xfer->tx_buf) ) {
+ dev_dbg(master->dev.parent,
+ "%s(%d): xfer->tx_buf = NULL\n", __FUNCTION__, irq);
+ goto out;
+ }
+
+ if (ssc_dev->remaining_bytes > 0)
+ {
+ int tx_remain = xfer->len - ssc_dev->tx_bytes;
+
+ if (tx_remain == 0)
+ {
+ /* Frame interrupt gets raised _before_ last Rx interrupt */
+ if (xfer->rx_buf)
+ {
+ svip_ssc_next_xfer(master, msg);
+ if (ssc_dev->remaining_bytes)
+ printk("expected RXTX transfer to be complete!\n");
+ }
+ ssc_dev->remaining_bytes = 0;
+ }
+ else
+ {
+ ssc_dev->regs->sfcon = SSC_SFCON_PLEN_VAL(0) |
+ SSC_SFCON_DLEN_VAL(SFRAME_SIZE*8-1) |
+ SSC_SFCON_STOP |
+ SSC_SFCON_ICLK_VAL(2) |
+ SSC_SFCON_IDAT_VAL(2) |
+ SSC_SFCON_IAEN |
+ SSC_SFCON_SFEN;
+ }
+ }
+
+ if (ssc_dev->remaining_bytes == 0)
+ {
+ msg->actual_length += xfer->len;
+
+ if (msg->transfers.prev == &xfer->transfer_list) {
+ /* report completed message */
+ svip_ssc_msg_done(master, ssc_dev, msg, 0,
+ xfer->cs_change);
+ }
+ else {
+ if (xfer->cs_change) {
+ cs_deactivate(ssc_dev, msg->spi);
+ udelay(1); /* not nice in interrupt context */
+ cs_activate(ssc_dev, msg->spi);
+ }
+
+ /* Not done yet. Submit the next transfer. */
+ svip_ssc_next_xfer(master, msg);
+ }
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP)
+
+static int svip_ssc_setup(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ unsigned int bits = spi->bits_per_word;
+ unsigned int br, sck_hz = spi->max_speed_hz;
+ unsigned long flags;
+
+ if (ssc_dev->stopping)
+ return -ESHUTDOWN;
+
+ if (spi->chip_select >= master->num_chipselect) {
+ dev_dbg(&spi->dev,
+ "setup: invalid chipselect %u (%u defined)\n",
+ spi->chip_select, master->num_chipselect);
+ return -EINVAL;
+ }
+
+ if (bits == 0)
+ bits = 8;
+ if (bits != 8) {
+ dev_dbg(&spi->dev,
+ "setup: invalid bits_per_word %u (expect 8)\n",
+ bits);
+ return -EINVAL;
+ }
+
+ if (spi->mode & ~MODEBITS) {
+ dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+ spi->mode & ~MODEBITS);
+ return -EINVAL;
+ }
+
+ /* Disable SSC */
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;
+
+ if (sck_hz == 0)
+ sck_hz = 10000;
+
+ br = ltq_get_fbs0_hz() / (2 * sck_hz);
+ if (ltq_get_fbs0_hz() % (2 * sck_hz) == 0)
+ br = br - 1;
+ ssc_dev->regs->br = br;
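+	/*
+	 * Worked example (illustrative, with an assumed clock): if
+	 * ltq_get_fbs0_hz() returns 100 MHz and the slave requests 1 MHz,
+	 * br = 100000000 / (2 * 1000000) = 50; the division is exact, so br
+	 * is decremented to 49. Assuming the block divides the input clock
+	 * by 2 * (br + 1), SCK then runs at exactly 1 MHz; for non-exact
+	 * divisions br is left as-is and SCK ends up below the requested
+	 * rate.
+	 */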
+
+ /* set Control Register */
+ ssc_dev->regs->mcon = SSC_MCON_ENBV |
+ SSC_MCON_RUEN |
+ SSC_MCON_TUEN |
+ SSC_MCON_AEN |
+ SSC_MCON_REN |
+ SSC_MCON_TEN |
+ (spi->mode & SPI_CPOL ? SSC_MCON_PO : 0) | /* Clock Polarity */
+ (spi->mode & SPI_CPHA ? 0 : SSC_MCON_PH) | /* Tx on trailing edge */
+ (spi->mode & SPI_LOOP ? SSC_MCON_LB : 0) | /* Loopback */
+ (spi->mode & SPI_LSB_FIRST ? 0 : SSC_MCON_HB); /* MSB first */
+ ssc_dev->bus_dir = SSC_UNDEF;
+
+ /* Enable SSC */
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_SETEN;
+ asm("sync");
+
+ spin_lock_irqsave(&ssc_dev->lock, flags);
+ if (ssc_dev->stay == spi)
+ ssc_dev->stay = NULL;
+ cs_deactivate(ssc_dev, spi);
+ spin_unlock_irqrestore(&ssc_dev->lock, flags);
+
+ dev_dbg(&spi->dev,
+ "setup: %u Hz bpw %u mode 0x%02x cs %u\n",
+ sck_hz, bits, spi->mode, spi->chip_select);
+
+ return 0;
+}
+
+static int svip_ssc_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct spi_master *master = spi->master;
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ unsigned long flags;
+
+ dev_dbg(&spi->dev, "new message %p submitted\n", msg);
+
+ if (unlikely(list_empty(&msg->transfers)
+ || !spi->max_speed_hz)) {
+ return -EINVAL;
+ }
+
+ if (ssc_dev->stopping)
+ return -ESHUTDOWN;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!(xfer->tx_buf || xfer->rx_buf) || (xfer->len == 0)) {
+ dev_dbg(&spi->dev, "missing rx or tx buf\n");
+ return -EINVAL;
+ }
+
+ /* FIXME implement these protocol options!! */
+ if (xfer->bits_per_word || xfer->speed_hz) {
+ dev_dbg(&spi->dev, "no protocol options yet\n");
+ return -ENOPROTOOPT;
+ }
+
+#ifdef VERBOSE
+		dev_dbg(&spi->dev,
+ " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ xfer, xfer->len,
+ xfer->tx_buf, xfer->tx_dma,
+ xfer->rx_buf, xfer->rx_dma);
+#endif
+ }
+
+ msg->status = -EINPROGRESS;
+ msg->actual_length = 0;
+
+ spin_lock_irqsave(&ssc_dev->lock, flags);
+ list_add_tail(&msg->queue, &ssc_dev->queue);
+ if (!ssc_dev->current_transfer)
+ {
+ /* start transmission machine, if not started yet */
+ svip_ssc_next_message(master);
+ }
+ spin_unlock_irqrestore(&ssc_dev->lock, flags);
+
+ return 0;
+}
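+
+/*
+ * Illustrative sketch (not part of the original driver): how a protocol
+ * driver would hand a message to this controller. spi_sync() ends up in
+ * svip_ssc_transfer() above, which queues the message and starts
+ * svip_ssc_next_message() if the controller is idle. Buffer contents and
+ * lengths are made-up examples.
+ */
+#if 0	/* example only, not compiled */
+static int example_ssc_xfer(struct spi_device *spi)
+{
+	u8 tx[4] = { 0x9f, 0x00, 0x00, 0x00 };	/* arbitrary command bytes */
+	u8 rx[4];
+	struct spi_transfer t = {
+		.tx_buf	= tx,
+		.rx_buf	= rx,
+		.len	= sizeof(tx),
+	};
+	struct spi_message m;
+
+	spi_message_init(&m);
+	spi_message_add_tail(&t, &m);
+	return spi_sync(spi, &m);	/* blocks until msg->complete() runs */
+}
+#endif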
+
+static void svip_ssc_cleanup(struct spi_device *spi)
+{
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ if (!spi->controller_state)
+ return;
+
+ spin_lock_irqsave(&ssc_dev->lock, flags);
+ if (ssc_dev->stay == spi) {
+ ssc_dev->stay = NULL;
+ cs_deactivate(ssc_dev, spi);
+ }
+ spin_unlock_irqrestore(&ssc_dev->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int __init svip_ssc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct spi_master *master;
+ struct svip_ssc_device *ssc_dev;
+ struct resource *res_regs;
+ int irq;
+
+ ret = -ENOMEM;
+
+	/* set up the SPI core, then the SVIP SSC-specific driver state */
+ master = spi_alloc_master(&pdev->dev, sizeof (*ssc_dev));
+ if (!master)
+ {
+ dev_err (&pdev->dev, "ERROR: no memory for master spi\n");
+ goto errout;
+ }
+
+ ssc_dev = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, master);
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = 8;
+ master->mode_bits = MODEBITS;
+ master->setup = svip_ssc_setup;
+ master->transfer = svip_ssc_transfer;
+ master->cleanup = svip_ssc_cleanup;
+
+ spin_lock_init(&ssc_dev->lock);
+ INIT_LIST_HEAD(&ssc_dev->queue);
+
+	/* retrieve register configuration */
+ res_regs = platform_get_resource_byname (pdev, IORESOURCE_MEM, "regs");
+ if (NULL == res_regs)
+ {
+		dev_err (&pdev->dev, "ERROR: missing 'regs' resource\n");
+ goto spierr;
+ }
+
+ ssc_dev->regs = (struct svip_reg_ssc*)KSEG1ADDR(res_regs->start);
+
+ irq = platform_get_irq_byname (pdev, "tx");
+ if (irq < 0)
+ goto irqerr;
+ sprintf(ssc_dev->intname[0], "%s_tx", pdev->name);
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_tir_handler,
+ IRQF_DISABLED, ssc_dev->intname[0], pdev);
+ if (ret != 0)
+ goto irqerr;
+
+ irq = platform_get_irq_byname (pdev, "rx");
+ if (irq < 0)
+ goto irqerr;
+ sprintf(ssc_dev->intname[1], "%s_rx", pdev->name);
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_rir_handler,
+ IRQF_DISABLED, ssc_dev->intname[1], pdev);
+ if (ret != 0)
+ goto irqerr;
+
+ irq = platform_get_irq_byname (pdev, "err");
+ if (irq < 0)
+ goto irqerr;
+ sprintf(ssc_dev->intname[2], "%s_err", pdev->name);
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_eir_handler,
+ IRQF_DISABLED, ssc_dev->intname[2], pdev);
+ if (ret != 0)
+ goto irqerr;
+
+ irq = platform_get_irq_byname (pdev, "frm");
+ if (irq < 0)
+ goto irqerr;
+ sprintf(ssc_dev->intname[3], "%s_frm", pdev->name);
+ ret = devm_request_irq(&pdev->dev, irq, svip_ssc_fir_handler,
+ IRQF_DISABLED, ssc_dev->intname[3], pdev);
+ if (ret != 0)
+ goto irqerr;
+
+ /*
+ * Initialize the Hardware
+ */
+
+ /* Clear enable bit, i.e. put SSC into configuration mode */
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_CLREN;
+ /* enable SSC core to run at fpi clock */
+ ssc_dev->regs->clc = SSC_CLC_RMC_VAL(1);
+ asm("sync");
+
+ /* GPIO CS */
+ ssc_dev->regs->gpocon = SSC_GPOCON_ISCSBN_VAL(0xFF);
+ ssc_dev->regs->whbgpostat = SSC_WHBGPOSTAT_SETOUTN_VAL(0xFF); /* CS to high */
+
+ /* Set Master mode */
+ ssc_dev->regs->whbstate = SSC_WHBSTATE_SETMS;
+
+ /* enable and flush RX/TX FIFO */
+ ssc_dev->regs->rxfcon = SSC_RXFCON_RXFITL_VAL(SVIP_SSC_RFIFO_WORDS-FIFO_HEADROOM) |
+ SSC_RXFCON_RXFLU | /* Receive FIFO Flush */
+ SSC_RXFCON_RXFEN; /* Receive FIFO Enable */
+
+ ssc_dev->regs->txfcon = SSC_TXFCON_TXFITL_VAL(FIFO_HEADROOM) |
+ SSC_TXFCON_TXFLU | /* Transmit FIFO Flush */
+ SSC_TXFCON_TXFEN; /* Transmit FIFO Enable */
+ asm("sync");
+
+ /* enable IRQ */
+ ssc_dev->regs->irnen = SSC_IRNEN_E;
+
+ dev_info(&pdev->dev, "controller at 0x%08lx (irq %d)\n",
+ (unsigned long)ssc_dev->regs, platform_get_irq_byname (pdev, "rx"));
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto out_reset_hw;
+
+ return 0;
+
+out_reset_hw:
+
+irqerr:
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "tx"), pdev);
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "rx"), pdev);
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "err"), pdev);
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "frm"), pdev);
+
+spierr:
+
+ spi_master_put(master);
+
+errout:
+ return ret;
+}
+
+static int __exit svip_ssc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+ struct spi_message *msg;
+
+ /* reset the hardware and block queue progress */
+ spin_lock_irq(&ssc_dev->lock);
+ ssc_dev->stopping = 1;
+ /* TODO: shutdown hardware */
+ spin_unlock_irq(&ssc_dev->lock);
+
+ /* Terminate remaining queued transfers */
+ list_for_each_entry(msg, &ssc_dev->queue, queue) {
+ /* REVISIT unmapping the dma is a NOP on ARM and AVR32
+ * but we shouldn't depend on that...
+ */
+ msg->status = -ESHUTDOWN;
+ msg->complete(msg->context);
+ }
+
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "tx"), pdev);
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "rx"), pdev);
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "err"), pdev);
+ devm_free_irq (&pdev->dev, platform_get_irq_byname (pdev, "frm"), pdev);
+
+ spi_unregister_master(master);
+ platform_set_drvdata(pdev, NULL);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int svip_ssc_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+
+ clk_disable(ssc_dev->clk);
+ return 0;
+}
+
+static int svip_ssc_resume(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct svip_ssc_device *ssc_dev = spi_master_get_devdata(master);
+
+ clk_enable(ssc_dev->clk);
+ return 0;
+}
+#endif
+
+static struct platform_driver svip_ssc_driver = {
+ .driver = {
+ .name = "ifx_ssc",
+ .owner = THIS_MODULE,
+ },
+ .probe = svip_ssc_probe,
+#ifdef CONFIG_PM
+ .suspend = svip_ssc_suspend,
+ .resume = svip_ssc_resume,
+#endif
+ .remove = __exit_p(svip_ssc_remove)
+};
+
+int __init svip_ssc_init(void)
+{
+ return platform_driver_register(&svip_ssc_driver);
+}
+
+void __exit svip_ssc_exit(void)
+{
+ platform_driver_unregister(&svip_ssc_driver);
+}
+
+module_init(svip_ssc_init);
+module_exit(svip_ssc_exit);
+
+MODULE_ALIAS("platform:ifx_ssc");
+MODULE_DESCRIPTION("Lantiq SSC Controller driver");
+MODULE_AUTHOR("Andreas Schmidt <andreas.schmidt@infineon.com>");
+MODULE_AUTHOR("Jevgenijs Grigorjevs <Jevgenijs.Grigorjevs@lantiq.com>");
+MODULE_LICENSE("GPL");
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Kconfig b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Kconfig
new file mode 100644
index 0000000000..e018490fa3
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Kconfig
@@ -0,0 +1,37 @@
+config DWC_OTG
+	tristate "Synopsys DWC_OTG support"
+ depends on USB
+ help
+	  This driver supports the Synopsys DWC_OTG IP core
+	  embedded in many SoCs (Ralink, Infineon, etc.)
+
+choice
+ prompt "USB Operation Mode"
+ depends on DWC_OTG
+ default DWC_OTG_HOST_ONLY
+
+config DWC_OTG_HOST_ONLY
+ bool "HOST ONLY MODE"
+ depends on DWC_OTG
+
+#config DWC_OTG_DEVICE_ONLY
+# bool "DEVICE ONLY MODE"
+# depends on DWC_OTG
+endchoice
+
+choice
+ prompt "Platform"
+ depends on DWC_OTG
+ default DWC_OTG_LANTIQ
+
+config DWC_OTG_LANTIQ
+ bool "Lantiq"
+ depends on LANTIQ
+ help
+ Danube USB Host Controller
+ platform support
+endchoice
+
+config DWC_OTG_DEBUG
+ bool "Enable debug mode"
+ depends on DWC_OTG
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Makefile b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Makefile
new file mode 100644
index 0000000000..d4d23554a1
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/Makefile
@@ -0,0 +1,39 @@
+#
+# Makefile for DWC_otg Highspeed USB controller driver
+#
+
+ifeq ($(CONFIG_DWC_OTG_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+# Use one of the following flags to compile the software in host-only or
+# device-only mode based on the configuration selected by the user
+ifeq ($(CONFIG_DWC_OTG_HOST_ONLY),y)
+ EXTRA_CFLAGS += -DDWC_OTG_HOST_ONLY -DDWC_HOST_ONLY
+ EXTRA_CFLAGS += -DDWC_OTG_EN_ISOC -DDWC_EN_ISOC
+else ifeq ($(CONFIG_DWC_OTG_DEVICE_ONLY),y)
+ EXTRA_CFLAGS += -DDWC_OTG_DEVICE_ONLY
+else
+ EXTRA_CFLAGS += -DDWC_OTG_MODE
+endif
+
+# EXTRA_CFLAGS += -DDWC_HS_ELECT_TST
+# EXTRA_CFLAGS += -DDWC_OTG_EXT_CHG_PUMP
+
+ifeq ($(CONFIG_DWC_OTG_LANTIQ),y)
+ EXTRA_CFLAGS += -Dlinux -D__LINUX__ -DDWC_OTG_IFX -DDWC_OTG_HOST_ONLY -DDWC_HOST_ONLY -D__KERNEL__
+endif
+ifeq ($(CONFIG_DWC_OTG_LANTIQ),m)
+ EXTRA_CFLAGS += -Dlinux -D__LINUX__ -DDWC_OTG_IFX -DDWC_HOST_ONLY -DMODULE -D__KERNEL__ -DDEBUG
+endif
+
+obj-$(CONFIG_DWC_OTG) := dwc_otg.o
+dwc_otg-objs := dwc_otg_hcd.o dwc_otg_hcd_intr.o dwc_otg_hcd_queue.o
+#dwc_otg-objs += dwc_otg_pcd.o dwc_otg_pcd_intr.o
+dwc_otg-objs += dwc_otg_attr.o
+dwc_otg-objs += dwc_otg_cil.o dwc_otg_cil_intr.o
+dwc_otg-objs += dwc_otg_ifx.o
+dwc_otg-objs += dwc_otg_driver.o
+
+#obj-$(CONFIG_DWC_OTG_IFX) := dwc_otg_ifx.o
+#dwc_otg_ifx-objs := dwc_otg_ifx.o
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.c
new file mode 100644
index 0000000000..4675a5cc7d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.c
@@ -0,0 +1,802 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_attr.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 537387 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing. The Linux driver attributes
+ * feature will be used to provide the Linux Diagnostic
+ * Interface. These attributes are accessed through sysfs.
+ */
+
+/** @page "Linux Module Attributes"
+ *
+ * The Linux module attributes feature is used to provide the Linux
+ * Diagnostic Interface. These attributes are accessed through sysfs.
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing.
+
+
+ The following table shows the attributes.
+ <table>
+ <tr>
+ <td><b> Name</b></td>
+ <td><b> Description</b></td>
+ <td><b> Access</b></td>
+ </tr>
+
+ <tr>
+ <td> mode </td>
+ <td> Returns the current mode: 0 for device mode, 1 for host mode</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hnpcapable </td>
+	<td> Gets or sets the "HNP-capable" bit in the Core USB Configuration Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> srpcapable </td>
+	<td> Gets or sets the "SRP-capable" bit in the Core USB Configuration Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> hnp </td>
+ <td> Initiates the Host Negotiation Protocol. Read returns the status.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> srp </td>
+ <td> Initiates the Session Request Protocol. Read returns the status.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> buspower </td>
+ <td> Gets or sets the Power State of the bus (0 - Off or 1 - On)</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> bussuspend </td>
+ <td> Suspends the USB bus.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> busconnected </td>
+ <td> Gets the connection status of the bus</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> gotgctl </td>
+ <td> Gets or sets the Core Control Status Register.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gusbcfg </td>
+ <td> Gets or sets the Core USB Configuration Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> grxfsiz </td>
+ <td> Gets or sets the Receive FIFO Size Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gnptxfsiz </td>
+ <td> Gets or sets the non-periodic Transmit Size Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gpvndctl </td>
+ <td> Gets or sets the PHY Vendor Control Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> ggpio </td>
+ <td> Gets the value in the lower 16-bits of the General Purpose IO Register
+ or sets the upper 16 bits.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> guid </td>
+ <td> Gets or sets the value of the User ID Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gsnpsid </td>
+	<td> Gets the value of the Synopsys ID Register</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> devspeed </td>
+ <td> Gets or sets the device speed setting in the DCFG register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> enumspeed </td>
+ <td> Gets the device enumeration Speed.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hptxfsiz </td>
+ <td> Gets the value of the Host Periodic Transmit FIFO</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hprt0 </td>
+ <td> Gets or sets the value in the Host Port Control and Status Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regoffset </td>
+ <td> Sets the register offset for the next Register Access</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regvalue </td>
+ <td> Gets or sets the value of the register at the offset in the regoffset attribute.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> remote_wakeup </td>
+ <td> On read, shows the status of Remote Wakeup. On write, initiates a remote
+ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote
+ Wakeup signalling bit in the Device Control Register is set for 1
+	millisecond.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regdump </td>
+ <td> Dumps the contents of core registers.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hcddump </td>
+ <td> Dumps the current HCD state.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hcd_frrem </td>
+ <td> Shows the average value of the Frame Remaining
+ field in the Host Frame Number/Frame Remaining register when an SOF interrupt
+ occurs. This can be used to determine the average interrupt latency. Also
+ shows the average Frame Remaining value for start_transfer and the "a" and
+ "b" sample points. The "a" and "b" sample points may be used during debugging
+	to determine how long it takes to execute a section of the HCD code.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> rd_reg_test </td>
+ <td> Displays the time required to read the GNPTXFSIZ register many times
+	(the output shows the number of times the register is read).</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> wr_reg_test </td>
+ <td> Displays the time required to write the GNPTXFSIZ register many times
+	(the output shows the number of times the register is written).</td>
+ <td> Read</td>
+ </tr>
+
+ </table>
+
+ Example usage:
+ To get the current mode:
+ cat /sys/devices/lm0/mode
+
+ To power down the USB:
+ echo 0 > /sys/devices/lm0/buspower
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h> /* permission constants */
+
+#include <asm/io.h>
+
+#include "dwc_otg_plat.h"
+#include "dwc_otg_attr.h"
+#include "dwc_otg_driver.h"
+// #include "dwc_otg_pcd.h"
+#include "dwc_otg_hcd.h"
+
+// 20070316, winder added.
+#ifndef SZ_256K
+#define SZ_256K 0x00040000
+#endif
+
+/*
+ * MACROs for defining sysfs attribute
+ */
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t val; \
+ val = dwc_read_reg32 (_addr_); \
+ val = (val & (_mask_)) >> _shift_; \
+ return sprintf (buf, "%s = 0x%x\n", _string_, val); \
+}
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t set = simple_strtoul(buf, NULL, 16); \
+ uint32_t clear = set; \
+ clear = ((~clear) << _shift_) & _mask_; \
+ set = (set << _shift_) & _mask_; \
+ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \
+ dwc_modify_reg32(_addr_, clear, set); \
+ return count; \
+}
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
+
+/*
+ * MACROs for defining sysfs attribute for 32-bit registers
+ */
+#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t val; \
+ val = dwc_read_reg32 (_addr_); \
+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \
+}
+#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t val = simple_strtoul(buf, NULL, 16); \
+ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \
+ dwc_write_reg32(_addr_, val); \
+ return count; \
+}
+
+#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \
+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \
+DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
+
+#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \
+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
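+
+/*
+ * For reference (not part of the original source): expanding, for example,
+ * DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid, &(otg_dev->core_if->
+ * core_global_regs->gsnpsid), "GSNPSID") with the macros above yields
+ * roughly:
+ *
+ *	static ssize_t gsnpsid_show(struct device *_dev,
+ *				    struct device_attribute *attr, char *buf)
+ *	{
+ *		dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ *		uint32_t val;
+ *		val = dwc_read_reg32(&(otg_dev->core_if->core_global_regs->gsnpsid));
+ *		return sprintf(buf, "GSNPSID = 0x%08x\n", val);
+ *	}
+ *	DEVICE_ATTR(gsnpsid, 0444, gsnpsid_show, NULL);
+ *
+ * i.e. one sysfs file per attribute; the _addr_ argument is evaluated at
+ * call time through the otg_dev pointer declared by the macro.
+ */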
+
+
+/** @name Functions for Show/Store of Attributes */
+/**@{*/
+
+/**
+ * Show the register offset of the Register Access.
+ */
+static ssize_t regoffset_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset);
+}
+
+/**
+ * Set the register offset for the next Register Access Read/Write
+ */
+static ssize_t regoffset_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t offset = simple_strtoul(buf, NULL, 16);
+ //dev_dbg(_dev, "Offset=0x%08x\n", offset);
+ if (offset < SZ_256K ) {
+ otg_dev->reg_offset = offset;
+ }
+ else {
+ dev_err( _dev, "invalid offset\n" );
+ }
+
+ return count;
+}
+DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, regoffset_show, regoffset_store);
+
+/**
+ * Show the value of the register at the offset in the reg_offset
+ * attribute.
+ */
+static ssize_t regvalue_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t val;
+ volatile uint32_t *addr;
+
+ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) {
+ /* Calculate the address */
+ addr = (uint32_t*)(otg_dev->reg_offset +
+ (uint8_t*)otg_dev->base);
+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr);
+ val = dwc_read_reg32( addr );
+ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1,
+ "Reg@0x%06x = 0x%08x\n",
+ otg_dev->reg_offset, val);
+ }
+ else {
+ dev_err(_dev, "Invalid offset (0x%0x)\n",
+ otg_dev->reg_offset);
+ return sprintf(buf, "invalid offset\n" );
+ }
+}
+
+/**
+ * Store the value in the register at the offset in the reg_offset
+ * attribute.
+ *
+ */
+static ssize_t regvalue_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ volatile uint32_t * addr;
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val);
+ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) {
+ /* Calculate the address */
+ addr = (uint32_t*)(otg_dev->reg_offset +
+ (uint8_t*)otg_dev->base);
+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr);
+ dwc_write_reg32( addr, val );
+ }
+ else {
+ dev_err(_dev, "Invalid Register Offset (0x%08x)\n",
+ otg_dev->reg_offset);
+ }
+ return count;
+}
+DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store);
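+
+/*
+ * Example usage of the regoffset/regvalue pair (illustrative; the sysfs
+ * path depends on how the device is registered, "lm0" follows the example
+ * paths used elsewhere in this file):
+ *
+ *	echo 0x08 > /sys/devices/lm0/regoffset   (select GAHBCFG, offset 0x08)
+ *	cat /sys/devices/lm0/regvalue            (read the selected register)
+ *	echo 0x1 > /sys/devices/lm0/regvalue     (write it)
+ */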
+
+/*
+ * Attributes
+ */
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"HNPCapable");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"SRPCapable");
+
+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected");
+
+DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG");
+DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL");
+DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO");
+DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID");
+DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed");
+
+DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0");
+
+
+/**
+ * @todo Add code to initiate the HNP.
+ */
+/**
+ * Show the HNP status bit
+ */
+static ssize_t hnp_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ gotgctl_data_t val;
+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl));
+ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs);
+}
+
+/**
+ * Set the HNP Request bit
+ */
+static ssize_t hnp_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl);
+ gotgctl_data_t mem;
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.hnpreq = in;
+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32);
+ dwc_write_reg32(addr, mem.d32);
+ return count;
+}
+DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store);
+
+/**
+ * @todo Add code to initiate the SRP.
+ */
+/**
+ * Show the SRP status bit
+ */
+static ssize_t srp_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_HOST_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ gotgctl_data_t val;
+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl));
+ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs);
+#else
+ return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Set the SRP Request bit
+ */
+static ssize_t srp_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+#ifndef DWC_HOST_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_pcd_initiate_srp(otg_dev->pcd);
+#endif
+ return count;
+}
+DEVICE_ATTR(srp, 0644, srp_show, srp_store);
+
+/**
+ * @todo Need to do more for power on/off?
+ */
+/**
+ * Show the Bus Power status
+ */
+static ssize_t buspower_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ hprt0_data_t val;
+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0);
+ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr);
+}
+
+
+/**
+ * Set the Bus Power status
+ */
+static ssize_t buspower_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t on = simple_strtoul(buf, NULL, 16);
+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0;
+ hprt0_data_t mem;
+
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.prtpwr = on;
+
+ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32);
+ dwc_write_reg32(addr, mem.d32);
+
+ return count;
+}
+DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store);
+
+/**
+ * @todo Need to do more for suspend?
+ */
+/**
+ * Show the Bus Suspend status
+ */
+static ssize_t bussuspend_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ hprt0_data_t val;
+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0);
+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp);
+}
+
+/**
+ * Set the Bus Suspend status
+ */
+static ssize_t bussuspend_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0;
+ hprt0_data_t mem;
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.prtsusp = in;
+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32);
+ dwc_write_reg32(addr, mem.d32);
+ return count;
+}
+DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store);
+
+/**
+ * Show the status of Remote Wakeup.
+ */
+static ssize_t remote_wakeup_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_HOST_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dctl_data_t val;
+ val.d32 = dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl);
+ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n",
+ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable);
+#else
+ return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Initiate a remote wakeup of the host. The Device control register
+ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable
+ * flag is set.
+ *
+ */
+static ssize_t remote_wakeup_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+#ifndef DWC_HOST_ONLY
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ if (val&1) {
+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1);
+ }
+ else {
+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0);
+ }
+#endif
+ return count;
+}
+DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show,
+ remote_wakeup_store);
+
+/**
+ * Dump global registers and either host or device registers (depending on the
+ * current mode of the core).
+ */
+static ssize_t regdump_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifdef DEBUG
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ printk("%s otg_dev=0x%p\n", __FUNCTION__, otg_dev);
+
+ dwc_otg_dump_global_registers( otg_dev->core_if);
+ if (dwc_otg_is_host_mode(otg_dev->core_if)) {
+ dwc_otg_dump_host_registers( otg_dev->core_if);
+ } else {
+ dwc_otg_dump_dev_registers( otg_dev->core_if);
+ }
+#endif
+
+ return sprintf( buf, "Register Dump\n" );
+}
+
+DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0);
+
+/**
+ * Dump the current hcd state.
+ */
+static ssize_t hcddump_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_DEVICE_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_hcd_dump_state(otg_dev->hcd);
+#endif
+ return sprintf( buf, "HCD Dump\n" );
+}
+
+DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0);
+
+/**
+ * Dump the average frame remaining at SOF. This can be used to
+ * determine average interrupt latency. Frame remaining is also shown for
+ * start transfer and two additional sample points.
+ */
+static ssize_t hcd_frrem_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef DWC_DEVICE_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_hcd_dump_frrem(otg_dev->hcd);
+#endif
+ return sprintf( buf, "HCD Dump Frame Remaining\n" );
+}
+
+DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0);
+
+/**
+ * Displays the time required to read the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is read).
+ */
+#define RW_REG_COUNT 10000000
+#define MSEC_PER_JIFFIE 1000/HZ
+static ssize_t rd_reg_test_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ int i;
+ int time;
+ int start_jiffies;
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+
+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+ start_jiffies = jiffies;
+ for (i = 0; i < RW_REG_COUNT; i++) {
+ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz);
+ }
+ time = jiffies - start_jiffies;
+ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time );
+}
+
+DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0);
+
+/**
+ * Displays the time required to write the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is written).
+ */
+static ssize_t wr_reg_test_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ int i;
+ int time;
+ int start_jiffies;
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t reg_val;
+
+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz);
+ start_jiffies = jiffies;
+ for (i = 0; i < RW_REG_COUNT; i++) {
+ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val);
+ }
+ time = jiffies - start_jiffies;
+ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
+}
+
+DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0);
+/**@}*/
+
+/**
+ * Create the device files
+ */
+void dwc_otg_attr_create (struct device *_dev)
+{
+ int retval;
+
+ retval = device_create_file(_dev, &dev_attr_regoffset);
+ retval += device_create_file(_dev, &dev_attr_regvalue);
+ retval += device_create_file(_dev, &dev_attr_mode);
+ retval += device_create_file(_dev, &dev_attr_hnpcapable);
+ retval += device_create_file(_dev, &dev_attr_srpcapable);
+ retval += device_create_file(_dev, &dev_attr_hnp);
+ retval += device_create_file(_dev, &dev_attr_srp);
+ retval += device_create_file(_dev, &dev_attr_buspower);
+ retval += device_create_file(_dev, &dev_attr_bussuspend);
+ retval += device_create_file(_dev, &dev_attr_busconnected);
+ retval += device_create_file(_dev, &dev_attr_gotgctl);
+ retval += device_create_file(_dev, &dev_attr_gusbcfg);
+ retval += device_create_file(_dev, &dev_attr_grxfsiz);
+ retval += device_create_file(_dev, &dev_attr_gnptxfsiz);
+ retval += device_create_file(_dev, &dev_attr_gpvndctl);
+ retval += device_create_file(_dev, &dev_attr_ggpio);
+ retval += device_create_file(_dev, &dev_attr_guid);
+ retval += device_create_file(_dev, &dev_attr_gsnpsid);
+ retval += device_create_file(_dev, &dev_attr_devspeed);
+ retval += device_create_file(_dev, &dev_attr_enumspeed);
+ retval += device_create_file(_dev, &dev_attr_hptxfsiz);
+ retval += device_create_file(_dev, &dev_attr_hprt0);
+ retval += device_create_file(_dev, &dev_attr_remote_wakeup);
+ retval += device_create_file(_dev, &dev_attr_regdump);
+ retval += device_create_file(_dev, &dev_attr_hcddump);
+ retval += device_create_file(_dev, &dev_attr_hcd_frrem);
+ retval += device_create_file(_dev, &dev_attr_rd_reg_test);
+ retval += device_create_file(_dev, &dev_attr_wr_reg_test);
+
+ if(retval != 0)
+ {
+ DWC_PRINT("cannot create sysfs device files.\n");
+ // DWC_PRINT("killing own sysfs device files!\n");
+ dwc_otg_attr_remove(_dev);
+ }
+}
+
+/**
+ * Remove the device files
+ */
+void dwc_otg_attr_remove (struct device *_dev)
+{
+ device_remove_file(_dev, &dev_attr_regoffset);
+ device_remove_file(_dev, &dev_attr_regvalue);
+ device_remove_file(_dev, &dev_attr_mode);
+ device_remove_file(_dev, &dev_attr_hnpcapable);
+ device_remove_file(_dev, &dev_attr_srpcapable);
+ device_remove_file(_dev, &dev_attr_hnp);
+ device_remove_file(_dev, &dev_attr_srp);
+ device_remove_file(_dev, &dev_attr_buspower);
+ device_remove_file(_dev, &dev_attr_bussuspend);
+ device_remove_file(_dev, &dev_attr_busconnected);
+ device_remove_file(_dev, &dev_attr_gotgctl);
+ device_remove_file(_dev, &dev_attr_gusbcfg);
+ device_remove_file(_dev, &dev_attr_grxfsiz);
+ device_remove_file(_dev, &dev_attr_gnptxfsiz);
+ device_remove_file(_dev, &dev_attr_gpvndctl);
+ device_remove_file(_dev, &dev_attr_ggpio);
+ device_remove_file(_dev, &dev_attr_guid);
+ device_remove_file(_dev, &dev_attr_gsnpsid);
+ device_remove_file(_dev, &dev_attr_devspeed);
+ device_remove_file(_dev, &dev_attr_enumspeed);
+ device_remove_file(_dev, &dev_attr_hptxfsiz);
+ device_remove_file(_dev, &dev_attr_hprt0);
+ device_remove_file(_dev, &dev_attr_remote_wakeup);
+ device_remove_file(_dev, &dev_attr_regdump);
+ device_remove_file(_dev, &dev_attr_hcddump);
+ device_remove_file(_dev, &dev_attr_hcd_frrem);
+ device_remove_file(_dev, &dev_attr_rd_reg_test);
+ device_remove_file(_dev, &dev_attr_wr_reg_test);
+}
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.h
new file mode 100644
index 0000000000..4bbf7df02d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_attr.h
@@ -0,0 +1,67 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_attr.h $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 510275 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_ATTR_H__)
+#define __DWC_OTG_ATTR_H__
+
+/** @file
+ * This file contains the interface to the Linux device attributes.
+ */
+extern struct device_attribute dev_attr_regoffset;
+extern struct device_attribute dev_attr_regvalue;
+
+extern struct device_attribute dev_attr_mode;
+extern struct device_attribute dev_attr_hnpcapable;
+extern struct device_attribute dev_attr_srpcapable;
+extern struct device_attribute dev_attr_hnp;
+extern struct device_attribute dev_attr_srp;
+extern struct device_attribute dev_attr_buspower;
+extern struct device_attribute dev_attr_bussuspend;
+extern struct device_attribute dev_attr_busconnected;
+extern struct device_attribute dev_attr_gotgctl;
+extern struct device_attribute dev_attr_gusbcfg;
+extern struct device_attribute dev_attr_grxfsiz;
+extern struct device_attribute dev_attr_gnptxfsiz;
+extern struct device_attribute dev_attr_gpvndctl;
+extern struct device_attribute dev_attr_ggpio;
+extern struct device_attribute dev_attr_guid;
+extern struct device_attribute dev_attr_gsnpsid;
+extern struct device_attribute dev_attr_devspeed;
+extern struct device_attribute dev_attr_enumspeed;
+extern struct device_attribute dev_attr_hptxfsiz;
+extern struct device_attribute dev_attr_hprt0;
+
+void dwc_otg_attr_create (struct device *_dev);
+void dwc_otg_attr_remove (struct device *_dev);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.c
new file mode 100644
index 0000000000..42c69eba48
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.c
@@ -0,0 +1,3025 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 631780 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the DWC_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * The CIL manages the memory map for the core so that the HCD and PCD
+ * don't have to do this separately. It also handles basic tasks like
+ * reading/writing the registers and data FIFOs in the controller.
+ * Some of the data access functions provide encapsulation of several
+ * operations required to perform a task, such as writing multiple
+ * registers to start a transfer. Finally, the CIL performs basic
+ * services that are not specific to either the host or device modes
+ * of operation. These services include management of the OTG Host
+ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
+ * Diagnostic API is also provided to allow testing of the controller
+ * hardware.
+ *
+ * The Core Interface Layer has the following requirements:
+ * - Provides basic controller operations.
+ * - Minimal use of OS services.
+ * - The OS services used will be abstracted by using inline functions
+ * or macros.
+ *
+ */
+#include <asm/unaligned.h>
+
+#ifdef DEBUG
+#include <linux/jiffies.h>
+#endif
+
+#include "dwc_otg_plat.h"
+
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+
+/**
+ * This function is called to initialize the DWC_otg CSR data
+ * structures. The register addresses in the device and host
+ * structures are initialized from the base address supplied by the
+ * caller. The calling function must make the OS calls to get the
+ * base address of the DWC_otg controller registers. The core_params
+ * argument holds the parameters that specify how the core should be
+ * configured.
+ *
+ * @param[in] _reg_base_addr Base address of DWC_otg core registers
+ * @param[in] _core_params Pointer to the core configuration parameters
+ *
+ */
+dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr,
+ dwc_otg_core_params_t *_core_params)
+{
+ dwc_otg_core_if_t *core_if = 0;
+ dwc_otg_dev_if_t *dev_if = 0;
+ dwc_otg_host_if_t *host_if = 0;
+ uint8_t *reg_base = (uint8_t *)_reg_base_addr;
+ int i = 0;
+
+ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, _reg_base_addr, _core_params);
+
+ core_if = kmalloc( sizeof(dwc_otg_core_if_t), GFP_KERNEL);
+ if (core_if == 0) {
+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n");
+ return 0;
+ }
+ memset(core_if, 0, sizeof(dwc_otg_core_if_t));
+
+ core_if->core_params = _core_params;
+ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base;
+ /*
+ * Allocate the Device Mode structures.
+ */
+ dev_if = kmalloc( sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
+ if (dev_if == 0) {
+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n");
+ kfree( core_if );
+ return 0;
+ }
+
+ dev_if->dev_global_regs =
+ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET);
+
+ for (i=0; i<MAX_EPS_CHANNELS; i++) {
+ dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
+ (reg_base + DWC_DEV_IN_EP_REG_OFFSET +
+ (i * DWC_EP_REG_OFFSET));
+
+ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
+ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET +
+ (i * DWC_EP_REG_OFFSET));
+ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
+ i, &dev_if->in_ep_regs[i]->diepctl);
+ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
+ i, &dev_if->out_ep_regs[i]->doepctl);
+ }
+ dev_if->speed = 0; // unknown
+ //dev_if->num_eps = MAX_EPS_CHANNELS;
+ //dev_if->num_perio_eps = 0;
+
+ core_if->dev_if = dev_if;
+ /*
+ * Allocate the Host Mode structures.
+ */
+ host_if = kmalloc( sizeof(dwc_otg_host_if_t), GFP_KERNEL);
+ if (host_if == 0) {
+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n");
+ kfree( dev_if );
+ kfree( core_if );
+ return 0;
+ }
+
+ host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
+ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
+ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
+ for (i=0; i<MAX_EPS_CHANNELS; i++) {
+ host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
+ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET +
+ (i * DWC_OTG_CHAN_REGS_OFFSET));
+ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
+ i, &host_if->hc_regs[i]->hcchar);
+ }
+ host_if->num_host_channels = MAX_EPS_CHANNELS;
+ core_if->host_if = host_if;
+
+ for (i=0; i<MAX_EPS_CHANNELS; i++) {
+ core_if->data_fifo[i] =
+ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET +
+ (i * DWC_OTG_DATA_FIFO_SIZE));
+ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n",
+ i, (unsigned)core_if->data_fifo[i]);
+ } // for loop.
+
+ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET);
+
+ /*
+ * Store the contents of the hardware configuration registers here for
+ * easy access later.
+ */
+ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
+ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
+ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
+ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
+
+ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32);
+ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32);
+ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32);
+ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32);
+
+
+ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode);
+ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture);
+ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep);
+ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan);
+ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth);
+ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth);
+ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth);
+
+ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth);
+ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width);
+
+ /*
+ * Set the SRP success bit for FS-I2C
+ */
+ core_if->srp_success = 0;
+ core_if->srp_timer_started = 0;
+
+ return core_if;
+}
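+
+/*
+ * Illustrative sketch (not part of the original source): typical use from a
+ * bus-glue probe path. The glue maps the register window, then hands the
+ * virtual base address and a filled-in parameter block to the CIL. The
+ * variable names are placeholders.
+ */
+#if 0	/* example only, not compiled */
+	dwc_otg_core_if_t *core_if;
+
+	core_if = dwc_otg_cil_init((uint32_t *)mapped_reg_base, &core_params);
+	if (!core_if)
+		return -ENOMEM;
+	dwc_otg_core_init(core_if);	/* defined further down in this file */
+	/* ... on teardown: dwc_otg_cil_remove(core_if); */
+#endif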
+/**
+ * This function frees the structures allocated by dwc_otg_cil_init().
+ *
+ * @param[in] _core_if The core interface pointer returned from
+ * dwc_otg_cil_init().
+ *
+ */
+void dwc_otg_cil_remove( dwc_otg_core_if_t *_core_if )
+{
+ /* Disable all interrupts */
+ dwc_modify_reg32( &_core_if->core_global_regs->gahbcfg, 1, 0);
+ dwc_write_reg32( &_core_if->core_global_regs->gintmsk, 0);
+
+ if ( _core_if->dev_if ) {
+ kfree( _core_if->dev_if );
+ }
+ if ( _core_if->host_if ) {
+ kfree( _core_if->host_if );
+ }
+ kfree( _core_if );
+}
+
+/**
+ * This function enables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param[in] _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if )
+{
+ gahbcfg_data_t ahbcfg = { .d32 = 0};
+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
+ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
+}
+/**
+ * This function disables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param[in] _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if )
+{
+ gahbcfg_data_t ahbcfg = { .d32 = 0};
+	ahbcfg.b.glblintrmsk = 1; /* Bit to clear to disable interrupts */
+ dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+}
+
+/**
+ * This function initializes the common interrupts, used in both
+ * device and host modes.
+ *
+ * @param[in] _core_if Programming view of the DWC_otg controller
+ *
+ */
+static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs =
+ _core_if->core_global_regs;
+ gintmsk_data_t intr_mask = { .d32 = 0};
+ /* Clear any pending OTG Interrupts */
+ dwc_write_reg32( &global_regs->gotgint, 0xFFFFFFFF);
+ /* Clear any pending interrupts */
+ dwc_write_reg32( &global_regs->gintsts, 0xFFFFFFFF);
+ /*
+ * Enable the interrupts in the GINTMSK.
+ */
+ intr_mask.b.modemismatch = 1;
+ intr_mask.b.otgintr = 1;
+ if (!_core_if->dma_enable) {
+ intr_mask.b.rxstsqlvl = 1;
+ }
+ intr_mask.b.conidstschng = 1;
+ intr_mask.b.wkupintr = 1;
+ intr_mask.b.disconnect = 1;
+ intr_mask.b.usbsuspend = 1;
+ intr_mask.b.sessreqintr = 1;
+ dwc_write_reg32( &global_regs->gintmsk, intr_mask.d32);
+}
+
+/**
+ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
+ * type.
+ */
+static void init_fslspclksel(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t val;
+ hcfg_data_t hcfg;
+
+ if (((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (_core_if->core_params->ulpi_fs_ls)) ||
+ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS))
+ {
+ /* Full speed PHY */
+ val = DWC_HCFG_48_MHZ;
+ } else {
+ /* High speed PHY running at full speed or high speed */
+ val = DWC_HCFG_30_60_MHZ;
+ }
+
+ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
+ hcfg.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hcfg);
+ hcfg.b.fslspclksel = val;
+ dwc_write_reg32(&_core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+/**
+ * Initializes the DevSpd field of the DCFG register depending on the PHY type
+ * and the enumeration speed of the device.
+ */
+static void init_devspd(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t val;
+ dcfg_data_t dcfg;
+
+ if (((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (_core_if->core_params->ulpi_fs_ls)) ||
+ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS))
+ {
+ /* Full speed PHY */
+ val = 0x3;
+ } else if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+ /* High speed PHY running at full speed */
+ val = 0x1;
+ } else {
+ /* High speed PHY running at high speed */
+ val = 0x0;
+ }
+
+ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
+ dcfg.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dcfg);
+ dcfg.b.devspd = val;
+ dwc_write_reg32(&_core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+/**
+ * This function calculates the number of IN EPS
+ * using GHWCFG1 and GHWCFG2 registers values
+ *
+ * @param _core_if Programming view of the DWC_otg controller
+ */
+static uint32_t calc_num_in_eps(dwc_otg_core_if_t * _core_if)
+{
+ uint32_t num_in_eps = 0;
+ uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep;
+ uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2;
+ uint32_t num_tx_fifos = _core_if->hwcfg4.b.num_in_eps;
+ int i;
+ for (i = 0; i < num_eps; ++i) {
+ if (!(hwcfg1 & 0x1))
+ num_in_eps++;
+ hwcfg1 >>= 2;
+ }
+ if (_core_if->hwcfg4.b.ded_fifo_en) {
+ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
+ }
+ return num_in_eps;
+}
+
+
+/**
+ * This function calculates the number of OUT EPS
+ * using GHWCFG1 and GHWCFG2 registers values
+ *
+ * @param _core_if Programming view of the DWC_otg controller
+ */
+static uint32_t calc_num_out_eps(dwc_otg_core_if_t * _core_if)
+{
+ uint32_t num_out_eps = 0;
+ uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep;
+ uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2;
+ int i;
+ for (i = 0; i < num_eps; ++i) {
+ if (!(hwcfg1 & 0x2))
+ num_out_eps++;
+ hwcfg1 >>= 2;
+ }
+ return num_out_eps;
+}
+/**
+ * This function initializes the DWC_otg controller registers and
+ * prepares the core for device mode or host mode operation.
+ *
+ * @param _core_if Programming view of the DWC_otg controller
+ *
+ */
+void dwc_otg_core_init(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ int i = 0;
+ gahbcfg_data_t ahbcfg = { .d32 = 0};
+ gusbcfg_data_t usbcfg = { .d32 = 0 };
+ gi2cctl_data_t i2cctl = {.d32 = 0};
+
+ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n",_core_if);
+
+ /* Common Initialization */
+
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ DWC_DEBUGPL(DBG_CIL, "USB config register: 0x%08x\n", usbcfg.d32);
+
+ /* Program the ULPI External VBUS bit if needed */
+ //usbcfg.b.ulpi_ext_vbus_drv = 1;
+ //usbcfg.b.ulpi_ext_vbus_drv = 0;
+ usbcfg.b.ulpi_ext_vbus_drv =
+ (_core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
+
+ /* Set external TS Dline pulsing */
+ usbcfg.b.term_sel_dl_pulse = (_core_if->core_params->ts_dline == 1) ? 1 : 0;
+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Reset the Controller */
+ dwc_otg_core_reset( _core_if );
+
+ /* Initialize parameters from Hardware configuration registers. */
+#if 0
+ dev_if->num_eps = _core_if->hwcfg2.b.num_dev_ep;
+ dev_if->num_perio_eps = _core_if->hwcfg4.b.num_dev_perio_in_ep;
+#else
+ dev_if->num_in_eps = calc_num_in_eps(_core_if);
+ dev_if->num_out_eps = calc_num_out_eps(_core_if);
+#endif
+ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
+ _core_if->hwcfg4.b.num_dev_perio_in_ep);
+ DWC_DEBUGPL(DBG_CIL, "Is power optimization enabled? %s\n",
+ _core_if->hwcfg4.b.power_optimiz ? "Yes" : "No");
+ DWC_DEBUGPL(DBG_CIL, "vbus_valid filter enabled? %s\n",
+ _core_if->hwcfg4.b.vbus_valid_filt_en ? "Yes" : "No");
+ DWC_DEBUGPL(DBG_CIL, "iddig filter enabled? %s\n",
+ _core_if->hwcfg4.b.iddig_filt_en ? "Yes" : "No");
+
+ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",_core_if->hwcfg4.b.num_dev_perio_in_ep);
+ for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+ dev_if->perio_tx_fifo_size[i] =
+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", i,
+ dev_if->perio_tx_fifo_size[i]);
+ }
+ for (i = 0; i < _core_if->hwcfg4.b.num_in_eps; i++) {
+ dev_if->tx_fifo_size[i] =
+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", i,
+ dev_if->tx_fifo_size[i]);
+ }
+
+ _core_if->total_fifo_size = _core_if->hwcfg3.b.dfifo_depth;
+ _core_if->rx_fifo_size = dwc_read_reg32(&global_regs->grxfsiz);
+ _core_if->nperio_tx_fifo_size = dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
+
+ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", _core_if->rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", _core_if->nperio_tx_fifo_size);
+
+ /* This programming sequence needs to happen in FS mode before any other
+ * programming occurs */
+ if ((_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
+ (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+ /* If FS mode with FS PHY */
+
+ /* core_init() is now called on every switch so only call the
+ * following for the first time through. */
+ if (!_core_if->phy_init_done) {
+ _core_if->phy_init_done = 1;
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.physel = 1;
+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Reset after a PHY select */
+ dwc_otg_core_reset( _core_if );
+ }
+
+ /* Program DCFG.DevSpd or HCFG.FSLSPClkSel to 48 MHz in FS. Also
+ * do this on HNP Dev/Host mode switches (done in dev_init and
+ * host_init). */
+ if (dwc_otg_is_host_mode(_core_if)) {
+ DWC_DEBUGPL(DBG_CIL, "host mode\n");
+ init_fslspclksel(_core_if);
+ } else {
+ DWC_DEBUGPL(DBG_CIL, "device mode\n");
+ init_devspd(_core_if);
+ }
+
+ if (_core_if->core_params->i2c_enable) {
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
+ /* Program GUSBCFG.OtgUtmifsSel to I2C */
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.otgutmifssel = 1;
+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Program GI2CCTL.I2CEn */
+ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
+ i2cctl.b.i2cdevaddr = 1;
+ i2cctl.b.i2cen = 0;
+ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
+ i2cctl.b.i2cen = 1;
+ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32);
+ }
+
+ } /* endif speed == DWC_SPEED_PARAM_FULL */
+ else {
+ /* High speed PHY. */
+ if (!_core_if->phy_init_done) {
+ _core_if->phy_init_done = 1;
+ DWC_DEBUGPL(DBG_CIL, "High spped PHY\n");
+ /* HS PHY parameters. These parameters are preserved
+ * during soft reset so only program the first time. Do
+ * a soft reset immediately after setting phyif. */
+ usbcfg.b.ulpi_utmi_sel = _core_if->core_params->phy_type;
+ if (usbcfg.b.ulpi_utmi_sel == 2) { // winder
+ DWC_DEBUGPL(DBG_CIL, "ULPI\n");
+ /* ULPI interface */
+ usbcfg.b.phyif = 0;
+ usbcfg.b.ddrsel = _core_if->core_params->phy_ulpi_ddr;
+ } else {
+ /* UTMI+ interface */
+ if (_core_if->core_params->phy_utmi_width == 16) {
+ usbcfg.b.phyif = 1;
+ DWC_DEBUGPL(DBG_CIL, "UTMI+ 16\n");
+ } else {
+ DWC_DEBUGPL(DBG_CIL, "UTMI+ 8\n");
+ usbcfg.b.phyif = 0;
+ }
+ }
+ dwc_write_reg32( &global_regs->gusbcfg, usbcfg.d32);
+
+ /* Reset after setting the PHY parameters */
+ dwc_otg_core_reset( _core_if );
+ }
+ }
+
+ if ((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+ (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+ (_core_if->core_params->ulpi_fs_ls))
+ {
+ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.ulpi_fsls = 1;
+ usbcfg.b.ulpi_clk_sus_m = 1;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ } else {
+ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS=0\n");
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ usbcfg.b.ulpi_fsls = 0;
+ usbcfg.b.ulpi_clk_sus_m = 0;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ }
+
+ /* Program the GAHBCFG Register.*/
+ switch (_core_if->hwcfg2.b.architecture){
+
+ case DWC_SLAVE_ONLY_ARCH:
+ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
+ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+ _core_if->dma_enable = 0;
+ break;
+
+ case DWC_EXT_DMA_ARCH:
+ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
+ ahbcfg.b.hburstlen = _core_if->core_params->dma_burst_size;
+ _core_if->dma_enable = (_core_if->core_params->dma_enable != 0);
+ break;
+
+ case DWC_INT_DMA_ARCH:
+ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
+ //ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
+ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR4;
+ _core_if->dma_enable = (_core_if->core_params->dma_enable != 0);
+ break;
+ }
+ ahbcfg.b.dmaenable = _core_if->dma_enable;
+ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
+ _core_if->en_multiple_tx_fifo = _core_if->hwcfg4.b.ded_fifo_en;
+
+ /*
+ * Program the GUSBCFG register.
+ */
+ usbcfg.d32 = dwc_read_reg32( &global_regs->gusbcfg );
+
+ switch (_core_if->hwcfg2.b.op_mode) {
+ case DWC_MODE_HNP_SRP_CAPABLE:
+ usbcfg.b.hnpcap = (_core_if->core_params->otg_cap ==
+ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+
+ case DWC_MODE_SRP_ONLY_CAPABLE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+
+ case DWC_MODE_NO_HNP_SRP_CAPABLE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ break;
+
+ case DWC_MODE_SRP_CAPABLE_DEVICE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+
+ case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ break;
+
+ case DWC_MODE_SRP_CAPABLE_HOST:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+ break;
+
+ case DWC_MODE_NO_SRP_CAPABLE_HOST:
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ break;
+ }
+
+ dwc_write_reg32( &global_regs->gusbcfg, usbcfg.d32);
+
+ /* Enable common interrupts */
+ dwc_otg_enable_common_interrupts( _core_if );
+
+ /* Do device or host initialization based on mode during PCD
+ * and HCD initialization */
+ if (dwc_otg_is_host_mode( _core_if )) {
+ DWC_DEBUGPL(DBG_ANY, "Host Mode\n" );
+ _core_if->op_state = A_HOST;
+ } else {
+ DWC_DEBUGPL(DBG_ANY, "Device Mode\n" );
+ _core_if->op_state = B_PERIPHERAL;
+#ifdef DWC_DEVICE_ONLY
+ dwc_otg_core_dev_init( _core_if );
+#endif
+ }
+}
+
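+/*
+ * Illustrative call order (sketch; "core_if" is a hypothetical caller-side
+ * name). dwc_otg_core_init() does the common programming; the PCD/HCD start
+ * paths then run the mode-specific init, which also enables the matching
+ * interrupts:
+ *
+ *	dwc_otg_core_init(core_if);
+ *	if (dwc_otg_is_host_mode(core_if))
+ *		dwc_otg_core_host_init(core_if);
+ *	else
+ *		dwc_otg_core_dev_init(core_if);
+ */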
+
+/**
+ * This function enables the Device mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if)
+{
+ gintmsk_data_t intr_mask = { .d32 = 0};
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+
+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+ /* Disable all interrupts. */
+ dwc_write_reg32( &global_regs->gintmsk, 0);
+
+ /* Clear any pending interrupts */
+ dwc_write_reg32( &global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Enable the common interrupts */
+ dwc_otg_enable_common_interrupts( _core_if );
+
+ /* Enable interrupts */
+ intr_mask.b.usbreset = 1;
+ intr_mask.b.enumdone = 1;
+ //intr_mask.b.epmismatch = 1;
+ intr_mask.b.inepintr = 1;
+ intr_mask.b.outepintr = 1;
+ intr_mask.b.erlysuspend = 1;
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.epmismatch = 1;
+ }
+
+ /** @todo NGS: Should this be a module parameter? */
+ intr_mask.b.isooutdrop = 1;
+ intr_mask.b.eopframe = 1;
+ intr_mask.b.incomplisoin = 1;
+ intr_mask.b.incomplisoout = 1;
+
+ dwc_modify_reg32( &global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+
+ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
+ dwc_read_reg32( &global_regs->gintmsk));
+}
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * device mode.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs =
+ _core_if->core_global_regs;
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ dwc_otg_core_params_t *params = _core_if->core_params;
+ dcfg_data_t dcfg = {.d32 = 0};
+ grstctl_t resetctl = { .d32=0 };
+ int i;
+ uint32_t rx_fifo_size;
+ fifosize_data_t nptxfifosize;
+ fifosize_data_t txfifosize;
+ dthrctl_data_t dthrctl;
+
+ fifosize_data_t ptxfifosize;
+
+ /* Restart the Phy Clock */
+ dwc_write_reg32(_core_if->pcgcctl, 0);
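+ /* Writing 0 clears the clock-gating and power-down bits so the PHY and
+ * AHB clocks run again. */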
+
+ /* Device configuration register */
+ init_devspd(_core_if);
+ dcfg.d32 = dwc_read_reg32( &dev_if->dev_global_regs->dcfg);
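+ /* Raise the end-of-periodic-frame notification at 80% of the (micro)frame. */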
+ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
+ dwc_write_reg32( &dev_if->dev_global_regs->dcfg, dcfg.d32 );
+
+ /* Configure data FIFO sizes */
+ if ( _core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo ) {
+
+ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size);
+
+ /* Rx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->grxfsiz));
+ rx_fifo_size = params->dev_rx_fifo_size;
+ dwc_write_reg32( &global_regs->grxfsiz, rx_fifo_size );
+ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->grxfsiz));
+
+ /** Set Periodic Tx FIFO Mask all bits 0 */
+ _core_if->p_tx_msk = 0;
+
+ /** Set Tx FIFO Mask all bits 0 */
+ _core_if->tx_msk = 0;
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+ dwc_write_reg32( &global_regs->gnptxfsiz, nptxfifosize.d32 );
+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+
+
+ /**@todo NGS: Fix Periodic FIFO Sizing! */
+ /*
+ * Periodic Tx FIFOs. These FIFOs are numbered from 1 to 15.
+ * Indexes of the FIFO size module parameters in the
+ * dev_perio_tx_fifo_size array and the FIFO size registers in
+ * the dptxfsiz array run from 0 to 14.
+ */
+ /** @todo Finish debug of this */
+ ptxfifosize.b.startaddr =
+ nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) {
+ ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i];
+ DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],ptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+ ptxfifosize.b.startaddr += ptxfifosize.b.depth;
+ }
+ } else {
+
+ /*
+ * Tx FIFOs. These FIFOs are numbered from 1 to 15.
+ * Indexes of the FIFO size module parameters in the
+ * dev_tx_fifo_size array and the FIFO size registers in
+ * the dptxfsiz_dieptxf array run from 0 to 14.
+ */
+
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+ dwc_read_reg32(&global_regs->gnptxfsiz));
+ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ for (i = 1;i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) {
+ txfifosize.b.depth = params->dev_tx_fifo_size[i];
+ DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i - 1],txfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n",
+ i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1]));
+ txfifosize.b.startaddr += txfifosize.b.depth;
+ }
+ }
+ }
+ /* Flush the FIFOs */
+ dwc_otg_flush_tx_fifo(_core_if, 0x10); /* all Tx FIFOs */
+ dwc_otg_flush_rx_fifo(_core_if);
+
+ /* Flush the Learning Queue. */
+ resetctl.b.intknqflsh = 1;
+ dwc_write_reg32( &_core_if->core_global_regs->grstctl, resetctl.d32);
+
+ /* Clear all pending Device Interrupts */
+ dwc_write_reg32( &dev_if->dev_global_regs->diepmsk, 0 );
+ dwc_write_reg32( &dev_if->dev_global_regs->doepmsk, 0 );
+ dwc_write_reg32( &dev_if->dev_global_regs->daint, 0xFFFFFFFF );
+ dwc_write_reg32( &dev_if->dev_global_regs->daintmsk, 0 );
+
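+ /* Park each IN endpoint: request a disable with NAK on any endpoint the
+ * core still has enabled, then clear its transfer size and DMA registers
+ * and acknowledge any pending endpoint interrupts. */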
+ for (i = 0; i <= dev_if->num_in_eps; i++) {
+ depctl_data_t depctl;
+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
+ if (depctl.b.epena) {
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ } else {
+ depctl.d32 = 0;
+ }
+ dwc_write_reg32( &dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
+ }
+ for (i = 0; i <= dev_if->num_out_eps; i++) {
+ depctl_data_t depctl;
+ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
+ if (depctl.b.epena) {
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ } else {
+ depctl.d32 = 0;
+ }
+ dwc_write_reg32( &dev_if->out_ep_regs[i]->doepctl, depctl.d32);
+
+ //dwc_write_reg32( &dev_if->in_ep_regs[i]->dieptsiz, 0);
+ dwc_write_reg32( &dev_if->out_ep_regs[i]->doeptsiz, 0);
+ //dwc_write_reg32( &dev_if->in_ep_regs[i]->diepdma, 0);
+ dwc_write_reg32( &dev_if->out_ep_regs[i]->doepdma, 0);
+ //dwc_write_reg32( &dev_if->in_ep_regs[i]->diepint, 0xFF);
+ dwc_write_reg32( &dev_if->out_ep_regs[i]->doepint, 0xFF);
+ }
+
+ if (_core_if->en_multiple_tx_fifo && _core_if->dma_enable) {
+ dev_if->non_iso_tx_thr_en = _core_if->core_params->thr_ctl & 0x1;
+ dev_if->iso_tx_thr_en = (_core_if->core_params->thr_ctl >> 1) & 0x1;
+ dev_if->rx_thr_en = (_core_if->core_params->thr_ctl >> 2) & 0x1;
+ dev_if->rx_thr_length = _core_if->core_params->rx_thr_length;
+ dev_if->tx_thr_length = _core_if->core_params->tx_thr_length;
+ dthrctl.d32 = 0;
+ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
+ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
+ dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
+ dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
+ dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
+ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl,dthrctl.d32);
+ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\n"
+ "Rx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
+ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
+ dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
+ dthrctl.b.rx_thr_len);
+ }
+ dwc_otg_enable_device_interrupts( _core_if );
+ {
+ diepmsk_data_t msk = {.d32 = 0};
+ msk.b.txfifoundrn = 1;
+ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32,msk.d32);
+ }
+}
+
+/**
+ * This function enables the Host mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+
+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+ /* Disable all interrupts. */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* Clear any pending interrupts. */
+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Enable the common interrupts */
+ dwc_otg_enable_common_interrupts(_core_if);
+
+ /*
+ * Enable host mode interrupts without disturbing common
+ * interrupts.
+ */
+ intr_mask.b.sofintr = 1;
+ intr_mask.b.portintr = 1;
+ intr_mask.b.hcintr = 1;
+
+ //dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+ //dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+}
+
+/**
+ * This function disables the Host Mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs =
+ _core_if->core_global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+
+ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
+
+ /*
+ * Disable host mode interrupts without disturbing common
+ * interrupts.
+ */
+ intr_mask.b.sofintr = 1;
+ intr_mask.b.portintr = 1;
+ intr_mask.b.hcintr = 1;
+ intr_mask.b.ptxfempty = 1;
+ intr_mask.b.nptxfempty = 1;
+
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
+}
+
+#if 0
+/* currently not used; kept here in case it is needed later */
+static int phy_read(dwc_otg_core_if_t * _core_if, int addr)
+{
+ u32 val;
+ int timeout = 10;
+
+ dwc_write_reg32(&_core_if->core_global_regs->gpvndctl,
+ 0x02000000 | (addr << 16));
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ while (((val & 0x08000000) == 0) && (timeout--)) {
+ udelay(1000);
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ }
+ val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+ printk("%s: addr=%02x regval=%02x\n", __func__, addr, val & 0x000000ff);
+
+ return 0;
+}
+#endif
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * host mode.
+ *
+ * This function flushes the Tx and Rx FIFOs and any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ dwc_otg_host_if_t *host_if = _core_if->host_if;
+ dwc_otg_core_params_t *params = _core_if->core_params;
+ hprt0_data_t hprt0 = {.d32 = 0};
+ fifosize_data_t nptxfifosize;
+ fifosize_data_t ptxfifosize;
+ int i;
+ hcchar_data_t hcchar;
+ hcfg_data_t hcfg;
+ dwc_otg_hc_regs_t *hc_regs;
+ int num_channels;
+ gotgctl_data_t gotgctl = {.d32 = 0};
+
+ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, _core_if);
+
+ /* Restart the Phy Clock */
+ dwc_write_reg32(_core_if->pcgcctl, 0);
+
+ /* Initialize Host Configuration Register */
+ init_fslspclksel(_core_if);
+ if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+ hcfg.b.fslssupp = 1;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ }
+
+ /* Configure data FIFO sizes */
+ if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size);
+
+ /* Rx FIFO */
+ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
+ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz));
+
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->host_rx_fifo_size;
+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
+
+ /* Periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
+ ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
+ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
+ }
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32( &global_regs->gotgctl, gotgctl.d32, 0);
+
+ /* Make sure the FIFOs are flushed. */
+ dwc_otg_flush_tx_fifo(_core_if, 0x10 /* all Tx FIFOs */);
+ dwc_otg_flush_rx_fifo(_core_if);
+
+ /* Flush out any leftover queued requests. */
+ num_channels = _core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ hc_regs = _core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ }
+
+ /* Halt all channels to put them into a known state. */
+ for (i = 0; i < num_channels; i++) {
+ int count = 0;
+ hc_regs = _core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
+ do {
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (++count > 200) {
+ DWC_ERROR("%s: Unable to clear halt on channel %d\n",
+ __func__, i);
+ break;
+ }
+ udelay(100);
+ } while (hcchar.b.chen);
+ }
+
+ /* Turn on the vbus power. */
+ DWC_PRINT("Init: Port Power? op_state=%d\n", _core_if->op_state);
+ if (_core_if->op_state == A_HOST){
+ hprt0.d32 = dwc_otg_read_hprt0(_core_if);
+ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+ if (hprt0.b.prtpwr == 0 ) {
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(host_if->hprt0, hprt0.d32);
+ }
+ }
+
+ dwc_otg_enable_host_interrupts( _core_if );
+}
+
+/**
+ * Prepares a host channel for transferring packets to/from a specific
+ * endpoint. The HCCHARn register is set up with the characteristics specified
+ * in _hc. Host channel interrupts that may need to be serviced while this
+ * transfer is in progress are enabled.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ * @param _hc Information needed to initialize the host channel
+ */
+void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ uint32_t intr_enable;
+ hcintmsk_data_t hc_intr_mask;
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+
+ uint8_t hc_num = _hc->hc_num;
+ dwc_otg_host_if_t *host_if = _core_if->host_if;
+ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
+
+ /* Clear old interrupt conditions for this host channel. */
+ hc_intr_mask.d32 = 0xFFFFFFFF;
+ hc_intr_mask.b.reserved = 0;
+ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
+
+ /* Enable channel interrupts required for this transfer. */
+ hc_intr_mask.d32 = 0;
+ hc_intr_mask.b.chhltd = 1;
+ if (_core_if->dma_enable) {
+ hc_intr_mask.b.ahberr = 1;
+ if (_hc->error_state && !_hc->do_split &&
+ _hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+ hc_intr_mask.b.ack = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.datatglerr = 1;
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
+ hc_intr_mask.b.nak = 1;
+ }
+ }
+ }
+ } else {
+ switch (_hc->ep_type) {
+ case DWC_OTG_EP_TYPE_CONTROL:
+ case DWC_OTG_EP_TYPE_BULK:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.stall = 1;
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.datatglerr = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.bblerr = 1;
+ } else {
+ hc_intr_mask.b.nak = 1;
+ hc_intr_mask.b.nyet = 1;
+ if (_hc->do_ping) {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+
+ if (_hc->do_split) {
+ hc_intr_mask.b.nak = 1;
+ if (_hc->complete_split) {
+ hc_intr_mask.b.nyet = 1;
+ }
+ else {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+
+ if (_hc->error_state) {
+ hc_intr_mask.b.ack = 1;
+ }
+ break;
+ case DWC_OTG_EP_TYPE_INTR:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.nak = 1;
+ hc_intr_mask.b.stall = 1;
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.datatglerr = 1;
+ hc_intr_mask.b.frmovrun = 1;
+
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.bblerr = 1;
+ }
+ if (_hc->error_state) {
+ hc_intr_mask.b.ack = 1;
+ }
+ if (_hc->do_split) {
+ if (_hc->complete_split) {
+ hc_intr_mask.b.nyet = 1;
+ }
+ else {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ break;
+ case DWC_OTG_EP_TYPE_ISOC:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.frmovrun = 1;
+ hc_intr_mask.b.ack = 1;
+
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.bblerr = 1;
+ }
+ break;
+ }
+ }
+ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
+
+ /* Enable the top level host channel interrupt. */
+ intr_enable = (1 << hc_num);
+ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
+
+ /* Make sure host channel interrupts are enabled. */
+ gintmsk.b.hcintr = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
+
+ /*
+ * Program the HCCHARn register with the endpoint characteristics for
+ * the current transfer.
+ */
+ hcchar.d32 = 0;
+ hcchar.b.devaddr = _hc->dev_addr;
+ hcchar.b.epnum = _hc->ep_num;
+ hcchar.b.epdir = _hc->ep_is_in;
+ hcchar.b.lspddev = (_hc->speed == DWC_OTG_EP_SPEED_LOW);
+ hcchar.b.eptype = _hc->ep_type;
+ hcchar.b.mps = _hc->max_packet;
+
+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
+
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
+ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
+ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
+ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
+ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
+
+ /*
+ * Program the HCSPLIT register for SPLITs
+ */
+ hcsplt.d32 = 0;
+ if (_hc->do_split) {
+ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", _hc->hc_num,
+ _hc->complete_split ? "CSPLIT" : "SSPLIT");
+ hcsplt.b.compsplt = _hc->complete_split;
+ hcsplt.b.xactpos = _hc->xact_pos;
+ hcsplt.b.hubaddr = _hc->hub_addr;
+ hcsplt.b.prtaddr = _hc->port_addr;
+ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", _hc->complete_split);
+ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", _hc->xact_pos);
+ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", _hc->hub_addr);
+ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", _hc->port_addr);
+ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", _hc->ep_is_in);
+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", _hc->xfer_len);
+ }
+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
+
+}
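+
+/*
+ * Illustrative sketch (hypothetical caller-side names): before handing a
+ * channel to dwc_otg_hc_init() and dwc_otg_hc_start_transfer(), the HCD is
+ * expected to have filled in the dwc_hc_t along these lines:
+ *
+ *	hc->hc_num      = assigned_channel;
+ *	hc->dev_addr    = device_address;
+ *	hc->ep_num      = endpoint_number;
+ *	hc->ep_is_in    = 1;                     /. IN transfer ./
+ *	hc->ep_type     = DWC_OTG_EP_TYPE_BULK;
+ *	hc->speed       = DWC_OTG_EP_SPEED_HIGH;
+ *	hc->max_packet  = 512;
+ *	hc->multi_count = 1;
+ *	hc->xfer_buff   = buf;
+ *	hc->xfer_len    = len;
+ *	dwc_otg_hc_init(core_if, hc);
+ *	dwc_otg_hc_start_transfer(core_if, hc);
+ */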
+
+/**
+ * Attempts to halt a host channel. This function should only be called in
+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
+ * normal circumstances in DMA mode, the controller halts the channel when the
+ * transfer is complete or a condition occurs that requires application
+ * intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to initiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ *
+ * @param _core_if Controller register interface.
+ * @param _hc Host channel to halt.
+ * @param _halt_status Reason for halting the channel.
+ */
+void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if,
+ dwc_hc_t *_hc,
+ dwc_otg_halt_status_e _halt_status)
+{
+ gnptxsts_data_t nptxsts;
+ hptxsts_data_t hptxsts;
+ hcchar_data_t hcchar;
+ dwc_otg_hc_regs_t *hc_regs;
+ dwc_otg_core_global_regs_t *global_regs;
+ dwc_otg_host_global_regs_t *host_global_regs;
+
+ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ global_regs = _core_if->core_global_regs;
+ host_global_regs = _core_if->host_if->host_global_regs;
+
+ WARN_ON(_halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
+
+ if (_halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+ _halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+ /*
+ * Disable all channel interrupts except Ch Halted. The QTD
+ * and QH state associated with this transfer has been cleared
+ * (in the case of URB_DEQUEUE), so the channel needs to be
+ * shut down carefully to prevent crashes.
+ */
+ hcintmsk_data_t hcintmsk;
+ hcintmsk.d32 = 0;
+ hcintmsk.b.chhltd = 1;
+ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
+
+ /*
+ * Make sure no other interrupts besides halt are currently
+ * pending. Handling another interrupt could cause a crash due
+ * to the QTD and QH state.
+ */
+ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
+
+ /*
+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+ * even if the channel was already halted for some other
+ * reason.
+ */
+ _hc->halt_status = _halt_status;
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen == 0) {
+ /*
+ * The channel is either already halted or it hasn't
+ * started yet. In DMA mode, the transfer may halt if
+ * it finishes normally or a condition occurs that
+ * requires driver intervention. Don't want to halt
+ * the channel again. In either Slave or DMA mode,
+ * it's possible that the transfer has been assigned
+ * to a channel, but not started yet when an URB is
+ * dequeued. Don't want to halt a channel that hasn't
+ * started yet.
+ */
+ return;
+ }
+ }
+
+ if (_hc->halt_pending) {
+ /*
+ * A halt has already been issued for this channel. This might
+ * happen when a transfer is aborted by a higher level in
+ * the stack.
+ */
+#ifdef DEBUG
+ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
+ __func__, _hc->hc_num);
+
+/* dwc_otg_dump_global_registers(_core_if); */
+/* dwc_otg_dump_host_registers(_core_if); */
+#endif
+ return;
+ }
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 1;
+
+ if (!_core_if->dma_enable) {
+ /* Check for space in the request queue to issue the halt. */
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
+ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ if (nptxsts.b.nptxqspcavail == 0) {
+ hcchar.b.chen = 0;
+ }
+ } else {
+ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts);
+ if ((hptxsts.b.ptxqspcavail == 0) || (_core_if->queuing_high_bandwidth)) {
+ hcchar.b.chen = 0;
+ }
+ }
+ }
+
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ _hc->halt_status = _halt_status;
+
+ if (hcchar.b.chen) {
+ _hc->halt_pending = 1;
+ _hc->halt_on_queue = 0;
+ } else {
+ _hc->halt_on_queue = 1;
+ }
+
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", _hc->halt_pending);
+ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", _hc->halt_on_queue);
+ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", _hc->halt_status);
+
+ return;
+}
+
+/**
+ * Clears the transfer state for a host channel. This function is normally
+ * called after a transfer is done and the host channel is being released.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Identifies the host channel to clean up.
+ */
+void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ dwc_otg_hc_regs_t *hc_regs;
+
+ _hc->xfer_started = 0;
+
+ /*
+ * Clear channel interrupt enables and any unhandled channel interrupt
+ * conditions.
+ */
+ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ dwc_write_reg32(&hc_regs->hcintmsk, 0);
+ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
+
+#ifdef DEBUG
+ del_timer(&_core_if->hc_xfer_timer[_hc->hc_num]);
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chdis) {
+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, _hc->hc_num, hcchar.d32);
+ }
+ }
+#endif
+}
+
+/**
+ * Sets the channel property that indicates in which frame a periodic transfer
+ * should occur. This is always set to the _next_ frame. This function has no
+ * effect on non-periodic transfers.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Identifies the host channel to set up and its properties.
+ * @param _hcchar Current value of the HCCHAR register for the specified host
+ * channel.
+ */
+static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *_core_if,
+ dwc_hc_t *_hc,
+ hcchar_data_t *_hcchar)
+{
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ hfnum_data_t hfnum;
+ hfnum.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hfnum);
+ /* 1 if _next_ frame is odd, 0 if it's even */
+ _hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
+#ifdef DEBUG
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR && _hc->do_split && !_hc->complete_split) {
+ switch (hfnum.b.frnum & 0x7) {
+ case 7:
+ _core_if->hfnum_7_samples++;
+ _core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
+ break;
+ case 0:
+ _core_if->hfnum_0_samples++;
+ _core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
+ break;
+ default:
+ _core_if->hfnum_other_samples++;
+ _core_if->hfnum_other_frrem_accum += hfnum.b.frrem;
+ break;
+ }
+ }
+#endif
+ }
+}
+
+#ifdef DEBUG
+static void hc_xfer_timeout(unsigned long _ptr)
+{
+ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)_ptr;
+ int hc_num = xfer_info->hc->hc_num;
+ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
+ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]);
+}
+#endif
+
+/*
+ * This function does the setup for a data transfer for a host channel and
+ * starts the transfer. May be called in either Slave mode or DMA mode. In
+ * Slave mode, the caller must ensure that there is sufficient space in the
+ * request queue and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets will be loaded in
+ * the Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Information needed to initialize the host channel. The xfer_len
+ * value may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ * to reflect the final xfer_len value.
+ */
+void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ uint16_t num_packets;
+ uint32_t max_hc_xfer_size = _core_if->core_params->max_transfer_size;
+ uint16_t max_hc_pkt_count = _core_if->core_params->max_packet_count;
+ dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+
+ hctsiz.d32 = 0;
+
+ if (_hc->do_ping) {
+ if (!_core_if->dma_enable) {
+ dwc_otg_hc_do_ping(_core_if, _hc);
+ _hc->xfer_started = 1;
+ return;
+ } else {
+ hctsiz.b.dopng = 1;
+ }
+ }
+
+ if (_hc->do_split) {
+ num_packets = 1;
+
+ if (_hc->complete_split && !_hc->ep_is_in) {
+ /* For CSPLIT OUT Transfer, set the size to 0 so the
+ * core doesn't expect any data written to the FIFO */
+ _hc->xfer_len = 0;
+ } else if (_hc->ep_is_in || (_hc->xfer_len > _hc->max_packet)) {
+ _hc->xfer_len = _hc->max_packet;
+ } else if (!_hc->ep_is_in && (_hc->xfer_len > 188)) {
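+ /* 188 bytes is the most a start-split OUT can carry: the largest
+ * payload the downstream full-/low-speed bus can move in one microframe. */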
+ _hc->xfer_len = 188;
+ }
+
+ hctsiz.b.xfersize = _hc->xfer_len;
+ } else {
+ /*
+ * Ensure that the transfer length and packet count will fit
+ * in the widths allocated for them in the HCTSIZn register.
+ */
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * Make sure the transfer size is no larger than one
+ * (micro)frame's worth of data. (A check was done
+ * when the periodic transfer was accepted to ensure
+ * that a (micro)frame's worth of data can be
+ * programmed into a channel.)
+ */
+ uint32_t max_periodic_len = _hc->multi_count * _hc->max_packet;
+ if (_hc->xfer_len > max_periodic_len) {
+ _hc->xfer_len = max_periodic_len;
+ }
+ } else if (_hc->xfer_len > max_hc_xfer_size) {
+ /* Make sure that xfer_len is a multiple of max packet size. */
+ _hc->xfer_len = max_hc_xfer_size - _hc->max_packet + 1;
+ }
+
+ if (_hc->xfer_len > 0) {
+ num_packets = (_hc->xfer_len + _hc->max_packet - 1) / _hc->max_packet;
+ if (num_packets > max_hc_pkt_count) {
+ num_packets = max_hc_pkt_count;
+ _hc->xfer_len = num_packets * _hc->max_packet;
+ }
+ } else {
+ /* Need 1 packet for transfer length of 0. */
+ num_packets = 1;
+ }
+
+ if (_hc->ep_is_in) {
+ /* Always program an integral # of max packets for IN transfers. */
+ _hc->xfer_len = num_packets * _hc->max_packet;
+ }
+
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * Make sure that the multi_count field matches the
+ * actual transfer length.
+ */
+ _hc->multi_count = num_packets;
+
+ }
+
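+ /* For high-speed high-bandwidth isochronous transfers the starting PID
+ * depends on the packets per microframe: DATA0/DATA1/DATA2 for 1/2/3
+ * packets IN, and MDATA for multi-packet OUT. */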
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /* Set up the initial PID for the transfer. */
+ if (_hc->speed == DWC_OTG_EP_SPEED_HIGH) {
+ if (_hc->ep_is_in) {
+ if (_hc->multi_count == 1) {
+ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
+ } else if (_hc->multi_count == 2) {
+ _hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
+ } else {
+ _hc->data_pid_start = DWC_OTG_HC_PID_DATA2;
+ }
+ } else {
+ if (_hc->multi_count == 1) {
+ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
+ } else {
+ _hc->data_pid_start = DWC_OTG_HC_PID_MDATA;
+ }
+ }
+ } else {
+ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
+ }
+ }
+
+ hctsiz.b.xfersize = _hc->xfer_len;
+ }
+
+ _hc->start_pkt_count = num_packets;
+ hctsiz.b.pktcnt = num_packets;
+ hctsiz.b.pid = _hc->data_pid_start;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
+ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
+ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
+
+ if (_core_if->dma_enable) {
+#ifdef DEBUG
+ if (((uint32_t)_hc->xfer_buff) % 4)
+ printk("dwc_otg_hc_start_transfer: _hc->xfer_buff is not 4-byte aligned\n");
+#endif
+ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)_hc->xfer_buff);
+ }
+
+ /* Start the split */
+ if (_hc->do_split) {
+ hcsplt_data_t hcsplt;
+ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt);
+ hcsplt.b.spltena = 1;
+ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
+ }
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.multicnt = _hc->multi_count;
+ hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+#ifdef DEBUG
+ _core_if->start_hcchar_val[_hc->hc_num] = hcchar.d32;
+ if (hcchar.b.chdis) {
+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, _hc->hc_num, hcchar.d32);
+ }
+#endif
+
+ /* Set host channel enable after all other setup is complete. */
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ _hc->xfer_started = 1;
+ _hc->requests++;
+
+ if (!_core_if->dma_enable && !_hc->ep_is_in && _hc->xfer_len > 0) {
+ /* Load OUT packet into the appropriate Tx FIFO. */
+ dwc_otg_hc_write_packet(_core_if, _hc);
+ }
+
+#ifdef DEBUG
+ /* Start a timer for this transfer. */
+ _core_if->hc_xfer_timer[_hc->hc_num].function = hc_xfer_timeout;
+ _core_if->hc_xfer_info[_hc->hc_num].core_if = _core_if;
+ _core_if->hc_xfer_info[_hc->hc_num].hc = _hc;
+ _core_if->hc_xfer_timer[_hc->hc_num].data = (unsigned long)(&_core_if->hc_xfer_info[_hc->hc_num]);
+ _core_if->hc_xfer_timer[_hc->hc_num].expires = jiffies + (HZ*10);
+ add_timer(&_core_if->hc_xfer_timer[_hc->hc_num]);
+#endif
+}
+
+/**
+ * This function continues a data transfer that was started by previous call
+ * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
+ * sufficient space in the request queue and Tx Data FIFO. This function
+ * should only be called in Slave mode. In DMA mode, the controller acts
+ * autonomously to complete transfers programmed to a host channel.
+ *
+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ * if there is any data remaining to be queued. For an IN transfer, another
+ * data packet is always requested. For the SETUP phase of a control transfer,
+ * this function does nothing.
+ *
+ * @return 1 if a new request is queued, 0 if no more requests are required
+ * for this transfer.
+ */
+int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+
+ if (_hc->do_split) {
+ /* SPLITs always queue just once per channel */
+ return 0;
+ } else if (_hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
+ /* SETUPs are queued only once since they can't be NAKed. */
+ return 0;
+ } else if (_hc->ep_is_in) {
+ /*
+ * Always queue another request for other IN transfers. If
+ * back-to-back INs are issued and NAKs are received for both,
+ * the driver may still be processing the first NAK when the
+ * second NAK is received. When the interrupt handler clears
+ * the NAK interrupt for the first NAK, the second NAK will
+ * not be seen. So we can't depend on the NAK interrupt
+ * handler to requeue a NAKed request. Instead, IN requests
+ * are issued each time this function is called. When the
+ * transfer completes, the extra requests for the channel will
+ * be flushed.
+ */
+ hcchar_data_t hcchar;
+ dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32);
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ _hc->requests++;
+ return 1;
+ } else {
+ /* OUT transfers. */
+ if (_hc->xfer_count < _hc->xfer_len) {
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ hcchar_data_t hcchar;
+ dwc_otg_hc_regs_t *hc_regs;
+ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+ }
+
+ /* Load OUT packet into the appropriate Tx FIFO. */
+ dwc_otg_hc_write_packet(_core_if, _hc);
+ _hc->requests++;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/**
+ * Starts a PING transfer. This function should only be called in Slave mode.
+ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
+ */
+void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ dwc_otg_hc_regs_t *hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+
+ hctsiz.d32 = 0;
+ hctsiz.b.dopng = 1;
+ hctsiz.b.pktcnt = 1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+}
+
+/*
+ * This function writes a packet into the Tx FIFO associated with the Host
+ * Channel. For a channel associated with a non-periodic EP, the non-periodic
+ * Tx FIFO is written. For a channel associated with a periodic EP, the
+ * periodic Tx FIFO is written. This function should only be called in Slave
+ * mode.
+ *
+ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by
+ * the number of bytes written to the Tx FIFO.
+ */
+void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ uint32_t i;
+ uint32_t remaining_count;
+ uint32_t byte_count;
+ uint32_t dword_count;
+
+ uint32_t *data_buff = (uint32_t *)(_hc->xfer_buff);
+ uint32_t *data_fifo = _core_if->data_fifo[_hc->hc_num];
+
+ remaining_count = _hc->xfer_len - _hc->xfer_count;
+ if (remaining_count > _hc->max_packet) {
+ byte_count = _hc->max_packet;
+ } else {
+ byte_count = remaining_count;
+ }
+
+ dword_count = (byte_count + 3) / 4;
+
+ if ((((unsigned long)data_buff) & 0x3) == 0) {
+ /* xfer_buff is DWORD aligned. */
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ dwc_write_reg32(data_fifo, *data_buff);
+ }
+ } else {
+ /* xfer_buff is not DWORD aligned. */
+ for (i = 0; i < dword_count; i++, data_buff++) {
+ dwc_write_reg32(data_fifo, get_unaligned(data_buff));
+ }
+ }
+
+ _hc->xfer_count += byte_count;
+ _hc->xfer_buff += byte_count;
+}
+
+/**
+ * Gets the current USB frame number. This is the frame number from the last
+ * SOF packet.
+ */
+uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if)
+{
+ dsts_data_t dsts;
+ dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts);
+
+ /* read current frame/microframe number from DSTS register */
+ return dsts.b.soffn;
+}
+
+/**
+ * This function reads a setup packet from the Rx FIFO into the destination
+ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
+ * Interrupt routine when a SETUP packet has been received in Slave mode.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _dest Destination buffer for packet data.
+ */
+void dwc_otg_read_setup_packet(dwc_otg_core_if_t *_core_if, uint32_t *_dest)
+{
+ /* Get the 8 bytes of a setup transaction data */
+
+ /* Pop 2 DWORDS off the receive data FIFO into memory */
+ _dest[0] = dwc_read_reg32(_core_if->data_fifo[0]);
+ _dest[1] = dwc_read_reg32(_core_if->data_fifo[0]);
+ //_dest[0] = dwc_read_datafifo32(_core_if->data_fifo[0]);
+ //_dest[1] = dwc_read_datafifo32(_core_if->data_fifo[0]);
+}
+
+
+/**
+ * This function enables EP0 OUT to receive SETUP packets and configures EP0
+ * IN for transmitting packets. It is normally called when the
+ * "Enumeration Done" interrupt occurs.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ dsts_data_t dsts;
+ depctl_data_t diepctl;
+ depctl_data_t doepctl;
+ dctl_data_t dctl ={.d32=0};
+
+ /* Read the Device Status and Endpoint 0 Control registers */
+ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
+ diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
+ doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
+
+ /* Set the MPS of the IN EP based on the enumeration speed */
+ switch (dsts.b.enumspd) {
+ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
+ diepctl.b.mps = DWC_DEP0CTL_MPS_64;
+ break;
+ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
+ diepctl.b.mps = DWC_DEP0CTL_MPS_8;
+ break;
+ }
+
+ dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+ /* Enable OUT EP for receive */
+ doepctl.b.epena = 1;
+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+
+#ifdef VERBOSE
+ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n",
+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n",
+ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
+#endif
+ dctl.b.cgnpinnak = 1;
+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+ DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n",
+ dwc_read_reg32(&dev_if->dev_global_regs->dctl));
+}
+
+/**
+ * This function activates an EP. The Device EP control register for
+ * the EP is configured as defined in the ep structure. Note: This
+ * function is not used for EP0.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to activate.
+ */
+void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ depctl_data_t depctl;
+ volatile uint32_t *addr;
+ daint_data_t daintmsk = {.d32=0};
+
+ DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, _ep->num,
+ (_ep->is_in?"IN":"OUT"));
+
+ /* Read DEPCTLn register */
+ if (_ep->is_in == 1) {
+ addr = &dev_if->in_ep_regs[_ep->num]->diepctl;
+ daintmsk.ep.in = 1<<_ep->num;
+ } else {
+ addr = &dev_if->out_ep_regs[_ep->num]->doepctl;
+ daintmsk.ep.out = 1<<_ep->num;
+ }
+
+ /* If the EP is already active don't change the EP Control
+ * register. */
+ depctl.d32 = dwc_read_reg32(addr);
+ if (!depctl.b.usbactep) {
+ depctl.b.mps = _ep->maxpacket;
+ depctl.b.eptype = _ep->type;
+ depctl.b.txfnum = _ep->tx_fifo_num;
+
+ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+ depctl.b.setd0pid = 1; // ???
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ depctl.b.usbactep = 1;
+
+ dwc_write_reg32(addr, depctl.d32);
+ DWC_DEBUGPL(DBG_PCDV,"DEPCTL=%08x\n", dwc_read_reg32(addr));
+ }
+
+
+ /* Enable the Interrupt for this EP */
+ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk,
+ 0, daintmsk.d32);
+ DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n",
+ dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
+ _ep->stall_clear_flag = 0;
+ return;
+}
+
+/**
+ * This function deactivates an EP. This is done by clearing the USB Active
+ * EP bit in the Device EP control register. Note: This function is not used
+ * for EP0. EP0 cannot be deactivated.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to deactivate.
+ */
+void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ depctl_data_t depctl ={.d32 = 0};
+ volatile uint32_t *addr;
+ daint_data_t daintmsk = {.d32=0};
+
+ /* Read DEPCTLn register */
+ if (_ep->is_in == 1) {
+ addr = &_core_if->dev_if->in_ep_regs[_ep->num]->diepctl;
+ daintmsk.ep.in = 1<<_ep->num;
+ } else {
+ addr = &_core_if->dev_if->out_ep_regs[_ep->num]->doepctl;
+ daintmsk.ep.out = 1<<_ep->num;
+ }
+
+ depctl.b.usbactep = 0;
+ dwc_write_reg32(addr, depctl.d32);
+
+ /* Disable the Interrupt for this EP */
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->daintmsk,
+ daintmsk.d32, 0);
+
+ return;
+}
+
+/**
+ * This function does the setup for a data transfer for an EP and
+ * starts the transfer. For an IN transfer, the packets will be
+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
+ * the packets are unloaded from the Rx FIFO in the ISR.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to start the transfer on.
+ */
+void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ /** @todo Refactor this function to check that the transfer size
+ * count value does not exceed the number of bits in the Transfer
+ * count register. */
+ depctl_data_t depctl;
+ deptsiz_data_t deptsiz;
+ gintmsk_data_t intr_mask = { .d32 = 0};
+
+#ifdef CHECK_PACKET_COUNTER_WIDTH
+ const uint32_t MAX_XFER_SIZE =
+ _core_if->core_params->max_transfer_size;
+ const uint32_t MAX_PKT_COUNT =
+ _core_if->core_params->max_packet_count;
+ uint32_t num_packets;
+ uint32_t transfer_len;
+ dwc_otg_dev_out_ep_regs_t *out_regs =
+ _core_if->dev_if->out_ep_regs[_ep->num];
+ dwc_otg_dev_in_ep_regs_t *in_regs =
+ _core_if->dev_if->in_ep_regs[_ep->num];
+ gnptxsts_data_t txstatus;
+
+ int lvl = SET_DEBUG_LEVEL(DBG_PCD);
+
+
+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p\n",
+ _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_len,
+ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
+
+ transfer_len = _ep->xfer_len - _ep->xfer_count;
+ if (transfer_len > MAX_XFER_SIZE) {
+ transfer_len = MAX_XFER_SIZE;
+ }
+ if (transfer_len == 0) {
+ num_packets = 1;
+ /* For an OUT EP to receive a zero-length packet, set the
+ * transfer size to the maxpacket size. */
+ if (!_ep->is_in) {
+ transfer_len = _ep->maxpacket;
+ }
+ } else {
+ num_packets =
+ (transfer_len + _ep->maxpacket - 1) / _ep->maxpacket;
+ if (num_packets > MAX_PKT_COUNT) {
+ num_packets = MAX_PKT_COUNT;
+ }
+ }
+ DWC_DEBUGPL(DBG_PCD, "transfer_len=%d #pckt=%d\n", transfer_len,
+ num_packets);
+
+ deptsiz.b.xfersize = transfer_len;
+ deptsiz.b.pktcnt = num_packets;
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+ } else {/* OUT endpoint */
+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ txstatus.d32 =
+ dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (txstatus.b.nptxqspcavail == 0) {
+ DWC_DEBUGPL(DBG_ANY, "TX Queue Full (0x%0x)\n",
+ txstatus.d32);
+ return;
+ }
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&in_regs->diepdma, (uint32_t) _ep->xfer_buff);
+ } else {
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32( &_core_if->core_global_regs->gintsts,
+ intr_mask.d32, 0);
+ dwc_modify_reg32( &_core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (_ep->xfer_len > 0 &&
+ _ep->type != DWC_OTG_EP_TYPE_ISOC) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk = (0x1 << _ep->num);
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->
+ dtknqr4_fifoemptymsk,0, fifoemptymsk);
+ }
+ }
+ }
+ } else { /* OUT endpoint */
+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+ dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&out_regs->doepdma,(uint32_t) _ep->xfer_buff);
+ }
+ }
+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+ dwc_read_reg32(&out_regs->doepctl),
+ dwc_read_reg32(&out_regs->doeptsiz));
+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk),
+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+
+ SET_DEBUG_LEVEL(lvl);
+#endif
+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+
+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p\n",
+ _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_len,
+ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[_ep->num];
+ gnptxsts_data_t gtxstatus;
+ gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (_core_if->en_multiple_tx_fifo == 0 &&
+ gtxstatus.b.nptxqspcavail == 0) {
+#ifdef DEBUG
+ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
+#endif
+ //return;
+ MDELAY(100); //james
+ }
+
+ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
+
+ /* Zero Length Packet? */
+ if (_ep->xfer_len == 0) {
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+ } else {
+
+ /* Program the transfer size and packet count as follows:
+ *
+ *   xfersize = N * maxpacket + short_packet
+ *   pktcnt   = N + (short_packet exists ? 1 : 0)
+ */
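+ /* e.g. xfer_len = 1030 with maxpacket = 512 gives xfersize = 1030 and
+ * pktcnt = 3 (two 512-byte packets plus one 6-byte short packet). */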
+ deptsiz.b.xfersize = _ep->xfer_len;
+ deptsiz.b.pktcnt = (_ep->xfer_len - 1 + _ep->maxpacket) / _ep->maxpacket;
+ }
+
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+
+ /* Write the DMA register */
+ if (_core_if->dma_enable) {
+#if 1 // winder
+ dma_cache_wback_inv((unsigned long) _ep->xfer_buff, _ep->xfer_len); // winder
+ dwc_write_reg32 (&(in_regs->diepdma),
+ CPHYSADDR((uint32_t)_ep->xfer_buff)); // winder
+#else
+ dwc_write_reg32 (&(in_regs->diepdma),
+ (uint32_t)_ep->dma_addr);
+#endif
+ } else {
+ if (_ep->type != DWC_OTG_EP_TYPE_ISOC) {
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt,
+ * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode;
+ * the data will be written into the FIFO by the ISR.
+ */
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32( &_core_if->core_global_regs->gintsts,
+ intr_mask.d32, 0);
+ dwc_modify_reg32( &_core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (_ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk = 1 << _ep->num;
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->
+ dtknqr4_fifoemptymsk,0,fifoemptymsk);
+ }
+ }
+ }
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+ if (_core_if->dma_enable) {
+ depctl.d32 = dwc_read_reg32 (&_core_if->dev_if->in_ep_regs[0]->diepctl);
+ depctl.b.nextep = _ep->num;
+ dwc_write_reg32 (&_core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
+
+ }
+ } else {
+ /* OUT endpoint */
+ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+
+ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
+ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
+
+ /* Program the transfer size and packet count as follows:
+ *
+ * pktcnt = N
+ * xfersize = N * maxpacket
+ */
+ if (_ep->xfer_len == 0) {
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = _ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket;
+ }
+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+
+ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
+ _ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+
+ if (_core_if->dma_enable) {
+#if 1 // winder
+ dwc_write_reg32 (&(out_regs->doepdma),
+ CPHYSADDR((uint32_t)_ep->xfer_buff)); // winder
+#else
+ dwc_write_reg32 (&(out_regs->doepdma),
+ (uint32_t)_ep->dma_addr);
+#endif
+ }
+
+ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+ /** @todo NGS: dpid is read-only. Use setd0pid
+ * or setd1pid. */
+ if (_ep->even_odd_frame) {
+ depctl.b.setd1pid = 1;
+ } else {
+ depctl.b.setd0pid = 1;
+ }
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+
+ dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+
+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+ dwc_read_reg32(&out_regs->doepctl),
+ dwc_read_reg32(&out_regs->doeptsiz));
+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk),
+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+ }
+}
+
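+#if 0
+/*
+ * Illustrative sketch, not part of the original driver: the pktcnt value
+ * programmed into DEPTSIZ by dwc_otg_ep_start_transfer() above, for a given
+ * transfer length.  The helper name is hypothetical; the driver computes
+ * this inline.
+ */
+static inline uint32_t example_pktcnt(uint32_t xfer_len, uint32_t maxpacket)
+{
+ if (xfer_len == 0)
+  return 1; /* a zero-length packet still counts as one packet */
+ return (xfer_len + maxpacket - 1) / maxpacket; /* round up */
+}
+#endif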
+
+/**
+ * This function does the setup for a data transfer for EP0 and starts
+ * the transfer. For an IN transfer, the packets will be loaded into
+ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
+ * unloaded from the Rx FIFO in the ISR.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ volatile depctl_data_t depctl;
+ volatile deptsiz0_data_t deptsiz;
+ gintmsk_data_t intr_mask = { .d32 = 0};
+
+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+ "xfer_buff=%p start_xfer_buff=%p total_len=%d\n",
+ _ep->num, (_ep->is_in?"IN":"OUT"), _ep->xfer_len,
+ _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff,
+ _ep->total_len);
+ _ep->total_len = _ep->xfer_len;
+
+ /* IN endpoint */
+ if (_ep->is_in == 1) {
+ dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[0];
+ gnptxsts_data_t gtxstatus;
+ gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (_core_if->en_multiple_tx_fifo == 0 &&
+ gtxstatus.b.nptxqspcavail == 0) {
+#ifdef DEBUG
+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n",
+ dwc_read_reg32(&in_regs->diepctl));
+ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
+ deptsiz.d32, deptsiz.b.xfersize,deptsiz.b.pktcnt);
+ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", gtxstatus.d32);
+#endif /* */
+ printk("TX Queue or FIFO Full!!!!\n"); // test-only
+ //return;
+ MDELAY(100); //james
+ }
+
+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+ /* Zero Length Packet? */
+ if (_ep->xfer_len == 0) {
+ deptsiz.b.xfersize = 0;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ /* Program the transfer size and packet count as follows:
+ *   xfersize = N * maxpacket + short_packet
+ *   pktcnt = N + (short_packet exist ? 1 : 0)
+ */
+ if (_ep->xfer_len > _ep->maxpacket) {
+ _ep->xfer_len = _ep->maxpacket;
+ deptsiz.b.xfersize = _ep->maxpacket;
+ }
+ else {
+ deptsiz.b.xfersize = _ep->xfer_len;
+ }
+ deptsiz.b.pktcnt = 1;
+
+ }
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt, deptsiz.d32);
+
+ /* Write the DMA register */
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&(in_regs->diepdma), (uint32_t) _ep->dma_addr);
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (!_core_if->dma_enable) {
+ if (_core_if->en_multiple_tx_fifo == 0) {
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintsts, intr_mask.d32, 0);
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, intr_mask.d32,
+ intr_mask.d32);
+ } else {
+ /* Enable the Tx FIFO Empty Interrupt for this EP */
+ if (_ep->xfer_len > 0) {
+ uint32_t fifoemptymsk = 0;
+ fifoemptymsk |= 1 << _ep->num;
+ dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+ 0, fifoemptymsk);
+ }
+
+ }
+ }
+ } else {
+ /* OUT endpoint */
+ dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+
+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
+
+ /* Program the transfer size and packet count as follows:
+ * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
+ * pktcnt = N */
+ if (_ep->xfer_len == 0) {
+ /* Zero Length Packet */
+ deptsiz.b.xfersize = _ep->maxpacket;
+ deptsiz.b.pktcnt = 1;
+ } else {
+ deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket;
+ deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket;
+ }
+
+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
+ _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt);
+
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&(out_regs->doepdma), (uint32_t) _ep->dma_addr);
+ }
+
+ /* EP enable */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32);
+ }
+}
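+
+#if 0
+/*
+ * Illustrative sketch, not part of the original driver: EP0 IN data is sent
+ * one max-packet at a time (DIEPTSIZ0 only has a small pktcnt field), so a
+ * 100-byte control IN response with maxpacket = 64 is, roughly, driven as
+ * below.  Names are hypothetical; the PCD normally makes these calls from
+ * its interrupt handling.
+ */
+static void example_ep0_in_response(dwc_otg_core_if_t *core_if, dwc_ep_t *ep0)
+{
+ ep0->xfer_len = 100; /* total response length */
+ dwc_otg_ep0_start_transfer(core_if, ep0); /* first chunk: bytes 0..63 */
+ /* ... later, on the EP0 IN transfer-complete interrupt ... */
+ dwc_otg_ep0_continue_transfer(core_if, ep0); /* remaining bytes 64..99 */
+}
+#endif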
+
+/**
+ * This function continues control IN transfers started by
+ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
+ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one
+ * bit for the packet count.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ depctl_data_t depctl;
+ deptsiz0_data_t deptsiz;
+ gintmsk_data_t intr_mask = { .d32 = 0};
+
+ if (_ep->is_in == 1) {
+ dwc_otg_dev_in_ep_regs_t *in_regs =
+ _core_if->dev_if->in_ep_regs[0];
+ gnptxsts_data_t tx_status = {.d32 = 0};
+
+ tx_status.d32 = dwc_read_reg32( &_core_if->core_global_regs->gnptxsts );
+ /** @todo Should there be a check for room in the Tx
+ * Status Queue? If not, remove the code above this comment. */
+
+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+ /* Program the transfer size and packet count as follows:
+ *   xfersize = N * maxpacket + short_packet
+ *   pktcnt = N + (short_packet exist ? 1 : 0)
+ */
+ deptsiz.b.xfersize = (_ep->total_len - _ep->xfer_count) > _ep->maxpacket ? _ep->maxpacket :
+ (_ep->total_len - _ep->xfer_count);
+ deptsiz.b.pktcnt = 1;
+ _ep->xfer_len += deptsiz.b.xfersize;
+
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+ _ep->xfer_len,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32);
+
+ /* Write the DMA register */
+ if (_core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
+ dwc_write_reg32 (&(in_regs->diepdma),
+ CPHYSADDR((uint32_t)_ep->dma_addr)); // winder
+ }
+
+ /* EP enable, IN data in FIFO */
+ depctl.b.cnak = 1;
+ depctl.b.epena = 1;
+ dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+ /**
+ * Enable the Non-Periodic Tx FIFO empty interrupt, the
+ * data will be written into the fifo by the ISR.
+ */
+ if (!_core_if->dma_enable) {
+ /* First clear it from GINTSTS */
+ intr_mask.b.nptxfempty = 1;
+ dwc_write_reg32( &_core_if->core_global_regs->gintsts,
+ intr_mask.d32 );
+
+ dwc_modify_reg32( &_core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+ }
+
+ }
+
+}
+
+#ifdef DEBUG
+void dump_msg(const u8 *buf, unsigned int length)
+{
+ unsigned int start, num, i;
+ char line[52], *p;
+
+ if (length >= 512)
+ return;
+ start = 0;
+ while (length > 0) {
+ num = min(length, 16u);
+ p = line;
+ for (i = 0; i < num; ++i) {
+ if (i == 8)
+ *p++ = ' ';
+ sprintf(p, " %02x", buf[i]);
+ p += 3;
+ }
+ *p = 0;
+ DWC_PRINT( "%6x: %s\n", start, line);
+ buf += num;
+ start += num;
+ length -= num;
+ }
+}
+#else
+static inline void dump_msg(const u8 *buf, unsigned int length)
+{
+}
+#endif
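+
+/*
+ * Usage sketch, not part of the original driver: dump_msg() prints 16 bytes
+ * per line with the offset in hex and an extra space after the 8th byte,
+ * e.g. dumping an 8-byte GET_DESCRIPTOR(device) setup packet:
+ *
+ *   dump_msg(setup_pkt, 8);
+ *        0:  80 06 00 01 00 00 40 00
+ */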
+
+/**
+ * This function writes a packet into the Tx FIFO associated with the
+ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
+ * periodic EPs the periodic Tx FIFO associated with the EP is written
+ * with all packets for the next micro-frame.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to write packet for.
+ * @param _dma Indicates if DMA is being used.
+ */
+void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma)
+{
+ /**
+ * The buffer is padded to DWORD on a per packet basis in
+ * slave/dma mode if the MPS is not DWORD aligned. The last
+ * packet, if short, is also padded to a multiple of DWORD.
+ *
+ * ep->xfer_buff always starts DWORD aligned in memory and is a
+ * multiple of DWORD in length
+ *
+ * ep->xfer_len can be any number of bytes
+ *
+ * ep->xfer_count is a multiple of ep->maxpacket until the last
+ * packet
+ *
+ * FIFO access is DWORD */
+
+ uint32_t i;
+ uint32_t byte_count;
+ uint32_t dword_count;
+ uint32_t *fifo;
+ uint32_t *data_buff = (uint32_t *)_ep->xfer_buff;
+
+ //DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, _core_if, _ep);
+ if (_ep->xfer_count >= _ep->xfer_len) {
+ DWC_WARN("%s() No data for EP%d!!!\n", __func__, _ep->num);
+ return;
+ }
+
+ /* Find the byte length of the packet either short packet or MPS */
+ if ((_ep->xfer_len - _ep->xfer_count) < _ep->maxpacket) {
+ byte_count = _ep->xfer_len - _ep->xfer_count;
+ }
+ else {
+ byte_count = _ep->maxpacket;
+ }
+
+ /* Find the DWORD length, padded by extra bytes as necessary if MPS
+ * is not a multiple of DWORD */
+ dword_count = (byte_count + 3) / 4;
+
+#ifdef VERBOSE
+ dump_msg(_ep->xfer_buff, byte_count);
+#endif
+ if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+ /**@todo NGS Where are the Periodic Tx FIFO addresses
+ * initialized? What should this be? */
+ fifo = _core_if->data_fifo[_ep->tx_fifo_num];
+ } else {
+ fifo = _core_if->data_fifo[_ep->num];
+ }
+
+ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n",
+ fifo, data_buff, *data_buff, byte_count);
+
+
+ if (!_dma) {
+ for (i=0; i<dword_count; i++, data_buff++) {
+ dwc_write_reg32( fifo, *data_buff );
+ }
+ }
+
+ _ep->xfer_count += byte_count;
+ _ep->xfer_buff += byte_count;
+#if 1 // winder, why do we need this??
+ _ep->dma_addr += byte_count;
+#endif
+}
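+
+#if 0
+/*
+ * Illustrative sketch, not part of the original driver: in slave mode the
+ * Tx-FIFO-empty interrupt enabled by dwc_otg_ep_start_transfer() drives the
+ * actual data movement.  A handler would, roughly, keep writing packets
+ * while data remains and FIFO space is available; the space check here is
+ * simplified and the function name is hypothetical.
+ */
+static void example_txfifo_empty_handler(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
+{
+ while (ep->xfer_count < ep->xfer_len &&
+        (dwc_read_reg32(&core_if->core_global_regs->gnptxsts) & 0xffff)) {
+  dwc_otg_ep_write_packet(core_if, ep, 0); /* slave mode: _dma = 0 */
+ }
+}
+#endif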
+
+/**
+ * Set the EP STALL.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to set the stall on.
+ */
+void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+
+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num,
+ (_ep->is_in?"IN":"OUT"));
+
+ if (_ep->is_in == 1) {
+ depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
+ depctl.d32 = dwc_read_reg32(depctl_addr);
+
+ /* set the disable and stall bits */
+ if (depctl.b.epena) {
+ depctl.b.epdis = 1;
+ }
+ depctl.b.stall = 1;
+ dwc_write_reg32(depctl_addr, depctl.d32);
+
+ } else {
+ depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
+ depctl.d32 = dwc_read_reg32(depctl_addr);
+
+ /* set the stall bit */
+ depctl.b.stall = 1;
+ dwc_write_reg32(depctl_addr, depctl.d32);
+ }
+ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
+ return;
+}
+
+/**
+ * Clear the EP STALL.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to clear stall from.
+ */
+void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+
+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num,
+ (_ep->is_in?"IN":"OUT"));
+
+ if (_ep->is_in == 1) {
+ depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
+ } else {
+ depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
+ }
+
+ depctl.d32 = dwc_read_reg32(depctl_addr);
+
+ /* clear the stall bits */
+ depctl.b.stall = 0;
+
+ /*
+ * USB Spec 9.4.5: For endpoints using data toggle, regardless
+ * of whether an endpoint has the Halt feature set, a
+ * ClearFeature(ENDPOINT_HALT) request always results in the
+ * data toggle being reinitialized to DATA0.
+ */
+ if (_ep->type == DWC_OTG_EP_TYPE_INTR ||
+ _ep->type == DWC_OTG_EP_TYPE_BULK) {
+ depctl.b.setd0pid = 1; /* DATA0 */
+ }
+
+ dwc_write_reg32(depctl_addr, depctl.d32);
+ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr));
+ return;
+}
+
+/**
+ * This function reads a packet from the Rx FIFO into the destination
+ * buffer. To read SETUP data use dwc_otg_read_setup_packet.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _dest Destination buffer for the packet.
+ * @param _bytes Number of bytes to copy to the destination.
+ */
+void dwc_otg_read_packet(dwc_otg_core_if_t *_core_if,
+ uint8_t *_dest,
+ uint16_t _bytes)
+{
+ int i;
+ int word_count = (_bytes + 3) / 4;
+
+ volatile uint32_t *fifo = _core_if->data_fifo[0];
+ uint32_t *data_buff = (uint32_t *)_dest;
+
+ /**
+ * @todo Account for the case where _dest is not dword aligned. This
+ * requires reading data from the FIFO into a uint32_t temp buffer,
+ * then moving it into the data buffer.
+ */
+
+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
+ _core_if, _dest, _bytes);
+
+ for (i=0; i<word_count; i++, data_buff++) {
+ *data_buff = dwc_read_reg32(fifo);
+ }
+
+ return;
+}
+
+
+#ifdef DEBUG
+/**
+ * This function reads the device registers and prints them
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+
+ DWC_PRINT("Device Global Registers\n");
+ addr=&_core_if->dev_if->dev_global_regs->dcfg;
+ DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->dev_global_regs->dctl;
+ DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->dev_global_regs->dsts;
+ DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->dev_global_regs->diepmsk;
+ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->dev_global_regs->doepmsk;
+ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->dev_global_regs->daint;
+ DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->dev_global_regs->dtknqr1;
+ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 6) {
+ addr=&_core_if->dev_if->dev_global_regs->dtknqr2;
+ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n",
+ (uint32_t)addr,dwc_read_reg32(addr));
+ }
+
+ addr=&_core_if->dev_if->dev_global_regs->dvbusdis;
+ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+
+ addr=&_core_if->dev_if->dev_global_regs->dvbuspulse;
+ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n",
+ (uint32_t)addr,dwc_read_reg32(addr));
+
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 14) {
+ addr = &_core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
+ DWC_PRINT("DTKNQR3 @0x%08X : 0x%08X\n",
+ (uint32_t)addr, dwc_read_reg32(addr));
+ }
+
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 22) {
+ addr = &_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
+ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ }
+ for (i = 0; i <= _core_if->dev_if->num_in_eps; i++) {
+ DWC_PRINT("Device IN EP %d Registers\n", i);
+ addr=&_core_if->dev_if->in_ep_regs[i]->diepctl;
+ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->in_ep_regs[i]->diepint;
+ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->in_ep_regs[i]->dieptsiz;
+ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->in_ep_regs[i]->diepdma;
+ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+
+ addr=&_core_if->dev_if->in_ep_regs[i]->dtxfsts;
+ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n", (uint32_t) addr,
+ dwc_read_reg32(addr));
+ }
+ for (i = 0; i <= _core_if->dev_if->num_out_eps; i++) {
+ DWC_PRINT("Device OUT EP %d Registers\n", i);
+ addr=&_core_if->dev_if->out_ep_regs[i]->doepctl;
+ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->out_ep_regs[i]->doepfn;
+ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->out_ep_regs[i]->doepint;
+ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->out_ep_regs[i]->doeptsiz;
+ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->dev_if->out_ep_regs[i]->doepdma;
+ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ }
+ return;
+}
+
+/**
+ * This function reads the host registers and prints them
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+
+ DWC_PRINT("Host Global Registers\n");
+ addr=&_core_if->host_if->host_global_regs->hcfg;
+ DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->host_global_regs->hfir;
+ DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->host_global_regs->hfnum;
+ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->host_global_regs->hptxsts;
+ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->host_global_regs->haint;
+ DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->host_global_regs->haintmsk;
+ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=_core_if->host_if->hprt0;
+ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+
+ for (i=0; i<_core_if->core_params->host_channels; i++) {
+ DWC_PRINT("Host Channel %d Specific Registers\n", i);
+ addr=&_core_if->host_if->hc_regs[i]->hcchar;
+ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->hc_regs[i]->hcsplt;
+ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->hc_regs[i]->hcint;
+ DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->hc_regs[i]->hcintmsk;
+ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->hc_regs[i]->hctsiz;
+ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->host_if->hc_regs[i]->hcdma;
+ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+
+ }
+ return;
+}
+
+/**
+ * This function reads the core global registers and prints them
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if)
+{
+ int i;
+ volatile uint32_t *addr;
+
+ DWC_PRINT("Core Global Registers\n");
+ addr=&_core_if->core_global_regs->gotgctl;
+ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gotgint;
+ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gahbcfg;
+ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gusbcfg;
+ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->grstctl;
+ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gintsts;
+ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gintmsk;
+ DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->grxstsr;
+ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ //addr=&_core_if->core_global_regs->grxstsp;
+ //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->grxfsiz;
+ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gnptxfsiz;
+ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gnptxsts;
+ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gi2cctl;
+ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gpvndctl;
+ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->ggpio;
+ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->guid;
+ DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->gsnpsid;
+ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->ghwcfg1;
+ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->ghwcfg2;
+ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->ghwcfg3;
+ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->ghwcfg4;
+ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+ addr=&_core_if->core_global_regs->hptxfsiz;
+ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+
+ for (i=0; i<_core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+ addr=&_core_if->core_global_regs->dptxfsiz_dieptxf[i];
+ DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,dwc_read_reg32(addr));
+ }
+
+}
+#endif
+
+/**
+ * Flush a Tx FIFO.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _num Tx FIFO to flush.
+ */
+extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if,
+ const int _num )
+{
+ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset = { .d32 = 0};
+ int count = 0;
+
+ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", _num);
+
+ greset.b.txfflsh = 1;
+ greset.b.txfnum = _num;
+ dwc_write_reg32( &global_regs->grstctl, greset.d32 );
+
+ do {
+ greset.d32 = dwc_read_reg32( &global_regs->grstctl);
+ if (++count > 10000){
+ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
+ __func__, greset.d32,
+ dwc_read_reg32( &global_regs->gnptxsts));
+ break;
+ }
+
+ udelay(1);
+ } while (greset.b.txfflsh == 1);
+ /* Wait for 3 PHY Clocks*/
+ UDELAY(1);
+}
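+
+#if 0
+/*
+ * Usage sketch, not part of the original driver: flushing a single Tx FIFO
+ * and the Rx FIFO.  Passing 0x10 as the FIFO number to flush all Tx FIFOs
+ * follows the usual DWC_otg GRSTCTL.TxFNum convention and is an assumption
+ * here, not something this file defines.
+ */
+static void example_flush_fifos(dwc_otg_core_if_t *core_if)
+{
+ dwc_otg_flush_tx_fifo(core_if, 0); /* non-periodic / EP0 Tx FIFO */
+ dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs (hardware convention) */
+ dwc_otg_flush_rx_fifo(core_if);
+}
+#endif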
+
+/**
+ * Flush Rx FIFO.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if )
+{
+ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset = { .d32 = 0};
+ int count = 0;
+
+ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__);
+ greset.b.rxfflsh = 1;
+ dwc_write_reg32( &global_regs->grstctl, greset.d32 );
+
+ do {
+ greset.d32 = dwc_read_reg32( &global_regs->grstctl);
+ if (++count > 10000){
+ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__,
+ greset.d32);
+ break;
+ }
+ } while (greset.b.rxfflsh == 1);
+ /* Wait for 3 PHY Clocks*/
+ UDELAY(1);
+}
+
+/**
+ * Do a soft reset of the core. Be careful with this because it
+ * resets all the internal state machines of the core.
+ */
+
+void dwc_otg_core_reset(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset = { .d32 = 0};
+ int count = 0;
+
+ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
+ /* Wait for AHB master IDLE state. */
+ do {
+ UDELAY(10);
+ greset.d32 = dwc_read_reg32( &global_regs->grstctl);
+ if (++count > 100000){
+ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x %x\n", __func__,
+ greset.d32, greset.b.ahbidle);
+ return;
+ }
+ } while (greset.b.ahbidle == 0);
+
+// winder add.
+#if 1
+ /* Note: Actually, I don't know exactly why we need this delay here. */
+ MDELAY(100);
+#endif
+ /* Core Soft Reset */
+ count = 0;
+ greset.b.csftrst = 1;
+ dwc_write_reg32( &global_regs->grstctl, greset.d32 );
+// winder add.
+#if 1
+ /* Note: Actually, I don't know exactly why we need this delay here. */
+ MDELAY(100);
+#endif
+ do {
+ greset.d32 = dwc_read_reg32( &global_regs->grstctl);
+ if (++count > 10000){
+ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__,
+ greset.d32);
+ break;
+ }
+ udelay(1);
+ } while (greset.b.csftrst == 1);
+ /* Wait for 3 PHY Clocks*/
+ //DWC_PRINT("100ms\n");
+ MDELAY(100);
+}
+
+
+
+/**
+ * Register HCD callbacks. The callbacks are used to start and stop
+ * the HCD for interrupt processing.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _cb the HCD callback structure.
+ * @param _p pointer to be passed to callback function (usb_hcd*).
+ */
+extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if,
+ dwc_otg_cil_callbacks_t *_cb,
+ void *_p)
+{
+ _core_if->hcd_cb = _cb;
+ _cb->p = _p;
+}
+
+/**
+ * Register PCD callbacks. The callbacks are used to start and stop
+ * the PCD for interrupt processing.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _cb the PCD callback structure.
+ * @param _p pointer to be passed to callback function (pcd*).
+ */
+extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if,
+ dwc_otg_cil_callbacks_t *_cb,
+ void *_p)
+{
+ _core_if->pcd_cb = _cb;
+ _cb->p = _p;
+}
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.h
new file mode 100644
index 0000000000..bbb9516b85
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil.h
@@ -0,0 +1,911 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.h $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 631780 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_CIL_H__)
+#define __DWC_CIL_H__
+
+#include "dwc_otg_plat.h"
+
+#include "dwc_otg_regs.h"
+#ifdef DEBUG
+#include "linux/timer.h"
+#endif
+
+/* the OTG capabilities. */
+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+/* the maximum speed of operation in host and device mode. */
+#define DWC_SPEED_PARAM_HIGH 0
+#define DWC_SPEED_PARAM_FULL 1
+/* the PHY clock rate in low power mode when connected to a
+ * Low Speed device in host mode. */
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+/* the type of PHY interface to use. */
+#define DWC_PHY_TYPE_PARAM_FS 0
+#define DWC_PHY_TYPE_PARAM_UTMI 1
+#define DWC_PHY_TYPE_PARAM_ULPI 2
+/* whether to use the internal or external supply to
+ * drive the vbus with a ULPI phy. */
+#define DWC_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1
+/* EP type. */
+
+/**
+ * @file
+ * This file contains the interface to the Core Interface Layer.
+ */
+
+/**
+ * The <code>dwc_ep</code> structure represents the state of a single
+ * endpoint when acting in device mode. It contains the data items
+ * needed for an endpoint to be activated and transfer packets.
+ */
+typedef struct dwc_ep {
+ /** EP number used for register address lookup */
+ uint8_t num;
+ /** EP direction 0 = OUT */
+ unsigned is_in : 1;
+ /** EP active. */
+ unsigned active : 1;
+
+ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO
+ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/
+ unsigned tx_fifo_num : 4;
+ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */
+ unsigned type : 2;
+#define DWC_OTG_EP_TYPE_CONTROL 0
+#define DWC_OTG_EP_TYPE_ISOC 1
+#define DWC_OTG_EP_TYPE_BULK 2
+#define DWC_OTG_EP_TYPE_INTR 3
+
+ /** DATA start PID for INTR and BULK EP */
+ unsigned data_pid_start : 1;
+ /** Frame (even/odd) for ISOC EP */
+ unsigned even_odd_frame : 1;
+ /** Max Packet bytes */
+ unsigned maxpacket : 11;
+
+ /** @name Transfer state */
+ /** @{ */
+
+ /**
+ * Pointer to the beginning of the transfer buffer -- do not modify
+ * during transfer.
+ */
+
+ uint32_t dma_addr;
+
+ uint8_t *start_xfer_buff;
+ /** pointer to the transfer buffer */
+ uint8_t *xfer_buff;
+ /** Number of bytes to transfer */
+ unsigned xfer_len : 19;
+ /** Number of bytes transferred. */
+ unsigned xfer_count : 19;
+ /** Sent ZLP */
+ unsigned sent_zlp : 1;
+ /** Total len for control transfer */
+ unsigned total_len : 19;
+
+ /** stall clear flag */
+ unsigned stall_clear_flag : 1;
+
+ /** @} */
+} dwc_ep_t;
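+
+#if 0
+/*
+ * Illustrative sketch, not part of the original driver: how a PCD might fill
+ * in a dwc_ep_t for a high-speed bulk IN endpoint before starting a
+ * transfer.  The values are examples only.
+ */
+static void example_setup_bulk_in_ep(dwc_ep_t *ep, uint8_t *buf, uint32_t len)
+{
+ ep->num = 1; /* EP1 */
+ ep->is_in = 1; /* IN direction */
+ ep->active = 1;
+ ep->type = DWC_OTG_EP_TYPE_BULK;
+ ep->maxpacket = 512; /* high-speed bulk MPS */
+ ep->start_xfer_buff = buf;
+ ep->xfer_buff = buf;
+ ep->xfer_len = len;
+ ep->xfer_count = 0;
+}
+#endif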
+
+/*
+ * Reasons for halting a host channel.
+ */
+typedef enum dwc_otg_halt_status {
+ DWC_OTG_HC_XFER_NO_HALT_STATUS,
+ DWC_OTG_HC_XFER_COMPLETE,
+ DWC_OTG_HC_XFER_URB_COMPLETE,
+ DWC_OTG_HC_XFER_ACK,
+ DWC_OTG_HC_XFER_NAK,
+ DWC_OTG_HC_XFER_NYET,
+ DWC_OTG_HC_XFER_STALL,
+ DWC_OTG_HC_XFER_XACT_ERR,
+ DWC_OTG_HC_XFER_FRAME_OVERRUN,
+ DWC_OTG_HC_XFER_BABBLE_ERR,
+ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR,
+ DWC_OTG_HC_XFER_AHB_ERR,
+ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE,
+ DWC_OTG_HC_XFER_URB_DEQUEUE
+} dwc_otg_halt_status_e;
+
+/**
+ * Host channel descriptor. This structure represents the state of a single
+ * host channel when acting in host mode. It contains the data items needed to
+ * transfer packets to an endpoint via a host channel.
+ */
+typedef struct dwc_hc {
+ /** Host channel number used for register address lookup */
+ uint8_t hc_num;
+
+ /** Device to access */
+ unsigned dev_addr : 7;
+
+ /** EP to access */
+ unsigned ep_num : 4;
+
+ /** EP direction. 0: OUT, 1: IN */
+ unsigned ep_is_in : 1;
+
+ /**
+ * EP speed.
+ * One of the following values:
+ * - DWC_OTG_EP_SPEED_LOW
+ * - DWC_OTG_EP_SPEED_FULL
+ * - DWC_OTG_EP_SPEED_HIGH
+ */
+ unsigned speed : 2;
+#define DWC_OTG_EP_SPEED_LOW 0
+#define DWC_OTG_EP_SPEED_FULL 1
+#define DWC_OTG_EP_SPEED_HIGH 2
+
+ /**
+ * Endpoint type.
+ * One of the following values:
+ * - DWC_OTG_EP_TYPE_CONTROL: 0
+ * - DWC_OTG_EP_TYPE_ISOC: 1
+ * - DWC_OTG_EP_TYPE_BULK: 2
+ * - DWC_OTG_EP_TYPE_INTR: 3
+ */
+ unsigned ep_type : 2;
+
+ /** Max packet size in bytes */
+ unsigned max_packet : 11;
+
+ /**
+ * PID for initial transaction.
+ * 0: DATA0,<br>
+ * 1: DATA2,<br>
+ * 2: DATA1,<br>
+ * 3: MDATA (non-Control EP),
+ * SETUP (Control EP)
+ */
+ unsigned data_pid_start : 2;
+#define DWC_OTG_HC_PID_DATA0 0
+#define DWC_OTG_HC_PID_DATA2 1
+#define DWC_OTG_HC_PID_DATA1 2
+#define DWC_OTG_HC_PID_MDATA 3
+#define DWC_OTG_HC_PID_SETUP 3
+
+ /** Number of periodic transactions per (micro)frame */
+ unsigned multi_count: 2;
+
+ /** @name Transfer State */
+ /** @{ */
+
+ /** Pointer to the current transfer buffer position. */
+ uint8_t *xfer_buff;
+ /** Total number of bytes to transfer. */
+ uint32_t xfer_len;
+ /** Number of bytes transferred so far. */
+ uint32_t xfer_count;
+ /** Packet count at start of transfer.*/
+ uint16_t start_pkt_count;
+
+ /**
+ * Flag to indicate whether the transfer has been started. Set to 1 if
+ * it has been started, 0 otherwise.
+ */
+ uint8_t xfer_started;
+
+ /**
+ * Set to 1 to indicate that a PING request should be issued on this
+ * channel. If 0, process normally.
+ */
+ uint8_t do_ping;
+
+ /**
+ * Set to 1 to indicate that the error count for this transaction is
+ * non-zero. Set to 0 if the error count is 0.
+ */
+ uint8_t error_state;
+
+ /**
+ * Set to 1 to indicate that this channel should be halted the next
+ * time a request is queued for the channel. This is necessary in
+ * slave mode if no request queue space is available when an attempt
+ * is made to halt the channel.
+ */
+ uint8_t halt_on_queue;
+
+ /**
+ * Set to 1 if the host channel has been halted, but the core is not
+ * finished flushing queued requests. Otherwise 0.
+ */
+ uint8_t halt_pending;
+
+ /**
+ * Reason for halting the host channel.
+ */
+ dwc_otg_halt_status_e halt_status;
+
+ /*
+ * Split settings for the host channel
+ */
+ uint8_t do_split; /**< Enable split for the channel */
+ uint8_t complete_split; /**< Enable complete split */
+ uint8_t hub_addr; /**< Address of high speed hub */
+
+ uint8_t port_addr; /**< Port of the low/full speed device */
+ /** Split transaction position
+ * One of the following values:
+ * - DWC_HCSPLIT_XACTPOS_MID
+ * - DWC_HCSPLIT_XACTPOS_BEGIN
+ * - DWC_HCSPLIT_XACTPOS_END
+ * - DWC_HCSPLIT_XACTPOS_ALL */
+ uint8_t xact_pos;
+
+ /** Set when the host channel does a short read. */
+ uint8_t short_read;
+
+ /**
+ * Number of requests issued for this channel since it was assigned to
+ * the current transfer (not counting PINGs).
+ */
+ uint8_t requests;
+
+ /**
+ * Queue Head for the transfer being processed by this channel.
+ */
+ struct dwc_otg_qh *qh;
+
+ /** @} */
+
+ /** Entry in list of host channels. */
+ struct list_head hc_list_entry;
+} dwc_hc_t;
+
+/**
+ * The following parameters may be specified when starting the module. These
+ * parameters define how the DWC_otg controller should be configured.
+ * Parameter values are passed to the CIL initialization function
+ * dwc_otg_cil_init.
+ */
+
+typedef struct dwc_otg_core_params
+{
+ int32_t opt;
+//#define dwc_param_opt_default 1
+ /**
+ * Specifies the OTG capabilities. The driver will automatically
+ * detect the value for this parameter if none is specified.
+ * 0 - HNP and SRP capable (default)
+ * 1 - SRP Only capable
+ * 2 - No HNP/SRP capable
+ */
+ int32_t otg_cap;
+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+//#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE
+ /**
+ * Specifies whether to use slave or DMA mode for accessing the data
+ * FIFOs. The driver will automatically detect the value for this
+ * parameter if none is specified.
+ * 0 - Slave
+ * 1 - DMA (default, if available)
+ */
+ int32_t dma_enable;
+//#define dwc_param_dma_enable_default 1
+ /** The DMA Burst size (applicable only for External DMA
+ * Mode). 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+ */
+ int32_t dma_burst_size; /* Translate this to GAHBCFG values */
+//#define dwc_param_dma_burst_size_default 32
+ /**
+ * Specifies the maximum speed of operation in host and device mode.
+ * The actual speed depends on the speed of the attached device and
+ * the value of phy_type.
+ * 0 - High Speed (default)
+ * 1 - Full Speed
+ */
+ int32_t speed;
+//#define dwc_param_speed_default 0
+#define DWC_SPEED_PARAM_HIGH 0
+#define DWC_SPEED_PARAM_FULL 1
+
+ /** Specifies whether low power mode is supported when attached
+ * to a Full Speed or Low Speed device in host mode.
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ */
+ int32_t host_support_fs_ls_low_power;
+//#define dwc_param_host_support_fs_ls_low_power_default 0
+ /** Specifies the PHY clock rate in low power mode when connected to a
+ * Low Speed device in host mode. This parameter is applicable only if
+ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+ * then defaults to 6 MHZ otherwise 48 MHZ.
+ *
+ * 0 - 48 MHz
+ * 1 - 6 MHz
+ */
+ int32_t host_ls_low_power_phy_clk;
+//#define dwc_param_host_ls_low_power_phy_clk_default 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+ /**
+ * 0 - Use cC FIFO size parameters
+ * 1 - Allow dynamic FIFO sizing (default)
+ */
+ int32_t enable_dynamic_fifo;
+//#define dwc_param_enable_dynamic_fifo_default 1
+ /** Total number of 4-byte words in the data FIFO memory. This
+ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+ * Tx FIFOs.
+ * 32 to 32768 (default 8192)
+ * Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ */
+ int32_t data_fifo_size;
+//#define dwc_param_data_fifo_size_default 8192
+ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1064)
+ */
+ int32_t dev_rx_fifo_size;
+//#define dwc_param_dev_rx_fifo_size_default 1064
+ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode
+ * when dynamic FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t dev_nperio_tx_fifo_size;
+//#define dwc_param_dev_nperio_tx_fifo_size_default 1024
+ /** Number of 4-byte words in each of the periodic Tx FIFOs in device
+ * mode when dynamic FIFO sizing is enabled.
+ * 4 to 768 (default 256)
+ */
+ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
+//#define dwc_param_dev_perio_tx_fifo_size_default 256
+ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t host_rx_fifo_size;
+//#define dwc_param_host_rx_fifo_size_default 1024
+ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode
+ * when Dynamic FIFO sizing is enabled in the core.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t host_nperio_tx_fifo_size;
+//#define dwc_param_host_nperio_tx_fifo_size_default 1024
+ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+ int32_t host_perio_tx_fifo_size;
+//#define dwc_param_host_perio_tx_fifo_size_default 1024
+ /** The maximum transfer size supported in bytes.
+ * 2047 to 65,535 (default 65,535)
+ */
+ int32_t max_transfer_size;
+//#define dwc_param_max_transfer_size_default 65535
+ /** The maximum number of packets in a transfer.
+ * 15 to 511 (default 511)
+ */
+ int32_t max_packet_count;
+//#define dwc_param_max_packet_count_default 511
+ /** The number of host channel registers to use.
+ * 1 to 16 (default 12)
+ * Note: The FPGA configuration supports a maximum of 12 host channels.
+ */
+ int32_t host_channels;
+//#define dwc_param_host_channels_default 12
+ /** The number of endpoints in addition to EP0 available for device
+ * mode operations.
+ * 1 to 15 (default 6 IN and OUT)
+ * Note: The FPGA configuration supports a maximum of 6 IN and OUT
+ * endpoints in addition to EP0.
+ */
+ int32_t dev_endpoints;
+//#define dwc_param_dev_endpoints_default 6
+ /**
+ * Specifies the type of PHY interface to use. By default, the driver
+ * will automatically detect the phy_type.
+ *
+ * 0 - Full Speed PHY
+ * 1 - UTMI+ (default)
+ * 2 - ULPI
+ */
+ int32_t phy_type;
+#define DWC_PHY_TYPE_PARAM_FS 0
+#define DWC_PHY_TYPE_PARAM_UTMI 1
+#define DWC_PHY_TYPE_PARAM_ULPI 2
+//#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI
+ /**
+ * Specifies the UTMI+ Data Width. This parameter is
+ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
+ * PHY_TYPE, this parameter indicates the data width between
+ * the MAC and the ULPI Wrapper.) Also, this parameter is
+ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
+ * to "8 and 16 bits", meaning that the core has been
+ * configured to work at either data path width.
+ *
+ * 8 or 16 bits (default 16)
+ */
+ int32_t phy_utmi_width;
+//#define dwc_param_phy_utmi_width_default 16
+ /**
+ * Specifies whether the ULPI operates at double or single
+ * data rate. This parameter is only applicable if PHY_TYPE is
+ * ULPI.
+ *
+ * 0 - single data rate ULPI interface with 8 bit wide data
+ * bus (default)
+ * 1 - double data rate ULPI interface with 4 bit wide data
+ * bus
+ */
+ int32_t phy_ulpi_ddr;
+//#define dwc_param_phy_ulpi_ddr_default 0
+ /**
+ * Specifies whether to use the internal or external supply to
+ * drive the vbus with a ULPI phy.
+ */
+ int32_t phy_ulpi_ext_vbus;
+#define DWC_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1
+//#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS
+ /**
+ * Specifies whether to use the I2C interface for full speed PHY. This
+ * parameter is only applicable if PHY_TYPE is FS.
+ * 0 - No (default)
+ * 1 - Yes
+ */
+ int32_t i2c_enable;
+//#define dwc_param_i2c_enable_default 0
+
+ int32_t ulpi_fs_ls;
+//#define dwc_param_ulpi_fs_ls_default 0
+
+ int32_t ts_dline;
+//#define dwc_param_ts_dline_default 0
+
+ /**
+ * Specifies whether dedicated transmit FIFOs are
+ * enabled for non-periodic IN endpoints in device mode
+ * 0 - No
+ * 1 - Yes
+ */
+ int32_t en_multiple_tx_fifo;
+#define dwc_param_en_multiple_tx_fifo_default 1
+
+ /** Number of 4-byte words in each of the Tx FIFOs in device
+ * mode when dynamic FIFO sizing is enabled.
+ * 4 to 768 (default 256)
+ */
+ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS];
+#define dwc_param_dev_tx_fifo_size_default 256
+
+ /** Thresholding enable flags:
+ * bit 0 - enable non-ISO Tx thresholding
+ * bit 1 - enable ISO Tx thresholding
+ * bit 2 - enable Rx thresholding
+ */
+ uint32_t thr_ctl;
+#define dwc_param_thr_ctl_default 0
+
+ /** Thresholding length for Tx
+ * FIFOs in 32 bit DWORDs
+ */
+ uint32_t tx_thr_length;
+#define dwc_param_tx_thr_length_default 64
+
+ /** Thresholding length for Rx
+ * FIFOs in 32 bit DWORDs
+ */
+ uint32_t rx_thr_length;
+#define dwc_param_rx_thr_length_default 64
+} dwc_otg_core_params_t;
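+
+#if 0
+/*
+ * Illustrative sketch, not part of the original driver: a few of the fields
+ * above filled in with the dwc_param_*_default values that the Infineon
+ * port defines in dwc_otg_cil_ifx.h.  The driver's real initialization code
+ * may assign these differently.
+ */
+static const dwc_otg_core_params_t example_core_params = {
+ .otg_cap = DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+ .dma_enable = 1,
+ .dma_burst_size = 32,
+ .speed = DWC_SPEED_PARAM_HIGH,
+ .enable_dynamic_fifo = 1,
+ .data_fifo_size = 2048,
+ .host_channels = 16,
+ .dev_endpoints = 6,
+ .phy_type = DWC_PHY_TYPE_PARAM_UTMI,
+ .phy_utmi_width = 16,
+};
+#endif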
+
+#ifdef DEBUG
+struct dwc_otg_core_if;
+typedef struct hc_xfer_info
+{
+ struct dwc_otg_core_if *core_if;
+ dwc_hc_t *hc;
+} hc_xfer_info_t;
+#endif
+
+/**
+ * The <code>dwc_otg_core_if</code> structure contains information needed to manage
+ * the DWC_otg controller acting in either host or device mode. It
+ * represents the programming view of the controller as a whole.
+ */
+typedef struct dwc_otg_core_if
+{
+ /** Parameters that define how the core should be configured.*/
+ dwc_otg_core_params_t *core_params;
+
+ /** Core Global registers starting at offset 000h. */
+ dwc_otg_core_global_regs_t *core_global_regs;
+
+ /** Device-specific information */
+ dwc_otg_dev_if_t *dev_if;
+ /** Host-specific information */
+ dwc_otg_host_if_t *host_if;
+
+ /*
+ * Set to 1 if the core PHY interface bits in USBCFG have been
+ * initialized.
+ */
+ uint8_t phy_init_done;
+
+ /*
+ * SRP Success flag, set by srp success interrupt in FS I2C mode
+ */
+ uint8_t srp_success;
+ uint8_t srp_timer_started;
+
+ /* Common configuration information */
+ /** Power and Clock Gating Control Register */
+ volatile uint32_t *pcgcctl;
+#define DWC_OTG_PCGCCTL_OFFSET 0xE00
+
+ /** Push/pop addresses for endpoints or host channels.*/
+ uint32_t *data_fifo[MAX_EPS_CHANNELS];
+#define DWC_OTG_DATA_FIFO_OFFSET 0x1000
+#define DWC_OTG_DATA_FIFO_SIZE 0x1000
+
+ /** Total RAM for FIFOs (Bytes) */
+ uint16_t total_fifo_size;
+ /** Size of Rx FIFO (Bytes) */
+ uint16_t rx_fifo_size;
+ /** Size of Non-periodic Tx FIFO (Bytes) */
+ uint16_t nperio_tx_fifo_size;
+
+ /** 1 if DMA is enabled, 0 otherwise. */
+ uint8_t dma_enable;
+
+ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */
+ uint8_t en_multiple_tx_fifo;
+
+ /** Set to 1 if multiple packets of a high-bandwidth transfer are in
+ * the process of being queued */
+ uint8_t queuing_high_bandwidth;
+
+ /** Hardware Configuration -- stored here for convenience.*/
+ hwcfg1_data_t hwcfg1;
+ hwcfg2_data_t hwcfg2;
+ hwcfg3_data_t hwcfg3;
+ hwcfg4_data_t hwcfg4;
+
+ /** The operational state. During transitions
+ * (a_host => a_peripheral and b_device => b_host) this may not
+ * match the core, but allows the software to determine
+ * transitions.
+ */
+ uint8_t op_state;
+
+ /**
+ * Set to 1 if the HCD needs to be restarted on a session request
+ * interrupt. This is required if no connector ID status change has
+ * occurred since the HCD was last disconnected.
+ */
+ uint8_t restart_hcd_on_session_req;
+
+ /** HCD callbacks */
+ /** A-Device is a_host */
+#define A_HOST (1)
+ /** A-Device is a_suspend */
+#define A_SUSPEND (2)
+ /** A-Device is a_peripheral */
+#define A_PERIPHERAL (3)
+ /** B-Device is operating as a Peripheral. */
+#define B_PERIPHERAL (4)
+ /** B-Device is operating as a Host. */
+#define B_HOST (5)
+
+ /** HCD callbacks */
+ struct dwc_otg_cil_callbacks *hcd_cb;
+ /** PCD callbacks */
+ struct dwc_otg_cil_callbacks *pcd_cb;
+
+ /** Device mode Periodic Tx FIFO Mask */
+ uint32_t p_tx_msk;
+ /** Device mode Periodic Tx FIFO Mask */
+ uint32_t tx_msk;
+
+#ifdef DEBUG
+ uint32_t start_hcchar_val[MAX_EPS_CHANNELS];
+
+ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS];
+ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS];
+
+#if 1 // winder
+ uint32_t hfnum_7_samples;
+ uint32_t hfnum_7_frrem_accum;
+ uint32_t hfnum_0_samples;
+ uint32_t hfnum_0_frrem_accum;
+ uint32_t hfnum_other_samples;
+ uint32_t hfnum_other_frrem_accum;
+#else
+ uint32_t hfnum_7_samples;
+ uint64_t hfnum_7_frrem_accum;
+ uint32_t hfnum_0_samples;
+ uint64_t hfnum_0_frrem_accum;
+ uint32_t hfnum_other_samples;
+ uint64_t hfnum_other_frrem_accum;
+#endif
+ resource_size_t phys_addr; /* Added to support PLB DMA : phys-virt mapping */
+#endif
+
+} dwc_otg_core_if_t;
+
+/*
+ * The following functions support initialization of the CIL driver component
+ * and the DWC_otg controller.
+ */
+extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr,
+ dwc_otg_core_params_t *_core_params);
+extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if );
+extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if );
+
+/** @name Device CIL Functions
+ * The following functions support managing the DWC_otg controller in device
+ * mode.
+ */
+/**@{*/
+extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest);
+extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma);
+extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if);
+/**@}*/
+
+/** @name Host CIL Functions
+ * The following functions support managing the DWC_otg controller in host
+ * mode.
+ */
+/**@{*/
+extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if,
+ dwc_hc_t *_hc,
+ dwc_otg_halt_status_e _halt_status);
+extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if);
+
+/**
+ * This function reads HPRT0 in preparation for modifying it. It keeps the
+ * write-to-clear (WC) bits at 0 so that if they are read as 1, they won't
+ * be cleared when the value is written back.
+ */
+static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if)
+{
+ hprt0_data_t hprt0;
+ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0);
+ hprt0.b.prtena = 0;
+ hprt0.b.prtconndet = 0;
+ hprt0.b.prtenchng = 0;
+ hprt0.b.prtovrcurrchng = 0;
+ return hprt0.d32;
+}
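+
+#if 0
+/*
+ * Usage sketch, not part of the original driver: a read-modify-write of
+ * HPRT0 through the helper above, so that the write-to-clear status bits
+ * are not accidentally acknowledged.  The prtrst field name follows the
+ * usual hprt0_data_t layout and is an assumption here.
+ */
+static void example_port_reset(dwc_otg_core_if_t *core_if, int assert)
+{
+ hprt0_data_t hprt0;
+
+ hprt0.d32 = dwc_otg_read_hprt0(core_if); /* WC bits already masked to 0 */
+ hprt0.b.prtrst = assert ? 1 : 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+}
+#endif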
+
+extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if);
+/**@}*/
+
+/** @name Common CIL Functions
+ * The following functions support managing the DWC_otg controller in either
+ * device or host mode.
+ */
+/**@{*/
+
+extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if,
+ uint8_t *dest,
+ uint16_t bytes);
+
+extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if);
+
+extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if,
+ const int _num );
+extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if );
+extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if );
+
+#define NP_TXFIFO_EMPTY -1
+#define MAX_NP_TXREQUEST_Q_SLOTS 8
+/**
+ * This function returns the endpoint number of the request at
+ * the top of non-periodic TX FIFO, or -1 if the request FIFO is
+ * empty.
+ */
+static inline int dwc_otg_top_nptxfifo_epnum(dwc_otg_core_if_t *_core_if) {
+ gnptxsts_data_t txstatus = {.d32 = 0};
+
+ txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ return (txstatus.b.nptxqspcavail == MAX_NP_TXREQUEST_Q_SLOTS ?
+ -1 : txstatus.b.nptxqtop_chnep);
+}
+/**
+ * This function returns the Core Interrupt register.
+ */
+static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) {
+ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) &
+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+}
+
+/**
+ * This function returns the OTG Interrupt register.
+ */
+static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) {
+ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint));
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the IN endpoint interrupt bits.
+ */
+static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *_core_if) {
+ uint32_t v;
+ v = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daint) &
+ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk);
+ return (v & 0xffff);
+
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the OUT endpoint interrupt bits.
+ */
+static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *_core_if) {
+ uint32_t v;
+ v = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daint) &
+ dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk);
+ return ((v & 0xffff0000) >> 16);
+}
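+
+#if 0
+/*
+ * Illustrative sketch, not part of the original driver: how an interrupt
+ * handler might walk the 16-bit endpoint bitmaps returned by the two
+ * helpers above.  handle_in_ep()/handle_out_ep() are hypothetical.
+ */
+static void example_dispatch_ep_intrs(dwc_otg_core_if_t *core_if)
+{
+ uint32_t in_bits = dwc_otg_read_dev_all_in_ep_intr(core_if);
+ uint32_t out_bits = dwc_otg_read_dev_all_out_ep_intr(core_if);
+ int ep;
+
+ for (ep = 0; ep < 16; ep++) {
+  if (in_bits & (1 << ep))
+   handle_in_ep(core_if, ep);
+  if (out_bits & (1 << ep))
+   handle_out_ep(core_if, ep);
+ }
+}
+#endif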
+
+/**
+ * This function returns the Device IN EP Interrupt register
+ */
+static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *_core_if,
+ dwc_ep_t *_ep)
+{
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ uint32_t v, msk, emp;
+ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk);
+ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk);
+ msk |= ((emp >> _ep->num) & 0x1) << 7;
+ v = dwc_read_reg32(&dev_if->in_ep_regs[_ep->num]->diepint) & msk;
+/*
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ uint32_t v;
+ v = dwc_read_reg32(&dev_if->in_ep_regs[_ep->num]->diepint) &
+ dwc_read_reg32(&dev_if->dev_global_regs->diepmsk);
+*/
+ return v;
+}
+/**
+ * This function returns the Device OUT EP Interrupt register
+ */
+static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if,
+ dwc_ep_t *_ep)
+{
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ uint32_t v;
+ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) &
+ dwc_read_reg32(&dev_if->dev_global_regs->doepmsk);
+ return v;
+}
+
+/**
+ * This function returns the Host All Channel Interrupt register
+ */
+static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if)
+{
+ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint));
+}
+
+static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint));
+}
+
+
+/**
+ * This function returns the mode of the operation, host or device.
+ *
+ * @return 0 - Device Mode, 1 - Host Mode
+ */
+static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) {
+ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1);
+}
+
+static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if)
+{
+ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE);
+}
+static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if)
+{
+ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE);
+}
+
+extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if );
+
+
+/**@}*/
+
+/**
+ * DWC_otg CIL callback structure. This structure allows the HCD and
+ * PCD to register functions used for starting and stopping the PCD
+ * and HCD for role change on for a DRD.
+ */
+typedef struct dwc_otg_cil_callbacks
+{
+ /** Start function for role change */
+ int (*start) (void *_p);
+ /** Stop Function for role change */
+ int (*stop) (void *_p);
+ /** Disconnect Function for role change */
+ int (*disconnect) (void *_p);
+ /** Resume/Remote wakeup Function */
+ int (*resume_wakeup) (void *_p);
+ /** Suspend function */
+ int (*suspend) (void *_p);
+ /** Session Start (SRP) */
+ int (*session_start) (void *_p);
+ /** Pointer passed to start() and stop() */
+ void *p;
+} dwc_otg_cil_callbacks_t;
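+/* Illustrative sketch (not part of the driver): a PCD or HCD would typically
+ * fill in this structure with its own handlers and hand it to the CIL, e.g.
+ *
+ *	static dwc_otg_cil_callbacks_t my_pcd_callbacks = {
+ *		.start = my_pcd_start,		// hypothetical handlers
+ *		.stop = my_pcd_stop,
+ *		.suspend = my_pcd_suspend,
+ *		.resume_wakeup = my_pcd_resume,
+ *	};
+ *	dwc_otg_cil_register_pcd_callbacks(core_if, &my_pcd_callbacks, pcd);
+ *
+ * The CIL then invokes these hooks from the common interrupt handlers. */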
+
+
+
+extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if,
+ dwc_otg_cil_callbacks_t *_cb,
+ void *_p);
+extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if,
+ dwc_otg_cil_callbacks_t *_cb,
+ void *_p);
+
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_ifx.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_ifx.h
new file mode 100644
index 0000000000..b0298ec2ea
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_ifx.h
@@ -0,0 +1,58 @@
+/******************************************************************************
+**
+** FILE NAME : dwc_otg_cil_ifx.h
+** PROJECT : Twinpass/Danube
+** MODULES : DWC OTG USB
+**
+** DATE : 07 Sep. 2007
+** AUTHOR : Sung Winder
+** DESCRIPTION : Default param value.
+** COPYRIGHT : Copyright (c) 2007
+** Infineon Technologies AG
+** 2F, No.2, Li-Hsin Rd., Hsinchu Science Park,
+** Hsin-chu City, 300 Taiwan.
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** HISTORY
+** $Date $Author $Comment
+** 12 April 2007 Sung Winder Initiate Version
+*******************************************************************************/
+#if !defined(__DWC_OTG_CIL_IFX_H__)
+#define __DWC_OTG_CIL_IFX_H__
+
+/* ================ Default param value ================== */
+#define dwc_param_opt_default 1
+#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE
+#define dwc_param_dma_enable_default 1
+#define dwc_param_dma_burst_size_default 32
+#define dwc_param_speed_default DWC_SPEED_PARAM_HIGH
+#define dwc_param_host_support_fs_ls_low_power_default 0
+#define dwc_param_host_ls_low_power_phy_clk_default DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ
+#define dwc_param_enable_dynamic_fifo_default 1
+#define dwc_param_data_fifo_size_default 2048
+#define dwc_param_dev_rx_fifo_size_default 1024
+#define dwc_param_dev_nperio_tx_fifo_size_default 1024
+#define dwc_param_dev_perio_tx_fifo_size_default 768
+#define dwc_param_host_rx_fifo_size_default 640
+#define dwc_param_host_nperio_tx_fifo_size_default 640
+#define dwc_param_host_perio_tx_fifo_size_default 768
+#define dwc_param_max_transfer_size_default 65535
+#define dwc_param_max_packet_count_default 511
+#define dwc_param_host_channels_default 16
+#define dwc_param_dev_endpoints_default 6
+#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI
+#define dwc_param_phy_utmi_width_default 16
+#define dwc_param_phy_ulpi_ddr_default 0
+#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS
+#define dwc_param_i2c_enable_default 0
+#define dwc_param_ulpi_fs_ls_default 0
+#define dwc_param_ts_dline_default 0
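+
+/* Note: the FIFO size defaults above are presumably expressed in 32-bit words,
+ * matching the units used by the GRXFSIZ/GNPTXFSIZ/HPTXFSIZ registers. */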
+
+/* ======================================================= */
+
+#endif // __DWC_OTG_CIL_IFX_H__
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_intr.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
new file mode 100644
index 0000000000..d469ab46b0
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_cil_intr.c
@@ -0,0 +1,708 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil_intr.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 553126 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the DWC_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * This file contains the Common Interrupt handlers.
+ */
+#include "dwc_otg_plat.h"
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+
+#ifdef DEBUG
+inline const char *op_state_str( dwc_otg_core_if_t *_core_if )
+{
+ return (_core_if->op_state==A_HOST?"a_host":
+ (_core_if->op_state==A_SUSPEND?"a_suspend":
+ (_core_if->op_state==A_PERIPHERAL?"a_peripheral":
+ (_core_if->op_state==B_PERIPHERAL?"b_peripheral":
+ (_core_if->op_state==B_HOST?"b_host":
+ "unknown")))));
+}
+#endif
+
+/** This function handles the Mode Mismatch Interrupt: it logs a warning
+ * with the current mode and clears the interrupt.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *_core_if)
+{
+ gintsts_data_t gintsts;
+ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n",
+ dwc_otg_mode(_core_if) ? "Host" : "Device");
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.modemismatch = 1;
+ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/** Start the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void hcd_start( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->hcd_cb && _core_if->hcd_cb->start) {
+ _core_if->hcd_cb->start( _core_if->hcd_cb->p );
+ }
+}
+/** Stop the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void hcd_stop( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->hcd_cb && _core_if->hcd_cb->stop) {
+ _core_if->hcd_cb->stop( _core_if->hcd_cb->p );
+ }
+}
+/** Disconnect the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void hcd_disconnect( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->hcd_cb && _core_if->hcd_cb->disconnect) {
+ _core_if->hcd_cb->disconnect( _core_if->hcd_cb->p );
+ }
+}
+/** Inform the HCD that a New Session has begun. Helper function for
+ * using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void hcd_session_start( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->hcd_cb && _core_if->hcd_cb->session_start) {
+ _core_if->hcd_cb->session_start( _core_if->hcd_cb->p );
+ }
+}
+
+/** Start the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void pcd_start( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->pcd_cb && _core_if->pcd_cb->start ) {
+ _core_if->pcd_cb->start( _core_if->pcd_cb->p );
+ }
+}
+/** Stop the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void pcd_stop( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->pcd_cb && _core_if->pcd_cb->stop ) {
+ _core_if->pcd_cb->stop( _core_if->pcd_cb->p );
+ }
+}
+/** Suspend the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void pcd_suspend( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->pcd_cb && _core_if->pcd_cb->suspend ) {
+ _core_if->pcd_cb->suspend( _core_if->pcd_cb->p );
+ }
+}
+/** Resume the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+static inline void pcd_resume( dwc_otg_core_if_t *_core_if )
+{
+ if (_core_if->pcd_cb && _core_if->pcd_cb->resume_wakeup ) {
+ _core_if->pcd_cb->resume_wakeup( _core_if->pcd_cb->p );
+ }
+}
+
+/**
+ * This function handles the OTG Interrupts. It reads the OTG
+ * Interrupt Register (GOTGINT) to determine what interrupt has
+ * occurred.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *_core_if)
+{
+ dwc_otg_core_global_regs_t *global_regs =
+ _core_if->core_global_regs;
+ gotgint_data_t gotgint;
+ gotgctl_data_t gotgctl;
+ gintmsk_data_t gintmsk;
+
+ gotgint.d32 = dwc_read_reg32( &global_regs->gotgint);
+ gotgctl.d32 = dwc_read_reg32( &global_regs->gotgctl);
+ DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32,
+ op_state_str(_core_if));
+ //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32 );
+
+ if (gotgint.b.sesenddet) {
+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Session End Detected++ (%s)\n",
+ op_state_str(_core_if));
+ gotgctl.d32 = dwc_read_reg32( &global_regs->gotgctl);
+
+ if (_core_if->op_state == B_HOST) {
+ pcd_start( _core_if );
+ _core_if->op_state = B_PERIPHERAL;
+ } else {
+			/* If not B_HOST and Device HNP is still set,
+			 * HNP did not succeed. */
+ if (gotgctl.b.devhnpen) {
+ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n");
+ DWC_ERROR( "Device Not Connected/Responding!\n" );
+ }
+
+ /* If Session End Detected the B-Cable has
+ * been disconnected. */
+ /* Reset PCD and Gadget driver to a
+ * clean state. */
+ pcd_stop(_core_if);
+ }
+ gotgctl.d32 = 0;
+ gotgctl.b.devhnpen = 1;
+ dwc_modify_reg32( &global_regs->gotgctl,
+ gotgctl.d32, 0);
+ }
+ if (gotgint.b.sesreqsucstschng) {
+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Session Reqeust Success Status Change++\n");
+ gotgctl.d32 = dwc_read_reg32( &global_regs->gotgctl);
+ if (gotgctl.b.sesreqscs) {
+ if ((_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
+ (_core_if->core_params->i2c_enable)) {
+ _core_if->srp_success = 1;
+ }
+ else {
+ pcd_resume( _core_if );
+ /* Clear Session Request */
+ gotgctl.d32 = 0;
+ gotgctl.b.sesreq = 1;
+ dwc_modify_reg32( &global_regs->gotgctl,
+ gotgctl.d32, 0);
+ }
+ }
+ }
+ if (gotgint.b.hstnegsucstschng) {
+ /* Print statements during the HNP interrupt handling
+ * can cause it to fail.*/
+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+ if (gotgctl.b.hstnegscs) {
+ if (dwc_otg_is_host_mode(_core_if) ) {
+ _core_if->op_state = B_HOST;
+ /*
+ * Need to disable SOF interrupt immediately.
+ * When switching from device to host, the PCD
+ * interrupt handler won't handle the
+ * interrupt if host mode is already set. The
+ * HCD interrupt handler won't get called if
+ * the HCD state is HALT. This means that the
+ * interrupt does not get handled and Linux
+ * complains loudly.
+ */
+ gintmsk.d32 = 0;
+ gintmsk.b.sofintr = 1;
+ dwc_modify_reg32(&global_regs->gintmsk,
+ gintmsk.d32, 0);
+ pcd_stop(_core_if);
+ /*
+ * Initialize the Core for Host mode.
+ */
+ hcd_start( _core_if );
+ _core_if->op_state = B_HOST;
+ }
+ } else {
+ gotgctl.d32 = 0;
+ gotgctl.b.hnpreq = 1;
+ gotgctl.b.devhnpen = 1;
+ dwc_modify_reg32( &global_regs->gotgctl,
+ gotgctl.d32, 0);
+ DWC_DEBUGPL( DBG_ANY, "HNP Failed\n");
+ DWC_ERROR( "Device Not Connected/Responding\n" );
+ }
+ }
+ if (gotgint.b.hstnegdet) {
+ /* The disconnect interrupt is set at the same time as
+ * Host Negotiation Detected. During the mode
+ * switch all interrupts are cleared so the disconnect
+ * interrupt handler will not get executed.
+ */
+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Host Negotiation Detected++ (%s)\n",
+ (dwc_otg_is_host_mode(_core_if)?"Host":"Device"));
+ if (dwc_otg_is_device_mode(_core_if)){
+ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n",_core_if->op_state);
+ hcd_disconnect( _core_if );
+ pcd_start( _core_if );
+ _core_if->op_state = A_PERIPHERAL;
+ } else {
+ /*
+ * Need to disable SOF interrupt immediately. When
+ * switching from device to host, the PCD interrupt
+ * handler won't handle the interrupt if host mode is
+ * already set. The HCD interrupt handler won't get
+ * called if the HCD state is HALT. This means that
+ * the interrupt does not get handled and Linux
+ * complains loudly.
+ */
+ gintmsk.d32 = 0;
+ gintmsk.b.sofintr = 1;
+ dwc_modify_reg32(&global_regs->gintmsk,
+ gintmsk.d32, 0);
+ pcd_stop( _core_if );
+ hcd_start( _core_if );
+ _core_if->op_state = A_HOST;
+ }
+ }
+ if (gotgint.b.adevtoutchng) {
+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "A-Device Timeout Change++\n");
+ }
+ if (gotgint.b.debdone) {
+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+ "Debounce Done++\n");
+ }
+
+ /* Clear GOTGINT */
+ dwc_write_reg32 (&_core_if->core_global_regs->gotgint, gotgint.d32);
+
+ return 1;
+}
+
+/**
+ * This function handles the Connector ID Status Change Interrupt. It
+ * reads the OTG Control and Status Register (GOTGCTL) to determine whether
+ * this is a Device to Host Mode transition or a Host Mode to Device Mode
+ * transition.
+ *
+ * This only occurs when the cable is connected/removed from the PHY
+ * connector.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t count = 0;
+
+ gintsts_data_t gintsts = { .d32 = 0 };
+ gintmsk_data_t gintmsk = { .d32 = 0 };
+ gotgctl_data_t gotgctl = { .d32 = 0 };
+
+ /*
+ * Need to disable SOF interrupt immediately. If switching from device
+ * to host, the PCD interrupt handler won't handle the interrupt if
+ * host mode is already set. The HCD interrupt handler won't get
+ * called if the HCD state is HALT. This means that the interrupt does
+ * not get handled and Linux complains loudly.
+ */
+ gintmsk.b.sofintr = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+
+ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n",
+ (dwc_otg_is_host_mode(_core_if)?"Host":"Device"));
+ gotgctl.d32 = dwc_read_reg32(&_core_if->core_global_regs->gotgctl);
+ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32);
+ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts);
+
+ /* B-Device connector (Device Mode) */
+ if (gotgctl.b.conidsts) {
+ /* Wait for switch to device mode. */
+ while (!dwc_otg_is_device_mode(_core_if) ){
+ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n",
+ (dwc_otg_is_host_mode(_core_if)?"Host":"Peripheral"));
+ MDELAY(100);
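+			/* Deliberately dereference NULL to crash if the core
+			 * never switches to the expected mode (effectively a
+			 * BUG() after ~1000 seconds of polling). */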
+ if (++count > 10000) *(uint32_t*)NULL=0;
+ }
+ _core_if->op_state = B_PERIPHERAL;
+ dwc_otg_core_init(_core_if);
+ dwc_otg_enable_global_interrupts(_core_if);
+ pcd_start( _core_if );
+ } else {
+ /* A-Device connector (Host Mode) */
+ while (!dwc_otg_is_host_mode(_core_if) ) {
+ DWC_PRINT("Waiting for Host Mode, Mode=%s\n",
+ (dwc_otg_is_host_mode(_core_if)?"Host":"Peripheral"));
+ MDELAY(100);
+ if (++count > 10000) *(uint32_t*)NULL=0;
+ }
+ _core_if->op_state = A_HOST;
+ /*
+ * Initialize the Core for Host mode.
+ */
+ dwc_otg_core_init(_core_if);
+ dwc_otg_enable_global_interrupts(_core_if);
+ hcd_start( _core_if );
+ }
+
+ /* Set flag and clear interrupt */
+ gintsts.b.conidstschng = 1;
+ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates that a device is initiating the Session
+ * Request Protocol to request the host to turn on bus power so a new
+ * session can begin. The handler responds by turning on bus power. If
+ * the DWC_otg controller is in low power mode, the handler brings the
+ * controller out of low power mode before turning on bus power.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_session_req_intr( dwc_otg_core_if_t *_core_if )
+{
+#ifndef DWC_HOST_ONLY // winder
+ hprt0_data_t hprt0;
+#endif
+ gintsts_data_t gintsts;
+
+#ifndef DWC_HOST_ONLY
+ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n");
+
+ if (dwc_otg_is_device_mode(_core_if) ) {
+ DWC_PRINT("SRP: Device mode\n");
+ } else {
+ DWC_PRINT("SRP: Host mode\n");
+
+ /* Turn on the port power bit. */
+ hprt0.d32 = dwc_otg_read_hprt0( _core_if );
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(_core_if->host_if->hprt0, hprt0.d32);
+
+ /* Start the Connection timer. So a message can be displayed
+ * if connect does not occur within 10 seconds. */
+ hcd_session_start( _core_if );
+ }
+#endif
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.sessreqintr = 1;
+ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates that the DWC_otg controller has detected a
+ * resume or remote wakeup sequence. If the DWC_otg controller is in
+ * low power mode, the handler must bring the controller out of low
+ * power mode. The controller automatically begins resume
+ * signaling. The handler schedules a time to stop resume signaling.
+ */
+int32_t dwc_otg_handle_wakeup_detected_intr( dwc_otg_core_if_t *_core_if )
+{
+ gintsts_data_t gintsts;
+
+ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n");
+
+ if (dwc_otg_is_device_mode(_core_if) ) {
+ dctl_data_t dctl = {.d32=0};
+ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n",
+ dwc_read_reg32( &_core_if->dev_if->dev_global_regs->dsts));
+#ifdef PARTIAL_POWER_DOWN
+ if (_core_if->hwcfg4.b.power_optimiz) {
+ pcgcctl_data_t power = {.d32=0};
+
+ power.d32 = dwc_read_reg32( _core_if->pcgcctl );
+ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32);
+
+ power.b.stoppclk = 0;
+ dwc_write_reg32( _core_if->pcgcctl, power.d32);
+
+ power.b.pwrclmp = 0;
+ dwc_write_reg32( _core_if->pcgcctl, power.d32);
+
+ power.b.rstpdwnmodule = 0;
+ dwc_write_reg32( _core_if->pcgcctl, power.d32);
+ }
+#endif
+ /* Clear the Remote Wakeup Signalling */
+ dctl.b.rmtwkupsig = 1;
+ dwc_modify_reg32( &_core_if->dev_if->dev_global_regs->dctl,
+ dctl.d32, 0 );
+
+ if (_core_if->pcd_cb && _core_if->pcd_cb->resume_wakeup) {
+ _core_if->pcd_cb->resume_wakeup( _core_if->pcd_cb->p );
+ }
+
+ } else {
+ /*
+ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
+ * so that OPT tests pass with all PHYs).
+ */
+ hprt0_data_t hprt0 = {.d32=0};
+ pcgcctl_data_t pcgcctl = {.d32=0};
+ /* Restart the Phy Clock */
+ pcgcctl.b.stoppclk = 1;
+ dwc_modify_reg32(_core_if->pcgcctl, pcgcctl.d32, 0);
+ UDELAY(10);
+
+ /* Now wait for 70 ms. */
+ hprt0.d32 = dwc_otg_read_hprt0( _core_if );
+ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32);
+ MDELAY(70);
+ hprt0.b.prtres = 0; /* Resume */
+ dwc_write_reg32(_core_if->host_if->hprt0, hprt0.d32);
+ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(_core_if->host_if->hprt0));
+ }
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.wkupintr = 1;
+ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/**
+ * This interrupt indicates that a device has been disconnected from
+ * the root port.
+ */
+int32_t dwc_otg_handle_disconnect_intr( dwc_otg_core_if_t *_core_if)
+{
+ gintsts_data_t gintsts;
+
+ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n",
+ (dwc_otg_is_host_mode(_core_if)?"Host":"Device"),
+ op_state_str(_core_if));
+
+/** @todo Consolidate this if statement. */
+#ifndef DWC_HOST_ONLY
+ if (_core_if->op_state == B_HOST) {
+ /* If in device mode Disconnect and stop the HCD, then
+ * start the PCD. */
+ hcd_disconnect( _core_if );
+ pcd_start( _core_if );
+ _core_if->op_state = B_PERIPHERAL;
+ } else if (dwc_otg_is_device_mode(_core_if)) {
+ gotgctl_data_t gotgctl = { .d32 = 0 };
+ gotgctl.d32 = dwc_read_reg32(&_core_if->core_global_regs->gotgctl);
+ if (gotgctl.b.hstsethnpen==1) {
+ /* Do nothing, if HNP in process the OTG
+ * interrupt "Host Negotiation Detected"
+ * interrupt will do the mode switch.
+ */
+ } else if (gotgctl.b.devhnpen == 0) {
+ /* If in device mode Disconnect and stop the HCD, then
+ * start the PCD. */
+ hcd_disconnect( _core_if );
+ pcd_start( _core_if );
+ _core_if->op_state = B_PERIPHERAL;
+ } else {
+ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n");
+ }
+ } else {
+ if (_core_if->op_state == A_HOST) {
+ /* A-Cable still connected but device disconnected. */
+ hcd_disconnect( _core_if );
+ }
+ }
+#endif
+/* Without OTG, we should use the disconnect function!? winder added.*/
+#if 1 // NO OTG, so host only!!
+ hcd_disconnect( _core_if );
+#endif
+
+ gintsts.d32 = 0;
+ gintsts.b.disconnect = 1;
+ dwc_write_reg32 (&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+/**
+ * This interrupt indicates that SUSPEND state has been detected on
+ * the USB.
+ *
+ * For HNP the USB Suspend interrupt signals the change from
+ * "a_peripheral" to "a_host".
+ *
+ * When power management is enabled the core will be put in low power
+ * mode.
+ */
+int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *_core_if )
+{
+ dsts_data_t dsts;
+ gintsts_data_t gintsts;
+
+ //805141:<IFTW-fchang>.removed DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n");
+
+ if (dwc_otg_is_device_mode( _core_if ) ) {
+ /* Check the Device status register to determine if the Suspend
+ * state is active. */
+ dsts.d32 = dwc_read_reg32( &_core_if->dev_if->dev_global_regs->dsts);
+ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32);
+ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d "
+ "HWCFG4.power Optimize=%d\n",
+ dsts.b.suspsts, _core_if->hwcfg4.b.power_optimiz);
+
+
+#ifdef PARTIAL_POWER_DOWN
+/** @todo Add a module parameter for power management. */
+
+ if (dsts.b.suspsts && _core_if->hwcfg4.b.power_optimiz) {
+ pcgcctl_data_t power = {.d32=0};
+ DWC_DEBUGPL(DBG_CIL, "suspend\n");
+
+ power.b.pwrclmp = 1;
+ dwc_write_reg32( _core_if->pcgcctl, power.d32);
+
+ power.b.rstpdwnmodule = 1;
+ dwc_modify_reg32( _core_if->pcgcctl, 0, power.d32);
+
+ power.b.stoppclk = 1;
+ dwc_modify_reg32( _core_if->pcgcctl, 0, power.d32);
+
+ } else {
+ DWC_DEBUGPL(DBG_ANY,"disconnect?\n");
+ }
+#endif
+ /* PCD callback for suspend. */
+ pcd_suspend(_core_if);
+ } else {
+ if (_core_if->op_state == A_PERIPHERAL) {
+ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n");
+ /* Clear the a_peripheral flag, back to a_host. */
+ pcd_stop( _core_if );
+ hcd_start( _core_if );
+ _core_if->op_state = A_HOST;
+ }
+ }
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.usbsuspend = 1;
+ dwc_write_reg32( &_core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+
+/**
+ * This function returns the currently pending common interrupts: the Core
+ * Interrupt register (GINTSTS) masked by GINTMSK and by the common
+ * interrupt mask built below.
+ */
+static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *_core_if)
+{
+ gintsts_data_t gintsts;
+ gintmsk_data_t gintmsk;
+ gintmsk_data_t gintmsk_common = {.d32=0};
+ gintmsk_common.b.wkupintr = 1;
+ gintmsk_common.b.sessreqintr = 1;
+ gintmsk_common.b.conidstschng = 1;
+ gintmsk_common.b.otgintr = 1;
+ gintmsk_common.b.modemismatch = 1;
+ gintmsk_common.b.disconnect = 1;
+ gintmsk_common.b.usbsuspend = 1;
+ /** @todo: The port interrupt occurs while in device
+ * mode. Added code to CIL to clear the interrupt for now!
+ */
+ gintmsk_common.b.portintr = 1;
+
+ gintsts.d32 = dwc_read_reg32(&_core_if->core_global_regs->gintsts);
+ gintmsk.d32 = dwc_read_reg32(&_core_if->core_global_regs->gintmsk);
+#ifdef DEBUG
+ /* if any common interrupts set */
+ if (gintsts.d32 & gintmsk_common.d32) {
+ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n",
+ gintsts.d32, gintmsk.d32);
+ }
+#endif
+
+ return ((gintsts.d32 & gintmsk.d32 ) & gintmsk_common.d32);
+
+}
+
+/**
+ * Common interrupt handler.
+ *
+ * The common interrupts are those that occur in both Host and Device mode.
+ * This handler handles the following interrupts:
+ * - Mode Mismatch Interrupt
+ * - Disconnect Interrupt
+ * - OTG Interrupt
+ * - Connector ID Status Change Interrupt
+ * - Session Request Interrupt.
+ * - Resume / Remote Wakeup Detected Interrupt.
+ *
+ */
+extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if )
+{
+ int retval = 0;
+ gintsts_data_t gintsts;
+
+ gintsts.d32 = dwc_otg_read_common_intr(_core_if);
+
+ if (gintsts.b.modemismatch) {
+ retval |= dwc_otg_handle_mode_mismatch_intr( _core_if );
+ }
+ if (gintsts.b.otgintr) {
+ retval |= dwc_otg_handle_otg_intr( _core_if );
+ }
+ if (gintsts.b.conidstschng) {
+ retval |= dwc_otg_handle_conn_id_status_change_intr( _core_if );
+ }
+ if (gintsts.b.disconnect) {
+ retval |= dwc_otg_handle_disconnect_intr( _core_if );
+ }
+ if (gintsts.b.sessreqintr) {
+ retval |= dwc_otg_handle_session_req_intr( _core_if );
+ }
+ if (gintsts.b.wkupintr) {
+ retval |= dwc_otg_handle_wakeup_detected_intr( _core_if );
+ }
+ if (gintsts.b.usbsuspend) {
+ retval |= dwc_otg_handle_usb_suspend_intr( _core_if );
+ }
+ if (gintsts.b.portintr && dwc_otg_is_device_mode(_core_if)) {
+ /* The port interrupt occurs while in device mode with HPRT0
+ * Port Enable/Disable.
+ */
+ gintsts.d32 = 0;
+ gintsts.b.portintr = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts,
+ gintsts.d32);
+ retval |= 1;
+
+ }
+ return retval;
+}
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.c
new file mode 100644
index 0000000000..5c64ebbe0d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.c
@@ -0,0 +1,1277 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 631780 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ * The dwc_otg_driver module provides the initialization and cleanup entry
+ * points for the DWC_otg driver. This module will be dynamically installed
+ * after Linux is booted using the insmod command. When the module is
+ * installed, the dwc_otg_init function is called. When the module is
+ * removed (using rmmod), the dwc_otg_cleanup function is called.
+ *
+ * This module also defines a data structure for the dwc_otg_driver, which is
+ * used in conjunction with the standard ARM lm_device structure. These
+ * structures allow the OTG driver to comply with the standard Linux driver
+ * model in which devices and drivers are registered with a bus driver. This
+ * has the benefit that Linux can expose attributes of the driver and device
+ * in its special sysfs file system. Users can then read or write files in
+ * this file system to perform diagnostics on the driver components or the
+ * device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/gpio.h>
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h> /* permission constants */
+#include <linux/irq.h>
+#include <asm/io.h>
+
+#include "dwc_otg_plat.h"
+#include "dwc_otg_attr.h"
+#include "dwc_otg_driver.h"
+#include "dwc_otg_cil.h"
+#include "dwc_otg_cil_ifx.h"
+
+// #include "dwc_otg_pcd.h" // device
+#include "dwc_otg_hcd.h" // host
+
+#include "dwc_otg_ifx.h" // for Infineon platform specific.
+
+#define DWC_DRIVER_VERSION "2.60a 22-NOV-2006"
+#define DWC_DRIVER_DESC "HS OTG USB Controller driver"
+
+const char dwc_driver_name[] = "dwc_otg";
+
+static unsigned long dwc_iomem_base = IFX_USB_IOMEM_BASE;
+int dwc_irq = LTQ_USB_INT;
+//int dwc_irq = 54;
+//int dwc_irq = IFXMIPS_USB_OC_INT;
+
+extern int ifx_usb_hc_init(unsigned long base_addr, int irq);
+extern void ifx_usb_hc_remove(void);
+
+/*-------------------------------------------------------------------------*/
+/* Encapsulate the module parameter settings */
+
+static dwc_otg_core_params_t dwc_otg_module_params = {
+ .opt = -1,
+ .otg_cap = -1,
+ .dma_enable = -1,
+ .dma_burst_size = -1,
+ .speed = -1,
+ .host_support_fs_ls_low_power = -1,
+ .host_ls_low_power_phy_clk = -1,
+ .enable_dynamic_fifo = -1,
+ .data_fifo_size = -1,
+ .dev_rx_fifo_size = -1,
+ .dev_nperio_tx_fifo_size = -1,
+ .dev_perio_tx_fifo_size = /* dev_perio_tx_fifo_size_1 */ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, /* 15 */
+ .host_rx_fifo_size = -1,
+ .host_nperio_tx_fifo_size = -1,
+ .host_perio_tx_fifo_size = -1,
+ .max_transfer_size = -1,
+ .max_packet_count = -1,
+ .host_channels = -1,
+ .dev_endpoints = -1,
+ .phy_type = -1,
+ .phy_utmi_width = -1,
+ .phy_ulpi_ddr = -1,
+ .phy_ulpi_ext_vbus = -1,
+ .i2c_enable = -1,
+ .ulpi_fs_ls = -1,
+ .ts_dline = -1,
+ .en_multiple_tx_fifo = -1,
+ .dev_tx_fifo_size = { /* dev_tx_fifo_size */
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+ }, /* 15 */
+ .thr_ctl = -1,
+ .tx_thr_length = -1,
+ .rx_thr_length = -1,
+};
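+/* A value of -1 means "not set by the user": check_parameters() below replaces
+ * it with the compile-time default from dwc_otg_cil_ifx.h and/or a value
+ * derived from the hardware configuration registers. */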
+
+/**
+ * This function shows the Driver Version.
+ */
+static ssize_t version_show(struct device_driver *dev, char *buf)
+{
+ return snprintf(buf, sizeof(DWC_DRIVER_VERSION)+2,"%s\n",
+ DWC_DRIVER_VERSION);
+}
+static DRIVER_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * Global Debug Level Mask.
+ */
+uint32_t g_dbg_lvl = 0xff;	/* 0xff enables all debug message categories (0 = off) */
+
+/**
+ * This function shows the driver Debug Level.
+ */
+static ssize_t dbg_level_show(struct device_driver *_drv, char *_buf)
+{
+ return sprintf(_buf, "0x%0x\n", g_dbg_lvl);
+}
+/**
+ * This function stores the driver Debug Level.
+ */
+static ssize_t dbg_level_store(struct device_driver *_drv, const char *_buf,
+ size_t _count)
+{
+ g_dbg_lvl = simple_strtoul(_buf, NULL, 16);
+ return _count;
+}
+static DRIVER_ATTR(debuglevel, S_IRUGO|S_IWUSR, dbg_level_show, dbg_level_store);
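+/* The two driver attributes above appear under the driver's sysfs directory
+ * once dwc_otg_init() creates them; e.g. (path assumed, not verified here):
+ *
+ *	cat /sys/bus/platform/drivers/dwc_otg/version
+ *	echo 0x10 > /sys/bus/platform/drivers/dwc_otg/debuglevel
+ */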
+
+/**
+ * This function is called during module initialization to verify that
+ * the module parameters are in a valid state.
+ */
+static int check_parameters(dwc_otg_core_if_t *core_if)
+{
+ int i;
+ int retval = 0;
+
+/* Checks if the parameter is outside of its valid range of values */
+#define DWC_OTG_PARAM_TEST(_param_,_low_,_high_) \
+ ((dwc_otg_module_params._param_ < (_low_)) || \
+ (dwc_otg_module_params._param_ > (_high_)))
+
+/* If the parameter has been set by the user, check that the parameter value is
+ * within the valid range of values. If not, report a module error. */
+#define DWC_OTG_PARAM_ERR(_param_,_low_,_high_,_string_) \
+ do { \
+ if (dwc_otg_module_params._param_ != -1) { \
+ if (DWC_OTG_PARAM_TEST(_param_,(_low_),(_high_))) { \
+ DWC_ERROR("`%d' invalid for parameter `%s'\n", \
+ dwc_otg_module_params._param_, _string_); \
+ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
+ retval ++; \
+ } \
+ } \
+ } while (0)
+
+ DWC_OTG_PARAM_ERR(opt,0,1,"opt");
+ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap");
+ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable");
+ DWC_OTG_PARAM_ERR(speed,0,1,"speed");
+ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power");
+ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk");
+ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo");
+ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size");
+ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size");
+ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size");
+ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size");
+ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size");
+ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size");
+ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size");
+ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count");
+ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels");
+ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints");
+ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type");
+ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr");
+ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus");
+ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable");
+ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls");
+ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline");
+
+ if (dwc_otg_module_params.dma_burst_size != -1) {
+ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,4,4) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) &&
+ DWC_OTG_PARAM_TEST(dma_burst_size,256,256))
+ {
+ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n",
+ dwc_otg_module_params.dma_burst_size);
+ dwc_otg_module_params.dma_burst_size = 32;
+ retval ++;
+ }
+ }
+
+ if (dwc_otg_module_params.phy_utmi_width != -1) {
+ if (DWC_OTG_PARAM_TEST(phy_utmi_width,8,8) &&
+ DWC_OTG_PARAM_TEST(phy_utmi_width,16,16))
+ {
+ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n",
+ dwc_otg_module_params.phy_utmi_width);
+ //dwc_otg_module_params.phy_utmi_width = 16;
+ dwc_otg_module_params.phy_utmi_width = 8;
+ retval ++;
+ }
+ }
+
+ for (i=0; i<15; i++) {
+ /** @todo should be like above */
+ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i],4,768,"dev_perio_tx_fifo_size");
+ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) {
+ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i],4,768)) {
+ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n",
+ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i);
+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default;
+ retval ++;
+ }
+ }
+ }
+
+ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo");
+ for (i = 0; i < 15; i++) {
+ /** @todo should be like above */
+ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i],4,768,"dev_tx_fifo_size");
+ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) {
+ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) {
+ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n",
+ dwc_otg_module_params.dev_tx_fifo_size[i],
+ "dev_tx_fifo_size", i);
+ dwc_otg_module_params.dev_tx_fifo_size[i] =
+ dwc_param_dev_tx_fifo_size_default;
+ retval++;
+ }
+ }
+ }
+ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl");
+ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length");
+ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length");
+
+ /* At this point, all module parameters that have been set by the user
+ * are valid, and those that have not are left unset. Now set their
+ * default values and/or check the parameters against the hardware
+ * configurations of the OTG core. */
+
+
+
+/* This sets the parameter to the default value if it has not been set by the
+ * user */
+#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \
+ ({ \
+ int changed = 1; \
+ if (dwc_otg_module_params._param_ == -1) { \
+ changed = 0; \
+ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
+ } \
+ changed; \
+ })
+
+/* This checks the parameter against the hardware configuration to see if it is
+ * valid. It is possible that the default value could be invalid. In this
+ * case, it will report a module error if the user touched the parameter.
+ * Otherwise it will adjust the value without any error. */
+#define DWC_OTG_PARAM_CHECK_VALID(_param_,_str_,_is_valid_,_set_valid_) \
+ ({ \
+ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \
+ int error = 0; \
+ if (!(_is_valid_)) { \
+ if (changed) { \
+ DWC_ERROR("`%d' invalid for parameter `%s'. Check HW configuration.\n", dwc_otg_module_params._param_,_str_); \
+ error = 1; \
+ } \
+ dwc_otg_module_params._param_ = (_set_valid_); \
+ } \
+ error; \
+ })
+
+ /* OTG Cap */
+ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap,"otg_cap",
+ ({
+ int valid;
+ valid = 1;
+ switch (dwc_otg_module_params.otg_cap) {
+ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE:
+ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) valid = 0;
+ break;
+ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE:
+ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) &&
+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) &&
+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) &&
+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST))
+ {
+ valid = 0;
+ }
+ break;
+ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+ /* always valid */
+ break;
+ }
+ valid;
+ }),
+ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ||
+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) ||
+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ?
+ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE :
+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable,"dma_enable",
+ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 0 : 1,
+ 0);
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(opt,"opt",
+ 1,
+ 0);
+
+ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size);
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power,
+ "host_support_fs_ls_low_power",
+ 1, 0);
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo,
+ "enable_dynamic_fifo",
+ ((dwc_otg_module_params.enable_dynamic_fifo == 0) ||
+ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0);
+
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size,
+ "data_fifo_size",
+ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth),
+ core_if->hwcfg3.b.dfifo_depth);
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size,
+ "dev_rx_fifo_size",
+ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)),
+ dwc_read_reg32(&core_if->core_global_regs->grxfsiz));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size,
+ "dev_nperio_tx_fifo_size",
+ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)),
+ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size,
+ "host_rx_fifo_size",
+ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)),
+ dwc_read_reg32(&core_if->core_global_regs->grxfsiz));
+
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size,
+ "host_nperio_tx_fifo_size",
+ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)),
+ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size,
+ "host_perio_tx_fifo_size",
+ (dwc_otg_module_params.host_perio_tx_fifo_size <= ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))),
+ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16)));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size,
+ "max_transfer_size",
+ (dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))),
+ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count,
+ "max_packet_count",
+ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))),
+ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels,
+ "host_channels",
+ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)),
+ (core_if->hwcfg2.b.num_host_chan + 1));
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints,
+ "dev_endpoints",
+ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)),
+ core_if->hwcfg2.b.num_dev_ep);
+
+/*
+ * Define the following to disable the FS PHY Hardware checking. This is for
+ * internal testing only.
+ *
+ * #define NO_FS_PHY_HW_CHECKS
+ */
+
+#ifdef NO_FS_PHY_HW_CHECKS
+ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type,
+ "phy_type", 1, 0);
+#else
+ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type,
+ "phy_type",
+ ({
+ int valid = 0;
+ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) &&
+ ((core_if->hwcfg2.b.hs_phy_type == 1) ||
+ (core_if->hwcfg2.b.hs_phy_type == 3)))
+ {
+ valid = 1;
+ }
+ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) &&
+ ((core_if->hwcfg2.b.hs_phy_type == 2) ||
+ (core_if->hwcfg2.b.hs_phy_type == 3)))
+ {
+ valid = 1;
+ }
+ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) &&
+ (core_if->hwcfg2.b.fs_phy_type == 1))
+ {
+ valid = 1;
+ }
+ valid;
+ }),
+ ({
+ int set = DWC_PHY_TYPE_PARAM_FS;
+ if (core_if->hwcfg2.b.hs_phy_type) {
+ if ((core_if->hwcfg2.b.hs_phy_type == 3) ||
+ (core_if->hwcfg2.b.hs_phy_type == 1)) {
+ set = DWC_PHY_TYPE_PARAM_UTMI;
+ }
+ else {
+ set = DWC_PHY_TYPE_PARAM_ULPI;
+ }
+ }
+ set;
+ }));
+#endif
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(speed,"speed",
+ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1,
+ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0);
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk,
+ "host_ls_low_power_phy_clk",
+ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1),
+ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ));
+
+ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr);
+ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus);
+ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width);
+ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls);
+ DWC_OTG_PARAM_SET_DEFAULT(ts_dline);
+
+#ifdef NO_FS_PHY_HW_CHECKS
+ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable,
+ "i2c_enable", 1, 0);
+#else
+ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable,
+ "i2c_enable",
+ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 0 : 1,
+ 0);
+#endif
+
+ for (i=0; i<16; i++) {
+
+ int changed = 1;
+ int error = 0;
+
+ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) {
+ changed = 0;
+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default;
+ }
+ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
+ if (changed) {
+ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i],i);
+ error = 1;
+ }
+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
+ }
+ retval += error;
+ }
+
+ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo,
+ "en_multiple_tx_fifo",
+ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) &&
+ (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1, 0);
+
+ for (i = 0; i < 16; i++) {
+ int changed = 1;
+ int error = 0;
+ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) {
+ changed = 0;
+ dwc_otg_module_params.dev_tx_fifo_size[i] =
+ dwc_param_dev_tx_fifo_size_default;
+ }
+ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <=
+ (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
+ if (changed) {
+ DWC_ERROR("%d' invalid for parameter `dev_perio_fifo_size_%d'."
+ "Check HW configuration.\n",dwc_otg_module_params.dev_tx_fifo_size[i],i);
+ error = 1;
+ }
+ dwc_otg_module_params.dev_tx_fifo_size[i] =
+ dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
+ }
+ retval += error;
+ }
+ DWC_OTG_PARAM_SET_DEFAULT(thr_ctl);
+ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length);
+ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length);
+ return retval;
+} // check_parameters
+
+
+/**
+ * This function is the top level interrupt handler for the Common
+ * (Device and host modes) interrupts.
+ */
+static irqreturn_t dwc_otg_common_irq(int _irq, void *_dev)
+{
+ dwc_otg_device_t *otg_dev = _dev;
+ int32_t retval = IRQ_NONE;
+
+ retval = dwc_otg_handle_common_intr( otg_dev->core_if );
+
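+	/* Acknowledge the interrupt at the Lantiq/Infineon interrupt
+	 * controller; mask_and_ack_ifx_irq() is platform-specific and not
+	 * part of the generic DWC_otg code. */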
+ mask_and_ack_ifx_irq (_irq);
+
+ return IRQ_RETVAL(retval);
+}
+
+
+/**
+ * This function is called when a DWC_OTG device is unregistered with the
+ * dwc_otg_driver. This happens, for example, when the rmmod command is
+ * executed. The device may or may not be electrically present. If it is
+ * present, the driver stops device processing. Any resources used on behalf
+ * of this device are freed.
+ *
+ * @return
+ */
+static int
+dwc_otg_driver_remove(struct platform_device *_dev)
+{
+ //dwc_otg_device_t *otg_dev = dev_get_drvdata(&_dev->dev);
+ dwc_otg_device_t *otg_dev = platform_get_drvdata(_dev);
+
+ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, _dev);
+
+ if (otg_dev == NULL) {
+ /* Memory allocation for the dwc_otg_device failed. */
+ return 0;
+ }
+
+ /*
+ * Free the IRQ
+ */
+ if (otg_dev->common_irq_installed) {
+ free_irq( otg_dev->irq, otg_dev );
+ }
+
+#ifndef DWC_DEVICE_ONLY
+ if (otg_dev->hcd != NULL) {
+ dwc_otg_hcd_remove(&_dev->dev);
+ }
+#endif
+ printk("after removehcd\n");
+
+// Note: Integrating HOST and DEVICE (Gadget) operation is not planned yet.
+#ifndef DWC_HOST_ONLY
+ if (otg_dev->pcd != NULL) {
+ dwc_otg_pcd_remove(otg_dev);
+ }
+#endif
+ if (otg_dev->core_if != NULL) {
+ dwc_otg_cil_remove( otg_dev->core_if );
+ }
+ printk("after removecil\n");
+
+ /*
+ * Remove the device attributes
+ */
+ dwc_otg_attr_remove(&_dev->dev);
+ printk("after removeattr\n");
+
+ /*
+ * Return the memory.
+ */
+ if (otg_dev->base != NULL) {
+ iounmap(otg_dev->base);
+ }
+ if (otg_dev->phys_addr != 0) {
+ release_mem_region(otg_dev->phys_addr, otg_dev->base_len);
+ }
+ kfree(otg_dev);
+
+ /*
+ * Clear the drvdata pointer.
+ */
+ //dev_set_drvdata(&_dev->dev, 0);
+ platform_set_drvdata(_dev, 0);
+ return 0;
+}
+
+/**
+ * This function is called when an DWC_OTG device is bound to a
+ * dwc_otg_driver. It creates the driver components required to
+ * control the device (CIL, HCD, and PCD) and it initializes the
+ * device. The driver components are stored in a dwc_otg_device
+ * structure. A reference to the dwc_otg_device is saved in the
+ * lm_device. This allows the driver to access the dwc_otg_device
+ * structure on subsequent calls to driver methods for this device.
+ *
+ * @return
+ */
+static int __devinit
+dwc_otg_driver_probe(struct platform_device *_dev)
+{
+ int retval = 0;
+ dwc_otg_device_t *dwc_otg_device;
+ int pin = (int)_dev->dev.platform_data;
+ int32_t snpsid;
+ struct resource *res;
+ gusbcfg_data_t usbcfg = {.d32 = 0};
+
+ // GPIOs
+ if(pin >= 0)
+ {
+ gpio_request(pin, "usb_power");
+ gpio_direction_output(pin, 1);
+ gpio_set_value(pin, 1);
+ gpio_export(pin, 0);
+ }
+ dev_dbg(&_dev->dev, "dwc_otg_driver_probe (%p)\n", _dev);
+
+ dwc_otg_device = kmalloc(sizeof(dwc_otg_device_t), GFP_KERNEL);
+ if (dwc_otg_device == 0) {
+ dev_err(&_dev->dev, "kmalloc of dwc_otg_device failed\n");
+ retval = -ENOMEM;
+ goto fail;
+ }
+ memset(dwc_otg_device, 0, sizeof(*dwc_otg_device));
+ dwc_otg_device->reg_offset = 0xFFFFFFFF;
+
+ /*
+ * Retrieve the memory and IRQ resources.
+ */
+ dwc_otg_device->irq = platform_get_irq(_dev, 0);
+	if (dwc_otg_device->irq <= 0) {
+ dev_err(&_dev->dev, "no device irq\n");
+ retval = -ENODEV;
+ goto fail;
+ }
+ dev_dbg(&_dev->dev, "OTG - device irq: %d\n", dwc_otg_device->irq);
+ res = platform_get_resource(_dev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&_dev->dev, "no CSR address\n");
+ retval = -ENODEV;
+ goto fail;
+ }
+ dev_dbg(&_dev->dev, "OTG - ioresource_mem start0x%08x: end:0x%08x\n",
+ (unsigned)res->start, (unsigned)res->end);
+ dwc_otg_device->phys_addr = res->start;
+ dwc_otg_device->base_len = res->end - res->start + 1;
+ if (request_mem_region(dwc_otg_device->phys_addr, dwc_otg_device->base_len,
+ dwc_driver_name) == NULL) {
+ dev_err(&_dev->dev, "request_mem_region failed\n");
+ retval = -EBUSY;
+ goto fail;
+ }
+
+ /*
+ * Map the DWC_otg Core memory into virtual address space.
+ */
+ dwc_otg_device->base = ioremap_nocache(dwc_otg_device->phys_addr, dwc_otg_device->base_len);
+ if (dwc_otg_device->base == NULL) {
+ dev_err(&_dev->dev, "ioremap() failed\n");
+ retval = -ENOMEM;
+ goto fail;
+ }
+ dev_dbg(&_dev->dev, "mapped base=0x%08x\n", (unsigned)dwc_otg_device->base);
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the SNPSID register contents. The value should be
+	 * 0x4F542XXX, which corresponds to "OT2", as in "OTG version 2.XX".
+ */
+ snpsid = dwc_read_reg32((uint32_t *)((uint8_t *)dwc_otg_device->base + 0x40));
+ if ((snpsid & 0xFFFFF000) != 0x4F542000) {
+ dev_err(&_dev->dev, "Bad value for SNPSID: 0x%08x\n", snpsid);
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ /*
+ * Initialize driver data to point to the global DWC_otg
+ * Device structure.
+ */
+ platform_set_drvdata(_dev, dwc_otg_device);
+ dev_dbg(&_dev->dev, "dwc_otg_device=0x%p\n", dwc_otg_device);
+ dwc_otg_device->core_if = dwc_otg_cil_init( dwc_otg_device->base, &dwc_otg_module_params);
+ if (dwc_otg_device->core_if == 0) {
+ dev_err(&_dev->dev, "CIL initialization failed!\n");
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * Validate parameter values.
+ */
+ if (check_parameters(dwc_otg_device->core_if) != 0) {
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ /* Added for PLB DMA phys virt mapping */
+ //dwc_otg_device->core_if->phys_addr = dwc_otg_device->phys_addr;
+ /*
+ * Create Device Attributes in sysfs
+ */
+ dwc_otg_attr_create (&_dev->dev);
+
+ /*
+ * Disable the global interrupt until all the interrupt
+ * handlers are installed.
+ */
+ dwc_otg_disable_global_interrupts( dwc_otg_device->core_if );
+ /*
+ * Install the interrupt handler for the common interrupts before
+ * enabling common interrupts in core_init below.
+ */
+ DWC_DEBUGPL( DBG_CIL, "registering (common) handler for irq%d\n", dwc_otg_device->irq);
+
+ retval = request_irq((unsigned int)dwc_otg_device->irq, dwc_otg_common_irq,
+ //SA_INTERRUPT|SA_SHIRQ, "dwc_otg", (void *)dwc_otg_device );
+ IRQF_SHARED, "dwc_otg", (void *)dwc_otg_device );
+ //IRQF_DISABLED, "dwc_otg", (void *)dwc_otg_device );
+ if (retval != 0) {
+ DWC_ERROR("request of irq%d failed retval: %d\n", dwc_otg_device->irq, retval);
+ retval = -EBUSY;
+ goto fail;
+ } else {
+ dwc_otg_device->common_irq_installed = 1;
+ }
+
+ /*
+ * Initialize the DWC_otg core.
+ */
+ dwc_otg_core_init( dwc_otg_device->core_if );
+
+
+#ifndef DWC_HOST_ONLY // otg device mode. (gadget.)
+ /*
+ * Initialize the PCD
+ */
+ retval = dwc_otg_pcd_init(dwc_otg_device);
+ if (retval != 0) {
+ DWC_ERROR("dwc_otg_pcd_init failed\n");
+ dwc_otg_device->pcd = NULL;
+ goto fail;
+ }
+#endif // DWC_HOST_ONLY
+
+#ifndef DWC_DEVICE_ONLY // otg host mode. (HCD)
+ /*
+ * Initialize the HCD
+ */
+#if 1 /*fscz*/
+ /* force_host_mode */
+ usbcfg.d32 = dwc_read_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg);
+ usbcfg.b.force_host_mode = 1;
+ dwc_write_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg, usbcfg.d32);
+#endif
+ retval = dwc_otg_hcd_init(&_dev->dev, dwc_otg_device);
+ if (retval != 0) {
+ DWC_ERROR("dwc_otg_hcd_init failed\n");
+ dwc_otg_device->hcd = NULL;
+ goto fail;
+ }
+#endif // DWC_DEVICE_ONLY
+
+ /*
+ * Enable the global interrupt after all the interrupt
+ * handlers are installed.
+ */
+ dwc_otg_enable_global_interrupts( dwc_otg_device->core_if );
+#if 0 /*fscz*/
+ usbcfg.d32 = dwc_read_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg);
+ usbcfg.b.force_host_mode = 0;
+ dwc_write_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg, usbcfg.d32);
+#endif
+
+
+ return 0;
+
+fail:
+ dwc_otg_driver_remove(_dev);
+ return retval;
+}
+
+/**
+ * This structure defines the methods to be called by a bus driver
+ * during the lifecycle of a device on that bus. Both drivers and
+ * devices are registered with a bus driver. The bus driver matches
+ * devices to drivers based on information in the device and driver
+ * structures.
+ *
+ * The probe function is called when the bus driver matches a device
+ * to this driver. The remove function is called when a device is
+ * unregistered with the bus driver.
+ */
+struct platform_driver dwc_otg_driver = {
+ .probe = dwc_otg_driver_probe,
+ .remove = dwc_otg_driver_remove,
+// .suspend = dwc_otg_driver_suspend,
+// .resume = dwc_otg_driver_resume,
+ .driver = {
+ .name = dwc_driver_name,
+ .owner = THIS_MODULE,
+ },
+};
+EXPORT_SYMBOL(dwc_otg_driver);
+
+/**
+ * This function is called when the dwc_otg_driver is installed with the
+ * insmod command. It registers the dwc_otg_driver structure with the
+ * appropriate bus driver. This will cause the dwc_otg_driver_probe function
+ * to be called. In addition, the bus driver will automatically expose
+ * attributes defined for the device and driver in the special sysfs file
+ * system.
+ *
+ * @return
+ */
+static int __init dwc_otg_init(void)
+{
+ int retval = 0;
+
+ printk(KERN_INFO "%s: version %s\n", dwc_driver_name, DWC_DRIVER_VERSION);
+
+ if (ltq_is_ase())
+ dwc_irq = LTQ_USB_ASE_INT;
+
+ // ifxmips setup
+ retval = ifx_usb_hc_init(dwc_iomem_base, dwc_irq);
+ if (retval < 0)
+ {
+ printk(KERN_ERR "%s retval=%d\n", __func__, retval);
+ return retval;
+ }
+ dwc_otg_power_on(); // ifx only!!
+
+
+ retval = platform_driver_register(&dwc_otg_driver);
+
+ if (retval < 0) {
+ printk(KERN_ERR "%s retval=%d\n", __func__, retval);
+ goto error1;
+ }
+
+ retval = driver_create_file(&dwc_otg_driver.driver, &driver_attr_version);
+ if (retval < 0)
+ {
+ printk(KERN_ERR "%s retval=%d\n", __func__, retval);
+ goto error2;
+ }
+ retval = driver_create_file(&dwc_otg_driver.driver, &driver_attr_debuglevel);
+ if (retval < 0)
+ {
+ printk(KERN_ERR "%s retval=%d\n", __func__, retval);
+ goto error3;
+ }
+ return retval;
+
+
+error3:
+ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version);
+error2:
+ driver_unregister(&dwc_otg_driver.driver);
+error1:
+ ifx_usb_hc_remove();
+ return retval;
+}
+module_init(dwc_otg_init);
+
+/**
+ * This function is called when the driver is removed from the kernel
+ * with the rmmod command. The driver unregisters itself with its bus
+ * driver.
+ *
+ */
+static void __exit dwc_otg_cleanup(void)
+{
+ printk(KERN_DEBUG "dwc_otg_cleanup()\n");
+
+ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_debuglevel);
+ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version);
+
+ platform_driver_unregister(&dwc_otg_driver);
+ ifx_usb_hc_remove();
+
+ printk(KERN_INFO "%s module removed\n", dwc_driver_name);
+}
+module_exit(dwc_otg_cleanup);
+
+MODULE_DESCRIPTION(DWC_DRIVER_DESC);
+MODULE_AUTHOR("Synopsys Inc.");
+MODULE_LICENSE("GPL");
+
+module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444);
+MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None");
+module_param_named(opt, dwc_otg_module_params.opt, int, 0444);
+MODULE_PARM_DESC(opt, "OPT Mode");
+module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444);
+MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled");
+module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444);
+MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256");
+module_param_named(speed, dwc_otg_module_params.speed, int, 0444);
+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed");
+module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444);
+MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Don't Support 1=Support");
+module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444);
+MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48MHz 1=6MHz");
+module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444);
+MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing");
+module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444);
+MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768");
+module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444);
+MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_13, dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768");
+module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444);
+/** @todo Set the max to 512K, modify checks */
+MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535");
+module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444);
+MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511");
+module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444);
+MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16");
+module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444);
+MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15");
+module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444);
+MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI");
+module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444);
+MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits");
+module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444);
+MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double");
+module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444);
+MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal 1=External");
+module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444);
+MODULE_PARM_DESC(i2c_enable, "FS PHY Interface");
+module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444);
+MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only");
+module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444);
+MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs");
+module_param_named(debug, g_dbg_lvl, int, 0444);
+MODULE_PARM_DESC(debug, "Debug level (default 0)");
+module_param_named(en_multiple_tx_fifo,
+ dwc_otg_module_params.en_multiple_tx_fifo, int, 0444);
+MODULE_PARM_DESC(en_multiple_tx_fifo,
+ "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled");
+module_param_named(dev_tx_fifo_size_1,
+ dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_2,
+ dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_3,
+ dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_4,
+ dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_5,
+ dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_6,
+ dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_7,
+ dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_8,
+ dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_9,
+ dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_10,
+ dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_11,
+ dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_12,
+ dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_13,
+ dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_14,
+ dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_15,
+ dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768");
+module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444);
+MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit field: "
+	"0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.; each bit 0=disabled 1=enabled");
+module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444);
+MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs");
+module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444);
+MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs");
+module_param_named (iomem_base, dwc_iomem_base, ulong, 0444);
+MODULE_PARM_DESC(iomem_base, "The base address of the DWC_OTG registers.");
+module_param_named (irq, dwc_irq, int, 0444);
+MODULE_PARM_DESC(irq, "The interrupt number");
+
+/** @page "Module Parameters"
+ *
+ * The following parameters may be specified when starting the module.
+ * These parameters define how the DWC_otg controller should be
+ * configured. Parameter values are passed to the CIL initialization
+ * function dwc_otg_cil_init().
+ *
+ * Example: <code>modprobe dwc_otg speed=1 otg_cap=1</code>
+ *
+
+ <table>
+ <tr><td>Parameter Name</td><td>Meaning</td></tr>
+
+ <tr>
+ <td>otg_cap</td>
+ <td>Specifies the OTG capabilities. The driver will automatically detect the
+ value for this parameter if none is specified.
+ - 0: HNP and SRP capable (default, if available)
+ - 1: SRP Only capable
+ - 2: No HNP/SRP capable
+ </td></tr>
+
+ <tr>
+ <td>dma_enable</td>
+ <td>Specifies whether to use slave or DMA mode for accessing the data FIFOs.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Slave
+ - 1: DMA (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>dma_burst_size</td>
+ <td>The DMA Burst size (applicable only for External DMA Mode).
+ - Values: 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+ </td></tr>
+
+ <tr>
+ <td>speed</td>
+ <td>Specifies the maximum speed of operation in host and device mode. The
+ actual speed depends on the speed of the attached device and the value of
+ phy_type.
+ - 0: High Speed (default)
+ - 1: Full Speed
+ </td></tr>
+
+ <tr>
+ <td>host_support_fs_ls_low_power</td>
+ <td>Specifies whether low power mode is supported when attached to a Full
+ Speed or Low Speed device in host mode.
+ - 0: Don't support low power mode (default)
+ - 1: Support low power mode
+ </td></tr>
+
+ <tr>
+ <td>host_ls_low_power_phy_clk</td>
+ <td>Specifies the PHY clock rate in low power mode when connected to a Low
+ Speed device in host mode. This parameter is applicable only if
+ HOST_SUPPORT_FS_LS_LOW_POWER is enabled.
+ - 0: 48 MHz (default)
+ - 1: 6 MHz
+ </td></tr>
+
+ <tr>
+ <td>enable_dynamic_fifo</td>
+ <td> Specifies whether FIFOs may be resized by the driver software.
+ - 0: Use cC FIFO size parameters
+ - 1: Allow dynamic FIFO sizing (default)
+ </td></tr>
+
+ <tr>
+ <td>data_fifo_size</td>
+ <td>Total number of 4-byte words in the data FIFO memory. This memory
+ includes the Rx FIFO, non-periodic Tx FIFO, and periodic Tx FIFOs.
+ - Values: 32 to 32768 (default 8192)
+
+ Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ </td></tr>
+
+ <tr>
+ <td>dev_rx_fifo_size</td>
+ <td>Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ FIFO sizing is enabled.
+ - Values: 16 to 32768 (default 1064)
+ </td></tr>
+
+ <tr>
+ <td>dev_nperio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the non-periodic Tx FIFO in device mode when
+ dynamic FIFO sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>dev_perio_tx_fifo_size_n (n = 1 to 15)</td>
+ <td>Number of 4-byte words in each of the periodic Tx FIFOs in device mode
+ when dynamic FIFO sizing is enabled.
+ - Values: 4 to 768 (default 256)
+ </td></tr>
+
+ <tr>
+ <td>host_rx_fifo_size</td>
+ <td>Number of 4-byte words in the Rx FIFO in host mode when dynamic FIFO
+ sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>host_nperio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the non-periodic Tx FIFO in host mode when
+ dynamic FIFO sizing is enabled in the core.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>host_perio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the host periodic Tx FIFO when dynamic FIFO
+ sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>max_transfer_size</td>
+ <td>The maximum transfer size supported in bytes.
+ - Values: 2047 to 65,535 (default 65,535)
+ </td></tr>
+
+ <tr>
+ <td>max_packet_count</td>
+ <td>The maximum number of packets in a transfer.
+ - Values: 15 to 511 (default 511)
+ </td></tr>
+
+ <tr>
+ <td>host_channels</td>
+ <td>The number of host channel registers to use.
+ - Values: 1 to 16 (default 12)
+
+ Note: The FPGA configuration supports a maximum of 12 host channels.
+ </td></tr>
+
+ <tr>
+ <td>dev_endpoints</td>
+ <td>The number of endpoints in addition to EP0 available for device mode
+ operations.
+ - Values: 1 to 15 (default 6 IN and OUT)
+
+ Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints in
+ addition to EP0.
+ </td></tr>
+
+ <tr>
+ <td>phy_type</td>
+ <td>Specifies the type of PHY interface to use. By default, the driver will
+ automatically detect the phy_type.
+ - 0: Full Speed
+ - 1: UTMI+ (default, if available)
+ - 2: ULPI
+ </td></tr>
+
+ <tr>
+ <td>phy_utmi_width</td>
+ <td>Specifies the UTMI+ Data Width. This parameter is applicable for a
+ phy_type of UTMI+. Also, this parameter is applicable only if the
+ OTG_HSPHY_WIDTH cC parameter was set to "8 and 16 bits", meaning that the
+ core has been configured to work at either data path width.
+ - Values: 8 or 16 bits (default 16)
+ </td></tr>
+
+ <tr>
+ <td>phy_ulpi_ddr</td>
+ <td>Specifies whether the ULPI operates at double or single data rate. This
+ parameter is only applicable if phy_type is ULPI.
+ - 0: single data rate ULPI interface with 8 bit wide data bus (default)
+ - 1: double data rate ULPI interface with 4 bit wide data bus
+ </td></tr>
+
+ <tr>
+ <td>i2c_enable</td>
+ <td>Specifies whether to use the I2C interface for full speed PHY. This
+ parameter is only applicable if PHY_TYPE is FS.
+ - 0: Disabled (default)
+ - 1: Enabled
+ </td></tr>
+
+ <tr>
+ <td>en_multiple_tx_fifo</td>
+ <td>Specifies whether dedicated Tx FIFOs are enabled for non-periodic IN EPs.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Disabled
+ - 1: Enabled (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>dev_tx_fifo_size_n (n = 1 to 15)</td>
+ <td>Number of 4-byte words in each of the Tx FIFOs in device mode
+ when dynamic FIFO sizing is enabled.
+ - Values: 4 to 768 (default 256)
+ </td></tr>
+
+*/
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.h
new file mode 100644
index 0000000000..7e6940dd13
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_driver.h
@@ -0,0 +1,84 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.h $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 510275 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_DRIVER_H__)
+#define __DWC_OTG_DRIVER_H__
+
+/** @file
+ * This file contains the interface to the Linux driver.
+ */
+#include "dwc_otg_cil.h"
+
+/* Type declarations */
+struct dwc_otg_pcd;
+struct dwc_otg_hcd;
+
+/**
+ * This structure is a wrapper that encapsulates the driver components used to
+ * manage a single DWC_otg controller.
+ */
+typedef struct dwc_otg_device
+{
+ /** Base address returned from ioremap() */
+ void *base;
+
+ /** Pointer to the core interface structure. */
+ dwc_otg_core_if_t *core_if;
+
+ /** Register offset for Diagnostic API.*/
+ uint32_t reg_offset;
+
+ /** Pointer to the PCD structure. */
+ struct dwc_otg_pcd *pcd;
+
+ /** Pointer to the HCD structure. */
+ struct dwc_otg_hcd *hcd;
+
+ /** Flag to indicate whether the common IRQ handler is installed. */
+ uint8_t common_irq_installed;
+
+ /** Interrupt request number. */
+ unsigned int irq;
+
+ /** Physical address of Control and Status registers, used by
+ * release_mem_region().
+ */
+ resource_size_t phys_addr;
+
+ /** Length of memory region, used by release_mem_region(). */
+ unsigned long base_len;
+} dwc_otg_device_t;
+
+//#define dev_dbg(fake, format, arg...) printk(KERN_CRIT __FILE__ ":%d: " format "\n" , __LINE__, ## arg)
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.c
new file mode 100644
index 0000000000..ad6bc72ce8
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.c
@@ -0,0 +1,2870 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 631780 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the implementation of the HCD. In Linux, the HCD
+ * implements the hc_driver API.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <linux/device.h>
+
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+#include <asm/irq.h>
+#include "dwc_otg_ifx.h" // for Infineon platform specific.
+extern atomic_t release_later;
+
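+/* 32-bit DMA mask installed on the controller's struct device in dwc_otg_hcd_init() when DMA is enabled. */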
+static u64 dma_mask = DMA_BIT_MASK(32);
+
+static const char dwc_otg_hcd_name [] = "dwc_otg_hcd";
+static const struct hc_driver dwc_otg_hc_driver =
+{
+ .description = dwc_otg_hcd_name,
+ .product_desc = "DWC OTG Controller",
+ .hcd_priv_size = sizeof(dwc_otg_hcd_t),
+ .irq = dwc_otg_hcd_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+ //.reset =
+ .start = dwc_otg_hcd_start,
+ //.suspend =
+ //.resume =
+ .stop = dwc_otg_hcd_stop,
+ .urb_enqueue = dwc_otg_hcd_urb_enqueue,
+ .urb_dequeue = dwc_otg_hcd_urb_dequeue,
+ .endpoint_disable = dwc_otg_hcd_endpoint_disable,
+ .get_frame_number = dwc_otg_hcd_get_frame_number,
+ .hub_status_data = dwc_otg_hcd_hub_status_data,
+ .hub_control = dwc_otg_hcd_hub_control,
+ //.hub_suspend =
+ //.hub_resume =
+};
+
+
+/**
+ * Work queue function for starting the HCD when A-Cable is connected.
+ * The dwc_otg_hcd_start() must be called in a process context.
+ */
+static void hcd_start_func(struct work_struct *work)
+{
+ struct dwc_otg_hcd *priv =
+ container_of(work, struct dwc_otg_hcd, start_work);
+ struct usb_hcd *usb_hcd = (struct usb_hcd *)priv->_p;
+ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
+ if (usb_hcd) {
+ dwc_otg_hcd_start(usb_hcd);
+ }
+}
+
+
+/**
+ * HCD Callback function for starting the HCD when A-Cable is
+ * connected.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_start_cb(void *_p)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
+ hprt0_data_t hprt0;
+ if (core_if->op_state == B_HOST) {
+ /*
+ * Reset the port. During a HNP mode switch the reset
+ * needs to occur within 1ms and have a duration of at
+ * least 50ms.
+ */
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ ((struct usb_hcd *)_p)->self.is_b_host = 1;
+ } else {
+ ((struct usb_hcd *)_p)->self.is_b_host = 0;
+ }
+ /* Need to start the HCD in a non-interrupt context. */
+ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
+ dwc_otg_hcd->_p = _p;
+ schedule_work(&dwc_otg_hcd->start_work);
+ return 1;
+}
+
+
+/**
+ * HCD Callback function for stopping the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_stop_cb( void *_p )
+{
+ struct usb_hcd *usb_hcd = (struct usb_hcd *)_p;
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ dwc_otg_hcd_stop( usb_hcd );
+ return 1;
+}
+static void del_xfer_timers(dwc_otg_hcd_t *_hcd)
+{
+#ifdef DEBUG
+ int i;
+ int num_channels = _hcd->core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ del_timer(&_hcd->core_if->hc_xfer_timer[i]);
+ }
+#endif /* */
+}
+
+static void del_timers(dwc_otg_hcd_t *_hcd)
+{
+ del_xfer_timers(_hcd);
+ del_timer(&_hcd->conn_timer);
+}
+
+/**
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ */
+static void kill_urbs_in_qh_list(dwc_otg_hcd_t * _hcd,
+ struct list_head *_qh_list)
+{
+ struct list_head *qh_item;
+ dwc_otg_qh_t *qh;
+ struct list_head *qtd_item;
+ dwc_otg_qtd_t *qtd;
+
+ list_for_each(qh_item, _qh_list) {
+ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
+ for (qtd_item = qh->qtd_list.next; qtd_item != &qh->qtd_list;
+ qtd_item = qh->qtd_list.next) {
+ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
+ if (qtd->urb != NULL) {
+ dwc_otg_hcd_complete_urb(_hcd, qtd->urb,-ETIMEDOUT);
+ }
+ dwc_otg_hcd_qtd_remove_and_free(qtd);
+ }
+ }
+}
+
+/**
+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ */
+static void kill_all_urbs(dwc_otg_hcd_t *_hcd)
+{
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_deferred);
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_inactive);
+ kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_active);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inactive);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_ready);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned);
+ kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued);
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_disconnect_cb( void *_p )
+{
+ gintsts_data_t intr;
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_p);
+
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+
+ /*
+ * Set status flags for the hub driver.
+ */
+ dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ dwc_otg_hcd->flags.b.port_connect_status = 0;
+
+ /*
+ * Shutdown any transfers in process by clearing the Tx FIFO Empty
+ * interrupt mask and status bits and disabling subsequent host
+ * channel interrupts.
+ */
+ intr.d32 = 0;
+ intr.b.nptxfempty = 1;
+ intr.b.ptxfempty = 1;
+ intr.b.hcintr = 1;
+ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0);
+ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0);
+
+ del_timers(dwc_otg_hcd);
+
+ /*
+ * Turn off the vbus power only if the core has transitioned to device
+ * mode. If still in host mode, need to keep power on to detect a
+ * reconnection.
+ */
+ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
+ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
+ hprt0_data_t hprt0 = { .d32=0 };
+ DWC_PRINT("Disconnect: PortPower off\n");
+ hprt0.b.prtpwr = 0;
+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
+ }
+
+ dwc_otg_disable_host_interrupts( dwc_otg_hcd->core_if );
+ }
+
+ /* Respond with an error status to all URBs in the schedule. */
+ kill_all_urbs(dwc_otg_hcd);
+
+ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
+ /* Clean up any host channels that were in use. */
+ int num_channels;
+ int i;
+ dwc_hc_t *channel;
+ dwc_otg_hc_regs_t *hc_regs;
+ hcchar_data_t hcchar;
+
+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+
+ if (!dwc_otg_hcd->core_if->dma_enable) {
+ /* Flush out any channel requests in slave mode. */
+ for (i = 0; i < num_channels; i++) {
+ channel = dwc_otg_hcd->hc_ptr_array[i];
+ if (list_empty(&channel->hc_list_entry)) {
+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ channel = dwc_otg_hcd->hc_ptr_array[i];
+ if (list_empty(&channel->hc_list_entry)) {
+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ /* Halt the channel. */
+ hcchar.b.chdis = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ }
+
+ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel);
+ list_add_tail(&channel->hc_list_entry,
+ &dwc_otg_hcd->free_hc_list);
+ }
+ }
+ }
+
+ /* A disconnect will end the session so the B-Device is no
+ * longer a B-host. */
+ ((struct usb_hcd *)_p)->self.is_b_host = 0;
+
+ return 1;
+}
+
+/**
+ * Connection timeout function. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.
+ */
+void dwc_otg_hcd_connect_timeout( unsigned long _ptr )
+{
+ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)_ptr);
+ DWC_PRINT( "Connect Timeout\n");
+ DWC_ERROR( "Device Not Connected/Responding\n" );
+}
+
+/**
+ * Start the connection timer. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds. The
+ * timer is deleted if a port connect interrupt occurs before the
+ * timer expires.
+ */
+static void dwc_otg_hcd_start_connect_timer( dwc_otg_hcd_t *_hcd)
+{
+ init_timer( &_hcd->conn_timer );
+ _hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
+ _hcd->conn_timer.data = (unsigned long)0;
+ _hcd->conn_timer.expires = jiffies + (HZ*10);
+ add_timer( &_hcd->conn_timer );
+}
+
+/**
+ * HCD Callback function for session start of the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_session_start_cb( void *_p )
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_p);
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+ dwc_otg_hcd_start_connect_timer( dwc_otg_hcd );
+ return 1;
+}
+
+/**
+ * HCD Callback structure for handling mode switching.
+ */
+static dwc_otg_cil_callbacks_t hcd_cil_callbacks = {
+ .start = dwc_otg_hcd_start_cb,
+ .stop = dwc_otg_hcd_stop_cb,
+ .disconnect = dwc_otg_hcd_disconnect_cb,
+ .session_start = dwc_otg_hcd_session_start_cb,
+ .p = 0,
+};
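+/* These callbacks are registered with the core interface layer (CIL) in dwc_otg_hcd_init() via dwc_otg_cil_register_hcd_callbacks(). */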
+
+
+/**
+ * Reset tasklet function
+ */
+static void reset_tasklet_func (unsigned long data)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t*)data;
+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if;
+ hprt0_data_t hprt0;
+
+ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
+
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
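+	/* Hold the port reset for at least the 50 ms required by the USB 2.0 spec before clearing it. */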
+ mdelay (60);
+
+ hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ dwc_otg_hcd->flags.b.port_reset_change = 1;
+
+ return;
+}
+
+static struct tasklet_struct reset_tasklet = {
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = reset_tasklet_func,
+ .data = 0,
+};
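+/* The tasklet's data pointer is set to the dwc_otg_hcd instance in dwc_otg_hcd_init() below. */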
+
+/**
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ */
+int init_hcd_usecs(dwc_otg_hcd_t *_hcd);
+
+int __devinit dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device)
+{
+ struct usb_hcd *hcd = NULL;
+ dwc_otg_hcd_t *dwc_otg_hcd = NULL;
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+
+ int num_channels;
+ int i;
+ dwc_hc_t *channel;
+
+ int retval = 0;
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
+
+ /*
+ * Allocate memory for the base HCD plus the DWC OTG HCD.
+ * Initialize the base HCD.
+ */
+ hcd = usb_create_hcd(&dwc_otg_hc_driver, _dev, dev_name(_dev));
+ if (hcd == NULL) {
+ retval = -ENOMEM;
+ goto error1;
+ }
+ dev_set_drvdata(_dev, dwc_otg_device); /* fscz restore */
+ hcd->regs = otg_dev->base;
+ hcd->rsrc_start = (int)otg_dev->base;
+
+ hcd->self.otg_port = 1;
+
+ /* Initialize the DWC OTG HCD. */
+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+ dwc_otg_hcd->core_if = otg_dev->core_if;
+ otg_dev->hcd = dwc_otg_hcd;
+
+ /* Register the HCD CIL Callbacks */
+ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
+ &hcd_cil_callbacks, hcd);
+
+ /* Initialize the non-periodic schedule. */
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_deferred);
+
+ /* Initialize the periodic schedule. */
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+
+ /*
+ * Create a host channel descriptor for each host channel implemented
+ * in the controller. Initialize the channel descriptor array.
+ */
+ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL);
+ if (channel == NULL) {
+ retval = -ENOMEM;
+ DWC_ERROR("%s: host channel allocation failed\n", __func__);
+ goto error2;
+ }
+ memset(channel, 0, sizeof(dwc_hc_t));
+ channel->hc_num = i;
+ dwc_otg_hcd->hc_ptr_array[i] = channel;
+#ifdef DEBUG
+ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
+#endif
+
+ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel);
+ }
+
+ /* Initialize the Connection timeout timer. */
+ init_timer( &dwc_otg_hcd->conn_timer );
+
+ /* Initialize reset tasklet. */
+ reset_tasklet.data = (unsigned long) dwc_otg_hcd;
+ dwc_otg_hcd->reset_tasklet = &reset_tasklet;
+
+ /* Set device flags indicating whether the HCD supports DMA. */
+ if (otg_dev->core_if->dma_enable) {
+ DWC_PRINT("Using DMA mode\n");
+ //_dev->dma_mask = (void *)~0;
+ //_dev->coherent_dma_mask = ~0;
+ _dev->dma_mask = &dma_mask;
+ _dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ } else {
+ DWC_PRINT("Using Slave mode\n");
+ _dev->dma_mask = (void *)0;
+ _dev->coherent_dma_mask = 0;
+ }
+
+ init_hcd_usecs(dwc_otg_hcd);
+ /*
+ * Finish generic HCD initialization and start the HCD. This function
+ * allocates the DMA buffer pool, registers the USB bus, requests the
+ * IRQ line, and calls dwc_otg_hcd_start method.
+ */
+ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
+ if (retval < 0) {
+ goto error2;
+ }
+
+ /*
+ * Allocate space for storing data on status transactions. Normally no
+ * data is sent, but this space acts as a bit bucket. This must be
+ * done after usb_add_hcd since that function allocates the DMA buffer
+ * pool.
+ */
+ if (otg_dev->core_if->dma_enable) {
+ dwc_otg_hcd->status_buf =
+ dma_alloc_coherent(_dev,
+ DWC_OTG_HCD_STATUS_BUF_SIZE,
+ &dwc_otg_hcd->status_buf_dma,
+ GFP_KERNEL | GFP_DMA);
+ } else {
+ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE,
+ GFP_KERNEL);
+ }
+ if (dwc_otg_hcd->status_buf == NULL) {
+ retval = -ENOMEM;
+ DWC_ERROR("%s: status_buf allocation failed\n", __func__);
+ goto error3;
+ }
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
+ dev_name(_dev), hcd->self.busnum);
+
+ return 0;
+
+ /* Error conditions */
+error3:
+ usb_remove_hcd(hcd);
+error2:
+ dwc_otg_hcd_free(hcd);
+ usb_put_hcd(hcd);
+error1:
+ return retval;
+}
+
+/**
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void dwc_otg_hcd_remove(struct device *_dev)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_hcd_t *dwc_otg_hcd = otg_dev->hcd;
+ struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd);
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
+
+ /* Turn off all interrupts */
+ dwc_write_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0);
+ dwc_modify_reg32 (&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0);
+
+ usb_remove_hcd(hcd);
+
+ dwc_otg_hcd_free(hcd);
+
+ usb_put_hcd(hcd);
+
+ return;
+}
+
+
+/* =========================================================================
+ * Linux HC Driver Functions
+ * ========================================================================= */
+
+/**
+ * Initializes dynamic portions of the DWC_otg HCD state.
+ */
+static void hcd_reinit(dwc_otg_hcd_t *_hcd)
+{
+ struct list_head *item;
+ int num_channels;
+ int i;
+ dwc_hc_t *channel;
+
+ _hcd->flags.d32 = 0;
+
+ _hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched_active;
+ _hcd->available_host_channels = _hcd->core_if->core_params->host_channels;
+
+ /*
+ * Put all channels in the free channel list and clean up channel
+ * states.
+ */
+ item = _hcd->free_hc_list.next;
+ while (item != &_hcd->free_hc_list) {
+ list_del(item);
+ item = _hcd->free_hc_list.next;
+ }
+ num_channels = _hcd->core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ channel = _hcd->hc_ptr_array[i];
+ list_add_tail(&channel->hc_list_entry, &_hcd->free_hc_list);
+ dwc_otg_hc_cleanup(_hcd->core_if, channel);
+ }
+
+ /* Initialize the DWC core for host mode operation. */
+ dwc_otg_core_host_init(_hcd->core_if);
+}
+
+/** Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure. */
+int dwc_otg_hcd_start(struct usb_hcd *_hcd)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
+ dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+ struct usb_bus *bus;
+
+ // int retval;
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
+
+ bus = hcd_to_bus(_hcd);
+
+	/* Initialize the bus state. If the core is in device mode,
+	 * halt the USB bus and return. */
+ if (dwc_otg_is_device_mode (core_if)) {
+ _hcd->state = HC_STATE_HALT;
+ return 0;
+ }
+ _hcd->state = HC_STATE_RUNNING;
+
+ /* Initialize and connect root hub if one is not already attached */
+ if (bus->root_hub) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
+ /* Inform the HUB driver to resume. */
+ usb_hcd_resume_root_hub(_hcd);
+ }
+ else {
+#if 0
+ struct usb_device *udev;
+ udev = usb_alloc_dev(NULL, bus, 0);
+ if (!udev) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n");
+ return -ENODEV;
+ }
+ udev->speed = USB_SPEED_HIGH;
+ /* Not needed - VJ
+ if ((retval = usb_hcd_register_root_hub(udev, _hcd)) != 0) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error registering %d\n", retval);
+ return -ENODEV;
+ }
+ */
+#else
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n");
+#endif
+ }
+
+ hcd_reinit(dwc_otg_hcd);
+
+ return 0;
+}
+
+static void qh_list_free(dwc_otg_hcd_t *_hcd, struct list_head *_qh_list)
+{
+ struct list_head *item;
+ dwc_otg_qh_t *qh;
+
+ if (_qh_list->next == NULL) {
+ /* The list hasn't been initialized yet. */
+ return;
+ }
+
+ /* Ensure there are no QTDs or URBs left. */
+ kill_urbs_in_qh_list(_hcd, _qh_list);
+
+ for (item = _qh_list->next; item != _qh_list; item = _qh_list->next) {
+ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+ dwc_otg_hcd_qh_remove_and_free(_hcd, qh);
+ }
+}
+
+/**
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ */
+void dwc_otg_hcd_stop(struct usb_hcd *_hcd)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
+ hprt0_data_t hprt0 = { .d32=0 };
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
+
+ /* Turn off all host-specific interrupts. */
+ dwc_otg_disable_host_interrupts( dwc_otg_hcd->core_if );
+
+ /*
+ * The root hub should be disconnected before this function is called.
+ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+ * and the QH lists (via ..._hcd_endpoint_disable).
+ */
+
+ /* Turn off the vbus power */
+ DWC_PRINT("PortPower off\n");
+ hprt0.b.prtpwr = 0;
+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32);
+
+ return;
+}
+
+
+/** Returns the current frame number. */
+int dwc_otg_hcd_get_frame_number(struct usb_hcd *_hcd)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+ hfnum_data_t hfnum;
+
+ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->
+ host_if->host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum);
+#endif
+ return hfnum.b.frnum;
+}
+
+/**
+ * Frees secondary storage associated with the dwc_otg_hcd structure contained
+ * in the struct usb_hcd field.
+ */
+void dwc_otg_hcd_free(struct usb_hcd *_hcd)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+ int i;
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
+
+ del_timers(dwc_otg_hcd);
+
+ /* Free memory for QH/QTD lists */
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_deferred);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
+
+ /* Free memory for the host channels. */
+ for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i];
+ if (hc != NULL) {
+ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc);
+ kfree(hc);
+ }
+ }
+
+ if (dwc_otg_hcd->core_if->dma_enable) {
+ if (dwc_otg_hcd->status_buf_dma) {
+ dma_free_coherent(_hcd->self.controller,
+ DWC_OTG_HCD_STATUS_BUF_SIZE,
+ dwc_otg_hcd->status_buf,
+ dwc_otg_hcd->status_buf_dma);
+ }
+ } else if (dwc_otg_hcd->status_buf != NULL) {
+ kfree(dwc_otg_hcd->status_buf);
+ }
+
+ return;
+}
+
+
+#ifdef DEBUG
+static void dump_urb_info(struct urb *_urb, char* _fn_name)
+{
+ DWC_PRINT("%s, urb %p\n", _fn_name, _urb);
+ DWC_PRINT(" Device address: %d\n", usb_pipedevice(_urb->pipe));
+ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(_urb->pipe),
+ (usb_pipein(_urb->pipe) ? "IN" : "OUT"));
+ DWC_PRINT(" Endpoint type: %s\n",
+ ({char *pipetype;
+ switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL: pipetype = "CONTROL"; break;
+ case PIPE_BULK: pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
+ default: pipetype = "UNKNOWN"; break;
+ }; pipetype;}));
+ DWC_PRINT(" Speed: %s\n",
+ ({char *speed;
+ switch (_urb->dev->speed) {
+ case USB_SPEED_HIGH: speed = "HIGH"; break;
+ case USB_SPEED_FULL: speed = "FULL"; break;
+ case USB_SPEED_LOW: speed = "LOW"; break;
+ default: speed = "UNKNOWN"; break;
+ }; speed;}));
+ DWC_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(_urb->dev, _urb->pipe, usb_pipeout(_urb->pipe)));
+ DWC_PRINT(" Data buffer length: %d\n", _urb->transfer_buffer_length);
+ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n",
+ _urb->transfer_buffer, (void *)_urb->transfer_dma);
+ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n",
+ _urb->setup_packet, (void *)_urb->setup_dma);
+ DWC_PRINT(" Interval: %d\n", _urb->interval);
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ for (i = 0; i < _urb->number_of_packets; i++) {
+ DWC_PRINT(" ISO Desc %d:\n", i);
+ DWC_PRINT(" offset: %d, length %d\n",
+ _urb->iso_frame_desc[i].offset,
+ _urb->iso_frame_desc[i].length);
+ }
+ }
+}
+
+static void dump_channel_info(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *qh)
+{
+ if (qh->channel != NULL) {
+ dwc_hc_t *hc = qh->channel;
+ struct list_head *item;
+ dwc_otg_qh_t *qh_item;
+ int num_channels = _hcd->core_if->core_params->host_channels;
+ int i;
+
+ dwc_otg_hc_regs_t *hc_regs;
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+
+ hc_regs = _hcd->core_if->host_if->hc_regs[hc->hc_num];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+ hcdma = dwc_read_reg32(&hc_regs->hcdma);
+
+ DWC_PRINT(" Assigned to channel %p:\n", hc);
+ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->ep_is_in);
+ DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+ DWC_PRINT(" max_packet: %d\n", hc->max_packet);
+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
+ DWC_PRINT(" halt_status: %d\n", hc->halt_status);
+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
+ DWC_PRINT(" qh: %p\n", hc->qh);
+ DWC_PRINT(" NP inactive sched:\n");
+ list_for_each(item, &_hcd->non_periodic_sched_inactive) {
+ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+ DWC_PRINT(" %p\n", qh_item);
+		}
+		DWC_PRINT("  NP deferred sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_deferred) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+		DWC_PRINT("  NP active sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_active) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT("    %p\n", qh_item);
+		}
+		DWC_PRINT("  Channels:\n");
+ for (i = 0; i < num_channels; i++) {
+ dwc_hc_t *hc = _hcd->hc_ptr_array[i];
+ DWC_PRINT(" %2d: %p\n", i, hc);
+ }
+ }
+}
+#endif // DEBUG
+
+/** Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB. */
+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
+ struct urb *_urb,
+ gfp_t _mem_flags)
+{
+ unsigned long flags;
+ int retval;
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
+ dwc_otg_qtd_t *qtd;
+
+ local_irq_save(flags);
+ retval = usb_hcd_link_urb_to_ep(_hcd, _urb);
+ if (retval) {
+ local_irq_restore(flags);
+ return retval;
+ }
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue");
+ }
+#endif // DEBUG
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /* No longer connected. */
+ local_irq_restore(flags);
+ return -ENODEV;
+ }
+
+ qtd = dwc_otg_hcd_qtd_create (_urb);
+ if (qtd == NULL) {
+ local_irq_restore(flags);
+ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
+ return -ENOMEM;
+ }
+
+ retval = dwc_otg_hcd_qtd_add (qtd, dwc_otg_hcd);
+ if (retval < 0) {
+ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
+ "Error status %d\n", retval);
+ dwc_otg_hcd_qtd_free(qtd);
+ }
+
+ local_irq_restore (flags);
+ return retval;
+}
+
+/** Aborts/cancels a USB transfer request. Always returns 0 to indicate
+ * success. */
+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
+{
+ unsigned long flags;
+ dwc_otg_hcd_t *dwc_otg_hcd;
+ dwc_otg_qtd_t *urb_qtd;
+ dwc_otg_qh_t *qh;
+ int retval;
+ //struct usb_host_endpoint *_ep = NULL;
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
+
+ local_irq_save(flags);
+
+ retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status);
+ if (retval) {
+ local_irq_restore(flags);
+ return retval;
+ }
+
+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+ urb_qtd = (dwc_otg_qtd_t *)_urb->hcpriv;
+ if (urb_qtd == NULL) {
+		printk(KERN_WARNING "urb_qtd is NULL for urb %p\n", _urb);
+ goto done;
+ }
+ qh = (dwc_otg_qh_t *) urb_qtd->qtd_qh_ptr;
+ if (qh == NULL) {
+ goto done;
+ }
+
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue");
+ if (urb_qtd == qh->qtd_in_process) {
+ dump_channel_info(dwc_otg_hcd, qh);
+ }
+ }
+#endif // DEBUG
+
+ if (urb_qtd == qh->qtd_in_process) {
+ /* The QTD is in process (it has been assigned to a channel). */
+
+ if (dwc_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * If still connected (i.e. in host mode), halt the
+ * channel so it can be used for other transfers. If
+ * no longer connected, the host registers can't be
+ * written to halt the channel since the core is in
+ * device mode.
+ */
+ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel,
+ DWC_OTG_HC_XFER_URB_DEQUEUE);
+ }
+ }
+
+ /*
+ * Free the QTD and clean up the associated QH. Leave the QH in the
+ * schedule if it has any remaining QTDs.
+ */
+ dwc_otg_hcd_qtd_remove_and_free(urb_qtd);
+ if (urb_qtd == qh->qtd_in_process) {
+ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
+ qh->channel = NULL;
+ qh->qtd_in_process = NULL;
+ } else if (list_empty(&qh->qtd_list)) {
+ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
+ }
+
+done:
+ local_irq_restore(flags);
+ _urb->hcpriv = NULL;
+
+ /* Higher layer software sets URB status. */
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+ usb_hcd_giveback_urb(_hcd, _urb, _status);
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+ DWC_PRINT(" urb->status = %d\n", _urb->status);
+ }
+
+ return 0;
+}
+
+
+/** Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued. */
+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd,
+ struct usb_host_endpoint *_ep)
+
+{
+ dwc_otg_qh_t *qh;
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
+ "endpoint=%d\n", _ep->desc.bEndpointAddress,
+ dwc_ep_addr_to_endpoint(_ep->desc.bEndpointAddress));
+
+ qh = (dwc_otg_qh_t *)(_ep->hcpriv);
+ if (qh != NULL) {
+#ifdef DEBUG
+ /** Check that the QTD list is really empty */
+ if (!list_empty(&qh->qtd_list)) {
+ DWC_WARN("DWC OTG HCD EP DISABLE:"
+ " QTD List for this endpoint is not empty\n");
+ }
+#endif // DEBUG
+
+ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh);
+ _ep->hcpriv = NULL;
+ }
+
+ return;
+}
+extern int dwc_irq;
+/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs */
+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *_hcd)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
+
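+	/* Acknowledge the interrupt at the Infineon/Lantiq interrupt controller before dispatching to the common DWC_otg interrupt handler. */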
+ mask_and_ack_ifx_irq (dwc_irq);
+ return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd));
+}
+
+/** Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0. */
+int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_buf)
+{
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
+
+ _buf[0] = 0;
+ _buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change ||
+ dwc_otg_hcd->flags.b.port_reset_change ||
+ dwc_otg_hcd->flags.b.port_enable_change ||
+ dwc_otg_hcd->flags.b.port_suspend_change ||
+ dwc_otg_hcd->flags.b.port_over_current_change) << 1;
+
+#ifdef DEBUG
+ if (_buf[0]) {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
+ " Root port status changed\n");
+ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
+ dwc_otg_hcd->flags.b.port_connect_status_change);
+ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
+ dwc_otg_hcd->flags.b.port_reset_change);
+ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
+ dwc_otg_hcd->flags.b.port_enable_change);
+ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
+ dwc_otg_hcd->flags.b.port_suspend_change);
+ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
+ dwc_otg_hcd->flags.b.port_over_current_change);
+ }
+#endif // DEBUG
+ return (_buf[0] != 0);
+}
+
+#ifdef DWC_HS_ELECT_TST
+/*
+ * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ *
+ * This code was copied from our userspace app "hset". It sends a
+ * Get Device Descriptor control sequence in two parts, first the
+ * Setup packet by itself, followed some time later by the In and
+ * Ack packets. Rather than trying to figure out how to add this
+ * functionality to the normal driver code, we just hijack the
+ * hardware, using these two functions to drive the hardware
+ * directly.
+ */
+
+dwc_otg_core_global_regs_t *global_regs;
+dwc_otg_host_global_regs_t *hc_global_regs;
+dwc_otg_hc_regs_t *hc_regs;
+uint32_t *data_fifo;
+
+static void do_setup(void)
+{
+ gintsts_data_t gintsts;
+ hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar;
+ haint_data_t haint;
+ hcint_data_t hcint;
+
+ /* Enable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /*
+ * Send Setup packet (Get Device Descriptor)
+ */
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ // hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ //sleep(1);
+ MDELAY(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ /* Fill FIFO with Setup data for Get Device Descriptor */
+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
+ dwc_write_reg32(data_fifo++, 0x01000680);
+ dwc_write_reg32(data_fifo++, 0x00080000);
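+	/* The two words above hold the 8-byte GET_DESCRIPTOR(Device) setup packet
+	 * in little-endian byte order: bmRequestType=0x80, bRequest=0x06
+	 * (GET_DESCRIPTOR), wValue=0x0100 (Device), wIndex=0x0000, wLength=0x0008. */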
+
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Disable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+}
+
+static void do_in_ack(void)
+{
+ gintsts_data_t gintsts;
+ hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar;
+ haint_data_t haint;
+ hcint_data_t hcint;
+ host_grxsts_data_t grxsts;
+
+ /* Enable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /*
+ * Receive Control In packet
+ */
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ //sleep(1);
+ MDELAY(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 1;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for receive status queue interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read RXSTS */
+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer */
+ if (grxsts.b.bcnt > 0) {
+ int i;
+ int word_count = (grxsts.b.bcnt + 3) / 4;
+
+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
+
+ for (i = 0; i < word_count; i++) {
+ (void)dwc_read_reg32(data_fifo++);
+ }
+ }
+
+ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt);
+ break;
+
+ default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
+ break;
+ }
+
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for receive status queue interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read RXSTS */
+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ break;
+
+ default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
+ break;
+ }
+
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ // usleep(100000);
+ // mdelay(100);
+ MDELAY(1);
+
+ /*
+ * Send handshake packet
+ */
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ //sleep(1);
+ MDELAY(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 0;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Disable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+}
+#endif /* DWC_HS_ELECT_TST */
+
+/** Handles hub class-specific requests.*/
+int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd,
+ u16 _typeReq,
+ u16 _wValue,
+ u16 _wIndex,
+ char *_buf,
+ u16 _wLength)
+{
+ int retval = 0;
+
+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd (_hcd);
+ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd (_hcd)->core_if;
+ struct usb_hub_descriptor *desc;
+ hprt0_data_t hprt0 = {.d32 = 0};
+
+ uint32_t port_status;
+
+ switch (_typeReq) {
+ case ClearHubFeature:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearHubFeature 0x%x\n", _wValue);
+ switch (_wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR ("DWC OTG HCD - "
+ "ClearHubFeature request %xh unknown\n", _wValue);
+ }
+ break;
+ case ClearPortFeature:
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+
+ switch (_wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ DWC_DEBUGPL (DBG_ANY, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtena = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ /* Clear Resume bit */
+ mdelay (100);
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtpwr = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+ /* Port indicator not supported */
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ /* Clears the driver's internal connect status change
+ * flag */
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+ dwc_otg_hcd->flags.b.port_connect_status_change = 0;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* Clears the driver's internal Port Reset Change
+ * flag */
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+ dwc_otg_hcd->flags.b.port_reset_change = 0;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ /* Clears the driver's internal Port
+ * Enable/Disable Change flag */
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+ dwc_otg_hcd->flags.b.port_enable_change = 0;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Clears the driver's internal Port Suspend
+ * Change flag, which is set when resume signaling on
+ * the host port is complete */
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+ dwc_otg_hcd->flags.b.port_suspend_change = 0;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+ dwc_otg_hcd->flags.b.port_over_current_change = 0;
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR ("DWC OTG HCD - "
+ "ClearPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ }
+ break;
+ case GetHubDescriptor:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetHubDescriptor\n");
+ desc = (struct usb_hub_descriptor *)_buf;
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = 0x08;
+ desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff;
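+		/* bDescriptorType 0x29 is the hub class descriptor type and
+		 * wHubCharacteristics 0x08 reports per-port over-current protection.
+		 * DeviceRemovable[0] = 0 marks the single port as removable; the 0xff
+		 * in the following byte corresponds to the legacy PortPwrCtrlMask
+		 * field for a one-port hub, which must be all ones. */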
+ break;
+ case GetHubStatus:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetHubStatus\n");
+ memset (_buf, 0, 4);
+ break;
+ case GetPortStatus:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetPortStatus\n");
+
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+
+ port_status = 0;
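+		/* GetPortStatus returns wPortStatus in the low 16 bits and wPortChange
+		 * in the high 16 bits; the USB_PORT_FEAT_C_* change features are
+		 * numbered 16 and up, so the shifts below place the change flags in
+		 * the upper half of port_status. */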
+
+ if (dwc_otg_hcd->flags.b.port_connect_status_change)
+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+
+ if (dwc_otg_hcd->flags.b.port_enable_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+
+ if (dwc_otg_hcd->flags.b.port_suspend_change)
+ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+
+ if (dwc_otg_hcd->flags.b.port_reset_change)
+ port_status |= (1 << USB_PORT_FEAT_C_RESET);
+
+ if (dwc_otg_hcd->flags.b.port_over_current_change) {
+ DWC_ERROR("Device Not Supported\n");
+ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ printk("DISCONNECTED PORT\n");
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+#if 1 // winder.
+ *((u32 *) _buf) = cpu_to_le32(port_status);
+#else
+ *((__le32 *) _buf) = cpu_to_le32(port_status);
+#endif
+ break;
+ }
+
+ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
+
+ if (hprt0.b.prtconnsts)
+ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+
+ if (hprt0.b.prtena)
+ port_status |= (1 << USB_PORT_FEAT_ENABLE);
+
+ if (hprt0.b.prtsusp)
+ port_status |= (1 << USB_PORT_FEAT_SUSPEND);
+
+ if (hprt0.b.prtovrcurract)
+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+
+ if (hprt0.b.prtrst)
+ port_status |= (1 << USB_PORT_FEAT_RESET);
+
+ if (hprt0.b.prtpwr)
+ port_status |= (1 << USB_PORT_FEAT_POWER);
+
+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
+ port_status |= USB_PORT_STAT_HIGH_SPEED;
+
+ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+
+ if (hprt0.b.prttstctl)
+ port_status |= (1 << USB_PORT_FEAT_TEST);
+
+ /* USB_PORT_FEAT_INDICATOR is not supported, always reported as 0 */
+#if 1 // winder.
+ *((u32 *) _buf) = cpu_to_le32(port_status);
+#else
+ *((__le32 *) _buf) = cpu_to_le32(port_status);
+#endif
+
+ break;
+ case SetHubFeature:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetHubFeature\n");
+ /* No HUB features supported */
+ break;
+ case SetPortFeature:
+ if (_wValue != USB_PORT_FEAT_TEST && (!_wIndex || _wIndex > 1))
+ goto error;
+
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return without doing anything since the port
+ * register can't be written if the core is in device
+ * mode.
+ */
+ break;
+ }
+
+ switch (_wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+ if (_hcd->self.otg_port == _wIndex
+ && _hcd->self.b_hnp_enable) {
+ gotgctl_data_t gotgctl = {.d32=0};
+ gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32(&core_if->core_global_regs->
+ gotgctl, 0, gotgctl.d32);
+ core_if->op_state = A_SUSPEND;
+ }
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtsusp = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ //DWC_PRINT( "SUSPEND: HPRT0=%0x\n", hprt0.d32);
+ /* Suspend the Phy Clock */
+ {
+ pcgcctl_data_t pcgcctl = {.d32=0};
+ pcgcctl.b.stoppclk = 1;
+ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
+ }
+
+ /* For HNP the bus must be suspended for at least 200ms.*/
+ if (_hcd->self.b_hnp_enable) {
+ mdelay(200);
+ //DWC_PRINT( "SUSPEND: wait complete! (%d)\n", _hcd->state);
+ }
+ break;
+ case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_RESET:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_RESET\n");
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ /* TODO: Is this for OTG protocol??
+ * We should remove OTG support entirely for the Danube system,
+ * but we may need it again in the future.
+ */
+#if 1 // winder
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+#else
+ /* When B-Host the Port reset bit is set in
+ * the Start HCD Callback function, so that
+ * the reset is started within 1ms of the HNP
+ * success interrupt. */
+ if (!_hcd->self.is_b_host) {
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ }
+#endif
+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+ MDELAY (60);
+ hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+
+#ifdef DWC_HS_ELECT_TST
+ case USB_PORT_FEAT_TEST:
+ {
+ uint32_t t;
+ gintmsk_data_t gintmsk;
+
+ t = (_wIndex >> 8); /* MSB wIndex USB */
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t);
+ printk("USB_PORT_FEAT_TEST %d\n", t);
+ if (t < 6) {
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prttstctl = t;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ } else {
+ /* Setup global vars with reg addresses (quick and
+ * dirty hack, should be cleaned up)
+ */
+ global_regs = core_if->core_global_regs;
+ hc_global_regs = core_if->host_if->host_global_regs;
+ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500);
+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000);
+
+ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive suspend on the root port */
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtsusp = 1;
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive resume on the root port */
+ hprt0.d32 = dwc_otg_read_hprt0 (core_if);
+ hprt0.b.prtsusp = 0;
+ hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ mdelay(100);
+
+ /* Clear the resume bit */
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
+ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15-second delay so nothing else happens for a while */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
+ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15-second delay so nothing else happens for a while */
+ mdelay(15000);
+
+ /* Send the In and Ack packets */
+ do_in_ack();
+
+ /* 15-second delay so nothing else happens for a while */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32);
+ }
+ }
+ break;
+ }
+#endif /* DWC_HS_ELECT_TST */
+
+ case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL (DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR ("DWC OTG HCD - "
+ "SetPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ break;
+ }
+ break;
+ default:
+error:
+ retval = -EINVAL;
+ DWC_WARN ("DWC OTG HCD - "
+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
+ _typeReq, _wIndex, _wValue);
+ break;
+ }
+
+ return retval;
+}
+
+
+/**
+ * Assigns transactions from a QTD to a free host channel and initializes the
+ * host channel to perform the transactions. The host channel is removed from
+ * the free list.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _qh Transactions from the first QTD for this QH are selected and
+ * assigned to a free host channel.
+ */
+static void assign_and_init_hc(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh)
+{
+ dwc_hc_t *hc;
+ dwc_otg_qtd_t *qtd;
+ struct urb *urb;
+
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _hcd, _qh);
+
+ hc = list_entry(_hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
+
+ /* Remove the host channel from the free list. */
+ list_del_init(&hc->hc_list_entry);
+
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+ urb = qtd->urb;
+ _qh->channel = hc;
+ _qh->qtd_in_process = qtd;
+
+ /*
+ * Use usb_pipedevice to determine device address. This address is
+ * 0 before the SET_ADDRESS command and the correct address afterward.
+ */
+ hc->dev_addr = usb_pipedevice(urb->pipe);
+ hc->ep_num = usb_pipeendpoint(urb->pipe);
+
+ if (urb->dev->speed == USB_SPEED_LOW) {
+ hc->speed = DWC_OTG_EP_SPEED_LOW;
+ } else if (urb->dev->speed == USB_SPEED_FULL) {
+ hc->speed = DWC_OTG_EP_SPEED_FULL;
+ } else {
+ hc->speed = DWC_OTG_EP_SPEED_HIGH;
+ }
+ hc->max_packet = dwc_max_packet(_qh->maxp);
+
+ hc->xfer_started = 0;
+ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
+ hc->error_state = (qtd->error_count > 0);
+ hc->halt_on_queue = 0;
+ hc->halt_pending = 0;
+ hc->requests = 0;
+
+ /*
+ * The following values may be modified in the transfer type section
+ * below. The xfer_len value may be reduced when the transfer is
+ * started to accommodate the max widths of the XferSize and PktCnt
+ * fields in the HCTSIZn register.
+ */
+ hc->do_ping = _qh->ping_state;
+ hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
+ hc->data_pid_start = _qh->data_toggle;
+ hc->multi_count = 1;
+
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->transfer_dma + urb->actual_length;
+ } else {
+ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length;
+ }
+ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
+ hc->xfer_count = 0;
+
+ /*
+ * Set the split attributes
+ */
+ hc->do_split = 0;
+ if (_qh->do_split) {
+ hc->do_split = 1;
+ hc->xact_pos = qtd->isoc_split_pos;
+ hc->complete_split = qtd->complete_split;
+ hc->hub_addr = urb->dev->tt->hub->devnum;
+ hc->port_addr = urb->dev->ttport;
+ }
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
+ switch (qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP:
+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
+ hc->do_ping = 0;
+ hc->ep_is_in = 0;
+ hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->setup_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *)urb->setup_packet;
+ }
+ hc->xfer_len = 8;
+ break;
+ case DWC_OTG_CONTROL_DATA:
+ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
+ hc->data_pid_start = qtd->data_toggle;
+ break;
+ case DWC_OTG_CONTROL_STATUS:
+ /*
+ * Direction is opposite of data direction or IN if no
+ * data.
+ */
+ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n");
+ if (urb->transfer_buffer_length == 0) {
+ hc->ep_is_in = 1;
+ } else {
+ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN);
+ }
+ if (hc->ep_is_in) {
+ hc->do_ping = 0;
+ }
+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
+ hc->xfer_len = 0;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)_hcd->status_buf_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *)_hcd->status_buf;
+ }
+ break;
+ }
+ break;
+ case PIPE_BULK:
+ hc->ep_type = DWC_OTG_EP_TYPE_BULK;
+ break;
+ case PIPE_INTERRUPT:
+ hc->ep_type = DWC_OTG_EP_TYPE_INTR;
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
+ hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->transfer_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *)urb->transfer_buffer;
+ }
+ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
+ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+
+ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
+ if (hc->xfer_len <= 188) {
+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ }
+ else {
+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
+ }
+ }
+ }
+ break;
+ }
+
+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * This value may be modified when the transfer is started to
+ * reflect the actual transfer length.
+ */
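+		/* For high-bandwidth interrupt/isochronous endpoints, wMaxPacketSize
+		 * also encodes the number of transactions per (micro)frame (up to 3);
+		 * dwc_hb_mult() is used here to extract that count. */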
+ hc->multi_count = dwc_hb_mult(_qh->maxp);
+ }
+
+ dwc_otg_hc_init(_hcd->core_if, hc);
+ hc->qh = _qh;
+}
+#define DEBUG_HOST_CHANNELS
+#ifdef DEBUG_HOST_CHANNELS
+static int last_sel_trans_num_per_scheduled = 0;
+module_param(last_sel_trans_num_per_scheduled, int, 0444);
+
+static int last_sel_trans_num_nonper_scheduled = 0;
+module_param(last_sel_trans_num_nonper_scheduled, int, 0444);
+
+static int last_sel_trans_num_avail_hc_at_start = 0;
+module_param(last_sel_trans_num_avail_hc_at_start, int, 0444);
+
+static int last_sel_trans_num_avail_hc_at_end = 0;
+module_param(last_sel_trans_num_avail_hc_at_end, int, 0444);
+#endif /* DEBUG_HOST_CHANNELS */
+
+/**
+ * This function selects transactions from the HCD transfer schedule and
+ * assigns them to available host channels. It is called from HCD interrupt
+ * handler functions.
+ *
+ * @param _hcd The HCD state structure.
+ *
+ * @return The types of new transactions that were assigned to host channels.
+ */
+dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
+{
+ struct list_head *qh_ptr;
+ dwc_otg_qh_t *qh;
+ int num_channels;
+ unsigned long flags;
+ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
+#endif /* */
+
+#ifdef DEBUG_HOST_CHANNELS
+ last_sel_trans_num_per_scheduled = 0;
+ last_sel_trans_num_nonper_scheduled = 0;
+ last_sel_trans_num_avail_hc_at_start = _hcd->available_host_channels;
+#endif /* DEBUG_HOST_CHANNELS */
+
+ /* Process entries in the periodic ready list. */
+ num_channels = _hcd->core_if->core_params->host_channels;
+ qh_ptr = _hcd->periodic_sched_ready.next;
+ while (qh_ptr != &_hcd->periodic_sched_ready
+ && !list_empty(&_hcd->free_hc_list)) {
+
+ // Make sure we leave one channel for non-periodic transactions.
+ local_irq_save(flags);
+ if (_hcd->available_host_channels <= 1) {
+ local_irq_restore(flags);
+ break;
+ }
+ _hcd->available_host_channels--;
+ local_irq_restore(flags);
+#ifdef DEBUG_HOST_CHANNELS
+ last_sel_trans_num_per_scheduled++;
+#endif /* DEBUG_HOST_CHANNELS */
+
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ assign_and_init_hc(_hcd, qh);
+
+ /*
+ * Move the QH from the periodic ready schedule to the
+ * periodic assigned schedule.
+ */
+ qh_ptr = qh_ptr->next;
+ local_irq_save(flags);
+ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ local_irq_restore(flags);
+ ret_val = DWC_OTG_TRANSACTION_PERIODIC;
+ }
+
+ /*
+ * Process entries in the deferred portion of the non-periodic list.
+ * A NAK moved them here; once their scheduled frame has been
+ * reached they are moved back to the non-periodic inactive schedule.
+ */
+ qh_ptr = _hcd->non_periodic_sched_deferred.next;
+ while (qh_ptr != &_hcd->non_periodic_sched_deferred) {
+ uint16_t frame_number =
+ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ qh_ptr = qh_ptr->next;
+
+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
+ // NAK did this
+ /*
+ * Move the QH from the non periodic deferred schedule to
+ * the non periodic inactive schedule.
+ */
+ local_irq_save(flags);
+ list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_inactive);
+ local_irq_restore(flags);
+ }
+ }
+
+ /*
+ * Process entries in the inactive portion of the non-periodic
+ * schedule. Some free host channels may not be used if they are
+ * reserved for periodic transfers.
+ */
+ qh_ptr = _hcd->non_periodic_sched_inactive.next;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ while (qh_ptr != &_hcd->non_periodic_sched_inactive
+ && !list_empty(&_hcd->free_hc_list)) {
+
+ local_irq_save(flags);
+ if (_hcd->available_host_channels < 1) {
+ local_irq_restore(flags);
+ break;
+ }
+ _hcd->available_host_channels--;
+ local_irq_restore(flags);
+#ifdef DEBUG_HOST_CHANNELS
+ last_sel_trans_num_nonper_scheduled++;
+#endif /* DEBUG_HOST_CHANNELS */
+
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ assign_and_init_hc(_hcd, qh);
+
+ /*
+ * Move the QH from the non-periodic inactive schedule to the
+ * non-periodic active schedule.
+ */
+ qh_ptr = qh_ptr->next;
+ local_irq_save(flags);
+ list_move(&qh->qh_list_entry, &_hcd->non_periodic_sched_active);
+ local_irq_restore(flags);
+
+ if (ret_val == DWC_OTG_TRANSACTION_NONE) {
+ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
+ } else {
+ ret_val = DWC_OTG_TRANSACTION_ALL;
+ }
+
+ }
+#ifdef DEBUG_HOST_CHANNELS
+ last_sel_trans_num_avail_hc_at_end = _hcd->available_host_channels;
+#endif /* DEBUG_HOST_CHANNELS */
+
+ return ret_val;
+}
+
+/**
+ * Attempts to queue a single transaction request for a host channel
+ * associated with either a periodic or non-periodic transfer. This function
+ * assumes that there is space available in the appropriate request queue. For
+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
+ * is available in the appropriate Tx FIFO.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _hc Host channel descriptor associated with either a periodic or
+ * non-periodic transfer.
+ * @param _fifo_dwords_avail Number of DWORDs available in the periodic Tx
+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
+ * transfers.
+ *
+ * @return 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO.
+ */
+static int queue_transaction(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ uint16_t _fifo_dwords_avail)
+{
+ int retval;
+
+ if (_hcd->core_if->dma_enable) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ _hc->qh->ping_state = 0;
+ }
+ retval = 0;
+ } else if (_hc->halt_pending) {
+ /* Don't queue a request if the channel has been halted. */
+ retval = 0;
+ } else if (_hc->halt_on_queue) {
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _hc->halt_status);
+ retval = 0;
+ } else if (_hc->do_ping) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ }
+ retval = 0;
+ } else if (!_hc->ep_is_in ||
+ _hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
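+		/* Slave-mode OUT or SETUP: data is pushed through the Tx FIFO, so a
+		 * request is only queued if the FIFO has room for at least one
+		 * maximum-size packet; the available space is given in 32-bit words. */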
+ if ((_fifo_dwords_avail * 4) >= _hc->max_packet) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ retval = 1;
+ } else {
+ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc);
+ }
+ } else {
+ retval = -1;
+ }
+ } else {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ retval = 1;
+ } else {
+ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc);
+ }
+ }
+
+ return retval;
+}
+
+/**
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ */
+static void process_non_periodic_channels(dwc_otg_hcd_t *_hcd)
+{
+ gnptxsts_data_t tx_status;
+ struct list_head *orig_qh_ptr;
+ dwc_otg_qh_t *qh;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+ int more_to_do = 0;
+
+ dwc_otg_core_global_regs_t *global_regs = _hcd->core_if->core_global_regs;
+
+ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
+ tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
+ tx_status.b.nptxfspcavail);
+#endif
+ /*
+ * Keep track of the starting point. Skip over the start-of-list
+ * entry.
+ */
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ orig_qh_ptr = _hcd->non_periodic_qh_ptr;
+
+ /*
+ * Process once through the active list or until no more space is
+ * available in the request queue or the Tx FIFO.
+ */
+ do {
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ if (!_hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+
+ qh = list_entry(_hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ status = queue_transaction(_hcd, qh->channel, tx_status.b.nptxfspcavail);
+
+ if (status > 0) {
+ more_to_do = 1;
+ } else if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+
+ /* Advance to next QH, skipping start-of-list entry. */
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+
+ } while (_hcd->non_periodic_qh_ptr != orig_qh_ptr);
+
+ if (!_hcd->core_if->dma_enable) {
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ intr_mask.b.nptxfempty = 1;
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
+ tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n",
+ tx_status.b.nptxfspcavail);
+#endif
+ if (more_to_do || no_queue_space || no_fifo_space) {
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the non-periodic
+ * Tx FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
+ }
+ }
+}
+
+/**
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ */
+static void process_periodic_channels(dwc_otg_hcd_t *_hcd)
+{
+ hptxsts_data_t tx_status;
+ struct list_head *qh_ptr;
+ dwc_otg_qh_t *qh;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+
+ dwc_otg_host_global_regs_t *host_regs;
+ host_regs = _hcd->core_if->host_if->host_global_regs;
+
+ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n",
+ tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n",
+ tx_status.b.ptxfspcavail);
+#endif
+
+ qh_ptr = _hcd->periodic_sched_assigned.next;
+ while (qh_ptr != &_hcd->periodic_sched_assigned) {
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ if (tx_status.b.ptxqspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+
+ /*
+ * Set a flag if we're queuing high-bandwidth in slave mode.
+ * The flag prevents any halts from being queued while the multiple
+ * packets of a high-bandwidth transfer are still being queued.
+ */
+ if ((!_hcd->core_if->dma_enable) &&
+ (qh->channel->multi_count > 1))
+ {
+ _hcd->core_if->queuing_high_bandwidth = 1;
+ }
+
+ status = queue_transaction(_hcd, qh->channel, tx_status.b.ptxfspcavail);
+ if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+
+ /*
+ * In Slave mode, stay on the current transfer until there is
+ * nothing more to do or the high-bandwidth request count is
+ * reached. In DMA mode, only one request needs to be queued; the
+ * controller automatically handles multiple packets for
+ * high-bandwidth transfers.
+ */
+ if (_hcd->core_if->dma_enable ||
+ (status == 0 ||
+ qh->channel->requests == qh->channel->multi_count)) {
+ qh_ptr = qh_ptr->next;
+ /*
+ * Move the QH from the periodic assigned schedule to
+ * the periodic queued schedule.
+ */
+ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_queued);
+
+ /* done queuing high bandwidth */
+ _hcd->core_if->queuing_high_bandwidth = 0;
+ }
+ }
+
+ if (!_hcd->core_if->dma_enable) {
+ dwc_otg_core_global_regs_t *global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+
+ global_regs = _hcd->core_if->core_global_regs;
+ intr_mask.b.ptxfempty = 1;
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n",
+ tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n",
+ tx_status.b.ptxfspcavail);
+#endif
+ if (!(list_empty(&_hcd->periodic_sched_assigned)) ||
+ no_queue_space || no_fifo_space) {
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the periodic Tx
+ * FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
+ }
+ }
+}
+
+/**
+ * This function processes the currently active host channels and queues
+ * transactions for these channels to the DWC_otg controller. It is called
+ * from HCD interrupt handler functions.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _tr_type The type(s) of transactions to queue (non-periodic,
+ * periodic, or both).
+ */
+void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hcd,
+ dwc_otg_transaction_type_e _tr_type)
+{
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
+#endif
+ /* Process host channels associated with periodic transfers. */
+ if ((_tr_type == DWC_OTG_TRANSACTION_PERIODIC ||
+ _tr_type == DWC_OTG_TRANSACTION_ALL) &&
+ !list_empty(&_hcd->periodic_sched_assigned)) {
+
+ process_periodic_channels(_hcd);
+ }
+
+ /* Process host channels associated with non-periodic transfers. */
+ if ((_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC ||
+ _tr_type == DWC_OTG_TRANSACTION_ALL)) {
+ if (!list_empty(&_hcd->non_periodic_sched_active)) {
+ process_non_periodic_channels(_hcd);
+ } else {
+ /*
+ * Ensure NP Tx FIFO empty interrupt is disabled when
+ * there are no non-periodic transfers to process.
+ */
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&_hcd->core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+ }
+ }
+}
+
+/**
+ * Sets the final status of an URB and returns it to the device driver. Any
+ * required cleanup of the URB is performed.
+ */
+void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t * _hcd, struct urb *_urb,
+ int _status)
+ __releases(_hcd->lock)
+__acquires(_hcd->lock)
+{
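+	/* The HCD lock is released around usb_hcd_giveback_urb() (see the
+	 * __releases/__acquires annotations above) so that a completion handler
+	 * that resubmits the URB does not deadlock on this lock. */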
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
+ __func__, _urb, usb_pipedevice(_urb->pipe),
+ usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) ? "IN" : "OUT", _status);
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ for (i = 0; i < _urb->number_of_packets; i++) {
+ DWC_PRINT(" ISO Desc %d status: %d\n",
+ i, _urb->iso_frame_desc[i].status);
+ }
+ }
+ }
+#endif
+
+ _urb->status = _status;
+ _urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb);
+ spin_unlock(&_hcd->lock);
+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, _status);
+ spin_lock(&_hcd->lock);
+}
+
+/*
+ * Returns the Queue Head for an URB.
+ */
+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb)
+{
+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(_urb);
+ return (dwc_otg_qh_t *)ep->hcpriv;
+}
+
+#ifdef DEBUG
+void dwc_print_setup_data (uint8_t *setup)
+{
+ int i;
+ if (CHK_DEBUG_LEVEL(DBG_HCD)){
+ DWC_PRINT("Setup Data = MSB ");
+ for (i=7; i>=0; i--) DWC_PRINT ("%02x ", setup[i]);
+ DWC_PRINT("\n");
+ DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0]&0x80) ? "Device-to-Host" : "Host-to-Device");
+ DWC_PRINT(" bmRequestType Type = ");
+ switch ((setup[0]&0x60) >> 5) {
+ case 0: DWC_PRINT("Standard\n"); break;
+ case 1: DWC_PRINT("Class\n"); break;
+ case 2: DWC_PRINT("Vendor\n"); break;
+ case 3: DWC_PRINT("Reserved\n"); break;
+ }
+ DWC_PRINT(" bmRequestType Recipient = ");
+ switch (setup[0]&0x1f) {
+ case 0: DWC_PRINT("Device\n"); break;
+ case 1: DWC_PRINT("Interface\n"); break;
+ case 2: DWC_PRINT("Endpoint\n"); break;
+ case 3: DWC_PRINT("Other\n"); break;
+ default: DWC_PRINT("Reserved\n"); break;
+ }
+ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
+ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2]));
+ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4]));
+ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6]));
+ }
+}
+#endif
+
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd) {
+#ifdef DEBUG
+#if 0
+ DWC_PRINT("Frame remaining at SOF:\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->frrem_samples, _hcd->frrem_accum,
+ (_hcd->frrem_samples > 0) ?
+ _hcd->frrem_accum/_hcd->frrem_samples : 0);
+
+ DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n");
+ DWC_PRINT(" samples %u, accum %u, avg %u\n",
+ _hcd->core_if->hfnum_7_samples, _hcd->core_if->hfnum_7_frrem_accum,
+ (_hcd->core_if->hfnum_7_samples > 0) ?
+ _hcd->core_if->hfnum_7_frrem_accum/_hcd->core_if->hfnum_7_samples : 0);
+ DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n");
+ DWC_PRINT(" samples %u, accum %u, avg %u\n",
+ _hcd->core_if->hfnum_0_samples, _hcd->core_if->hfnum_0_frrem_accum,
+ (_hcd->core_if->hfnum_0_samples > 0) ?
+ _hcd->core_if->hfnum_0_frrem_accum/_hcd->core_if->hfnum_0_samples : 0);
+ DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n");
+ DWC_PRINT(" samples %u, accum %u, avg %u\n",
+ _hcd->core_if->hfnum_other_samples, _hcd->core_if->hfnum_other_frrem_accum,
+ (_hcd->core_if->hfnum_other_samples > 0) ?
+ _hcd->core_if->hfnum_other_frrem_accum/_hcd->core_if->hfnum_other_samples : 0);
+
+ DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at sample point A (uframe 7):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_7_samples_a, _hcd->hfnum_7_frrem_accum_a,
+ (_hcd->hfnum_7_samples_a > 0) ?
+ _hcd->hfnum_7_frrem_accum_a/_hcd->hfnum_7_samples_a : 0);
+ DWC_PRINT("Frame remaining at sample point A (uframe 0):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_0_samples_a, _hcd->hfnum_0_frrem_accum_a,
+ (_hcd->hfnum_0_samples_a > 0) ?
+ _hcd->hfnum_0_frrem_accum_a/_hcd->hfnum_0_samples_a : 0);
+ DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_other_samples_a, _hcd->hfnum_other_frrem_accum_a,
+ (_hcd->hfnum_other_samples_a > 0) ?
+ _hcd->hfnum_other_frrem_accum_a/_hcd->hfnum_other_samples_a : 0);
+
+ DWC_PRINT("\n");
+ DWC_PRINT("Frame remaining at sample point B (uframe 7):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_7_samples_b, _hcd->hfnum_7_frrem_accum_b,
+ (_hcd->hfnum_7_samples_b > 0) ?
+ _hcd->hfnum_7_frrem_accum_b/_hcd->hfnum_7_samples_b : 0);
+ DWC_PRINT("Frame remaining at sample point B (uframe 0):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_0_samples_b, _hcd->hfnum_0_frrem_accum_b,
+ (_hcd->hfnum_0_samples_b > 0) ?
+ _hcd->hfnum_0_frrem_accum_b/_hcd->hfnum_0_samples_b : 0);
+ DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n");
+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n",
+ _hcd->hfnum_other_samples_b, _hcd->hfnum_other_frrem_accum_b,
+ (_hcd->hfnum_other_samples_b > 0) ?
+ _hcd->hfnum_other_frrem_accum_b/_hcd->hfnum_other_samples_b : 0);
+#endif
+#endif
+}
+
+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd)
+{
+#ifdef DEBUG
+ int num_channels;
+ int i;
+ gnptxsts_data_t np_tx_status;
+ hptxsts_data_t p_tx_status;
+
+ num_channels = _hcd->core_if->core_params->host_channels;
+ DWC_PRINT("\n");
+ DWC_PRINT("************************************************************\n");
+ DWC_PRINT("HCD State:\n");
+ DWC_PRINT(" Num channels: %d\n", num_channels);
+ for (i = 0; i < num_channels; i++) {
+ dwc_hc_t *hc = _hcd->hc_ptr_array[i];
+ DWC_PRINT(" Channel %d:\n", i);
+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->ep_is_in);
+ DWC_PRINT(" speed: %d\n", hc->speed);
+ DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+ DWC_PRINT(" max_packet: %d\n", hc->max_packet);
+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
+ DWC_PRINT(" multi_count: %d\n", hc->multi_count);
+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
+ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count);
+ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue);
+ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending);
+ DWC_PRINT(" halt_status: %d\n", hc->halt_status);
+ DWC_PRINT(" do_split: %d\n", hc->do_split);
+ DWC_PRINT(" complete_split: %d\n", hc->complete_split);
+ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr);
+ DWC_PRINT(" port_addr: %d\n", hc->port_addr);
+ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos);
+ DWC_PRINT(" requests: %d\n", hc->requests);
+ DWC_PRINT(" qh: %p\n", hc->qh);
+ if (hc->xfer_started) {
+ hfnum_data_t hfnum;
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum);
+ hcchar.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcchar);
+ hctsiz.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hctsiz);
+ hcint.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcintmsk);
+ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32);
+ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32);
+ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32);
+ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32);
+ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32);
+ }
+ if (hc->xfer_started && (hc->qh != NULL) && (hc->qh->qtd_in_process != NULL)) {
+ dwc_otg_qtd_t *qtd;
+ struct urb *urb;
+ qtd = hc->qh->qtd_in_process;
+ urb = qtd->urb;
+ DWC_PRINT(" URB Info:\n");
+ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb);
+ if (urb != NULL) {
+ DWC_PRINT(" Dev: %d, EP: %d %s\n",
+ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+ DWC_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer);
+ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma);
+ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length);
+ DWC_PRINT(" actual_length: %d\n", urb->actual_length);
+ }
+ }
+ }
+ //DWC_PRINT(" non_periodic_channels: %d\n", _hcd->non_periodic_channels);
+ //DWC_PRINT(" periodic_channels: %d\n", _hcd->periodic_channels);
+ DWC_PRINT(" available_channels: %d\n", _hcd->available_host_channels);
+ DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_usecs);
+ np_tx_status.d32 = dwc_read_reg32(&_hcd->core_if->core_global_regs->gnptxsts);
+ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail);
+ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail);
+ p_tx_status.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hptxsts);
+ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail);
+ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
+ dwc_otg_hcd_dump_frrem(_hcd);
+ dwc_otg_dump_global_registers(_hcd->core_if);
+ dwc_otg_dump_host_registers(_hcd->core_if);
+ DWC_PRINT("************************************************************\n");
+ DWC_PRINT("\n");
+#endif
+}
+#endif /* DWC_DEVICE_ONLY */
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.h
new file mode 100644
index 0000000000..8a20dffa0a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd.h
@@ -0,0 +1,676 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.h $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 537387 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef DWC_DEVICE_ONLY
+#if !defined(__DWC_HCD_H__)
+#define __DWC_HCD_H__
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+struct lm_device;
+struct dwc_otg_device;
+
+#include "dwc_otg_cil.h"
+//#include "dwc_otg_ifx.h" // winder
+
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Host Controller Driver (HCD).
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+/**
+ * Phases for control transfers.
+ */
+typedef enum dwc_otg_control_phase {
+ DWC_OTG_CONTROL_SETUP,
+ DWC_OTG_CONTROL_DATA,
+ DWC_OTG_CONTROL_STATUS
+} dwc_otg_control_phase_e;
+
+/** Transaction types. */
+typedef enum dwc_otg_transaction_type {
+ DWC_OTG_TRANSACTION_NONE,
+ DWC_OTG_TRANSACTION_PERIODIC,
+ DWC_OTG_TRANSACTION_NON_PERIODIC,
+ DWC_OTG_TRANSACTION_ALL
+} dwc_otg_transaction_type_e;
+
+/**
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+struct dwc_otg_qh;
+typedef struct dwc_otg_qtd {
+ /**
+ * Determines the PID of the next data packet for the data phase of
+ * control transfers. Ignored for other transfer types.<br>
+ * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1
+ */
+ uint8_t data_toggle;
+
+ /** Current phase for control transfers (Setup, Data, or Status). */
+ dwc_otg_control_phase_e control_phase;
+
+ /** Keep track of the current split type
+ * for FS/LS endpoints on a HS Hub */
+ uint8_t complete_split;
+
+ /** How many bytes transferred during SSPLIT OUT */
+ uint32_t ssplit_out_xfer_count;
+
+ /**
+ * Holds the number of bus errors that have occurred for a transaction
+ * within this transfer.
+ */
+ uint8_t error_count;
+
+ /**
+ * Index of the next frame descriptor for an isochronous transfer. A
+ * frame descriptor describes the buffer position and length of the
+ * data to be transferred in the next scheduled (micro)frame of an
+ * isochronous transfer. It also holds status for that transaction.
+ * The frame index starts at 0.
+ */
+ int isoc_frame_index;
+
+ /** Position of the ISOC split on full/low speed */
+ uint8_t isoc_split_pos;
+
+ /** Position of the ISOC split in the buffer for the current frame */
+ uint16_t isoc_split_offset;
+
+ /** URB for this transfer */
+ struct urb *urb;
+
+	/** Entry in the QTD list of a Queue Head. */
+ struct list_head qtd_list_entry;
+
+	/* Back-pointer to the QH this QTD is queued on */
+ struct dwc_otg_qh *qtd_qh_ptr;
+} dwc_otg_qtd_t;
+
+/**
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+typedef struct dwc_otg_qh {
+ /**
+ * Endpoint type.
+ * One of the following values:
+ * - USB_ENDPOINT_XFER_CONTROL
+ * - USB_ENDPOINT_XFER_ISOC
+ * - USB_ENDPOINT_XFER_BULK
+ * - USB_ENDPOINT_XFER_INT
+ */
+ uint8_t ep_type;
+ uint8_t ep_is_in;
+
+ /** wMaxPacketSize Field of Endpoint Descriptor. */
+ uint16_t maxp;
+
+ /**
+ * Determines the PID of the next data packet for non-control
+ * transfers. Ignored for control transfers.<br>
+ * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1
+ */
+ uint8_t data_toggle;
+
+	/** 1 if the endpoint is in the PING state. */
+ uint8_t ping_state;
+
+ /**
+ * List of QTDs for this QH.
+ */
+ struct list_head qtd_list;
+
+ /** Host channel currently processing transfers for this QH. */
+ dwc_hc_t *channel;
+
+ /** QTD currently assigned to a host channel for this QH. */
+ dwc_otg_qtd_t *qtd_in_process;
+
+ /** Full/low speed endpoint on high-speed hub requires split. */
+ uint8_t do_split;
+
+ /** @name Periodic schedule information */
+ /** @{ */
+
+ /** Bandwidth in microseconds per (micro)frame. */
+ uint8_t usecs;
+
+ /** Interval between transfers in (micro)frames. */
+ uint16_t interval;
+
+ /**
+ * (micro)frame to initialize a periodic transfer. The transfer
+ * executes in the following (micro)frame.
+ */
+ uint16_t sched_frame;
+
+ /** (micro)frame at which last start split was initialized. */
+ uint16_t start_split_frame;
+
+ /** @} */
+
+ uint16_t speed;
+ uint16_t frame_usecs[8];
+ /** Entry for QH in either the periodic or non-periodic schedule. */
+ struct list_head qh_list_entry;
+} dwc_otg_qh_t;
+
+/**
+ * This structure holds the state of the HCD, including the non-periodic and
+ * periodic schedules.
+ */
+typedef struct dwc_otg_hcd {
+ spinlock_t lock;
+
+ /** DWC OTG Core Interface Layer */
+ dwc_otg_core_if_t *core_if;
+
+ /** Internal DWC HCD Flags */
+ volatile union dwc_otg_hcd_internal_flags {
+ uint32_t d32;
+ struct {
+ unsigned port_connect_status_change : 1;
+ unsigned port_connect_status : 1;
+ unsigned port_reset_change : 1;
+ unsigned port_enable_change : 1;
+ unsigned port_suspend_change : 1;
+ unsigned port_over_current_change : 1;
+ unsigned reserved : 27;
+ } b;
+ } flags;
+
+ /**
+ * Inactive items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ */
+ struct list_head non_periodic_sched_inactive;
+
+ /**
+ * Deferred items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ * When we get an NAK, the QH goes here.
+ */
+ struct list_head non_periodic_sched_deferred;
+
+ /**
+ * Active items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are
+ * currently assigned to a host channel.
+ */
+ struct list_head non_periodic_sched_active;
+
+ /**
+ * Pointer to the next Queue Head to process in the active
+ * non-periodic schedule.
+ */
+ struct list_head *non_periodic_qh_ptr;
+
+ /**
+ * Inactive items in the periodic schedule. This is a list of QHs for
+ * periodic transfers that are _not_ scheduled for the next frame.
+ * Each QH in the list has an interval counter that determines when it
+ * needs to be scheduled for execution. This scheduling mechanism
+ * allows only a simple calculation for periodic bandwidth used (i.e.
+ * must assume that all periodic transfers may need to execute in the
+ * same frame). However, it greatly simplifies scheduling and should
+ * be sufficient for the vast majority of OTG hosts, which need to
+ * connect to a small number of peripherals at one time.
+ *
+ * Items move from this list to periodic_sched_ready when the QH
+ * interval counter is 0 at SOF.
+ */
+ struct list_head periodic_sched_inactive;
+
+ /**
+ * List of periodic QHs that are ready for execution in the next
+ * frame, but have not yet been assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_assigned as host
+ * channels become available during the current frame.
+ */
+ struct list_head periodic_sched_ready;
+
+ /**
+ * List of periodic QHs to be executed in the next frame that are
+ * assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_queued as the
+ * transactions for the QH are queued to the DWC_otg controller.
+ */
+ struct list_head periodic_sched_assigned;
+
+ /**
+ * List of periodic QHs that have been queued for execution.
+ *
+ * Items move from this list to either periodic_sched_inactive or
+ * periodic_sched_ready when the channel associated with the transfer
+ * is released. If the interval for the QH is 1, the item moves to
+ * periodic_sched_ready because it must be rescheduled for the next
+ * frame. Otherwise, the item moves to periodic_sched_inactive.
+ */
+ struct list_head periodic_sched_queued;
+
+ /**
+ * Total bandwidth claimed so far for periodic transfers. This value
+ * is in microseconds per (micro)frame. The assumption is that all
+ * periodic transfers may occur in the same (micro)frame.
+ */
+ uint16_t periodic_usecs;
+
+ /**
+ * Total bandwidth claimed so far for all periodic transfers
+ * in a frame.
+ * This will include a mixture of HS and FS transfers.
+ * Units are microseconds per (micro)frame.
+ * We have a budget per frame and have to schedule
+ * transactions accordingly.
+ * Watch out for the fact that things are actually scheduled for the
+ * "next frame".
+ */
+ uint16_t frame_usecs[8];
+
+ /**
+ * Frame number read from the core at SOF. The value ranges from 0 to
+ * DWC_HFNUM_MAX_FRNUM.
+ */
+ uint16_t frame_number;
+
+ /**
+ * Free host channels in the controller. This is a list of
+ * dwc_hc_t items.
+ */
+ struct list_head free_hc_list;
+
+ /**
+ * Number of available host channels.
+ */
+ int available_host_channels;
+
+ /**
+ * Array of pointers to the host channel descriptors. Allows accessing
+ * a host channel descriptor given the host channel number. This is
+ * useful in interrupt handlers.
+ */
+ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS];
+
+ /**
+ * Buffer to use for any data received during the status phase of a
+ * control transfer. Normally no data is transferred during the status
+ * phase. This buffer is used as a bit bucket.
+ */
+ uint8_t *status_buf;
+
+ /**
+ * DMA address for status_buf.
+ */
+ dma_addr_t status_buf_dma;
+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64
+
+ /**
+ * Structure to allow starting the HCD in a non-interrupt context
+ * during an OTG role change.
+ */
+ struct work_struct start_work;
+ struct usb_hcd *_p;
+
+ /**
+ * Connection timer. An OTG host must display a message if the device
+ * does not connect. Started when the VBus power is turned on via
+ * sysfs attribute "buspower".
+ */
+ struct timer_list conn_timer;
+
+	/* Tasklet to perform a reset */
+ struct tasklet_struct *reset_tasklet;
+
+#ifdef DEBUG
+ uint32_t frrem_samples;
+ uint64_t frrem_accum;
+
+ uint32_t hfnum_7_samples_a;
+ uint64_t hfnum_7_frrem_accum_a;
+ uint32_t hfnum_0_samples_a;
+ uint64_t hfnum_0_frrem_accum_a;
+ uint32_t hfnum_other_samples_a;
+ uint64_t hfnum_other_frrem_accum_a;
+
+ uint32_t hfnum_7_samples_b;
+ uint64_t hfnum_7_frrem_accum_b;
+ uint32_t hfnum_0_samples_b;
+ uint64_t hfnum_0_frrem_accum_b;
+ uint32_t hfnum_other_samples_b;
+ uint64_t hfnum_other_frrem_accum_b;
+#endif
+
+} dwc_otg_hcd_t;
+
+/** Gets the dwc_otg_hcd from a struct usb_hcd */
+static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd)
+{
+ return (dwc_otg_hcd_t *)(hcd->hcd_priv);
+}
+
+/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */
+static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd)
+{
+ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv);
+}
+
+/** @name HCD Create/Destroy Functions */
+/** @{ */
+extern int __devinit dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device);
+extern void dwc_otg_hcd_remove(struct device *_dev);
+/** @} */
+
+/** @name Linux HC Driver API Functions */
+/** @{ */
+
+extern int dwc_otg_hcd_start(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_free(struct usb_hcd *hcd);
+
+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags);
+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
+ struct urb *urb,
+ int status);
+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd);
+
+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+
+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd,
+ char *buf);
+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength);
+
+/** @} */
+
+/** @name Transaction Execution Functions */
+/** @{ */
+extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd);
+extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hcd,
+ dwc_otg_transaction_type_e _tr_type);
+extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *_urb,
+ int _status);
+/** @} */
+
+/** @name Interrupt Handler Functions */
+/** @{ */
+extern int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_disconnect_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num);
+extern int32_t dwc_otg_hcd_handle_session_req_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+/** @} */
+
+
+/** @name Schedule Queue Functions */
+/** @{ */
+
+/* Implemented in dwc_otg_hcd_queue.c */
+extern dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_hcd, struct urb *_urb);
+extern void dwc_otg_hcd_qh_init (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, struct urb *_urb);
+extern void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh);
+extern int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh);
+extern void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh);
+extern void dwc_otg_hcd_qh_deactivate (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int sched_csplit);
+extern int dwc_otg_hcd_qh_deferr (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int delay);
+
+/** Remove and free a QH */
+static inline void dwc_otg_hcd_qh_remove_and_free (dwc_otg_hcd_t *_hcd,
+ dwc_otg_qh_t *_qh)
+{
+ dwc_otg_hcd_qh_remove (_hcd, _qh);
+ dwc_otg_hcd_qh_free (_qh);
+}
+
+/** Allocates memory for a QH structure.
+ * @return Returns the allocated memory or NULL on error. */
+static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc (void)
+{
+#ifdef _SC_BUILD_
+ return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t), GFP_ATOMIC);
+#else
+ return (dwc_otg_qh_t *) kmalloc (sizeof(dwc_otg_qh_t), GFP_KERNEL);
+#endif
+}
+
+extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb);
+extern void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb);
+extern int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd);
+
+/** Allocates memory for a QTD structure.
+ * @return Returns the allocated memory or NULL on error. */
+static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc (void)
+{
+#ifdef _SC_BUILD_
+ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_t), GFP_ATOMIC);
+#else
+ return (dwc_otg_qtd_t *) kmalloc (sizeof(dwc_otg_qtd_t), GFP_KERNEL);
+#endif
+}
+
+/** Frees the memory for a QTD structure. QTD should already be removed from
+ * list.
+ * @param[in] _qtd QTD to free.*/
+static inline void dwc_otg_hcd_qtd_free (dwc_otg_qtd_t *_qtd)
+{
+ kfree (_qtd);
+}
+
+/** Removes a QTD from list.
+ * @param[in] _qtd QTD to remove from list. */
+static inline void dwc_otg_hcd_qtd_remove (dwc_otg_qtd_t *_qtd)
+{
+ unsigned long flags;
+ local_irq_save (flags);
+ list_del (&_qtd->qtd_list_entry);
+ local_irq_restore (flags);
+}
+
+/** Remove and free a QTD */
+static inline void dwc_otg_hcd_qtd_remove_and_free (dwc_otg_qtd_t *_qtd)
+{
+ dwc_otg_hcd_qtd_remove (_qtd);
+ dwc_otg_hcd_qtd_free (_qtd);
+}
+
+/** @} */
+
+
+/** @name Internal Functions */
+/** @{ */
+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb);
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd);
+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd);
+/** @} */
+
+
+/** Gets the usb_host_endpoint associated with an URB. */
+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *_urb)
+{
+ struct usb_device *dev = _urb->dev;
+ int ep_num = usb_pipeendpoint(_urb->pipe);
+ if (usb_pipein(_urb->pipe))
+ return dev->ep_in[ep_num];
+ else
+ return dev->ep_out[ep_num];
+}
+
+/**
+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
+ * qualified with its direction (allowing up to 32 endpoints per device).
+ */
+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \
+ ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
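+/*
+ * Example: bEndpointAddress 0x81 (EP 1, IN) maps to (1 | (1 << 4)) = 17,
+ * while 0x01 (EP 1, OUT) maps to 1, so IN and OUT endpoints get distinct
+ * indices.
+ */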
+
+/** Gets the QH that contains the list_head */
+#define dwc_list_to_qh(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qh_t,qh_list_entry))
+
+/** Gets the QTD that contains the list_head */
+#define dwc_list_to_qtd(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qtd_t,qtd_list_entry))
+
+/** Check if QH is non-periodic */
+#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \
+ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL))
+
+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
+/** Packet size for any kind of endpoint descriptor */
+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
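+/*
+ * Example: a high-bandwidth isochronous endpoint with wMaxPacketSize 0x1400
+ * gives dwc_hb_mult() == 3 and dwc_max_packet() == 1024, i.e. up to three
+ * 1024-byte transactions per microframe.
+ */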
+
+/**
+ * Returns true if _frame1 is less than or equal to _frame2. The comparison is
+ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the
+ * frame number when the max frame number is reached.
+ */
+static inline int dwc_frame_num_le(uint16_t _frame1, uint16_t _frame2)
+{
+ return ((_frame2 - _frame1) & DWC_HFNUM_MAX_FRNUM) <=
+ (DWC_HFNUM_MAX_FRNUM >> 1);
+}
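+/*
+ * Worked example, assuming DWC_HFNUM_MAX_FRNUM is the 14-bit mask 0x3FFF:
+ * dwc_frame_num_le(0x3FFE, 0x0001) computes ((0x0001 - 0x3FFE) & 0x3FFF) = 3,
+ * which is <= 0x1FFF, so frame 0x3FFE is considered to precede frame 0x0001
+ * across the counter rollover.
+ */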
+
+/**
+ * Returns true if _frame1 is greater than _frame2. The comparison is done
+ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
+ * number when the max frame number is reached.
+ */
+static inline int dwc_frame_num_gt(uint16_t _frame1, uint16_t _frame2)
+{
+ return (_frame1 != _frame2) &&
+ (((_frame1 - _frame2) & DWC_HFNUM_MAX_FRNUM) <
+ (DWC_HFNUM_MAX_FRNUM >> 1));
+}
+
+/**
+ * Increments _frame by the amount specified by _inc. The addition is done
+ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value.
+ */
+static inline uint16_t dwc_frame_num_inc(uint16_t _frame, uint16_t _inc)
+{
+ return (_frame + _inc) & DWC_HFNUM_MAX_FRNUM;
+}
+
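+/*
+ * In high-speed mode the hardware frame counter advances once per
+ * microframe, so the 1 ms frame number is (frnum >> 3) and the microframe
+ * index within that frame is (frnum & 0x7).
+ */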
+static inline uint16_t dwc_full_frame_num (uint16_t _frame)
+{
+ return ((_frame) & DWC_HFNUM_MAX_FRNUM) >> 3;
+}
+
+static inline uint16_t dwc_micro_frame_num (uint16_t _frame)
+{
+ return (_frame) & 0x7;
+}
+
+#ifdef DEBUG
+/**
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter argument must be one of these values.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ */
+#define dwc_sample_frrem(_hcd, _qh, _letter) \
+{ \
+ hfnum_data_t hfnum; \
+ dwc_otg_qtd_t *qtd; \
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \
+ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \
+ switch (hfnum.b.frnum & 0x7) { \
+ case 7: \
+ _hcd->hfnum_7_samples_##_letter++; \
+ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ case 0: \
+ _hcd->hfnum_0_samples_##_letter++; \
+ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ default: \
+ _hcd->hfnum_other_samples_##_letter++; \
+ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \
+ break; \
+ } \
+ } \
+}
+#else // DEBUG
+#define dwc_sample_frrem(_hcd, _qh, _letter)
+#endif // DEBUG
+#endif // __DWC_HCD_H__
+#endif /* DWC_DEVICE_ONLY */
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
new file mode 100644
index 0000000000..f6f3f3dc8d
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c
@@ -0,0 +1,1839 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_intr.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 553126 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef DWC_DEVICE_ONLY
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+const int erratum_usb09_patched = 0;
+const int deferral_on = 1;
+const int nak_deferral_delay = 8;
+const int nyet_deferral_delay = 1;
+/** @file
+ * This file contains the implementation of the HCD Interrupt handlers.
+ */
+
+/** This function handles interrupts for the HCD. */
+int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ int retval = 0;
+
+ dwc_otg_core_if_t *core_if = _dwc_otg_hcd->core_if;
+ gintsts_data_t gintsts;
+#ifdef DEBUG
+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
+#endif
+
+ /* Check if HOST Mode */
+ if (dwc_otg_is_host_mode(core_if)) {
+ gintsts.d32 = dwc_otg_read_core_intr(core_if);
+ if (!gintsts.d32) {
+ return 0;
+ }
+
+#ifdef DEBUG
+ /* Don't print debug message in the interrupt handler on SOF */
+# ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+# endif
+ DWC_DEBUGPL (DBG_HCD, "\n");
+#endif
+
+#ifdef DEBUG
+# ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+# endif
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32);
+#endif
+
+ if (gintsts.b.sofintr) {
+ retval |= dwc_otg_hcd_handle_sof_intr (_dwc_otg_hcd);
+ }
+ if (gintsts.b.rxstsqlvl) {
+ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr (_dwc_otg_hcd);
+ }
+ if (gintsts.b.nptxfempty) {
+ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr (_dwc_otg_hcd);
+ }
+ if (gintsts.b.i2cintr) {
+ /** @todo Implement i2cintr handler. */
+ }
+ if (gintsts.b.portintr) {
+ retval |= dwc_otg_hcd_handle_port_intr (_dwc_otg_hcd);
+ }
+ if (gintsts.b.hcintr) {
+ retval |= dwc_otg_hcd_handle_hc_intr (_dwc_otg_hcd);
+ }
+ if (gintsts.b.ptxfempty) {
+ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (_dwc_otg_hcd);
+ }
+#ifdef DEBUG
+# ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+# endif
+ {
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
+ dwc_read_reg32(&global_regs->gintsts));
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
+ dwc_read_reg32(&global_regs->gintmsk));
+ }
+#endif
+
+#ifdef DEBUG
+# ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+# endif
+ DWC_DEBUGPL (DBG_HCD, "\n");
+#endif
+
+ }
+
+ return retval;
+}
+
+#ifdef DWC_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000
+/**
+ * This function is for debug only.
+ */
+static inline void track_missed_sofs(uint16_t _curr_frame_number) {
+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static int frame_num_idx = 0;
+ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
+ static int dumped_frame_num_array = 0;
+
+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+ if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != _curr_frame_number)) {
+ frame_num_array[frame_num_idx] = _curr_frame_number;
+ last_frame_num_array[frame_num_idx++] = last_frame_num;
+ }
+ } else if (!dumped_frame_num_array) {
+ int i;
+ printk(KERN_EMERG USB_DWC "Frame Last Frame\n");
+ printk(KERN_EMERG USB_DWC "----- ----------\n");
+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n",
+ frame_num_array[i], last_frame_num_array[i]);
+ }
+ dumped_frame_num_array = 1;
+ }
+ last_frame_num = _curr_frame_number;
+}
+#endif
+
+/**
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller for the
+ * next (micro)frame.
+ */
+int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_hcd)
+{
+ hfnum_data_t hfnum;
+ struct list_head *qh_entry;
+ dwc_otg_qh_t *qh;
+ dwc_otg_transaction_type_e tr_type;
+ gintsts_data_t gintsts = {.d32 = 0};
+
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
+#endif
+
+ _hcd->frame_number = hfnum.b.frnum;
+
+#ifdef DEBUG
+ _hcd->frrem_accum += hfnum.b.frrem;
+ _hcd->frrem_samples++;
+#endif
+
+#ifdef DWC_TRACK_MISSED_SOFS
+ track_missed_sofs(_hcd->frame_number);
+#endif
+
+ /* Determine whether any periodic QHs should be executed. */
+ qh_entry = _hcd->periodic_sched_inactive.next;
+ while (qh_entry != &_hcd->periodic_sched_inactive) {
+ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
+ qh_entry = qh_entry->next;
+ if (dwc_frame_num_le(qh->sched_frame, _hcd->frame_number)) {
+ /*
+ * Move QH to the ready list to be executed next
+ * (micro)frame.
+ */
+ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_ready);
+ }
+ }
+
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ }
+
+ /* Clear interrupt */
+ gintsts.b.sofintr = 1;
+ dwc_write_reg32(&_hcd->core_if->core_global_regs->gintsts, gintsts.d32);
+
+ return 1;
+}
+
+/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
+ * least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode. */
+int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ host_grxsts_data_t grxsts;
+ dwc_hc_t *hc = NULL;
+
+ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
+
+ grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->core_global_regs->grxstsp);
+
+ hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
+
+ /* Packet Status */
+ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
+ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
+ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start);
+ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);
+
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer. */
+ if (grxsts.b.bcnt > 0) {
+ dwc_otg_read_packet(_dwc_otg_hcd->core_if,
+ hc->xfer_buff,
+ grxsts.b.bcnt);
+
+ /* Update the HC fields for the next packet received. */
+ hc->xfer_count += grxsts.b.bcnt;
+ hc->xfer_buff += grxsts.b.bcnt;
+ }
+
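+		/* Fall through: the statuses below require no data handling. */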
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
+ case DWC_GRXSTS_PKTSTS_CH_HALTED:
+ /* Handled in interrupt, just ignore data */
+ break;
+ default:
+ DWC_ERROR ("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts);
+ break;
+ }
+
+ return 1;
+}
+
+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode. */
+int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_NON_PERIODIC);
+ return 1;
+}
+
+/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode. */
+int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_PERIODIC);
+ return 1;
+}
+
+/** There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately. */
+int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ int retval = 0;
+ hprt0_data_t hprt0;
+ hprt0_data_t hprt0_modify;
+
+ hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
+ hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
+
+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
+ * GINTSTS */
+
+ hprt0_modify.b.prtena = 0;
+ hprt0_modify.b.prtconndet = 0;
+ hprt0_modify.b.prtenchng = 0;
+ hprt0_modify.b.prtovrcurrchng = 0;
+
+ /* Port Connect Detected
+ * Set flag and clear if detected */
+ if (hprt0.b.prtconndet) {
+ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
+ "Port Connect Detected--\n", hprt0.d32);
+ _dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ _dwc_otg_hcd->flags.b.port_connect_status = 1;
+ hprt0_modify.b.prtconndet = 1;
+
+		/* B-Device has connected; delete the connection timer. */
+ del_timer( &_dwc_otg_hcd->conn_timer );
+
+		/* The hub driver asserts a reset when it sees the port
+		 * connect status change flag. */
+ retval |= 1;
+ }
+
+ /* Port Enable Changed
+ * Clear if detected - Set internal flag if disabled */
+ if (hprt0.b.prtenchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Enable Changed--\n", hprt0.d32);
+ hprt0_modify.b.prtenchng = 1;
+ if (hprt0.b.prtena == 1) {
+ int do_reset = 0;
+ dwc_otg_core_params_t *params = _dwc_otg_hcd->core_if->core_params;
+ dwc_otg_core_global_regs_t *global_regs = _dwc_otg_hcd->core_if->core_global_regs;
+ dwc_otg_host_if_t *host_if = _dwc_otg_hcd->core_if->host_if;
+
+			/* Check whether the PHY clock speed needs to be
+			 * adjusted for low power and, if so, adjust it */
+ if (params->host_support_fs_ls_low_power)
+ {
+ gusbcfg_data_t usbcfg;
+
+ usbcfg.d32 = dwc_read_reg32 (&global_regs->gusbcfg);
+
+ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ||
+ (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED))
+ {
+ /*
+ * Low power
+ */
+ hcfg_data_t hcfg;
+ if (usbcfg.b.phylpwrclksel == 0) {
+ /* Set PHY low power clock select for FS/LS devices */
+ usbcfg.b.phylpwrclksel = 1;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ do_reset = 1;
+ }
+
+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+
+ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) &&
+ (params->host_ls_low_power_phy_clk ==
+ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ))
+ {
+ /* 6 MHZ */
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n");
+ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
+ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg,
+ hcfg.d32);
+ do_reset = 1;
+ }
+ }
+ else {
+ /* 48 MHZ */
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n");
+ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
+ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg,
+ hcfg.d32);
+ do_reset = 1;
+ }
+ }
+ }
+ else {
+ /*
+ * Not low power
+ */
+ if (usbcfg.b.phylpwrclksel == 1) {
+ usbcfg.b.phylpwrclksel = 0;
+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+ do_reset = 1;
+ }
+ }
+
+ if (do_reset) {
+ tasklet_schedule(_dwc_otg_hcd->reset_tasklet);
+ }
+ }
+
+ if (!do_reset) {
+				/* Port has been enabled; set the reset change flag */
+ _dwc_otg_hcd->flags.b.port_reset_change = 1;
+ }
+
+ } else {
+ _dwc_otg_hcd->flags.b.port_enable_change = 1;
+ }
+ retval |= 1;
+ }
+
+ /** Overcurrent Change Interrupt */
+ if (hprt0.b.prtovrcurrchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Overcurrent Changed--\n", hprt0.d32);
+ _dwc_otg_hcd->flags.b.port_over_current_change = 1;
+ hprt0_modify.b.prtovrcurrchng = 1;
+ retval |= 1;
+ }
+
+ /* Clear Port Interrupts */
+ dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32);
+
+ return retval;
+}
+
+
+/** This interrupt indicates that one or more host channels has a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately. */
+int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ int i;
+ int retval = 0;
+ haint_data_t haint;
+
+ /* Clear appropriate bits in HCINTn to clear the interrupt bit in
+ * GINTSTS */
+
+ haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_hcd->core_if);
+
+ for (i=0; i<_dwc_otg_hcd->core_if->core_params->host_channels; i++) {
+ if (haint.b2.chint & (1 << i)) {
+ retval |= dwc_otg_hcd_handle_hc_n_intr (_dwc_otg_hcd, i);
+ }
+ }
+
+ return retval;
+}
+
+/* Macro used to clear one channel interrupt */
+#define clear_hc_int(_hc_regs_,_intr_) \
+do { \
+ hcint_data_t hcint_clear = {.d32 = 0}; \
+ hcint_clear.b._intr_ = 1; \
+ dwc_write_reg32(&((_hc_regs_)->hcint), hcint_clear.d32); \
+} while (0)
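+/*
+ * The HCINTn bits are write-1-to-clear, so writing only the requested bit
+ * acknowledges that interrupt without disturbing the others.
+ */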
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+#define disable_hc_int(_hc_regs_,_intr_) \
+do { \
+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \
+ dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hcintmsk.d32, 0); \
+} while (0)
+
+/**
+ * Gets the actual length of a transfer after the transfer halts. _halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COMPLETE,
+ * *_short_read is set to 1 upon return if less than the requested
+ * number of bytes were transferred. Otherwise, *_short_read is set to 0 upon
+ * return. _short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static uint32_t get_actual_xfer_length(dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status,
+ int *_short_read)
+{
+ hctsiz_data_t hctsiz;
+ uint32_t length;
+
+ if (_short_read != NULL) {
+ *_short_read = 0;
+ }
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+
+ if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) {
+ if (_hc->ep_is_in) {
+ length = _hc->xfer_len - hctsiz.b.xfersize;
+ if (_short_read != NULL) {
+ *_short_read = (hctsiz.b.xfersize != 0);
+ }
+ } else if (_hc->qh->do_split) {
+ length = _qtd->ssplit_out_xfer_count;
+ } else {
+ length = _hc->xfer_len;
+ }
+ } else {
+ /*
+ * Must use the hctsiz.pktcnt field to determine how much data
+ * has been transferred. This field reflects the number of
+ * packets that have been transferred via the USB. This is
+ * always an integral number of packets if the transfer was
+ * halted before its normal completion. (Can't use the
+ * hctsiz.xfersize field because that reflects the number of
+ * bytes transferred via the AHB, not the USB).
+ */
+ length = (_hc->start_pkt_count - hctsiz.b.pktcnt) * _hc->max_packet;
+ }
+
+ return length;
+}
+
+/**
+ * Updates the state of the URB after a Transfer Complete interrupt on the
+ * host channel. Updates the actual_length field of the URB based on the
+ * number of bytes transferred via the host channel. Sets the URB status
+ * if the data transfer is finished.
+ *
+ * @return 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise.
+ */
+static int update_urb_state_xfer_comp(dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb,
+ dwc_otg_qtd_t * _qtd, int *status)
+{
+ int xfer_done = 0;
+ int short_read = 0;
+
+ _urb->actual_length += get_actual_xfer_length(_hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_COMPLETE,
+ &short_read);
+
+ if (short_read || (_urb->actual_length == _urb->transfer_buffer_length)) {
+ xfer_done = 1;
+ if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) {
+ *status = -EREMOTEIO;
+ } else {
+ *status = 0;
+ }
+ }
+
+#ifdef DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", _hc->xfer_len);
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize);
+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
+ _urb->transfer_buffer_length);
+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
+ short_read, xfer_done);
+ }
+#endif
+
+ return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ */
+static void save_data_toggle(dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd)
+{
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
+ dwc_otg_qh_t *qh = _hc->qh;
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+ } else {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA1;
+ }
+ } else {
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
+ } else {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ }
+ }
+}
+
+/**
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void deactivate_qh(dwc_otg_hcd_t *_hcd,
+ dwc_otg_qh_t *_qh,
+ int free_qtd)
+{
+ int continue_split = 0;
+ dwc_otg_qtd_t *qtd;
+
+ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _hcd, _qh, free_qtd);
+
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+
+ if (qtd->complete_split) {
+ continue_split = 1;
+ }
+ else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID) ||
+ (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END))
+ {
+ continue_split = 1;
+ }
+
+ if (free_qtd) {
+ /*
+		 * This was previously a call to
+		 * dwc_otg_hcd_qtd_remove_and_free(qtd), which frees the qtd.
+		 * However, the interrupt handling code continues to access the
+		 * qtd after this point, including writing to it; with SLUB
+		 * debugging enabled it is clear that freed memory was being
+		 * written. Only remove the qtd from the list here; its memory
+		 * is freed at the end of interrupt processing.
+ */
+ //dwc_otg_hcd_qtd_remove_and_free(qtd);
+ dwc_otg_hcd_qtd_remove(qtd);
+
+ continue_split = 0;
+ }
+
+ _qh->channel = NULL;
+ _qh->qtd_in_process = NULL;
+ dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split);
+}
+
+/**
+ * Updates the state of an Isochronous URB when the transfer is stopped for
+ * any reason. The fields of the current entry in the frame descriptor array
+ * are set based on the transfer state and the input _halt_status. Completes
+ * the Isochronous URB if all the URB frames have been completed.
+ *
+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
+ */
+static dwc_otg_halt_status_e
+update_isoc_urb_state(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status)
+{
+ struct urb *urb = _qtd->urb;
+ dwc_otg_halt_status_e ret_val = _halt_status;
+ struct usb_iso_packet_descriptor *frame_desc;
+
+ frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_index];
+ switch (_halt_status) {
+ case DWC_OTG_HC_XFER_COMPLETE:
+ frame_desc->status = 0;
+ frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd,
+ _halt_status, NULL);
+ break;
+ case DWC_OTG_HC_XFER_FRAME_OVERRUN:
+ urb->error_count++;
+ if (_hc->ep_is_in) {
+ frame_desc->status = -ENOSR;
+ } else {
+ frame_desc->status = -ECOMM;
+ }
+ frame_desc->actual_length = 0;
+ break;
+ case DWC_OTG_HC_XFER_BABBLE_ERR:
+ urb->error_count++;
+ frame_desc->status = -EOVERFLOW;
+ /* Don't need to update actual_length in this case. */
+ break;
+ case DWC_OTG_HC_XFER_XACT_ERR:
+ urb->error_count++;
+ frame_desc->status = -EPROTO;
+ frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd,
+					       _halt_status, NULL);
+		break;
+ default:
+ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__,
+ _halt_status);
+ BUG();
+ break;
+ }
+
+ if (++_qtd->isoc_frame_index == urb->number_of_packets) {
+ /*
+ * urb->status is not used for isoc transfers.
+ * The individual frame_desc statuses are used instead.
+ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, 0);
+ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ ret_val = DWC_OTG_HC_XFER_COMPLETE;
+ }
+
+ return ret_val;
+}
+
+/**
+ * Releases a host channel for use by other transfers. Attempts to select and
+ * queue more transactions since at least one host channel is available.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _hc The host channel to release.
+ * @param _qtd The QTD associated with the host channel. This QTD may be freed
+ * if the transfer is complete or an error has occurred.
+ * @param _halt_status Reason the channel is being released. This status
+ * determines the actions taken by this function.
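+ * @param must_free Output flag set to 1 when the caller must free the QTD
+ * once interrupt processing for the channel has finished.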
+ */
+static void release_channel(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status,
+ int *must_free)
+{
+ dwc_otg_transaction_type_e tr_type;
+ int free_qtd;
+ dwc_otg_qh_t * _qh;
+ int deact = 1;
+ int retry_delay = 1;
+ unsigned long flags;
+
+ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", __func__,
+ _hc->hc_num, _halt_status);
+
+ switch (_halt_status) {
+ case DWC_OTG_HC_XFER_NYET:
+ case DWC_OTG_HC_XFER_NAK:
+ if (_halt_status == DWC_OTG_HC_XFER_NYET) {
+ retry_delay = nyet_deferral_delay;
+ } else {
+ retry_delay = nak_deferral_delay;
+ }
+ free_qtd = 0;
+ if (deferral_on && _hc->do_split) {
+ _qh = _hc->qh;
+ if (_qh) {
+ deact = dwc_otg_hcd_qh_deferr(_hcd, _qh , retry_delay);
+ }
+ }
+ break;
+ case DWC_OTG_HC_XFER_URB_COMPLETE:
+ free_qtd = 1;
+ break;
+ case DWC_OTG_HC_XFER_AHB_ERR:
+ case DWC_OTG_HC_XFER_STALL:
+ case DWC_OTG_HC_XFER_BABBLE_ERR:
+ free_qtd = 1;
+ break;
+ case DWC_OTG_HC_XFER_XACT_ERR:
+ if (_qtd->error_count >= 3) {
+ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n");
+ free_qtd = 1;
+ //_qtd->urb->status = -EPROTO;
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPROTO);
+ } else {
+ free_qtd = 0;
+ }
+ break;
+ case DWC_OTG_HC_XFER_URB_DEQUEUE:
+ /*
+ * The QTD has already been removed and the QH has been
+ * deactivated. Don't want to do anything except release the
+ * host channel and try to queue more transfers.
+ */
+ goto cleanup;
+ case DWC_OTG_HC_XFER_NO_HALT_STATUS:
+ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, _hc->hc_num);
+ free_qtd = 0;
+ break;
+ default:
+ free_qtd = 0;
+ break;
+ }
+ if (free_qtd) {
+ /* Only change must_free to true (do not set to zero here -- it is
+ * pre-initialized to zero).
+ */
+ *must_free = 1;
+ }
+ if (deact) {
+ deactivate_qh(_hcd, _hc->qh, free_qtd);
+ }
+ cleanup:
+ /*
+ * Release the host channel for use by other transfers. The cleanup
+ * function clears the channel interrupt enables and conditions, so
+ * there's no need to clear the Channel Halted interrupt separately.
+ */
+ dwc_otg_hc_cleanup(_hcd->core_if, _hc);
+ list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_list);
+
+ local_irq_save(flags);
+ _hcd->available_host_channels++;
+ local_irq_restore(flags);
+ /* Try to queue more transfers now that there's a free channel, */
+ /* unless erratum_usb09_patched is set */
+ if (!erratum_usb09_patched) {
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ }
+ }
+}
+
+/**
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void halt_channel(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ if (_hcd->core_if->dma_enable) {
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ return;
+ }
+
+ /* Slave mode processing... */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status);
+
+ if (_hc->halt_on_queue) {
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ dwc_otg_core_global_regs_t *global_regs;
+ global_regs = _hcd->core_if->core_global_regs;
+
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
+ /*
+ * Make sure the Non-periodic Tx FIFO empty interrupt
+ * is enabled so that the non-periodic schedule will
+ * be processed.
+ */
+ gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
+ } else {
+ /*
+ * Move the QH from the periodic queued schedule to
+ * the periodic assigned schedule. This allows the
+ * halt to be queued when the periodic schedule is
+ * processed.
+ */
+ list_move(&_hc->qh->qh_list_entry,
+ &_hcd->periodic_sched_assigned);
+
+ /*
+ * Make sure the Periodic Tx FIFO Empty interrupt is
+ * enabled so that the periodic schedule will be
+ * processed.
+ */
+ gintmsk.b.ptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
+ }
+ }
+}
+
+/**
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ */
+static void complete_non_periodic_xfer(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ hcint_data_t hcint;
+
+ _qtd->error_count = 0;
+
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ if (hcint.b.nyet) {
+ /*
+ * Got a NYET on the last transaction of the transfer. This
+ * means that the endpoint should be in the PING state at the
+ * beginning of the next transfer.
+ */
+ _hc->qh->ping_state = 1;
+ clear_hc_int(_hc_regs,nyet);
+ }
+
+ /*
+ * Always halt and release the host channel to make it available for
+ * more transfers. There may still be more phases for a control
+ * transfer or more data packets for a bulk transfer at this point,
+ * but the host channel is still halted. A channel will be reassigned
+ * to the transfer when the non-periodic schedule is processed after
+ * the channel is released. This allows transactions to be queued
+ * properly via dwc_otg_hcd_queue_transactions, which also enables the
+ * Tx FIFO Empty interrupt if necessary.
+ */
+ if (_hc->ep_is_in) {
+ /*
+ * IN transfers in Slave mode require an explicit disable to
+ * halt the channel. (In DMA mode, this call simply releases
+ * the channel.)
+ */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ } else {
+ /*
+ * The channel is automatically disabled by the core for OUT
+ * transfers in Slave mode.
+ */
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ }
+}
+
+/**
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ */
+static void complete_periodic_xfer(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ hctsiz_data_t hctsiz;
+ _qtd->error_count = 0;
+
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) {
+ /* Core halts channel in these cases. */
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ } else {
+ /* Flush any outstanding requests from the Tx queue. */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ }
+}
+
+/**
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd, int *must_free)
+{
+ int urb_xfer_done;
+ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe);
+ int status = -EINPROGRESS;
+
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transfer Complete--\n", _hc->hc_num);
+
+ /*
+ * Handle xfer complete on CSPLIT.
+ */
+ if (_hc->qh->do_split) {
+ _qtd->complete_split = 0;
+ }
+
+ /* Update the QTD and URB states. */
+ switch (pipe_type) {
+ case PIPE_CONTROL:
+ switch (_qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP:
+ if (urb->transfer_buffer_length > 0) {
+ _qtd->control_phase = DWC_OTG_CONTROL_DATA;
+ } else {
+ _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ }
+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n");
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ break;
+ case DWC_OTG_CONTROL_DATA: {
+ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs,urb, _qtd, &status);
+ if (urb_xfer_done) {
+ _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n");
+ } else {
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ }
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ break;
+ }
+ case DWC_OTG_CONTROL_STATUS:
+ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
+ if (status == -EINPROGRESS) {
+ status = 0;
+ }
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+ break;
+ }
+
+ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
+ halt_status, must_free);
+ break;
+ case PIPE_BULK:
+ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
+ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
+ if (urb_xfer_done) {
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ }
+
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,halt_status, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
+ update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
+
+ /*
+ * Interrupt URB is done on the first transfer complete
+ * interrupt.
+ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_URB_COMPLETE, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
+ if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL)
+ {
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_COMPLETE);
+ }
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, halt_status, must_free);
+ break;
+ }
+
+ disable_hc_int(_hc_regs,xfercompl);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd, int *must_free)
+{
+ struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe);
+
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "STALL Received--\n", _hc->hc_num);
+
+ if (pipe_type == PIPE_CONTROL) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
+ }
+
+ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
+ /*
+ * USB protocol requires resetting the data toggle for bulk
+ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+ * setup command is issued to the endpoint. Anticipate the
+ * CLEAR_FEATURE command since a STALL has occurred and reset
+ * the data toggle now.
+ */
+ _hc->qh->data_toggle = 0;
+ }
+
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL, must_free);
+ disable_hc_int(_hc_regs,stall);
+
+ return 1;
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void update_urb_state_xfer_intr(dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ struct urb *_urb,
+ dwc_otg_qtd_t *_qtd,
+ dwc_otg_halt_status_e _halt_status)
+{
+ uint32_t bytes_transferred = get_actual_xfer_length(_hc, _hc_regs, _qtd,
+ _halt_status, NULL);
+ _urb->actual_length += bytes_transferred;
+
+#ifdef DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " _hc->start_pkt_count %d\n", _hc->start_pkt_count);
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
+ DWC_DEBUGPL(DBG_HCDV, " _hc->max_packet %d\n", _hc->max_packet);
+ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred);
+ DWC_DEBUGPL(DBG_HCDV, " _urb->actual_length %d\n", _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " _urb->transfer_buffer_length %d\n",
+ _urb->transfer_buffer_length);
+ }
+#endif
+}
+
+/**
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NAK Received--\n", _hc->hc_num);
+
+ /*
+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+ * interrupt. Re-start the SSPLIT transfer.
+ */
+ if (_hc->do_split) {
+ if (_hc->complete_split) {
+ _qtd->error_count = 0;
+ }
+ _qtd->complete_split = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+ goto handle_nak_done;
+ }
+
+ switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ if (_hcd->core_if->dma_enable && _hc->ep_is_in) {
+ /*
+ * NAK interrupts are enabled on bulk/control IN
+ * transfers in DMA mode for the sole purpose of
+ * resetting the error count after a transaction error
+ * occurs. The core will continue transferring data.
+ */
+ _qtd->error_count = 0;
+ goto handle_nak_done;
+ }
+
+ /*
+ * NAK interrupts normally occur during OUT transfers in DMA
+ * or Slave mode. For IN transfers, more requests will be
+ * queued as request queue space is available.
+ */
+ _qtd->error_count = 0;
+
+ if (!_hc->qh->ping_state) {
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+ _qtd, DWC_OTG_HC_XFER_NAK);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ if (_qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ _hc->qh->ping_state = 1;
+ }
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will
+ * start/continue.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ _qtd->error_count = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ /* Should never get called for isochronous transfers. */
+ BUG();
+ break;
+ }
+
+ handle_nak_done:
+ disable_hc_int(_hc_regs,nak);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "ACK Received--\n", _hc->hc_num);
+
+ if (_hc->do_split) {
+ /*
+ * Handle ACK on SSPLIT.
+ * ACK should not occur in CSPLIT.
+ */
+ if ((!_hc->ep_is_in) && (_hc->data_pid_start != DWC_OTG_HC_PID_SETUP)) {
+ _qtd->ssplit_out_xfer_count = _hc->xfer_len;
+ }
+ if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !_hc->ep_is_in)) {
+ /* Don't need complete for isochronous out transfers. */
+ _qtd->complete_split = 1;
+ }
+
+ /* ISOC OUT */
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_hc->ep_is_in) {
+ switch (_hc->xact_pos) {
+ case DWC_HCSPLIT_XACTPOS_ALL:
+ break;
+ case DWC_HCSPLIT_XACTPOS_END:
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ _qtd->isoc_split_offset = 0;
+ break;
+ case DWC_HCSPLIT_XACTPOS_BEGIN:
+ case DWC_HCSPLIT_XACTPOS_MID:
+ /*
+ * For BEGIN or MID, calculate the length for
+ * the next microframe to determine the correct
+ * SSPLIT token, either MID or END.
+ */
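+			/*
+			 * Each start split for a full speed isochronous OUT
+			 * endpoint can carry at most 188 bytes per microframe,
+			 * which is presumably why the split offset below
+			 * advances in fixed 188 byte steps.
+			 */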
+ do {
+ struct usb_iso_packet_descriptor *frame_desc;
+
+ frame_desc = &_qtd->urb->iso_frame_desc[_qtd->isoc_frame_index];
+ _qtd->isoc_split_offset += 188;
+
+ if ((frame_desc->length - _qtd->isoc_split_offset) <= 188) {
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END;
+ }
+ else {
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID;
+ }
+
+ } while(0);
+ break;
+ }
+ } else {
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+ }
+ } else {
+ _qtd->error_count = 0;
+
+ if (_hc->qh->ping_state) {
+ _hc->qh->ping_state = 0;
+ /*
+ * Halt the channel so the transfer can be re-started
+ * from the appropriate point. This only happens in
+ * Slave mode. In DMA mode, the ping_state is cleared
+ * when the transfer is started because the core
+ * automatically executes the PING, then the transfer.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+ }
+ }
+
+ /*
+ * If the ACK occurred when _not_ in the PING state, let the channel
+ * continue transferring data after clearing the error count.
+ */
+
+ disable_hc_int(_hc_regs,ack);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "NYET Received--\n", _hc->hc_num);
+
+ /*
+ * NYET on CSPLIT
+ * re-do the CSPLIT immediately on non-periodic
+ */
+ if ((_hc->do_split) && (_hc->complete_split)) {
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) ||
+ (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) {
+ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
+
+ if (dwc_full_frame_num(frnum) !=
+ dwc_full_frame_num(_hc->qh->sched_frame)) {
+ /*
+ * No longer in the same full speed frame.
+ * Treat this as a transaction error.
+ */
+#if 0
+ /** @todo Fix system performance so this can
+ * be treated as an error. Right now complete
+ * splits cannot be scheduled precisely enough
+ * due to other system activity, so this error
+ * occurs regularly in Slave mode.
+ */
+ _qtd->error_count++;
+#endif
+ _qtd->complete_split = 0;
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+ /** @todo add support for isoc release */
+ goto handle_nyet_done;
+ }
+ }
+
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
+ goto handle_nyet_done;
+ }
+
+ _hc->qh->ping_state = 1;
+ _qtd->error_count = 0;
+
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, _qtd,
+ DWC_OTG_HC_XFER_NYET);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+
+ /*
+ * Halt the channel and re-start the transfer so the PING
+ * protocol will start.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
+
+handle_nyet_done:
+ disable_hc_int(_hc_regs,nyet);
+ clear_hc_int(_hc_regs, nyet);
+ return 1;
+}
+
+/**
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Babble Error--\n", _hc->hc_num);
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EOVERFLOW);
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_BABBLE_ERR, must_free);
+ } else {
+ dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_BABBLE_ERR);
+ halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+ }
+ disable_hc_int(_hc_regs,bblerr);
+ return 1;
+}
+
+/**
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t *_hc,
+ dwc_otg_hc_regs_t *_hc_regs,
+ dwc_otg_qtd_t *_qtd)
+{
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+ struct urb *urb = _qtd->urb;
+
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "AHB Error--\n", _hc->hc_num);
+
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ hcdma = dwc_read_reg32(&_hc_regs->hcdma);
+
+ DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num);
+ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
+ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe));
+ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+ (usb_pipein(urb->pipe) ? "IN" : "OUT"));
+ DWC_ERROR(" Endpoint type: %s\n",
+ ({char *pipetype;
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL: pipetype = "CONTROL"; break;
+ case PIPE_BULK: pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
+ default: pipetype = "UNKNOWN"; break;
+ }; pipetype;}));
+ DWC_ERROR(" Speed: %s\n",
+ ({char *speed;
+ switch (urb->dev->speed) {
+ case USB_SPEED_HIGH: speed = "HIGH"; break;
+ case USB_SPEED_FULL: speed = "FULL"; break;
+ case USB_SPEED_LOW: speed = "LOW"; break;
+ default: speed = "UNKNOWN"; break;
+ }; speed;}));
+ DWC_ERROR(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length);
+ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
+ urb->transfer_buffer, (void *)(u32)urb->transfer_dma);
+ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
+ urb->setup_packet, (void *)(u32)urb->setup_dma);
+ DWC_ERROR(" Interval: %d\n", urb->interval);
+
+ dwc_otg_hcd_complete_urb(_hcd, urb, -EIO);
+
+ /*
+ * Force a channel halt. Don't call halt_channel because that won't
+ * write to the HCCHARn register in DMA mode to force the halt.
+ */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_ERR);
+
+ disable_hc_int(_hc_regs,ahberr);
+ return 1;
+}
+
+/**
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transaction Error--\n", _hc->hc_num);
+
+ switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ _qtd->error_count++;
+ if (!_hc->qh->ping_state) {
+ update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+ _qtd, DWC_OTG_HC_XFER_XACT_ERR);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ if (!_hc->ep_is_in && _qtd->urb->dev->speed == USB_SPEED_HIGH) {
+ _hc->qh->ping_state = 1;
+ }
+ }
+
+ /*
+ * Halt the channel so the transfer can be re-started from
+ * the appropriate point or the PING protocol will start.
+ */
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ _qtd->error_count++;
+ if ((_hc->do_split) && (_hc->complete_split)) {
+ _qtd->complete_split = 0;
+ }
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_XACT_ERR);
+
+ halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+ }
+ break;
+ }
+
+
+ disable_hc_int(_hc_regs,xacterr);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Frame Overrun--\n", _hc->hc_num);
+
+ switch (usb_pipetype(_qtd->urb->pipe)) {
+ case PIPE_CONTROL:
+ case PIPE_BULK:
+ break;
+ case PIPE_INTERRUPT:
+ halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ dwc_otg_halt_status_e halt_status;
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_FRAME_OVERRUN);
+
+ halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+ }
+ break;
+ }
+
+ disable_hc_int(_hc_regs,frmovrun);
+
+ return 1;
+}
+
+/**
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Data Toggle Error--\n", _hc->hc_num);
+
+ if (_hc->ep_is_in) {
+ _qtd->error_count = 0;
+ } else {
+ DWC_ERROR("Data Toggle Error on OUT transfer,"
+ "channel %d\n", _hc->hc_num);
+ }
+
+ disable_hc_int(_hc_regs,datatglerr);
+
+ return 1;
+}
+
+#ifdef DEBUG
+/**
+ * This function is for debug only. It checks that a valid halt status is set
+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ * @return 1 if halt status is ok, 0 otherwise.
+ */
+static inline int halt_status_ok(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ hcsplt_data_t hcsplt;
+
+ if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
+ /*
+ * This code is here only as a check. This condition should
+ * never happen. Ignore the halt if it does occur.
+ */
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+ hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+ DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
+ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
+ "hcint 0x%08x, hcintmsk 0x%08x, "
+ "hcsplt 0x%08x, qtd->complete_split %d\n",
+ __func__, _hc->hc_num, hcchar.d32, hctsiz.d32,
+ hcint.d32, hcintmsk.d32,
+ hcsplt.d32, _qtd->complete_split);
+
+ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
+ __func__, _hc->hc_num);
+ DWC_WARN("\n");
+ clear_hc_int(_hc_regs,chhltd);
+ return 0;
+ }
+
+ /*
+ * This code is here only as a check. hcchar.chdis should
+ * never be set when the halt interrupt occurs. Halt the
+ * channel again if it does occur.
+ */
+ hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+ if (hcchar.b.chdis) {
+ DWC_WARN("%s: hcchar.chdis set unexpectedly, "
+ "hcchar 0x%08x, trying to halt again\n",
+ __func__, hcchar.d32);
+ clear_hc_int(_hc_regs,chhltd);
+ _hc->halt_pending = 0;
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ return 0;
+ }
+
+ return 1;
+}
+#endif
+
+/**
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+
+ if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+ _hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+ /*
+ * Just release the channel. A dequeue can happen on a
+ * transfer timeout. In the case of an AHB Error, the channel
+ * was forced to halt because there's no way to gracefully
+ * recover.
+ */
+ release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ return;
+ }
+
+ /* Read the HCINTn register to determine the cause for the halt. */
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+
+ if (hcint.b.xfercomp) {
+ /** @todo This is here because of a possible hardware bug. Spec
+ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
+ * interrupt w/ACK bit set should occur, but I only see the
+ * XFERCOMP bit, even with it masked out. This is a workaround
+ * for that behavior. Should fix this when hardware is fixed.
+ */
+ if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_hc->ep_is_in)) {
+ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ }
+ handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.stall) {
+ handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.xacterr) {
+ /*
+ * Must handle xacterr before nak or ack. Could get a xacterr
+ * at the same time as either of these on a BULK/CONTROL OUT
+ * that started with a PING. The xacterr takes precedence.
+ */
+ handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.nyet) {
+ /*
+ * Must handle nyet before nak or ack. Could get a nyet at the
+ * same time as either of those on a BULK/CONTROL OUT that
+ * started with a PING. The nyet takes precedence.
+ */
+ handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.bblerr) {
+ handle_hc_babble_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.frmovrun) {
+ handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.datatglerr) {
+ handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ _hc->qh->data_toggle = 0;
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ } else if (hcint.b.nak && !hcintmsk.b.nak) {
+ /*
+ * If nak is not masked, it's because a non-split IN transfer
+ * is in an error state. In that case, the nak is handled by
+ * the nak interrupt handler, not here. Handle nak here for
+ * BULK/CONTROL OUT transfers, which halt on a NAK to allow
+ * rewinding the buffer pointer.
+ */
+ handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else if (hcint.b.ack && !hcintmsk.b.ack) {
+ /*
+ * If ack is not masked, it's because a non-split IN transfer
+ * is in an error state. In that case, the ack is handled by
+ * the ack interrupt handler, not here. Handle ack here for
+ * split transfers. Start splits halt on ACK.
+ */
+ handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else {
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * A periodic transfer halted with no other channel
+ * interrupts set. Assume it was halted by the core
+ * because it could not be completed in its scheduled
+ * (micro)frame.
+ */
+#ifdef DEBUG
+ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n",
+ __func__, _hc->hc_num);
+#endif /* */
+ halt_channel(_hcd, _hc, _qtd,
+ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, must_free);
+ } else {
+#ifdef DEBUG
+ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
+ "for halting is unknown, nyet %d, hcint 0x%08x, intsts 0x%08x\n",
+ __func__, _hc->hc_num, hcint.b.nyet, hcint.d32,
+ dwc_read_reg32(&_hcd->core_if->core_global_regs->gintsts));
+#endif
+ halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ }
+ }
+}
+
+/**
+ * Handles a host channel Channel Halted interrupt.
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *_hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Channel Halted--\n", _hc->hc_num);
+
+ if (_hcd->core_if->dma_enable) {
+ handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs, _qtd, must_free);
+ } else {
+#ifdef DEBUG
+ if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd, must_free)) {
+ return 1;
+ }
+#endif /* */
+ release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+ }
+
+ return 1;
+}
+
+/** Handles interrupt for a specific Host Channel */
+int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num)
+{
+ int must_free = 0;
+ int retval = 0;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ dwc_hc_t *hc;
+ dwc_otg_hc_regs_t *hc_regs;
+ dwc_otg_qtd_t *qtd;
+
+ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", _num);
+
+ hc = _dwc_otg_hcd->hc_ptr_array[_num];
+ hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_num];
+ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+ hcint.d32 = hcint.d32 & hcintmsk.d32;
+
+ if (!_dwc_otg_hcd->core_if->dma_enable) {
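+		/*
+		 * In Slave mode, service the specific interrupt cause first:
+		 * 0x2 is the value of hcint when only the chhltd bit is set,
+		 * so if any other cause is pending, clear chhltd here and let
+		 * the individual handlers below run before the halt itself is
+		 * processed.
+		 */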
+ if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) {
+ hcint.b.chhltd = 0;
+ }
+ }
+
+ if (hcint.b.xfercomp) {
+ retval |= handle_hc_xfercomp_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ /*
+ * If NYET occurred at same time as Xfer Complete, the NYET is
+ * handled by the Xfer Complete interrupt handler. Don't want
+ * to call the NYET interrupt handler in this case.
+ */
+ hcint.b.nyet = 0;
+ }
+ if (hcint.b.chhltd) {
+ retval |= handle_hc_chhltd_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.ahberr) {
+ retval |= handle_hc_ahberr_intr(_dwc_otg_hcd, hc, hc_regs, qtd);
+ }
+ if (hcint.b.stall) {
+ retval |= handle_hc_stall_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.nak) {
+ retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.ack) {
+ retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.nyet) {
+ retval |= handle_hc_nyet_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.xacterr) {
+ retval |= handle_hc_xacterr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.bblerr) {
+ retval |= handle_hc_babble_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.frmovrun) {
+ retval |= handle_hc_frmovrun_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+ if (hcint.b.datatglerr) {
+ retval |= handle_hc_datatglerr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+ }
+
+ /*
+ * Logic to free the qtd here, at the end of the hc intr
+ * processing, if the handling of this interrupt determined
+ * that it needs to be freed.
+ */
+ if (must_free) {
+ /* Free the qtd here now that we are done using it. */
+ dwc_otg_hcd_qtd_free(qtd);
+ }
+ return retval;
+}
+
+#endif /* DWC_DEVICE_ONLY */
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c
new file mode 100644
index 0000000000..fcb5ce634a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c
@@ -0,0 +1,794 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_queue.c $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 537387 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+/**
+ * This function allocates and initializes a QH.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] _urb Holds the information about the device/endpoint that we need
+ * to initialize the QH.
+ *
+ * @return Returns pointer to the newly allocated QH, or NULL on error. */
+dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_hcd, struct urb *_urb)
+{
+ dwc_otg_qh_t *qh;
+
+ /* Allocate memory */
+ /** @todo add memflags argument */
+ qh = dwc_otg_hcd_qh_alloc ();
+ if (qh == NULL) {
+ return NULL;
+ }
+
+ dwc_otg_hcd_qh_init (_hcd, qh, _urb);
+ return qh;
+}
+
+/** Free each QTD in the QH's QTD-list then free the QH. QH should already be
+ * removed from a list. QTD list should already be empty if called from URB
+ * Dequeue.
+ *
+ * @param[in] _qh The QH to free.
+ */
+void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh)
+{
+ dwc_otg_qtd_t *qtd;
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* Free each QTD in the QTD list */
+ local_irq_save (flags);
+ for (pos = _qh->qtd_list.next;
+ pos != &_qh->qtd_list;
+ pos = _qh->qtd_list.next)
+ {
+ list_del (pos);
+ qtd = dwc_list_to_qtd (pos);
+ dwc_otg_hcd_qtd_free (qtd);
+ }
+ local_irq_restore (flags);
+
+ kfree (_qh);
+ return;
+}
+
+/** Initializes a QH structure.
+ *
+ * @param[in] _hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] _qh The QH to init.
+ * @param[in] _urb Holds the information about the device/endpoint that we need
+ * to initialize the QH. */
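+/*
+ * Number of (micro)frames to schedule into the future when a periodic QH is
+ * first initialized, to absorb scheduling latency (see the sched_frame
+ * assignment below).
+ */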
+#define SCHEDULE_SLOP 10
+void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, struct urb *_urb)
+{
+ memset (_qh, 0, sizeof (dwc_otg_qh_t));
+
+ /* Initialize QH */
+ switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL:
+ _qh->ep_type = USB_ENDPOINT_XFER_CONTROL;
+ break;
+ case PIPE_BULK:
+ _qh->ep_type = USB_ENDPOINT_XFER_BULK;
+ break;
+ case PIPE_ISOCHRONOUS:
+ _qh->ep_type = USB_ENDPOINT_XFER_ISOC;
+ break;
+ case PIPE_INTERRUPT:
+ _qh->ep_type = USB_ENDPOINT_XFER_INT;
+ break;
+ }
+
+ _qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0;
+
+ _qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+ _qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(usb_pipein(_urb->pipe)));
+ INIT_LIST_HEAD(&_qh->qtd_list);
+ INIT_LIST_HEAD(&_qh->qh_list_entry);
+ _qh->channel = NULL;
+
+	/* FS/LS Endpoint on HS Hub
+ * NOT virtual root hub */
+ _qh->do_split = 0;
+ _qh->speed = _urb->dev->speed;
+ if (((_urb->dev->speed == USB_SPEED_LOW) ||
+ (_urb->dev->speed == USB_SPEED_FULL)) &&
+ (_urb->dev->tt) && (_urb->dev->tt->hub) && (_urb->dev->tt->hub->devnum != 1)) {
+ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n",
+ usb_pipeendpoint(_urb->pipe), _urb->dev->tt->hub->devnum,
+ _urb->dev->ttport);
+ _qh->do_split = 1;
+ }
+
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT ||
+ _qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
+ /* Compute scheduling parameters once and save them. */
+ hprt0_data_t hprt;
+
+ /** @todo Account for split transfers in the bus time. */
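+		/*
+		 * _qh->maxp is the raw wMaxPacketSize value: bits 10:0 hold
+		 * the packet size and bits 12:11 the high bandwidth
+		 * multiplier, which dwc_max_packet() and dwc_hb_mult()
+		 * presumably unpack.
+		 */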
+ int bytecount = dwc_hb_mult(_qh->maxp) * dwc_max_packet(_qh->maxp);
+ _qh->usecs = NS_TO_US(usb_calc_bus_time(_urb->dev->speed,
+ usb_pipein(_urb->pipe),
+ (_qh->ep_type == USB_ENDPOINT_XFER_ISOC),bytecount));
+
+ /* Start in a slightly future (micro)frame. */
+ _qh->sched_frame = dwc_frame_num_inc(_hcd->frame_number, SCHEDULE_SLOP);
+ _qh->interval = _urb->interval;
+#if 0
+ /* Increase interrupt polling rate for debugging. */
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ _qh->interval = 8;
+ }
+#endif
+ hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if->hprt0);
+ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) &&
+ ((_urb->dev->speed == USB_SPEED_LOW) ||
+ (_urb->dev->speed == USB_SPEED_FULL)))
+ {
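+			/*
+			 * A full/low speed device behind a high speed hub is
+			 * scheduled in microframes: convert the frame based
+			 * interval to microframes and align the start to the
+			 * last microframe of a full frame, which matches how
+			 * split transfers are rescheduled in
+			 * dwc_otg_hcd_qh_deactivate().
+			 */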
+ _qh->interval *= 8;
+ _qh->sched_frame |= 0x7;
+ _qh->start_split_frame = _qh->sched_frame;
+ }
+ }
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", _qh);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n",
+ _urb->dev->devnum);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n",
+ usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) == USB_DIR_IN ? "IN" : "OUT");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n",
+ ({ char *speed; switch (_urb->dev->speed) {
+ case USB_SPEED_LOW: speed = "low"; break;
+ case USB_SPEED_FULL: speed = "full"; break;
+ case USB_SPEED_HIGH: speed = "high"; break;
+ default: speed = "?"; break;
+ }; speed;}));
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",
+ ({ char *type; switch (_qh->ep_type) {
+ case USB_ENDPOINT_XFER_ISOC: type = "isochronous"; break;
+ case USB_ENDPOINT_XFER_INT: type = "interrupt"; break;
+ case USB_ENDPOINT_XFER_CONTROL: type = "control"; break;
+ case USB_ENDPOINT_XFER_BULK: type = "bulk"; break;
+ default: type = "?"; break;
+ }; type;}));
+#ifdef DEBUG
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n",
+ _qh->usecs);
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n",
+ _qh->interval);
+ }
+#endif
+
+ return;
+}
+
+/**
+ * Microframe scheduler
+ * The total time claimed in each microframe is tracked in hcd->frame_usecs,
+ * each QH's share is kept in qh->frame_usecs, and when a QH surrenders its
+ * slot the time is donated back.
+ */
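+/*
+ * Periodic bandwidth available in each of the eight microframes of a frame,
+ * in microseconds. A microframe is 125 us; roughly 80% (100 us) is made
+ * available to periodic transfers in the first six microframes, 30 us in the
+ * seventh and none in the last, which is apparently left for non-periodic
+ * traffic.
+ */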
+const unsigned short max_uframe_usecs[]={ 100, 100, 100, 100, 100, 100, 30, 0 };
+
+/*
+ * called from dwc_otg_hcd.c:dwc_otg_hcd_init
+ */
+int init_hcd_usecs(dwc_otg_hcd_t *_hcd)
+{
+ int i;
+ for (i=0; i<8; i++) {
+ _hcd->frame_usecs[i] = max_uframe_usecs[i];
+ }
+ return 0;
+}
+
+static int find_single_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int i;
+ unsigned short utime;
+ int t_left;
+ int ret;
+ int done;
+
+ ret = -1;
+ utime = _qh->usecs;
+ t_left = utime;
+ i = 0;
+ done = 0;
+ while (done == 0) {
+ /* At the start _hcd->frame_usecs[i] = max_uframe_usecs[i]; */
+ if (utime <= _hcd->frame_usecs[i]) {
+ _hcd->frame_usecs[i] -= utime;
+ _qh->frame_usecs[i] += utime;
+ t_left -= utime;
+ ret = i;
+ done = 1;
+ return ret;
+ } else {
+ i++;
+ if (i == 8) {
+ done = 1;
+ ret = -1;
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * use this for FS apps that can span multiple uframes
+ */
+static int find_multi_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int i;
+ int j;
+ unsigned short utime;
+ int t_left;
+ int ret;
+ int done;
+ unsigned short xtime;
+
+ ret = -1;
+ utime = _qh->usecs;
+ t_left = utime;
+ i = 0;
+ done = 0;
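+	/*
+	 * Scan the microframes for a starting slot i such that the free time
+	 * in slots i, i+1, ... adds up to the requested utime; any slot
+	 * between the first and the last of the run must still be completely
+	 * free. On success the claimed time is subtracted from
+	 * _hcd->frame_usecs and credited to _qh->frame_usecs.
+	 */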
+loop:
+ while (done == 0) {
+ if(_hcd->frame_usecs[i] <= 0) {
+ i++;
+ if (i == 8) {
+ done = 1;
+ ret = -1;
+ }
+ goto loop;
+ }
+
+ /*
+	 * we need n consecutive slots: use i as the start slot and check that
+	 * the free time in slots i, i+1, ... adds up to enough time (for now)
+ */
+ xtime= _hcd->frame_usecs[i];
+ for (j = i+1 ; j < 8 ; j++ ) {
+ /*
+ * if we add this frame remaining time to xtime we may
+ * be OK, if not we need to test j for a complete frame
+ */
+ if ((xtime+_hcd->frame_usecs[j]) < utime) {
+ if (_hcd->frame_usecs[j] < max_uframe_usecs[j]) {
+ j = 8;
+ ret = -1;
+ continue;
+ }
+ }
+ if (xtime >= utime) {
+ ret = i;
+ j = 8; /* stop loop with a good value ret */
+ continue;
+ }
+ /* add the frame time to x time */
+ xtime += _hcd->frame_usecs[j];
+ /* we must have a fully available next frame or break */
+ if ((xtime < utime)
+ && (_hcd->frame_usecs[j] == max_uframe_usecs[j])) {
+ ret = -1;
+ j = 8; /* stop loop with a bad value ret */
+ continue;
+ }
+ }
+ if (ret >= 0) {
+ t_left = utime;
+ for (j = i; (t_left>0) && (j < 8); j++ ) {
+ t_left -= _hcd->frame_usecs[j];
+ if ( t_left <= 0 ) {
+ _qh->frame_usecs[j] += _hcd->frame_usecs[j] + t_left;
+ _hcd->frame_usecs[j]= -t_left;
+ ret = i;
+ done = 1;
+ } else {
+ _qh->frame_usecs[j] += _hcd->frame_usecs[j];
+ _hcd->frame_usecs[j] = 0;
+ }
+ }
+ } else {
+ i++;
+ if (i == 8) {
+ done = 1;
+ ret = -1;
+ }
+ }
+ }
+ return ret;
+}
+
+static int find_uframe(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int ret;
+ ret = -1;
+
+ if (_qh->speed == USB_SPEED_HIGH) {
+		/* a high speed transfer must fit within a single microframe */
+ ret = find_single_uframe(_hcd, _qh);
+ } else {
+		/* a full speed transfer may need a sequence of microframes */
+ ret = find_multi_uframe(_hcd, _qh);
+ }
+ return ret;
+}
+
+/**
+ * Checks that the max transfer size allowed in a host channel is large enough
+ * to handle the maximum data transfer in a single (micro)frame for a periodic
+ * transfer.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for a periodic endpoint.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_max_xfer_size(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh)
+{
+ int status;
+ uint32_t max_xfer_size;
+ uint32_t max_channel_xfer_size;
+
+ status = 0;
+
+ max_xfer_size = dwc_max_packet(_qh->maxp) * dwc_hb_mult(_qh->maxp);
+ max_channel_xfer_size = _hcd->core_if->core_params->max_transfer_size;
+
+ if (max_xfer_size > max_channel_xfer_size) {
+ DWC_NOTICE("%s: Periodic xfer length %d > "
+ "max xfer length for channel %d\n",
+ __func__, max_xfer_size, max_channel_xfer_size);
+ status = -ENOSPC;
+ }
+
+ return status;
+}
+
+/**
+ * Schedules an interrupt or isochronous transfer in the periodic schedule.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for the periodic transfer. The QH should already contain the
+ * scheduling information.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int schedule_periodic(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh)
+{
+ int status = 0;
+
+ int frame;
+ status = find_uframe(_hcd, _qh);
+ frame = -1;
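+	/*
+	 * find_uframe() returns the index (0..7) of the first suitable
+	 * microframe, or -1 if there is no room. The low three bits of
+	 * sched_frame are then set to the slot just before the chosen one
+	 * (index 0 wraps to 7), and status is normalised to 0 on success.
+	 */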
+ if (status == 0) {
+ frame = 7;
+ } else {
+ if (status > 0 )
+ frame = status-1;
+ }
+
+ /* Set the new frame up */
+ if (frame > -1) {
+ _qh->sched_frame &= ~0x7;
+ _qh->sched_frame |= (frame & 7);
+ }
+
+ if (status != -1 )
+ status = 0;
+ if (status) {
+ DWC_NOTICE("%s: Insufficient periodic bandwidth for "
+ "periodic transfer.\n", __func__);
+ return status;
+ }
+
+ status = check_max_xfer_size(_hcd, _qh);
+ if (status) {
+ DWC_NOTICE("%s: Channel max transfer size too small "
+ "for periodic transfer.\n", __func__);
+ return status;
+ }
+
+ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sched_inactive);
+
+
+ /* Update claimed usecs per (micro)frame. */
+ _hcd->periodic_usecs += _qh->usecs;
+
+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated += _qh->usecs / _qh->interval;
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs++;
+ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ } else {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs++;
+ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ }
+
+ return status;
+}
+
+/**
+ * This function adds a QH to either the non periodic or periodic schedule if
+ * it is not already in the schedule. If the QH is already in the schedule, no
+ * action is taken.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh)
+{
+ unsigned long flags;
+ int status = 0;
+
+ local_irq_save(flags);
+
+ if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */
+ goto done;
+ }
+
+ /* Add the new QH to the appropriate schedule */
+ if (dwc_qh_is_non_per(_qh)) {
+ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry, &_hcd->non_periodic_sched_inactive);
+ } else {
+ status = schedule_periodic(_hcd, _qh);
+ }
+
+ done:
+ local_irq_restore(flags);
+
+ return status;
+}
+
+/**
+ * This function adds a QH to the non periodic deferred schedule.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_add_deferred(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */
+ goto done;
+ }
+
+ /* Add the new QH to the non periodic deferred schedule */
+ if (dwc_qh_is_non_per(_qh)) {
+ list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_deferred);
+ }
+done:
+ local_irq_restore(flags);
+ return 0;
+}
+
+/**
+ * Removes an interrupt or isochronous transfer from the periodic schedule.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for the periodic transfer.
+ */
+static void deschedule_periodic(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh)
+{
+ int i;
+ list_del_init(&_qh->qh_list_entry);
+
+
+ /* Update claimed usecs per (micro)frame. */
+ _hcd->periodic_usecs -= _qh->usecs;
+
+ for (i = 0; i < 8; i++) {
+ _hcd->frame_usecs[i] += _qh->frame_usecs[i];
+ _qh->frame_usecs[i] = 0;
+ }
+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated -= _qh->usecs / _qh->interval;
+
+ if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs--;
+ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ } else {
+ hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs--;
+ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n",
+ _qh, _qh->usecs, _qh->interval);
+ }
+}
+
+/**
+ * Removes a QH from either the non-periodic or periodic schedule. Memory is
+ * not freed.
+ *
+ * @param[in] _hcd The HCD state structure.
+ * @param[in] _qh QH to remove from schedule. */
+void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if (list_empty(&_qh->qh_list_entry)) {
+ /* QH is not in a schedule. */
+ goto done;
+ }
+
+ if (dwc_qh_is_non_per(_qh)) {
+ if (_hcd->non_periodic_qh_ptr == &_qh->qh_list_entry) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ list_del_init(&_qh->qh_list_entry);
+ } else {
+ deschedule_periodic(_hcd, _qh);
+ }
+
+ done:
+ local_irq_restore(flags);
+}
+
+/**
+ * Defers a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the deferred non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ */
+int dwc_otg_hcd_qh_deferr(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh, int delay)
+{
+ int deact = 1;
+ unsigned long flags;
+ local_irq_save(flags);
+ if (dwc_qh_is_non_per(_qh)) {
+ _qh->sched_frame =
+ dwc_frame_num_inc(_hcd->frame_number,
+ delay);
+ _qh->channel = NULL;
+ _qh->qtd_in_process = NULL;
+ deact = 0;
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to deferred non-periodic schedule. */
+ dwc_otg_hcd_qh_add_deferred(_hcd, _qh);
+ }
+ }
+ local_irq_restore(flags);
+ return deact;
+}
+
+/**
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int sched_next_periodic_split)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+
+ if (dwc_qh_is_non_per(_qh)) {
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to inactive non-periodic schedule. */
+ dwc_otg_hcd_qh_add(_hcd, _qh);
+ }
+ } else {
+ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
+
+ if (_qh->do_split) {
+ /* Schedule the next continuing periodic split transfer */
+ if (sched_next_periodic_split) {
+
+ _qh->sched_frame = frame_number;
+ if (dwc_frame_num_le(frame_number,
+ dwc_frame_num_inc(_qh->start_split_frame, 1))) {
+ /*
+ * Allow one frame to elapse after start
+ * split microframe before scheduling
+ * complete split, but DONT if we are
+ * doing the next start split in the
+ * same frame for an ISOC out.
+ */
+ if ((_qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (_qh->ep_is_in != 0)) {
+ _qh->sched_frame = dwc_frame_num_inc(_qh->sched_frame, 1);
+ }
+ }
+ } else {
+ _qh->sched_frame = dwc_frame_num_inc(_qh->start_split_frame,
+ _qh->interval);
+ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
+ _qh->sched_frame = frame_number;
+ }
+ _qh->sched_frame |= 0x7;
+ _qh->start_split_frame = _qh->sched_frame;
+ }
+ } else {
+ _qh->sched_frame = dwc_frame_num_inc(_qh->sched_frame, _qh->interval);
+ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
+ _qh->sched_frame = frame_number;
+ }
+ }
+
+ if (list_empty(&_qh->qtd_list)) {
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ } else {
+ /*
+ * Remove from periodic_sched_queued and move to
+ * appropriate queue.
+ */
+ if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
+ list_move(&_qh->qh_list_entry,
+ &_hcd->periodic_sched_ready);
+ } else {
+ list_move(&_qh->qh_list_entry,
+ &_hcd->periodic_sched_inactive);
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * This function allocates and initializes a QTD.
+ *
+ * @param[in] _urb The URB to create a QTD from. Each URB-QTD pair will end up
+ * pointing to each other so each pair should have a unique correlation.
+ *
+ * @return Returns pointer to the newly allocated QTD, or NULL on error. */
+dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *_urb)
+{
+ dwc_otg_qtd_t *qtd;
+
+ qtd = dwc_otg_hcd_qtd_alloc ();
+ if (qtd == NULL) {
+ return NULL;
+ }
+
+ dwc_otg_hcd_qtd_init (qtd, _urb);
+ return qtd;
+}
+
+/**
+ * Initializes a QTD structure.
+ *
+ * @param[in] _qtd The QTD to initialize.
+ * @param[in] _urb The URB to use for initialization. */
+void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *_qtd, struct urb *_urb)
+{
+ memset (_qtd, 0, sizeof (dwc_otg_qtd_t));
+ _qtd->urb = _urb;
+ if (usb_pipecontrol(_urb->pipe)) {
+ /*
+ * The only time the QTD data toggle is used is on the data
+ * phase of control transfers. This phase always starts with
+ * DATA1.
+ */
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ _qtd->control_phase = DWC_OTG_CONTROL_SETUP;
+ }
+
+ /* start split */
+ _qtd->complete_split = 0;
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ _qtd->isoc_split_offset = 0;
+
+	/* Store the QTD pointer in the URB so the URB can be mapped back to its QTD. */
+ _urb->hcpriv = _qtd;
+ return;
+}
+
+/**
+ * This function adds a QTD to the QTD-list of a QH. It will find the correct
+ * QH to place the QTD into. If it does not find a QH, then it will create a
+ * new QH. If the QH to which the QTD is added is not currently scheduled, it
+ * is placed into the proper schedule based on its EP type.
+ *
+ * @param[in] _qtd The QTD to add
+ * @param[in] _dwc_otg_hcd The DWC HCD structure
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t * _qtd, dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ struct usb_host_endpoint *ep;
+ dwc_otg_qh_t *qh;
+ unsigned long flags;
+ int retval = 0;
+ struct urb *urb = _qtd->urb;
+
+ local_irq_save(flags);
+
+ /*
+ * Get the QH which holds the QTD-list to insert to. Create QH if it
+ * doesn't exist.
+ */
+ ep = dwc_urb_to_endpoint(urb);
+ qh = (dwc_otg_qh_t *)ep->hcpriv;
+ if (qh == NULL) {
+ qh = dwc_otg_hcd_qh_create (_dwc_otg_hcd, urb);
+ if (qh == NULL) {
+ retval = -1;
+ goto done;
+ }
+ ep->hcpriv = qh;
+ }
+
+ _qtd->qtd_qh_ptr = qh;
+ retval = dwc_otg_hcd_qh_add(_dwc_otg_hcd, qh);
+ if (retval == 0) {
+ list_add_tail(&_qtd->qtd_list_entry, &qh->qtd_list);
+ }
+
+ done:
+ local_irq_restore(flags);
+ return retval;
+}
+
+#endif /* DWC_DEVICE_ONLY */
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.c b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.c
new file mode 100644
index 0000000000..e45da853ea
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.c
@@ -0,0 +1,103 @@
+/******************************************************************************
+**
+** FILE NAME : dwc_otg_ifx.c
+** PROJECT : Twinpass/Danube
+** MODULES : DWC OTG USB
+**
+** DATE : 12 August 2007
+** AUTHOR : Sung Winder
+** DESCRIPTION : Platform specific initialization.
+** COPYRIGHT : Copyright (c) 2007
+** Infineon Technologies AG
+** 2F, No.2, Li-Hsin Rd., Hsinchu Science Park,
+** Hsin-chu City, 300 Taiwan.
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** HISTORY
+** $Date $Author $Comment
+** 12 August 2007 Sung Winder Initial Version
+*******************************************************************************/
+#include "dwc_otg_ifx.h"
+
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/gpio.h>
+
+#include <asm/io.h>
+//#include <asm/mach-ifxmips/ifxmips.h>
+#include <lantiq_soc.h>
+
+#define IFXMIPS_GPIO_BASE_ADDR (0xBE100B00)
+
+#define IFXMIPS_GPIO_P0_OUT ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0010))
+#define IFXMIPS_GPIO_P1_OUT ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0040))
+#define IFXMIPS_GPIO_P0_IN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0014))
+#define IFXMIPS_GPIO_P1_IN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0044))
+#define IFXMIPS_GPIO_P0_DIR ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0018))
+#define IFXMIPS_GPIO_P1_DIR ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0048))
+#define IFXMIPS_GPIO_P0_ALTSEL0 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x001C))
+#define IFXMIPS_GPIO_P1_ALTSEL0 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x004C))
+#define IFXMIPS_GPIO_P0_ALTSEL1 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0020))
+#define IFXMIPS_GPIO_P1_ALTSEL1 ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0050))
+#define IFXMIPS_GPIO_P0_OD ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0024))
+#define IFXMIPS_GPIO_P1_OD ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0054))
+#define IFXMIPS_GPIO_P0_STOFF ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0028))
+#define IFXMIPS_GPIO_P1_STOFF ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0058))
+#define IFXMIPS_GPIO_P0_PUDSEL ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x002C))
+#define IFXMIPS_GPIO_P1_PUDSEL ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x005C))
+#define IFXMIPS_GPIO_P0_PUDEN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0030))
+#define IFXMIPS_GPIO_P1_PUDEN ((u32 *)(IFXMIPS_GPIO_BASE_ADDR + 0x0060))
+
+
+#define writel ltq_w32
+#define readl ltq_r32
+void dwc_otg_power_on (void)
+{
+	// power the USB blocks down before reconfiguring (set the power-down bits)
+ writel(readl(DANUBE_PMU_PWDCR) | 0x41, DANUBE_PMU_PWDCR);
+ // set clock gating
+ if (ltq_is_ase())
+ writel(readl(DANUBE_CGU_IFCCR) & ~0x20, DANUBE_CGU_IFCCR);
+ else
+ writel(readl(DANUBE_CGU_IFCCR) | 0x30, DANUBE_CGU_IFCCR);
+	// power the USB blocks back up (clear the power-down bits)
+ writel(readl(DANUBE_PMU_PWDCR) & ~0x1, DANUBE_PMU_PWDCR);
+ writel(readl(DANUBE_PMU_PWDCR) & ~0x40, DANUBE_PMU_PWDCR);
+ writel(readl(DANUBE_PMU_PWDCR) & ~0x8000, DANUBE_PMU_PWDCR);
+
+#if 1//defined (DWC_HOST_ONLY)
+	// configure the controller as a host (the default)
+ //clear_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG);
+ writel(readl(DANUBE_RCU_UBSCFG) & ~(1<<DANUBE_USBCFG_HDSEL_BIT), DANUBE_RCU_UBSCFG);
+
+ //#elif defined (DWC_DEVICE_ONLY)
+ /* set the controller to the device mode */
+ // set_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG);
+#else
+#error "For Danube/Twinpass, it should be HOST or Device Only."
+#endif
+
+ // set the HC's byte-order to big-endian
+ //set_bit (DANUBE_USBCFG_HOST_END_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG);
+ writel(readl(DANUBE_RCU_UBSCFG) | (1<<DANUBE_USBCFG_HOST_END_BIT), DANUBE_RCU_UBSCFG);
+ //clear_bit (DANUBE_USBCFG_SLV_END_BIT, (volatile unsigned long *)DANUBE_RCU_UBSCFG);
+ writel(readl(DANUBE_RCU_UBSCFG) & ~(1<<DANUBE_USBCFG_SLV_END_BIT), DANUBE_RCU_UBSCFG);
+ //writel(0x400, DANUBE_RCU_UBSCFG);
+
+ // PHY configurations.
+ writel (0x14014, (volatile unsigned long *)0xbe10103c);
+}
+
+int ifx_usb_hc_init(unsigned long base_addr, int irq)
+{
+ return 0;
+}
+
+void ifx_usb_hc_remove(void)
+{
+}
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.h
new file mode 100644
index 0000000000..402d7a6540
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_ifx.h
@@ -0,0 +1,85 @@
+/******************************************************************************
+**
+** FILE NAME : dwc_otg_ifx.h
+** PROJECT : Twinpass/Danube
+** MODULES : DWC OTG USB
+**
+** DATE : 12 April 2007
+** AUTHOR : Sung Winder
+** DESCRIPTION : Platform specific initialization.
+** COPYRIGHT : Copyright (c) 2007
+** Infineon Technologies AG
+** 2F, No.2, Li-Hsin Rd., Hsinchu Science Park,
+** Hsin-chu City, 300 Taiwan.
+**
+** This program is free software; you can redistribute it and/or modify
+** it under the terms of the GNU General Public License as published by
+** the Free Software Foundation; either version 2 of the License, or
+** (at your option) any later version.
+**
+** HISTORY
+** $Date $Author $Comment
+** 12 April 2007 Sung Winder Initial Version
+*******************************************************************************/
+#if !defined(__DWC_OTG_IFX_H__)
+#define __DWC_OTG_IFX_H__
+
+#include <linux/irq.h>
+#include <irq.h>
+
+// 20070316, winder added.
+#ifndef SZ_256K
+#define SZ_256K 0x00040000
+#endif
+
+extern void dwc_otg_power_on (void);
+
+/* FIXME: The current Linux 2.6 tree does not have these header files, but we still need them. */
+// #include <asm/danube/danube.h>
+// #include <asm/ifx/irq.h>
+
+/* winder: the Danube parameters are used as defaults.
+ * They could be made module parameters. */
+#define IFX_USB_IOMEM_BASE 0x1e101000
+#define IFX_USB_IOMEM_SIZE SZ_256K
+#define IFX_USB_IRQ LTQ_USB_INT
+
+/**
+ * This function is called to set correct clock gating and power.
+ * For Twinpass/Danube board.
+ */
+#ifndef DANUBE_RCU_BASE_ADDR
+#define DANUBE_RCU_BASE_ADDR (0xBF203000)
+#endif
+
+#ifndef DANUBE_CGU
+#define DANUBE_CGU (0xBF103000)
+#endif
+#ifndef DANUBE_CGU_IFCCR
+/***CGU Interface Clock Control Register***/
+#define DANUBE_CGU_IFCCR ((volatile u32*)(DANUBE_CGU+ 0x0018))
+#endif
+
+#ifndef DANUBE_PMU
+#define DANUBE_PMU (KSEG1+0x1F102000)
+#endif
+#ifndef DANUBE_PMU_PWDCR
+/* PMU Power down Control Register */
+#define DANUBE_PMU_PWDCR ((volatile u32*)(DANUBE_PMU+0x001C))
+#endif
+
+
+#define DANUBE_RCU_UBSCFG ((volatile u32*)(DANUBE_RCU_BASE_ADDR + 0x18))
+#define DANUBE_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+#define DANUBE_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+#define DANUBE_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end
+
+extern void ltq_mask_and_ack_irq(struct irq_data *d);
+
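+/*
+ * Convenience wrapper that acks a Lantiq interrupt by number. Only the irq
+ * field of the temporary irq_data is filled in, so this relies on
+ * ltq_mask_and_ack_irq() not looking at any other field.
+ */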
+static inline void mask_and_ack_ifx_irq(int x)
+{
+ struct irq_data d;
+ d.irq = x;
+ ltq_mask_and_ack_irq(&d);
+}
+#endif //__DWC_OTG_IFX_H__
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_plat.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_plat.h
new file mode 100644
index 0000000000..727d0c4a52
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_plat.h
@@ -0,0 +1,269 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/platform/dwc_otg_plat.h $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 510301 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_PLAT_H__)
+#define __DWC_OTG_PLAT_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+/**
+ * @file
+ *
+ * This file contains the Platform Specific constants, interfaces
+ * (functions and macros) for Linux.
+ *
+ */
+/*#if !defined(__LINUX__)
+#error "The contents of this file is Linux specific!!!"
+#endif
+*/
+#include <lantiq_soc.h>
+#define writel ltq_w32
+#define readl ltq_r32
+
+/**
+ * Reads the content of a register.
+ *
+ * @param _reg address of register to read.
+ * @return contents of the register.
+ *
+
+ * Usage:<br>
+ * <code>uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl);</code>
+ */
+static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *_reg)
+{
+ return readl(_reg);
+};
+
+/**
+ * Writes a register with a 32 bit value.
+ *
+ * @param _reg address of register to write.
+ * @param _value to write to _reg.
+ *
+ * Usage:<br>
+ * <code>dwc_write_reg32(&dev_regs->dctl, 0); </code>
+ */
+static __inline__ void dwc_write_reg32( volatile uint32_t *_reg, const uint32_t _value)
+{
+ writel( _value, _reg );
+};
+
+/**
+ * This function modifies bit values in a register. Using the
+ * algorithm: (reg_contents & ~clear_mask) | set_mask.
+ *
+ * @param _reg address of register to modify.
+ * @param _clear_mask bit mask to be cleared.
+ * @param _set_mask bit mask to be set.
+ *
+ * Usage:<br>
+ * <code> // Clear the SOF Interrupt Mask bit and <br>
+ * // set the OTG Interrupt mask bit, leaving all others as they were.
+ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);</code>
+ */
+static __inline__
+ void dwc_modify_reg32( volatile uint32_t *_reg, const uint32_t _clear_mask, const uint32_t _set_mask)
+{
+ writel( (readl(_reg) & ~_clear_mask) | _set_mask, _reg );
+};
+
+
+/**
+ * Wrapper for the OS micro-second delay function.
+ * @param[in] _usecs Microseconds of delay
+ */
+static __inline__ void UDELAY( const uint32_t _usecs )
+{
+ udelay( _usecs );
+}
+
+/**
+ * Wrapper for the OS milli-second delay function.
+ * @param[in] _msecs milliseconds of delay
+ */
+static __inline__ void MDELAY( const uint32_t _msecs )
+{
+ mdelay( _msecs );
+}
+
+/**
+ * Wrapper for the Linux spin_lock. On the ARM (Integrator)
+ * spin_lock() is a nop.
+ *
+ * @param _lock Pointer to the spinlock.
+ */
+static __inline__ void SPIN_LOCK( spinlock_t *_lock )
+{
+ spin_lock(_lock);
+}
+
+/**
+ * Wrapper for the Linux spin_unlock. On the ARM (Integrator)
+ * spin_unlock() is a nop.
+ *
+ * @param _lock Pointer to the spinlock.
+ */
+static __inline__ void SPIN_UNLOCK( spinlock_t *_lock )
+{
+ spin_unlock(_lock);
+}
+
+/**
+ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM
+ * (Integrator) spin_lock() is a nop.
+ *
+ * @param _l Pointer to the spinlock.
+ * @param _f unsigned long for irq flags storage.
+ */
+#define SPIN_LOCK_IRQSAVE( _l, _f ) { \
+ spin_lock_irqsave(_l,_f); \
+ }
+
+/**
+ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM
+ * (Integrator) spin_lock() is a nop.
+ *
+ * @param _l Pointer to the spinlock.
+ * @param _f unsigned long for irq flags storage.
+ */
+#define SPIN_UNLOCK_IRQRESTORE( _l,_f ) {\
+ spin_unlock_irqrestore(_l,_f); \
+ }
+
+
+/*
+ * Debugging support vanishes in non-debug builds.
+ */
+
+
+/**
+ * The Debug Level bit-mask variable.
+ */
+extern uint32_t g_dbg_lvl;
+/**
+ * Set the Debug Level variable.
+ */
+static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
+{
+ uint32_t old = g_dbg_lvl;
+ g_dbg_lvl = _new;
+ return old;
+}
+
+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */
+#define DBG_CIL (0x2)
+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug
+ * messages */
+#define DBG_CILV (0x20)
+/** When debug level has the DBG_PCD bit set, display PCD (Device) debug
+ * messages */
+#define DBG_PCD (0x4)
+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug
+ * messages */
+#define DBG_PCDV (0x40)
+/** When debug level has the DBG_HCD bit set, display Host debug messages */
+#define DBG_HCD (0x8)
+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug
+ * messages */
+#define DBG_HCDV (0x80)
+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
+ * mode. */
+#define DBG_HCD_URB (0x800)
+
+/** When debug level has any bit set, display debug messages */
+#define DBG_ANY (0xFF)
+
+/** All debug messages off */
+#define DBG_OFF 0
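+
+/*
+ * Example (illustrative): enable CIL and PCD debug output at runtime by
+ * combining the bit-mask values above, then restore the previous level.
+ * All names used here are defined in this file.
+ *
+ * <code>
+ * uint32_t old_lvl = SET_DEBUG_LEVEL(DBG_CIL | DBG_PCD);
+ * // ... code that should produce CIL/PCD debug output ...
+ * SET_DEBUG_LEVEL(old_lvl);
+ * </code>
+ */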
+
+/** Prefix string for DWC_DEBUG print macros. */
+#define USB_DWC "DWC_otg: "
+
+/**
+ * Print a debug message when the Global debug level variable contains
+ * the bit defined in <code>lvl</code>.
+ *
+ * @param[in] lvl - Debug level, use one of the DBG_ constants above.
+ * @param[in] x - like printf
+ *
+ * Example:<p>
+ * <code>
+ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
+ * </code>
+ * <br>
+ * results in:<br>
+ * <code>
+ * DWC_otg: dwc_otg_cil_init(ca867000)
+ * </code>
+ */
+#ifdef DEBUG
+
+# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0)
+# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x )
+
+# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl)
+
+#else
+
+# define DWC_DEBUGPL(lvl, x...) do{}while(0)
+# define DWC_DEBUGP(x...)
+
+# define CHK_DEBUG_LEVEL(level) (0)
+
+#endif /*DEBUG*/
+
+/**
+ * Print an Error message.
+ */
+#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x )
+/**
+ * Print a Warning message.
+ */
+#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x )
+/**
+ * Print a notice (normal but significant message).
+ */
+#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x )
+/**
+ * Basic message printing.
+ */
+#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x )
+
+#endif
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_regs.h b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_regs.h
new file mode 100644
index 0000000000..397a954f39
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/dwc_otg/dwc_otg_regs.h
@@ -0,0 +1,1797 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_regs.h $
+ * $Revision: 1.1.1.1 $
+ * $Date: 2009-04-17 06:15:34 $
+ * $Change: 631780 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __DWC_OTG_REGS_H__
+#define __DWC_OTG_REGS_H__
+
+/**
+ * @file
+ *
+ * This file contains the data structures for accessing the DWC_otg core registers.
+ *
+ * The application interfaces with the HS OTG core by reading from and
+ * writing to the Control and Status Register (CSR) space through the
+ * AHB Slave interface. These registers are 32 bits wide, and the
+ * addresses are 32-bit-block aligned.
+ * CSRs are classified as follows:
+ * - Core Global Registers
+ * - Device Mode Registers
+ * - Device Global Registers
+ * - Device Endpoint Specific Registers
+ * - Host Mode Registers
+ * - Host Global Registers
+ * - Host Port CSRs
+ * - Host Channel Specific Registers
+ *
+ * Only the Core Global registers can be accessed in both Device and
+ * Host modes. When the HS OTG core is operating in one mode, either
+ * Device or Host, the application must not access registers from the
+ * other mode. When the core switches from one mode to another, the
+ * registers in the new mode of operation must be reprogrammed as they
+ * would be after a power-on reset.
+ */
+
+/****************************************************************************/
+/** DWC_otg Core registers.
+ * The dwc_otg_core_global_regs structure defines the size
+ * and relative field offsets for the Core Global registers.
+ */
+typedef struct dwc_otg_core_global_regs
+{
+ /** OTG Control and Status Register. <i>Offset: 000h</i> */
+ volatile uint32_t gotgctl;
+ /** OTG Interrupt Register. <i>Offset: 004h</i> */
+ volatile uint32_t gotgint;
+ /**Core AHB Configuration Register. <i>Offset: 008h</i> */
+ volatile uint32_t gahbcfg;
+#define DWC_GLBINTRMASK 0x0001
+#define DWC_DMAENABLE 0x0020
+#define DWC_NPTXEMPTYLVL_EMPTY 0x0080
+#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000
+#define DWC_PTXEMPTYLVL_EMPTY 0x0100
+#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000
+
+
+ /**Core USB Configuration Register. <i>Offset: 00Ch</i> */
+ volatile uint32_t gusbcfg;
+ /**Core Reset Register. <i>Offset: 010h</i> */
+ volatile uint32_t grstctl;
+ /**Core Interrupt Register. <i>Offset: 014h</i> */
+ volatile uint32_t gintsts;
+ /**Core Interrupt Mask Register. <i>Offset: 018h</i> */
+ volatile uint32_t gintmsk;
+ /**Receive Status Queue Read Register (Read Only). <i>Offset: 01Ch</i> */
+ volatile uint32_t grxstsr;
+ /**Receive Status Queue Read & POP Register (Read Only). <i>Offset: 020h</i>*/
+ volatile uint32_t grxstsp;
+ /**Receive FIFO Size Register. <i>Offset: 024h</i> */
+ volatile uint32_t grxfsiz;
+ /**Non Periodic Transmit FIFO Size Register. <i>Offset: 028h</i> */
+ volatile uint32_t gnptxfsiz;
+ /**Non Periodic Transmit FIFO/Queue Status Register (Read
+ * Only). <i>Offset: 02Ch</i> */
+ volatile uint32_t gnptxsts;
+ /**I2C Access Register. <i>Offset: 030h</i> */
+ volatile uint32_t gi2cctl;
+ /**PHY Vendor Control Register. <i>Offset: 034h</i> */
+ volatile uint32_t gpvndctl;
+ /**General Purpose Input/Output Register. <i>Offset: 038h</i> */
+ volatile uint32_t ggpio;
+ /**User ID Register. <i>Offset: 03Ch</i> */
+ volatile uint32_t guid;
+ /**Synopsys ID Register (Read Only). <i>Offset: 040h</i> */
+ volatile uint32_t gsnpsid;
+ /**User HW Config1 Register (Read Only). <i>Offset: 044h</i> */
+ volatile uint32_t ghwcfg1;
+ /**User HW Config2 Register (Read Only). <i>Offset: 048h</i> */
+ volatile uint32_t ghwcfg2;
+#define DWC_SLAVE_ONLY_ARCH 0
+#define DWC_EXT_DMA_ARCH 1
+#define DWC_INT_DMA_ARCH 2
+
+#define DWC_MODE_HNP_SRP_CAPABLE 0
+#define DWC_MODE_SRP_ONLY_CAPABLE 1
+#define DWC_MODE_NO_HNP_SRP_CAPABLE 2
+#define DWC_MODE_SRP_CAPABLE_DEVICE 3
+#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define DWC_MODE_SRP_CAPABLE_HOST 5
+#define DWC_MODE_NO_SRP_CAPABLE_HOST 6
+
+ /**User HW Config3 Register (Read Only). <i>Offset: 04Ch</i> */
+ volatile uint32_t ghwcfg3;
+ /**User HW Config4 Register (Read Only). <i>Offset: 050h</i>*/
+ volatile uint32_t ghwcfg4;
+ /** Reserved <i>Offset: 054h-0FFh</i> */
+ uint32_t reserved[43];
+ /** Host Periodic Transmit FIFO Size Register. <i>Offset: 100h</i> */
+ volatile uint32_t hptxfsiz;
+ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled,
+ otherwise Device Transmit FIFO#n Register.
+ * <i>Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15).</i> */
+ //volatile uint32_t dptxfsiz[15];
+ volatile uint32_t dptxfsiz_dieptxf[15];
+} dwc_otg_core_global_regs_t;
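+
+/*
+ * Example (illustrative): with a hypothetical ioremapped controller base
+ * address <code>base</code>, the Core Global registers can be accessed
+ * through this structure, e.g. to read the Synopsys ID register:
+ *
+ * <code>
+ * dwc_otg_core_global_regs_t *global_regs =
+ *	(dwc_otg_core_global_regs_t *)base;
+ * uint32_t snpsid = dwc_read_reg32(&global_regs->gsnpsid);
+ * </code>
+ */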
+
+/**
+ * This union represents the bit fields of the Core OTG Control
+ * and Status Register (GOTGCTL). Set the bits using the bit
+ * fields then write the <i>d32</i> value to the register.
+ */
+typedef union gotgctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+ unsigned reserved31_21 : 11;
+ unsigned currmod : 1;
+ unsigned bsesvld : 1;
+ unsigned asesvld : 1;
+ unsigned reserved17 : 1;
+ unsigned conidsts : 1;
+ unsigned reserved15_12 : 4;
+ unsigned devhnpen : 1;
+ unsigned hstsethnpen : 1;
+ unsigned hnpreq : 1;
+ unsigned hstnegscs : 1;
+ unsigned reserved7_2 : 6;
+ unsigned sesreq : 1;
+ unsigned sesreqscs : 1;
+ } b;
+} gotgctl_data_t;
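+
+/*
+ * Example (illustrative): request a session using the read/modify/write
+ * pattern described above. <code>global_regs</code> is a hypothetical
+ * pointer to the Core Global register block.
+ *
+ * <code>
+ * gotgctl_data_t gotgctl;
+ * gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+ * gotgctl.b.sesreq = 1;
+ * dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32);
+ * </code>
+ */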
+
+/**
+ * This union represents the bit fields of the Core OTG Interrupt Register
+ * (GOTGINT). Set/clear the bits using the bit fields then write the <i>d32</i>
+ * value to the register.
+ */
+typedef union gotgint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+ /** Reserved */
+ unsigned reserved31_20 : 12;
+ /** Debounce Done */
+ unsigned debdone : 1;
+ /** A-Device Timeout Change */
+ unsigned adevtoutchng : 1;
+ /** Host Negotiation Detected */
+ unsigned hstnegdet : 1;
+ unsigned reserver16_10 : 7;
+ /** Host Negotiation Success Status Change */
+ unsigned hstnegsucstschng : 1;
+ /** Session Request Success Status Change */
+ unsigned sesreqsucstschng : 1;
+ unsigned reserved3_7 : 5;
+ /** Session End Detected */
+ unsigned sesenddet : 1;
+ /** Reserved */
+ unsigned reserved1_0 : 2;
+ } b;
+} gotgint_data_t;
+
+
+/**
+ * This union represents the bit fields of the Core AHB Configuration
+ * Register (GAHBCFG). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ */
+typedef union gahbcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1
+#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0
+ unsigned reserved9_31 : 23;
+ unsigned ptxfemplvl : 1;
+ unsigned nptxfemplvl_txfemplvl : 1;
+#define DWC_GAHBCFG_DMAENABLE 1
+ unsigned reserved : 1;
+ unsigned dmaenable : 1;
+#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7
+ unsigned hburstlen : 4;
+ unsigned glblintrmsk : 1;
+#define DWC_GAHBCFG_GLBINT_ENABLE 1
+
+ } b;
+} gahbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core USB Configuration
+ * Register (GUSBCFG). Set the bits using the bit fields then write
+ * the <i>d32</i> value to the register.
+ */
+typedef union gusbcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+ unsigned corrupt_tx_packet: 1; /*fscz*/
+ unsigned force_device_mode: 1;
+ unsigned force_host_mode: 1;
+ unsigned reserved23_28 : 6;
+ unsigned term_sel_dl_pulse : 1;
+ unsigned ulpi_int_vbus_indicator : 1;
+ unsigned ulpi_ext_vbus_drv : 1;
+ unsigned ulpi_clk_sus_m : 1;
+ unsigned ulpi_auto_res : 1;
+ unsigned ulpi_fsls : 1;
+ unsigned otgutmifssel : 1;
+ unsigned phylpwrclksel : 1;
+ unsigned nptxfrwnden : 1;
+ unsigned usbtrdtim : 4;
+ unsigned hnpcap : 1;
+ unsigned srpcap : 1;
+ unsigned ddrsel : 1;
+ unsigned physel : 1;
+ unsigned fsintf : 1;
+ unsigned ulpi_utmi_sel : 1;
+ unsigned phyif : 1;
+ unsigned toutcal : 3;
+ } b;
+} gusbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core Reset Register
+ * (GRSTCTL). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union grstctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+ /** AHB Master Idle. Indicates the AHB Master State
+ * Machine is in IDLE condition. */
+ unsigned ahbidle : 1;
+ /** DMA Request Signal. Indicates a DMA request is in
+ * progress. Used for debug purposes. */
+ unsigned dmareq : 1;
+ /** Reserved */
+ unsigned reserved29_11 : 19;
+ /** TxFIFO Number (TxFNum) (Device and Host).
+ *
+ * This is the FIFO number which needs to be flushed,
+ * using the TxFIFO Flush bit. This field should not
+ * be changed until the TxFIFO Flush bit is cleared by
+ * the core.
+ * - 0x0 : Non Periodic TxFIFO Flush
+ * - 0x1 : Periodic TxFIFO #1 Flush in device mode
+ * or Periodic TxFIFO in host mode
+ * - 0x2 : Periodic TxFIFO #2 Flush in device mode.
+ * - ...
+ * - 0xF : Periodic TxFIFO #15 Flush in device mode
+ * - 0x10: Flush all the Transmit NonPeriodic and
+ * Transmit Periodic FIFOs in the core
+ */
+ unsigned txfnum : 5;
+ /** TxFIFO Flush (TxFFlsh) (Device and Host).
+ *
+ * This bit is used to selectively flush a single or
+ * all transmit FIFOs. The application must first
+ * ensure that the core is not in the middle of a
+ * transaction. <p>The application should write into
+ * this bit only after making sure that neither the
+ * DMA engine is writing into the TxFIFO nor the MAC
+ * is reading the data out of the FIFO. <p>The
+ * application should wait until the core clears this
+ * bit before performing any operations. This bit
+ * will take 8 clocks (slowest of PHY or AHB clock)
+ * to clear.
+ */
+ unsigned txfflsh : 1;
+ /** RxFIFO Flush (RxFFlsh) (Device and Host)
+ *
+ * The application can flush the entire Receive FIFO
+ * using this bit. <p>The application must first
+ * ensure that the core is not in the middle of a
+ * transaction. <p>The application should write into
+ * this bit only after making sure that neither the
+ * DMA engine is reading from the RxFIFO nor the MAC
+ * is writing the data into the FIFO. <p>The
+ * application should wait until the bit is cleared
+ * before performing any other operations. This bit
+ * will take 8 clocks (slowest of PHY or AHB clock)
+ * to clear.
+ */
+ unsigned rxfflsh : 1;
+ /** In Token Sequence Learning Queue Flush
+ * (INTknQFlsh) (Device Only)
+ */
+ unsigned intknqflsh : 1;
+ /** Host Frame Counter Reset (Host Only)<br>
+ *
+ * The application can reset the (micro)frame number
+ * counter inside the core, using this bit. When the
+ * (micro)frame counter is reset, the subsequent SOF
+ * sent out by the core, will have a (micro)frame
+ * number of 0.
+ */
+ unsigned hstfrm : 1;
+ /** Hclk Soft Reset
+ *
+ * The application uses this bit to reset the control logic in
+ * the AHB clock domain. Only AHB clock domain pipelines are
+ * reset.
+ */
+ unsigned hsftrst : 1;
+ /** Core Soft Reset (CSftRst) (Device and Host)
+ *
+ * The application can flush the control logic in the
+ * entire core using this bit. This bit resets the
+ * pipelines in the AHB Clock domain as well as the
+ * PHY Clock domain.
+ *
+ * The state machines are reset to an IDLE state, the
+ * control bits in the CSRs are cleared, and all the
+ * transmit FIFOs and the receive FIFO are flushed.
+ *
+ * The status mask bits that control the generation of
+ * the interrupt are cleared, to clear the
+ * interrupt. The interrupt status bits are not
+ * cleared, so the application can get the status of
+ * any events that occurred in the core after it has
+ * set this bit.
+ *
+ * Any transactions on the AHB are terminated as soon
+ * as possible following the protocol. Any
+ * transactions on the USB are terminated immediately.
+ *
+ * The configuration settings in the CSRs are
+ * unchanged, so the software doesn't have to
+ * reprogram these registers (Device
+ * Configuration/Host Configuration/Core System
+ * Configuration/Core PHY Configuration).
+ *
+ * The application can write to this bit any time it
+ * wants to reset the core. This is a self-clearing
+ * bit and the core clears this bit after all the
+ * necessary logic is reset in the core, which may
+ * take several clocks, depending on the current state
+ * of the core.
+ */
+ unsigned csftrst : 1;
+ } b;
+} grstctl_t;
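+
+/*
+ * Example (illustrative): a Core Soft Reset following the description of
+ * the CSftRst and AHBIdle bits above. <code>global_regs</code> is a
+ * hypothetical pointer to the Core Global register block.
+ *
+ * <code>
+ * grstctl_t greset = { .d32 = 0 };
+ * greset.b.csftrst = 1;
+ * dwc_write_reg32(&global_regs->grstctl, greset.d32);
+ * do {
+ *	greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+ * } while (greset.b.csftrst);	// self-clearing bit
+ * do {
+ *	greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+ * } while (!greset.b.ahbidle);	// wait for the AHB master to go idle
+ * </code>
+ */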
+
+
+/**
+ * This union represents the bit fields of the Core Interrupt Mask
+ * Register (GINTMSK). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ */
+typedef union gintmsk_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+ unsigned wkupintr : 1;
+ unsigned sessreqintr : 1;
+ unsigned disconnect : 1;
+ unsigned conidstschng : 1;
+ unsigned reserved27 : 1;
+ unsigned ptxfempty : 1;
+ unsigned hcintr : 1;
+ unsigned portintr : 1;
+ unsigned reserved22_23 : 2;
+ unsigned incomplisoout : 1;
+ unsigned incomplisoin : 1;
+ unsigned outepintr : 1;
+ unsigned inepintr : 1;
+ unsigned epmismatch : 1;
+ unsigned reserved16 : 1;
+ unsigned eopframe : 1;
+ unsigned isooutdrop : 1;
+ unsigned enumdone : 1;
+ unsigned usbreset : 1;
+ unsigned usbsuspend : 1;
+ unsigned erlysuspend : 1;
+ unsigned i2cintr : 1;
+ unsigned reserved8 : 1;
+ unsigned goutnakeff : 1;
+ unsigned ginnakeff : 1;
+ unsigned nptxfempty : 1;
+ unsigned rxstsqlvl : 1;
+ unsigned sofintr : 1;
+ unsigned otgintr : 1;
+ unsigned modemismatch : 1;
+ unsigned reserved0 : 1;
+ } b;
+} gintmsk_data_t;
+/**
+ * This union represents the bit fields of the Core Interrupt Register
+ * (GINTSTS). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union gintsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+#define DWC_SOF_INTR_MASK 0x0008
+ /** register bits */
+ struct
+ {
+#define DWC_HOST_MODE 1
+ unsigned wkupintr : 1;
+ unsigned sessreqintr : 1;
+ unsigned disconnect : 1;
+ unsigned conidstschng : 1;
+ unsigned reserved27 : 1;
+ unsigned ptxfempty : 1;
+ unsigned hcintr : 1;
+ unsigned portintr : 1;
+ unsigned reserved22_23 : 2;
+ unsigned incomplisoout : 1;
+ unsigned incomplisoin : 1;
+ unsigned outepintr : 1;
+ unsigned inepint: 1;
+ unsigned epmismatch : 1;
+ unsigned intokenrx : 1;
+ unsigned eopframe : 1;
+ unsigned isooutdrop : 1;
+ unsigned enumdone : 1;
+ unsigned usbreset : 1;
+ unsigned usbsuspend : 1;
+ unsigned erlysuspend : 1;
+ unsigned i2cintr : 1;
+ unsigned reserved8 : 1;
+ unsigned goutnakeff : 1;
+ unsigned ginnakeff : 1;
+ unsigned nptxfempty : 1;
+ unsigned rxstsqlvl : 1;
+ unsigned sofintr : 1;
+ unsigned otgintr : 1;
+ unsigned modemismatch : 1;
+ unsigned curmode : 1;
+ } b;
+} gintsts_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP). Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union device_grxsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 7;
+ unsigned fn : 4;
+#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet
+#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete
+
+#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK
+#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete
+#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet
+ unsigned pktsts : 4;
+ unsigned dpid : 2;
+ unsigned bcnt : 11;
+ unsigned epnum : 4;
+ } b;
+} device_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP). Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union host_grxsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_21 : 11;
+#define DWC_GRXSTS_PKTSTS_IN 0x2
+#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3
+#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5
+#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7
+ unsigned pktsts : 4;
+ unsigned dpid : 2;
+ unsigned bcnt : 11;
+ unsigned chnum : 4;
+ } b;
+} host_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ,
+ * GNPTXFSIZ, DPTXFSIZn). Read the register into the <i>d32</i> element then
+ * read out the bits using the <i>b</i>it elements.
+ */
+typedef union fifosize_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned depth : 16;
+ unsigned startaddr : 16;
+ } b;
+} fifosize_data_t;
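+
+/*
+ * Example (illustrative): program the non-periodic Tx FIFO size register.
+ * The depth and start address values are placeholders, not recommended
+ * settings; <code>global_regs</code> is a hypothetical pointer to the Core
+ * Global register block.
+ *
+ * <code>
+ * fifosize_data_t nptxfifosize;
+ * nptxfifosize.d32 = 0;
+ * nptxfifosize.b.depth = 0x100;
+ * nptxfifosize.b.startaddr = 0x200;
+ * dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+ * </code>
+ */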
+
+/**
+ * This union represents the bit fields in the Non-Periodic Transmit
+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union gnptxsts_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 1;
+ /** Top of the Non-Periodic Transmit Request Queue
+ * - bits 30:27 - Channel/EP Number
+ * - bits 26:25 - Token Type
+ * - bit 24 - Terminate (Last entry for the selected
+ * channel/EP)
+ * - 2'b00 - IN/OUT
+ * - 2'b01 - Zero Length OUT
+ * - 2'b10 - PING/Complete Split
+ * - 2'b11 - Channel Halt
+
+ */
+ unsigned nptxqtop_chnep : 4;
+ unsigned nptxqtop_token : 2;
+ unsigned nptxqtop_terminate : 1;
+ unsigned nptxqspcavail : 8;
+ unsigned nptxfspcavail : 16;
+ } b;
+} gnptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Transmit
+ * FIFO Status Register (DTXFSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union dtxfsts_data /* fscz */
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned txfspcavail : 16;
+ } b;
+} dtxfsts_data_t;
+
+/**
+ * This union represents the bit fields in the I2C Control Register
+ * (I2CCTL). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gi2cctl_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned bsydne : 1;
+ unsigned rw : 1;
+ unsigned reserved : 2;
+ unsigned i2cdevaddr : 2;
+ unsigned i2csuspctl : 1;
+ unsigned ack : 1;
+ unsigned i2cen : 1;
+ unsigned addr : 7;
+ unsigned regaddr : 8;
+ unsigned rwdata : 8;
+ } b;
+} gi2cctl_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config1
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg1_data {
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned ep_dir15 : 2;
+ unsigned ep_dir14 : 2;
+ unsigned ep_dir13 : 2;
+ unsigned ep_dir12 : 2;
+ unsigned ep_dir11 : 2;
+ unsigned ep_dir10 : 2;
+ unsigned ep_dir9 : 2;
+ unsigned ep_dir8 : 2;
+ unsigned ep_dir7 : 2;
+ unsigned ep_dir6 : 2;
+ unsigned ep_dir5 : 2;
+ unsigned ep_dir4 : 2;
+ unsigned ep_dir3 : 2;
+ unsigned ep_dir2 : 2;
+ unsigned ep_dir1 : 2;
+ unsigned ep_dir0 : 2;
+ } b;
+} hwcfg1_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config2
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg2_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG2 */
+ unsigned reserved31 : 1;
+ unsigned dev_token_q_depth : 5;
+ unsigned host_perio_tx_q_depth : 2;
+ unsigned nonperio_tx_q_depth : 2;
+ unsigned rx_status_q_depth : 2;
+ unsigned dynamic_fifo : 1;
+ unsigned perio_ep_supported : 1;
+ unsigned num_host_chan : 4;
+ unsigned num_dev_ep : 4;
+ unsigned fs_phy_type : 2;
+#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1
+#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+ unsigned hs_phy_type : 2;
+ unsigned point2point : 1;
+ unsigned architecture : 2;
+#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
+#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
+#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+ unsigned op_mode : 3;
+ } b;
+} hwcfg2_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config3
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg3_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG3 */
+ unsigned dfifo_depth : 16;
+ unsigned reserved15_13 : 3;
+ unsigned ahb_phy_clock_synch : 1;
+ unsigned synch_reset_type : 1;
+ unsigned optional_features : 1;
+ unsigned vendor_ctrl_if : 1;
+ unsigned i2c : 1;
+ unsigned otg_func : 1;
+ unsigned packet_size_cntr_width : 3;
+ unsigned xfer_size_cntr_width : 4;
+ } b;
+} hwcfg3_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config4
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg4_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_30 : 2; /* fscz */
+ unsigned num_in_eps : 4;
+ unsigned ded_fifo_en : 1;
+
+ unsigned session_end_filt_en : 1;
+ unsigned b_valid_filt_en : 1;
+ unsigned a_valid_filt_en : 1;
+ unsigned vbus_valid_filt_en : 1;
+ unsigned iddig_filt_en : 1;
+ unsigned num_dev_mode_ctrl_ep : 4;
+ unsigned utmi_phy_data_width : 2;
+ unsigned min_ahb_freq : 9;
+ unsigned power_optimiz : 1;
+ unsigned num_dev_perio_in_ep : 4;
+ } b;
+} hwcfg4_data_t;
+
+////////////////////////////////////////////
+// Device Registers
+/**
+ * Device Global Registers. <i>Offsets 800h-BFFh</i>
+ *
+ * The following structures define the size and relative field offsets
+ * for the Device Mode Registers.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_global_regs
+{
+ /** Device Configuration Register. <i>Offset 800h</i> */
+ volatile uint32_t dcfg;
+ /** Device Control Register. <i>Offset: 804h</i> */
+ volatile uint32_t dctl;
+ /** Device Status Register (Read Only). <i>Offset: 808h</i> */
+ volatile uint32_t dsts;
+ /** Reserved. <i>Offset: 80Ch</i> */
+ uint32_t unused;
+ /** Device IN Endpoint Common Interrupt Mask
+ * Register. <i>Offset: 810h</i> */
+ volatile uint32_t diepmsk;
+ /** Device OUT Endpoint Common Interrupt Mask
+ * Register. <i>Offset: 814h</i> */
+ volatile uint32_t doepmsk;
+ /** Device All Endpoints Interrupt Register. <i>Offset: 818h</i> */
+ volatile uint32_t daint;
+ /** Device All Endpoints Interrupt Mask Register. <i>Offset:
+ * 81Ch</i> */
+ volatile uint32_t daintmsk;
+ /** Device IN Token Queue Read Register-1 (Read Only).
+ * <i>Offset: 820h</i> */
+ volatile uint32_t dtknqr1;
+ /** Device IN Token Queue Read Register-2 (Read Only).
+ * <i>Offset: 824h</i> */
+ volatile uint32_t dtknqr2;
+ /** Device VBUS discharge Register. <i>Offset: 828h</i> */
+ volatile uint32_t dvbusdis;
+ /** Device VBUS Pulse Register. <i>Offset: 82Ch</i> */
+ volatile uint32_t dvbuspulse;
+ /** Device IN Token Queue Read Register-3 (Read Only).
+ * Device Thresholding control register (Read/Write)
+ * <i>Offset: 830h</i> */
+ volatile uint32_t dtknqr3_dthrctl;
+ /** Device IN Token Queue Read Register-4 (Read Only). /
+ * Device IN EPs empty Intr. Mask Register (Read/Write)
+ * <i>Offset: 834h</i> */
+ volatile uint32_t dtknqr4_fifoemptymsk;
+} dwc_otg_device_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Device Configuration
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements. Write the
+ * <i>d32</i> member to the dcfg register.
+ */
+typedef union dcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_23 : 9;
+ /** In Endpoint Mis-match count */
+ unsigned epmscnt : 5;
+ unsigned reserved13_17 : 5;
+ /** Periodic Frame Interval */
+#define DWC_DCFG_FRAME_INTERVAL_80 0
+#define DWC_DCFG_FRAME_INTERVAL_85 1
+#define DWC_DCFG_FRAME_INTERVAL_90 2
+#define DWC_DCFG_FRAME_INTERVAL_95 3
+ unsigned perfrint : 2;
+ /** Device Addresses */
+ unsigned devaddr : 7;
+ unsigned reserved3 : 1;
+ /** Non Zero Length Status OUT Handshake */
+#define DWC_DCFG_SEND_STALL 1
+ unsigned nzstsouthshk : 1;
+ /** Device Speed */
+ unsigned devspd : 2;
+ } b;
+} dcfg_data_t;
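+
+/*
+ * Example (illustrative): program the USB address assigned by the host into
+ * DCFG. <code>dev_regs</code> is a hypothetical pointer to the Device Global
+ * register block and <code>address</code> the assigned device address.
+ *
+ * <code>
+ * dcfg_data_t dcfg;
+ * dcfg.d32 = dwc_read_reg32(&dev_regs->dcfg);
+ * dcfg.b.devaddr = address;
+ * dwc_write_reg32(&dev_regs->dcfg, dcfg.d32);
+ * </code>
+ */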
+
+/**
+ * This union represents the bit fields in the Device Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 20;
+ /** Power-On Programming Done */
+ unsigned pwronprgdone : 1;
+ /** Clear Global OUT NAK */
+ unsigned cgoutnak : 1;
+ /** Set Global OUT NAK */
+ unsigned sgoutnak : 1;
+ /** Clear Global Non-Periodic IN NAK */
+ unsigned cgnpinnak : 1;
+ /** Set Global Non-Periodic IN NAK */
+ unsigned sgnpinnak : 1;
+ /** Test Control */
+ unsigned tstctl : 3;
+ /** Global OUT NAK Status */
+ unsigned goutnaksts : 1;
+ /** Global Non-Periodic IN NAK Status */
+ unsigned gnpinnaksts : 1;
+ /** Soft Disconnect */
+ unsigned sftdiscon : 1;
+ /** Remote Wakeup */
+ unsigned rmtwkupsig : 1;
+ } b;
+} dctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device Status
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved22_31 : 10;
+ /** Frame or Microframe Number of the received SOF */
+ unsigned soffn : 14;
+ unsigned reserved4_7: 4;
+ /** Erratic Error */
+ unsigned errticerr : 1;
+ /** Enumerated Speed */
+#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
+#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
+#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2
+#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3
+ unsigned enumspd : 2;
+ /** Suspend Status */
+ unsigned suspsts : 1;
+ } b;
+} dsts_data_t;
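+
+/*
+ * Example (illustrative): determine the enumerated speed after the
+ * enumeration-done interrupt. <code>dev_regs</code> is a hypothetical
+ * pointer to the Device Global register block.
+ *
+ * <code>
+ * dsts_data_t dsts;
+ * dsts.d32 = dwc_read_reg32(&dev_regs->dsts);
+ * int is_hs = (dsts.b.enumspd == DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ);
+ * </code>
+ */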
+
+
+/**
+ * This union represents the bit fields in the Device IN EP Interrupt
+ * Register and the Device IN EP Common Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union diepint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved07_31 : 23;
+ unsigned txfifoundrn : 1;
+ /** IN Endpoint Tx FIFO Empty interrupt mask */
+ unsigned emptyintr : 1;
+ /** IN Endpoint NAK Effective mask */
+ unsigned inepnakeff : 1;
+ /** IN Token Received with EP mismatch mask */
+ unsigned intknepmis : 1;
+ /** IN Token received with TxF Empty mask */
+ unsigned intktxfemp : 1;
+ /** TimeOUT Handshake mask (non-ISOC EPs) */
+ unsigned timeout : 1;
+ /** AHB Error mask */
+ unsigned ahberr : 1;
+ /** Endpoint disable mask */
+ unsigned epdisabled : 1;
+ /** Transfer complete mask */
+ unsigned xfercompl : 1;
+ } b;
+} diepint_data_t;
+/**
+ * This union represents the bit fields in the Device IN EP Common
+ * Interrupt Mask Register.
+ */
+typedef union diepint_data diepmsk_data_t;
+
+/**
+ * This union represents the bit fields in the Device OUT EP Interrupt
+ * Register and Device OUT EP Common Interrupt Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union doepint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved04_31 : 27;
+ /** OUT Token Received when Endpoint Disabled */
+ unsigned outtknepdis : 1;
+ /** Setup Phase Done (control EPs) */
+ unsigned setup : 1;
+ /** AHB Error */
+ unsigned ahberr : 1;
+ /** Endpoint disable */
+ unsigned epdisabled : 1;
+ /** Transfer complete */
+ unsigned xfercompl : 1;
+ } b;
+} doepint_data_t;
+/**
+ * This union represents the bit fields in the Device OUT EP Common
+ * Interrupt Mask Register.
+ */
+typedef union doepint_data doepmsk_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device All EP Interrupt
+ * and Mask Registers.
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union daint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** OUT Endpoint bits */
+ unsigned out : 16;
+ /** IN Endpoint bits */
+ unsigned in : 16;
+ } ep;
+ struct {
+ /** OUT Endpoint bits */
+ unsigned outep15 : 1;
+ unsigned outep14 : 1;
+ unsigned outep13 : 1;
+ unsigned outep12 : 1;
+ unsigned outep11 : 1;
+ unsigned outep10 : 1;
+ unsigned outep9 : 1;
+ unsigned outep8 : 1;
+ unsigned outep7 : 1;
+ unsigned outep6 : 1;
+ unsigned outep5 : 1;
+ unsigned outep4 : 1;
+ unsigned outep3 : 1;
+ unsigned outep2 : 1;
+ unsigned outep1 : 1;
+ unsigned outep0 : 1;
+ /** IN Endpoint bits */
+ unsigned inep15 : 1;
+ unsigned inep14 : 1;
+ unsigned inep13 : 1;
+ unsigned inep12 : 1;
+ unsigned inep11 : 1;
+ unsigned inep10 : 1;
+ unsigned inep9 : 1;
+ unsigned inep8 : 1;
+ unsigned inep7 : 1;
+ unsigned inep6 : 1;
+ unsigned inep5 : 1;
+ unsigned inep4 : 1;
+ unsigned inep3 : 1;
+ unsigned inep2 : 1;
+ unsigned inep1 : 1;
+ unsigned inep0 : 1;
+ } b;
+} daint_data_t;
+
+/**
+ * This union represents the bit fields in the Device IN Token Queue
+ * Read Registers.
+ * - Read the register into the <i>d32</i> member.
+ * - READ-ONLY Register
+ */
+typedef union dtknq1_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** EP Numbers of IN Tokens 0 ... 5 */
+ unsigned epnums0_5 : 24;
+ /** write pointer has wrapped. */
+ unsigned wrap_bit : 1;
+ /** Reserved */
+ unsigned reserved05_06 : 2;
+ /** In Token Queue Write Pointer */
+ unsigned intknwptr : 5;
+ }b;
+} dtknq1_data_t;
+
+/**
+ * This union represents Threshold control Register
+ * - Read and write the register into the <i>d32</i> member.
+ * - READ-WRITABLE Register
+ */
+typedef union dthrctl_data /* fscz */
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Reserved */
+ unsigned reserved26_31 : 6;
+ /** Rx Thr. Length */
+ unsigned rx_thr_len : 9;
+ /** Rx Thr. Enable */
+ unsigned rx_thr_en : 1;
+ /** Reserved */
+ unsigned reserved11_15 : 5;
+ /** Tx Thr. Length */
+ unsigned tx_thr_len : 9;
+ /** ISO Tx Thr. Enable */
+ unsigned iso_thr_en : 1;
+ /** non ISO Tx Thr. Enable */
+ unsigned non_iso_thr_en : 1;
+
+ }b;
+} dthrctl_data_t;
+
+/**
+ * Device Logical IN Endpoint-Specific Registers. <i>Offsets
+ * 900h-AFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_in_ep_regs
+{
+ /** Device IN Endpoint Control Register. <i>Offset:900h +
+ * (ep_num * 20h) + 00h</i> */
+ volatile uint32_t diepctl;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 04h</i> */
+ uint32_t reserved04;
+ /** Device IN Endpoint Interrupt Register. <i>Offset:900h +
+ * (ep_num * 20h) + 08h</i> */
+ volatile uint32_t diepint;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 0Ch</i> */
+ uint32_t reserved0C;
+ /** Device IN Endpoint Transfer Size
+ * Register. <i>Offset:900h + (ep_num * 20h) + 10h</i> */
+ volatile uint32_t dieptsiz;
+ /** Device IN Endpoint DMA Address Register. <i>Offset:900h +
+ * (ep_num * 20h) + 14h</i> */
+ volatile uint32_t diepdma;
+ /** Device IN Endpoint Transmit FIFO Status Register. <i>Offset:900h +
+ * (ep_num * 20h) + 18h</i> */
+ volatile uint32_t dtxfsts;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 1Ch</i> */
+ uint32_t reserved18;
+} dwc_otg_dev_in_ep_regs_t;
+
+/**
+ * Device Logical OUT Endpoint-Specific Registers. <i>Offsets:
+ * B00h-CFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_out_ep_regs
+{
+ /** Device OUT Endpoint Control Register. <i>Offset:B00h +
+ * (ep_num * 20h) + 00h</i> */
+ volatile uint32_t doepctl;
+ /** Device OUT Endpoint Frame number Register. <i>Offset:
+ * B00h + (ep_num * 20h) + 04h</i> */
+ volatile uint32_t doepfn;
+ /** Device OUT Endpoint Interrupt Register. <i>Offset:B00h +
+ * (ep_num * 20h) + 08h</i> */
+ volatile uint32_t doepint;
+ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 0Ch</i> */
+ uint32_t reserved0C;
+ /** Device OUT Endpoint Transfer Size Register. <i>Offset:
+ * B00h + (ep_num * 20h) + 10h</i> */
+ volatile uint32_t doeptsiz;
+ /** Device OUT Endpoint DMA Address Register. <i>Offset:B00h
+ * + (ep_num * 20h) + 14h</i> */
+ volatile uint32_t doepdma;
+ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 18h - B00h +
+ * (ep_num * 20h) + 1Ch</i> */
+ uint32_t unused[2];
+} dwc_otg_dev_out_ep_regs_t;
+
+/**
+ * This union represents the bit fields in the Device EP Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union depctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Endpoint Enable */
+ unsigned epena : 1;
+ /** Endpoint Disable */
+ unsigned epdis : 1;
+ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints)
+ * Writing to this field sets the Endpoint DPID (DPID)
+ * field in this register to DATA1. Set Odd
+ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints)
+ * Writing to this field sets the Even/Odd
+ * (micro)frame (EO_FrNum) field to odd (micro) frame.
+ */
+ unsigned setd1pid : 1;
+ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints)
+ * Writing to this field sets the Endpoint DPID (DPID)
+ * field in this register to DATA0. Set Even
+ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints)
+ * Writing to this field sets the Even/Odd
+ * (micro)frame (EO_FrNum) field to even (micro)
+ * frame.
+ */
+ unsigned setd0pid : 1;
+ /** Set NAK */
+ unsigned snak : 1;
+ /** Clear NAK */
+ unsigned cnak : 1;
+ /** Tx Fifo Number
+ * IN EPn/IN EP0
+ * OUT EPn/OUT EP0 - reserved */
+ unsigned txfnum : 4;
+ /** Stall Handshake */
+ unsigned stall : 1;
+ /** Snoop Mode
+ * OUT EPn/OUT EP0
+ * IN EPn/IN EP0 - reserved */
+ unsigned snp : 1;
+ /** Endpoint Type
+ * 2'b00: Control
+ * 2'b01: Isochronous
+ * 2'b10: Bulk
+ * 2'b11: Interrupt */
+ unsigned eptype : 2;
+ /** NAK Status */
+ unsigned naksts : 1;
+ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints)
+ * This field contains the PID of the packet going to
+ * be received or transmitted on this endpoint. The
+ * application should program the PID of the first
+ * packet going to be received or transmitted on this
+ * endpoint, after the endpoint is
+ * activated. The application uses the SetD1PID and
+ * SetD0PID fields of this register to program either
+ * D0 or D1 PID.
+ *
+ * The encoding for this field is
+ * - 0: D0
+ * - 1: D1
+ */
+ unsigned dpid : 1;
+ /** USB Active Endpoint */
+ unsigned usbactep : 1;
+ /** Next Endpoint
+ * IN EPn/IN EP0
+ * OUT EPn/OUT EP0 - reserved */
+ unsigned nextep : 4;
+ /** Maximum Packet Size
+ * IN/OUT EPn
+ * IN/OUT EP0 - 2 bits
+ * 2'b00: 64 Bytes
+ * 2'b01: 32
+ * 2'b10: 16
+ * 2'b11: 8 */
+#define DWC_DEP0CTL_MPS_64 0
+#define DWC_DEP0CTL_MPS_32 1
+#define DWC_DEP0CTL_MPS_16 2
+#define DWC_DEP0CTL_MPS_8 3
+ unsigned mps : 11;
+ } b;
+} depctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 1;
+ /** Multi Count - Periodic IN endpoints */
+ unsigned mc : 2;
+ /** Packet Count */
+ unsigned pktcnt : 10;
+ /** Transfer size */
+ unsigned xfersize : 19;
+ } b;
+} deptsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP 0 Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz0_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31 : 1;
+ /**Setup Packet Count (DOEPTSIZ0 Only) */
+ unsigned supcnt : 2;
+ /** Reserved */
+ unsigned reserved28_20 : 9;
+ /** Packet Count */
+ unsigned pktcnt : 1;
+ /** Reserved */
+ unsigned reserved18_7 : 12;
+ /** Transfer size */
+ unsigned xfersize : 7;
+ } b;
+} deptsiz0_data_t;
+
+
+/** Maximum number of Periodic FIFOs */
+#define MAX_PERIO_FIFOS 15
+/** Maximum number of TX FIFOs */
+#define MAX_TX_FIFOS 15
+/** Maximum number of Endpoints/HostChannels */
+#define MAX_EPS_CHANNELS 16
+//#define MAX_EPS_CHANNELS 4
+
+/**
+ * The dwc_otg_dev_if structure contains information needed to manage
+ * the DWC_otg controller acting in device mode. It represents the
+ * programming view of the device-specific aspects of the controller.
+ */
+typedef struct dwc_otg_dev_if {
+ /** Pointer to device Global registers.
+ * Device Global Registers starting at offset 800h
+ */
+ dwc_otg_device_global_regs_t *dev_global_regs;
+#define DWC_DEV_GLOBAL_REG_OFFSET 0x800
+
+ /**
+ * Device Logical IN Endpoint-Specific Registers 900h-AFCh
+ */
+ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS];
+#define DWC_DEV_IN_EP_REG_OFFSET 0x900
+#define DWC_EP_REG_OFFSET 0x20
+
+ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
+ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS];
+#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00
+
+ /* Device configuration information*/
+ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */
+ //uint8_t num_eps; /**< Number of EPs range: 0-16 (includes EP0) */
+ //uint8_t num_perio_eps; /**< # of Periodic EP range: 0-15 */
+ /*fscz */
+ uint8_t num_in_eps; /**< Number of Tx (IN) EPs, range 0-15, except ep0 */
+ uint8_t num_out_eps; /**< Number of Rx (OUT) EPs, range 0-15, except ep0 */
+
+ /** Size of periodic FIFOs (Bytes) */
+ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS];
+
+ /** Size of Tx FIFOs (Bytes) */
+ uint16_t tx_fifo_size[MAX_TX_FIFOS];
+
+ /** Thresholding enable flags and length variables */
+ uint16_t rx_thr_en;
+ uint16_t iso_tx_thr_en;
+ uint16_t non_iso_tx_thr_en;
+
+ uint16_t rx_thr_length;
+ uint16_t tx_thr_length;
+} dwc_otg_dev_if_t;
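+
+/*
+ * Example (illustrative): deriving the device register pointers from the
+ * offsets defined above. <code>base</code> is a hypothetical ioremapped
+ * controller base address (uint8_t *) and <code>dev_if</code> a pointer to
+ * a dwc_otg_dev_if_t.
+ *
+ * <code>
+ * int i;
+ * dev_if->dev_global_regs = (dwc_otg_device_global_regs_t *)
+ *	(base + DWC_DEV_GLOBAL_REG_OFFSET);
+ * for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+ *	dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
+ *		(base + DWC_DEV_IN_EP_REG_OFFSET + i * DWC_EP_REG_OFFSET);
+ *	dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
+ *		(base + DWC_DEV_OUT_EP_REG_OFFSET + i * DWC_EP_REG_OFFSET);
+ * }
+ * </code>
+ */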
+
+/**
+ * This union represents the bit fields in the Power and Clock Gating Control
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union pcgcctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned reserved31_05 : 27;
+ /** PHY Suspended */
+ unsigned physuspended : 1;
+ /** Reset Power Down Modules */
+ unsigned rstpdwnmodule : 1;
+ /** Power Clamp */
+ unsigned pwrclmp : 1;
+ /** Gate Hclk */
+ unsigned gatehclk : 1;
+ /** Stop Pclk */
+ unsigned stoppclk : 1;
+ } b;
+} pcgcctl_data_t;
+
+/////////////////////////////////////////////////
+// Host Mode Register Structures
+//
+/**
+ * The Host Global Registers structure defines the size and relative
+ * field offsets for the Host Mode Global Registers. Host Global
+ * Registers offsets 400h-7FFh.
+*/
+typedef struct dwc_otg_host_global_regs
+{
+ /** Host Configuration Register. <i>Offset: 400h</i> */
+ volatile uint32_t hcfg;
+ /** Host Frame Interval Register. <i>Offset: 404h</i> */
+ volatile uint32_t hfir;
+ /** Host Frame Number / Frame Remaining Register. <i>Offset: 408h</i> */
+ volatile uint32_t hfnum;
+ /** Reserved. <i>Offset: 40Ch</i> */
+ uint32_t reserved40C;
+ /** Host Periodic Transmit FIFO/ Queue Status Register. <i>Offset: 410h</i> */
+ volatile uint32_t hptxsts;
+ /** Host All Channels Interrupt Register. <i>Offset: 414h</i> */
+ volatile uint32_t haint;
+ /** Host All Channels Interrupt Mask Register. <i>Offset: 418h</i> */
+ volatile uint32_t haintmsk;
+} dwc_otg_host_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Configuration Register.
+ * Read the register into the <i>d32</i> member then set/clear the bits using
+ * the <i>b</i>it elements. Write the <i>d32</i> member to the hcfg register.
+ */
+typedef union hcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Reserved */
+ //unsigned reserved31_03 : 29;
+ /** FS/LS Only Support */
+ unsigned fslssupp : 1;
+ /** FS/LS Phy Clock Select */
+#define DWC_HCFG_30_60_MHZ 0
+#define DWC_HCFG_48_MHZ 1
+#define DWC_HCFG_6_MHZ 2
+ unsigned fslspclksel : 2;
+ } b;
+} hcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Interval
+ * Register (HFIR).
+ */
+typedef union hfir_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned frint : 16;
+ } b;
+} hfir_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Number/Frame
+ * Remaining Register (HFNUM).
+ */
+typedef union hfnum_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned frrem : 16;
+#define DWC_HFNUM_MAX_FRNUM 0x3FFF
+ unsigned frnum : 16;
+ } b;
+} hfnum_data_t;
+
+typedef union hptxsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Top of the Periodic Transmit Request Queue
+ * - bit 24 - Terminate (last entry for the selected channel)
+ * - bits 26:25 - Token Type
+ * - 2'b00 - Zero length
+ * - 2'b01 - Ping
+ * - 2'b10 - Disable
+ * - bits 30:27 - Channel Number
+ * - bit 31 - Odd/even microframe
+ */
+ unsigned ptxqtop_odd : 1;
+ unsigned ptxqtop_chnum : 4;
+ unsigned ptxqtop_token : 2;
+ unsigned ptxqtop_terminate : 1;
+ unsigned ptxqspcavail : 8;
+ unsigned ptxfspcavail : 16;
+ } b;
+} hptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Port Control and Status
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hprt0 register.
+ */
+typedef union hprt0_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved19_31 : 13;
+#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0
+#define DWC_HPRT0_PRTSPD_FULL_SPEED 1
+#define DWC_HPRT0_PRTSPD_LOW_SPEED 2
+ unsigned prtspd : 2;
+ unsigned prttstctl : 4;
+ unsigned prtpwr : 1;
+ unsigned prtlnsts : 2;
+ unsigned reserved9 : 1;
+ unsigned prtrst : 1;
+ unsigned prtsusp : 1;
+ unsigned prtres : 1;
+ unsigned prtovrcurrchng : 1;
+ unsigned prtovrcurract : 1;
+ unsigned prtenchng : 1;
+ unsigned prtena : 1;
+ unsigned prtconndet : 1;
+ unsigned prtconnsts : 1;
+ } b;
+} hprt0_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels
+ * Interrupt Register (HAINT).
+ */
+typedef union haint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned ch15 : 1;
+ unsigned ch14 : 1;
+ unsigned ch13 : 1;
+ unsigned ch12 : 1;
+ unsigned ch11 : 1;
+ unsigned ch10 : 1;
+ unsigned ch9 : 1;
+ unsigned ch8 : 1;
+ unsigned ch7 : 1;
+ unsigned ch6 : 1;
+ unsigned ch5 : 1;
+ unsigned ch4 : 1;
+ unsigned ch3 : 1;
+ unsigned ch2 : 1;
+ unsigned ch1 : 1;
+ unsigned ch0 : 1;
+ } b;
+ struct {
+ unsigned reserved : 16;
+ unsigned chint : 16;
+ } b2;
+} haint_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels
+ * Interrupt Mask Register (HAINTMSK).
+ */
+typedef union haintmsk_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned ch15 : 1;
+ unsigned ch14 : 1;
+ unsigned ch13 : 1;
+ unsigned ch12 : 1;
+ unsigned ch11 : 1;
+ unsigned ch10 : 1;
+ unsigned ch9 : 1;
+ unsigned ch8 : 1;
+ unsigned ch7 : 1;
+ unsigned ch6 : 1;
+ unsigned ch5 : 1;
+ unsigned ch4 : 1;
+ unsigned ch3 : 1;
+ unsigned ch2 : 1;
+ unsigned ch1 : 1;
+ unsigned ch0 : 1;
+ } b;
+ struct {
+ unsigned reserved : 16;
+ unsigned chint : 16;
+ } b2;
+} haintmsk_data_t;
+
+/**
+ * Host Channel Specific Registers. <i>500h-5FCh</i>
+ */
+typedef struct dwc_otg_hc_regs
+{
+ /** Host Channel 0 Characteristic Register. <i>Offset: 500h + (chan_num * 20h) + 00h</i> */
+ volatile uint32_t hcchar;
+ /** Host Channel 0 Split Control Register. <i>Offset: 500h + (chan_num * 20h) + 04h</i> */
+ volatile uint32_t hcsplt;
+ /** Host Channel 0 Interrupt Register. <i>Offset: 500h + (chan_num * 20h) + 08h</i> */
+ volatile uint32_t hcint;
+ /** Host Channel 0 Interrupt Mask Register. <i>Offset: 500h + (chan_num * 20h) + 0Ch</i> */
+ volatile uint32_t hcintmsk;
+ /** Host Channel 0 Transfer Size Register. <i>Offset: 500h + (chan_num * 20h) + 10h</i> */
+ volatile uint32_t hctsiz;
+ /** Host Channel 0 DMA Address Register. <i>Offset: 500h + (chan_num * 20h) + 14h</i> */
+ volatile uint32_t hcdma;
+ /** Reserved. <i>Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch</i> */
+ uint32_t reserved[2];
+} dwc_otg_hc_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Characteristics
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcchar register.
+ */
+typedef union hcchar_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Channel enable */
+ unsigned chen : 1;
+ /** Channel disable */
+ unsigned chdis : 1;
+ /**
+ * Frame to transmit periodic transaction.
+ * 0: even, 1: odd
+ */
+ unsigned oddfrm : 1;
+ /** Device address */
+ unsigned devaddr : 7;
+ /** Packets per frame for periodic transfers. 0 is reserved. */
+ unsigned multicnt : 2;
+ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
+ unsigned eptype : 2;
+ /** 0: Full/high speed device, 1: Low speed device */
+ unsigned lspddev : 1;
+ unsigned reserved : 1;
+ /** 0: OUT, 1: IN */
+ unsigned epdir : 1;
+ /** Endpoint number */
+ unsigned epnum : 4;
+ /** Maximum packet size in bytes */
+ unsigned mps : 11;
+ } b;
+} hcchar_data_t;
+
+typedef union hcsplt_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Split Enable */
+ unsigned spltena : 1;
+ /** Reserved */
+ unsigned reserved : 14;
+ /** Do Complete Split */
+ unsigned compsplt : 1;
+ /** Transaction Position */
+#define DWC_HCSPLIT_XACTPOS_MID 0
+#define DWC_HCSPLIT_XACTPOS_END 1
+#define DWC_HCSPLIT_XACTPOS_BEGIN 2
+#define DWC_HCSPLIT_XACTPOS_ALL 3
+ unsigned xactpos : 2;
+ /** Hub Address */
+ unsigned hubaddr : 7;
+ /** Port Address */
+ unsigned prtaddr : 7;
+ } b;
+} hcsplt_data_t;
+
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt
+ * Register (HCINT).
+ */
+typedef union hcint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Reserved */
+ unsigned reserved : 21;
+ /** Data Toggle Error */
+ unsigned datatglerr : 1;
+ /** Frame Overrun */
+ unsigned frmovrun : 1;
+ /** Babble Error */
+ unsigned bblerr : 1;
+ /** Transaction Err */
+ unsigned xacterr : 1;
+ /** NYET Response Received */
+ unsigned nyet : 1;
+ /** ACK Response Received */
+ unsigned ack : 1;
+ /** NAK Response Received */
+ unsigned nak : 1;
+ /** STALL Response Received */
+ unsigned stall : 1;
+ /** AHB Error */
+ unsigned ahberr : 1;
+ /** Channel Halted */
+ unsigned chhltd : 1;
+ /** Transfer Complete */
+ unsigned xfercomp : 1;
+ } b;
+} hcint_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Transfer Size
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hctsiz register.
+ */
+typedef union hctsiz_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Do PING protocol when 1 */
+ unsigned dopng : 1;
+ /**
+ * Packet ID for next data packet
+ * 0: DATA0
+ * 1: DATA2
+ * 2: DATA1
+ * 3: MDATA (non-Control), SETUP (Control)
+ */
+#define DWC_HCTSIZ_DATA0 0
+#define DWC_HCTSIZ_DATA1 2
+#define DWC_HCTSIZ_DATA2 1
+#define DWC_HCTSIZ_MDATA 3
+#define DWC_HCTSIZ_SETUP 3
+ unsigned pid : 2;
+ /** Data packets to transfer */
+ unsigned pktcnt : 10;
+ /** Total transfer size in bytes */
+ unsigned xfersize : 19;
+ } b;
+} hctsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt Mask
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcintmsk register.
+ */
+typedef union hcintmsk_data
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned reserved : 21;
+ unsigned datatglerr : 1;
+ unsigned frmovrun : 1;
+ unsigned bblerr : 1;
+ unsigned xacterr : 1;
+ unsigned nyet : 1;
+ unsigned ack : 1;
+ unsigned nak : 1;
+ unsigned stall : 1;
+ unsigned ahberr : 1;
+ unsigned chhltd : 1;
+ unsigned xfercompl : 1;
+ } b;
+} hcintmsk_data_t;
+
+/** OTG Host Interface Structure.
+ *
+ * The OTG Host Interface Structure structure contains information
+ * needed to manage the DWC_otg controller acting in host mode. It
+ * represents the programming view of the host-specific aspects of the
+ * controller.
+ */
+typedef struct dwc_otg_host_if {
+ /** Host Global Registers starting at offset 400h.*/
+ dwc_otg_host_global_regs_t *host_global_regs;
+#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400
+
+ /** Host Port 0 Control and Status Register */
+ volatile uint32_t *hprt0;
+#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440
+
+
+ /** Host Channel Specific Registers at offsets 500h-5FCh. */
+ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS];
+#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500
+#define DWC_OTG_CHAN_REGS_OFFSET 0x20
+
+
+ /* Host configuration information */
+ /** Number of Host Channels (range: 1-16) */
+ uint8_t num_host_channels;
+ /** Periodic EPs supported (0: no, 1: yes) */
+ uint8_t perio_eps_supported;
+ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
+ uint16_t perio_tx_fifo_size;
+
+} dwc_otg_host_if_t;
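+
+/*
+ * Example (illustrative): deriving the host register pointers from the
+ * offsets defined above. <code>base</code> is a hypothetical ioremapped
+ * controller base address (uint8_t *) and <code>host_if</code> a pointer
+ * to a dwc_otg_host_if_t.
+ *
+ * <code>
+ * int i;
+ * host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
+ *	(base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
+ * host_if->hprt0 = (volatile uint32_t *)
+ *	(base + DWC_OTG_HOST_PORT_REGS_OFFSET);
+ * for (i = 0; i < MAX_EPS_CHANNELS; i++)
+ *	host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
+ *		(base + DWC_OTG_HOST_CHAN_REGS_OFFSET + i * DWC_OTG_CHAN_REGS_OFFSET);
+ * </code>
+ */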
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Kconfig b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Kconfig
new file mode 100644
index 0000000000..7eb8cebf42
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Kconfig
@@ -0,0 +1,58 @@
+
+config USB_HOST_IFX
+ tristate "Infineon USB Host Controller Driver"
+ depends on USB
+ default n
+ help
+ Infineon USB Host Controller
+
+config USB_HOST_IFX_B
+ bool "USB host mode on core 1 and 2"
+ depends on USB_HOST_IFX
+ help
+ Both cores run as host
+
+#config USB_HOST_IFX_1
+#config USB_HOST_IFX_2
+
+#config IFX_DANUBE
+#config IFX_AMAZON_SE
+config IFX_AR9
+ depends on USB_HOST_IFX
+ bool "AR9"
+
+config IFX_VR9
+ depends on USB_HOST_IFX
+ bool "VR9"
+
+#config USB_HOST_IFX_FORCE_USB11
+# bool "Forced USB1.1"
+# depends on USB_HOST_IFX
+# default n
+# help
+# force to be USB 1.1
+
+#config USB_HOST_IFX_WITH_HS_ELECT_TST
+# bool "With HS_Electrical Test"
+# depends on USB_HOST_IFX
+# default n
+# help
+# With USBIF HSET routines
+
+#config USB_HOST_IFX_WITH_ISO
+# bool "With ISO transfer"
+# depends on USB_HOST_IFX
+# default n
+# help
+# With USBIF ISO transfer
+
+config USB_HOST_IFX_UNALIGNED_ADJ
+ bool "Adjust"
+ depends on USB_HOST_IFX
+ help
+ USB_HOST_IFX_UNALIGNED_ADJ
+
+#config USB_HOST_IFX_UNALIGNED_CHK
+#config USB_HOST_IFX_UNALIGNED_NONE
+
+
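+# Example (illustrative): a typical selection for an AR9 board running both
+# cores in host mode would set:
+#
+#   CONFIG_USB_HOST_IFX=y
+#   CONFIG_USB_HOST_IFX_B=y
+#   CONFIG_IFX_AR9=y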
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Makefile b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Makefile
new file mode 100644
index 0000000000..0a2ac99380
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/Makefile
@@ -0,0 +1,85 @@
+
+#
+# Makefile for the Infineon (Lantiq) USB host controller driver
+#
+ ifxusb_host-objs := ifxusb_driver.o
+ ifxusb_host-objs += ifxusb_ctl.o
+ ifxusb_host-objs += ifxusb_cif.o
+ ifxusb_host-objs += ifxusb_cif_h.o
+ ifxusb_host-objs += ifxhcd.o
+ ifxusb_host-objs += ifxhcd_es.o
+ ifxusb_host-objs += ifxhcd_intr.o
+ ifxusb_host-objs += ifxhcd_queue.o
+
+ifeq ($(CONFIG_IFX_TWINPASS),y)
+ EXTRA_CFLAGS += -D__IS_TWINPASS__
+endif
+ifeq ($(CONFIG_IFX_DANUBE),y)
+ EXTRA_CFLAGS += -D__IS_DANUBE__
+endif
+ifeq ($(CONFIG_IFX_AMAZON_SE),y)
+ EXTRA_CFLAGS += -D__IS_AMAZON_SE__
+endif
+ifeq ($(CONFIG_IFX_AR9),y)
+ EXTRA_CFLAGS += -D__IS_AR9__
+endif
+ifeq ($(CONFIG_IFX_AMAZON_S),y)
+ EXTRA_CFLAGS += -D__IS_AR9__
+endif
+ifeq ($(CONFIG_IFX_VR9),y)
+ EXTRA_CFLAGS += -D__IS_VR9__
+endif
+
+ifeq ($(CONFIG_USB_HOST_IFX),y)
+ EXTRA_CFLAGS += -Dlinux -D__LINUX__
+ EXTRA_CFLAGS += -D__IS_HOST__
+ EXTRA_CFLAGS += -D__KERNEL__
+endif
+
+ifeq ($(CONFIG_USB_HOST_IFX),m)
+ EXTRA_CFLAGS += -Dlinux -D__LINUX__
+ EXTRA_CFLAGS += -D__IS_HOST__
+ EXTRA_CFLAGS += -D__KERNEL__
+endif
+
+ifeq ($(CONFIG_USB_DEBUG),y)
+ EXTRA_CFLAGS += -D__DEBUG__
+ EXTRA_CFLAGS += -D__ENABLE_DUMP__
+endif
+
+ifeq ($(CONFIG_USB_HOST_IFX_B),y)
+ EXTRA_CFLAGS += -D__IS_DUAL__
+endif
+ifeq ($(CONFIG_USB_HOST_IFX_1),y)
+ EXTRA_CFLAGS += -D__IS_FIRST__
+endif
+ifeq ($(CONFIG_USB_HOST_IFX_2),y)
+ EXTRA_CFLAGS += -D__IS_SECOND__
+endif
+
+ifeq ($(CONFIG_USB_HOST_IFX_FORCE_USB11),y)
+ EXTRA_CFLAGS += -D__FORCE_USB11__
+endif
+ifeq ($(CONFIG_USB_HOST_IFX_WITH_HS_ELECT_TST),y)
+ EXTRA_CFLAGS += -D__WITH_HS_ELECT_TST__
+endif
+ifeq ($(CONFIG_USB_HOST_IFX_WITH_ISO),y)
+ EXTRA_CFLAGS += -D__EN_ISOC__
+endif
+ifeq ($(CONFIG_USB_HOST_IFX_UNALIGNED_ADJ),y)
+ EXTRA_CFLAGS += -D__UNALIGNED_BUFFER_ADJ__
+endif
+ifeq ($(CONFIG_USB_HOST_IFX_UNALIGNED_CHK),y)
+ EXTRA_CFLAGS += -D__UNALIGNED_BUFFER_CHK__
+endif
+
+# EXTRA_CFLAGS += -D__DYN_SOF_INTR__
+ EXTRA_CFLAGS += -D__UEIP__
+# EXTRA_CFLAGS += -D__EN_ISOC__
+# EXTRA_CFLAGS += -D__EN_ISOC_SPLIT__
+
+## 20110628 AVM/WK New flag for fewer SOF IRQs
+ EXTRA_CFLAGS += -D__USE_TIMER_4_SOF__
+
+obj-$(CONFIG_USB_HOST_IFX) += ifxusb_host.o
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/TagHistory b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/TagHistory
new file mode 100644
index 0000000000..3820d70b7a
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/TagHistory
@@ -0,0 +1,171 @@
+
+
++----------------------------------------------------------------------+
+| TAG: svn://embeddedvm/home/SVN/drivers/usb_host20/tags/5.18-r240-non_musb_ar9_vr9-SOF_Timer_Fixed
+| Created with SVN-Tagger version 3.74.
++----------------------------------------------------------------------+
+FIX - Corrected the SOF timer/IRQ control. (Bug in tag 5.17)
+FIX - Error handling corrected or added in several places.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://embeddedvm/home/SVN/drivers/usb_host20/tags/5.17-r237-non_musb_ar9_vr9-2_6_32_41_Kompatibel
+| Created with SVN-Tagger version 3.73.
++----------------------------------------------------------------------+
+FIX - Compatible with the update to kernel 2.6.32-41. Still suitable for 2.6.28.
+ENH - Reduced interrupt load by using an hrtimer instead of the SOF IRQ.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.16-r208-non_musb_ar9_vr9-20110421_Zero_Paket_Optimiert
+| Created with SVN-Tagger version 3.66.
++----------------------------------------------------------------------+
+
+FIX - VR9 / AR9 - Zero packet: optimization corrected.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.15-r205-non_musb_ar9_vr9-20110421_Zero_Paket_WA_funktioniert
+| Created with SVN-Tagger version 3.66.
++----------------------------------------------------------------------+
+
+FIX - VR9 / AR9 - "Zero Packet" funktioniert nun wirklich. Letzter Tag hatte einen Bug.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.14-r202-non_musb_ar9_vr9-20110420_Zero_Paket_WA
+| Created with SVN-Tagger version 3.66.
++----------------------------------------------------------------------+
+
+FIX - VR9 / AR9 - Zero packet workaround: a ZLP is now sent when URB_ZERO_PACKET is active.
+      Required by the LTE Altair firmware.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.13-r199-non_musb_ar9_vr9-20110310_Init_Fix
+| Created with SVN-Tagger version 3.64.
++----------------------------------------------------------------------+
+
+FIX - VR9 / AR9 - Initialization phase timing adapted to kernel 2.6.28 with UGW-4.3.1.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.12-r184-non_musb_ar9_vr9-20110118_Full_Speed_Fix
+| Created with SVN-Tagger version 3.58.
++----------------------------------------------------------------------+
+AR9/VR9 (3370,6840,7320):
+Makefile - FIX - (Workaround) Debug mode helps against enumeration errors with full-speed printers.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.11-r175-non_musb_ar9_vr9-20101220_VR9_2_Ports_DMA_Fix
+| Created with SVN-Tagger version 3.58.
++----------------------------------------------------------------------+
+
+FIX - VR9 - Workaround for the DMA burst size. When both USB ports are used, the USB host stops working.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.10-r169-non_musb_ar9_vr9-Fix_Spontan_Reboot
+| Created with SVN-Tagger version 3.58.
++----------------------------------------------------------------------+
+
+FIX - An endless loop led to a spontaneous reboot.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.9-r166-non_musb_ar9_vr9-20101112_deferred_completion
+| Created with SVN-Tagger version 3.58.
++----------------------------------------------------------------------+
+
+ENH - Deferred URB completion mechanism added. Now about 10% faster with usb-storage.
+
+FIX - PING flow control fixed.
+FIX - Channel halt is now always invoked. (Split transactions were not stopped successfully.)
+FIX - Spinlock usage improved. More stability.
+
+CHG - The __DEBUG__ build option now depends on CONFIG_USB_DEBUG
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.8-r149-non_musb_ar9_vr9-20100827_LTE_Interrupt_EP_Fix
+| Created with SVN-Tagger version 3.57.
++----------------------------------------------------------------------+
+AR9/VR9 - FIX - Interrupt packets were lost due to incorrect timing of the OddFrame bit.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.7-r142-non_musb_ar9_vr9-20100728_Unaligned_Buf_Fix
+| Created with SVN-Tagger version 3.57.
++----------------------------------------------------------------------+
+FIX - "Unaligned Data" Flag wieder nach Transfer geloescht.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.6-r133-non_musb_ar9_vr9-20100714_Toggle_Datenverlust_Fix
+| Created with SVN-Tagger version 3.57.
++----------------------------------------------------------------------+
+TL5508 - Some UMTS modems did not work correctly on the 7320 (AR9).
+FIX - Use the usbcore USB data toggle. Data loss after EP halt.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.5-r130-non_musb_ar9_vr9-20100712_USB_Ports_abschaltbar
+| Created with SVN-Tagger version 3.57.
++----------------------------------------------------------------------+
+Power - FIX - Both USB ports can be switched off on rmmod.
+rmmod - FIX - URB_Dequeue did not work when unloading the driver (several causes).
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.4-r126-non_musb_ar9_vr9-20100701_Lost_Interrupt_Workaround
+| Created with SVN-Tagger version 3.57.
++----------------------------------------------------------------------+
+FIX - Workaround for a missed interrupt on full-speed interrupt EPs.
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.3-r123-non_musb_ar9_vr9-20100630_UMTS_Fixes
+| Created with SVN-Tagger version 3.57.
++----------------------------------------------------------------------+
+FIX - Full-speed interrupt endpoints behind a hi-speed hub now work (UMTS modems)
+FIX - The usb_hcd_link_urb_from_ep API of usbcore must be used.
+FIX - Do not complete interrupt URBs on NAK.
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.2-r114-non_musb_ar9_vr9-20100520_StickAndSurf_funktioniert
+| Created with SVN-Tagger version 3.56.
++----------------------------------------------------------------------+
+- Merge with the new LANTIQ sources "3.0alpha B100312"
+- FIX - Spinlock added, Stick&Surf now works
+
+- DEP - CONFIG_USB_HOST_IFX_WITH_ISO is not supported: disable it in the kernel config.
+
+
+
++----------------------------------------------------------------------+
+| TAG: svn://EmbeddedVM/home/SVN/drivers/usb_host20/tags/5.1-r107-non_musb_ar9_vr9-20100505_IFXUSB_Host_mit_Energiemonitor
+| Created with SVN-Tagger version 3.56.
++----------------------------------------------------------------------+
+USB host driver for AR9 and VR9
+--------------------------------
+FIX - Toggle error after STALL - Simple workaround - Mass storage partitions are now detected!
+AVM_POWERMETER - USB energy monitor support.
+
+Known problem: Stick and Surf works only sporadically because the CONTROL_IRQ sometimes fails to occur.
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.c
new file mode 100644
index 0000000000..d2ae1250bb
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.c
@@ -0,0 +1,2523 @@
+/*****************************************************************************
+ ** FILE NAME : ifxhcd.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the structures, constants, and interfaces for
+ **                 the Host Controller Driver (HCD).
+ **
+ ** The Host Controller Driver (HCD) is responsible for translating requests
+ ** from the USB Driver into the appropriate actions on the IFXUSB controller.
+ ** It isolates the USBD from the specifics of the controller by providing an
+ ** API to the USBD.
+ *****************************************************************************/
+
+/*!
+ \file ifxhcd.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the implementation of the HCD. In Linux,
+ the HCD implements the hc_driver API.
+*/
+
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <linux/device.h>
+
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include <linux/dma-mapping.h>
+
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+#include "ifxhcd.h"
+
+#include <asm/irq.h>
+
+#ifdef CONFIG_AVM_POWERMETER
+#include <linux/avm_power.h>
+#endif /*--- #ifdef CONFIG_AVM_POWERMETER ---*/
+
+#ifdef __DEBUG__
+ static void dump_urb_info(struct urb *_urb, char* _fn_name);
+ static void dump_channel_info(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh);
+#endif
+
+
+/*!
+ \brief Sets the final status of an URB and returns it to the device driver. Any
+ required cleanup of the URB is performed.
+ */
+void ifxhcd_complete_urb(ifxhcd_hcd_t *_ifxhcd, ifxhcd_urbd_t *_urbd, int _status)
+{
+ struct urb *urb=NULL;
+ unsigned long flags = 0;
+
+ /*== AVM/BC 20101111 Function called with Lock ==*/
+ //SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+ if (!list_empty(&_urbd->urbd_list_entry))
+ list_del_init (&_urbd->urbd_list_entry);
+
+ if(!_urbd->urb)
+ {
+ IFX_ERROR("%s: invalid urb\n",__func__);
+ /*== AVM/BC 20101111 Function called with Lock ==*/
+ //SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+ return;
+ }
+
+ urb=_urbd->urb;
+
+ #ifdef __DEBUG__
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB))
+ {
+ IFX_PRINT("%s: _urbd %p, urb %p, device %d, ep %d %s/%s, status=%d\n",
+ __func__, _urbd,_urbd->urb, usb_pipedevice(_urbd->urb->pipe),
+ usb_pipeendpoint(_urbd->urb->pipe),
+ usb_pipein(_urbd->urb->pipe) ? "IN" : "OUT",
+ (_urbd->is_in) ? "IN" : "OUT",
+ _status);
+ if (_urbd->epqh->ep_type == IFXUSB_EP_TYPE_ISOC)
+ {
+ int i;
+ for (i = 0; i < _urbd->urb->number_of_packets; i++)
+ IFX_PRINT(" ISO Desc %d status: %d\n", i, _urbd->urb->iso_frame_desc[i].status);
+ }
+ }
+ #endif
+
+ if (!_urbd->epqh)
+ IFX_ERROR("%s: invalid epqd\n",__func__);
+
+ #if defined(__UNALIGNED_BUFFER_ADJ__)
+ else if(_urbd->is_active)
+ {
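+ /* IN transfer finished on an aligned bounce buffer: copy the data back into the caller's buffer */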
+ if( _urbd->epqh->aligned_checked &&
+ _urbd->epqh->using_aligned_buf &&
+ _urbd->xfer_buff &&
+ _urbd->is_in )
+ memcpy(_urbd->xfer_buff,_urbd->epqh->aligned_buf,_urbd->xfer_len);
+ _urbd->epqh->using_aligned_buf=0;
+ _urbd->epqh->using_aligned_setup=0;
+ _urbd->epqh->aligned_checked=0;
+ }
+ #endif
+
+ urb->status = _status;
+ urb->hcpriv=NULL;
+ kfree(_urbd);
+
+ usb_hcd_unlink_urb_from_ep(ifxhcd_to_syshcd(_ifxhcd), urb);
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+
+// usb_hcd_giveback_urb(ifxhcd_to_syshcd(_ifxhcd), urb);
+ usb_hcd_giveback_urb(ifxhcd_to_syshcd(_ifxhcd), urb, _status);
+
+ /*== AVM/BC 20100630 - 2.6.28 needs HCD link/unlink URBs ==*/
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+}
+
+/*== AVM/BC 20101111 URB Complete deferred
+ * Must be called with Spinlock
+ */
+
+/*!
+ \brief Inserts a urbd structure into the completion list. The urbd will be
+ completed later by select_eps_sub.
+ */
+void defer_ifxhcd_complete_urb(ifxhcd_hcd_t *_ifxhcd, ifxhcd_urbd_t *_urbd, int _status)
+{
+
+ _urbd->status = _status;
+
+ //Unlink Urbd from epqh / Insert it into the complete list
+ list_move_tail(&_urbd->urbd_list_entry, &_ifxhcd->urbd_complete_list);
+
+}
+
+/*!
+ \brief Processes all the URBs queued on a single EPQH. Completes them with
+ the given status and frees their URBDs.
+ */
+//static
+void kill_all_urbs_in_epqh(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh, int _status)
+{
+ struct list_head *urbd_item;
+ ifxhcd_urbd_t *urbd;
+
+ if(!_epqh)
+ return;
+
+ for (urbd_item = _epqh->urbd_list.next;
+ urbd_item != &_epqh->urbd_list;
+ urbd_item = _epqh->urbd_list.next)
+ {
+ urbd = list_entry(urbd_item, ifxhcd_urbd_t, urbd_list_entry);
+ ifxhcd_complete_urb(_ifxhcd, urbd, _status);
+ }
+}
+
+
+/*!
+ \brief Frees all EPQHs in a list. All URBs queued on each EPQH are completed
+ with -ETIMEDOUT and their URBDs are freed.
+ */
+//static
+void epqh_list_free(ifxhcd_hcd_t *_ifxhcd, struct list_head *_epqh_list)
+{
+ struct list_head *item;
+ ifxhcd_epqh_t *epqh;
+
+ if (!_epqh_list)
+ return;
+ if (_epqh_list->next == NULL) /* The list hasn't been initialized yet. */
+ return;
+
+ /* Ensure there are no URBDs or URBs left. */
+ for (item = _epqh_list->next; item != _epqh_list; item = _epqh_list->next)
+ {
+ epqh = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ kill_all_urbs_in_epqh(_ifxhcd, epqh, -ETIMEDOUT);
+ ifxhcd_epqh_free(epqh);
+ }
+}
+
+
+
+//static
+void epqh_list_free_all(ifxhcd_hcd_t *_ifxhcd)
+{
+ unsigned long flags;
+
+ /*== AVM/BC 20101111 - 2.6.28 Needs Spinlock ==*/
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_np_active );
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_np_ready );
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_intr_active );
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_intr_ready );
+ #ifdef __EN_ISOC__
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_isoc_active );
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_isoc_ready );
+ #endif
+ epqh_list_free(_ifxhcd, &_ifxhcd->epqh_stdby );
+
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+
+}
+
+
+/*!
+ \brief This function is called to handle the disconnection of host port.
+ */
+int32_t ifxhcd_disconnect(ifxhcd_hcd_t *_ifxhcd)
+{
+ IFX_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _ifxhcd);
+
+ /* Set status flags for the hub driver. */
+ _ifxhcd->flags.b.port_connect_status_change = 1;
+ _ifxhcd->flags.b.port_connect_status = 0;
+
+ /*
+ * Shutdown any transfers in process by clearing the Tx FIFO Empty
+ * interrupt mask and status bits and disabling subsequent host
+ * channel interrupts.
+ */
+ {
+ gint_data_t intr = { .d32 = 0 };
+ intr.b.nptxfempty = 1;
+ intr.b.ptxfempty = 1;
+ intr.b.hcintr = 1;
+ ifxusb_mreg (&_ifxhcd->core_if.core_global_regs->gintmsk, intr.d32, 0);
+ ifxusb_mreg (&_ifxhcd->core_if.core_global_regs->gintsts, intr.d32, 0);
+ }
+
+ /* Respond with an error status to all URBs in the schedule. */
+ epqh_list_free_all(_ifxhcd);
+
+ /* Clean up any host channels that were in use. */
+ {
+ int num_channels;
+ ifxhcd_hc_t *channel;
+ ifxusb_hc_regs_t *hc_regs;
+ hcchar_data_t hcchar;
+ int i;
+
+ num_channels = _ifxhcd->core_if.params.host_channels;
+
+ for (i = 0; i < num_channels; i++)
+ {
+ channel = &_ifxhcd->ifxhc[i];
+ if (list_empty(&channel->hc_list_entry))
+ {
+ hc_regs = _ifxhcd->core_if.hc_regs[i];
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ if (hcchar.b.chen)
+ {
+ /* Halt the channel. */
+ hcchar.b.chdis = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+ }
+ list_add_tail(&channel->hc_list_entry, &_ifxhcd->free_hc_list);
+ ifxhcd_hc_cleanup(&_ifxhcd->core_if, channel);
+ }
+ }
+ }
+ return 1;
+}
+
+
+/*!
+ \brief Frees secondary storage associated with the ifxhcd_hcd structure contained
+ in the struct usb_hcd field.
+ */
+static void ifxhcd_freeextra(struct usb_hcd *_syshcd)
+{
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd(_syshcd);
+
+ IFX_DEBUGPL(DBG_HCD, "IFXUSB HCD FREE\n");
+
+ /* Free memory for EPQH/URBD lists */
+ epqh_list_free_all(ifxhcd);
+
+ /* Free memory for the host channels. */
+ ifxusb_free_buf(ifxhcd->status_buf);
+ return;
+}
+#ifdef __USE_TIMER_4_SOF__
+static enum hrtimer_restart ifxhcd_timer_func(struct hrtimer *timer) {
+ ifxhcd_hcd_t *ifxhcd = container_of(timer, ifxhcd_hcd_t, hr_timer);
+
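+	/* SOF handling was deferred to this hrtimer; run the regular interrupt handler from timer context */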
+ ifxhcd_handle_intr(ifxhcd);
+
+ return HRTIMER_NORESTART;
+}
+#endif
+
+/*!
+ \brief Initializes the HCD. This function allocates memory for and initializes the
+ static parts of the usb_hcd and ifxhcd_hcd structures. It also registers the
+ USB bus with the core and calls the hc_driver->start() function. It returns
+ a negative error on failure.
+ */
+int ifxhcd_init(ifxhcd_hcd_t *_ifxhcd)
+{
+ int retval = 0;
+ struct usb_hcd *syshcd = NULL;
+
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD INIT\n");
+
+ spin_lock_init(&_ifxhcd->lock);
+#ifdef __USE_TIMER_4_SOF__
+ hrtimer_init(&_ifxhcd->hr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ _ifxhcd->hr_timer.function = ifxhcd_timer_func;
+#endif
+ _ifxhcd->hc_driver.description = _ifxhcd->core_if.core_name;
+ _ifxhcd->hc_driver.product_desc = "IFX USB Controller";
+ //_ifxhcd->hc_driver.hcd_priv_size = sizeof(ifxhcd_hcd_t);
+ _ifxhcd->hc_driver.hcd_priv_size = sizeof(unsigned long);
+ _ifxhcd->hc_driver.irq = ifxhcd_irq;
+ _ifxhcd->hc_driver.flags = HCD_MEMORY | HCD_USB2;
+ _ifxhcd->hc_driver.start = ifxhcd_start;
+ _ifxhcd->hc_driver.stop = ifxhcd_stop;
+ //_ifxhcd->hc_driver.reset =
+ //_ifxhcd->hc_driver.suspend =
+ //_ifxhcd->hc_driver.resume =
+ _ifxhcd->hc_driver.urb_enqueue = ifxhcd_urb_enqueue;
+ _ifxhcd->hc_driver.urb_dequeue = ifxhcd_urb_dequeue;
+ _ifxhcd->hc_driver.endpoint_disable = ifxhcd_endpoint_disable;
+ _ifxhcd->hc_driver.get_frame_number = ifxhcd_get_frame_number;
+ _ifxhcd->hc_driver.hub_status_data = ifxhcd_hub_status_data;
+ _ifxhcd->hc_driver.hub_control = ifxhcd_hub_control;
+ //_ifxhcd->hc_driver.hub_suspend =
+ //_ifxhcd->hc_driver.hub_resume =
+
+ /* Allocate memory for and initialize the base HCD. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+ syshcd = usb_create_hcd(&_ifxhcd->hc_driver, _ifxhcd->dev, _ifxhcd->core_if.core_name);
+#else
+ syshcd = usb_create_hcd(&_ifxhcd->hc_driver, _ifxhcd->dev, _ifxhcd->dev->bus_id);
+#endif
+
+ if (syshcd == NULL)
+ {
+ retval = -ENOMEM;
+ goto error1;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+ syshcd->has_tt = 1;
+#endif
+
+ syshcd->rsrc_start = (unsigned long)_ifxhcd->core_if.core_global_regs;
+ syshcd->regs = (void *)_ifxhcd->core_if.core_global_regs;
+ syshcd->self.otg_port = 0;
+
+ //*((unsigned long *)(&(syshcd->hcd_priv)))=(unsigned long)_ifxhcd;
+ //*((unsigned long *)(&(syshcd->hcd_priv[0])))=(unsigned long)_ifxhcd;
+ syshcd->hcd_priv[0]=(unsigned long)_ifxhcd;
+ _ifxhcd->syshcd=syshcd;
+
+ INIT_LIST_HEAD(&_ifxhcd->epqh_np_active );
+ INIT_LIST_HEAD(&_ifxhcd->epqh_np_ready );
+ INIT_LIST_HEAD(&_ifxhcd->epqh_intr_active );
+ INIT_LIST_HEAD(&_ifxhcd->epqh_intr_ready );
+ #ifdef __EN_ISOC__
+ INIT_LIST_HEAD(&_ifxhcd->epqh_isoc_active );
+ INIT_LIST_HEAD(&_ifxhcd->epqh_isoc_ready );
+ #endif
+ INIT_LIST_HEAD(&_ifxhcd->epqh_stdby );
+ INIT_LIST_HEAD(&_ifxhcd->urbd_complete_list);
+
+ /*
+ * Create a host channel descriptor for each host channel implemented
+ * in the controller. Initialize the channel descriptor array.
+ */
+ INIT_LIST_HEAD(&_ifxhcd->free_hc_list);
+ {
+ int num_channels = _ifxhcd->core_if.params.host_channels;
+ int i;
+ for (i = 0; i < num_channels; i++)
+ {
+ _ifxhcd->ifxhc[i].hc_num = i;
+ IFX_DEBUGPL(DBG_HCDV, "HCD Added channel #%d\n", i);
+ }
+ }
+
+ /* Set device flags indicating whether the HCD supports DMA. */
+ if(_ifxhcd->dev->dma_mask)
+ *(_ifxhcd->dev->dma_mask) = ~0;
+ _ifxhcd->dev->coherent_dma_mask = ~0;
+
+ /*
+ * Finish generic HCD initialization and start the HCD. This function
+ * allocates the DMA buffer pool, registers the USB bus, requests the
+ * IRQ line, and calls ifxusb_hcd_start method.
+ */
+// retval = usb_add_hcd(syshcd, _ifxhcd->core_if.irq, SA_INTERRUPT|SA_SHIRQ);
+ retval = usb_add_hcd(syshcd, _ifxhcd->core_if.irq, IRQF_DISABLED | IRQF_SHARED );
+ if (retval < 0)
+ goto error2;
+
+ /*
+ * Allocate space for storing data on status transactions. Normally no
+ * data is sent, but this space acts as a bit bucket. This must be
+ * done after usb_add_hcd since that function allocates the DMA buffer
+ * pool.
+ */
+ _ifxhcd->status_buf = ifxusb_alloc_buf(IFXHCD_STATUS_BUF_SIZE, 1);
+
+ if (_ifxhcd->status_buf)
+ {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD Initialized, bus=%s, usbbus=%d\n", _ifxhcd->core_if.core_name, syshcd->self.busnum);
+#else
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD Initialized, bus=%s, usbbus=%d\n", _ifxhcd->dev->bus_id, syshcd->self.busnum);
+#endif
+ return 0;
+ }
+ IFX_ERROR("%s: status_buf allocation failed\n", __func__);
+
+ /* Error conditions */
+ usb_remove_hcd(syshcd);
+error2:
+ ifxhcd_freeextra(syshcd);
+ usb_put_hcd(syshcd);
+error1:
+ return retval;
+}
+
+/*!
+ \brief Removes the HCD.
+ Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void ifxhcd_remove(ifxhcd_hcd_t *_ifxhcd)
+{
+ struct usb_hcd *syshcd = ifxhcd_to_syshcd(_ifxhcd);
+
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD REMOVE\n");
+
+/* == AVM/WK 20100709 - Fix: Order changed, disable IRQs not before remove_hcd == */
+
+ usb_remove_hcd(syshcd);
+
+ /* Turn off all interrupts */
+ ifxusb_wreg (&_ifxhcd->core_if.core_global_regs->gintmsk, 0);
+ ifxusb_mreg (&_ifxhcd->core_if.core_global_regs->gahbcfg, 1, 0);
+
+ ifxhcd_freeextra(syshcd);
+
+ usb_put_hcd(syshcd);
+
+ return;
+}
+
+
+/* =========================================================================
+ * Linux HC Driver Functions
+ * ========================================================================= */
+
+/*!
+ \brief Initializes the IFXUSB controller and its root hub and prepares it for host
+ mode operation. Activates the root port. Returns 0 on success and a negative
+ error code on failure.
+ Called by USB stack.
+ */
+int ifxhcd_start(struct usb_hcd *_syshcd)
+{
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd (_syshcd);
+ ifxusb_core_if_t *core_if = &ifxhcd->core_if;
+ struct usb_bus *bus;
+
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD START\n");
+
+ bus = hcd_to_bus(_syshcd);
+
+ /* Initialize the bus state. */
+ _syshcd->state = HC_STATE_RUNNING;
+
+ /* Initialize and connect root hub if one is not already attached */
+ if (bus->root_hub)
+ {
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD Has Root Hub\n");
+ /* Inform the HUB driver to resume. */
+ usb_hcd_resume_root_hub(_syshcd);
+ }
+
+ ifxhcd->flags.d32 = 0;
+
+ /* Put all channels in the free channel list and clean up channel states.*/
+ {
+ struct list_head *item;
+ item = ifxhcd->free_hc_list.next;
+ while (item != &ifxhcd->free_hc_list)
+ {
+ list_del(item);
+ item = ifxhcd->free_hc_list.next;
+ }
+ }
+ {
+ int num_channels = ifxhcd->core_if.params.host_channels;
+ int i;
+ for (i = 0; i < num_channels; i++)
+ {
+ ifxhcd_hc_t *channel;
+ channel = &ifxhcd->ifxhc[i];
+ list_add_tail(&channel->hc_list_entry, &ifxhcd->free_hc_list);
+ ifxhcd_hc_cleanup(&ifxhcd->core_if, channel);
+ }
+ }
+ /* Initialize the USB core for host mode operation. */
+
+ ifxusb_host_enable_interrupts(core_if);
+ ifxusb_enable_global_interrupts(core_if);
+ ifxusb_phy_power_on (core_if);
+
+ ifxusb_vbus_init(core_if);
+
+ /* Turn on the vbus power. */
+ {
+ hprt0_data_t hprt0;
+ hprt0.d32 = ifxusb_read_hprt0(core_if);
+
+ IFX_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+ if (hprt0.b.prtpwr == 0 )
+ {
+ hprt0.b.prtpwr = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ ifxusb_vbus_on(core_if);
+ }
+ }
+ return 0;
+}
+
+
+/*!
+ \brief Halts the IFXUSB host mode operations in a clean manner. USB transfers are
+ stopped.
+ */
+void ifxhcd_stop(struct usb_hcd *_syshcd)
+{
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd(_syshcd);
+ hprt0_data_t hprt0 = { .d32=0 };
+
+ IFX_DEBUGPL(DBG_HCD, "IFX USB HCD STOP\n");
+
+ /* Turn off all interrupts. */
+ ifxusb_disable_global_interrupts(&ifxhcd->core_if );
+ ifxusb_host_disable_interrupts(&ifxhcd->core_if );
+#ifdef __USE_TIMER_4_SOF__
+ hrtimer_cancel(&ifxhcd->hr_timer);
+#endif
+ /*
+ * The root hub should be disconnected before this function is called.
+ * The disconnect will clear the URBD lists (via ..._hcd_urb_dequeue)
+ * and the EPQH lists (via ..._hcd_endpoint_disable).
+ */
+
+ /* Turn off the vbus power */
+ IFX_PRINT("PortPower off\n");
+
+ ifxusb_vbus_off(&ifxhcd->core_if );
+
+ ifxusb_vbus_free(&ifxhcd->core_if );
+
+ hprt0.b.prtpwr = 0;
+ ifxusb_wreg(ifxhcd->core_if.hprt0, hprt0.d32);
+ return;
+}
+
+/*!
+ \brief Returns the current frame number
+ */
+int ifxhcd_get_frame_number(struct usb_hcd *_syshcd)
+{
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd(_syshcd);
+ hfnum_data_t hfnum;
+
+ hfnum.d32 = ifxusb_rreg(&ifxhcd->core_if.host_global_regs->hfnum);
+
+ return hfnum.b.frnum;
+}
+
+/*!
+ \brief Starts processing a USB transfer request specified by a USB Request Block
+ (URB). mem_flags indicates the type of memory allocation to use while
+ processing this URB.
+ */
+int ifxhcd_urb_enqueue( struct usb_hcd *_syshcd,
+ /*--- struct usb_host_endpoint *_sysep, parameter removed in 2.6.28 ---*/
+ struct urb *_urb,
+ gfp_t _mem_flags)
+{
+ int retval = 0;
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd (_syshcd);
+ struct usb_host_endpoint *_sysep = ifxhcd_urb_to_endpoint(_urb);
+ ifxhcd_epqh_t *epqh;
+
+ #ifdef __DEBUG__
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB))
+ dump_urb_info(_urb, "ifxusb_hcd_urb_enqueue");
+ #endif //__DEBUG__
+
+ if (!ifxhcd->flags.b.port_connect_status) /* No longer connected. */
+ return -ENODEV;
+
+ #ifndef __EN_ISOC__
+ if(usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS)
+ {
+ IFX_ERROR("ISOC transfer not supported!!!\n");
+ return -ENODEV;
+ }
+ #endif
+
+ retval=ifxhcd_urbd_create (ifxhcd,_urb);
+
+ if (retval)
+ {
+ IFX_ERROR("IFXUSB HCD URB Enqueue failed creating URBD\n");
+ return retval;
+ }
+ epqh = (ifxhcd_epqh_t *) _sysep->hcpriv;
+ ifxhcd_epqh_ready(ifxhcd, epqh);
+
+ select_eps(ifxhcd);
+ //enable_sof(ifxhcd);
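+ /* Unmask the SOF interrupt so the newly queued transfer gets scheduled */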
+ {
+ gint_data_t gintsts;
+ gintsts.d32=0;
+ gintsts.b.sofintr = 1;
+ ifxusb_mreg(&ifxhcd->core_if.core_global_regs->gintmsk, 0,gintsts.d32);
+ }
+
+ return retval;
+}
+
+/*!
+ \brief Aborts/cancels a USB transfer request. Always returns 0 to indicate
+ success.
+ */
+int ifxhcd_urb_dequeue( struct usb_hcd *_syshcd,
+                        struct urb *_urb, int status /* parameter new in 2.6.28 */)
+{
+ unsigned long flags;
+ ifxhcd_hcd_t *ifxhcd;
+ ifxhcd_urbd_t *urbd;
+ ifxhcd_epqh_t *epqh;
+ int is_active=0;
+ int rc;
+
+ struct usb_host_endpoint *_sysep;
+
+ IFX_DEBUGPL(DBG_HCD, "IFXUSB HCD URB Dequeue\n");
+
+ #ifndef __EN_ISOC__
+ if(usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS)
+ return 0;
+ #endif
+
+ _sysep = ifxhcd_urb_to_endpoint(_urb);
+
+ ifxhcd = syshcd_to_ifxhcd(_syshcd);
+
+ SPIN_LOCK_IRQSAVE(&ifxhcd->lock, flags);
+
+ /*== AVM/BC 20100630 - 2.6.28 needs HCD link/unlink URBs ==*/
+ rc = usb_hcd_check_unlink_urb(_syshcd, _urb, status);
+ if (rc) {
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+ return rc;
+ }
+
+ urbd = (ifxhcd_urbd_t *) _urb->hcpriv;
+
+ if(_sysep)
+ epqh = (ifxhcd_epqh_t *) _sysep->hcpriv;
+ else
+ epqh = (ifxhcd_epqh_t *) urbd->epqh;
+
+ if(epqh!=urbd->epqh)
+ IFX_ERROR("%s inconsistant epqh %p %p\n",__func__,epqh,urbd->epqh);
+
+ #ifdef __DEBUG__
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB))
+ {
+ dump_urb_info(_urb, "ifxhcd_urb_dequeue");
+ if (epqh->is_active)
+ dump_channel_info(ifxhcd, epqh);
+ }
+ #endif //__DEBUG__
+
+ if(!epqh->hc)
+ epqh->is_active=0;
+ else if (!ifxhcd->flags.b.port_connect_status)
+ epqh->is_active=0;
+ else if (epqh->is_active && urbd->is_active)
+ {
+ /*== AVM/WK 20100709 - halt channel only if really started ==*/
+ //if (epqh->hc->xfer_started && !epqh->hc->wait_for_sof) {
+ /*== AVM/WK 20101112 - halt channel if started ==*/
+ if (epqh->hc->xfer_started) {
+ /*
+ * If still connected (i.e. in host mode), halt the
+ * channel so it can be used for other transfers. If
+ * no longer connected, the host registers can't be
+ * written to halt the channel since the core is in
+ * device mode.
+ */
+ /* == 20110803 AVM/WK FIX propagate status == */
+ if (_urb->status == -EINPROGRESS) {
+ _urb->status = status;
+ }
+ ifxhcd_hc_halt(&ifxhcd->core_if, epqh->hc, HC_XFER_URB_DEQUEUE);
+ epqh->hc = NULL;
+ is_active=1;
+ }
+ }
+
+ if(is_active)
+ {
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+ }
+ else
+ {
+ list_del_init(&urbd->urbd_list_entry);
+ kfree (urbd);
+
+ /*== AVM/BC 20100630 - 2.6.28 needs HCD link/unlink URBs ==*/
+ usb_hcd_unlink_urb_from_ep(_syshcd, _urb);
+
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+ _urb->hcpriv = NULL;
+// usb_hcd_giveback_urb(_syshcd, _urb);
+ usb_hcd_giveback_urb(_syshcd, _urb, status /* new in 2.6.28 */);
+ select_eps(ifxhcd);
+ }
+
+ return 0;
+}
+
+
+
+/*!
+ \brief Frees resources in the IFXUSB controller related to a given endpoint. Also
+ clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ must already be dequeued.
+ */
+void ifxhcd_endpoint_disable( struct usb_hcd *_syshcd,
+ struct usb_host_endpoint *_sysep)
+{
+ ifxhcd_epqh_t *epqh;
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd(_syshcd);
+ unsigned long flags;
+
+ int retry = 0;
+
+ IFX_DEBUGPL(DBG_HCD, "IFXUSB HCD EP DISABLE: _bEndpointAddress=0x%02x, "
+ "endpoint=%d\n", _sysep->desc.bEndpointAddress,
+ ifxhcd_ep_addr_to_endpoint(_sysep->desc.bEndpointAddress));
+
+ SPIN_LOCK_IRQSAVE(&ifxhcd->lock, flags);
+ if((uint32_t)_sysep>=0x80000000 && (uint32_t)_sysep->hcpriv>=(uint32_t)0x80000000)
+ {
+ epqh = (ifxhcd_epqh_t *)(_sysep->hcpriv);
+ if (epqh && epqh->sysep==_sysep)
+ {
+
+#if 1 /*== AVM/BC 20101111 CHG Option active: Kill URBs when disabling EP ==*/
+ while (!list_empty(&epqh->urbd_list))
+ {
+ if (retry++ > 250)
+ {
+ IFX_WARN("IFXUSB HCD EP DISABLE:"
+ " URBD List for this endpoint is not empty\n");
+ break;
+ }
+ kill_all_urbs_in_epqh(ifxhcd, epqh, -ETIMEDOUT);
+ }
+#else
+ while (!list_empty(&epqh->urbd_list))
+ {
+ /** Check that the QTD list is really empty */
+ if (retry++ > 250)
+ {
+ IFX_WARN("IFXUSB HCD EP DISABLE:"
+ " URBD List for this endpoint is not empty\n");
+ break;
+ }
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ SPIN_LOCK_IRQSAVE(&ifxhcd->lock, flags);
+ }
+#endif
+
+ ifxhcd_epqh_free(epqh);
+ _sysep->hcpriv = NULL;
+ }
+ }
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+}
+
+
+/*!
+ \brief Handles host mode interrupts for the IFXUSB controller. Returns IRQ_NONE if
+ there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ interrupt.
+
+ This function is called by the USB core when an interrupt occurs.
+ */
+irqreturn_t ifxhcd_irq(struct usb_hcd *_syshcd)
+{
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd (_syshcd);
+ int32_t retval=0;
+
+ //mask_and_ack_ifx_irq (ifxhcd->core_if.irq);
+ retval = ifxhcd_handle_intr(ifxhcd);
+ return IRQ_RETVAL(retval);
+}
+
+
+/*!
+ \brief Handles host mode Over Current Interrupt
+ */
+irqreturn_t ifxhcd_oc_irq(int _irq , void *_dev)
+{
+ ifxhcd_hcd_t *ifxhcd = _dev;
+ int32_t retval=1;
+
+ ifxhcd->flags.b.port_over_current_change = 1;
+ ifxusb_vbus_off(&ifxhcd->core_if);
+ IFX_DEBUGP("OC INTERRUPT # %d\n",ifxhcd->core_if.core_no);
+
+ //mask_and_ack_ifx_irq (_irq);
+ return IRQ_RETVAL(retval);
+}
+
+/*!
+ \brief Creates Status Change bitmap for the root hub and root port. The bitmap is
+ returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ is the status change indicator for the single root port. Returns 1 if either
+ change indicator is 1, otherwise returns 0.
+ */
+int ifxhcd_hub_status_data(struct usb_hcd *_syshcd, char *_buf)
+{
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd (_syshcd);
+
+ _buf[0] = 0;
+ _buf[0] |= (ifxhcd->flags.b.port_connect_status_change ||
+ ifxhcd->flags.b.port_reset_change ||
+ ifxhcd->flags.b.port_enable_change ||
+ ifxhcd->flags.b.port_suspend_change ||
+ ifxhcd->flags.b.port_over_current_change) << 1;
+
+ #ifdef __DEBUG__
+ if (_buf[0])
+ {
+ IFX_DEBUGPL(DBG_HCD, "IFXUSB HCD HUB STATUS DATA:"
+ " Root port status changed\n");
+ IFX_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
+ ifxhcd->flags.b.port_connect_status_change);
+ IFX_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
+ ifxhcd->flags.b.port_reset_change);
+ IFX_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
+ ifxhcd->flags.b.port_enable_change);
+ IFX_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
+ ifxhcd->flags.b.port_suspend_change);
+ IFX_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
+ ifxhcd->flags.b.port_over_current_change);
+ }
+ #endif //__DEBUG__
+ return (_buf[0] != 0);
+}
+
+#ifdef __WITH_HS_ELECT_TST__
+ extern void do_setup(ifxusb_core_if_t *_core_if) ;
+ extern void do_in_ack(ifxusb_core_if_t *_core_if);
+#endif //__WITH_HS_ELECT_TST__
+
+/*!
+ \brief Handles hub class-specific requests.
+ */
+int ifxhcd_hub_control( struct usb_hcd *_syshcd,
+ u16 _typeReq,
+ u16 _wValue,
+ u16 _wIndex,
+ char *_buf,
+ u16 _wLength)
+{
+ int retval = 0;
+
+ ifxhcd_hcd_t *ifxhcd = syshcd_to_ifxhcd (_syshcd);
+ ifxusb_core_if_t *core_if = &ifxhcd->core_if;
+ struct usb_hub_descriptor *desc;
+ hprt0_data_t hprt0 = {.d32 = 0};
+
+ uint32_t port_status;
+
+ switch (_typeReq)
+ {
+ case ClearHubFeature:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearHubFeature 0x%x\n", _wValue);
+ switch (_wValue)
+ {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ retval = -EINVAL;
+ IFX_ERROR ("IFXUSB HCD - "
+ "ClearHubFeature request %xh unknown\n", _wValue);
+ }
+ break;
+ case ClearPortFeature:
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+
+ switch (_wValue)
+ {
+ case USB_PORT_FEAT_ENABLE:
+ IFX_DEBUGPL (DBG_ANY, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtena = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtres = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ /* Clear Resume bit */
+ mdelay (100);
+ hprt0.b.prtres = 0;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_POWER:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_POWER\n");
+ #ifdef __IS_DUAL__
+ ifxusb_vbus_off(core_if);
+ #else
+ ifxusb_vbus_off(core_if);
+ #endif
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtpwr = 0;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+ /* Port indicator not supported */
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ /* Clears the driver's internal connect status change
+ * flag */
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+ ifxhcd->flags.b.port_connect_status_change = 0;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* Clears the driver's internal Port Reset Change
+ * flag */
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+ ifxhcd->flags.b.port_reset_change = 0;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ /* Clears the driver's internal Port
+ * Enable/Disable Change flag */
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+ ifxhcd->flags.b.port_enable_change = 0;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Clears the driver's internal Port Suspend
+ * Change flag, which is set when resume signaling on
+ * the host port is complete */
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+ ifxhcd->flags.b.port_suspend_change = 0;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+ ifxhcd->flags.b.port_over_current_change = 0;
+ break;
+ default:
+ retval = -EINVAL;
+ IFX_ERROR ("IFXUSB HCD - "
+ "ClearPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ }
+ break;
+ case GetHubDescriptor:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "GetHubDescriptor\n");
+ desc = (struct usb_hub_descriptor *)_buf;
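+ /* Report a minimal single-port hub descriptor (bDescriptorType 0x29 = hub) */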
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = 0x08;
+ desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0;
+// desc->bitmap[0] = 0;
+// desc->bitmap[1] = 0xff;
+ break;
+ case GetHubStatus:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "GetHubStatus\n");
+ memset (_buf, 0, 4);
+ break;
+ case GetPortStatus:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "GetPortStatus\n");
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+
+# ifdef CONFIG_AVM_POWERMETER
+ {
+ /* first port only, but 2 Hosts */
+ static unsigned char ucOldPower1 = 255;
+ static unsigned char ucOldPower2 = 255;
+
+ unsigned char ucNewPower = 0;
+ struct usb_device *childdev = _syshcd->self.root_hub->children[0];
+
+ if (childdev != NULL) {
+ ucNewPower = (childdev->actconfig != NULL)
+ ? childdev->actconfig->desc.bMaxPower
+ : 50;/* default: 50 means 100 mA*/
+ }
+ if (_syshcd->self.busnum == 1) {
+ if (ucOldPower1 != ucNewPower) {
+ ucOldPower1 = ucNewPower;
+ printk (KERN_INFO "IFXHCD#1: AVM Powermeter changed to %u mA\n", ucNewPower*2);
+ PowerManagmentRessourceInfo(powerdevice_usb_host, ucNewPower*2);
+ }
+ } else {
+ if (ucOldPower2 != ucNewPower) {
+ ucOldPower2 = ucNewPower;
+ printk (KERN_INFO "IFXHCD#2: AVM Powermeter changed to %u mA\n", ucNewPower*2);
+ PowerManagmentRessourceInfo(powerdevice_usb_host2, ucNewPower*2);
+ }
+ }
+ }
+# endif /*--- #ifdef CONFIG_AVM_POWERMETER ---*/
+
+ port_status = 0;
+ if (ifxhcd->flags.b.port_connect_status_change)
+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+ if (ifxhcd->flags.b.port_enable_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+ if (ifxhcd->flags.b.port_suspend_change)
+ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+ if (ifxhcd->flags.b.port_reset_change)
+ port_status |= (1 << USB_PORT_FEAT_C_RESET);
+ if (ifxhcd->flags.b.port_over_current_change)
+ {
+ IFX_ERROR("Device Not Supported\n");
+ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+ if (!ifxhcd->flags.b.port_connect_status)
+ {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+ *((u32 *) _buf) = cpu_to_le32(port_status);
+ break;
+ }
+
+ hprt0.d32 = ifxusb_rreg(core_if->hprt0);
+ IFX_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
+ if (hprt0.b.prtconnsts)
+ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+ if (hprt0.b.prtena)
+ port_status |= (1 << USB_PORT_FEAT_ENABLE);
+ if (hprt0.b.prtsusp)
+ port_status |= (1 << USB_PORT_FEAT_SUSPEND);
+ if (hprt0.b.prtovrcurract)
+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+ if (hprt0.b.prtrst)
+ port_status |= (1 << USB_PORT_FEAT_RESET);
+ if (hprt0.b.prtpwr)
+ port_status |= (1 << USB_PORT_FEAT_POWER);
+/* if (hprt0.b.prtspd == IFXUSB_HPRT0_PRTSPD_HIGH_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
+ else if (hprt0.b.prtspd == IFXUSB_HPRT0_PRTSPD_LOW_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);*/
+ if (hprt0.b.prttstctl)
+ port_status |= (1 << USB_PORT_FEAT_TEST);
+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
+ *((u32 *) _buf) = cpu_to_le32(port_status);
+ break;
+ case SetHubFeature:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "SetHubFeature\n");
+ /* No HUB features supported */
+ break;
+ case SetPortFeature:
+ if (_wValue != USB_PORT_FEAT_TEST && (!_wIndex || _wIndex > 1))
+ goto error;
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return without doing anything since the port
+ * register can't be written if the core is in device
+ * mode.
+ */
+ if (!ifxhcd->flags.b.port_connect_status)
+ break;
+ switch (_wValue)
+ {
+ case USB_PORT_FEAT_SUSPEND:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtsusp = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ //IFX_PRINT( "SUSPEND: HPRT0=%0x\n", hprt0.d32);
+ /* Suspend the Phy Clock */
+ {
+ pcgcctl_data_t pcgcctl = {.d32=0};
+ pcgcctl.b.stoppclk = 1;
+ ifxusb_wreg(core_if->pcgcctl, pcgcctl.d32);
+ }
+ break;
+ case USB_PORT_FEAT_POWER:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_POWER\n");
+ ifxusb_vbus_on (core_if);
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtpwr = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_RESET:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_RESET\n");
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtrst = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+ MDELAY (60);
+ hprt0.b.prtrst = 0;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ break;
+ #ifdef __WITH_HS_ELECT_TST__
+ case USB_PORT_FEAT_TEST:
+ {
+ uint32_t t;
+ gint_data_t gintmsk;
+ t = (_wIndex >> 8); /* MSB wIndex USB */
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t);
+ warn("USB_PORT_FEAT_TEST %d\n", t);
+ if (t < 6)
+ {
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prttstctl = t;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ }
+ else if (t == 6) /* HS_HOST_PORT_SUSPEND_RESUME */
+ {
+ /* Save current interrupt mask */
+ gintmsk.d32 = ifxusb_rreg(&core_if->core_global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ ifxusb_wreg(&core_if->core_global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive suspend on the root port */
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtsusp = 1;
+ hprt0.b.prtres = 0;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive resume on the root port */
+ hprt0.d32 = ifxusb_read_hprt0 (core_if);
+ hprt0.b.prtsusp = 0;
+ hprt0.b.prtres = 1;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ mdelay(100);
+
+ /* Clear the resume bit */
+ hprt0.b.prtres = 0;
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+
+ /* Restore interrupts */
+ ifxusb_wreg(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+ }
+ else if (t == 7) /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
+ {
+ /* Save current interrupt mask */
+ gintmsk.d32 = ifxusb_rreg(&core_if->core_global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ ifxusb_wreg(&core_if->core_global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Send the Setup packet */
+ do_setup(core_if);
+
+ /* 15 second delay so nothing else happens for a while */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ ifxusb_wreg(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+ }
+
+ else if (t == 8) /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
+ {
+ /* Save current interrupt mask */
+ gintmsk.d32 = ifxusb_rreg(&core_if->core_global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ ifxusb_wreg(&core_if->core_global_regs->gintmsk, 0);
+
+ /* Send the Setup packet */
+ do_setup(core_if);
+
+ /* 15 second delay so nothing else happens for a while */
+ mdelay(15000);
+
+ /* Send the In and Ack packets */
+ do_in_ack(core_if);
+
+ /* 15 second delay so nothing else happens for a while */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ ifxusb_wreg(&core_if->core_global_regs->gintmsk, gintmsk.d32);
+ }
+ }
+ break;
+ #endif //__WITH_HS_ELECT_TST__
+ case USB_PORT_FEAT_INDICATOR:
+ IFX_DEBUGPL (DBG_HCD, "IFXUSB HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ retval = -EINVAL;
+ IFX_ERROR ("IFXUSB HCD - "
+ "SetPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ }
+ break;
+ default:
+ error:
+ retval = -EINVAL;
+ IFX_WARN ("IFXUSB HCD - "
+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
+ _typeReq, _wIndex, _wValue);
+ }
+ return retval;
+}
+
+
+/*!
+ \brief Assigns transactions from a URBD to a free host channel and initializes the
+ host channel to perform the transactions. The host channel is removed from
+ the free list.
+ \param _ifxhcd The HCD state structure.
+ \param _epqh Transactions from the first URBD for this EPQH are selected and assigned to a free host channel.
+ */
+static int assign_and_init_hc(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh)
+{
+ ifxhcd_hc_t *ifxhc;
+ ifxhcd_urbd_t *urbd;
+ struct urb *urb;
+
+ IFX_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _ifxhcd, _epqh);
+
+ if(list_empty(&_epqh->urbd_list))
+ return 0;
+
+ ifxhc = list_entry(_ifxhcd->free_hc_list.next, ifxhcd_hc_t, hc_list_entry);
+ /* Remove the host channel from the free list. */
+ list_del_init(&ifxhc->hc_list_entry);
+
+ urbd = list_entry(_epqh->urbd_list.next, ifxhcd_urbd_t, urbd_list_entry);
+ urb = urbd->urb;
+
+ _epqh->hc = ifxhc;
+ _epqh->urbd = urbd;
+ ifxhc->epqh = _epqh;
+
+ urbd->is_active=1;
+
+ /*
+ * Use usb_pipedevice to determine device address. This address is
+ * 0 before the SET_ADDRESS command and the correct address afterward.
+ */
+ ifxhc->dev_addr = usb_pipedevice(urb->pipe);
+ ifxhc->ep_num = usb_pipeendpoint(urb->pipe);
+
+ ifxhc->xfer_started = 0;
+
+ if (urb->dev->speed == USB_SPEED_LOW) ifxhc->speed = IFXUSB_EP_SPEED_LOW;
+ else if (urb->dev->speed == USB_SPEED_FULL) ifxhc->speed = IFXUSB_EP_SPEED_FULL;
+ else ifxhc->speed = IFXUSB_EP_SPEED_HIGH;
+
+ ifxhc->mps = _epqh->mps;
+ ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+
+ ifxhc->ep_type = _epqh->ep_type;
+
+ if(_epqh->ep_type==IFXUSB_EP_TYPE_CTRL)
+ {
+ ifxhc->control_phase=IFXHCD_CONTROL_SETUP;
+ ifxhc->is_in = 0;
+ ifxhc->data_pid_start = IFXUSB_HC_PID_SETUP;
+ ifxhc->xfer_buff = urbd->setup_buff;
+ ifxhc->xfer_len = 8;
+ ifxhc->xfer_count = 0;
+ ifxhc->short_rw =(urb->transfer_flags & URB_ZERO_PACKET)?1:0;
+ }
+ else
+ {
+ ifxhc->is_in = urbd->is_in;
+ ifxhc->xfer_buff = urbd->xfer_buff;
+ ifxhc->xfer_len = urbd->xfer_len;
+ ifxhc->xfer_count = 0;
+ /* == AVM/WK 20100710 Fix - Use toggle of usbcore ==*/
+ //ifxhc->data_pid_start = _epqh->data_toggle;
+ ifxhc->data_pid_start = usb_gettoggle (urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout (urb->pipe))
+ ? IFXUSB_HC_PID_DATA1
+ : IFXUSB_HC_PID_DATA0;
+ if(ifxhc->is_in)
+ ifxhc->short_rw =0;
+ else
+ ifxhc->short_rw =(urb->transfer_flags & URB_ZERO_PACKET)?1:0;
+
+ #ifdef __EN_ISOC__
+ if(_epqh->ep_type==IFXUSB_EP_TYPE_ISOC)
+ {
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[urbd->isoc_frame_index];
+ ifxhc->xfer_buff += frame_desc->offset + urbd->isoc_split_offset;
+ ifxhc->xfer_len = frame_desc->length - urbd->isoc_split_offset;
+ if (ifxhc->isoc_xact_pos == IFXUSB_HCSPLIT_XACTPOS_ALL)
+ {
+ if (ifxhc->xfer_len <= 188)
+ ifxhc->isoc_xact_pos = IFXUSB_HCSPLIT_XACTPOS_ALL;
+ else
+ ifxhc->isoc_xact_pos = IFXUSB_HCSPLIT_XACTPOS_BEGIN;
+ }
+ }
+ #endif
+ }
+
+ ifxhc->do_ping=0;
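+ /* Core revisions below 0x4f54271a use the PING protocol for high-speed transfers */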
+ if (_ifxhcd->core_if.snpsid < 0x4f54271a && ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ ifxhc->do_ping=1;
+
+
+ /* Set the split attributes */
+ ifxhc->split = 0;
+ if (_epqh->need_split) {
+ ifxhc->split = 1;
+ ifxhc->hub_addr = urb->dev->tt->hub->devnum;
+ ifxhc->port_addr = urb->dev->ttport;
+ }
+
+ //ifxhc->uint16_t pkt_count_limit
+
+ {
+ hcint_data_t hc_intr_mask;
+ uint8_t hc_num = ifxhc->hc_num;
+ ifxusb_hc_regs_t *hc_regs = _ifxhcd->core_if.hc_regs[hc_num];
+
+ /* Clear old interrupt conditions for this host channel. */
+ hc_intr_mask.d32 = 0xFFFFFFFF;
+ hc_intr_mask.b.reserved = 0;
+ ifxusb_wreg(&hc_regs->hcint, hc_intr_mask.d32);
+
+ /* Enable channel interrupts required for this transfer. */
+ hc_intr_mask.d32 = 0;
+ hc_intr_mask.b.chhltd = 1;
+ hc_intr_mask.b.ahberr = 1;
+
+ ifxusb_wreg(&hc_regs->hcintmsk, hc_intr_mask.d32);
+
+ /* Enable the top level host channel interrupt. */
+ {
+ uint32_t intr_enable;
+ intr_enable = (1 << hc_num);
+ ifxusb_mreg(&_ifxhcd->core_if.host_global_regs->haintmsk, 0, intr_enable);
+ }
+
+ /* Make sure host channel interrupts are enabled. */
+ {
+ gint_data_t gintmsk ={.d32 = 0};
+ gintmsk.b.hcintr = 1;
+ ifxusb_mreg(&_ifxhcd->core_if.core_global_regs->gintmsk, 0, gintmsk.d32);
+ }
+
+ /*
+ * Program the HCCHARn register with the endpoint characteristics for
+ * the current transfer.
+ */
+ {
+ hcchar_data_t hcchar;
+
+ hcchar.d32 = 0;
+ hcchar.b.devaddr = ifxhc->dev_addr;
+ hcchar.b.epnum = ifxhc->ep_num;
+ hcchar.b.lspddev = (ifxhc->speed == IFXUSB_EP_SPEED_LOW);
+ hcchar.b.eptype = ifxhc->ep_type;
+ hcchar.b.mps = ifxhc->mps;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+
+ IFX_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, ifxhc->hc_num);
+ IFX_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n" , hcchar.b.devaddr);
+ IFX_DEBUGPL(DBG_HCDV, " Ep Num: %d\n" , hcchar.b.epnum);
+ IFX_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
+ IFX_DEBUGPL(DBG_HCDV, " Ep Type: %d\n" , hcchar.b.eptype);
+ IFX_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n" , hcchar.b.mps);
+ IFX_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n" , hcchar.b.multicnt);
+ }
+ /* Program the HCSPLIT register for SPLITs */
+ {
+ hcsplt_data_t hcsplt;
+
+ hcsplt.d32 = 0;
+ if (ifxhc->split)
+ {
+ IFX_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", ifxhc->hc_num,
+ (ifxhc->split==2) ? "CSPLIT" : "SSPLIT");
+ hcsplt.b.spltena = 1;
+ hcsplt.b.compsplt = (ifxhc->split==2);
+ #ifdef __EN_ISOC__
+ if(_epqh->ep_type==IFXUSB_EP_TYPE_ISOC)
+ hcsplt.b.xactpos = ifxhc->isoc_xact_pos;
+ else
+ #endif
+ hcsplt.b.xactpos = IFXUSB_HCSPLIT_XACTPOS_ALL;
+ hcsplt.b.hubaddr = ifxhc->hub_addr;
+ hcsplt.b.prtaddr = ifxhc->port_addr;
+ IFX_DEBUGPL(DBG_HCDV, " comp split %d\n" , hcsplt.b.compsplt);
+ IFX_DEBUGPL(DBG_HCDV, " xact pos %d\n" , hcsplt.b.xactpos);
+ IFX_DEBUGPL(DBG_HCDV, " hub addr %d\n" , hcsplt.b.hubaddr);
+ IFX_DEBUGPL(DBG_HCDV, " port addr %d\n" , hcsplt.b.prtaddr);
+ IFX_DEBUGPL(DBG_HCDV, " is_in %d\n" , ifxhc->is_in);
+ IFX_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n" , ifxhc->mps);
+ IFX_DEBUGPL(DBG_HCDV, " xferlen: %d\n" , ifxhc->xfer_len);
+ }
+ ifxusb_wreg(&hc_regs->hcsplt, hcsplt.d32);
+ }
+ }
+
+ ifxhc->nak_retry_r=ifxhc->nak_retry=0;
+ ifxhc->nak_countdown_r=ifxhc->nak_countdown=0;
+
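+ /* Per-transfer-type/direction tuning hooks; currently empty apart from the commented-out NAK throttling for bulk IN */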
+ if (ifxhc->split)
+ {
+ if(ifxhc->is_in)
+ {
+ }
+ else
+ {
+ }
+ }
+ else if(_epqh->ep_type==IFXUSB_EP_TYPE_CTRL)
+ {
+ if(ifxhc->is_in)
+ {
+ }
+ else
+ {
+ }
+ }
+ else if(_epqh->ep_type==IFXUSB_EP_TYPE_BULK)
+ {
+ if(ifxhc->is_in)
+ {
+// ifxhc->nak_retry_r=ifxhc->nak_retry=nak_retry_max;
+// ifxhc->nak_countdown_r=ifxhc->nak_countdown=nak_countdown_max;
+ }
+ else
+ {
+ }
+ }
+ else if(_epqh->ep_type==IFXUSB_EP_TYPE_INTR)
+ {
+ if(ifxhc->is_in)
+ {
+ }
+ else
+ {
+ }
+ }
+ else if(_epqh->ep_type==IFXUSB_EP_TYPE_ISOC)
+ {
+ if(ifxhc->is_in)
+ {
+ }
+ else
+ {
+ }
+ }
+
+ return 1;
+}
+
+/*!
+ \brief This function selects transactions from the HCD transfer schedule and
+ assigns them to available host channels. It is called from HCD interrupt
+ handler functions.
+ */
+static void select_eps_sub(ifxhcd_hcd_t *_ifxhcd)
+{
+ struct list_head *epqh_ptr;
+ struct list_head *urbd_ptr;
+ ifxhcd_epqh_t *epqh;
+ ifxhcd_urbd_t *urbd;
+ int ret_val=0;
+
+ /*== AVM/BC 20101111 Function called with Lock ==*/
+
+// #ifdef __DEBUG__
+// IFX_DEBUGPL(DBG_HCD, " ifxhcd_select_ep\n");
+// #endif
+
+ /* Process entries in the periodic ready list. */
+ #ifdef __EN_ISOC__
+ epqh_ptr = _ifxhcd->epqh_isoc_ready.next;
+ while (epqh_ptr != &_ifxhcd->epqh_isoc_ready && !list_empty(&_ifxhcd->free_hc_list))
+ {
+ epqh = list_entry(epqh_ptr, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_ptr = epqh_ptr->next;
+ if(epqh->period_do)
+ {
+ if(assign_and_init_hc(_ifxhcd, epqh))
+ {
+ IFX_DEBUGPL(DBG_HCD, " select_eps ISOC\n");
+ list_move_tail(&epqh->epqh_list_entry, &_ifxhcd->epqh_isoc_active);
+ epqh->is_active=1;
+ ret_val=1;
+ epqh->period_do=0;
+ }
+ }
+ }
+ #endif
+
+ epqh_ptr = _ifxhcd->epqh_intr_ready.next;
+ while (epqh_ptr != &_ifxhcd->epqh_intr_ready && !list_empty(&_ifxhcd->free_hc_list))
+ {
+ epqh = list_entry(epqh_ptr, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_ptr = epqh_ptr->next;
+ if(epqh->period_do)
+ {
+ if(assign_and_init_hc(_ifxhcd, epqh))
+ {
+ IFX_DEBUGPL(DBG_HCD, " select_eps INTR\n");
+ list_move_tail(&epqh->epqh_list_entry, &_ifxhcd->epqh_intr_active);
+ epqh->is_active=1;
+ ret_val=1;
+ epqh->period_do=0;
+ }
+ }
+ }
+
+ epqh_ptr = _ifxhcd->epqh_np_ready.next;
+ while (epqh_ptr != &_ifxhcd->epqh_np_ready && !list_empty(&_ifxhcd->free_hc_list)) // may need to preserve at least one channel for periodic transfers
+ {
+ epqh = list_entry(epqh_ptr, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_ptr = epqh_ptr->next;
+ if(assign_and_init_hc(_ifxhcd, epqh))
+ {
+ IFX_DEBUGPL(DBG_HCD, " select_eps CTRL/BULK\n");
+ list_move_tail(&epqh->epqh_list_entry, &_ifxhcd->epqh_np_active);
+ epqh->is_active=1;
+ ret_val=1;
+ }
+ }
+ if(ret_val)
+ /*== AVM/BC 20101111 Function called with Lock ==*/
+ process_channels_sub(_ifxhcd);
+
+ /* AVM/BC 20101111 Urbds completion loop */
+ while (!list_empty(&_ifxhcd->urbd_complete_list))
+ {
+ urbd_ptr = _ifxhcd->urbd_complete_list.next;
+ list_del_init(urbd_ptr);
+
+ urbd = list_entry(urbd_ptr, ifxhcd_urbd_t, urbd_list_entry);
+
+ ifxhcd_complete_urb(_ifxhcd, urbd, urbd->status);
+
+ }
+
+}
+
+static void select_eps_func(unsigned long data)
+{
+ unsigned long flags;
+
+ ifxhcd_hcd_t *ifxhcd;
+ ifxhcd=((ifxhcd_hcd_t *)data);
+
+ /* AVM/BC 20101111 select_eps_in_use flag removed */
+
+ SPIN_LOCK_IRQSAVE(&ifxhcd->lock, flags);
+
+ /*if(ifxhcd->select_eps_in_use){
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+ return;
+ }
+ ifxhcd->select_eps_in_use=1;
+ */
+
+ select_eps_sub(ifxhcd);
+
+ //ifxhcd->select_eps_in_use=0;
+
+ SPIN_UNLOCK_IRQRESTORE(&ifxhcd->lock, flags);
+}
+
+void select_eps(ifxhcd_hcd_t *_ifxhcd)
+{
+ if(in_irq())
+ {
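+ /* In IRQ context, defer endpoint selection to a tasklet; the tasklet is set up lazily on first use */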
+ if(!_ifxhcd->select_eps.func)
+ {
+ _ifxhcd->select_eps.next = NULL;
+ _ifxhcd->select_eps.state = 0;
+ atomic_set( &_ifxhcd->select_eps.count, 0);
+ _ifxhcd->select_eps.func = select_eps_func;
+ _ifxhcd->select_eps.data = (unsigned long)_ifxhcd;
+ }
+ tasklet_schedule(&_ifxhcd->select_eps);
+ }
+ else
+ {
+ unsigned long flags;
+
+ /* AVM/BC 20101111 select_eps_in_use flag removed */
+
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+ /*if(_ifxhcd->select_eps_in_use){
+ printk ("select_eps non_irq: busy\n");
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+ return;
+ }
+ _ifxhcd->select_eps_in_use=1;
+ */
+
+ select_eps_sub(_ifxhcd);
+
+ //_ifxhcd->select_eps_in_use=0;
+
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+ }
+}
+
+/*!
+ \brief Checks the URB buffers of an EPQH for DWORD alignment. Depending on
+ configuration this either substitutes aligned bounce buffers
+ (__UNALIGNED_BUFFER_ADJ__) or only warns about unaligned buffers
+ (__UNALIGNED_BUFFER_CHK__).
+ */
+static void process_unaligned( ifxhcd_epqh_t *_epqh)
+{
+ #if defined(__UNALIGNED_BUFFER_ADJ__)
+ if(!_epqh->aligned_checked)
+ {
+ uint32_t xfer_len;
+ xfer_len=_epqh->urbd->xfer_len;
+ if(_epqh->urbd->is_in && xfer_len<_epqh->mps)
+ xfer_len = _epqh->mps;
+ _epqh->using_aligned_buf=0;
+
+ if(xfer_len > 0 && ((unsigned long)_epqh->urbd->xfer_buff) & 3)
+ {
+ if( _epqh->aligned_buf
+ && _epqh->aligned_buf_len > 0
+ && _epqh->aligned_buf_len < xfer_len
+ )
+ {
+ ifxusb_free_buf(_epqh->aligned_buf);
+ _epqh->aligned_buf=NULL;
+ _epqh->aligned_buf_len=0;
+ }
+ if(! _epqh->aligned_buf || ! _epqh->aligned_buf_len)
+ {
+ _epqh->aligned_buf = ifxusb_alloc_buf(xfer_len, _epqh->urbd->is_in);
+ if(_epqh->aligned_buf)
+ _epqh->aligned_buf_len = xfer_len;
+ }
+ if(_epqh->aligned_buf)
+ {
+ if(!_epqh->urbd->is_in)
+ memcpy(_epqh->aligned_buf, _epqh->urbd->xfer_buff, xfer_len);
+ _epqh->using_aligned_buf=1;
+ _epqh->hc->xfer_buff = _epqh->aligned_buf;
+ }
+ else
+ IFX_WARN("%s():%d\n",__func__,__LINE__);
+ }
+ if(_epqh->ep_type==IFXUSB_EP_TYPE_CTRL)
+ {
+ _epqh->using_aligned_setup=0;
+ if(((unsigned long)_epqh->urbd->setup_buff) & 3)
+ {
+ if(! _epqh->aligned_setup)
+ _epqh->aligned_setup = ifxusb_alloc_buf(8,0);
+ if(_epqh->aligned_setup)
+ {
+ memcpy(_epqh->aligned_setup, _epqh->urbd->setup_buff, 8);
+ _epqh->using_aligned_setup=1;
+ }
+ else
+ IFX_WARN("%s():%d\n",__func__,__LINE__);
+ _epqh->hc->xfer_buff = _epqh->aligned_setup;
+ }
+ }
+ }
+ #elif defined(__UNALIGNED_BUFFER_CHK__)
+ if(!_epqh->aligned_checked)
+ {
+ if(_epqh->urbd->is_in)
+ {
+ if(_epqh->urbd->xfer_len==0)
+ IFX_WARN("%s():%d IN xfer while length is zero \n",__func__,__LINE__);
+ else{
+ if(_epqh->urbd->xfer_len < _epqh->mps)
+ IFX_WARN("%s():%d IN xfer while length < mps \n",__func__,__LINE__);
+
+ if(((unsigned long)_epqh->urbd->xfer_buff) & 3)
+ IFX_WARN("%s():%d IN xfer Buffer UNALIGNED\n",__func__,__LINE__);
+ }
+ }
+ else
+ {
+ if(_epqh->urbd->xfer_len > 0 && (((unsigned long)_epqh->urbd->xfer_buff) & 3) )
+ IFX_WARN("%s():%d OUT xfer Buffer UNALIGNED\n",__func__,__LINE__);
+ }
+
+ if(_epqh->ep_type==IFXUSB_EP_TYPE_CTRL)
+ {
+ if(((unsigned long)_epqh->urbd->setup_buff) & 3)
+ IFX_WARN("%s():%d SETUP xfer Buffer UNALIGNED\n",__func__,__LINE__);
+ }
+ }
+ #endif
+ _epqh->aligned_checked=1;
+}
+
+
+/*!
+ \brief Starts transfers on the host channels assigned to the active EPQHs
+ (ISOC, INTR and non-periodic lists). Caller must hold the HCD lock.
+ */
+void process_channels_sub(ifxhcd_hcd_t *_ifxhcd)
+{
+ ifxhcd_epqh_t *epqh;
+ struct list_head *epqh_item;
+ struct ifxhcd_hc *hc;
+
+ #ifdef __EN_ISOC__
+ if (!list_empty(&_ifxhcd->epqh_isoc_active))
+ {
+ for (epqh_item = _ifxhcd->epqh_isoc_active.next;
+ epqh_item != &_ifxhcd->epqh_isoc_active;
+ )
+ {
+ epqh = list_entry(epqh_item, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_item = epqh_item->next;
+ hc=epqh->hc;
+ if(hc && !hc->xfer_started && epqh->period_do)
+ {
+ if(hc->split==0
+ || hc->split==1
+ )
+ {
+ //epqh->ping_state = 0;
+ process_unaligned(epqh);
+ hc->wait_for_sof=epqh->wait_for_sof;
+ epqh->wait_for_sof=0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, hc);
+ epqh->period_do=0;
+ {
+ gint_data_t gintsts = {.d32 = 0};
+ gintsts.b.sofintr = 1;
+ ifxusb_mreg(&_ifxhcd->core_if.core_global_regs->gintmsk,0, gintsts.d32);
+ }
+ }
+ }
+ }
+ }
+ #endif
+
+ if (!list_empty(&_ifxhcd->epqh_intr_active))
+ {
+ for (epqh_item = _ifxhcd->epqh_intr_active.next;
+ epqh_item != &_ifxhcd->epqh_intr_active;
+ )
+ {
+ epqh = list_entry(epqh_item, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_item = epqh_item->next;
+ hc=epqh->hc;
+ if(hc && !hc->xfer_started && epqh->period_do)
+ {
+ if(hc->split==0
+ || hc->split==1
+ )
+ {
+ //epqh->ping_state = 0;
+ process_unaligned(epqh);
+ hc->wait_for_sof=epqh->wait_for_sof;
+ epqh->wait_for_sof=0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, hc);
+ epqh->period_do=0;
+#ifdef __USE_TIMER_4_SOF__
+ /* AVM/WK change: let hc_start decide, if irq is needed */
+#else
+ {
+ gint_data_t gintsts = {.d32 = 0};
+ gintsts.b.sofintr = 1;
+ ifxusb_mreg(&_ifxhcd->core_if.core_global_regs->gintmsk,0, gintsts.d32);
+ }
+#endif
+ }
+ }
+
+ }
+ }
+
+ if (!list_empty(&_ifxhcd->epqh_np_active))
+ {
+ for (epqh_item = _ifxhcd->epqh_np_active.next;
+ epqh_item != &_ifxhcd->epqh_np_active;
+ )
+ {
+ epqh = list_entry(epqh_item, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_item = epqh_item->next;
+ hc=epqh->hc;
+ if(hc)
+ {
+ if(!hc->xfer_started)
+ {
+ if(hc->split==0
+ || hc->split==1
+ //|| hc->split_counter == 0
+ )
+ {
+ //epqh->ping_state = 0;
+ process_unaligned(epqh);
+ hc->wait_for_sof=epqh->wait_for_sof;
+ epqh->wait_for_sof=0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, hc);
+ }
+ }
+ }
+ }
+ }
+}
+
+void process_channels(ifxhcd_hcd_t *_ifxhcd)
+{
+ unsigned long flags;
+
+ /* AVM/WK Fix: use spin_lock instead busy flag
+ **/
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+ //if(_ifxhcd->process_channels_in_use)
+ // return;
+ //_ifxhcd->process_channels_in_use=1;
+
+ process_channels_sub(_ifxhcd);
+ //_ifxhcd->process_channels_in_use=0;
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+}
+
+
+#ifdef __HC_XFER_TIMEOUT__
+ static void hc_xfer_timeout(unsigned long _ptr)
+ {
+ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)_ptr;
+ int hc_num = xfer_info->hc->hc_num;
+ IFX_WARN("%s: timeout on channel %d\n", __func__, hc_num);
+ IFX_WARN(" start_hcchar_val 0x%08x\n", xfer_info->hc->start_hcchar_val);
+ }
+#endif
+
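+/*!
+ \brief Starts a single max-packet IN transfer on the given host channel into
+ the supplied scratch buffer. (Added comment, not from the original
+ source: this is apparently used to soak up data for which no URB is
+ pending, hence the "dumb" receive.)
+ */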
+void ifxhcd_hc_dumb_rx(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc,uint8_t *dump_buf)
+{
+ ifxusb_hc_regs_t *hc_regs = _core_if->hc_regs[_ifxhc->hc_num];
+ hctsiz_data_t hctsiz= { .d32=0 };
+ hcchar_data_t hcchar;
+
+
+ _ifxhc->xfer_len = _ifxhc->mps;
+ hctsiz.b.xfersize = _ifxhc->mps;
+ hctsiz.b.pktcnt = 0;
+ hctsiz.b.pid = _ifxhc->data_pid_start;
+ ifxusb_wreg(&hc_regs->hctsiz, hctsiz.d32);
+
+ ifxusb_wreg(&hc_regs->hcdma, (uint32_t)(CPHYSADDR( ((uint32_t)(dump_buf)))));
+
+ {
+ hcint_data_t hcint= { .d32=0 };
+// hcint.b.nak =1;
+// hcint.b.nyet=1;
+// hcint.b.ack =1;
+ hcint.d32 =0xFFFFFFFF;
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+ }
+
+ /* Set host channel enable after all other setup is complete. */
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ hcchar.b.epdir = 1;
+ IFX_DEBUGPL(DBG_HCDV, " HCCHAR: 0x%08x\n", hcchar.d32);
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+}
+
+/*!
+ \brief This function sets up a data transfer for a host channel and
+ starts the transfer.
+
+ For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ register along with a packet count of 1 and the channel is enabled. This
+ causes a single PING transaction to occur. Other fields in HCTSIZ are
+ simply set to 0 since no data transfer occurs in this case.
+
+ For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ all the information required to perform the subsequent data transfer. In
+ addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ controller performs the entire PING protocol, then starts the data
+ transfer.
+ \param _core_if Pointer of core_if structure
+ \param _ifxhc Information needed to initialize the host channel. The xfer_len
+ value may be reduced to accommodate the max widths of the XferSize and
+ PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ to reflect the final xfer_len value.
+ */
+void ifxhcd_hc_start(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc)
+{
+ hctsiz_data_t hctsiz= { .d32=0 };
+ hcchar_data_t hcchar;
+ uint32_t max_hc_xfer_size = _core_if->params.max_transfer_size;
+ uint16_t max_hc_pkt_count = _core_if->params.max_packet_count;
+ ifxusb_hc_regs_t *hc_regs = _core_if->hc_regs[_ifxhc->hc_num];
+ hfnum_data_t hfnum;
+
+ hctsiz.b.dopng = 0;
+// if(_ifxhc->do_ping && !_ifxhc->is_in) hctsiz.b.dopng = 1;
+
+ _ifxhc->nak_countdown=_ifxhc->nak_countdown_r;
+
+ /* AVM/BC 20101111 Workaround: Always PING if HI-Speed Out and xfer_len > 0 */
+ if(/*_ifxhc->do_ping &&*/
+ (!_ifxhc->is_in) &&
+ (_ifxhc->speed == IFXUSB_EP_SPEED_HIGH) &&
+ ((_ifxhc->ep_type == IFXUSB_EP_TYPE_BULK) || ((_ifxhc->ep_type == IFXUSB_EP_TYPE_CTRL) && (_ifxhc->control_phase != IFXHCD_CONTROL_SETUP))) &&
+ _ifxhc->xfer_len
+ )
+ hctsiz.b.dopng = 1;
+
+ _ifxhc->xfer_started = 1;
+
+ if(_ifxhc->epqh->pkt_count_limit > 0 && _ifxhc->epqh->pkt_count_limit < max_hc_pkt_count )
+ {
+ max_hc_pkt_count=_ifxhc->epqh->pkt_count_limit;
+ if(max_hc_pkt_count * _ifxhc->mps < max_hc_xfer_size)
+ max_hc_xfer_size = max_hc_pkt_count * _ifxhc->mps;
+ }
+ if (_ifxhc->split > 0)
+ {
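+ /* Same pattern as the "activate SOF IRQ" case further down: unmask the
+ * SOF interrupt before starting a split transaction. */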
+ {
+ gint_data_t gintsts = {.d32 = 0};
+ gintsts.b.sofintr = 1;
+ ifxusb_mreg(&_core_if->core_global_regs->gintmsk,0, gintsts.d32);
+ }
+
+ _ifxhc->start_pkt_count = 1;
+ if(!_ifxhc->is_in && _ifxhc->split>1) // OUT CSPLIT
+ _ifxhc->xfer_len = 0;
+ if (_ifxhc->xfer_len > _ifxhc->mps)
+ _ifxhc->xfer_len = _ifxhc->mps;
+ if (_ifxhc->xfer_len > 188)
+ _ifxhc->xfer_len = 188;
+ }
+ else if(_ifxhc->is_in)
+ {
+ _ifxhc->short_rw = 0;
+ if (_ifxhc->xfer_len > 0)
+ {
+ if (_ifxhc->xfer_len > max_hc_xfer_size)
+ _ifxhc->xfer_len = max_hc_xfer_size - _ifxhc->mps + 1;
+ _ifxhc->start_pkt_count = (_ifxhc->xfer_len + _ifxhc->mps - 1) / _ifxhc->mps;
+ if (_ifxhc->start_pkt_count > max_hc_pkt_count)
+ _ifxhc->start_pkt_count = max_hc_pkt_count;
+ }
+ else /* Need 1 packet for transfer length of 0. */
+ _ifxhc->start_pkt_count = 1;
+ _ifxhc->xfer_len = _ifxhc->start_pkt_count * _ifxhc->mps;
+ }
+ else //non-split out
+ {
+ if (_ifxhc->xfer_len == 0)
+ {
+ /*== AVM/BC WK 20110421 ZERO PACKET Workaround: Is not an error ==*/
+ //if(_ifxhc->short_rw==0)
+ // printk(KERN_INFO "%s() line %d: ZLP write without short_rw set!\n",__func__,__LINE__);
+ _ifxhc->start_pkt_count = 1;
+ }
+ else
+ {
+ if (_ifxhc->xfer_len > max_hc_xfer_size)
+ {
+ _ifxhc->start_pkt_count = (max_hc_xfer_size / _ifxhc->mps);
+ _ifxhc->xfer_len = _ifxhc->start_pkt_count * _ifxhc->mps;
+ }
+ else
+ {
+ _ifxhc->start_pkt_count = (_ifxhc->xfer_len+_ifxhc->mps-1) / _ifxhc->mps;
+// if(_ifxhc->start_pkt_count * _ifxhc->mps == _ifxhc->xfer_len )
+// _ifxhc->start_pkt_count += _ifxhc->short_rw;
+ /*== AVM/BC WK 20110421 ZERO PACKET Workaround / check if short_rw is needed ==*/
+ if(_ifxhc->start_pkt_count * _ifxhc->mps != _ifxhc->xfer_len )
+ _ifxhc->short_rw = 0;
+ }
+ }
+ }
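+ /* Illustrative note (not part of the original source): IN transfers are
+ * always programmed as whole max-packet multiples, e.g. a 700-byte request
+ * with mps 64 becomes 11 packets / 704 bytes; start_pkt_count is kept so
+ * the actual transfer size can be derived later. */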
+
+ #ifdef __EN_ISOC__
+ if (_ifxhc->ep_type == IFXUSB_EP_TYPE_ISOC)
+ {
+ /* Set up the initial PID for the transfer. */
+ #if 1
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ #else
+ if (_ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ {
+ if (_ifxhc->is_in)
+ {
+ if (_ifxhc->multi_count == 1)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ else if (_ifxhc->multi_count == 2)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA2;
+ }
+ else
+ {
+ if (_ifxhc->multi_count == 1)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_MDATA;
+ }
+ }
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ #endif
+ }
+ #endif
+
+ hctsiz.b.xfersize = _ifxhc->xfer_len;
+ hctsiz.b.pktcnt = _ifxhc->start_pkt_count;
+ hctsiz.b.pid = _ifxhc->data_pid_start;
+
+ ifxusb_wreg(&hc_regs->hctsiz, hctsiz.d32);
+
+
+ IFX_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _ifxhc->hc_num);
+ IFX_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
+ IFX_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n" , hctsiz.b.pktcnt);
+ IFX_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
+ IFX_DEBUGPL(DBG_HCDV, " DMA: 0x%08x\n", (uint32_t)(CPHYSADDR( ((uint32_t)(_ifxhc->xfer_buff))+ _ifxhc->xfer_count )));
+ ifxusb_wreg(&hc_regs->hcdma, (uint32_t)(CPHYSADDR( ((uint32_t)(_ifxhc->xfer_buff))+ _ifxhc->xfer_count )));
+
+ /* Start the split */
+ if (_ifxhc->split>0)
+ {
+ hcsplt_data_t hcsplt;
+ hcsplt.d32 = ifxusb_rreg (&hc_regs->hcsplt);
+ hcsplt.b.spltena = 1;
+ if (_ifxhc->split>1)
+ hcsplt.b.compsplt = 1;
+ else
+ hcsplt.b.compsplt = 0;
+
+ #ifdef __EN_ISOC__
+ if (_ifxhc->ep_type == IFXUSB_EP_TYPE_ISOC)
+ hcsplt.b.xactpos = _ifxhc->isoc_xact_pos;
+ else
+ #endif
+ hcsplt.b.xactpos = IFXUSB_HCSPLIT_XACTPOS_ALL;// if not ISO
+ ifxusb_wreg(&hc_regs->hcsplt, hcsplt.d32);
+ IFX_DEBUGPL(DBG_HCDV, " SPLIT: XACT_POS:0x%08x\n", hcsplt.d32);
+ }
+
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+// hcchar.b.multicnt = _ifxhc->multi_count;
+ hcchar.b.multicnt = 1;
+
+ #ifdef __DEBUG__
+ _ifxhc->start_hcchar_val = hcchar.d32;
+ if (hcchar.b.chdis)
+ IFX_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, _ifxhc->hc_num, hcchar.d32);
+ #endif
+
+ /* Set host channel enable after all other setup is complete. */
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ hcchar.b.epdir = _ifxhc->is_in;
+ _ifxhc->hcchar=hcchar.d32;
+
+ IFX_DEBUGPL(DBG_HCDV, " HCCHAR: 0x%08x\n", _ifxhc->hcchar);
+
+ /* == 20110901 AVM/WK Fix: Clear IRQ flags in any case ==*/
+ {
+ hcint_data_t hcint= { .d32=0 };
+ hcint.d32 =0xFFFFFFFF;
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+ }
+
+ if(_ifxhc->wait_for_sof==0)
+ {
+ hcint_data_t hcint;
+
+ hcint.d32=ifxusb_rreg(&hc_regs->hcintmsk);
+
+ hcint.b.nak =0;
+ hcint.b.ack =0;
+ /* == 20110901 AVM/WK Fix: We don't need NOT YET IRQ ==*/
+ hcint.b.nyet=0;
+ if(_ifxhc->nak_countdown_r)
+ hcint.b.nak =1;
+ ifxusb_wreg(&hc_regs->hcintmsk, hcint.d32);
+
+ /* AVM WK / BC 20100827
+ * MOVED: odd-frame bit is updated immediately before writing the HCCHAR register.
+ */
+ if (_ifxhc->ep_type == IFXUSB_EP_TYPE_INTR || _ifxhc->ep_type == IFXUSB_EP_TYPE_ISOC)
+ {
+ hfnum.d32 = ifxusb_rreg(&_core_if->host_global_regs->hfnum);
+ /* 1 if _next_ frame is odd, 0 if it's even */
+ hcchar.b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
+ _ifxhc->hcchar=hcchar.d32;
+ }
+
+ ifxusb_wreg(&hc_regs->hcchar, _ifxhc->hcchar);
+#ifdef __USE_TIMER_4_SOF__
+ } else {
+ //activate SOF IRQ
+ gint_data_t gintsts = {.d32 = 0};
+ gintsts.b.sofintr = 1;
+ ifxusb_mreg(&_core_if->core_global_regs->gintmsk,0, gintsts.d32);
+#endif
+ }
+
+ #ifdef __HC_XFER_TIMEOUT__
+ /* Start a timer for this transfer. */
+ init_timer(&_ifxhc->hc_xfer_timer);
+ _ifxhc->hc_xfer_timer.function = hc_xfer_timeout;
+ _ifxhc->hc_xfer_timer.core_if = _core_if;
+ _ifxhc->hc_xfer_timer.hc = _ifxhc;
+ _ifxhc->hc_xfer_timer.data = (unsigned long)(&_ifxhc->hc_xfer_info);
+ _ifxhc->hc_xfer_timer.expires = jiffies + (HZ*10);
+ add_timer(&_ifxhc->hc_xfer_timer);
+ #endif
+}
+
+/*!
+ \brief Attempts to halt a host channel. This function should only be called
+ to abort a transfer in DMA mode. Under normal circumstances in DMA mode, the
+ controller halts the channel when the transfer is complete or a condition
+ occurs that requires application intervention.
+
+ In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ HCCHARn register. The controller ensures there is space in the request
+ queue before submitting the halt request.
+
+ Some time may elapse before the core flushes any posted requests for this
+ host channel and halts. The Channel Halted interrupt handler completes the
+ deactivation of the host channel.
+ */
+void ifxhcd_hc_halt(ifxusb_core_if_t *_core_if,
+ ifxhcd_hc_t *_ifxhc,
+ ifxhcd_halt_status_e _halt_status)
+{
+ hcchar_data_t hcchar;
+ ifxusb_hc_regs_t *hc_regs;
+
+ hc_regs = _core_if->hc_regs[_ifxhc->hc_num];
+
+ WARN_ON(_halt_status == HC_XFER_NO_HALT_STATUS);
+
+ if (_halt_status == HC_XFER_URB_DEQUEUE ||
+ _halt_status == HC_XFER_AHB_ERR)
+ {
+ /*
+ * Disable all channel interrupts except Ch Halted. The URBD
+ * and EPQH state associated with this transfer has been cleared
+ * (in the case of URB_DEQUEUE), so the channel needs to be
+ * shut down carefully to prevent crashes.
+ */
+ hcint_data_t hcintmsk;
+ hcintmsk.d32 = 0;
+ hcintmsk.b.chhltd = 1;
+ ifxusb_wreg(&hc_regs->hcintmsk, hcintmsk.d32);
+
+ /*
+ * Make sure no other interrupts besides halt are currently
+ * pending. Handling another interrupt could cause a crash due
+ * to the URBD and EPQH state.
+ */
+ ifxusb_wreg(&hc_regs->hcint, ~hcintmsk.d32);
+
+ /*
+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+ * even if the channel was already halted for some other
+ * reason.
+ */
+ _ifxhc->halt_status = _halt_status;
+
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ if (hcchar.b.chen == 0)
+ {
+ /*
+ * The channel is either already halted or it hasn't
+ * started yet. In DMA mode, the transfer may halt if
+ * it finishes normally or a condition occurs that
+ * requires driver intervention. Don't want to halt
+ * the channel again. In either Slave or DMA mode,
+ * it's possible that the transfer has been assigned
+ * to a channel, but not started yet when an URB is
+ * dequeued. Don't want to halt a channel that hasn't
+ * started yet.
+ */
+ return;
+ }
+ }
+
+ if (_ifxhc->halting)
+ {
+ /*
+ * A halt has already been issued for this channel. This might
+ * happen when a transfer is aborted by a higher level in
+ * the stack.
+ */
+ #ifdef __DEBUG__
+ IFX_PRINT("*** %s: Channel %d, _hc->halting already set ***\n",
+ __func__, _ifxhc->hc_num);
+ #endif
+ //ifxusb_dump_global_registers(_core_if);
+ //ifxusb_dump_host_registers(_core_if);
+ return;
+ }
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ /* == AVM/WK 20100709 halt channel only if enabled ==*/
+ if (hcchar.b.chen) {
+ _ifxhc->halting = 1;
+ hcchar.b.chdis = 1;
+
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+ _ifxhc->halt_status = _halt_status;
+ }
+
+ IFX_DEBUGPL(DBG_HCDV, "%s: Channel %d\n" , __func__, _ifxhc->hc_num);
+ IFX_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n" , hcchar.d32);
+ IFX_DEBUGPL(DBG_HCDV, " halting: %d\n" , _ifxhc->halting);
+ IFX_DEBUGPL(DBG_HCDV, " halt_status: %d\n" , _ifxhc->halt_status);
+
+ return;
+}
+
+/*!
+ \brief Clears a host channel.
+ */
+void ifxhcd_hc_cleanup(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc)
+{
+ ifxusb_hc_regs_t *hc_regs;
+
+ _ifxhc->xfer_started = 0;
+ /*
+ * Clear channel interrupt enables and any unhandled channel interrupt
+ * conditions.
+ */
+ hc_regs = _core_if->hc_regs[_ifxhc->hc_num];
+ ifxusb_wreg(&hc_regs->hcintmsk, 0);
+ ifxusb_wreg(&hc_regs->hcint, 0xFFFFFFFF);
+
+ #ifdef __HC_XFER_TIMEOUT__
+ del_timer(&_ifxhc->hc_xfer_timer);
+ #endif
+ #ifdef __DEBUG__
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ if (hcchar.b.chdis)
+ IFX_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", __func__, _ifxhc->hc_num, hcchar.d32);
+ }
+ #endif
+}
+
+
+
+
+
+
+
+
+#ifdef __DEBUG__
+ static void dump_urb_info(struct urb *_urb, char* _fn_name)
+ {
+ IFX_PRINT("%s, urb %p\n" , _fn_name, _urb);
+ IFX_PRINT(" Device address: %d\n", usb_pipedevice(_urb->pipe));
+ IFX_PRINT(" Endpoint: %d, %s\n" , usb_pipeendpoint(_urb->pipe),
+ (usb_pipein(_urb->pipe) ? "IN" : "OUT"));
+ IFX_PRINT(" Endpoint type: %s\n",
+ ({ char *pipetype;
+ switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL: pipetype = "CONTROL"; break;
+ case PIPE_BULK: pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break;
+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break;
+ default: pipetype = "UNKNOWN"; break;
+ };
+ pipetype;
+ }));
+ IFX_PRINT(" Speed: %s\n",
+ ({ char *speed;
+ switch (_urb->dev->speed) {
+ case USB_SPEED_HIGH: speed = "HIGH"; break;
+ case USB_SPEED_FULL: speed = "FULL"; break;
+ case USB_SPEED_LOW: speed = "LOW"; break;
+ default: speed = "UNKNOWN"; break;
+ };
+ speed;
+ }));
+ IFX_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(_urb->dev, _urb->pipe, usb_pipeout(_urb->pipe)));
+ IFX_PRINT(" Data buffer length: %d\n", _urb->transfer_buffer_length);
+ IFX_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n",
+ _urb->transfer_buffer, (void *)_urb->transfer_dma);
+ IFX_PRINT(" Setup buffer: %p, Setup DMA: %p\n",
+ _urb->setup_packet, (void *)_urb->setup_dma);
+ IFX_PRINT(" Interval: %d\n", _urb->interval);
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS)
+ {
+ int i;
+ for (i = 0; i < _urb->number_of_packets; i++)
+ {
+ IFX_PRINT(" ISO Desc %d:\n", i);
+ IFX_PRINT(" offset: %d, length %d\n",
+ _urb->iso_frame_desc[i].offset,
+ _urb->iso_frame_desc[i].length);
+ }
+ }
+ }
+
+ static void dump_channel_info(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh)
+ {
+ if (_epqh->hc != NULL)
+ {
+ ifxhcd_hc_t *hc = _epqh->hc;
+ struct list_head *item;
+ ifxhcd_epqh_t *epqh_item;
+
+ ifxusb_hc_regs_t *hc_regs;
+
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+
+ hc_regs = _ifxhcd->core_if.hc_regs[hc->hc_num];
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ hcsplt.d32 = ifxusb_rreg(&hc_regs->hcsplt);
+ hctsiz.d32 = ifxusb_rreg(&hc_regs->hctsiz);
+ hcdma = ifxusb_rreg(&hc_regs->hcdma);
+
+ IFX_PRINT(" Assigned to channel %d:\n" , hc->hc_num);
+ IFX_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+ IFX_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n" , hctsiz.d32, hcdma);
+ IFX_PRINT(" dev_addr: %d, ep_num: %d, is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->is_in);
+ IFX_PRINT(" ep_type: %d\n" , hc->ep_type);
+ IFX_PRINT(" max_packet_size: %d\n", hc->mps);
+ IFX_PRINT(" data_pid_start: %d\n" , hc->data_pid_start);
+ IFX_PRINT(" xfer_started: %d\n" , hc->xfer_started);
+ IFX_PRINT(" halt_status: %d\n" , hc->halt_status);
+ IFX_PRINT(" xfer_buff: %p\n" , hc->xfer_buff);
+ IFX_PRINT(" xfer_len: %d\n" , hc->xfer_len);
+ IFX_PRINT(" epqh: %p\n" , hc->epqh);
+ IFX_PRINT(" NP Active:\n");
+ list_for_each(item, &_ifxhcd->epqh_np_active)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ IFX_PRINT(" NP Ready:\n");
+ list_for_each(item, &_ifxhcd->epqh_np_ready)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ IFX_PRINT(" INTR Active:\n");
+ list_for_each(item, &_ifxhcd->epqh_intr_active)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ IFX_PRINT(" INTR Ready:\n");
+ list_for_each(item, &_ifxhcd->epqh_intr_ready)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ #ifdef __EN_ISOC__
+ IFX_PRINT(" ISOC Active:\n");
+ list_for_each(item, &_ifxhcd->epqh_isoc_active)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ IFX_PRINT(" ISOC Ready:\n");
+ list_for_each(item, &_ifxhcd->epqh_isoc_ready)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ #endif
+ IFX_PRINT(" Standby:\n");
+ list_for_each(item, &_ifxhcd->epqh_stdby)
+ {
+ epqh_item = list_entry(item, ifxhcd_epqh_t, epqh_list_entry);
+ IFX_PRINT(" %p\n", epqh_item);
+ }
+ }
+ }
+#endif //__DEBUG__
+
+
+/*!
+ \brief This function writes a packet into the Tx FIFO associated with the Host
+ Channel. For a channel associated with a non-periodic EP, the non-periodic
+ Tx FIFO is written. For a channel associated with a periodic EP, the
+ periodic Tx FIFO is written. This function should only be called in Slave
+ mode.
+
+ Upon return the xfer_buff and xfer_count fields in _hc are incremented by
+ the number of bytes written to the Tx FIFO.
+ */
+
+#ifdef __ENABLE_DUMP__
+ void ifxhcd_dump_state(ifxhcd_hcd_t *_ifxhcd)
+ {
+ int num_channels;
+ int i;
+ num_channels = _ifxhcd->core_if.params.host_channels;
+ IFX_PRINT("\n");
+ IFX_PRINT("************************************************************\n");
+ IFX_PRINT("HCD State:\n");
+ IFX_PRINT(" Num channels: %d\n", num_channels);
+ for (i = 0; i < num_channels; i++) {
+ ifxhcd_hc_t *hc = &_ifxhcd->ifxhc[i];
+ IFX_PRINT(" Channel %d:\n", hc->hc_num);
+ IFX_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->is_in);
+ IFX_PRINT(" speed: %d\n" , hc->speed);
+ IFX_PRINT(" ep_type: %d\n" , hc->ep_type);
+ IFX_PRINT(" mps: %d\n", hc->mps);
+ IFX_PRINT(" data_pid_start: %d\n" , hc->data_pid_start);
+ IFX_PRINT(" xfer_started: %d\n" , hc->xfer_started);
+ IFX_PRINT(" xfer_buff: %p\n" , hc->xfer_buff);
+ IFX_PRINT(" xfer_len: %d\n" , hc->xfer_len);
+ IFX_PRINT(" xfer_count: %d\n" , hc->xfer_count);
+ IFX_PRINT(" halting: %d\n" , hc->halting);
+ IFX_PRINT(" halt_status: %d\n" , hc->halt_status);
+ IFX_PRINT(" split: %d\n" , hc->split);
+ IFX_PRINT(" hub_addr: %d\n" , hc->hub_addr);
+ IFX_PRINT(" port_addr: %d\n" , hc->port_addr);
+ #ifdef __EN_ISOC__
+ IFX_PRINT(" isoc_xact_pos: %d\n" , hc->isoc_xact_pos);
+ #endif
+ IFX_PRINT(" epqh: %p\n" , hc->epqh);
+ IFX_PRINT(" short_rw: %d\n" , hc->short_rw);
+ IFX_PRINT(" do_ping: %d\n" , hc->do_ping);
+ IFX_PRINT(" control_phase: %d\n" , hc->control_phase);
+ IFX_PRINT(" pkt_count_limit: %d\n", hc->epqh->pkt_count_limit);
+ IFX_PRINT(" start_pkt_count: %d\n" , hc->start_pkt_count);
+ }
+ IFX_PRINT("************************************************************\n");
+ IFX_PRINT("\n");
+ }
+#endif //__ENABLE_DUMP__
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.h b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.h
new file mode 100644
index 0000000000..3a408513ce
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd.h
@@ -0,0 +1,628 @@
+/*****************************************************************************
+ ** FILE NAME : ifxhcd.h
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the structures, constants, and interfaces for
+ ** the Host Controller Driver (HCD).
+ **
+ ** The Host Controller Driver (HCD) is responsible for translating requests
+ ** from the USB Driver into the appropriate actions on the IFXUSB controller.
+ ** It isolates the USBD from the specifics of the controller by providing an
+ ** API to the USBD.
+ ** FUNCTIONS :
+ ** COMPILER : gcc
+ ** REFERENCE : Synopsys DWC-OTG Driver 2.7
+ ** COPYRIGHT :
+ ** Version Control Section **
+ ** $Author$
+ ** $Date$
+ ** $Revisions$
+ ** $Log$ Revision history
+*****************************************************************************/
+
+/*!
+ \defgroup IFXUSB_HCD HCD Interface
+ \ingroup IFXUSB_DRIVER_V3
+ \brief The Host Controller Driver (HCD) is responsible for translating requests
+ from the USB Driver into the appropriate actions on the IFXUSB controller.
+ It isolates the USBD from the specifics of the controller by providing an
+ API to the USBD.
+ */
+
+
+/*!
+ \file ifxhcd.h
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the structures, constants, and interfaces for
+ the Host Controller Driver (HCD).
+ */
+
+#if !defined(__IFXHCD_H__)
+#define __IFXHCD_H__
+
+#include <linux/list.h>
+#include <linux/usb.h>
+
+#ifdef __USE_TIMER_4_SOF__
+#include <linux/hrtimer.h>
+#endif
+#include <linux/usb/hcd.h>
+
+#include "ifxusb_cif.h"
+#include "ifxusb_plat.h"
+
+
+
+/*!
+ \addtogroup IFXUSB_HCD
+ */
+/*@{*/
+
+/* Phases for control transfers.*/
+typedef enum ifxhcd_control_phase {
+ IFXHCD_CONTROL_SETUP,
+ IFXHCD_CONTROL_DATA,
+ IFXHCD_CONTROL_STATUS
+} ifxhcd_control_phase_e;
+
+/* Reasons for halting a host channel. */
+typedef enum ifxhcd_halt_status
+{
+ HC_XFER_NO_HALT_STATUS, // Initial
+ HC_XFER_COMPLETE, // Xact complete without error, upward
+ HC_XFER_URB_COMPLETE, // Xfer complete without error, short upward
+ HC_XFER_STALL, // HC stopped abnormally, upward/downward
+ HC_XFER_XACT_ERR, // HC stopped abnormally, upward
+ HC_XFER_FRAME_OVERRUN, // HC stopped abnormally, upward
+ HC_XFER_BABBLE_ERR, // HC stopped abnormally, upward
+ HC_XFER_AHB_ERR, // HC stopped abnormally, upward
+ HC_XFER_DATA_TOGGLE_ERR,
+ HC_XFER_URB_DEQUEUE, // HC stopped manually, downward
+ HC_XFER_NAK // HC stopped by nak monitor, downward
+} ifxhcd_halt_status_e;
+
+struct ifxhcd_urbd;
+struct ifxhcd_hc ;
+struct ifxhcd_epqh ;
+struct ifxhcd_hcd;
+
+/*!
+ \brief A URB Descriptor (URBD) holds the state of a bulk, control,
+ interrupt, or isochronous transfer. A single URBD is created for each URB
+ (of one of these types) submitted to the HCD. The transfer associated with
+ a URBD may require one or multiple transactions.
+
+ A URBD is linked to a EP Queue Head, which is entered in either the
+ isoc, intr or non-periodic schedule for execution. When a URBD is chosen for
+ execution, some or all of its transactions may be executed. After
+ execution, the state of the URBD is updated. The URBD may be retired if all
+ its transactions are complete or if an error occurred. Otherwise, it
+ remains in the schedule so more transactions can be executed later.
+ */
+typedef struct ifxhcd_urbd {
+ struct list_head urbd_list_entry; // Hook for EPQH->urbd_list and ifxhcd->urbd_complete_list
+ struct urb *urb; /*!< URB for this transfer */
+ //struct urb {
+ // struct list_head urb_list;
+ // struct list_head anchor_list;
+ // struct usb_anchor * anchor;
+ // struct usb_device * dev;
+ // struct usb_host_endpoint * ep;
+ // unsigned int pipe;
+ // int status;
+ // unsigned int transfer_flags;
+ // void * transfer_buffer;
+ // dma_addr_t transfer_dma;
+ // u32 transfer_buffer_length;
+ // u32 actual_length;
+ // unsigned char * setup_packet;
+ // dma_addr_t setup_dma;
+ // int start_frame;
+ // int number_of_packets;
+ // int interval;
+ // int error_count;
+ // void * context;
+ // usb_complete_t complete;
+ // struct usb_iso_packet_descriptor iso_frame_desc[0];
+ //};
+ //urb_list For use by current owner of the URB.
+ //anchor_list membership in the list of an anchor
+ //anchor to anchor URBs to a common mooring
+ //dev Identifies the USB device to perform the request.
+ //ep Points to the endpoint's data structure. Will
+ // eventually replace pipe.
+ //pipe Holds endpoint number, direction, type, and more.
+ // Create these values with the eight macros available;
+ // usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is
+ // "ctrl", "bulk", "int" or "iso". For example
+ // usb_sndbulkpipe or usb_rcvintpipe. Endpoint numbers
+ // range from zero to fifteen. Note that "in" endpoint two
+ // is a different endpoint (and pipe) from "out" endpoint
+ // two. The current configuration controls the existence,
+ // type, and maximum packet size of any given endpoint.
+ //status This is read in non-iso completion functions to get
+ // the status of the particular request. ISO requests
+ // only use it to tell whether the URB was unlinked;
+ // detailed status for each frame is in the fields of
+ // the iso_frame-desc.
+ //transfer_flags A variety of flags may be used to affect how URB
+ // submission, unlinking, or operation are handled.
+ // Different kinds of URB can use different flags.
+ // URB_SHORT_NOT_OK
+ // URB_ISO_ASAP
+ // URB_NO_TRANSFER_DMA_MAP
+ // URB_NO_SETUP_DMA_MAP
+ // URB_NO_FSBR
+ // URB_ZERO_PACKET
+ // URB_NO_INTERRUPT
+ //transfer_buffer This identifies the buffer to (or from) which the I/O
+ // request will be performed (unless URB_NO_TRANSFER_DMA_MAP
+ // is set). This buffer must be suitable for DMA; allocate it
+ // with kmalloc or equivalent. For transfers to "in"
+ // endpoints, contents of this buffer will be modified. This
+ // buffer is used for the data stage of control transfers.
+ //transfer_dma When transfer_flags includes URB_NO_TRANSFER_DMA_MAP, the
+ // device driver is saying that it provided this DMA address,
+ // which the host controller driver should use in preference
+ // to the transfer_buffer.
+ //transfer_buffer_length How big is transfer_buffer. The transfer may be broken
+ // up into chunks according to the current maximum packet size
+ // for the endpoint, which is a function of the configuration
+ // and is encoded in the pipe. When the length is zero, neither
+ // transfer_buffer nor transfer_dma is used.
+ //actual_length This is read in non-iso completion functions, and it tells
+ // how many bytes (out of transfer_buffer_length) were transferred.
+ // It will normally be the same as requested, unless either an error
+ // was reported or a short read was performed. The URB_SHORT_NOT_OK
+ // transfer flag may be used to make such short reads be reported
+ // as errors.
+ //setup_packet Only used for control transfers, this points to eight bytes of
+ // setup data. Control transfers always start by sending this data
+ // to the device. Then transfer_buffer is read or written, if needed.
+ //setup_dma For control transfers with URB_NO_SETUP_DMA_MAP set, the device
+ // driver has provided this DMA address for the setup packet. The
+ // host controller driver should use this in preference to setup_packet.
+ //start_frame Returns the initial frame for isochronous transfers.
+ //number_of_packets Lists the number of ISO transfer buffers.
+ //interval Specifies the polling interval for interrupt or isochronous transfers.
+ // The units are frames (milliseconds) for full and low speed devices,
+ // and microframes (1/8 millisecond) for high-speed ones.
+ //error_count Returns the number of ISO transfers that reported errors.
+ //context For use in completion functions. This normally points to request-specific
+ // driver context.
+ //complete Completion handler. This URB is passed as the parameter to the completion
+ // function. The completion function may then do what it likes with the URB,
+ // including resubmitting or freeing it.
+ //iso_frame_desc[0] Used to provide arrays of ISO transfer buffers and to collect the transfer
+ // status for each buffer.
+
+ struct ifxhcd_epqh *epqh;
+ // Actual data portion, not SETUP or STATUS in case of CTRL XFER
+ // DMA adjusted
+ uint8_t *setup_buff; /*!< Pointer to the SETUP packet buffer. (CPU accessible)*/
+ uint8_t *xfer_buff; /*!< Pointer to the entire transfer buffer. (CPU accessible)*/
+ uint32_t xfer_len; /*!< Total number of bytes to transfer in this xfer. */
+ unsigned is_in :1;
+ unsigned is_active:1;
+
+ // For ALL XFER
+ uint8_t error_count; /*!< Holds the number of bus errors that have occurred for a transaction
+ within this transfer.
+ */
+ /*== AVM/BC 20101111 Needed for URB Complete List ==*/
+ int status;
+ // For ISOC XFER only
+ #ifdef __EN_ISOC__
+ int isoc_frame_index; /*!< Index of the next frame descriptor for an isochronous transfer. A
+ frame descriptor describes the buffer position and length of the
+ data to be transferred in the next scheduled (micro)frame of an
+ isochronous transfer. It also holds status for that transaction.
+ The frame index starts at 0.
+ */
+ // For SPLITed ISOC XFER only
+ uint8_t isoc_split_pos; /*!< Position of the ISOC split on full/low speed */
+ uint16_t isoc_split_offset;/*!< Position of the ISOC split in the buffer for the current frame */
+ #endif
+} ifxhcd_urbd_t;
+
+/*!
+ \brief A EP Queue Head (EPQH) holds the static characteristics of an endpoint and
+ maintains a list of transfers (URBDs) for that endpoint. A EPQH structure may
+ be entered in either the isoc, intr or non-periodic schedule.
+ */
+
+typedef struct ifxhcd_epqh {
+ struct list_head epqh_list_entry; // Hook for EP Queues
+ struct list_head urbd_list; /*!< List of URBDs for this EPQH. */
+ struct ifxhcd_hc *hc; /*!< Host channel currently processing transfers for this EPQH. */
+ struct ifxhcd_urbd *urbd; /*!< URBD currently assigned to a host channel for this EPQH. */
+ struct usb_host_endpoint *sysep;
+ uint8_t ep_type; /*!< Endpoint type. One of the following values:
+ - IFXUSB_EP_TYPE_CTRL
+ - IFXUSB_EP_TYPE_ISOC
+ - IFXUSB_EP_TYPE_BULK
+ - IFXUSB_EP_TYPE_INTR
+ */
+ uint16_t mps; /*!< wMaxPacketSize Field of Endpoint Descriptor. */
+
+ /* == AVM/WK 20100710 Fix - Use toggle of usbcore ==*/
+ /*uint8_t data_toggle;*/ /*!< Determines the PID of the next data packet
+ One of the following values:
+ - IFXHCD_HC_PID_DATA0
+ - IFXHCD_HC_PID_DATA1
+ */
+ uint8_t is_active;
+
+ uint8_t pkt_count_limit;
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ struct timer_list destroy_timer;
+ #endif
+
+ uint16_t wait_for_sof;
+ uint8_t need_split; /*!< Full/low speed endpoint on high-speed hub requires split. */
+ uint16_t interval; /*!< Interval between transfers in (micro)frames. (for INTR)*/
+
+ uint16_t period_counter; /*!< Interval between transfers in (micro)frames. */
+ uint8_t period_do;
+
+ uint8_t aligned_checked;
+
+ #if defined(__UNALIGNED_BUFFER_ADJ__)
+ uint8_t using_aligned_setup;
+ uint8_t *aligned_setup;
+ uint8_t using_aligned_buf;
+ uint8_t *aligned_buf;
+ unsigned aligned_buf_len : 19;
+ #endif
+
+ uint8_t *dump_buf;
+} ifxhcd_epqh_t;
+
+
+#if defined(__HC_XFER_TIMEOUT__)
+ struct ifxusb_core_if;
+ struct ifxhcd_hc;
+ typedef struct hc_xfer_info
+ {
+ struct ifxusb_core_if *core_if;
+ struct ifxhcd_hc *hc;
+ } hc_xfer_info_t;
+#endif //defined(__HC_XFER_TIMEOUT__)
+
+
+/*!
+ \brief Host channel descriptor. This structure represents the state of a single
+ host channel when acting in host mode. It contains the data items needed to
+ transfer packets to an endpoint via a host channel.
+ */
+typedef struct ifxhcd_hc
+{
+ struct list_head hc_list_entry ; // Hook to free hc
+ struct ifxhcd_epqh *epqh ; /*!< EP Queue Head for the transfer being processed by this channel. */
+
+ uint8_t hc_num ; /*!< Host channel number used for register address lookup */
+ uint8_t *xfer_buff ; /*!< Pointer to the entire transfer buffer. */
+ uint32_t xfer_count ; /*!< Number of bytes transferred so far; also the current offset into the transfer buffer. */
+ uint32_t xfer_len ; /*!< Total number of bytes to transfer in this xfer. */
+ uint16_t start_pkt_count ; /*!< Packet count at start of transfer. Used to calculate the actual xfer size*/
+ ifxhcd_halt_status_e halt_status; /*!< Reason for halting the host channel. */
+
+ unsigned dev_addr : 7; /*!< Device to access */
+ unsigned ep_num : 4; /*!< EP to access */
+ unsigned is_in : 1; /*!< EP direction. 0: OUT, 1: IN */
+ unsigned speed : 2; /*!< EP speed. */
+ unsigned ep_type : 2; /*!< Endpoint type. */
+ unsigned mps :11; /*!< Max packet size in bytes */
+ unsigned data_pid_start : 2; /*!< PID for initial transaction. */
+ unsigned do_ping : 1; /*!< Set to 1 to indicate that a PING request should be issued on this
+ channel. If 0, process normally.
+ */
+
+ unsigned xfer_started : 1; /*!< Flag to indicate whether the transfer has been started. Set to 1 if
+ it has been started, 0 otherwise.
+ */
+ unsigned halting : 1; /*!< Set to 1 if the host channel has been halted, but the core is not
+ finished flushing queued requests. Otherwise 0.
+ */
+ unsigned short_rw : 1; /*!< When Tx, means termination needed.
+ When Rx, indicate Short Read */
+ /* Split settings for the host channel */
+ unsigned split : 2; /*!< Split: 0-Non Split, 1-SSPLIT, 2&3 CSPLIT */
+
+ /*== AVM/BC 20100701 - Workaround FullSpeed Interrupts with HiSpeed Hub ==*/
+ unsigned nyet_count;
+
+ /* nak monitor */
+ unsigned nak_retry_r : 16;
+ unsigned nak_retry : 16;
+ #define nak_retry_max 40000
+ unsigned nak_countdown : 8;
+ unsigned nak_countdown_r: 8;
+ #define nak_countdown_max 1
+
+ uint16_t wait_for_sof;
+ ifxhcd_control_phase_e control_phase; /*!< Current phase for control transfers (Setup, Data, or Status). */
+ uint32_t ssplit_out_xfer_count; /*!< How many bytes transferred during SSPLIT OUT */
+ #ifdef __DEBUG__
+ uint32_t start_hcchar_val;
+ #endif
+ #ifdef __HC_XFER_TIMEOUT__
+ hc_xfer_info_t hc_xfer_info;
+ struct timer_list hc_xfer_timer;
+ #endif
+ uint32_t hcchar;
+
+ /* Split settings for the host channel */
+ uint8_t hub_addr; /*!< Address of high speed hub */
+ uint8_t port_addr; /*!< Port of the low/full speed device */
+ #ifdef __EN_ISOC__
+ uint8_t isoc_xact_pos; /*!< Split transaction position */
+ #endif
+} ifxhcd_hc_t;
+
+
+/*!
+ \brief This structure holds the state of the HCD, including the non-periodic and
+ periodic schedules.
+ */
+typedef struct ifxhcd_hcd
+{
+ struct device *dev;
+ struct hc_driver hc_driver;
+ ifxusb_core_if_t core_if; /*!< Core interface structure (embedded). */
+ struct usb_hcd *syshcd;
+
+ volatile union ifxhcd_internal_flags
+ {
+ uint32_t d32;
+ struct
+ {
+ unsigned port_connect_status_change : 1;
+ unsigned port_connect_status : 1;
+ unsigned port_reset_change : 1;
+ unsigned port_enable_change : 1;
+ unsigned port_suspend_change : 1;
+ unsigned port_over_current_change : 1;
+ unsigned reserved : 27;
+ } b;
+ } flags; /*!< Internal HCD Flags */
+
+ struct ifxhcd_hc ifxhc[MAX_EPS_CHANNELS]; /*!< Array of host channel descriptors. Allows accessing
+ a host channel descriptor given the host channel number. This is
+ useful in interrupt handlers.
+ */
+ struct list_head free_hc_list; /*!< Free host channels in the controller. This is a list of ifxhcd_hc_t items. */
+ uint8_t *status_buf; /*!< Buffer to use for any data received during the status phase of a
+ control transfer. Normally no data is transferred during the status
+ phase. This buffer is used as a bit bucket.
+ */
+ #define IFXHCD_STATUS_BUF_SIZE 64
+
+ struct list_head epqh_np_active; // with URBD, with HC
+ struct list_head epqh_np_ready; // with URBD, No HC
+
+ struct list_head epqh_intr_active; // with URBD, with HC
+ struct list_head epqh_intr_ready; // with URBD, no pass, No HC
+
+ #ifdef __EN_ISOC__
+ struct list_head epqh_isoc_active; // with URBD, with HC
+ struct list_head epqh_isoc_ready; // with URBD, no pass, No HC
+ #endif
+
+ /*== AVM/BC 20101111 URB Complete List ==*/
+ struct list_head urbd_complete_list;
+
+ struct list_head epqh_stdby;
+
+ /* AVM/BC 20101111 flags removed */
+ //unsigned process_channels_in_use : 1;
+ //unsigned select_eps_in_use : 1;
+
+ struct tasklet_struct select_eps; /*!< Tasklet used to defer endpoint selection (select_eps). */
+ uint32_t lastframe;
+ spinlock_t lock;
+#ifdef __USE_TIMER_4_SOF__
+ struct hrtimer hr_timer;
+#endif
+} ifxhcd_hcd_t;
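+/*
+ * Scheduling overview (summary of the code in this patch, added for clarity):
+ * EPQHs wait on the *_ready lists until select_eps_sub() assigns them a free
+ * host channel and moves them to the matching *_active list; URBDs queued via
+ * defer_ifxhcd_complete_urb() are parked on urbd_complete_list and given back
+ * at the end of select_eps_sub().
+ */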
+
+/* Gets the ifxhcd_hcd from a struct usb_hcd */
+static inline ifxhcd_hcd_t *syshcd_to_ifxhcd(struct usb_hcd *syshcd)
+{
+ return (ifxhcd_hcd_t *)(syshcd->hcd_priv[0]);
+}
+
+/* Gets the struct usb_hcd that contains a ifxhcd_hcd_t. */
+static inline struct usb_hcd *ifxhcd_to_syshcd(ifxhcd_hcd_t *ifxhcd)
+{
+ return (struct usb_hcd *)(ifxhcd->syshcd);
+}
+
+/*! \brief HCD Create/Destroy Functions */
+/*@{*/
+ extern int ifxhcd_init (ifxhcd_hcd_t *_ifxhcd);
+ extern void ifxhcd_remove(ifxhcd_hcd_t *_ifxhcd);
+/*@}*/
+
+/*! \brief Linux HC Driver API Functions */
+/*@{*/
+extern int ifxhcd_start(struct usb_hcd *hcd);
+extern void ifxhcd_stop (struct usb_hcd *hcd);
+extern int ifxhcd_get_frame_number(struct usb_hcd *hcd);
+
+
+/*!
+ \brief This function does the setup for a data transfer for a host channel and
+ starts the transfer. May be called in either Slave mode or DMA mode. In
+ Slave mode, the caller must ensure that there is sufficient space in the
+ request queue and Tx Data FIFO.
+
+ For an OUT transfer in Slave mode, it loads a data packet into the
+ appropriate FIFO. If necessary, additional data packets will be loaded in
+ the Host ISR.
+
+ For an IN transfer in Slave mode, a data packet is requested. The data
+ packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ additional data packets are requested in the Host ISR.
+
+ For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ register along with a packet count of 1 and the channel is enabled. This
+ causes a single PING transaction to occur. Other fields in HCTSIZ are
+ simply set to 0 since no data transfer occurs in this case.
+
+ For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ all the information required to perform the subsequent data transfer. In
+ addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ controller performs the entire PING protocol, then starts the data
+ transfer.
+
+ @param _ifxhc Information needed to initialize the host channel. The xfer_len
+ value may be reduced to accommodate the max widths of the XferSize and
+ PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ to reflect the final xfer_len value.
+ */
+extern void ifxhcd_hc_start(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc);
+
+//extern int ifxhcd_urb_enqueue(struct usb_hcd *_syshcd, struct usb_host_endpoint *_sysep, struct urb *_urb, gfp_t mem_flags);
+//extern int ifxhcd_urb_dequeue(struct usb_hcd *_syshcd, struct urb *_urb);
+extern irqreturn_t ifxhcd_irq(struct usb_hcd *_syshcd);
+int ifxhcd_urb_enqueue( struct usb_hcd *_syshcd,
+ /*--- struct usb_host_endpoint *_sysep, parameter removed in 2.6.28 ---*/
+ struct urb *_urb,
+ gfp_t _mem_flags);
+int ifxhcd_urb_dequeue( struct usb_hcd *_syshcd,
+ struct urb *_urb, int status /* parameter new in 2.6.28 */);
+
+extern void ifxhcd_endpoint_disable(struct usb_hcd *_syshcd, struct usb_host_endpoint *_sysep);
+
+extern int ifxhcd_hub_status_data(struct usb_hcd *_syshcd, char *_buf);
+extern int ifxhcd_hub_control( struct usb_hcd *_syshcd,
+ u16 _typeReq,
+ u16 _wValue,
+ u16 _wIndex,
+ char *_buf,
+ u16 _wLength);
+
+/*@}*/
+
+/*! \brief Transaction Execution Functions */
+/*@{*/
+extern void ifxhcd_complete_urb (ifxhcd_hcd_t *_ifxhcd, ifxhcd_urbd_t *_urbd, int _status);
+
+/*@}*/
+
+/*! \brief Deferred Transaction Execution Functions */
+/*@{*/
+
+/*== AVM/BC 20101111 URB Complete List ==*/
+extern void defer_ifxhcd_complete_urb (ifxhcd_hcd_t *_ifxhcd, ifxhcd_urbd_t *_urbd, int _status);
+
+/*!
+ \brief Clears the transfer state for a host channel. This function is normally
+ called after a transfer is done and the host channel is being released.
+ */
+extern void ifxhcd_hc_cleanup(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc);
+
+/*!
+ \brief Attempts to halt a host channel. This function should only be called in
+ Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
+ normal circumstances in DMA mode, the controller halts the channel when the
+ transfer is complete or a condition occurs that requires application
+ intervention.
+
+ In slave mode, checks for a free request queue entry, then sets the Channel
+ Enable and Channel Disable bits of the Host Channel Characteristics
+ register of the specified channel to initiate the halt. If there is no free
+ request queue entry, sets only the Channel Disable bit of the HCCHARn
+ register to flush requests for this channel. In the latter case, sets a
+ flag to indicate that the host channel needs to be halted when a request
+ queue slot is open.
+
+ In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ HCCHARn register. The controller ensures there is space in the request
+ queue before submitting the halt request.
+
+ Some time may elapse before the core flushes any posted requests for this
+ host channel and halts. The Channel Halted interrupt handler completes the
+ deactivation of the host channel.
+ */
+extern void ifxhcd_hc_halt(ifxusb_core_if_t *_core_if,
+ ifxhcd_hc_t *_ifxhc,
+ ifxhcd_halt_status_e _halt_status);
+
+/*!
+ \brief Prepares a host channel for transferring packets to/from a specific
+ endpoint. The HCCHARn register is set up with the characteristics specified
+ in _ifxhc. Host channel interrupts that may need to be serviced while this
+ transfer is in progress are enabled.
+ */
+extern void ifxhcd_hc_init(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc);
+
+/*!
+ \brief This function is called to handle the disconnection of host port.
+ */
+int32_t ifxhcd_disconnect(ifxhcd_hcd_t *_ifxhcd);
+/*@}*/
+
+/*! \brief Interrupt Handler Functions */
+/*@{*/
+extern irqreturn_t ifxhcd_oc_irq(int _irq, void *_dev);
+
+extern int32_t ifxhcd_handle_oc_intr(ifxhcd_hcd_t *_ifxhcd);
+extern int32_t ifxhcd_handle_intr (ifxhcd_hcd_t *_ifxhcd);
+/*@}*/
+
+
+/*! \brief Schedule Queue Functions */
+/*@{*/
+extern ifxhcd_epqh_t *ifxhcd_epqh_create (ifxhcd_hcd_t *_ifxhcd, struct urb *_urb);
+extern void ifxhcd_epqh_free ( ifxhcd_epqh_t *_epqh);
+extern void select_eps (ifxhcd_hcd_t *_ifxhcd);
+extern void process_channels(ifxhcd_hcd_t *_ifxhcd);
+extern void process_channels_sub(ifxhcd_hcd_t *_ifxhcd);
+extern void complete_channel(ifxhcd_hcd_t *_ifxhcd, ifxhcd_hc_t *_ifxhc, ifxhcd_urbd_t *_urbd);
+extern void ifxhcd_epqh_ready(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh);
+extern void ifxhcd_epqh_active(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh);
+extern void ifxhcd_epqh_idle(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh);
+extern void ifxhcd_epqh_idle_periodic(ifxhcd_epqh_t *_epqh);
+extern int ifxhcd_urbd_create (ifxhcd_hcd_t *_ifxhcd,struct urb *_urb);
+/*@}*/
+
+/*! \brief Gets the usb_host_endpoint associated with an URB. */
+static inline struct usb_host_endpoint *ifxhcd_urb_to_endpoint(struct urb *_urb)
+{
+ struct usb_device *dev = _urb->dev;
+ int ep_num = usb_pipeendpoint(_urb->pipe);
+
+ return (usb_pipein(_urb->pipe))?(dev->ep_in[ep_num]):(dev->ep_out[ep_num]);
+}
+
+/*!
+ * \brief Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
+ * qualified with its direction (up to 32 distinct endpoints per device).
+ */
+#define ifxhcd_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \
+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4)
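+/*
+ * Example (illustrative only): bEndpointAddress 0x81 (IN endpoint 1) maps to
+ * index 0x11, while 0x02 (OUT endpoint 2) stays at index 0x02, so IN and OUT
+ * endpoints with the same number occupy distinct slots.
+ */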
+
+
+/* AVM/WK: not needed?
+
+extern struct usb_device *usb_alloc_dev (struct usb_device *parent, struct usb_bus *, unsigned port);
+extern int usb_add_hcd (struct usb_hcd *syshcd, unsigned int irqnum, unsigned long irqflags);
+extern void usb_remove_hcd (struct usb_hcd *syshcd);
+extern struct usb_hcd *usb_create_hcd (const struct hc_driver *driver, struct device *dev, char *bus_name);
+extern void usb_hcd_giveback_urb (struct usb_hcd *syshcd, struct urb *urb);
+extern void usb_put_hcd (struct usb_hcd *syshcd);
+extern long usb_calc_bus_time (int speed, int is_input, int isoc, int bytecount);
+
+*/
+/** Internal Functions */
+void ifxhcd_dump_state(ifxhcd_hcd_t *_ifxhcd);
+extern char *syserr(int errno);
+
+/*@}*//*IFXUSB_HCD*/
+
+#endif // __IFXHCD_H__
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_es.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_es.c
new file mode 100644
index 0000000000..ef9e8c05ef
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_es.c
@@ -0,0 +1,549 @@
+/*****************************************************************************
+ ** FILE NAME : ifxhcd_es.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the functions to enable the host-mode USB-IF Electrical Test feature.
+ *****************************************************************************/
+
+/*!
+ \file ifxhcd_es.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the functions to enable the host-mode USB-IF Electrical Test feature.
+*/
+
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include <linux/kernel.h>
+
+#include <linux/errno.h>
+
+#include <linux/dma-mapping.h>
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+#include "ifxhcd.h"
+
+
+#ifdef __WITH_HS_ELECT_TST__
+ /*
+ * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ *
+ * This code was copied from our userspace app "hset". It sends a
+ * Get Device Descriptor control sequence in two parts, first the
+ * Setup packet by itself, followed some time later by the In and
+ * Ack packets. Rather than trying to figure out how to add this
+ * functionality to the normal driver code, we just hijack the
+ * hardware, using these two function to drive the hardware
+ * directly.
+ */
+
+
+ void do_setup(ifxusb_core_if_t *_core_if)
+ {
+
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ ifxusb_host_global_regs_t *hc_global_regs = _core_if->host_global_regs;
+ ifxusb_hc_regs_t *hc_regs = _core_if->hc_regs[0];
+ uint32_t *data_fifo = _core_if->data_fifo[0];
+
+ gint_data_t gintsts;
+ hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar;
+ haint_data_t haint;
+ hcint_data_t hcint;
+
+
+ /* Enable HAINTs */
+ ifxusb_wreg(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */
+ ifxusb_wreg(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /*
+ * Send Setup packet (Get Device Descriptor)
+ */
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ // hcchar.b.chen = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+ //sleep(1);
+ mdelay(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = IFXUSB_HC_PID_SETUP;
+ ifxusb_wreg(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ hcchar.b.eptype = IFXUSB_EP_TYPE_CTRL;
+ hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+
+ /* Fill FIFO with Setup data for Get Device Descriptor */
+ ifxusb_wreg(data_fifo++, 0x01000680);
+ ifxusb_wreg(data_fifo++, 0x00080000);
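+ /* The two words above form the 8-byte GET_DESCRIPTOR(Device) SETUP packet,
+ * sent LSB first: bmRequestType=0x80, bRequest=0x06, wValue=0x0100,
+ * wIndex=0x0000, wLength=0x0008. */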
+
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Disable HCINTs */
+ ifxusb_wreg(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */
+ ifxusb_wreg(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+ }
+
+ void do_in_ack(ifxusb_core_if_t *_core_if)
+ {
+
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ ifxusb_host_global_regs_t *hc_global_regs = _core_if->host_global_regs;
+ ifxusb_hc_regs_t *hc_regs = _core_if->hc_regs[0];
+ uint32_t *data_fifo = _core_if->data_fifo[0];
+
+ gint_data_t gintsts;
+ hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar;
+ haint_data_t haint;
+ hcint_data_t hcint;
+ grxsts_data_t grxsts;
+
+ /* Enable HAINTs */
+ ifxusb_wreg(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */
+ ifxusb_wreg(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /*
+ * Receive Control In packet
+ */
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ hcchar.b.chen = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+ //sleep(1);
+ mdelay(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = IFXUSB_HC_PID_DATA1;
+ ifxusb_wreg(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ hcchar.b.eptype = IFXUSB_EP_TYPE_CTRL;
+ hcchar.b.epdir = 1;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for receive status queue interrupt */
+ do {
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read RXSTS */
+ grxsts.d32 = ifxusb_rreg(&global_regs->grxstsp);
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ switch (grxsts.hb.pktsts) {
+ case IFXUSB_HSTS_DATA_UPDT:
+ /* Read the data into the host buffer */
+ if (grxsts.hb.bcnt > 0) {
+ int i;
+ int word_count = (grxsts.hb.bcnt + 3) / 4;
+
+ for (i = 0; i < word_count; i++) {
+ (void)ifxusb_rreg(data_fifo++);
+ }
+ }
+
+ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.hb.bcnt);
+ break;
+
+ default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
+ break;
+ }
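+ /* Note: the pop loop above drains (bcnt + 3) / 4 words, i.e. the byte
+ * count rounded up to whole 32-bit FIFO words; e.g. the 8-byte descriptor
+ * prefix requested here is read back as two words. */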
+
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for receive status queue interrupt */
+ do {
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read RXSTS */
+ grxsts.d32 = ifxusb_rreg(&global_regs->grxstsp);
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ switch (grxsts.hb.pktsts) {
+ case IFXUSB_HSTS_XFER_COMP:
+ break;
+
+ default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
+ break;
+ }
+
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ // usleep(100000);
+ // mdelay(100);
+ mdelay(1);
+
+ /*
+ * Send handshake packet
+ */
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ hcchar.b.chen = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+ //sleep(1);
+ mdelay(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 0;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = IFXUSB_HC_PID_DATA1;
+ ifxusb_wreg(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ hcchar.b.eptype = IFXUSB_EP_TYPE_CTRL;
+ hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ ifxusb_wreg(&hc_regs->hcchar, hcchar.d32);
+
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Disable HCINTs */
+ ifxusb_wreg(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */
+ ifxusb_wreg(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */
+ haint.d32 = ifxusb_rreg(&hc_global_regs->haint);
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = ifxusb_rreg(&hc_regs->hcint);
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = ifxusb_rreg(&hc_regs->hcchar);
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ ifxusb_wreg(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ ifxusb_wreg(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = ifxusb_rreg(&global_regs->gintsts);
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+ }
+#endif //__WITH_HS_ELECT_TST__
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_intr.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_intr.c
new file mode 100644
index 0000000000..76fe602c76
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_intr.c
@@ -0,0 +1,3742 @@
+/*****************************************************************************
+ ** FILE NAME : ifxhcd_intr.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the implementation of the HCD Interrupt handlers.
+ *****************************************************************************/
+
+/*!
+ \file ifxhcd_intr.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the implementation of the HCD Interrupt handlers.
+*/
+
+
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+
+#include "ifxhcd.h"
+
+/* AVM/WK 20100520*/
+#ifdef __EN_ISOC__
+#error AVM/WK: CONFIG_USB_HOST_IFX_WITH_ISO currently not supported!
+#endif
+
+/* Macro used to clear one channel interrupt */
+#define clear_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcint_data_t hcint_clear = {.d32 = 0}; \
+ hcint_clear.b._intr_ = 1; \
+ ifxusb_wreg(&((_hc_regs_)->hcint), hcint_clear.d32); \
+ } while (0)
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+#define disable_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcint_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \
+ ifxusb_mreg(&((_hc_regs_)->hcintmsk), hcintmsk.d32, 0); \
+ } while (0)
+
+#define enable_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcint_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \
+ ifxusb_mreg(&((_hc_regs_)->hcintmsk),0, hcintmsk.d32); \
+ } while (0)
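+/*
+ * Illustrative use of the helpers above (a sketch, not copied from a
+ * specific handler): after servicing a NAK on a halted channel one would
+ * typically do
+ *
+ *     clear_hc_int(hc_regs, nak);      // acknowledge the interrupt
+ *     disable_hc_int(hc_regs, nak);    // mask it until the channel is reused
+ *
+ * and re-arm it with enable_hc_int(hc_regs, nak) before restarting the
+ * channel, as the chhltd_* handlers below do.
+ */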
+
+/*
+ * Read back the data toggle (PID) currently programmed for a host channel.
+ * Callers save it as the starting toggle for the next transfer: in the QH
+ * for non-control transfers and in the QTD for control transfers.
+ */
+uint8_t read_data_toggle(ifxusb_hc_regs_t *_hc_regs)
+{
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ return(hctsiz.b.pid);
+}
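+/*
+ * Sketch of how this read is handed back to usbcore (see the AVM/WK fix in
+ * release_channel() below): a PID of DATA0 maps to toggle 0, anything else
+ * to toggle 1:
+ *
+ *     toggle = (read_data_toggle(hc_regs) == IFXUSB_HC_PID_DATA0) ? 0 : 1;
+ *     usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ *                   usb_pipeout(urb->pipe), toggle);
+ */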
+
+
+static void release_channel_dump(ifxhcd_hc_t *ifxhc,
+ struct urb *urb,
+ ifxhcd_epqh_t *epqh,
+ ifxhcd_urbd_t *urbd,
+ ifxhcd_halt_status_e halt_status)
+{
+ #ifdef __DEBUG__
+ printk(KERN_INFO);
+ switch (halt_status)
+ {
+ case HC_XFER_NO_HALT_STATUS:
+ printk("HC_XFER_NO_HALT_STATUS");break;
+ case HC_XFER_URB_COMPLETE:
+ printk("HC_XFER_URB_COMPLETE");break;
+ case HC_XFER_AHB_ERR:
+ printk("HC_XFER_AHB_ERR");break;
+ case HC_XFER_STALL:
+ printk("HC_XFER_STALL");break;
+ case HC_XFER_BABBLE_ERR:
+ printk("HC_XFER_BABBLE_ERR");break;
+ case HC_XFER_XACT_ERR:
+ printk("HC_XFER_XACT_ERR");break;
+ case HC_XFER_URB_DEQUEUE:
+ printk("HC_XFER_URB_DEQUEUE");break;
+ case HC_XFER_FRAME_OVERRUN:
+ printk("HC_XFER_FRAME_OVERRUN");break;
+ case HC_XFER_DATA_TOGGLE_ERR:
+ printk("HC_XFER_DATA_TOGGLE_ERR");break;
+ case HC_XFER_NAK:
+ printk("HC_XFER_NAK");break;
+ case HC_XFER_COMPLETE:
+ printk("HC_XFER_COMPLETE");break;
+ default:
+ printk("KNOWN");break;
+ }
+ if(ifxhc)
+ printk("Ch %d %s%s S%d " , ifxhc->hc_num
+ ,(ifxhc->ep_type == IFXUSB_EP_TYPE_CTRL)?"CTRL-":
+ ((ifxhc->ep_type == IFXUSB_EP_TYPE_BULK)?"BULK-":
+ ((ifxhc->ep_type == IFXUSB_EP_TYPE_INTR)?"INTR-":
+ ((ifxhc->ep_type == IFXUSB_EP_TYPE_ISOC)?"ISOC-":"????"
+ )
+ )
+ )
+ ,(ifxhc->is_in)?"IN":"OUT"
+ ,(ifxhc->split)
+ );
+ else
+ printk(" [NULL HC] ");
+ printk("urb=%p epqh=%p urbd=%p\n",urb,epqh,urbd);
+
+ if(urb)
+ {
+ printk(KERN_INFO " Device address: %d\n", usb_pipedevice(urb->pipe));
+ printk(KERN_INFO " Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+ (usb_pipein(urb->pipe) ? "IN" : "OUT"));
+ printk(KERN_INFO " Endpoint type: %s\n",
+ ({char *pipetype;
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL: pipetype = "CTRL"; break;
+ case PIPE_BULK: pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: pipetype = "INTR"; break;
+ case PIPE_ISOCHRONOUS: pipetype = "ISOC"; break;
+ default: pipetype = "????"; break;
+ }; pipetype;}));
+ printk(KERN_INFO " Speed: %s\n",
+ ({char *speed;
+ switch (urb->dev->speed) {
+ case USB_SPEED_HIGH: speed = "HS"; break;
+ case USB_SPEED_FULL: speed = "FS"; break;
+ case USB_SPEED_LOW: speed = "LS"; break;
+ default: speed = "????"; break;
+ }; speed;}));
+ printk(KERN_INFO " Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ printk(KERN_INFO " Data buffer length: %d\n", urb->transfer_buffer_length);
+ printk(KERN_INFO " Transfer buffer: %p, Transfer DMA: %p\n",
+ urb->transfer_buffer, (void *)urb->transfer_dma);
+ printk(KERN_INFO " Setup buffer: %p, Setup DMA: %p\n",
+ urb->setup_packet, (void *)urb->setup_dma);
+ printk(KERN_INFO " Interval: %d\n", urb->interval);
+ switch (urb->status)
+ {
+ case HC_XFER_NO_HALT_STATUS:
+ printk(KERN_INFO " STATUS:HC_XFER_NO_HALT_STATUS\n");break;
+ case HC_XFER_URB_COMPLETE:
+ printk(KERN_INFO " STATUS:HC_XFER_URB_COMPLETE\n");break;
+ case HC_XFER_AHB_ERR:
+ printk(KERN_INFO " STATUS:HC_XFER_AHB_ERR\n");break;
+ case HC_XFER_STALL:
+ printk(KERN_INFO " STATUS:HC_XFER_STALL\n");break;
+ case HC_XFER_BABBLE_ERR:
+ printk(KERN_INFO " STATUS:HC_XFER_BABBLE_ERR\n");break;
+ case HC_XFER_XACT_ERR:
+ printk(KERN_INFO " STATUS:HC_XFER_XACT_ERR\n");break;
+ case HC_XFER_URB_DEQUEUE:
+ printk(KERN_INFO " STATUS:HC_XFER_URB_DEQUEUE\n");break;
+ case HC_XFER_FRAME_OVERRUN:
+ printk(KERN_INFO " STATUS:HC_XFER_FRAME_OVERRUN\n");break;
+ case HC_XFER_DATA_TOGGLE_ERR:
+ printk(KERN_INFO " STATUS:HC_XFER_DATA_TOGGLE_ERR\n");break;
+ case HC_XFER_COMPLETE:
+ printk(KERN_INFO " STATUS:HC_XFER_COMPLETE\n");break;
+ default:
+ printk(KERN_INFO " STATUS:KNOWN\n");break;
+ }
+ }
+ #endif
+}
+
+
+static void release_channel(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxhcd_halt_status_e _halt_status)
+{
+ ifxusb_hc_regs_t *hc_regs = _ifxhcd->core_if.hc_regs[_ifxhc->hc_num];
+ struct urb *urb = NULL;
+ ifxhcd_epqh_t *epqh = NULL;
+ ifxhcd_urbd_t *urbd = NULL;
+
+ IFX_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n",
+ __func__, _ifxhc->hc_num, _halt_status);
+
+ epqh=_ifxhc->epqh;
+
+ if(!epqh)
+ IFX_ERROR("%s epqh=null\n",__func__);
+ else
+ {
+ urbd=epqh->urbd;
+ if(!urbd)
+ IFX_ERROR("%s urbd=null\n",__func__);
+ else
+ {
+ urb=urbd->urb;
+ if(!urb)
+ IFX_ERROR("%s urb =null\n",__func__);
+ else {
+ /* == AVM/WK 20100710 Fix - Use toggle of usbcore ==*/
+ unsigned toggle = (read_data_toggle(hc_regs) == IFXUSB_HC_PID_DATA0)? 0: 1;
+ usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), usb_pipeout(urb->pipe), toggle);
+ }
+ }
+ //epqh->data_toggle = read_data_toggle(hc_regs);
+
+ }
+
+ switch (_halt_status)
+ {
+ case HC_XFER_NO_HALT_STATUS:
+ IFX_ERROR("%s: No halt_status, channel %d\n", __func__, _ifxhc->hc_num);
+ break;
+ case HC_XFER_COMPLETE:
+ IFX_ERROR("%s: Inavalid halt_status HC_XFER_COMPLETE, channel %d\n", __func__, _ifxhc->hc_num);
+ break;
+ case HC_XFER_URB_COMPLETE:
+ case HC_XFER_URB_DEQUEUE:
+ case HC_XFER_AHB_ERR:
+ case HC_XFER_XACT_ERR:
+ case HC_XFER_FRAME_OVERRUN:
+ if(urbd && urb) {
+ /* == 20110803 AVM/WK FIX set status, if still in progress == */
+ if (urb->status == -EINPROGRESS) {
+ switch (_halt_status) {
+ case HC_XFER_URB_COMPLETE:
+ urb->status = 0;
+ break;
+ case HC_XFER_URB_DEQUEUE:
+ urb->status = -ECONNRESET;
+ break;
+ case HC_XFER_AHB_ERR:
+ case HC_XFER_XACT_ERR:
+ case HC_XFER_FRAME_OVERRUN:
+ urb->status = -EPROTO;
+ break;
+ default:
+ break;
+ }
+ }
+ /*== AVM/BC 20101111 Deferred Complete ==*/
+ defer_ifxhcd_complete_urb(_ifxhcd, urbd, urb->status);
+ }
+ else
+ {
+ IFX_WARN("WARNING %s():%d urbd=%p urb=%p\n",__func__,__LINE__,urbd,urb);
+ release_channel_dump(_ifxhc,urb,epqh,urbd,_halt_status);
+ }
+ if(epqh)
+ ifxhcd_epqh_idle(_ifxhcd, epqh);
+ else
+ {
+ IFX_WARN("WARNING %s():%d epqh=%p\n",__func__,__LINE__,epqh);
+ release_channel_dump(_ifxhc,urb,epqh,urbd,_halt_status);
+ }
+
+ list_add_tail(&_ifxhc->hc_list_entry, &_ifxhcd->free_hc_list);
+ ifxhcd_hc_cleanup(&_ifxhcd->core_if, _ifxhc);
+ break;
+ case HC_XFER_STALL:
+ release_channel_dump(_ifxhc,urb,epqh,urbd,_halt_status);
+ if(urbd)
+ /*== AVM/BC 20101111 Deferred Complete ==*/
+ defer_ifxhcd_complete_urb(_ifxhcd, urbd, -EPIPE);
+ else
+ IFX_WARN("WARNING %s():%d urbd=%p urb=%p\n",__func__,__LINE__,urbd,urb);
+ if(epqh)
+ {
+// epqh->data_toggle = 0;
+ ifxhcd_epqh_idle(_ifxhcd, epqh);
+ }
+ else
+ IFX_WARN("WARNING %s():%d epqh=%p\n",__func__,__LINE__,epqh);
+ list_add_tail(&_ifxhc->hc_list_entry, &_ifxhcd->free_hc_list);
+ ifxhcd_hc_cleanup(&_ifxhcd->core_if, _ifxhc);
+ break;
+ case HC_XFER_NAK:
+ release_channel_dump(_ifxhc,urb,epqh,urbd,_halt_status);
+			if(urbd && urb)
+ {
+ //ifxhcd_complete_urb(_ifxhcd, urbd, -ETIMEDOUT);
+ urb->status = 0;
+ /*== AVM/BC 20101111 Deferred Complete ==*/
+ defer_ifxhcd_complete_urb(_ifxhcd, urbd, urb->status);
+ }
+ else
+ IFX_WARN("WARNING %s():%d urbd=%p urb=%p\n",__func__,__LINE__,urbd,urb);
+ if(epqh)
+ ifxhcd_epqh_idle(_ifxhcd, epqh);
+ else
+ IFX_WARN("WARNING %s():%d epqh=%p\n",__func__,__LINE__,epqh);
+ list_add_tail(&_ifxhc->hc_list_entry, &_ifxhcd->free_hc_list);
+ ifxhcd_hc_cleanup(&_ifxhcd->core_if, _ifxhc);
+ break;
+ case HC_XFER_BABBLE_ERR:
+ case HC_XFER_DATA_TOGGLE_ERR:
+ release_channel_dump(_ifxhc,urb,epqh,urbd,_halt_status);
+ if(urbd)
+ /*== AVM/BC 20101111 Deferred Complete ==*/
+ defer_ifxhcd_complete_urb(_ifxhcd, urbd, -EOVERFLOW);
+ else
+ IFX_WARN("WARNING %s():%d urbd=%p urb=%p\n",__func__,__LINE__,urbd,urb);
+ if(epqh)
+ ifxhcd_epqh_idle(_ifxhcd, epqh);
+ else
+ IFX_WARN("WARNING %s():%d epqh=%p\n",__func__,__LINE__,epqh);
+ list_add_tail(&_ifxhc->hc_list_entry, &_ifxhcd->free_hc_list);
+ ifxhcd_hc_cleanup(&_ifxhcd->core_if, _ifxhc);
+ break;
+ }
+ select_eps(_ifxhcd);
+}
+
+/*
+ * Updates the state of the URB after a Transfer Complete interrupt on the
+ * host channel. Updates the actual_length field of the URB based on the
+ * number of bytes transferred via the host channel. Sets the URB status
+ * if the data transfer is finished.
+ *
+ * @return 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise.
+ */
+static int update_urb_state_xfer_comp(ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ struct urb *_urb,
+ ifxhcd_urbd_t *_urbd)
+{
+ int xfer_done = 0;
+
+ if (_ifxhc->is_in)
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ _urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ if ((hctsiz.b.xfersize != 0) || (_urb->actual_length >= _urb->transfer_buffer_length))
+ {
+ xfer_done = 1;
+ _urb->status = 0;
+ /* 20110805 AVM/WK Workaround: catch overflow error here, hardware does not */
+ if (_urb->actual_length > _urb->transfer_buffer_length) {
+ _urb->status = -EOVERFLOW;
+ }
+ #if 0
+ if (_urb->actual_length < _urb->transfer_buffer_length && _urb->transfer_flags & URB_SHORT_NOT_OK)
+ _urb->status = -EREMOTEIO;
+ #endif
+ }
+
+ }
+ else
+ {
+ if (_ifxhc->split)
+ _urb->actual_length += _ifxhc->ssplit_out_xfer_count;
+ else
+ _urb->actual_length += _ifxhc->xfer_len;
+
+ if (_urb->actual_length >= _urb->transfer_buffer_length)
+ {
+ /*== AVM/BC WK 20110421 ZERO PACKET Workaround ==*/
+ if ((_ifxhc->short_rw == 1) && ( _ifxhc->xfer_len > 0) && ( _ifxhc->xfer_len % _ifxhc->mps == 0 ))
+ {
+ _ifxhc->short_rw = 0;
+ //Transfer not finished. Another iteration for ZLP.
+ }
+ else
+ {
+ xfer_done = 1;
+ }
+ _urb->status = 0;
+ }
+ }
+
+ #ifdef __DEBUG__
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ IFX_DEBUGPL(DBG_HCDV, "IFXUSB: %s: %s, channel %d\n",
+ __func__, (_ifxhc->is_in ? "IN" : "OUT"), _ifxhc->hc_num);
+ IFX_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", _ifxhc->xfer_len);
+ IFX_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize);
+ IFX_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
+ _urb->transfer_buffer_length);
+ IFX_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", _urb->actual_length);
+ }
+ #endif
+ return xfer_done;
+}
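+/*
+ * Worked example (illustrative numbers): an IN transfer programmed with
+ * xfer_len = 512 that halts with HCTSIZ.xfersize reading back 112 adds
+ * 512 - 112 = 400 bytes to urb->actual_length; the URB is reported done
+ * when xfersize is non-zero (short packet) or actual_length has reached
+ * transfer_buffer_length.
+ */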
+
+/*== AVM/BC 20101111 Function called with Lock ==*/
+
+void complete_channel(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxhcd_urbd_t *_urbd)
+{
+ ifxusb_hc_regs_t *hc_regs = _ifxhcd->core_if.hc_regs[_ifxhc->hc_num];
+ struct urb *urb = NULL;
+ ifxhcd_epqh_t *epqh = NULL;
+ int urb_xfer_done;
+
+ IFX_DEBUGPL(DBG_HCD, "--Complete Channel %d : \n", _ifxhc->hc_num);
+
+ if(!_urbd)
+ {
+ IFX_ERROR("ERROR %s():%d urbd=%p\n",__func__,__LINE__,_urbd);
+ return;
+ }
+
+ urb = _urbd->urb;
+ epqh = _urbd->epqh;
+
+ if(!urb || !epqh)
+ {
+ IFX_ERROR("ERROR %s():%d urb=%p epqh=%p\n",__func__,__LINE__,urb,epqh);
+ return;
+ }
+
+ _ifxhc->do_ping=0;
+
+ if (_ifxhc->split)
+ _ifxhc->split = 1;
+
+ switch (epqh->ep_type)
+ {
+ case IFXUSB_EP_TYPE_CTRL:
+ switch (_ifxhc->control_phase)
+ {
+ case IFXHCD_CONTROL_SETUP:
+ IFX_DEBUGPL(DBG_HCDV, " Control setup transaction done\n");
+ if (_urbd->xfer_len > 0)
+ {
+ _ifxhc->control_phase = IFXHCD_CONTROL_DATA;
+ _ifxhc->is_in = _urbd->is_in;
+ _ifxhc->xfer_len = _urbd->xfer_len;
+ #if defined(__UNALIGNED_BUFFER_ADJ__)
+ if(epqh->using_aligned_buf)
+ _ifxhc->xfer_buff = epqh->aligned_buf;
+ else
+ #endif
+ _ifxhc->xfer_buff = _urbd->xfer_buff;
+ }
+ else
+ {
+ _ifxhc->control_phase = IFXHCD_CONTROL_STATUS;
+ _ifxhc->is_in = 1;
+ _ifxhc->xfer_len = 0;
+ _ifxhc->xfer_buff = _ifxhcd->status_buf;
+ }
+ if(_ifxhc->is_in)
+ _ifxhc->short_rw =0;
+ else
+ _ifxhc->short_rw =(urb->transfer_flags & URB_ZERO_PACKET)?1:0;
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ _ifxhc->xfer_count = 0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ /*== AVM/BC 20101111 Lock not needed ==*/
+ process_channels_sub(_ifxhcd);
+ break;
+ case IFXHCD_CONTROL_DATA:
+ urb_xfer_done = update_urb_state_xfer_comp(_ifxhc, hc_regs, urb, _urbd);
+ if (urb_xfer_done)
+ {
+ _ifxhc->control_phase = IFXHCD_CONTROL_STATUS;
+ _ifxhc->is_in = (_urbd->is_in)?0:1;
+ _ifxhc->xfer_len = 0;
+ _ifxhc->xfer_count = 0;
+ _ifxhc->xfer_buff = _ifxhcd->status_buf;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ if(_ifxhc->is_in)
+ _ifxhc->short_rw =0;
+ else
+ _ifxhc->short_rw =1;
+ }
+ else // continue
+ {
+ _ifxhc->xfer_len = _urbd->xfer_len - urb->actual_length;
+ _ifxhc->xfer_count = urb->actual_length;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->data_pid_start = read_data_toggle(hc_regs);
+ }
+ /*== AVM/BC 20101111 Lock not needed ==*/
+ process_channels_sub(_ifxhcd);
+ break;
+ case IFXHCD_CONTROL_STATUS:
+ if (urb->status == -EINPROGRESS)
+ urb->status = 0;
+ release_channel(_ifxhcd,_ifxhc,HC_XFER_URB_COMPLETE);
+ break;
+ }
+ break;
+ case IFXUSB_EP_TYPE_BULK:
+ IFX_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
+ urb_xfer_done = update_urb_state_xfer_comp(_ifxhc, hc_regs, urb, _urbd);
+ if (urb_xfer_done)
+ release_channel(_ifxhcd,_ifxhc,HC_XFER_URB_COMPLETE);
+ else
+ {
+ _ifxhc->xfer_len = _urbd->xfer_len - urb->actual_length;
+ _ifxhc->xfer_count = urb->actual_length;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->data_pid_start = read_data_toggle(hc_regs);
+ /*== AVM/BC 20101111 Lock not needed ==*/
+ process_channels_sub(_ifxhcd);
+ }
+ break;
+ case IFXUSB_EP_TYPE_INTR:
+ urb_xfer_done = update_urb_state_xfer_comp(_ifxhc, hc_regs, urb, _urbd);
+ release_channel(_ifxhcd,_ifxhc,HC_XFER_URB_COMPLETE);
+ break;
+ case IFXUSB_EP_TYPE_ISOC:
+// if (_urbd->isoc_split_pos == IFXUSB_HCSPLIT_XACTPOS_ALL)
+// halt_status = update_isoc_urb_state(_ifxhcd, _ifxhc, hc_regs, _urbd, HC_XFER_COMPLETE);
+// complete_periodic_xfer(_ifxhcd, _ifxhc, hc_regs, _urbd, halt_status);
+ urb_xfer_done = update_urb_state_xfer_comp(_ifxhc, hc_regs, urb, _urbd);
+ release_channel(_ifxhcd,_ifxhc,HC_XFER_URB_COMPLETE);
+ break;
+ }
+}
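+/*
+ * Summary of the control transfer phase progression handled above:
+ *   SETUP  -> DATA    if the URB carries a data stage (direction from the URB)
+ *   SETUP  -> STATUS  if there is no data stage (STATUS is then IN)
+ *   DATA   -> STATUS  once the data stage is complete (opposite direction)
+ *   STATUS -> release_channel(..., HC_XFER_URB_COMPLETE)
+ * The DATA and STATUS stages are started with PID DATA1.
+ */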
+
+
+
+void showint(uint32_t val_hcint
+ ,uint32_t val_hcintmsk
+ ,uint32_t val_hctsiz)
+{
+#ifdef __DEBUG__
+ hcint_data_t hcint = {.d32 = val_hcint};
+ hcint_data_t hcintmsk = {.d32 = val_hcintmsk};
+
+ printk(KERN_INFO " WITH FLAG: Sz:%08x I:%08X/M:%08X %s%s%s%s%s%s%s%s%s%s\n"
+ ,val_hctsiz,hcint.d32 ,hcintmsk.d32
+ ,(hcint.b.datatglerr || hcintmsk.b.datatglerr)?
+ (
+ (hcint.b.datatglerr && hcintmsk.b.datatglerr)?"datatglerr[*/*] ":
+ (
+ (hcint.b.datatglerr)?"datatglerr[*/] ":"datatglerr[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.frmovrun || hcintmsk.b.frmovrun)?
+ (
+ (hcint.b.frmovrun && hcintmsk.b.frmovrun)?"frmovrun[*/*] ":
+ (
+ (hcint.b.frmovrun)?"frmovrun[*/] ":"frmovrun[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.bblerr || hcintmsk.b.bblerr)?
+ (
+ (hcint.b.bblerr && hcintmsk.b.bblerr)?"bblerr[*/*] ":
+ (
+ (hcint.b.bblerr)?"bblerr[*/] ":"bblerr[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.xacterr || hcintmsk.b.xacterr)?
+ (
+ (hcint.b.xacterr && hcintmsk.b.xacterr)?"xacterr[*/*] ":
+ (
+ (hcint.b.xacterr)?"xacterr[*/] ":"xacterr[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.nyet || hcintmsk.b.nyet)?
+ (
+ (hcint.b.nyet && hcintmsk.b.nyet)?"nyet[*/*] ":
+ (
+ (hcint.b.nyet)?"nyet[*/] ":"nyet[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.nak || hcintmsk.b.nak)?
+ (
+ (hcint.b.nak && hcintmsk.b.nak)?"nak[*/*] ":
+ (
+ (hcint.b.nak)?"nak[*/] ":"nak[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.ack || hcintmsk.b.ack)?
+ (
+ (hcint.b.ack && hcintmsk.b.ack)?"ack[*/*] ":
+ (
+ (hcint.b.ack)?"ack[*/] ":"ack[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.stall || hcintmsk.b.stall)?
+ (
+ (hcint.b.stall && hcintmsk.b.stall)?"stall[*/*] ":
+ (
+ (hcint.b.stall)?"stall[*/] ":"stall[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.ahberr || hcintmsk.b.ahberr)?
+ (
+ (hcint.b.ahberr && hcintmsk.b.ahberr)?"ahberr[*/*] ":
+ (
+ (hcint.b.ahberr)?"ahberr[*/] ":"ahberr[/*] "
+ )
+ )
+ :""
+ ,(hcint.b.xfercomp || hcintmsk.b.xfercomp)?
+ (
+ (hcint.b.xfercomp && hcintmsk.b.xfercomp)?"xfercomp[*/*] ":
+ (
+ (hcint.b.xfercomp)?"xfercomp[*/] ":"xfercomp[/*] "
+ )
+ )
+ :""
+ );
+#endif
+}
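+// Legend for the showint() output above: each interrupt name carries a tag
+// showing where its bit is set - "[*/*]" in both HCINT and HCINTMSK,
+// "[*/]" in HCINT only, "[/*]" in HCINTMSK only.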
+
+
+extern void ifxhcd_hc_dumb_rx(ifxusb_core_if_t *_core_if, ifxhcd_hc_t *_ifxhc,uint8_t *dump_buf);
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_ctrlbulk_rx_nonsplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _ifxhc->do_ping = 0;
+
+ if(_ifxhc->halt_status == HC_XFER_NAK)
+ {
+ if(_ifxhc->nak_retry_r)
+ {
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ _ifxhc->nak_retry--;
+ if(_ifxhc->nak_retry)
+ {
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, _ifxhc->halt_status);
+ }
+ }
+ else
+ {
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+
+ if (hcint.b.xfercomp)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ else if (hcint.b.stall)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ // ZLP shortcut
+ #if 0
+ if(hctsiz.b.pktcnt==0)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ #endif
+ {
+ // Stall FIFO compensation.
+ #if 0
+ int sz1,sz2;
+ sz2=_ifxhc->start_pkt_count - hctsiz.b.pktcnt;
+ sz2*=_ifxhc->mps;
+ sz1=_ifxhc->xfer_len - hctsiz.b.xfersize;
+ sz2-=sz1;
+ if(sz2)
+ ifxhcd_hc_dumb_rx(&_ifxhcd->core_if, _ifxhc,_ifxhc->epqh->dump_buf);
+ #endif
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ }
+ return 1;
+ }
+ else if (hcint.b.bblerr)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+
+ // ZLP shortcut
+ #if 0
+ if(hctsiz.b.pktcnt==0)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ #endif
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if (hcint.b.xacterr)
+ {
+ // ZLP shortcut
+ #if 1
+ if(hctsiz.b.pktcnt==0)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ #endif
+ {
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+
+ /* 20110803 AVM/WK FIX: Reset error count on any handshake */
+ if (hcint.b.nak || hcint.b.nyet || hcint.b.ack) {
+ _urbd->error_count = 1;
+ } else {
+ _urbd->error_count++;
+ }
+
+ if (_urbd->error_count >= 3)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ #if 1
+ if(_ifxhc->data_pid_start == IFXUSB_HC_PID_DATA0)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ #else
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ #endif
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK IN SPLIT0 FRMOVRUN [should be Period only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ else if(hcint.b.nyet )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK IN SPLIT0 NYET [should be Out only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_ctrlbulk_tx_nonsplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+#ifdef __DEBUG__
+static int first=0;
+#endif
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+#ifdef __DEBUG__
+if(!first&& _ifxhc->ep_type == IFXUSB_EP_TYPE_BULK
+ &&(hcint.b.stall || hcint.b.datatglerr || hcint.b.frmovrun || hcint.b.bblerr || hcint.b.xacterr) && !hcint.b.ack)
+{
+ showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ first=1;
+ printk(KERN_INFO " [%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X] \n"
+ ,*(_ifxhc->xfer_buff+ 0),*(_ifxhc->xfer_buff+ 1),*(_ifxhc->xfer_buff+ 2),*(_ifxhc->xfer_buff+ 3)
+ ,*(_ifxhc->xfer_buff+ 4),*(_ifxhc->xfer_buff+ 5),*(_ifxhc->xfer_buff+ 6),*(_ifxhc->xfer_buff+ 7)
+ ,*(_ifxhc->xfer_buff+ 8),*(_ifxhc->xfer_buff+ 9),*(_ifxhc->xfer_buff+10),*(_ifxhc->xfer_buff+11)
+ ,*(_ifxhc->xfer_buff+12),*(_ifxhc->xfer_buff+13),*(_ifxhc->xfer_buff+14),*(_ifxhc->xfer_buff+15));
+
+ printk(KERN_INFO " [_urbd->urb->actual_length:%08X _ifxhc->start_pkt_count:%08X hctsiz.b.pktcnt:%08X ,_urbd->xfer_len:%08x] \n"
+ ,_urbd->urb->actual_length
+ ,_ifxhc->start_pkt_count
+ ,hctsiz.b.pktcnt
+ ,_urbd->xfer_len);
+}
+#endif
+
+ if(_ifxhc->halt_status == HC_XFER_NAK)
+ {
+ if(_ifxhc->nak_retry_r)
+ {
+ _ifxhc->nak_retry--;
+ if(_ifxhc->nak_retry)
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, _ifxhc->halt_status);
+ }
+ }
+ else
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+
+ if (hcint.b.xfercomp)
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _urbd->error_count =0;
+ if(_ifxhc->xfer_len==0 && !hcint.b.ack && hcint.b.nak)
+ {
+			// Workaround: when sending a ZLP, a NAK may be reported together with the CMPT intr
+ // Solution: NoSplit: Resend at next SOF
+ // Split : Resend at next SOF with SSPLIT
+ if(hcint.b.nyet && !out_nak_enh)
+ _ifxhc->do_ping = 1;
+ else
+ _ifxhc->do_ping = 0;
+ _ifxhc->xfer_len = 0;
+ _ifxhc->xfer_count = 0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ return 1;
+ }
+ else if (hcint.b.stall)
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+
+ // ZLP shortcut
+ #if 1
+ if(hctsiz.b.pktcnt==0)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ #endif
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ }
+ return 1;
+ }
+ else if (hcint.b.xacterr)
+ {
+ // ZLP shortcut
+ #if 1
+ if(hctsiz.b.pktcnt==0)
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ #endif
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+
+ if (hcint.b.nak || hcint.b.nyet || hcint.b.ack)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =1;
+ enable_hc_int(_hc_regs,ack);
+ enable_hc_int(_hc_regs,nak);
+ enable_hc_int(_hc_regs,nyet);
+ if(!out_nak_enh)
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ {
+ _urbd->error_count ++ ;
+ if (_urbd->error_count == 3)
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ enable_hc_int(_hc_regs,ack);
+ enable_hc_int(_hc_regs,nak);
+ enable_hc_int(_hc_regs,nyet);
+ _ifxhc->wait_for_sof =1;
+ if(!out_nak_enh)
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ }
+ }
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK OUT SPLIT0 BABBLE [should be IN only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _ifxhc->do_ping = 0;
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.nak || hcint.b.nyet)
+ {
+ if(!out_nak_enh)
+ {
+ // ZLP shortcut
+ #if 1
+ if(hctsiz.b.pktcnt==0)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ #endif
+ {
+ if(!out_nak_enh)
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ if(_ifxhc->xfer_len!=0)
+ {
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ }
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ }
+ else if(hcint.b.datatglerr )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK OUT SPLIT0 DATATGLERR [should be IN only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK OUT SPLIT0 FRMOVRUN [should be PERIODIC only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_intr_rx_nonsplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _ifxhc->do_ping =0;
+
+ if(_ifxhc->halt_status == HC_XFER_NAK)
+ {
+ if(_ifxhc->nak_retry_r)
+ {
+ _ifxhc->nak_retry--;
+ if(_ifxhc->nak_retry)
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, _ifxhc->halt_status);
+ }
+ }
+ else
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+
+ if(hcint.b.xfercomp )
+ {
+ _urbd->error_count =0;
+ //restart INTR immediately
+ #if 1
+ if(hctsiz.b.pktcnt>0)
+ {
+ // TODO Re-initialize Channel (in next b_interval - 1 uF/F)
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ #endif
+ {
+ _ifxhc->wait_for_sof =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ return 1;
+ }
+ else if (hcint.b.stall)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+
+ // Don't care shortcut
+ #if 0
+ if(hctsiz.b.pktcnt==0)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ #endif
+ {
+ // Stall FIFO compensation.
+ #if 0
+ int sz1,sz2;
+ sz2=_ifxhc->start_pkt_count - hctsiz.b.pktcnt;
+ sz2*=_ifxhc->mps;
+ sz1=_ifxhc->xfer_len - hctsiz.b.xfersize;
+ sz2-=sz1;
+ if(sz2)
+ ifxhcd_hc_dumb_rx(&_ifxhcd->core_if, _ifxhc,_ifxhc->epqh->dump_buf);
+ #endif
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ }
+ return 1;
+ }
+
+
+ else if (hcint.b.bblerr)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+
+ // Don't care shortcut
+ #if 0
+ if(hctsiz.b.pktcnt==0)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ #endif
+ {
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ }
+ return 1;
+ }
+ else if (hcint.b.nak || hcint.b.datatglerr || hcint.b.frmovrun)
+ {
+ _urbd->error_count =0;
+ //restart INTR immediately
+ #if 1
+ if(hctsiz.b.pktcnt>0)
+ {
+ // TODO Re-initialize Channel (in next b_interval - 1 uF/F)
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ #endif
+ {
+ _ifxhc->wait_for_sof =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ return 1;
+ }
+ else if (hcint.b.xacterr)
+ {
+ // ZLP shortcut
+ #if 1
+ if(hctsiz.b.pktcnt==0)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ #endif
+ {
+ /* 20110803 AVM/WK FIX: Reset error count on any handshake */
+ if (hcint.b.nak || hcint.b.nyet || hcint.b.ack) {
+ _urbd->error_count = 1;
+ } else {
+ _urbd->error_count++;
+ }
+
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ }
+ return 1;
+ }
+ else if(hcint.b.nyet )
+ {
+IFX_WARN("%s() %d Warning INTR IN SPLIT0 NYET [should be OUT only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_intr_tx_nonsplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ if(_ifxhc->halt_status == HC_XFER_NAK)
+ {
+ if(_ifxhc->nak_retry_r)
+ {
+ _ifxhc->nak_retry--;
+ if(_ifxhc->nak_retry)
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, _ifxhc->halt_status);
+ }
+ }
+ else
+ {
+ if(_ifxhc->xfer_len!=0)
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+
+ if(hcint.b.xfercomp )
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _urbd->error_count =0;
+ //restart INTR immediately
+ #if 0
+ if(hctsiz.b.pktcnt>0)
+ {
+ // TODO Re-initialize Channel (in next b_interval - 1 uF/F)
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ if(hcint.b.nyet && !out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ #endif
+ {
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ return 1;
+ }
+ else if (hcint.b.stall)
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nyet);
+ disable_hc_int(_hc_regs,nak);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+
+ // Don't care shortcut
+ #if 0
+ if(hctsiz.b.pktcnt==0)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ #endif
+ {
+ if(_ifxhc->xfer_len!=0)// !_ifxhc->is_in
+ _urbd->urb->actual_length += ((_ifxhc->start_pkt_count - hctsiz.b.pktcnt ) * _ifxhc->mps);
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ }
+ return 1;
+ }
+ else if(hcint.b.nak || hcint.b.frmovrun )
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nyet);
+ disable_hc_int(_hc_regs,nak);
+ _urbd->error_count =0;
+ //restart INTR immediately
+ #if 0
+ if(hctsiz.b.pktcnt>0)
+ {
+ // TODO Re-initialize Channel (in next b_interval - 1 uF/F)
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ #endif
+ {
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ // ZLP shortcut
+ #if 1
+ if(hctsiz.b.pktcnt==0)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ #endif
+ {
+ /* 20110803 AVM/WK FIX: Reset error count on any handshake */
+ if (hcint.b.nak || hcint.b.nyet || hcint.b.ack) {
+ _urbd->error_count = 1;
+ } else {
+ _urbd->error_count++;
+ }
+
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ //_ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ //if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ _ifxhc->wait_for_sof=1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ }
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT0 BABBLEERR [should be IN only]\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT0 DATATGLERR\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_isoc_rx_nonsplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #if defined(__EN_ISOC__)
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ if (hcint.b.xfercomp || hcint.b.frmovrun)
+ {
+ _urbd->error_count=0;
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _ifxhc->wait_for_sof = 0;
+ if (hcint.b.xfercomp)
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ else
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ }
+ else if (hcint.b.xacterr || hcint.b.bblerr)
+ {
+ #ifndef VR9Skip
+ if(hctsiz.b.pktcnt==0)
+ {
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ {
+ int sz1,sz2;
+ sz2=_ifxhc->start_pkt_count - hctsiz.b.pktcnt;
+ sz2*=_ifxhc->mps;
+ sz1=_ifxhc->xfer_len - hctsiz.b.xfersize;
+ sz2-=sz1;
+ if(sz2)
+ ifxhcd_hc_dumb_rx(&_ifxhcd->core_if, _ifxhc,_ifxhc->epqh->dump_buf);
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+				release_channel(_ifxhcd, _ifxhc, hcint.b.bblerr ? HC_XFER_BABBLE_ERR : HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 1;
+ enable_hc_int(_hc_regs,ack);
+ enable_hc_int(_hc_regs,nak);
+ enable_hc_int(_hc_regs,nyet);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ }
+ #endif
+ }
+	else if(hcint.b.datatglerr )
+	{
+		IFX_WARN("%s() %d Warning ISOC IN SPLIT0 DATATGLERR\n",__func__,__LINE__);
+	}
+	else if(hcint.b.stall )
+	{
+		IFX_WARN("%s() %d Warning ISOC IN SPLIT0 STALL\n",__func__,__LINE__);
+	}
+ #else
+ #endif
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_isoc_tx_nonsplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #if defined(__EN_ISOC__)
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ if (hcint.b.xfercomp)
+ {
+ _urbd->error_count=0;
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _ifxhc->wait_for_sof = 0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ else if (hcint.b.frmovrun)
+ {
+ #ifndef VR9Skip
+ _urbd->error_count=0;
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ #endif
+ }
+	else if(hcint.b.datatglerr )
+	{
+		IFX_WARN("%s() %d Warning ISOC OUT SPLIT0 DATATGLERR\n",__func__,__LINE__);
+	}
+ else if(hcint.b.bblerr )
+ {
+ #ifndef VR9Skip
+ if(hctsiz.b.pktcnt==0)
+ {
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ else
+ {
+ int sz1,sz2;
+ sz2=_ifxhc->start_pkt_count - hctsiz.b.pktcnt;
+ sz2*=_ifxhc->mps;
+ sz1=_ifxhc->xfer_len - hctsiz.b.xfersize;
+ sz2-=sz1;
+ if(sz2)
+ ifxhcd_hc_dumb_rx(&_ifxhcd->core_if, _ifxhc,_ifxhc->epqh->dump_buf);
+ _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 1;
+ enable_hc_int(_hc_regs,ack);
+ enable_hc_int(_hc_regs,nak);
+ enable_hc_int(_hc_regs,nyet);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ }
+ #endif
+ }
+ else if(hcint.b.xacterr )
+ {
+ if(hctsiz.b.pktcnt==0)
+ {
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+	else if(hcint.b.stall )
+	{
+		IFX_WARN("%s() %d Warning ISOC OUT SPLIT0 STALL\n",__func__,__LINE__);
+	}
+ #else
+ #endif
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_ctrlbulk_rx_ssplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+ _ifxhc->do_ping =0;
+
+ if (hcint.b.ack)
+ {
+ _urbd->error_count=0;
+ _ifxhc->split=2;
+ _ifxhc->wait_for_sof = 8;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if (hcint.b.nak)
+ {
+ _ifxhc->wait_for_sof = 1;
+ _urbd->error_count = 0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if (hcint.b.xacterr)
+ {
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof =1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.stall )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK IN SPLIT1 HC_XFER_DATA_TOGGLE_ERR\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK IN SPLIT1 HC_XFER_FRAME_OVERRUN\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ else if(hcint.b.nyet )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK IN SPLIT1 NYET\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ }
+ else if(hcint.b.xfercomp )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK IN SPLIT1 COMPLETE\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+static int32_t chhltd_ctrlbulk_tx_ssplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+#ifdef __DEBUG__
+static int first=0;
+#endif
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+#ifdef __DEBUG__
+ if(!first&& _ifxhc->ep_type == IFXUSB_EP_TYPE_BULK
+ &&(hcint.b.stall || hcint.b.datatglerr || hcint.b.frmovrun || hcint.b.bblerr || hcint.b.xacterr) && !hcint.b.ack)
+ {
+ showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ first=1;
+ printk(KERN_INFO " [%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X] \n"
+ ,*(_ifxhc->xfer_buff+ 0),*(_ifxhc->xfer_buff+ 1),*(_ifxhc->xfer_buff+ 2),*(_ifxhc->xfer_buff+ 3)
+ ,*(_ifxhc->xfer_buff+ 4),*(_ifxhc->xfer_buff+ 5),*(_ifxhc->xfer_buff+ 6),*(_ifxhc->xfer_buff+ 7)
+ ,*(_ifxhc->xfer_buff+ 8),*(_ifxhc->xfer_buff+ 9),*(_ifxhc->xfer_buff+10),*(_ifxhc->xfer_buff+11)
+ ,*(_ifxhc->xfer_buff+12),*(_ifxhc->xfer_buff+13),*(_ifxhc->xfer_buff+14),*(_ifxhc->xfer_buff+15));
+
+ printk(KERN_INFO " [_urbd->urb->actual_length:%08X _ifxhc->start_pkt_count:%08X hctsiz.b.pktcnt:%08X ,_urbd->xfer_len:%08x] \n"
+ ,_urbd->urb->actual_length
+ ,_ifxhc->start_pkt_count
+ ,hctsiz.b.pktcnt
+ ,_urbd->xfer_len);
+ }
+#endif
+
+ if (hcint.b.ack )
+ {
+ _urbd->error_count=0;
+ if (_ifxhc->ep_type == IFXUSB_EP_TYPE_BULK || _ifxhc->control_phase != IFXHCD_CONTROL_SETUP)
+ _ifxhc->ssplit_out_xfer_count = _ifxhc->xfer_len;
+ _ifxhc->split=2;
+ _ifxhc->wait_for_sof =8;
+ _ifxhc->data_pid_start =read_data_toggle(_hc_regs);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nyet)
+ {
+IFX_WARN("%s() %d Warning CTRLBULK OUT SPLIT1 NYET\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count=0;
+ if (_ifxhc->ep_type == IFXUSB_EP_TYPE_BULK || _ifxhc->control_phase != IFXHCD_CONTROL_SETUP)
+ _ifxhc->ssplit_out_xfer_count = _ifxhc->xfer_len;
+ _ifxhc->split=2;
+ _ifxhc->wait_for_sof =1;
+ _ifxhc->data_pid_start =read_data_toggle(_hc_regs);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+ _ifxhc->wait_for_sof =1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _urbd->error_count =0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof =1;
+ _ifxhc->do_ping =1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.stall )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+IFX_WARN("%s() %d Warning CTRLBULK OUT SPLIT1 HC_XFER_FRAME_OVERRUN\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ _ifxhc->do_ping =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ else if(hcint.b.xfercomp )
+ {
+ printk(KERN_INFO "%s() %d Warning CTRLBULK OUT SPLIT1 COMPLETE\n",__func__,__LINE__);
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
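+/*
+ * Channel Halted handling for an interrupt IN Start Split.
+ * ACK schedules the Complete Split, NAK, frame overrun and recoverable
+ * transaction errors re-issue the Start Split after the endpoint interval,
+ * fatal errors release the channel.
+ */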
+static int32_t chhltd_intr_rx_ssplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+ _ifxhc->do_ping =0;
+
+ if (hcint.b.ack )
+ {
+ /*== AVM/BC 20100701 - Workaround FullSpeed Interrupts with HiSpeed Hub ==*/
+ _ifxhc->nyet_count=0;
+
+ _urbd->error_count=0;
+ _ifxhc->split=2;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ _urbd->error_count=0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = ifxusb_rreg(&_hc_regs->hcchar);
+ _urbd->error_count=hcchar.b.multicnt;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.stall )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+IFX_WARN( "%s() %d Warning INTR IN SPLIT1 DATATGLERR\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.xfercomp )
+ {
+IFX_WARN("%s() %d Warning INTR IN SPLIT1 COMPLETE\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
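+/*
+ * Channel Halted handling for an interrupt OUT Start Split.
+ * ACK and NYET advance the channel to the Complete Split phase, NAK, frame
+ * overrun and recoverable transaction errors re-issue the Start Split after
+ * the endpoint interval, fatal errors release the channel.
+ */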
+static int32_t chhltd_intr_tx_ssplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+ if (hcint.b.ack )
+ {
+ /*== AVM/BC 20100701 - Workaround FullSpeed Interrupts with HiSpeed Hub ==*/
+ _ifxhc->nyet_count=0;
+
+ _urbd->error_count=0;
+ _ifxhc->ssplit_out_xfer_count = _ifxhc->xfer_len;
+ _ifxhc->split=2;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nyet)
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT1 NYET\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count=0;
+ _ifxhc->ssplit_out_xfer_count = _ifxhc->xfer_len;
+ _ifxhc->split=2;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->data_pid_start = read_data_toggle(_hc_regs);
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ _urbd->error_count =0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = ifxusb_rreg(&_hc_regs->hcchar);
+ _urbd->error_count=hcchar.b.multicnt;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ enable_hc_int(_hc_regs,ack);
+ enable_hc_int(_hc_regs,nak);
+ enable_hc_int(_hc_regs,nyet);
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT1 DATATGLERR\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_DATA_TOGGLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT1 BABBLEERR\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.stall )
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT1 STALL\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof =0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ return 1;
+ }
+ else if(hcint.b.xfercomp )
+ {
+IFX_WARN("%s() %d Warning INTR OUT SPLIT1 COMPLETE\n",__func__,__LINE__);
+showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
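+/*
+ * Channel Halted handling for an isochronous IN Start Split. Only compiled
+ * with __EN_ISOC__ and __EN_ISOC_SPLIT__; the actions are still unimplemented
+ * placeholders (see the TODO comments below).
+ */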
+static int32_t chhltd_isoc_rx_ssplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #if defined(__EN_ISOC__) && defined(__EN_ISOC_SPLIT__)
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+	if (hcint.b.ack )
+	{
+		/* TODO: do the Complete Split */
+	}
+	else if(hcint.b.frmovrun )
+	{
+		/* TODO: rewind the buffer pointers and retry the Start Split
+		   (in the next b_interval - 1 uF) */
+	}
+	else if(hcint.b.datatglerr )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.bblerr )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.xacterr )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.stall )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.nak )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.xfercomp )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.nyet)
+	{
+		/* warning only */
+	}
+ #endif
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
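+/*
+ * Channel Halted handling for an isochronous OUT Start Split. Only compiled
+ * with __EN_ISOC__ and __EN_ISOC_SPLIT__; the actions are still unimplemented
+ * placeholders (see the TODO comments below).
+ */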
+static int32_t chhltd_isoc_tx_ssplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #if defined(__EN_ISOC__) && defined(__EN_ISOC_SPLIT__)
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+	if (hcint.b.ack )
+	{
+		/* TODO: do the next Start Split (in the next b_interval - 1 uF) */
+	}
+	else if(hcint.b.frmovrun )
+	{
+		/* TODO: do the next transaction in the next frame */
+	}
+	else if(hcint.b.datatglerr )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.bblerr )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.xacterr )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.stall )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.nak )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.xfercomp )
+	{
+		/* warning only */
+	}
+	else if(hcint.b.nyet)
+	{
+		/* warning only */
+	}
+ #endif
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
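+/*
+ * Channel Halted handling for a control/bulk IN Complete Split.
+ * Transfer Complete finishes the transfer, NYET retries the Complete Split on
+ * the next SOF, NAK, recoverable transaction errors and data toggle errors
+ * restart from a new Start Split, the remaining errors release the channel.
+ */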
+static int32_t chhltd_ctrlbulk_rx_csplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+ _ifxhc->do_ping = 0;
+
+ if (hcint.b.xfercomp)
+ {
+ _urbd->error_count =0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->split=1;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ else if (hcint.b.nak)
+ {
+ _urbd->error_count=0;
+
+ _ifxhc->split = 1;
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nyet)
+ {
+ _urbd->error_count=0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.stall || hcint.b.bblerr )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ if (hcint.b.stall)
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ else if(hcint.b.bblerr )
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+ if(_ifxhc->data_pid_start == IFXUSB_HC_PID_DATA0)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
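+/*
+ * Channel Halted handling for a control/bulk OUT Complete Split.
+ * Transfer Complete finishes the transfer, NYET retries the Complete Split on
+ * the next SOF, NAK, recoverable transaction errors and data toggle errors
+ * restart from a new Start Split (with PING unless out_nak_enh is set), the
+ * remaining errors release the channel.
+ */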
+static int32_t chhltd_ctrlbulk_tx_csplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+#if 1
+static int first=0;
+#endif
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+#if 1
+ if(!first&& _ifxhc->ep_type == IFXUSB_EP_TYPE_BULK
+ &&(hcint.b.stall || hcint.b.datatglerr || hcint.b.frmovrun || hcint.b.bblerr || hcint.b.xacterr) && !hcint.b.ack)
+ {
+ showint( hcint.d32,hcintmsk.d32,hctsiz.d32);
+ first=1;
+ printk(KERN_INFO " [%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X] \n"
+ ,*(_ifxhc->xfer_buff+ 0),*(_ifxhc->xfer_buff+ 1),*(_ifxhc->xfer_buff+ 2),*(_ifxhc->xfer_buff+ 3)
+ ,*(_ifxhc->xfer_buff+ 4),*(_ifxhc->xfer_buff+ 5),*(_ifxhc->xfer_buff+ 6),*(_ifxhc->xfer_buff+ 7)
+ ,*(_ifxhc->xfer_buff+ 8),*(_ifxhc->xfer_buff+ 9),*(_ifxhc->xfer_buff+10),*(_ifxhc->xfer_buff+11)
+ ,*(_ifxhc->xfer_buff+12),*(_ifxhc->xfer_buff+13),*(_ifxhc->xfer_buff+14),*(_ifxhc->xfer_buff+15));
+
+ printk(KERN_INFO " [_urbd->urb->actual_length:%08X _ifxhc->start_pkt_count:%08X hctsiz.b.pktcnt:%08X ,_urbd->xfer_len:%08x] \n"
+ ,_urbd->urb->actual_length
+ ,_ifxhc->start_pkt_count
+ ,hctsiz.b.pktcnt
+ ,_urbd->xfer_len);
+ }
+#endif
+
+ if(hcint.b.xfercomp )
+ {
+ _urbd->error_count=0;
+ _ifxhc->split=1;
+ _ifxhc->do_ping= 0;
+ #if 0
+ if(_ifxhc->xfer_len==0 && !hcint.b.ack && (hcint.b.nak || hcint.b.nyet))
+ {
+			// Workaround: when sending a ZLP and receiving NYET or NAK, the core also issues the CMPT intr
+ // Solution: NoSplit: Resend at next SOF
+ // Split : Resend at next SOF with SSPLIT
+ _ifxhc->xfer_len = 0;
+ _ifxhc->xfer_count = 0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ else
+ #endif
+ {
+ _ifxhc->wait_for_sof = 0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ }
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+ _urbd->error_count=0;
+
+ _ifxhc->split = 1;
+ _ifxhc->wait_for_sof = 1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nyet)
+ {
+ //Retry Complete Split
+		// Issue the retry instantly on the next SOF, without going through process_channels
+ _urbd->error_count=0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 1;
+ _ifxhc->do_ping = 0;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.stall )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ _urbd->error_count++;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = 1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+ if(_ifxhc->data_pid_start == IFXUSB_HC_PID_DATA0)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = 1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.frmovrun )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
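+/*
+ * Channel Halted handling for an interrupt IN Complete Split.
+ * Transfer Complete finishes the transfer, NYET retries the Complete Split
+ * (falling back to a new Start Split after three consecutive NYETs, the
+ * AVM/BC full-speed-behind-high-speed-hub workaround), NAK and recoverable
+ * errors restart from a Start Split, the remaining errors release the channel.
+ */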
+static int32_t chhltd_intr_rx_csplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _ifxhc->do_ping = 0;
+
+ if (hcint.b.xfercomp )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->split=1;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+ _urbd->error_count=0;
+ _ifxhc->split = 1;
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nyet)
+ {
+ _urbd->error_count=0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 0;
+
+ /*== AVM/BC 20100701 - Workaround FullSpeed Interrupts with HiSpeed Hub ==*/
+ _ifxhc->nyet_count++;
+ if(_ifxhc->nyet_count > 2) {
+ _ifxhc->split = 1;
+ _ifxhc->nyet_count = 0;
+ _ifxhc->wait_for_sof = 5;
+ }
+
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.frmovrun || hcint.b.bblerr || hcint.b.stall )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ if (hcint.b.stall)
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ else if(hcint.b.bblerr )
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ else if(hcint.b.frmovrun )
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = ifxusb_rreg(&_hc_regs->hcchar);
+ _urbd->error_count=hcchar.b.multicnt;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+ if(_ifxhc->data_pid_start == IFXUSB_HC_PID_DATA0)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
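+/*
+ * Channel Halted handling for an interrupt OUT Complete Split.
+ * Mirrors the IN case above: Transfer Complete finishes the transfer, NYET
+ * retries the Complete Split (with the same three-NYET fallback), NAK and
+ * recoverable errors restart from a Start Split, the remaining errors release
+ * the channel.
+ */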
+static int32_t chhltd_intr_tx_csplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+
+ if(hcint.b.xfercomp )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->split=1;
+ _ifxhc->do_ping = 0;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+ _urbd->error_count=0;
+ _ifxhc->split = 1;
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.nyet)
+ {
+ _urbd->error_count=0;
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+
+ /*== AVM/BC 20100701 - Workaround FullSpeed Interrupts with HiSpeed Hub ==*/
+ _ifxhc->nyet_count++;
+ if(_ifxhc->nyet_count > 2) {
+ _ifxhc->split = 1;
+ _ifxhc->nyet_count = 0;
+ _ifxhc->wait_for_sof = 5;
+ }
+
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.stall || hcint.b.frmovrun)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ if (hcint.b.stall)
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+ else if(hcint.b.frmovrun )
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+ return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = ifxusb_rreg(&_hc_regs->hcchar);
+ _urbd->error_count=hcchar.b.multicnt;
+ if(_urbd->error_count>=3)
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_XACT_ERR);
+ }
+ else
+ {
+ _ifxhc->split=1;
+ _ifxhc->wait_for_sof = _ifxhc->epqh->interval-1;
+ if(!_ifxhc->wait_for_sof) _ifxhc->wait_for_sof=1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ }
+ return 1;
+ }
+ else if(hcint.b.datatglerr )
+ {
+ if(_ifxhc->data_pid_start == IFXUSB_HC_PID_DATA0)
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA1;
+ else
+ _ifxhc->data_pid_start = IFXUSB_HC_PID_DATA0;
+ _ifxhc->split=1;
+ if(!out_nak_enh )
+ _ifxhc->do_ping =1;
+ else
+ _ifxhc->do_ping =0;
+ _ifxhc->xfer_len = _urbd->xfer_len - _urbd->urb->actual_length;
+ _ifxhc->xfer_count = _urbd->urb->actual_length;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.bblerr )
+ {
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->do_ping = 0;
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+ return 1;
+ }
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
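+/*
+ * Channel Halted handling for an isochronous IN Complete Split. Only compiled
+ * with __EN_ISOC__ and __EN_ISOC_SPLIT__; Transfer Complete, NYET and the
+ * error-release cases are handled, the remaining branches are placeholders
+ * (see the TODO comments below).
+ */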
+static int32_t chhltd_isoc_rx_csplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #if defined(__EN_ISOC__) && defined(__EN_ISOC_SPLIT__)
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ if(hcint.b.xfercomp )
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,nyet);
+ _urbd->error_count=0;
+ _ifxhc->wait_for_sof = 0;
+ _ifxhc->split=1;
+ complete_channel(_ifxhcd, _ifxhc, _urbd);
+ return 1;
+ }
+ else if(hcint.b.nak )
+ {
+		/* TODO: retry the Start Split (in the next b_interval - 1 uF) */
+ }
+ else if(hcint.b.nyet)
+ {
+ //Do Next Complete Split
+		// Issue the retry instantly on the next SOF, without going through process_channels
+ _urbd->error_count=0;
+ //disable_hc_int(_hc_regs,ack);
+ //disable_hc_int(_hc_regs,nak);
+ //disable_hc_int(_hc_regs,datatglerr);
+ _ifxhc->halt_status = HC_XFER_NO_HALT_STATUS;
+ _ifxhc->wait_for_sof = 1;
+ ifxhcd_hc_start(&_ifxhcd->core_if, _ifxhc);
+ return 1;
+ }
+ else if(hcint.b.frmovrun || hcint.b.stall || hcint.b.bblerr)
+ {
+ _urbd->error_count=0;
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nyet);
+ disable_hc_int(_hc_regs,nak);
+ _ifxhc->wait_for_sof = 0;
+
+ //if(hctsiz.b.pktcnt==0)
+ //{
+ // complete_channel(_ifxhcd, _ifxhc, _urbd);
+ // return 1;
+ //}
+ //else
+ // _urbd->urb->actual_length += (_ifxhc->xfer_len - hctsiz.b.xfersize);
+ if (hcint.b.stall)
+ release_channel(_ifxhcd, _ifxhc, HC_XFER_STALL);
+		else if(hcint.b.frmovrun )
+			release_channel(_ifxhcd, _ifxhc, HC_XFER_FRAME_OVERRUN);
+		else if(hcint.b.bblerr )
+			release_channel(_ifxhcd, _ifxhc, HC_XFER_BABBLE_ERR);
+		return 1;
+ }
+ else if(hcint.b.xacterr )
+ {
+		/* TODO: rewind the buffer pointers.
+		 * If HCCHARn.EC == 3 (ERR response received): record the ERR and
+		 * do the next Start Split (in the next frame).
+		 * Otherwise: de-allocate the channel.
+		 */
+ }
+ else if(hcint.b.datatglerr )
+ {
+		/* warning only */
+ }
+ else if(hcint.b.ack )
+ {
+		/* warning only */
+ }
+ #endif
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
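+/*
+ * Channel Halted handling for an isochronous OUT Complete Split. Only compiled
+ * with __EN_ISOC__ and __EN_ISOC_SPLIT__; still an unimplemented placeholder.
+ */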
+static int32_t chhltd_isoc_tx_csplit(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #if defined(__EN_ISOC__) && defined(__EN_ISOC_SPLIT__)
+ hcint_data_t hcint;
+ hcint_data_t hcintmsk;
+ hctsiz_data_t hctsiz;
+ int out_nak_enh = 0;
+
+ if (_ifxhcd->core_if.snpsid >= 0x4f54271a && _ifxhc->speed == IFXUSB_EP_SPEED_HIGH)
+ out_nak_enh = 1;
+
+ hcint.d32 = ifxusb_rreg(&_hc_regs->hcint);
+ hcintmsk.d32 = ifxusb_rreg(&_hc_regs->hcintmsk);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+	/* TODO: handle the Complete Split result for isochronous OUT */
+ #endif
+ return 0;
+}
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
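+/*
+ * Handles the Channel Halted interrupt. Dequeued URBs and AHB errors simply
+ * release the channel; everything else is dispatched to the per-transfer-type
+ * and per-split-phase handler above.
+ */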
+static int32_t handle_hc_chhltd_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: Channel Halted--\n", _ifxhc->hc_num);
+
+ _ifxhc->halting = 0;
+ _ifxhc->xfer_started = 0;
+
+ if (_ifxhc->halt_status == HC_XFER_URB_DEQUEUE ||
+ _ifxhc->halt_status == HC_XFER_AHB_ERR) {
+ /*
+ * Just release the channel. A dequeue can happen on a
+ * transfer timeout. In the case of an AHB Error, the channel
+ * was forced to halt because there's no way to gracefully
+ * recover.
+ */
+ release_channel(_ifxhcd, _ifxhc, _ifxhc->halt_status);
+ return 1;
+ }
+
+ if (_ifxhc->ep_type == IFXUSB_EP_TYPE_CTRL || _ifxhc->ep_type == IFXUSB_EP_TYPE_BULK)
+ {
+ if (_ifxhc->split==0)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_ctrlbulk_rx_nonsplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_ctrlbulk_tx_nonsplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ else if(_ifxhc->split==1)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_ctrlbulk_rx_ssplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_ctrlbulk_tx_ssplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ else if(_ifxhc->split==2)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_ctrlbulk_rx_csplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_ctrlbulk_tx_csplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ }
+ else if(_ifxhc->ep_type == IFXUSB_EP_TYPE_INTR)
+ {
+ if (_ifxhc->split==0)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_intr_rx_nonsplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_intr_tx_nonsplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ else if(_ifxhc->split==1)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_intr_rx_ssplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_intr_tx_ssplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ else if(_ifxhc->split==2)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_intr_rx_csplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_intr_tx_csplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ }
+ else if(_ifxhc->ep_type == IFXUSB_EP_TYPE_ISOC)
+ {
+ if (_ifxhc->split==0)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_isoc_rx_nonsplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_isoc_tx_nonsplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ else if(_ifxhc->split==1)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_isoc_rx_ssplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_isoc_tx_ssplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ else if(_ifxhc->split==2)
+ {
+ if(_ifxhc->is_in)
+ return (chhltd_isoc_rx_csplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ else
+ return (chhltd_isoc_tx_csplit(_ifxhcd,_ifxhc,_hc_regs,_urbd));
+ }
+ }
+ return 0;
+}
+
+/*
+ * Dumps the state of a host channel and of the URB it is servicing.
+ * Used by the error handlers below; only active in __DEBUG__ builds.
+ */
+static void hc_other_intr_dump(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ #ifdef __DEBUG__
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ hctsiz_data_t hctsiz;
+ uint32_t hcdma;
+ struct urb *urb = _urbd->urb;
+ hcchar.d32 = ifxusb_rreg(&_hc_regs->hcchar);
+ hcsplt.d32 = ifxusb_rreg(&_hc_regs->hcsplt);
+ hctsiz.d32 = ifxusb_rreg(&_hc_regs->hctsiz);
+ hcdma = ifxusb_rreg(&_hc_regs->hcdma);
+
+ IFX_ERROR("Channel %d\n", _ifxhc->hc_num);
+ IFX_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+ IFX_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+ IFX_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe));
+ IFX_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+ (usb_pipein(urb->pipe) ? "IN" : "OUT"));
+ IFX_ERROR(" Endpoint type: %s\n",
+ ({char *pipetype;
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL: pipetype = "CTRL"; break;
+ case PIPE_BULK: pipetype = "BULK"; break;
+ case PIPE_INTERRUPT: pipetype = "INTR"; break;
+ case PIPE_ISOCHRONOUS: pipetype = "ISOC"; break;
+ default: pipetype = "????"; break;
+ }; pipetype;}));
+ IFX_ERROR(" Speed: %s\n",
+ ({char *speed;
+ switch (urb->dev->speed) {
+ case USB_SPEED_HIGH: speed = "HS"; break;
+ case USB_SPEED_FULL: speed = "FS"; break;
+ case USB_SPEED_LOW: speed = "LS"; break;
+ default: speed = "????"; break;
+ }; speed;}));
+ IFX_ERROR(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+ IFX_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length);
+ IFX_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n",
+ urb->transfer_buffer, (void *)urb->transfer_dma);
+ IFX_ERROR(" Setup buffer: %p, Setup DMA: %p\n",
+ urb->setup_packet, (void *)urb->setup_dma);
+ IFX_ERROR(" Interval: %d\n", urb->interval);
+ #endif //__DEBUG__
+}
+
+/*
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * errors occur, and during Start Split transactions.
+ */
+static int32_t handle_hc_ack_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ _urbd->error_count=0;
+ if(_ifxhc->nak_countdown_r)
+ {
+ _ifxhc->nak_retry=_ifxhc->nak_retry_r;
+ _ifxhc->nak_countdown=_ifxhc->nak_countdown_r;
+ }
+ else
+ disable_hc_int(_hc_regs,nak);
+ disable_hc_int(_hc_regs,ack);
+ return 1;
+}
+
+/*
+ * Handles a host channel NAK interrupt. This interrupt is enabled when
+ * errors occur, and during Start Split transactions.
+ */
+static int32_t handle_hc_nak_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+
+ _urbd->error_count=0;
+
+ if(_ifxhc->nak_countdown_r)
+ {
+ _ifxhc->nak_countdown--;
+ if(!_ifxhc->nak_countdown)
+ {
+ _ifxhc->nak_countdown=_ifxhc->nak_countdown_r;
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ ifxhcd_hc_halt(&_ifxhcd->core_if, _ifxhc, HC_XFER_NAK);
+ }
+ else
+ enable_hc_int(_hc_regs,ack);
+ }
+ else
+ {
+ disable_hc_int(_hc_regs,ack);
+ disable_hc_int(_hc_regs,nak);
+ }
+ return 1;
+}
+
+/*
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static int32_t handle_hc_ahberr_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "AHB Error--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+
+ ifxhcd_hc_halt(&_ifxhcd->core_if, _ifxhc, HC_XFER_AHB_ERR);
+ return 1;
+}
+
+/*
+ * Handles a host channel Data Toggle error interrupt.
+ */
+static int32_t handle_hc_datatglerr_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "DATATOGGLE Error--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ disable_hc_int(_hc_regs,datatglerr);
+ return 1;
+}
+
+
+
+/*
+ * Interrupts which should not be triggered
+ */
+static int32_t handle_hc_frmovrun_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "FrameOverRun Error--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ disable_hc_int(_hc_regs,frmovrun);
+ return 1;
+}
+
+static int32_t handle_hc_bblerr_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "BBL Error--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ disable_hc_int(_hc_regs,bblerr);
+ return 1;
+}
+
+static int32_t handle_hc_xacterr_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "XACT Error--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ disable_hc_int(_hc_regs,xacterr);
+ return 1;
+}
+
+static int32_t handle_hc_nyet_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "NYET--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ _urbd->error_count=0;
+ disable_hc_int(_hc_regs,nyet);
+ return 1;
+}
+
+static int32_t handle_hc_stall_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "STALL--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ disable_hc_int(_hc_regs,stall);
+ return 1;
+}
+
+static int32_t handle_hc_xfercomp_intr(ifxhcd_hcd_t *_ifxhcd,
+ ifxhcd_hc_t *_ifxhc,
+ ifxusb_hc_regs_t *_hc_regs,
+ ifxhcd_urbd_t *_urbd)
+{
+ IFX_ERROR( "--Host Channel %d Interrupt: "
+ "XFERCOMP--\n", _ifxhc->hc_num);
+ hc_other_intr_dump(_ifxhcd,_ifxhc,_hc_regs,_urbd);
+ disable_hc_int(_hc_regs,xfercomp);
+ return 1;
+}
+
+
+
+/* This interrupt indicates that the specified host channel has a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for this
+ * host channel interrupt and handles them appropriately. */
+static int32_t handle_hc_n_intr (ifxhcd_hcd_t *_ifxhcd, uint32_t _num)
+{
+ uint32_t hcintval,hcintmsk;
+ hcint_data_t hcint;
+ ifxhcd_hc_t *ifxhc;
+ ifxusb_hc_regs_t *hc_regs;
+ ifxhcd_urbd_t *urbd;
+ unsigned long flags;
+
+ int retval = 0;
+
+ IFX_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", _num);
+
+ /*== AVM/BC 20101111 Lock needed ==*/
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+ ifxhc = &_ifxhcd->ifxhc[_num];
+ hc_regs = _ifxhcd->core_if.hc_regs[_num];
+
+ hcintval = ifxusb_rreg(&hc_regs->hcint);
+ hcintmsk = ifxusb_rreg(&hc_regs->hcintmsk);
+ hcint.d32 = hcintval & hcintmsk;
+ IFX_DEBUGPL(DBG_HCDV, " 0x%08x & 0x%08x = 0x%08x\n",
+ hcintval, hcintmsk, hcint.d32);
+
+ urbd = list_entry(ifxhc->epqh->urbd_list.next, ifxhcd_urbd_t, urbd_list_entry);
+
+ if (hcint.b.datatglerr)
+ retval |= handle_hc_datatglerr_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.frmovrun)
+ retval |= handle_hc_frmovrun_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.bblerr)
+ retval |= handle_hc_bblerr_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.xacterr)
+ retval |= handle_hc_xacterr_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.nyet)
+ retval |= handle_hc_nyet_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.ack)
+ retval |= handle_hc_ack_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.nak)
+ retval |= handle_hc_nak_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.stall)
+ retval |= handle_hc_stall_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ if (hcint.b.ahberr) {
+ clear_hc_int(hc_regs, ahberr);
+ retval |= handle_hc_ahberr_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ }
+ if (hcint.b.chhltd) {
+ /* == 20110901 AVM/WK Fix: Flag must not be cleared after restart of channel ==*/
+ clear_hc_int(hc_regs, chhltd);
+ retval |= handle_hc_chhltd_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+ }
+ if (hcint.b.xfercomp)
+ retval |= handle_hc_xfercomp_intr(_ifxhcd, ifxhc, hc_regs, urbd);
+
+ /* == 20110901 AVM/WK Fix: Never clear possibly new intvals ==*/
+ //ifxusb_wreg(&hc_regs->hcint,hcintval);
+
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+
+ return retval;
+}
+
+
+
+
+
+
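+/*
+ * Advances the (micro)frame counter of a periodic EPQH by _diff frames.
+ * Returns 1 and sets period_do when the endpoint interval has elapsed,
+ * reloading the counter for the next interval; returns 0 otherwise.
+ */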
+static uint8_t update_interval_counter(ifxhcd_epqh_t *_epqh,uint32_t _diff)
+{
+ if(_diff>=_epqh->period_counter)
+ {
+ _epqh->period_do=1;
+ if(_diff>_epqh->interval)
+ _epqh->period_counter=1;
+ else
+ _epqh->period_counter=_epqh->period_counter+_epqh->interval-_diff;
+ return 1;
+ }
+ _epqh->period_counter=_epqh->period_counter-_diff;
+ return 0;
+}
+
+
+
+
+/*
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller for the
+ * next (micro)frame.
+ */
+static int32_t handle_sof_intr (ifxhcd_hcd_t *_ifxhcd)
+{
+ #ifdef __DYN_SOF_INTR__
+ uint8_t with_count_down=0;
+ #endif
+ uint8_t active_on=0;
+ uint8_t ready_on=0;
+ struct list_head *epqh_entry;
+ ifxhcd_epqh_t *epqh;
+ hfnum_data_t hfnum;
+ uint32_t fndiff;
+
+ unsigned long flags;
+#ifdef __USE_TIMER_4_SOF__
+ uint32_t wait_for_sof = 0x10000;
+#endif
+
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+ {
+ int num_channels;
+ ifxusb_hc_regs_t *hc_regs;
+ int i;
+ num_channels = _ifxhcd->core_if.params.host_channels;
+
+// AVM/WK moved block here due to use of SOF timer
+ hfnum.d32 = ifxusb_rreg(&_ifxhcd->core_if.host_global_regs->hfnum);
+ fndiff = hfnum.b.frnum;
+ fndiff+= 0x00004000;
+ fndiff-= _ifxhcd->lastframe ;
+ fndiff&= 0x00003FFF;
+ if(!fndiff) fndiff =1;
+
+ for (i = 0; i < num_channels; i++)
+ {
+ if(_ifxhcd->ifxhc[i].wait_for_sof && _ifxhcd->ifxhc[i].xfer_started)
+ {
+#ifdef __USE_TIMER_4_SOF__
+ if (_ifxhcd->ifxhc[i].wait_for_sof > fndiff) {
+ _ifxhcd->ifxhc[i].wait_for_sof -= fndiff;
+ } else {
+ _ifxhcd->ifxhc[i].wait_for_sof = 0;
+ }
+#else
+ _ifxhcd->ifxhc[i].wait_for_sof--;
+#endif
+ if(_ifxhcd->ifxhc[i].wait_for_sof==0)
+ {
+ hcint_data_t hcint= { .d32=0 };
+ hc_regs = _ifxhcd->core_if.hc_regs[i];
+
+ hcint.d32 =0xFFFFFFFF;
+ ifxusb_wreg(&hc_regs->hcint, hcint.d32);
+
+ hcint.d32=ifxusb_rreg(&hc_regs->hcintmsk);
+ hcint.b.nak =0;
+ hcint.b.ack =0;
+ /* == 20110901 AVM/WK Fix: We don't need NOT YET IRQ ==*/
+ hcint.b.nyet=0;
+ _ifxhcd->ifxhc[i].nak_countdown=_ifxhcd->ifxhc[i].nak_countdown_r;
+ if(_ifxhcd->ifxhc[i].nak_countdown_r)
+ hcint.b.nak =1;
+ ifxusb_wreg(&hc_regs->hcintmsk, hcint.d32);
+
+ /* AVM WK / BC 20100827
+ * FIX: Packet was ignored because of wrong Oddframe bit
+ */
+ if (_ifxhcd->ifxhc[i].ep_type == IFXUSB_EP_TYPE_INTR || _ifxhcd->ifxhc[i].ep_type == IFXUSB_EP_TYPE_ISOC)
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = _ifxhcd->ifxhc[i].hcchar;
+ hfnum.d32 = ifxusb_rreg(&_ifxhcd->core_if.host_global_regs->hfnum);
+ /* 1 if _next_ frame is odd, 0 if it's even */
+ hcchar.b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
+ _ifxhcd->ifxhc[i].hcchar = hcchar.d32;
+ }
+
+ ifxusb_wreg(&hc_regs->hcchar, _ifxhcd->ifxhc[i].hcchar);
+
+ }
+ }
+ else
+ _ifxhcd->ifxhc[i].wait_for_sof=0;
+
+#ifdef __USE_TIMER_4_SOF__
+ if (_ifxhcd->ifxhc[i].wait_for_sof && (wait_for_sof > _ifxhcd->ifxhc[i].wait_for_sof)) {
+ wait_for_sof = _ifxhcd->ifxhc[i].wait_for_sof;
+ }
+#endif
+ }
+ }
+
+ // ISOC Active
+ #ifdef __EN_ISOC__
+ #error ISOC not supported: missing SOF code
+ epqh_entry = _ifxhcd->epqh_isoc_active.next;
+ while (epqh_entry != &_ifxhcd->epqh_isoc_active)
+ {
+ epqh = list_entry(epqh_entry, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_entry = epqh_entry->next;
+ #ifdef __DYN_SOF_INTR__
+ with_count_down=1;
+ #endif
+ active_on+=update_interval_counter(epqh,fndiff);
+ }
+
+ // ISOC Ready
+ epqh_entry = _ifxhcd->epqh_isoc_ready.next;
+ while (epqh_entry != &_ifxhcd->epqh_isoc_ready)
+ {
+ epqh = list_entry(epqh_entry, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_entry = epqh_entry->next;
+ #ifdef __DYN_SOF_INTR__
+ with_count_down=1;
+ #endif
+ ready_on+=update_interval_counter(epqh,fndiff);
+ }
+ #endif
+
+ // INTR Active
+ epqh_entry = _ifxhcd->epqh_intr_active.next;
+ while (epqh_entry != &_ifxhcd->epqh_intr_active)
+ {
+ epqh = list_entry(epqh_entry, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_entry = epqh_entry->next;
+ #ifdef __DYN_SOF_INTR__
+ with_count_down=1;
+ #endif
+#ifdef __USE_TIMER_4_SOF__
+ if (update_interval_counter(epqh,fndiff)) {
+ active_on ++;
+ wait_for_sof = 1;
+ } else {
+ if (epqh->period_counter && (wait_for_sof > epqh->period_counter)) {
+ wait_for_sof = epqh->period_counter;
+ }
+ }
+#else
+ active_on+=update_interval_counter(epqh,fndiff);
+#endif
+ }
+
+ // INTR Ready
+ epqh_entry = _ifxhcd->epqh_intr_ready.next;
+ while (epqh_entry != &_ifxhcd->epqh_intr_ready)
+ {
+ epqh = list_entry(epqh_entry, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_entry = epqh_entry->next;
+ #ifdef __DYN_SOF_INTR__
+ with_count_down=1;
+ #endif
+#ifdef __USE_TIMER_4_SOF__
+ if (update_interval_counter(epqh,fndiff)) {
+ ready_on ++;
+ wait_for_sof = 1;
+ } else {
+ if (epqh->period_counter && (wait_for_sof > epqh->period_counter)) {
+ wait_for_sof = epqh->period_counter;
+ }
+ }
+#else
+ ready_on+=update_interval_counter(epqh,fndiff);
+#endif
+ }
+
+ // Stdby
+ epqh_entry = _ifxhcd->epqh_stdby.next;
+ while (epqh_entry != &_ifxhcd->epqh_stdby)
+ {
+ epqh = list_entry(epqh_entry, ifxhcd_epqh_t, epqh_list_entry);
+ epqh_entry = epqh_entry->next;
+ if(epqh->period_counter > 0 ) {
+#ifdef __USE_TIMER_4_SOF__
+ if (epqh->period_counter > fndiff) {
+ epqh->period_counter -= fndiff;
+ } else {
+ epqh->period_counter = 0;
+ }
+#else
+ epqh->period_counter --;
+#endif
+ #ifdef __DYN_SOF_INTR__
+ with_count_down=1;
+ #endif
+ }
+ if(epqh->period_counter == 0) {
+ ifxhcd_epqh_idle_periodic(epqh);
+ }
+#ifdef __USE_TIMER_4_SOF__
+ else {
+ if (wait_for_sof > epqh->period_counter) {
+ wait_for_sof = epqh->period_counter;
+ }
+ }
+#endif
+ }
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+
+ if(ready_on)
+ select_eps(_ifxhcd);
+ else if(active_on)
+ process_channels(_ifxhcd);
+
+ /* Clear interrupt */
+ {
+ gint_data_t gintsts;
+ gintsts.d32=0;
+ gintsts.b.sofintr = 1;
+ ifxusb_wreg(&_ifxhcd->core_if.core_global_regs->gintsts, gintsts.d32);
+
+ #ifdef __DYN_SOF_INTR__
+ if(!with_count_down)
+ ifxusb_mreg(&_ifxhcd->core_if.core_global_regs->gintmsk, gintsts.d32,0);
+ #endif
+#ifdef __USE_TIMER_4_SOF__
+ wait_for_sof &= 0xFFFF; // reduce to 16 Bits.
+
+ if(wait_for_sof == 1) {
+ // enable SOF
+ gint_data_t gintsts;
+ gintsts.d32=0;
+ gintsts.b.sofintr = 1;
+ ifxusb_mreg(&_ifxhcd->core_if.core_global_regs->gintmsk, 0,gintsts.d32);
+ } else {
+ // disable SOF
+ ifxusb_mreg(&_ifxhcd->core_if.core_global_regs->gintmsk, gintsts.d32,0);
+ if (wait_for_sof > 1) {
+ // use timer, not SOF IRQ
+ hprt0_data_t hprt0;
+ ktime_t ktime;
+ hprt0.d32 = ifxusb_read_hprt0 (&_ifxhcd->core_if);
+ if (hprt0.b.prtspd == IFXUSB_HPRT0_PRTSPD_HIGH_SPEED) {
+ ktime = ktime_set(0, wait_for_sof * 125 * 1000); /*--- wakeup in n*125usec ---*/
+ } else {
+ ktime = ktime_set(0, wait_for_sof * (1000*1000)); /*--- wakeup in n*1000usec ---*/
+ }
+ hrtimer_start(&_ifxhcd->hr_timer, ktime, HRTIMER_MODE_REL);
+ }
+ }
+#endif
+ }
+ _ifxhcd->lastframe=hfnum.b.frnum;
+ return 1;
+}
+
+
+
+/* There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately. */
+static int32_t handle_port_intr (ifxhcd_hcd_t *_ifxhcd)
+{
+ int retval = 0;
+ hprt0_data_t hprt0;
+ hprt0_data_t hprt0_modify;
+
+ hprt0.d32 =
+ hprt0_modify.d32 = ifxusb_rreg(_ifxhcd->core_if.hprt0);
+
+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
+ * GINTSTS */
+
+ hprt0_modify.b.prtena = 0;
+ hprt0_modify.b.prtconndet = 0;
+ hprt0_modify.b.prtenchng = 0;
+ hprt0_modify.b.prtovrcurrchng = 0;
+
+ /* Port Connect Detected
+ * Set flag and clear if detected */
+ if (hprt0.b.prtconndet) {
+ IFX_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
+ "Port Connect Detected--\n", hprt0.d32);
+ _ifxhcd->flags.b.port_connect_status_change = 1;
+ _ifxhcd->flags.b.port_connect_status = 1;
+ hprt0_modify.b.prtconndet = 1;
+
+ /* The Hub driver asserts a reset when it sees port connect
+ * status change flag */
+ retval |= 1;
+ }
+
+ /* Port Enable Changed
+ * Clear if detected - Set internal flag if disabled */
+ if (hprt0.b.prtenchng) {
+
+ IFX_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Enable Changed--\n", hprt0.d32);
+ hprt0_modify.b.prtenchng = 1;
+ if (hprt0.b.prtena == 1)
+ /* Port has been enabled set the reset change flag */
+ _ifxhcd->flags.b.port_reset_change = 1;
+ else
+ _ifxhcd->flags.b.port_enable_change = 1;
+ retval |= 1;
+ }
+
+ /* Overcurrent Change Interrupt */
+
+ if (hprt0.b.prtovrcurrchng) {
+ IFX_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Overcurrent Changed--\n", hprt0.d32);
+ _ifxhcd->flags.b.port_over_current_change = 1;
+ hprt0_modify.b.prtovrcurrchng = 1;
+ retval |= 1;
+ }
+
+ /* Clear Port Interrupts */
+ ifxusb_wreg(_ifxhcd->core_if.hprt0, hprt0_modify.d32);
+ return retval;
+}
+
+/*
+ * This interrupt indicates that SUSPEND state has been detected on
+ * the USB.
+ * Not functional in Host Mode.
+ */
+static int32_t handle_usb_suspend_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ gint_data_t gintsts;
+ IFX_DEBUGP("USB SUSPEND RECEIVED!\n");
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.usbsuspend = 1;
+ ifxusb_wreg(&_ifxhcd->core_if.core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/*
+ * This interrupt indicates that the IFXUSB controller has detected a
+ * resume or remote wakeup sequence. If the IFXUSB controller is in
+ * low power mode, the handler must bring the controller out of low
+ * power mode. The controller automatically begins resume
+ * signaling. The handler schedules a time to stop resume signaling.
+ */
+static int32_t handle_wakeup_detected_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ gint_data_t gintsts;
+ hprt0_data_t hprt0 = {.d32=0};
+ pcgcctl_data_t pcgcctl = {.d32=0};
+ ifxusb_core_if_t *core_if = &_ifxhcd->core_if;
+
+ IFX_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n");
+
+ /*
+ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
+ * so that OPT tests pass with all PHYs).
+ */
+ /* Restart the Phy Clock */
+ pcgcctl.b.stoppclk = 1;
+ ifxusb_mreg(core_if->pcgcctl, pcgcctl.d32, 0);
+ UDELAY(10);
+
+ /* Now wait for 70 ms. */
+ hprt0.d32 = ifxusb_read_hprt0( core_if );
+ IFX_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32);
+ MDELAY(70);
+ hprt0.b.prtres = 0; /* Resume */
+ ifxusb_wreg(core_if->hprt0, hprt0.d32);
+ IFX_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", ifxusb_rreg(core_if->hprt0));
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.wkupintr = 1;
+ ifxusb_wreg(&core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/*
+ * This interrupt indicates that a device is initiating the Session
+ * Request Protocol to request the host to turn on bus power so a new
+ * session can begin. The handler responds by turning on bus power. If
+ * the DWC_otg controller is in low power mode, the handler brings the
+ * controller out of low power mode before turning on bus power.
+ */
+static int32_t handle_session_req_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ /* Clear interrupt */
+ gint_data_t gintsts = { .d32 = 0 };
+ gintsts.b.sessreqintr = 1;
+ ifxusb_wreg(&_ifxhcd->core_if.core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/*
+ * This interrupt indicates that a device has been disconnected from
+ * the root port.
+ */
+static int32_t handle_disconnect_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ gint_data_t gintsts;
+
+ ifxhcd_disconnect(_ifxhcd);
+
+ gintsts.d32 = 0;
+ gintsts.b.disconnect = 1;
+ ifxusb_wreg(&_ifxhcd->core_if.core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/*
+ * This function handles the Connector ID Status Change Interrupt. It
+ * reads the OTG Control Register (GOTGCTL) to determine whether this
+ * is a Device to Host Mode transition or a Host Mode to Device
+ * Transition.
+ * This only occurs when the cable is connected/removed from the PHY
+ * connector.
+ */
+static int32_t handle_conn_id_status_change_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ gint_data_t gintsts;
+
+ IFX_WARN("ID Status Change Interrupt: currently in %s mode\n",
+ ifxusb_mode(&_ifxhcd->core_if) ? "Host" : "Device");
+
+ gintsts.d32 = 0;
+ gintsts.b.conidstschng = 1;
+ ifxusb_wreg(&_ifxhcd->core_if.core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+static int32_t handle_otg_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ ifxusb_core_global_regs_t *global_regs = _ifxhcd->core_if.core_global_regs;
+ gotgint_data_t gotgint;
+ gotgint.d32 = ifxusb_rreg( &global_regs->gotgint);
+ /* Clear GOTGINT */
+ ifxusb_wreg (&global_regs->gotgint, gotgint.d32);
+ return 1;
+}
+
+/** This function will log a debug message */
+static int32_t handle_mode_mismatch_intr(ifxhcd_hcd_t *_ifxhcd)
+{
+ gint_data_t gintsts;
+
+ IFX_WARN("Mode Mismatch Interrupt: currently in %s mode\n",
+ ifxusb_mode(&_ifxhcd->core_if) ? "Host" : "Device");
+ gintsts.d32 = 0;
+ gintsts.b.modemismatch = 1;
+ ifxusb_wreg(&_ifxhcd->core_if.core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/** This function handles interrupts for the HCD. */
+int32_t ifxhcd_handle_intr (ifxhcd_hcd_t *_ifxhcd)
+{
+ int retval = 0;
+
+ ifxusb_core_if_t *core_if = &_ifxhcd->core_if;
+	/* AVM/BC 20101111 Unnecessary variable removed */
+ //gint_data_t gintsts,gintsts2;
+ gint_data_t gintsts;
+
+ /* Check if HOST Mode */
+ if (ifxusb_is_device_mode(core_if))
+ {
+ IFX_ERROR("%s() CRITICAL! IN DEVICE MODE\n", __func__);
+ return 0;
+ }
+
+ gintsts.d32 = ifxusb_read_core_intr(core_if);
+
+ if (!gintsts.d32)
+ return 0;
+
+ //Common INT
+ if (gintsts.b.modemismatch)
+ {
+ retval |= handle_mode_mismatch_intr(_ifxhcd);
+ gintsts.b.modemismatch=0;
+ }
+ if (gintsts.b.otgintr)
+ {
+ retval |= handle_otg_intr(_ifxhcd);
+ gintsts.b.otgintr=0;
+ }
+ if (gintsts.b.conidstschng)
+ {
+ retval |= handle_conn_id_status_change_intr(_ifxhcd);
+ gintsts.b.conidstschng=0;
+ }
+ if (gintsts.b.disconnect)
+ {
+ retval |= handle_disconnect_intr(_ifxhcd);
+ gintsts.b.disconnect=0;
+ }
+ if (gintsts.b.sessreqintr)
+ {
+ retval |= handle_session_req_intr(_ifxhcd);
+ gintsts.b.sessreqintr=0;
+ }
+ if (gintsts.b.wkupintr)
+ {
+ retval |= handle_wakeup_detected_intr(_ifxhcd);
+ gintsts.b.wkupintr=0;
+ }
+ if (gintsts.b.usbsuspend)
+ {
+ retval |= handle_usb_suspend_intr(_ifxhcd);
+ gintsts.b.usbsuspend=0;
+ }
+
+ //Host Int
+ if (gintsts.b.sofintr)
+ {
+ retval |= handle_sof_intr (_ifxhcd);
+ gintsts.b.sofintr=0;
+ }
+ if (gintsts.b.portintr)
+ {
+ retval |= handle_port_intr (_ifxhcd);
+ gintsts.b.portintr=0;
+ }
+ if (gintsts.b.hcintr)
+ {
+ int i;
+ haint_data_t haint;
+ haint.d32 = ifxusb_read_host_all_channels_intr(core_if);
+ for (i=0; i< core_if->params.host_channels; i++)
+ if (haint.b2.chint & (1 << i))
+ retval |= handle_hc_n_intr (_ifxhcd, i);
+ gintsts.b.hcintr=0;
+ }
+ return retval;
+}
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_queue.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_queue.c
new file mode 100644
index 0000000000..8f9dd25745
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxhcd_queue.c
@@ -0,0 +1,418 @@
+/*****************************************************************************
+ ** FILE NAME : ifxhcd_queue.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the functions to manage Queue Heads and Queue
+ ** Transfer Descriptors.
+ *****************************************************************************/
+
+/*!
+ \file ifxhcd_queue.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the functions to manage Queue Heads and Queue
+ Transfer Descriptors.
+*/
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+#include "ifxhcd.h"
+
+#ifdef __EPQD_DESTROY_TIMEOUT__
+ #define epqh_self_destroy_timeout 5
+ static void eqph_destroy_func(unsigned long _ptr)
+ {
+ ifxhcd_epqh_t *epqh=(ifxhcd_epqh_t *)_ptr;
+ if(epqh)
+ {
+ ifxhcd_epqh_free (epqh);
+ }
+ }
+#endif
+
+#define SCHEDULE_SLOP 10
+
+/*!
+ \brief This function allocates and initializes a EPQH.
+
+ \param _ifxhcd The HCD state structure for the USB Host controller.
+ \param[in] _urb Holds the information about the device/endpoint that we need
+ to initialize the EPQH.
+
+ \return Returns pointer to the newly allocated EPQH, or NULL on error.
+ */
+ifxhcd_epqh_t *ifxhcd_epqh_create (ifxhcd_hcd_t *_ifxhcd, struct urb *_urb)
+{
+ ifxhcd_epqh_t *epqh;
+
+ hprt0_data_t hprt0;
+ struct usb_host_endpoint *sysep = ifxhcd_urb_to_endpoint(_urb);
+
+ /* Allocate memory */
+// epqh=(ifxhcd_epqh_t *) kmalloc (sizeof(ifxhcd_epqh_t), GFP_KERNEL);
+ epqh=(ifxhcd_epqh_t *) kmalloc (sizeof(ifxhcd_epqh_t), GFP_ATOMIC);
+
+ if(epqh == NULL)
+ return NULL;
+
+ memset (epqh, 0, sizeof (ifxhcd_epqh_t));
+
+ epqh->sysep=sysep;
+
+ /* Initialize EPQH */
+ switch (usb_pipetype(_urb->pipe))
+ {
+ case PIPE_CONTROL : epqh->ep_type = IFXUSB_EP_TYPE_CTRL; break;
+ case PIPE_BULK : epqh->ep_type = IFXUSB_EP_TYPE_BULK; break;
+ case PIPE_ISOCHRONOUS: epqh->ep_type = IFXUSB_EP_TYPE_ISOC; break;
+ case PIPE_INTERRUPT : epqh->ep_type = IFXUSB_EP_TYPE_INTR; break;
+ }
+
+ //epqh->data_toggle = IFXUSB_HC_PID_DATA0;
+
+ epqh->mps = usb_maxpacket(_urb->dev, _urb->pipe, !(usb_pipein(_urb->pipe)));
+
+ hprt0.d32 = ifxusb_read_hprt0 (&_ifxhcd->core_if);
+
+ INIT_LIST_HEAD(&epqh->urbd_list);
+ INIT_LIST_HEAD(&epqh->epqh_list_entry);
+ epqh->hc = NULL;
+
+ epqh->dump_buf = ifxusb_alloc_buf(epqh->mps, 0);
+
+	/* FS/LS endpoint behind a HS hub,
+	 * NOT the virtual root hub */
+ epqh->need_split = 0;
+ epqh->pkt_count_limit=0;
+ if(epqh->ep_type == IFXUSB_EP_TYPE_BULK && !(usb_pipein(_urb->pipe)) )
+ epqh->pkt_count_limit=4;
+ if (hprt0.b.prtspd == IFXUSB_HPRT0_PRTSPD_HIGH_SPEED &&
+ ((_urb->dev->speed == USB_SPEED_LOW) ||
+ (_urb->dev->speed == USB_SPEED_FULL)) &&
+ (_urb->dev->tt) && (_urb->dev->tt->hub->devnum != 1))
+ {
+ IFX_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n",
+ usb_pipeendpoint(_urb->pipe), _urb->dev->tt->hub->devnum,
+ _urb->dev->ttport);
+ epqh->need_split = 1;
+ epqh->pkt_count_limit=1;
+ }
+
+ if (epqh->ep_type == IFXUSB_EP_TYPE_INTR ||
+ epqh->ep_type == IFXUSB_EP_TYPE_ISOC)
+ {
+ /* Compute scheduling parameters once and save them. */
+ epqh->interval = _urb->interval;
+ if(epqh->need_split)
+ epqh->interval *= 8;
+ }
+
+ epqh->period_counter=0;
+ epqh->is_active=0;
+
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ /* Start a timer for this transfer. */
+ init_timer(&epqh->destroy_timer);
+ epqh->destroy_timer.function = eqph_destroy_func;
+ epqh->destroy_timer.data = (unsigned long)(epqh);
+ #endif
+
+ #ifdef __DEBUG__
+ IFX_DEBUGPL(DBG_HCD , "IFXUSB HCD EPQH Initialized\n");
+ IFX_DEBUGPL(DBG_HCDV, "IFXUSB HCD EPQH - epqh = %p\n", epqh);
+ IFX_DEBUGPL(DBG_HCDV, "IFXUSB HCD EPQH - Device Address = %d EP %d, %s\n",
+ _urb->dev->devnum,
+ usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) == USB_DIR_IN ? "IN" : "OUT");
+ IFX_DEBUGPL(DBG_HCDV, "IFXUSB HCD EPQH - Speed = %s\n",
+ ({ char *speed; switch (_urb->dev->speed) {
+ case USB_SPEED_LOW: speed = "low" ; break;
+ case USB_SPEED_FULL: speed = "full"; break;
+ case USB_SPEED_HIGH: speed = "high"; break;
+ default: speed = "?"; break;
+ }; speed;}));
+ IFX_DEBUGPL(DBG_HCDV, "IFXUSB HCD EPQH - Type = %s\n",
+ ({
+ char *type; switch (epqh->ep_type)
+ {
+ case IFXUSB_EP_TYPE_ISOC: type = "isochronous"; break;
+ case IFXUSB_EP_TYPE_INTR: type = "interrupt" ; break;
+ case IFXUSB_EP_TYPE_CTRL: type = "control" ; break;
+ case IFXUSB_EP_TYPE_BULK: type = "bulk" ; break;
+ default: type = "?"; break;
+ };
+ type;
+ }));
+ if (epqh->ep_type == IFXUSB_EP_TYPE_INTR)
+ IFX_DEBUGPL(DBG_HCDV, "IFXUSB HCD EPQH - interval = %d\n", epqh->interval);
+ #endif
+
+ return epqh;
+}
+
+
+
+
+
+
+/*!
+ \brief Free the EPQH. EPQH should already be removed from a list.
+ URBD list should already be empty if called from URB Dequeue.
+
+ \param[in] _epqh The EPQH to free.
+ */
+void ifxhcd_epqh_free (ifxhcd_epqh_t *_epqh)
+{
+ unsigned long flags;
+
+	if(!_epqh)
+		return;
+
+	if(_epqh->sysep) _epqh->sysep->hcpriv=NULL;
+	_epqh->sysep=NULL;
+
+ /* Free each QTD in the QTD list */
+ local_irq_save (flags);
+ if (!list_empty(&_epqh->urbd_list))
+ IFX_WARN("%s() invalid epqh state\n",__func__);
+
+ #if defined(__UNALIGNED_BUFFER_ADJ__)
+ if(_epqh->aligned_buf)
+ ifxusb_free_buf(_epqh->aligned_buf);
+ if(_epqh->aligned_setup)
+ ifxusb_free_buf(_epqh->aligned_setup);
+ #endif
+
+ if (!list_empty(&_epqh->epqh_list_entry))
+ list_del_init(&_epqh->epqh_list_entry);
+
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ del_timer(&_epqh->destroy_timer);
+ #endif
+ if(_epqh->dump_buf)
+ ifxusb_free_buf(_epqh->dump_buf);
+ _epqh->dump_buf=0;
+
+
+ kfree (_epqh);
+ local_irq_restore (flags);
+}
+
+/*!
+ \brief This function puts an EPQH on the appropriate ready list (isochronous,
+        interrupt or non-periodic), or moves it there if it is already queued
+        but not active.
+ */
+void ifxhcd_epqh_ready(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (list_empty(&_epqh->epqh_list_entry))
+ {
+ #ifdef __EN_ISOC__
+ if (_epqh->ep_type == IFXUSB_EP_TYPE_ISOC)
+ list_add_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_isoc_ready);
+ else
+ #endif
+ if(_epqh->ep_type == IFXUSB_EP_TYPE_INTR)
+ list_add_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_intr_ready);
+ else
+ list_add_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_np_ready);
+ _epqh->is_active=0;
+ }
+ else if(!_epqh->is_active)
+ {
+ #ifdef __EN_ISOC__
+ if (_epqh->ep_type == IFXUSB_EP_TYPE_ISOC)
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_isoc_ready);
+ else
+ #endif
+ if(_epqh->ep_type == IFXUSB_EP_TYPE_INTR)
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_intr_ready);
+ else
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_np_ready);
+ }
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ del_timer(&_epqh->destroy_timer);
+ #endif
+ local_irq_restore(flags);
+}
+
+void ifxhcd_epqh_active(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (list_empty(&_epqh->epqh_list_entry))
+ IFX_WARN("%s() invalid epqh state\n",__func__);
+ #ifdef __EN_ISOC__
+ if (_epqh->ep_type == IFXUSB_EP_TYPE_ISOC)
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_isoc_active);
+ else
+ #endif
+ if(_epqh->ep_type == IFXUSB_EP_TYPE_INTR)
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_intr_active);
+ else
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_np_active);
+ _epqh->is_active=1;
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ del_timer(&_epqh->destroy_timer);
+ #endif
+ local_irq_restore(flags);
+}
+
+void ifxhcd_epqh_idle(ifxhcd_hcd_t *_ifxhcd, ifxhcd_epqh_t *_epqh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+
+ if (list_empty(&_epqh->urbd_list))
+ {
+ if(_epqh->ep_type == IFXUSB_EP_TYPE_ISOC || _epqh->ep_type == IFXUSB_EP_TYPE_INTR)
+ {
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_stdby);
+ }
+ else
+ {
+ list_del_init(&_epqh->epqh_list_entry);
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ del_timer(&_epqh->destroy_timer);
+ _epqh->destroy_timer.expires = jiffies + (HZ*epqh_self_destroy_timeout);
+ add_timer(&_epqh->destroy_timer );
+ #endif
+ }
+ }
+ else
+ {
+ #ifdef __EN_ISOC__
+ if (_epqh->ep_type == IFXUSB_EP_TYPE_ISOC)
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_isoc_ready);
+ else
+ #endif
+ if(_epqh->ep_type == IFXUSB_EP_TYPE_INTR)
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_intr_ready);
+ else
+ list_move_tail(&_epqh->epqh_list_entry, &_ifxhcd->epqh_np_ready);
+ }
+ _epqh->is_active=0;
+ local_irq_restore(flags);
+}
+
+
+void ifxhcd_epqh_idle_periodic(ifxhcd_epqh_t *_epqh)
+{
+ unsigned long flags;
+ if(_epqh->ep_type != IFXUSB_EP_TYPE_ISOC && _epqh->ep_type != IFXUSB_EP_TYPE_INTR)
+ return;
+
+ local_irq_save(flags);
+
+ if (list_empty(&_epqh->epqh_list_entry))
+ IFX_WARN("%s() invalid epqh state\n",__func__);
+ if (!list_empty(&_epqh->urbd_list))
+ IFX_WARN("%s() invalid epqh state(not empty)\n",__func__);
+
+ _epqh->is_active=0;
+ list_del_init(&_epqh->epqh_list_entry);
+ #ifdef __EPQD_DESTROY_TIMEOUT__
+ del_timer(&_epqh->destroy_timer);
+ _epqh->destroy_timer.expires = jiffies + (HZ*epqh_self_destroy_timeout);
+ add_timer(&_epqh->destroy_timer );
+ #endif
+
+ local_irq_restore(flags);
+}
+
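+/*
+   Lifecycle sketch (not compiled in): an EPQH is created on first use of an
+   endpoint, cycled through the ready/active lists while URBs are pending,
+   idled when a transfer completes, and freed once nothing references it.
+   The wrapper function below is illustrative only.
+*/
+#if 0
+static void epqh_lifecycle_sketch(ifxhcd_hcd_t *ifxhcd, struct urb *urb)
+{
+	ifxhcd_epqh_t *epqh = ifxhcd_epqh_create(ifxhcd, urb); /* allocate and initialize  */
+	if (!epqh)
+		return;
+	ifxhcd_epqh_ready(ifxhcd, epqh);   /* put on the matching *_ready list  */
+	ifxhcd_epqh_active(ifxhcd, epqh);  /* move to *_active while scheduled  */
+	ifxhcd_epqh_idle(ifxhcd, epqh);    /* back to ready/standby when done   */
+	ifxhcd_epqh_free(epqh);            /* release once urbd_list is empty   */
+}
+#endif
+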
+
+int ifxhcd_urbd_create (ifxhcd_hcd_t *_ifxhcd,struct urb *_urb)
+{
+ ifxhcd_urbd_t *urbd;
+ struct usb_host_endpoint *sysep;
+ ifxhcd_epqh_t *epqh;
+ unsigned long flags;
+ /* == AVM/WK 20100714 retval correctly initialized ==*/
+ int retval = -ENOMEM;
+
+ /*== AVM/BC 20100630 - Spinlock ==*/
+ //local_irq_save(flags);
+ SPIN_LOCK_IRQSAVE(&_ifxhcd->lock, flags);
+
+// urbd = (ifxhcd_urbd_t *) kmalloc (sizeof(ifxhcd_urbd_t), GFP_KERNEL);
+ urbd = (ifxhcd_urbd_t *) kmalloc (sizeof(ifxhcd_urbd_t), GFP_ATOMIC);
+ if (urbd != NULL) /* Initializes a QTD structure.*/
+ {
+ retval = 0;
+ memset (urbd, 0, sizeof (ifxhcd_urbd_t));
+
+ sysep = ifxhcd_urb_to_endpoint(_urb);
+ epqh = (ifxhcd_epqh_t *)sysep->hcpriv;
+ if (epqh == NULL)
+ {
+ epqh = ifxhcd_epqh_create (_ifxhcd, _urb);
+ if (epqh == NULL)
+ {
+ retval = -ENOSPC;
+ kfree(urbd);
+ //local_irq_restore (flags);
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+ return retval;
+ }
+ sysep->hcpriv = epqh;
+ }
+
+ INIT_LIST_HEAD(&urbd->urbd_list_entry);
+
+ /*== AVM/BC 20100630 - 2.6.28 needs HCD link/unlink URBs ==*/
+ retval = usb_hcd_link_urb_to_ep(ifxhcd_to_syshcd(_ifxhcd), _urb);
+
+ if (unlikely(retval)){
+ kfree(urbd);
+ kfree(epqh);
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+ return retval;
+ }
+
+ list_add_tail(&urbd->urbd_list_entry, &epqh->urbd_list);
+ urbd->urb = _urb;
+ _urb->hcpriv = urbd;
+
+ urbd->epqh=epqh;
+	urbd->is_in=usb_pipein(_urb->pipe) ? 1 : 0;
+
+ urbd->xfer_len=_urb->transfer_buffer_length;
+/* URB_NO_SETUP_DMA_MAP is defined to 0 here, so the setup packet is always
+   taken from _urb->setup_packet below. */
+#define URB_NO_SETUP_DMA_MAP 0
+
+ if(urbd->xfer_len>0)
+ {
+		if(_urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
+ urbd->xfer_buff = (uint8_t *) (KSEG1ADDR((uint32_t *)_urb->transfer_dma));
+ else
+ urbd->xfer_buff = (uint8_t *) _urb->transfer_buffer;
+ }
+ if(epqh->ep_type == IFXUSB_EP_TYPE_CTRL)
+ {
+		if(_urb->transfer_flags & URB_NO_SETUP_DMA_MAP)
+ urbd->setup_buff = (uint8_t *) (KSEG1ADDR((uint32_t *)_urb->setup_dma));
+ else
+ urbd->setup_buff = (uint8_t *) _urb->setup_packet;
+ }
+ }
+ //local_irq_restore (flags);
+ SPIN_UNLOCK_IRQRESTORE(&_ifxhcd->lock, flags);
+ return retval;
+}
+
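+/*
+   Enqueue-path sketch (not compiled in): a typical urb_enqueue callback would
+   create the URBD (and, on first use, the EPQH) and then mark the endpoint
+   ready so the scheduler can pick it up. The callback name is illustrative.
+*/
+#if 0
+static int urb_enqueue_sketch(ifxhcd_hcd_t *ifxhcd, struct urb *urb)
+{
+	int ret = ifxhcd_urbd_create(ifxhcd, urb);    /* allocate URBD, find or create EPQH */
+	if (ret)
+		return ret;
+	/* urb->hcpriv now points at the URBD set up above */
+	ifxhcd_epqh_ready(ifxhcd, ((ifxhcd_urbd_t *)urb->hcpriv)->epqh);
+	return 0;
+}
+#endif
+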
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.c
new file mode 100644
index 0000000000..10b12927cf
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.c
@@ -0,0 +1,1458 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_cif.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : The Core Interface provides basic services for accessing and
+ ** managing the IFX USB hardware. These services are used by both the
+ ** Host Controller Driver and the Peripheral Controller Driver.
+ *****************************************************************************/
+
+/*!
+ \file ifxusb_cif.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the interface to the IFX USB Core.
+*/
+
+#include <linux/clk.h>
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+
+#if defined(__UEIP__)
+// #include <asm/ifx/ifx_pmu.h>
+// #include <ifx_pmu.h>
+#endif
+
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+
+
+#ifdef __IS_DEVICE__
+ #include "ifxpcd.h"
+#endif
+
+#ifdef __IS_HOST__
+ #include "ifxhcd.h"
+#endif
+
+#include <linux/mm.h>
+
+#include <linux/gfp.h>
+
+#if defined(__UEIP__)
+// #include <asm/ifx/ifx_board.h>
+ //#include <ifx_board.h>
+#endif
+
+//#include <asm/ifx/ifx_gpio.h>
+//#include <ifx_gpio.h>
+#if defined(__UEIP__)
+// #include <asm/ifx/ifx_led.h>
+ //#include <ifx_led.h>
+#endif
+
+
+
+#if defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ #ifndef USB_CTRL_PMU_SETUP
+ #define USB_CTRL_PMU_SETUP(__x) USB0_CTRL_PMU_SETUP(__x)
+ #endif
+ #ifndef USB_PHY_PMU_SETUP
+ #define USB_PHY_PMU_SETUP(__x) USB0_PHY_PMU_SETUP(__x)
+ #endif
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+#endif // defined(__UEIP__)
+
+/*!
+ \brief This function allocates a buffer of the specified size and returns it
+        mapped to a DMA-accessible (uncached) address.
+ \param size Size in bytes to allocate
+ \param clear 0: do not clear the buffer after allocation, other: clear it to zero
+ \return NULL on failure, otherwise an uncached pointer to the allocated buffer
+ */
+void *ifxusb_alloc_buf(size_t size, int clear)
+{
+ uint32_t *cached,*uncached;
+ uint32_t totalsize,page;
+
+	if(!size)
+		return NULL;
+
+ size=(size+3)&0xFFFFFFFC;
+ totalsize=size + 12;
+ page=get_order(totalsize);
+
+ cached = (void *) __get_free_pages(( GFP_ATOMIC | GFP_DMA), page);
+
+ if(!cached)
+ {
+ IFX_PRINT("%s Allocation Failed size:%d\n",__func__,size);
+ return NULL;
+ }
+
+ uncached = (uint32_t *)(KSEG1ADDR(cached));
+ if(clear)
+ memset(uncached, 0, totalsize);
+
+ *(uncached+0)=totalsize;
+ *(uncached+1)=page;
+ *(uncached+2)=(uint32_t)cached;
+ return (void *)(uncached+3);
+}
+
+
+/*!
+ \brief This function is called to free allocated buffer.
+ \param vaddr the uncached pointer of the buffer
+ */
+void ifxusb_free_buf(void *vaddr)
+{
+ uint32_t totalsize,page;
+ uint32_t *cached,*uncached;
+
+ if(vaddr != NULL)
+ {
+ uncached=vaddr;
+ uncached-=3;
+ totalsize=*(uncached+0);
+ page=*(uncached+1);
+ cached=(uint32_t *)(*(uncached+2));
+ if(totalsize && page==get_order(totalsize) && cached==(uint32_t *)(KSEG0ADDR(uncached)))
+ {
+ free_pages((unsigned long)cached, page);
+ return;
+ }
+		// The memory was not allocated by ifxusb_alloc_buf(); this is tolerated, but nothing is freed here.
+ return;
+ }
+}
+
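+/*
+   Minimal usage sketch (not compiled in): ifxusb_alloc_buf() returns an
+   uncached (KSEG1) pointer; the three words stored in front of it are the
+   bookkeeping consumed by ifxusb_free_buf(). The size is an example value.
+*/
+#if 0
+static void buf_usage_sketch(void)
+{
+	void *buf = ifxusb_alloc_buf(512, 1);   /* 512 bytes, zeroed             */
+	if (!buf)
+		return;
+	/* ... hand the buffer to the hardware ... */
+	ifxusb_free_buf(buf);                   /* releases the underlying pages */
+}
+#endif
+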
+
+
+/*!
+ \brief This function is called to initialize the IFXUSB CSR data
+ structures. The register addresses in the device and host
+ structures are initialized from the base address supplied by the
+ caller. The calling function must make the OS calls to get the
+ base address of the IFXUSB controller registers.
+
+ \param _core_if Pointer of core_if structure
+ \param _irq irq number
+ \param _reg_base_addr Base address of IFXUSB core registers
+ \param _fifo_base_addr Fifo base address
+ \param _fifo_dbg_addr Fifo debug address
+ \return 0: success;
+ */
+int ifxusb_core_if_init(ifxusb_core_if_t *_core_if,
+ int _irq,
+ uint32_t _reg_base_addr,
+ uint32_t _fifo_base_addr,
+ uint32_t _fifo_dbg_addr)
+{
+ int retval = 0;
+ uint32_t *reg_base =NULL;
+ uint32_t *fifo_base =NULL;
+ uint32_t *fifo_dbg =NULL;
+
+ int i;
+
+ IFX_DEBUGPL(DBG_CILV, "%s(%p,%d,0x%08X,0x%08X,0x%08X)\n", __func__,
+ _core_if,
+ _irq,
+ _reg_base_addr,
+ _fifo_base_addr,
+ _fifo_dbg_addr);
+
+ if( _core_if == NULL)
+ {
+ IFX_ERROR("%s() invalid _core_if\n", __func__);
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ //memset(_core_if, 0, sizeof(ifxusb_core_if_t));
+
+ _core_if->irq=_irq;
+
+ reg_base =ioremap_nocache(_reg_base_addr , IFXUSB_IOMEM_SIZE );
+ fifo_base =ioremap_nocache(_fifo_base_addr, IFXUSB_FIFOMEM_SIZE);
+ fifo_dbg =ioremap_nocache(_fifo_dbg_addr , IFXUSB_FIFODBG_SIZE);
+ if( reg_base == NULL || fifo_base == NULL || fifo_dbg == NULL)
+ {
+ IFX_ERROR("%s() usb ioremap() failed\n", __func__);
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ _core_if->core_global_regs = (ifxusb_core_global_regs_t *)reg_base;
+
+	/*
+	 * Attempt to ensure this device is really an IFXUSB controller.
+	 * Read and verify the SNPSID register contents. The value should be
+	 * 0x4F542XXX, as checked below.
+	 */
+ {
+ int32_t snpsid;
+ snpsid = ifxusb_rreg(&_core_if->core_global_regs->gsnpsid);
+ if ((snpsid & 0xFFFFF000) != 0x4F542000)
+ {
+ IFX_ERROR("%s() snpsid error(0x%08x) failed\n", __func__,snpsid);
+ retval = -EINVAL;
+ goto fail;
+ }
+ _core_if->snpsid=snpsid;
+ }
+
+ #ifdef __IS_HOST__
+ _core_if->host_global_regs = (ifxusb_host_global_regs_t *)
+ ((uint32_t)reg_base + IFXUSB_HOST_GLOBAL_REG_OFFSET);
+ _core_if->hprt0 = (uint32_t*)((uint32_t)reg_base + IFXUSB_HOST_PORT_REGS_OFFSET);
+
+ for (i=0; i<MAX_EPS_CHANNELS; i++)
+ {
+ _core_if->hc_regs[i] = (ifxusb_hc_regs_t *)
+ ((uint32_t)reg_base + IFXUSB_HOST_CHAN_REGS_OFFSET +
+ (i * IFXUSB_CHAN_REGS_OFFSET));
+ IFX_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
+ i, &_core_if->hc_regs[i]->hcchar);
+ }
+ #endif //__IS_HOST__
+
+ #ifdef __IS_DEVICE__
+ _core_if->dev_global_regs =
+ (ifxusb_device_global_regs_t *)((uint32_t)reg_base + IFXUSB_DEV_GLOBAL_REG_OFFSET);
+
+ for (i=0; i<MAX_EPS_CHANNELS; i++)
+ {
+ _core_if->in_ep_regs[i] = (ifxusb_dev_in_ep_regs_t *)
+ ((uint32_t)reg_base + IFXUSB_DEV_IN_EP_REG_OFFSET +
+ (i * IFXUSB_EP_REG_OFFSET));
+ _core_if->out_ep_regs[i] = (ifxusb_dev_out_ep_regs_t *)
+ ((uint32_t)reg_base + IFXUSB_DEV_OUT_EP_REG_OFFSET +
+ (i * IFXUSB_EP_REG_OFFSET));
+ IFX_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p/%p %p/0x%08X/0x%08X\n",
+ i, &_core_if->in_ep_regs[i]->diepctl, _core_if->in_ep_regs[i],
+ reg_base,IFXUSB_DEV_IN_EP_REG_OFFSET,(i * IFXUSB_EP_REG_OFFSET)
+ );
+ IFX_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p/%p %p/0x%08X/0x%08X\n",
+ i, &_core_if->out_ep_regs[i]->doepctl, _core_if->out_ep_regs[i],
+ reg_base,IFXUSB_DEV_OUT_EP_REG_OFFSET,(i * IFXUSB_EP_REG_OFFSET)
+ );
+ }
+ #endif //__IS_DEVICE__
+
+ /* Setting the FIFO and other Address. */
+ for (i=0; i<MAX_EPS_CHANNELS; i++)
+ {
+ _core_if->data_fifo[i] = fifo_base + (i * IFXUSB_DATA_FIFO_SIZE);
+ IFX_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n",
+ i, (unsigned)_core_if->data_fifo[i]);
+ }
+
+ _core_if->data_fifo_dbg = fifo_dbg;
+ _core_if->pcgcctl = (uint32_t*)(((uint32_t)reg_base) + IFXUSB_PCGCCTL_OFFSET);
+
+ /*
+ * Store the contents of the hardware configuration registers here for
+ * easy access later.
+ */
+ _core_if->hwcfg1.d32 = ifxusb_rreg(&_core_if->core_global_regs->ghwcfg1);
+ _core_if->hwcfg2.d32 = ifxusb_rreg(&_core_if->core_global_regs->ghwcfg2);
+ _core_if->hwcfg3.d32 = ifxusb_rreg(&_core_if->core_global_regs->ghwcfg3);
+ _core_if->hwcfg4.d32 = ifxusb_rreg(&_core_if->core_global_regs->ghwcfg4);
+
+ IFX_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",_core_if->hwcfg1.d32);
+ IFX_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",_core_if->hwcfg2.d32);
+ IFX_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",_core_if->hwcfg3.d32);
+ IFX_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",_core_if->hwcfg4.d32);
+
+
+ #ifdef __DED_FIFO__
+ IFX_PRINT("Waiting for PHY Clock Lock!\n");
+ while(!( ifxusb_rreg(&_core_if->core_global_regs->grxfsiz) & (1<<9)))
+ {
+ }
+ IFX_PRINT("PHY Clock Locked!\n");
+ //ifxusb_clean_spram(_core_if,128*1024/4);
+ #endif
+
+ /* Create new workqueue and init works */
+#if 0
+ _core_if->wq_usb = create_singlethread_workqueue(_core_if->core_name);
+
+ if(_core_if->wq_usb == 0)
+ {
+ IFX_DEBUGPL(DBG_CIL, "Creation of wq_usb failed\n");
+ retval = -EINVAL;
+ goto fail;
+ }
+
+ #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change, core_if);
+ INIT_WORK(&core_if->w_wkp, w_wakeup_detected, core_if);
+ #else
+ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change);
+ INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected);
+ #endif
+#endif
+ return 0;
+
+fail:
+ if( reg_base != NULL) iounmap(reg_base );
+ if( fifo_base != NULL) iounmap(fifo_base);
+ if( fifo_dbg != NULL) iounmap(fifo_dbg );
+ return retval;
+}
+
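+/*
+   Call sketch (not compiled in): the register, FIFO and FIFO-debug base
+   addresses are platform constants normally taken from ifxusb_plat.h; the
+   numbers below are placeholders only, not real addresses or IRQ lines.
+*/
+#if 0
+static int core_if_init_sketch(ifxusb_core_if_t *core_if)
+{
+	return ifxusb_core_if_init(core_if,
+	                           54,          /* placeholder IRQ number      */
+	                           0x10000000,  /* placeholder register base   */
+	                           0x10010000,  /* placeholder FIFO base       */
+	                           0x10020000); /* placeholder FIFO debug base */
+}
+#endif
+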
+/*!
+ \brief This function free the mapped address in the IFXUSB CSR data structures.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_core_if_remove(ifxusb_core_if_t *_core_if)
+{
+ /* Disable all interrupts */
+ if( _core_if->core_global_regs != NULL)
+ {
+ ifxusb_mreg( &_core_if->core_global_regs->gahbcfg, 1, 0);
+ ifxusb_wreg( &_core_if->core_global_regs->gintmsk, 0);
+ }
+
+ if( _core_if->core_global_regs != NULL) iounmap(_core_if->core_global_regs );
+ if( _core_if->data_fifo[0] != NULL) iounmap(_core_if->data_fifo[0] );
+ if( _core_if->data_fifo_dbg != NULL) iounmap(_core_if->data_fifo_dbg );
+
+#if 0
+ if (_core_if->wq_usb)
+ destroy_workqueue(_core_if->wq_usb);
+#endif
+ memset(_core_if, 0, sizeof(ifxusb_core_if_t));
+}
+
+
+
+
+/*!
+ \brief This function enables the controller's Global Interrupt in the AHB Config register.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_enable_global_interrupts( ifxusb_core_if_t *_core_if )
+{
+ gahbcfg_data_t ahbcfg ={ .d32 = 0};
+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
+ ifxusb_mreg(&_core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
+}
+
+/*!
+ \brief This function disables the controller's Global Interrupt in the AHB Config register.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_disable_global_interrupts( ifxusb_core_if_t *_core_if )
+{
+ gahbcfg_data_t ahbcfg ={ .d32 = 0};
+	ahbcfg.b.glblintrmsk = 1; /* Bit to clear, disabling interrupts */
+ ifxusb_mreg(&_core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+}
+
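+/*
+   Usage sketch (not compiled in): the global interrupt gate is normally kept
+   closed while the core is (re)configured and only re-opened once the
+   per-source masks in GINTMSK have been programmed.
+*/
+#if 0
+static void irq_gate_sketch(ifxusb_core_if_t *core_if)
+{
+	ifxusb_disable_global_interrupts(core_if);  /* close the gate during setup */
+	/* ... program GINTMSK and the other core registers ... */
+	ifxusb_enable_global_interrupts(core_if);   /* re-open the gate            */
+}
+#endif
+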
+
+
+
+/*!
+ \brief Flush Tx and Rx FIFO.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_flush_both_fifo( ifxusb_core_if_t *_core_if )
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset ={ .d32 = 0};
+ int count = 0;
+
+ IFX_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__);
+ greset.b.rxfflsh = 1;
+ greset.b.txfflsh = 1;
+ greset.b.txfnum = 0x10;
+ greset.b.intknqflsh=1;
+ greset.b.hstfrm=1;
+ ifxusb_wreg( &global_regs->grstctl, greset.d32 );
+
+ do
+ {
+ greset.d32 = ifxusb_rreg( &global_regs->grstctl);
+ if (++count > 10000)
+ {
+ IFX_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, greset.d32);
+ break;
+ }
+ } while (greset.b.rxfflsh == 1 || greset.b.txfflsh == 1);
+ /* Wait for 3 PHY Clocks*/
+ UDELAY(1);
+}
+
+/*!
+ \brief Flush a Tx FIFO.
+ \param _core_if Pointer of core_if structure
+ \param _num Tx FIFO number to flush (0x10 flushes all Tx FIFOs)
+ */
+void ifxusb_flush_tx_fifo( ifxusb_core_if_t *_core_if, const int _num )
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset ={ .d32 = 0};
+ int count = 0;
+
+ IFX_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", _num);
+
+ greset.b.intknqflsh=1;
+ greset.b.txfflsh = 1;
+ greset.b.txfnum = _num;
+ ifxusb_wreg( &global_regs->grstctl, greset.d32 );
+
+ do
+ {
+ greset.d32 = ifxusb_rreg( &global_regs->grstctl);
+		if (++count > 10000 && (_num==0 || _num==0x10))
+ {
+ IFX_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
+ __func__, greset.d32,
+ ifxusb_rreg( &global_regs->gnptxsts));
+ break;
+ }
+ } while (greset.b.txfflsh == 1);
+ /* Wait for 3 PHY Clocks*/
+ UDELAY(1);
+}
+
+
+/*!
+ \brief Flush Rx FIFO.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_flush_rx_fifo( ifxusb_core_if_t *_core_if )
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset ={ .d32 = 0};
+ int count = 0;
+
+ IFX_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__);
+ greset.b.rxfflsh = 1;
+ ifxusb_wreg( &global_regs->grstctl, greset.d32 );
+
+ do
+ {
+ greset.d32 = ifxusb_rreg( &global_regs->grstctl);
+ if (++count > 10000)
+ {
+ IFX_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, greset.d32);
+ break;
+ }
+ } while (greset.b.rxfflsh == 1);
+ /* Wait for 3 PHY Clocks*/
+ UDELAY(1);
+}
+
+
+#define SOFT_RESET_DELAY 100
+
+/*!
+ \brief Do a soft reset of the core. Be careful with this because it
+ resets all the internal state machines of the core.
+ \param _core_if Pointer of core_if structure
+ */
+int ifxusb_core_soft_reset(ifxusb_core_if_t *_core_if)
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+ volatile grstctl_t greset ={ .d32 = 0};
+ int count = 0;
+
+ IFX_DEBUGPL(DBG_CILV, "%s\n", __func__);
+ /* Wait for AHB master IDLE state. */
+ do
+ {
+ UDELAY(10);
+ greset.d32 = ifxusb_rreg( &global_regs->grstctl);
+ if (++count > 100000)
+ {
+ IFX_WARN("%s() HANG! AHB Idle GRSTCTL=%0x %x\n", __func__,
+ greset.d32, greset.b.ahbidle);
+ break;
+ }
+ } while (greset.b.ahbidle == 0);
+
+ UDELAY(1);
+
+ /* Core Soft Reset */
+ count = 0;
+ greset.b.csftrst = 1;
+ ifxusb_wreg( &global_regs->grstctl, greset.d32 );
+
+ #ifdef SOFT_RESET_DELAY
+ MDELAY(SOFT_RESET_DELAY);
+ #endif
+
+ do
+ {
+ UDELAY(10);
+ greset.d32 = ifxusb_rreg( &global_regs->grstctl);
+ if (++count > 100000)
+ {
+ IFX_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, greset.d32);
+ return -1;
+ }
+ } while (greset.b.csftrst == 1);
+
+ #ifdef SOFT_RESET_DELAY
+ MDELAY(SOFT_RESET_DELAY);
+ #endif
+
+
+ #if defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ {
+ set_bit (4, VR9_RCU_USBRESET2);
+ MDELAY(50);
+ clear_bit (4, VR9_RCU_USBRESET2);
+ }
+ else
+ {
+ set_bit (5, VR9_RCU_USBRESET2);
+ MDELAY(50);
+ clear_bit (5, VR9_RCU_USBRESET2);
+ }
+ MDELAY(50);
+ #endif //defined(__IS_VR9__)
+
+ IFX_PRINT("USB core #%d soft-reset\n",_core_if->core_no);
+
+ return 0;
+}
+
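+/*
+   Reset sequence sketch (not compiled in): after a successful core soft
+   reset the FIFOs are typically flushed before the core is reprogrammed.
+*/
+#if 0
+static void core_reset_sketch(ifxusb_core_if_t *core_if)
+{
+	if (ifxusb_core_soft_reset(core_if))
+		return;                          /* reset timed out           */
+	ifxusb_flush_both_fifo(core_if);         /* drain the Tx and Rx FIFOs */
+}
+#endif
+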
+/*!
+ \brief Turn on the USB Core Power
+ \param _core_if Pointer of core_if structure
+*/
+void ifxusb_power_on (ifxusb_core_if_t *_core_if)
+{
+ struct clk *clk0 = clk_get_sys("usb0", NULL);
+ struct clk *clk1 = clk_get_sys("usb1", NULL);
+ // set clock gating
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ #if defined(__UEIP__)
+
+		#if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (4, (volatile unsigned long *)DANUBE_CGU_IFCCR);
+ set_bit (5, (volatile unsigned long *)DANUBE_CGU_IFCCR);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ // clear_bit (4, (volatile unsigned long *)AMAZON_SE_CGU_IFCCR);
+ clear_bit (5, (volatile unsigned long *)AMAZON_SE_CGU_IFCCR);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ set_bit (0, (volatile unsigned long *)AR9_CGU_IFCCR);
+ set_bit (1, (volatile unsigned long *)AR9_CGU_IFCCR);
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+// set_bit (0, (volatile unsigned long *)VR9_CGU_IFCCR);
+// set_bit (1, (volatile unsigned long *)VR9_CGU_IFCCR);
+ #endif //defined(__IS_VR9__)
+
+ MDELAY(50);
+
+ // set power
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ USB_CTRL_PMU_SETUP(IFX_PMU_ENABLE);
+ //#if defined(__IS_TWINPASS__)
+ // ifxusb_enable_afe_oc();
+ //#endif
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__) || defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ clk_enable(clk0);
+// USB0_CTRL_PMU_SETUP(IFX_PMU_ENABLE);
+ else
+ clk_enable(clk1);
+// USB1_CTRL_PMU_SETUP(IFX_PMU_ENABLE);
+ #endif //defined(__IS_AR9__) || defined(__IS_VR9__)
+
+ if(_core_if->core_global_regs)
+ {
+ // PHY configurations.
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+ //ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_VR9__)
+ }
+ #else //defined(__UEIP__)
+		#if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (4, (volatile unsigned long *)DANUBE_CGU_IFCCR);
+ set_bit (5, (volatile unsigned long *)DANUBE_CGU_IFCCR);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ // clear_bit (4, (volatile unsigned long *)AMAZON_SE_CGU_IFCCR);
+ clear_bit (5, (volatile unsigned long *)AMAZON_SE_CGU_IFCCR);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ set_bit (0, (volatile unsigned long *)AMAZON_S_CGU_IFCCR);
+ set_bit (1, (volatile unsigned long *)AMAZON_S_CGU_IFCCR);
+ #endif //defined(__IS_AR9__)
+
+ MDELAY(50);
+
+ // set power
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ clear_bit (6, (volatile unsigned long *)DANUBE_PMU_PWDCR);//USB
+ clear_bit (9, (volatile unsigned long *)DANUBE_PMU_PWDCR);//DSL
+ clear_bit (15, (volatile unsigned long *)DANUBE_PMU_PWDCR);//AHB
+ #if defined(__IS_TWINPASS__)
+ ifxusb_enable_afe_oc();
+ #endif
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ clear_bit (6, (volatile unsigned long *)AMAZON_SE_PMU_PWDCR);
+ clear_bit (9, (volatile unsigned long *)AMAZON_SE_PMU_PWDCR);
+ clear_bit (15, (volatile unsigned long *)AMAZON_SE_PMU_PWDCR);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ clear_bit (6, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//USB
+ else
+ clear_bit (27, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//USB
+ clear_bit (9, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//DSL
+ clear_bit (15, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//AHB
+ #endif //defined(__IS_AR9__)
+
+ if(_core_if->core_global_regs)
+ {
+ // PHY configurations.
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ }
+
+ #endif //defined(__UEIP__)
+}
+
+/*!
+ \brief Turn off the USB Core Power
+ \param _core_if Pointer of core_if structure
+*/
+void ifxusb_power_off (ifxusb_core_if_t *_core_if)
+{
+ struct clk *clk0 = clk_get_sys("usb0", NULL);
+ struct clk *clk1 = clk_get_sys("usb1", NULL);
+ ifxusb_phy_power_off (_core_if);
+
+ // set power
+ #if defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ USB_CTRL_PMU_SETUP(IFX_PMU_DISABLE);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__) || defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ clk_disable(clk0);
+ //USB0_CTRL_PMU_SETUP(IFX_PMU_DISABLE);
+ else
+ clk_disable(clk1);
+ //USB1_CTRL_PMU_SETUP(IFX_PMU_DISABLE);
+ #endif //defined(__IS_AR9__) || defined(__IS_VR9__)
+ #else //defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (6, (volatile unsigned long *)DANUBE_PMU_PWDCR);//USB
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (6, (volatile unsigned long *)AMAZON_SE_PMU_PWDCR);//USB
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ set_bit (6, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//USB
+ else
+ set_bit (27, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//USB
+ #endif //defined(__IS_AR9__)
+ #endif //defined(__UEIP__)
+}
+
+/*!
+ \brief Turn on the USB PHY Power
+ \param _core_if Pointer of core_if structure
+*/
+void ifxusb_phy_power_on (ifxusb_core_if_t *_core_if)
+{
+ struct clk *clk0 = clk_get_sys("usb0", NULL);
+ struct clk *clk1 = clk_get_sys("usb1", NULL);
+ #if defined(__UEIP__)
+ if(_core_if->core_global_regs)
+ {
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9_S__)
+ if(_core_if->core_no==0)
+ set_bit (0, VR9_RCU_USB_ANA_CFG1A);
+ else
+ set_bit (0, VR9_RCU_USB_ANA_CFG1B);
+			#endif //defined(__IS_VR9_S__)
+ }
+
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ USB_PHY_PMU_SETUP(IFX_PMU_ENABLE);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__) || defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ clk_enable(clk0);
+ //USB0_PHY_PMU_SETUP(IFX_PMU_ENABLE);
+ else
+ clk_enable(clk1);
+ //USB1_PHY_PMU_SETUP(IFX_PMU_ENABLE);
+ #endif //defined(__IS_AR9__) || defined(__IS_VR9__)
+
+ // PHY configurations.
+ if(_core_if->core_global_regs)
+ {
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9_S__)
+ if(_core_if->core_no==0)
+ set_bit (0, VR9_RCU_USB_ANA_CFG1A);
+ else
+ set_bit (0, VR9_RCU_USB_ANA_CFG1B);
+			#endif //defined(__IS_VR9_S__)
+ }
+ #else //defined(__UEIP__)
+ // PHY configurations.
+ if(_core_if->core_global_regs)
+ {
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ }
+
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ clear_bit (0, (volatile unsigned long *)DANUBE_PMU_PWDCR);//PHY
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ clear_bit (0, (volatile unsigned long *)AMAZON_SE_PMU_PWDCR);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ clear_bit (0, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//PHY
+ else
+ clear_bit (26, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//PHY
+ #endif //defined(__IS_AR9__)
+
+ // PHY configurations.
+ if(_core_if->core_global_regs)
+ {
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ }
+ #endif //defined(__UEIP__)
+}
+
+
+/*!
+ \brief Turn off the USB PHY Power
+ \param _core_if Pointer of core_if structure
+*/
+void ifxusb_phy_power_off (ifxusb_core_if_t *_core_if)
+{
+ struct clk *clk0 = clk_get_sys("usb0", NULL);
+ struct clk *clk1 = clk_get_sys("usb1", NULL);
+ #if defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ USB_PHY_PMU_SETUP(IFX_PMU_DISABLE);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__) || defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__) || defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ clk_disable(clk0);
+ //USB0_PHY_PMU_SETUP(IFX_PMU_DISABLE);
+ else
+ clk_disable(clk1);
+ //USB1_PHY_PMU_SETUP(IFX_PMU_DISABLE);
+ #endif // defined(__IS_AR9__) || defined(__IS_VR9__)
+ #else //defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (0, (volatile unsigned long *)DANUBE_PMU_PWDCR);//PHY
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (0, (volatile unsigned long *)AMAZON_SE_PMU_PWDCR);//PHY
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ set_bit (0, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//PHY
+ else
+ set_bit (26, (volatile unsigned long *)AMAZON_S_PMU_PWDCR);//PHY
+ #endif //defined(__IS_AR9__)
+ #endif //defined(__UEIP__)
+}
+
+
+/*!
+ \brief Reset the USB core via the RCU (reset control unit)
+ \param _core_if Pointer of core_if structure
+ */
+#if defined(__IS_VR9__)
+ int already_hard_reset=0;
+#endif
+void ifxusb_hard_reset(ifxusb_core_if_t *_core_if)
+{
+ #if defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined (__IS_HOST__)
+ clear_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ #endif
+		#endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+
+ #if defined(__IS_AMAZON_SE__)
+ #if defined (__IS_HOST__)
+ clear_bit (AMAZON_SE_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (AMAZON_SE_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ #endif
+ #endif //defined(__IS_AMAZON_SE__)
+
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ #if defined (__IS_HOST__)
+ clear_bit (AR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)AR9_RCU_USB1CFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (AR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)AR9_RCU_USB1CFG);
+ #endif
+ }
+ else
+ {
+ #if defined (__IS_HOST__)
+ clear_bit (AR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)AR9_RCU_USB2CFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (AR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)AR9_RCU_USB2CFG);
+ #endif
+ }
+ #endif //defined(__IS_AR9__)
+
+ #if defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ {
+ #if defined (__IS_HOST__)
+ clear_bit (VR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)VR9_RCU_USB1CFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (VR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)VR9_RCU_USB1CFG);
+ #endif
+ }
+ else
+ {
+ #if defined (__IS_HOST__)
+ clear_bit (VR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)VR9_RCU_USB2CFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (VR9_USBCFG_HDSEL_BIT, (volatile unsigned long *)VR9_RCU_USB2CFG);
+ #endif
+ }
+ #endif //defined(__IS_VR9__)
+
+
+ // set the HC's byte-order to big-endian
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (DANUBE_USBCFG_HOST_END_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ clear_bit (DANUBE_USBCFG_SLV_END_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (AMAZON_SE_USBCFG_HOST_END_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ clear_bit (AMAZON_SE_USBCFG_SLV_END_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ set_bit (AR9_USBCFG_HOST_END_BIT, (volatile unsigned long *)AR9_RCU_USB1CFG);
+ clear_bit (AR9_USBCFG_SLV_END_BIT, (volatile unsigned long *)AR9_RCU_USB1CFG);
+ }
+ else
+ {
+ set_bit (AR9_USBCFG_HOST_END_BIT, (volatile unsigned long *)AR9_RCU_USB2CFG);
+ clear_bit (AR9_USBCFG_SLV_END_BIT, (volatile unsigned long *)AR9_RCU_USB2CFG);
+ }
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ {
+ set_bit (VR9_USBCFG_HOST_END_BIT, (volatile unsigned long *)VR9_RCU_USB1CFG);
+ clear_bit (VR9_USBCFG_SLV_END_BIT, (volatile unsigned long *)VR9_RCU_USB1CFG);
+ }
+ else
+ {
+ set_bit (VR9_USBCFG_HOST_END_BIT, (volatile unsigned long *)VR9_RCU_USB2CFG);
+ clear_bit (VR9_USBCFG_SLV_END_BIT, (volatile unsigned long *)VR9_RCU_USB2CFG);
+ }
+ #endif //defined(__IS_VR9__)
+
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (4, DANUBE_RCU_RESET);
+ MDELAY(500);
+ clear_bit (4, DANUBE_RCU_RESET);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (4, AMAZON_SE_RCU_RESET);
+ MDELAY(500);
+ clear_bit (4, AMAZON_SE_RCU_RESET);
+ MDELAY(500);
+ #endif //defined(__IS_AMAZON_SE__)
+
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ set_bit (4, AR9_RCU_USBRESET);
+ MDELAY(500);
+ clear_bit (4, AR9_RCU_USBRESET);
+ }
+ else
+ {
+ set_bit (28, AR9_RCU_USBRESET);
+ MDELAY(500);
+ clear_bit (28, AR9_RCU_USBRESET);
+ }
+ MDELAY(500);
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+ if(!already_hard_reset)
+ {
+ set_bit (4, VR9_RCU_USBRESET);
+ MDELAY(500);
+ clear_bit (4, VR9_RCU_USBRESET);
+ MDELAY(500);
+ already_hard_reset=1;
+ }
+ #endif //defined(__IS_VR9__)
+
+ #if defined(__IS_TWINPASS__)
+ ifxusb_enable_afe_oc();
+ #endif
+
+ if(_core_if->core_global_regs)
+ {
+ // PHY configurations.
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+ // ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_VR9__)
+ }
+ #else //defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined (__IS_HOST__)
+ clear_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (DANUBE_USBCFG_HDSEL_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ #endif
+		#endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+
+ #if defined(__IS_AMAZON_SE__)
+ #if defined (__IS_HOST__)
+ clear_bit (AMAZON_SE_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (AMAZON_SE_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ #endif
+ #endif //defined(__IS_AMAZON_SE__)
+
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ #if defined (__IS_HOST__)
+ clear_bit (AMAZON_S_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB1CFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (AMAZON_S_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB1CFG);
+ #endif
+ }
+ else
+ {
+ #if defined (__IS_HOST__)
+ clear_bit (AMAZON_S_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB2CFG);
+ #elif defined (__IS_DEVICE__)
+ set_bit (AMAZON_S_USBCFG_HDSEL_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB2CFG);
+ #endif
+ }
+ #endif //defined(__IS_AR9__)
+
+ // set the HC's byte-order to big-endian
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (DANUBE_USBCFG_HOST_END_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ clear_bit (DANUBE_USBCFG_SLV_END_BIT, (volatile unsigned long *)DANUBE_RCU_USBCFG);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (AMAZON_SE_USBCFG_HOST_END_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ clear_bit (AMAZON_SE_USBCFG_SLV_END_BIT, (volatile unsigned long *)AMAZON_SE_RCU_USBCFG);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ set_bit (AMAZON_S_USBCFG_HOST_END_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB1CFG);
+ clear_bit (AMAZON_S_USBCFG_SLV_END_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB1CFG);
+ }
+ else
+ {
+ set_bit (AMAZON_S_USBCFG_HOST_END_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB2CFG);
+ clear_bit (AMAZON_S_USBCFG_SLV_END_BIT, (volatile unsigned long *)AMAZON_S_RCU_USB2CFG);
+ }
+ #endif //defined(__IS_AR9__)
+
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ set_bit (4, DANUBE_RCU_RESET);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (4, AMAZON_SE_RCU_RESET);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ set_bit (4, AMAZON_S_RCU_USBRESET);
+ }
+ else
+ {
+ set_bit (28, AMAZON_S_RCU_USBRESET);
+ }
+ #endif //defined(__IS_AR9__)
+
+ MDELAY(500);
+
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ clear_bit (4, DANUBE_RCU_RESET);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ clear_bit (4, AMAZON_SE_RCU_RESET);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ clear_bit (4, AMAZON_S_RCU_USBRESET);
+ }
+ else
+ {
+ clear_bit (28, AMAZON_S_RCU_USBRESET);
+ }
+ #endif //defined(__IS_AR9__)
+
+ MDELAY(500);
+
+ #if defined(__IS_TWINPASS__)
+ ifxusb_enable_afe_oc();
+ #endif
+
+ if(_core_if->core_global_regs)
+ {
+ // PHY configurations.
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ ifxusb_wreg (&_core_if->core_global_regs->guid,0x14014);
+ #endif //defined(__IS_AR9__)
+ }
+ #endif //defined(__UEIP__)
+}
+
+#if defined(__GADGET_LED__) || defined(__HOST_LED__)
+ #if defined(__UEIP__)
+ static void *g_usb_led_trigger = NULL;
+ #endif
+
+ void ifxusb_led_init(ifxusb_core_if_t *_core_if)
+ {
+ #if defined(__UEIP__)
+ if ( !g_usb_led_trigger )
+ {
+ ifx_led_trigger_register("usb_link", &g_usb_led_trigger);
+ if ( g_usb_led_trigger != NULL )
+ {
+ struct ifx_led_trigger_attrib attrib = {0};
+ attrib.delay_on = 250;
+ attrib.delay_off = 250;
+ attrib.timeout = 2000;
+ attrib.def_value = 1;
+ attrib.flags = IFX_LED_TRIGGER_ATTRIB_DELAY_ON | IFX_LED_TRIGGER_ATTRIB_DELAY_OFF | IFX_LED_TRIGGER_ATTRIB_TIMEOUT | IFX_LED_TRIGGER_ATTRIB_DEF_VALUE;
+ IFX_DEBUGP("Reg USB LED!!\n");
+ ifx_led_trigger_set_attrib(g_usb_led_trigger, &attrib);
+ }
+ }
+ #endif //defined(__UEIP__)
+ }
+
+ void ifxusb_led_free(ifxusb_core_if_t *_core_if)
+ {
+ #if defined(__UEIP__)
+ if ( g_usb_led_trigger )
+ {
+ ifx_led_trigger_deregister(g_usb_led_trigger);
+ g_usb_led_trigger = NULL;
+ }
+ #endif //defined(__UEIP__)
+ }
+
+ /*!
+	   \brief Trigger the USB activity LED
+ \param _core_if Pointer of core_if structure
+ */
+ void ifxusb_led(ifxusb_core_if_t *_core_if)
+ {
+ #if defined(__UEIP__)
+ if(g_usb_led_trigger)
+ ifx_led_trigger_activate(g_usb_led_trigger);
+ #else
+ #endif //defined(__UEIP__)
+ }
+#endif // defined(__GADGET_LED__) || defined(__HOST_LED__)
+
+
+
+#if defined(__IS_HOST__) && defined(__DO_OC_INT__) && defined(__DO_OC_INT_ENABLE__)
+/*!
+ \brief Turn on the OC Int
+ */
+ void ifxusb_oc_int_on()
+ {
+ #if defined(__UEIP__)
+ #else
+ #if defined(__IS_TWINPASS__)
+ irq_enable(DANUBE_USB_OC_INT);
+ #endif
+ #endif //defined(__UEIP__)
+ }
+/*!
+ \brief Turn off the OC Int
+ */
+ void ifxusb_oc_int_off()
+ {
+ #if defined(__UEIP__)
+ #else
+ #if defined(__IS_TWINPASS__)
+ irq_disable(DANUBE_USB_OC_INT);
+ #endif
+ #endif //defined(__UEIP__)
+ }
+#endif //defined(__IS_HOST__) && defined(__DO_OC_INT__) && defined(__DO_OC_INT_ENABLE__)
+
+/* internal routines for debugging */
+void ifxusb_dump_msg(const u8 *buf, unsigned int length)
+{
+#ifdef __DEBUG__
+ unsigned int start, num, i;
+ char line[52], *p;
+
+ if (length >= 512)
+ return;
+ start = 0;
+ while (length > 0)
+ {
+ num = min(length, 16u);
+ p = line;
+ for (i = 0; i < num; ++i)
+ {
+ if (i == 8)
+ *p++ = ' ';
+ sprintf(p, " %02x", buf[i]);
+ p += 3;
+ }
+ *p = 0;
+ IFX_PRINT( "%6x: %s\n", start, line);
+ buf += num;
+ start += num;
+ length -= num;
+ }
+#endif
+}
+
+/* This function reads the SPRAM and prints its contents */
+void ifxusb_dump_spram(ifxusb_core_if_t *_core_if)
+{
+#ifdef __ENABLE_DUMP__
+ volatile uint8_t *addr, *start_addr, *end_addr;
+ uint32_t size;
+ IFX_PRINT("SPRAM Data:\n");
+ start_addr = (void*)_core_if->core_global_regs;
+ IFX_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr);
+
+ start_addr = (void*)_core_if->data_fifo_dbg;
+ IFX_PRINT("Starting Address: 0x%8X\n", (uint32_t)start_addr);
+
+ size=_core_if->hwcfg3.b.dfifo_depth;
+ size<<=2;
+ size+=0x200;
+ size&=0x0003FFFC;
+
+ end_addr = (void*)_core_if->data_fifo_dbg;
+ end_addr += size;
+
+ for(addr = start_addr; addr < end_addr; addr+=16)
+ {
+ IFX_PRINT("0x%8X:\t%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", (uint32_t)addr,
+ addr[ 0], addr[ 1], addr[ 2], addr[ 3],
+ addr[ 4], addr[ 5], addr[ 6], addr[ 7],
+ addr[ 8], addr[ 9], addr[10], addr[11],
+ addr[12], addr[13], addr[14], addr[15]
+ );
+ }
+ return;
+#endif //__ENABLE_DUMP__
+}
+
+
+
+
+/* This function reads the core global registers and prints them */
+void ifxusb_dump_registers(ifxusb_core_if_t *_core_if)
+{
+#ifdef __ENABLE_DUMP__
+ int i;
+ volatile uint32_t *addr;
+ #ifdef __IS_DEVICE__
+ volatile uint32_t *addri,*addro;
+ #endif
+
+ IFX_PRINT("Core Global Registers\n");
+ addr=&_core_if->core_global_regs->gotgctl;
+ IFX_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gotgint;
+ IFX_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gahbcfg;
+ IFX_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gusbcfg;
+ IFX_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->grstctl;
+ IFX_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gintsts;
+ IFX_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gintmsk;
+ IFX_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gi2cctl;
+ IFX_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gpvndctl;
+ IFX_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->ggpio;
+ IFX_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->guid;
+ IFX_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->gsnpsid;
+ IFX_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->ghwcfg1;
+ IFX_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->ghwcfg2;
+ IFX_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->ghwcfg3;
+ IFX_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->ghwcfg4;
+ IFX_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+
+ addr=_core_if->pcgcctl;
+ IFX_PRINT("PCGCCTL @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+
+ addr=&_core_if->core_global_regs->grxfsiz;
+ IFX_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+
+ #ifdef __IS_HOST__
+ addr=&_core_if->core_global_regs->gnptxfsiz;
+ IFX_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->core_global_regs->hptxfsiz;
+ IFX_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ #endif //__IS_HOST__
+
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_FIFO__
+ addr=&_core_if->core_global_regs->gnptxfsiz;
+ IFX_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ for (i=0; i<= _core_if->hwcfg4.b.num_in_eps; i++)
+ {
+ addr=&_core_if->core_global_regs->dptxfsiz_dieptxf[i];
+ IFX_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,ifxusb_rreg(addr));
+ }
+ #else
+ addr=&_core_if->core_global_regs->gnptxfsiz;
+ IFX_PRINT("TXFSIZ[00] @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ for (i=0; i< _core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
+ {
+ addr=&_core_if->core_global_regs->dptxfsiz_dieptxf[i];
+ IFX_PRINT("TXFSIZ[%02d] @0x%08X : 0x%08X\n",i+1,(uint32_t)addr,ifxusb_rreg(addr));
+ }
+ #endif
+ #endif //__IS_DEVICE__
+
+ #ifdef __IS_HOST__
+ IFX_PRINT("Host Global Registers\n");
+ addr=&_core_if->host_global_regs->hcfg;
+ IFX_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->host_global_regs->hfir;
+ IFX_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->host_global_regs->hfnum;
+ IFX_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->host_global_regs->hptxsts;
+ IFX_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->host_global_regs->haint;
+ IFX_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->host_global_regs->haintmsk;
+ IFX_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr= _core_if->hprt0;
+ IFX_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+
+ for (i=0; i<MAX_EPS_CHANNELS; i++)
+ {
+ IFX_PRINT("Host Channel %d Specific Registers\n", i);
+ addr=&_core_if->hc_regs[i]->hcchar;
+ IFX_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->hc_regs[i]->hcsplt;
+ IFX_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->hc_regs[i]->hcint;
+ IFX_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->hc_regs[i]->hcintmsk;
+ IFX_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->hc_regs[i]->hctsiz;
+ IFX_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->hc_regs[i]->hcdma;
+ IFX_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ }
+ #endif //__IS_HOST__
+
+ #ifdef __IS_DEVICE__
+ IFX_PRINT("Device Global Registers\n");
+ addr=&_core_if->dev_global_regs->dcfg;
+ IFX_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->dctl;
+ IFX_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->dsts;
+ IFX_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->diepmsk;
+ IFX_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->doepmsk;
+ IFX_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->daintmsk;
+ IFX_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->daint;
+ IFX_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->dvbusdis;
+ IFX_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ addr=&_core_if->dev_global_regs->dvbuspulse;
+ IFX_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", (uint32_t)addr,ifxusb_rreg(addr));
+
+ addr=&_core_if->dev_global_regs->dtknqr1;
+ IFX_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,ifxusb_rreg(addr));
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 6) {
+ addr=&_core_if->dev_global_regs->dtknqr2;
+ IFX_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", (uint32_t)addr,ifxusb_rreg(addr));
+ }
+
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 14)
+ {
+ addr=&_core_if->dev_global_regs->dtknqr3_dthrctl;
+ IFX_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n", (uint32_t)addr, ifxusb_rreg(addr));
+ }
+
+ if (_core_if->hwcfg2.b.dev_token_q_depth > 22)
+ {
+ addr=&_core_if->dev_global_regs->dtknqr4_fifoemptymsk;
+ IFX_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", (uint32_t)addr, ifxusb_rreg(addr));
+ }
+
+ //for (i=0; i<= MAX_EPS_CHANNELS; i++)
+ //for (i=0; i<= 10; i++)
+ for (i=0; i<= 3; i++)
+ {
+ IFX_PRINT("Device EP %d Registers\n", i);
+ addri=&_core_if->in_ep_regs[i]->diepctl;addro=&_core_if->out_ep_regs[i]->doepctl;
+ IFX_PRINT("DEPCTL I: 0x%08X O: 0x%08X\n",ifxusb_rreg(addri),ifxusb_rreg(addro));
+ addro=&_core_if->out_ep_regs[i]->doepfn;
+ IFX_PRINT("DEPFN I: O: 0x%08X\n",ifxusb_rreg(addro));
+ addri=&_core_if->in_ep_regs[i]->diepint;addro=&_core_if->out_ep_regs[i]->doepint;
+ IFX_PRINT("DEPINT I: 0x%08X O: 0x%08X\n",ifxusb_rreg(addri),ifxusb_rreg(addro));
+ addri=&_core_if->in_ep_regs[i]->dieptsiz;addro=&_core_if->out_ep_regs[i]->doeptsiz;
+ IFX_PRINT("DETSIZ I: 0x%08X O: 0x%08X\n",ifxusb_rreg(addri),ifxusb_rreg(addro));
+ addri=&_core_if->in_ep_regs[i]->diepdma;addro=&_core_if->out_ep_regs[i]->doepdma;
+ IFX_PRINT("DEPDMA I: 0x%08X O: 0x%08X\n",ifxusb_rreg(addri),ifxusb_rreg(addro));
+ addri=&_core_if->in_ep_regs[i]->dtxfsts;
+ IFX_PRINT("DTXFSTS I: 0x%08X\n",ifxusb_rreg(addri) );
+ addri=&_core_if->in_ep_regs[i]->diepdmab;addro=&_core_if->out_ep_regs[i]->doepdmab;
+ IFX_PRINT("DEPDMAB I: 0x%08X O: 0x%08X\n",ifxusb_rreg(addri),ifxusb_rreg(addro));
+ }
+ #endif //__IS_DEVICE__
+#endif //__ENABLE_DUMP__
+}
+
+void ifxusb_clean_spram(ifxusb_core_if_t *_core_if,uint32_t dwords)
+{
+ volatile uint32_t *addr1,*addr2, *start_addr, *end_addr;
+
+ if(!dwords)
+ return;
+
+ start_addr = (uint32_t *)_core_if->data_fifo_dbg;
+
+ end_addr = (uint32_t *)_core_if->data_fifo_dbg;
+ end_addr += dwords;
+
+	IFX_PRINT("Clearing SPRAM: 0x%8X-0x%8X\n", (uint32_t)start_addr,(uint32_t)end_addr);
+ for(addr1 = start_addr; addr1 < end_addr; addr1+=4)
+ {
+ for(addr2 = addr1; addr2 < addr1+4; addr2++)
+ *addr2=0x00000000;
+ }
+	IFX_PRINT("Clearing SPRAM: 0x%8X-0x%8X Done\n", (uint32_t)start_addr,(uint32_t)end_addr);
+ return;
+}
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.h b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.h
new file mode 100644
index 0000000000..191781f623
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif.h
@@ -0,0 +1,665 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_cif.h
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : The Core Interface provides basic services for accessing and
+ ** managing the IFX USB hardware. These services are used by both the
+ ** Host Controller Driver and the Peripheral Controller Driver.
+ ** FUNCTIONS :
+ ** COMPILER : gcc
+ ** REFERENCE    : IFX hardware reference handbook for each platform
+ ** COPYRIGHT :
+ ** Version Control Section **
+ ** $Author$
+ ** $Date$
+ ** $Revisions$
+ ** $Log$ Revision history
+*****************************************************************************/
+
+/*!
+ \defgroup IFXUSB_DRIVER_V3 IFX USB SS Project
+ \brief IFX USB subsystem V3.x
+ */
+
+/*!
+ \defgroup IFXUSB_CIF Core Interface APIs
+ \ingroup IFXUSB_DRIVER_V3
+ \brief The Core Interface provides basic services for accessing and
+ managing the IFXUSB hardware. These services are used by both the
+ Host Controller Driver and the Peripheral Controller Driver.
+ */
+
+
+/*!
+ \file ifxusb_cif.h
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the interface to the IFX USB Core.
+ */
+
+#if !defined(__IFXUSB_CIF_H__)
+#define __IFXUSB_CIF_H__
+
+#include <linux/workqueue.h>
+
+#include <linux/version.h>
+#include <asm/param.h>
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+
+#ifdef __DEBUG__
+ #include "linux/timer.h"
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#define IFXUSB_PARAM_SPEED_HIGH 0
+#define IFXUSB_PARAM_SPEED_FULL 1
+
+#define IFXUSB_EP_SPEED_LOW 0
+#define IFXUSB_EP_SPEED_FULL 1
+#define IFXUSB_EP_SPEED_HIGH 2
+
+#define IFXUSB_EP_TYPE_CTRL 0
+#define IFXUSB_EP_TYPE_ISOC 1
+#define IFXUSB_EP_TYPE_BULK 2
+#define IFXUSB_EP_TYPE_INTR 3
+
+#define IFXUSB_HC_PID_DATA0 0
+#define IFXUSB_HC_PID_DATA2 1
+#define IFXUSB_HC_PID_DATA1 2
+#define IFXUSB_HC_PID_MDATA 3
+#define IFXUSB_HC_PID_SETUP 3
+
+
+/*!
+ \addtogroup IFXUSB_CIF
+ */
+/*@{*/
+
+/*!
+ \struct ifxusb_params
+ \brief IFXUSB Parameters structure.
+        This structure is used both to import parameters at insmod time and to store them at run time.
+ These parameters define how the IFXUSB controller should be configured.
+ */
+typedef struct ifxusb_params
+{
+ int32_t dma_burst_size; /*!< The DMA Burst size (applicable only for Internal DMA
+	                            Mode). 0 (single), 1 (incr), 4 (incr4), 8 (incr8), 16 (incr16)
+ */
+ /* Translate this to GAHBCFG values */
+	int32_t speed;              /*!< Specifies the maximum speed of operation in host and device mode.
+	                                 The actual speed depends on the speed of the attached device and
+	                                 the value of phy_type.
+ 0 - High Speed (default)
+ 1 - Full Speed
+ */
+
+ int32_t data_fifo_size; /*!< Total number of dwords in the data FIFO memory. This
+ memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+ Tx FIFOs.
+ 32 to 32768
+ */
+ #ifdef __IS_DEVICE__
+ int32_t rx_fifo_size; /*!< Number of dwords in the Rx FIFO in device mode.
+ 16 to 32768
+ */
+
+
+ int32_t tx_fifo_size[MAX_EPS_CHANNELS]; /*!< Number of dwords in each of the Tx FIFOs in device mode.
+ 4 to 768
+ */
+ #ifdef __DED_FIFO__
+ int32_t thr_ctl; /*!< Threshold control on/off */
+ int32_t tx_thr_length; /*!< Threshold length for Tx */
+ int32_t rx_thr_length; /*!< Threshold length for Rx*/
+ #endif
+ #else //__IS_HOST__
+ int32_t host_channels; /*!< The number of host channel registers to use.
+ 1 to 16
+ */
+
+ int32_t rx_fifo_size; /*!< Number of dwords in the Rx FIFO in host mode.
+ 16 to 32768
+ */
+
+ int32_t nperio_tx_fifo_size;/*!< Number of dwords in the non-periodic Tx FIFO in host mode.
+ 16 to 32768
+ */
+
+ int32_t perio_tx_fifo_size; /*!< Number of dwords in the host periodic Tx FIFO.
+ 16 to 32768
+ */
+ #endif //__IS_HOST__
+
+ int32_t max_transfer_size; /*!< The maximum transfer size supported in bytes.
+ 2047 to 65,535
+ */
+
+ int32_t max_packet_count; /*!< The maximum number of packets in a transfer.
+ 15 to 511 (default 511)
+ */
+ int32_t phy_utmi_width; /*!< Specifies the UTMI+ Data Width.
+ 8 or 16 bits (default 16)
+ */
+
+ int32_t turn_around_time_hs; /*!< Specifies the Turn-Around time at HS*/
+ int32_t turn_around_time_fs; /*!< Specifies the Turn-Around time at FS*/
+
+ int32_t timeout_cal_hs; /*!< Specifies the Timeout_Calibration at HS*/
+ int32_t timeout_cal_fs; /*!< Specifies the Timeout_Calibration at FS*/
+} ifxusb_params_t;
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/*!
+ \struct ifxusb_core_if
+ \brief The ifx_core_if structure contains information needed to manage
+ the IFX USB controller acting in either host or device mode. It
+ represents the programming view of the controller as a whole.
+ */
+typedef struct ifxusb_core_if
+{
+ ifxusb_params_t params; /*!< Run-time Parameters */
+
+	uint8_t  core_no;            /*!< core number (used as id in the multi-core case) */
+ char *core_name; /*!< core name used for registration and informative purpose*/
+	int      irq;                /*!< irq number this core is hooked to */
+
+ /*****************************************************************
+ * Structures and pointers to physical register interface.
+ *****************************************************************/
+ /** Core Global registers starting at offset 000h. */
+ ifxusb_core_global_regs_t *core_global_regs; /*!< pointer to Core Global Registers, offset at 000h */
+
+ /** Host-specific registers */
+ #ifdef __IS_HOST__
+ /** Host Global Registers starting at offset 400h.*/
+ ifxusb_host_global_regs_t *host_global_regs; /*!< pointer to Host Global Registers, offset at 400h */
+ #define IFXUSB_HOST_GLOBAL_REG_OFFSET 0x400
+ /** Host Port 0 Control and Status Register */
+ volatile uint32_t *hprt0; /*!< pointer to HPRT0 Registers, offset at 440h */
+ #define IFXUSB_HOST_PORT_REGS_OFFSET 0x440
+ /** Host Channel Specific Registers at offsets 500h-5FCh. */
+ ifxusb_hc_regs_t *hc_regs[MAX_EPS_CHANNELS]; /*!< pointer to Host-Channel n Registers, offset at 500h */
+ #define IFXUSB_HOST_CHAN_REGS_OFFSET 0x500
+ #define IFXUSB_CHAN_REGS_OFFSET 0x20
+ #endif
+
+ /** Device-specific registers */
+ #ifdef __IS_DEVICE__
+ /** Device Global Registers starting at offset 800h */
+ ifxusb_device_global_regs_t *dev_global_regs; /*!< pointer to Device Global Registers, offset at 800h */
+ #define IFXUSB_DEV_GLOBAL_REG_OFFSET 0x800
+
+ /** Device Logical IN Endpoint-Specific Registers 900h-AFCh */
+ ifxusb_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; /*!< pointer to Device IN-EP Registers, offset at 900h */
+ #define IFXUSB_DEV_IN_EP_REG_OFFSET 0x900
+ #define IFXUSB_EP_REG_OFFSET 0x20
+ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
+		ifxusb_dev_out_ep_regs_t     *out_ep_regs[MAX_EPS_CHANNELS];/*!< pointer to Device OUT-EP Registers, offset at B00h */
+ #define IFXUSB_DEV_OUT_EP_REG_OFFSET 0xB00
+ #endif
+
+ /** Power and Clock Gating Control Register */
+ volatile uint32_t *pcgcctl; /*!< pointer to Power and Clock Gating Control Registers, offset at E00h */
+ #define IFXUSB_PCGCCTL_OFFSET 0xE00
+
+ /** Push/pop addresses for endpoints or host channels.*/
+ uint32_t *data_fifo[MAX_EPS_CHANNELS]; /*!< pointer to FIFO access windows, offset at 1000h */
+ #define IFXUSB_DATA_FIFO_OFFSET 0x1000
+ #define IFXUSB_DATA_FIFO_SIZE 0x1000
+
+ uint32_t *data_fifo_dbg; /*!< pointer to FIFO debug windows, offset at 1000h */
+
+ /** Hardware Configuration -- stored here for convenience.*/
+ hwcfg1_data_t hwcfg1; /*!< preserved Hardware Configuration 1 */
+ hwcfg2_data_t hwcfg2; /*!< preserved Hardware Configuration 2 */
+ hwcfg3_data_t hwcfg3; /*!< preserved Hardware Configuration 3 */
+	hwcfg4_data_t hwcfg4;        /*!< preserved Hardware Configuration 4 */
+ uint32_t snpsid; /*!< preserved SNPSID */
+
+ /*****************************************************************
+	 * Run-time information.
+ *****************************************************************/
+ /* Set to 1 if the core PHY interface bits in USBCFG have been initialized. */
+	uint8_t phy_init_done;       /*!< indicates the PHY has been initialized. */
+
+ #ifdef __IS_HOST__
+ uint8_t queuing_high_bandwidth; /*!< Host mode, Queueing High Bandwidth. */
+ #endif
+} ifxusb_core_if_t;
+
+/*@}*//*IFXUSB_CIF*/
+
+
+/*!
+ \fn void *ifxusb_alloc_buf(size_t size, int clear)
+ \brief This function is called to allocate a buffer of the specified size.
+        The allocated buffer is mapped to a DMA-accessible address.
+ \param size  Size in bytes to be allocated
+ \param clear 0: do not clear the buffer after allocation; non-zero: clear it to zero
+ \return NULL on failure; otherwise the uncached pointer to the allocated buffer
+ \ingroup IFXUSB_CIF
+ */
+extern void *ifxusb_alloc_buf(size_t size, int clear);
+
+/*!
+ \fn void ifxusb_free_buf(void *vaddr)
+ \brief This function is called to free allocated buffer.
+ \param vaddr the uncached pointer of the buffer
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_free_buf(void *vaddr);
+
+/*!
+ \fn int ifxusb_core_if_init(ifxusb_core_if_t *_core_if,
+ int _irq,
+ uint32_t _reg_base_addr,
+ uint32_t _fifo_base_addr,
+ uint32_t _fifo_dbg_addr)
+ \brief This function is called to initialize the IFXUSB CSR data
+ structures. The register addresses in the device and host
+ structures are initialized from the base address supplied by the
+ caller. The calling function must make the OS calls to get the
+ base address of the IFXUSB controller registers.
+ \param _core_if Pointer of core_if structure
+ \param _irq irq number
+ \param _reg_base_addr Base address of IFXUSB core registers
+ \param _fifo_base_addr Fifo base address
+ \param _fifo_dbg_addr Fifo debug address
+ \return 0: success;
+ \ingroup IFXUSB_CIF
+ */
+extern int ifxusb_core_if_init(ifxusb_core_if_t *_core_if,
+ int _irq,
+ uint32_t _reg_base_addr,
+ uint32_t _fifo_base_addr,
+ uint32_t _fifo_dbg_addr);
+
+
+/*!
+ \fn void ifxusb_core_if_remove(ifxusb_core_if_t *_core_if)
+ \brief This function frees the mapped addresses in the IFXUSB CSR data structures.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_core_if_remove(ifxusb_core_if_t *_core_if);
+
+/*!
+ \fn void ifxusb_enable_global_interrupts( ifxusb_core_if_t *_core_if )
+ \brief This function enables the controller's Global Interrupt in the AHB Config register.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_enable_global_interrupts( ifxusb_core_if_t *_core_if );
+
+/*!
+ \fn void ifxusb_disable_global_interrupts( ifxusb_core_if_t *_core_if )
+ \brief This function disables the controller's Global Interrupt in the AHB Config register.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_disable_global_interrupts( ifxusb_core_if_t *_core_if );
+
+/*!
+ \fn void ifxusb_flush_tx_fifo( ifxusb_core_if_t *_core_if, const int _num )
+ \brief Flush a Tx FIFO.
+ \param _core_if Pointer of core_if structure
+ \param _num Tx FIFO to flush. ( 0x10 for ALL TX FIFO )
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_flush_tx_fifo( ifxusb_core_if_t *_core_if, const int _num );
+
+/*!
+ \fn void ifxusb_flush_rx_fifo( ifxusb_core_if_t *_core_if )
+ \brief Flush Rx FIFO.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_flush_rx_fifo( ifxusb_core_if_t *_core_if );
+
+/*!
+ \fn void ifxusb_flush_both_fifo( ifxusb_core_if_t *_core_if )
+ \brief Flush ALL Rx and Tx FIFO.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_flush_both_fifo( ifxusb_core_if_t *_core_if );
+
+
+/*!
+ \fn int ifxusb_core_soft_reset(ifxusb_core_if_t *_core_if)
+ \brief Do a soft reset of the core. Be careful with this because it
+ resets all the internal state machines of the core.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern int ifxusb_core_soft_reset(ifxusb_core_if_t *_core_if);
+
+
+/*!
+ \brief Turn on the USB Core Power
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+*/
+extern void ifxusb_power_on (ifxusb_core_if_t *_core_if);
+
+/*!
+ \fn void ifxusb_power_off (ifxusb_core_if_t *_core_if)
+ \brief Turn off the USB Core Power
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+*/
+extern void ifxusb_power_off (ifxusb_core_if_t *_core_if);
+
+/*!
+ \fn void ifxusb_phy_power_on (ifxusb_core_if_t *_core_if)
+ \brief Turn on the USB PHY Power
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+*/
+extern void ifxusb_phy_power_on (ifxusb_core_if_t *_core_if);
+
+/*!
+ \fn void ifxusb_phy_power_off (ifxusb_core_if_t *_core_if)
+ \brief Turn off the USB PHY Power
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+*/
+extern void ifxusb_phy_power_off (ifxusb_core_if_t *_core_if);
+
+/*!
+ \fn void ifxusb_hard_reset(ifxusb_core_if_t *_core_if)
+ \brief Reset the USB core through the RCU
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+extern void ifxusb_hard_reset(ifxusb_core_if_t *_core_if);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+#ifdef __IS_HOST__
+ /*!
+ \fn void ifxusb_host_core_init(ifxusb_core_if_t *_core_if, ifxusb_params_t *_params)
+ \brief This function initializes the IFXUSB controller registers for Host mode.
+ This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ request queues.
+ \param _core_if Pointer of core_if structure
+ \param _params parameters to be set
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_host_core_init(ifxusb_core_if_t *_core_if, ifxusb_params_t *_params);
+
+ /*!
+ \fn void ifxusb_host_enable_interrupts(ifxusb_core_if_t *_core_if)
+ \brief This function enables the Host mode interrupts.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_host_enable_interrupts(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_host_disable_interrupts(ifxusb_core_if_t *_core_if)
+ \brief This function disables the Host mode interrupts.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_host_disable_interrupts(ifxusb_core_if_t *_core_if);
+
+ #if defined(__IS_TWINPASS__)
+ extern void ifxusb_enable_afe_oc(void);
+ #endif
+
+ /*!
+ \fn void ifxusb_vbus_init(ifxusb_core_if_t *_core_if)
+	 \brief This function initializes the VBUS control.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_vbus_init(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_vbus_free(ifxusb_core_if_t *_core_if)
+	 \brief This function frees the VBUS control.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_vbus_free(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_vbus_on(ifxusb_core_if_t *_core_if)
+ \brief Turn on the USB 5V VBus Power
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_vbus_on(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_vbus_off(ifxusb_core_if_t *_core_if)
+ \brief Turn off the USB 5V VBus Power
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_vbus_off(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn int ifxusb_vbus(ifxusb_core_if_t *_core_if)
+ \brief Read Current VBus status
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern int ifxusb_vbus(ifxusb_core_if_t *_core_if);
+
+ #if defined(__DO_OC_INT__) && defined(__DO_OC_INT_ENABLE__)
+ /*!
+ \fn void ifxusb_oc_int_on(void)
+ \brief Turn on the OC interrupt
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_oc_int_on(void);
+
+ /*!
+ \fn void ifxusb_oc_int_off(void)
+ \brief Turn off the OC interrupt
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_oc_int_off(void);
+ #endif //defined(__DO_OC_INT__) && defined(__DO_OC_INT_ENABLE__)
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+#ifdef __IS_DEVICE__
+ /*!
+ \fn void ifxusb_dev_enable_interrupts(ifxusb_core_if_t *_core_if)
+ \brief This function enables the Device mode interrupts.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_dev_enable_interrupts(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn uint32_t ifxusb_dev_get_frame_number(ifxusb_core_if_t *_core_if)
+ \brief Gets the current USB frame number. This is the frame number from the last SOF packet.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern uint32_t ifxusb_dev_get_frame_number(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_dev_ep_set_stall(ifxusb_core_if_t *_core_if, uint8_t _epno, uint8_t _is_in)
+ \brief Set the EP STALL.
+ \param _core_if Pointer of core_if structure
+ \param _epno EP number
+ \param _is_in 1: is IN transfer
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_dev_ep_set_stall(ifxusb_core_if_t *_core_if, uint8_t _epno, uint8_t _is_in);
+
+ /*!
+ \fn void ifxusb_dev_ep_clear_stall(ifxusb_core_if_t *_core_if, uint8_t _epno, uint8_t _ep_type, uint8_t _is_in)
+	 \brief Clear the EP STALL.
+	 \param _core_if Pointer of core_if structure
+	 \param _epno EP number
+	 \param _ep_type EP Type
+	 \param _is_in 1: is IN transfer
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_dev_ep_clear_stall(ifxusb_core_if_t *_core_if, uint8_t _epno, uint8_t _ep_type, uint8_t _is_in);
+
+ /*!
+ \fn void ifxusb_dev_core_init(ifxusb_core_if_t *_core_if, ifxusb_params_t *_params)
+ \brief This function initializes the IFXUSB controller registers for Device mode.
+ This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ request queues.
+	        This function validates the imported parameters and stores the result in the CIF structure.
+	 \param _core_if Pointer of core_if structure
+	 \param _params structure of imported parameters
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_dev_core_init(ifxusb_core_if_t *_core_if, ifxusb_params_t *_params);
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__GADGET_LED__) || defined(__HOST_LED__)
+ /*!
+ \fn void ifxusb_led_init(ifxusb_core_if_t *_core_if)
+	 \brief This function initializes the LED control.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_led_init(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_led_free(ifxusb_core_if_t *_core_if)
+	 \brief This function frees the LED control.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_led_free(ifxusb_core_if_t *_core_if);
+
+ /*!
+ \fn void ifxusb_led(ifxusb_core_if_t *_core_if)
+	 \brief This function triggers the LED access.
+ \param _core_if Pointer of core_if structure
+ \ingroup IFXUSB_CIF
+ */
+ extern void ifxusb_led(ifxusb_core_if_t *_core_if);
+#endif
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+/* internal routines for debugging */
+extern void ifxusb_dump_msg(const u8 *buf, unsigned int length);
+extern void ifxusb_dump_spram(ifxusb_core_if_t *_core_if);
+extern void ifxusb_dump_registers(ifxusb_core_if_t *_core_if);
+extern void ifxusb_clean_spram(ifxusb_core_if_t *_core_if,uint32_t dwords);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static inline uint32_t ifxusb_read_core_intr(ifxusb_core_if_t *_core_if)
+{
+ return (ifxusb_rreg(&_core_if->core_global_regs->gintsts) &
+ (ifxusb_rreg(&_core_if->core_global_regs->gintmsk)
+#ifdef __USE_TIMER_4_SOF__
+ | IFXUSB_SOF_INTR_MASK
+#endif
+ ));
+}
+
+static inline uint32_t ifxusb_read_otg_intr (ifxusb_core_if_t *_core_if)
+{
+ return (ifxusb_rreg (&_core_if->core_global_regs->gotgint));
+}
+
+static inline uint32_t ifxusb_mode(ifxusb_core_if_t *_core_if)
+{
+ return (ifxusb_rreg( &_core_if->core_global_regs->gintsts ) & 0x1);
+}
+static inline uint8_t ifxusb_is_device_mode(ifxusb_core_if_t *_core_if)
+{
+ return (ifxusb_mode(_core_if) != 1);
+}
+static inline uint8_t ifxusb_is_host_mode(ifxusb_core_if_t *_core_if)
+{
+ return (ifxusb_mode(_core_if) == 1);
+}
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __IS_HOST__
+ static inline uint32_t ifxusb_read_hprt0(ifxusb_core_if_t *_core_if)
+ {
+ hprt0_data_t hprt0;
+ hprt0.d32 = ifxusb_rreg(_core_if->hprt0);
+ hprt0.b.prtena = 0;
+ hprt0.b.prtconndet = 0;
+ hprt0.b.prtenchng = 0;
+ hprt0.b.prtovrcurrchng = 0;
+ return hprt0.d32;
+ }
+
+ static inline uint32_t ifxusb_read_host_all_channels_intr (ifxusb_core_if_t *_core_if)
+ {
+ return (ifxusb_rreg (&_core_if->host_global_regs->haint));
+ }
+
+ static inline uint32_t ifxusb_read_host_channel_intr (ifxusb_core_if_t *_core_if, int hc_num)
+ {
+ return (ifxusb_rreg (&_core_if->hc_regs[hc_num]->hcint));
+ }
+#endif
+
+#ifdef __IS_DEVICE__
+ static inline uint32_t ifxusb_read_dev_all_in_ep_intr(ifxusb_core_if_t *_core_if)
+ {
+ uint32_t v;
+ v = ifxusb_rreg(&_core_if->dev_global_regs->daint) &
+ ifxusb_rreg(&_core_if->dev_global_regs->daintmsk);
+ return (v & 0xffff);
+ }
+
+ static inline uint32_t ifxusb_read_dev_all_out_ep_intr(ifxusb_core_if_t *_core_if)
+ {
+ uint32_t v;
+ v = ifxusb_rreg(&_core_if->dev_global_regs->daint) &
+ ifxusb_rreg(&_core_if->dev_global_regs->daintmsk);
+ return ((v & 0xffff0000) >> 16);
+ }
+
+ static inline uint32_t ifxusb_read_dev_in_ep_intr(ifxusb_core_if_t *_core_if, int _ep_num)
+ {
+ uint32_t v;
+ v = ifxusb_rreg(&_core_if->in_ep_regs[_ep_num]->diepint) &
+ ifxusb_rreg(&_core_if->dev_global_regs->diepmsk);
+ return v;
+ }
+
+ static inline uint32_t ifxusb_read_dev_out_ep_intr(ifxusb_core_if_t *_core_if, int _ep_num)
+ {
+ uint32_t v;
+ v = ifxusb_rreg(&_core_if->out_ep_regs[_ep_num]->doepint) &
+ ifxusb_rreg(&_core_if->dev_global_regs->doepmsk);
+ return v;
+ }
+
+#endif
+
+extern void ifxusb_attr_create (void *_dev);
+
+extern void ifxusb_attr_remove (void *_dev);
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#endif // !defined(__IFXUSB_CIF_H__)
+
+
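A minimal sketch, assuming a Linux IRQ context, of how the inline helpers declared above (ifxusb_read_core_intr(), ifxusb_is_host_mode()) could be combined in a top-level interrupt handler. The handler name and the prints are hypothetical placeholders, not the driver's actual ISR.

	#include <linux/interrupt.h>   /* irqreturn_t, IRQ_HANDLED, IRQ_NONE */
	#include "ifxusb_cif.h"

	/* hypothetical example ISR, not part of the patch */
	static irqreturn_t example_usb_isr(int irq, void *dev)
	{
		ifxusb_core_if_t *core_if = dev;
		/* GINTSTS masked with GINTMSK (plus SOF when __USE_TIMER_4_SOF__ is set) */
		uint32_t gintsts = ifxusb_read_core_intr(core_if);

		if (!gintsts)
			return IRQ_NONE;               /* interrupt not raised by this core */

		if (ifxusb_is_host_mode(core_if))      /* GINTSTS bit 0 == 1 -> host mode   */
			IFX_PRINT("host-mode intr 0x%08X\n", gintsts);
		else                                   /* GINTSTS bit 0 == 0 -> device mode */
			IFX_PRINT("device-mode intr 0x%08X\n", gintsts);

		/* a real handler would clear the serviced bits in GINTSTS here */
		return IRQ_HANDLED;
	}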
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_d.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_d.c
new file mode 100644
index 0000000000..36ab0e75bf
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_d.c
@@ -0,0 +1,458 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_cif_d.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : The Core Interface provides basic services for accessing and
+ ** managing the IFX USB hardware. These services are used by the
+ ** Peripheral Controller Driver only.
+ *****************************************************************************/
+
+/*!
+ \file ifxusb_cif_d.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the interface to the IFX USB Core.
+*/
+
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#ifdef __DEBUG__
+ #include <linux/jiffies.h>
+#endif
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+
+#include "ifxpcd.h"
+
+
+
+/*!
+ \brief Initializes the DevSpd field of the DCFG register depending on the PHY type
+ and the enumeration speed of the device.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_dev_init_spd(ifxusb_core_if_t *_core_if)
+{
+ uint32_t val;
+ dcfg_data_t dcfg;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ if (_core_if->params.speed == IFXUSB_PARAM_SPEED_FULL)
+ /* High speed PHY running at full speed */
+ val = 0x1;
+ else
+ /* High speed PHY running at high speed and full speed*/
+ val = 0x0;
+
+ IFX_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
+ dcfg.d32 = ifxusb_rreg(&_core_if->dev_global_regs->dcfg);
+ dcfg.b.devspd = val;
+ ifxusb_wreg(&_core_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+
+/*!
+ \brief This function enables the Device mode interrupts.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_dev_enable_interrupts(ifxusb_core_if_t *_core_if)
+{
+ gint_data_t intr_mask ={ .d32 = 0};
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ IFX_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+ /* Clear any pending OTG Interrupts */
+ ifxusb_wreg( &global_regs->gotgint, 0xFFFFFFFF);
+
+ /* Clear any pending interrupts */
+ ifxusb_wreg( &global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Enable the interrupts in the GINTMSK.*/
+ intr_mask.b.modemismatch = 1;
+ intr_mask.b.conidstschng = 1;
+ intr_mask.b.wkupintr = 1;
+ intr_mask.b.disconnect = 1;
+ intr_mask.b.usbsuspend = 1;
+
+ intr_mask.b.usbreset = 1;
+ intr_mask.b.enumdone = 1;
+ intr_mask.b.inepintr = 1;
+ intr_mask.b.outepintr = 1;
+ intr_mask.b.erlysuspend = 1;
+ #ifndef __DED_FIFO__
+// intr_mask.b.epmismatch = 1;
+ #endif
+
+ ifxusb_mreg( &global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+ IFX_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, ifxusb_rreg( &global_regs->gintmsk));
+}
+
+/*!
+ \brief Gets the current USB frame number. This is the frame number from the last SOF packet.
+ \param _core_if Pointer of core_if structure
+ */
+uint32_t ifxusb_dev_get_frame_number(ifxusb_core_if_t *_core_if)
+{
+ dsts_data_t dsts;
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ dsts.d32 = ifxusb_rreg(&_core_if->dev_global_regs->dsts);
+	/* read current frame/microframe number from DSTS register */
+ return dsts.b.soffn;
+}
+
+
+/*!
+ \brief Set the EP STALL.
+ */
+void ifxusb_dev_ep_set_stall(ifxusb_core_if_t *_core_if, uint8_t _epno, uint8_t _is_in)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+
+ IFX_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _epno, (_is_in?"IN":"OUT"));
+
+ depctl_addr = (_is_in)? (&(_core_if->in_ep_regs [_epno]->diepctl)):
+ (&(_core_if->out_ep_regs[_epno]->doepctl));
+ depctl.d32 = ifxusb_rreg(depctl_addr);
+ depctl.b.stall = 1;
+
+ if (_is_in && depctl.b.epena)
+ depctl.b.epdis = 1;
+
+ ifxusb_wreg(depctl_addr, depctl.d32);
+ IFX_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",ifxusb_rreg(depctl_addr));
+ return;
+}
+
+/*!
+\brief Clear the EP STALL.
+ */
+void ifxusb_dev_ep_clear_stall(ifxusb_core_if_t *_core_if, uint8_t _epno, uint8_t _ep_type, uint8_t _is_in)
+{
+ depctl_data_t depctl;
+ volatile uint32_t *depctl_addr;
+
+ IFX_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _epno, (_is_in?"IN":"OUT"));
+
+ depctl_addr = (_is_in)? (&(_core_if->in_ep_regs [_epno]->diepctl)):
+ (&(_core_if->out_ep_regs[_epno]->doepctl));
+
+ depctl.d32 = ifxusb_rreg(depctl_addr);
+ /* clear the stall bits */
+ depctl.b.stall = 0;
+
+ /*
+ * USB Spec 9.4.5: For endpoints using data toggle, regardless
+ * of whether an endpoint has the Halt feature set, a
+ * ClearFeature(ENDPOINT_HALT) request always results in the
+ * data toggle being reinitialized to DATA0.
+ */
+ if (_ep_type == IFXUSB_EP_TYPE_INTR || _ep_type == IFXUSB_EP_TYPE_BULK)
+ depctl.b.setd0pid = 1; /* DATA0 */
+
+ ifxusb_wreg(depctl_addr, depctl.d32);
+ IFX_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",ifxusb_rreg(depctl_addr));
+ return;
+}
+
+/*!
+ \brief This function initializes the IFXUSB controller registers for Device mode.
+ This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ request queues.
+ \param _core_if Pointer of core_if structure
+ \param _params parameters to be set
+ */
+void ifxusb_dev_core_init(ifxusb_core_if_t *_core_if, ifxusb_params_t *_params)
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+
+ gusbcfg_data_t usbcfg ={.d32 = 0};
+ gahbcfg_data_t ahbcfg ={.d32 = 0};
+ dcfg_data_t dcfg ={.d32 = 0};
+ grstctl_t resetctl ={.d32 = 0};
+ gotgctl_data_t gotgctl ={.d32 = 0};
+
+ uint32_t dir;
+ int i;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ IFX_DEBUGPL(DBG_CILV, "%s(%p)\n",__func__,_core_if);
+
+ /* Copy Params */
+ _core_if->params.dma_burst_size = _params->dma_burst_size;
+ _core_if->params.speed = _params->speed;
+ if(_params->max_transfer_size < 2048 || _params->max_transfer_size > ((1 << (_core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1) )
+ _core_if->params.max_transfer_size = ((1 << (_core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1);
+ else
+ _core_if->params.max_transfer_size = _params->max_transfer_size;
+
+ if(_params->max_packet_count < 16 || _params->max_packet_count > ((1 << (_core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1) )
+ _core_if->params.max_packet_count= ((1 << (_core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1);
+ else
+ _core_if->params.max_packet_count= _params->max_packet_count;
+ _core_if->params.phy_utmi_width = _params->phy_utmi_width;
+ _core_if->params.turn_around_time_hs = _params->turn_around_time_hs;
+ _core_if->params.turn_around_time_fs = _params->turn_around_time_fs;
+ _core_if->params.timeout_cal_hs = _params->timeout_cal_hs;
+ _core_if->params.timeout_cal_fs = _params->timeout_cal_fs;
+
+ #ifdef __DED_FIFO__
+ _core_if->params.thr_ctl = _params->thr_ctl;
+ _core_if->params.tx_thr_length = _params->tx_thr_length;
+ _core_if->params.rx_thr_length = _params->rx_thr_length;
+ #endif
+
+ /* Reset the Controller */
+ do
+ {
+ while(ifxusb_core_soft_reset( _core_if ))
+ ifxusb_hard_reset(_core_if);
+ } while (ifxusb_is_host_mode(_core_if));
+
+ usbcfg.d32 = ifxusb_rreg(&global_regs->gusbcfg);
+ #if 0
+ #if defined(__DED_FIFO__)
+ usbcfg.b.ForceDevMode = 1;
+ usbcfg.b.ForceHstMode = 0;
+ #endif
+ #endif
+ usbcfg.b.term_sel_dl_pulse = 0;
+ ifxusb_wreg (&global_regs->gusbcfg, usbcfg.d32);
+
+ /* This programming sequence needs to happen in FS mode before any other
+ * programming occurs */
+ /* High speed PHY. */
+ if (!_core_if->phy_init_done)
+ {
+ _core_if->phy_init_done = 1;
+ /* HS PHY parameters. These parameters are preserved
+ * during soft reset so only program the first time. Do
+ * a soft reset immediately after setting phyif. */
+ usbcfg.b.ulpi_utmi_sel = 0; //UTMI+
+ usbcfg.b.phyif = ( _core_if->params.phy_utmi_width == 16)?1:0;
+ ifxusb_wreg( &global_regs->gusbcfg, usbcfg.d32);
+ /* Reset after setting the PHY parameters */
+ ifxusb_core_soft_reset( _core_if );
+ }
+
+ /* Program the GAHBCFG Register.*/
+ switch (_core_if->params.dma_burst_size)
+ {
+ case 0 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_SINGLE;
+ break;
+ case 1 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR;
+ break;
+ case 4 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR4;
+ break;
+ case 8 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR8;
+ break;
+ case 16:
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR16;
+ break;
+ }
+ ahbcfg.b.dmaenable = 1;
+ ifxusb_wreg(&global_regs->gahbcfg, ahbcfg.d32);
+
+ /* Program the GUSBCFG register. */
+ usbcfg.d32 = ifxusb_rreg( &global_regs->gusbcfg );
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ ifxusb_wreg( &global_regs->gusbcfg, usbcfg.d32);
+
+ /* Restart the Phy Clock */
+ ifxusb_wreg(_core_if->pcgcctl, 0);
+
+ /* Device configuration register */
+ ifxusb_dev_init_spd(_core_if);
+ dcfg.d32 = ifxusb_rreg( &_core_if->dev_global_regs->dcfg);
+ dcfg.b.perfrint = IFXUSB_DCFG_FRAME_INTERVAL_80;
+ #if defined(__DED_FIFO__)
+ #if defined(__DESC_DMA__)
+ dcfg.b.descdma = 1;
+ #else
+ dcfg.b.descdma = 0;
+ #endif
+ #endif
+
+ ifxusb_wreg( &_core_if->dev_global_regs->dcfg, dcfg.d32 );
+
+ /* Configure data FIFO sizes */
+ _core_if->params.data_fifo_size = _core_if->hwcfg3.b.dfifo_depth;
+ _core_if->params.rx_fifo_size = ifxusb_rreg(&global_regs->grxfsiz);
+ IFX_DEBUGPL(DBG_CIL, "Initial: FIFO Size=0x%06X\n" , _core_if->params.data_fifo_size);
+ IFX_DEBUGPL(DBG_CIL, " Rx FIFO Size=0x%06X\n", _core_if->params.rx_fifo_size);
+
+ _core_if->params.tx_fifo_size[0]= ifxusb_rreg(&global_regs->gnptxfsiz) >> 16;
+
+ #ifdef __DED_FIFO__
+ for (i=1; i <= _core_if->hwcfg4.b.num_in_eps; i++)
+ _core_if->params.tx_fifo_size[i] =
+ ifxusb_rreg(&global_regs->dptxfsiz_dieptxf[i-1]) >> 16;
+ #else
+ for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
+ _core_if->params.tx_fifo_size[i+1] =
+ ifxusb_rreg(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+ #endif
+
+ #ifdef __DEBUG__
+ #ifdef __DED_FIFO__
+ for (i=0; i <= _core_if->hwcfg4.b.num_in_eps; i++)
+ IFX_DEBUGPL(DBG_CIL, " Tx[%02d] FIFO Size=0x%06X\n",i, _core_if->params.tx_fifo_size[i]);
+ #else
+ IFX_DEBUGPL(DBG_CIL, " NPTx FIFO Size=0x%06X\n", _core_if->params.tx_fifo_size[0]);
+ for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
+ IFX_DEBUGPL(DBG_CIL, " PTx[%02d] FIFO Size=0x%06X\n",i, _core_if->params.tx_fifo_size[i+1]);
+ #endif
+ #endif
+
+ {
+ fifosize_data_t txfifosize;
+ if(_params->data_fifo_size >=0 && _params->data_fifo_size < _core_if->params.data_fifo_size)
+ _core_if->params.data_fifo_size = _params->data_fifo_size;
+
+
+ if(_params->rx_fifo_size >=0 && _params->rx_fifo_size < _core_if->params.rx_fifo_size)
+ _core_if->params.rx_fifo_size = _params->rx_fifo_size;
+ if(_core_if->params.data_fifo_size < _core_if->params.rx_fifo_size)
+ _core_if->params.rx_fifo_size = _core_if->params.data_fifo_size;
+ ifxusb_wreg( &global_regs->grxfsiz, _core_if->params.rx_fifo_size);
+
+ for (i=0; i < MAX_EPS_CHANNELS; i++)
+ if(_params->tx_fifo_size[i] >=0 && _params->tx_fifo_size[i] < _core_if->params.tx_fifo_size[i])
+ _core_if->params.tx_fifo_size[i] = _params->tx_fifo_size[i];
+
+ txfifosize.b.startaddr = _core_if->params.rx_fifo_size;
+ #ifdef __DED_FIFO__
+ if(txfifosize.b.startaddr + _core_if->params.tx_fifo_size[0] > _core_if->params.data_fifo_size)
+ _core_if->params.tx_fifo_size[0]= _core_if->params.data_fifo_size - txfifosize.b.startaddr;
+ txfifosize.b.depth=_core_if->params.tx_fifo_size[0];
+ ifxusb_wreg( &global_regs->gnptxfsiz, txfifosize.d32);
+ txfifosize.b.startaddr += _core_if->params.tx_fifo_size[0];
+ for (i=1; i <= _core_if->hwcfg4.b.num_in_eps; i++)
+ {
+ if(txfifosize.b.startaddr + _core_if->params.tx_fifo_size[i] > _core_if->params.data_fifo_size)
+ _core_if->params.tx_fifo_size[i]= _core_if->params.data_fifo_size - txfifosize.b.startaddr;
+ txfifosize.b.depth=_core_if->params.tx_fifo_size[i];
+ ifxusb_wreg( &global_regs->dptxfsiz_dieptxf[i-1], txfifosize.d32);
+ txfifosize.b.startaddr += _core_if->params.tx_fifo_size[i];
+ }
+ #else
+ if(txfifosize.b.startaddr + _core_if->params.tx_fifo_size[0] > _core_if->params.data_fifo_size)
+ _core_if->params.tx_fifo_size[0]= _core_if->params.data_fifo_size - txfifosize.b.startaddr;
+ txfifosize.b.depth=_core_if->params.tx_fifo_size[0];
+ ifxusb_wreg( &global_regs->gnptxfsiz, txfifosize.d32);
+ txfifosize.b.startaddr += _core_if->params.tx_fifo_size[0];
+ for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
+ {
+ if(txfifosize.b.startaddr + _core_if->params.tx_fifo_size[i+1] > _core_if->params.data_fifo_size)
+ _core_if->params.tx_fifo_size[i+1]= _core_if->params.data_fifo_size - txfifosize.b.startaddr;
+ //txfifosize.b.depth=_core_if->params.tx_fifo_size[i+1];
+ ifxusb_wreg( &global_regs->dptxfsiz_dieptxf[i], txfifosize.d32);
+ txfifosize.b.startaddr += _core_if->params.tx_fifo_size[i+1];
+ }
+ #endif
+ }
+
+ #ifdef __DEBUG__
+ {
+ fifosize_data_t fifosize;
+ IFX_DEBUGPL(DBG_CIL, "Result : FIFO Size=0x%06X\n" , _core_if->params.data_fifo_size);
+
+ IFX_DEBUGPL(DBG_CIL, " Rx FIFO =0x%06X Sz=0x%06X\n", 0,ifxusb_rreg(&global_regs->grxfsiz));
+ #ifdef __DED_FIFO__
+ fifosize.d32=ifxusb_rreg(&global_regs->gnptxfsiz);
+ IFX_DEBUGPL(DBG_CIL, " Tx[00] FIFO =0x%06X Sz=0x%06X\n", fifosize.b.startaddr,fifosize.b.depth);
+ for (i=1; i <= _core_if->hwcfg4.b.num_in_eps; i++)
+ {
+ fifosize.d32=ifxusb_rreg(&global_regs->dptxfsiz_dieptxf[i-1]);
+ IFX_DEBUGPL(DBG_CIL, " Tx[%02d] FIFO 0x%06X Sz=0x%06X\n",i, fifosize.b.startaddr,fifosize.b.depth);
+ }
+ #else
+ fifosize.d32=ifxusb_rreg(&global_regs->gnptxfsiz);
+ IFX_DEBUGPL(DBG_CIL, " NPTx FIFO =0x%06X Sz=0x%06X\n", fifosize.b.startaddr,fifosize.b.depth);
+ for (i=0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
+ {
+ fifosize.d32=ifxusb_rreg(&global_regs->dptxfsiz_dieptxf[i]);
+ IFX_DEBUGPL(DBG_CIL, " PTx[%02d] FIFO 0x%06X Sz=0x%06X\n",i, fifosize.b.startaddr,fifosize.b.depth);
+ }
+ #endif
+ }
+ #endif
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ gotgctl.b.hstsethnpen = 1;
+ ifxusb_mreg( &global_regs->gotgctl, gotgctl.d32, 0);
+
+ /* Flush the FIFOs */
+ ifxusb_flush_tx_fifo(_core_if, 0x10); /* all Tx FIFOs */
+ ifxusb_flush_rx_fifo(_core_if);
+
+ /* Flush the Learning Queue. */
+ resetctl.b.intknqflsh = 1;
+ ifxusb_wreg( &global_regs->grstctl, resetctl.d32);
+
+ /* Clear all pending Device Interrupts */
+ ifxusb_wreg( &_core_if->dev_global_regs->diepmsk , 0 );
+ ifxusb_wreg( &_core_if->dev_global_regs->doepmsk , 0 );
+ ifxusb_wreg( &_core_if->dev_global_regs->daint , 0xFFFFFFFF );
+ ifxusb_wreg( &_core_if->dev_global_regs->daintmsk, 0 );
+
+ dir=_core_if->hwcfg1.d32;
+ for (i=0; i <= _core_if->hwcfg2.b.num_dev_ep ; i++,dir>>=2)
+ {
+ depctl_data_t depctl;
+ if((dir&0x03)==0 || (dir&0x03) ==1)
+ {
+ depctl.d32 = ifxusb_rreg(&_core_if->in_ep_regs[i]->diepctl);
+ if (depctl.b.epena)
+ {
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ }
+ else
+ depctl.d32 = 0;
+ ifxusb_wreg( &_core_if->in_ep_regs[i]->diepctl, depctl.d32);
+ #ifndef __DESC_DMA__
+ ifxusb_wreg( &_core_if->in_ep_regs[i]->dieptsiz, 0);
+ #endif
+ ifxusb_wreg( &_core_if->in_ep_regs[i]->diepdma, 0);
+ ifxusb_wreg( &_core_if->in_ep_regs[i]->diepint, 0xFF);
+ }
+
+ if((dir&0x03)==0 || (dir&0x03) ==2)
+ {
+ depctl.d32 = ifxusb_rreg(&_core_if->out_ep_regs[i]->doepctl);
+ if (depctl.b.epena)
+ {
+ depctl.d32 = 0;
+ depctl.b.epdis = 1;
+ depctl.b.snak = 1;
+ }
+ else
+ depctl.d32 = 0;
+ ifxusb_wreg( &_core_if->out_ep_regs[i]->doepctl, depctl.d32);
+ #ifndef __DESC_DMA__
+ ifxusb_wreg( &_core_if->out_ep_regs[i]->doeptsiz, 0);
+ #endif
+ ifxusb_wreg( &_core_if->out_ep_regs[i]->doepdma, 0);
+ ifxusb_wreg( &_core_if->out_ep_regs[i]->doepint, 0xFF);
+ }
+ }
+}
+
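A minimal userspace sketch (illustrative only, with made-up sizes) of the FIFO packing rule that ifxusb_dev_core_init() applies above: the Rx FIFO occupies the start of the data FIFO RAM, and each Tx FIFO is placed at the next free word address and clamped so it never runs past the total DFIFO depth.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t total = 2048;                  /* hypothetical DFIFO depth in dwords */
		uint32_t rx    = 512;                   /* Rx FIFO dwords                     */
		uint32_t tx[3] = { 512, 512, 1024 };    /* requested Tx FIFO dwords           */
		uint32_t start = rx;                    /* Tx FIFOs start right after Rx      */

		for (int i = 0; i < 3; i++) {
			if (start + tx[i] > total)      /* clamp to the remaining space       */
				tx[i] = total - start;
			printf("Tx FIFO %d: start=%u depth=%u\n", i, start, tx[i]);
			start += tx[i];
		}
		return 0;
	}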
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_h.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_h.c
new file mode 100644
index 0000000000..0f47ecd1ab
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_cif_h.c
@@ -0,0 +1,846 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_cif_h.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : The Core Interface provides basic services for accessing and
+ ** managing the IFX USB hardware. These services are used by the
+ ** Host Controller Driver only.
+ *****************************************************************************/
+
+/*!
+ \file ifxusb_cif_h.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the interface to the IFX USB Core.
+*/
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+#ifdef __DEBUG__
+ #include <linux/jiffies.h>
+#endif
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#if defined(__UEIP__)
+// #include <asm/ifx/ifx_board.h>
+#endif
+
+//#include <asm/ifx/ifx_gpio.h>
+#if defined(__UEIP__)
+// #include <asm/ifx/ifx_led.h>
+#endif
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+
+#include "ifxhcd.h"
+
+#if !defined(__UEIP__)
+ #undef __USING_LED_AS_GPIO__
+#endif
+
+
+/*!
+ \brief This function enables the Host mode interrupts.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_host_enable_interrupts(ifxusb_core_if_t *_core_if)
+{
+ gint_data_t intr_mask ={ .d32 = 0};
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+
+ IFX_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+ /* Clear any pending OTG Interrupts */
+ ifxusb_wreg( &global_regs->gotgint, 0xFFFFFFFF);
+
+ /* Clear any pending interrupts */
+ ifxusb_wreg( &global_regs->gintsts, 0xFFFFFFFF);
+
+ /* Enable the interrupts in the GINTMSK.*/
+
+ /* Common interrupts */
+ intr_mask.b.modemismatch = 1;
+ intr_mask.b.conidstschng = 1;
+ intr_mask.b.wkupintr = 1;
+ intr_mask.b.disconnect = 1;
+ intr_mask.b.usbsuspend = 1;
+
+ /* Host interrupts */
+ intr_mask.b.sofintr = 1;
+ intr_mask.b.portintr = 1;
+ intr_mask.b.hcintr = 1;
+
+ ifxusb_mreg( &global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+ IFX_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, ifxusb_rreg( &global_regs->gintmsk));
+}
+
+/*!
+ \brief This function disables the Host mode interrupts.
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_host_disable_interrupts(ifxusb_core_if_t *_core_if)
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+
+ IFX_DEBUGPL(DBG_CILV, "%s()\n", __func__);
+
+ #if 1
+ ifxusb_wreg( &global_regs->gintmsk, 0);
+ #else
+ /* Common interrupts */
+ {
+ gint_data_t intr_mask ={.d32 = 0};
+ intr_mask.b.modemismatch = 1;
+ intr_mask.b.rxstsqlvl = 1;
+ intr_mask.b.conidstschng = 1;
+ intr_mask.b.wkupintr = 1;
+ intr_mask.b.disconnect = 1;
+ intr_mask.b.usbsuspend = 1;
+
+ /* Host interrupts */
+ intr_mask.b.sofintr = 1;
+ intr_mask.b.portintr = 1;
+ intr_mask.b.hcintr = 1;
+ intr_mask.b.ptxfempty = 1;
+ intr_mask.b.nptxfempty = 1;
+ ifxusb_mreg(&global_regs->gintmsk, intr_mask.d32, 0);
+ }
+ #endif
+}
+
+/*!
+ \brief This function initializes the IFXUSB controller registers for Host mode.
+ This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ request queues.
+ \param _core_if Pointer of core_if structure
+ \param _params parameters to be set
+ */
+void ifxusb_host_core_init(ifxusb_core_if_t *_core_if, ifxusb_params_t *_params)
+{
+ ifxusb_core_global_regs_t *global_regs = _core_if->core_global_regs;
+
+ gusbcfg_data_t usbcfg ={.d32 = 0};
+ gahbcfg_data_t ahbcfg ={.d32 = 0};
+ gotgctl_data_t gotgctl ={.d32 = 0};
+
+ int i;
+
+ IFX_DEBUGPL(DBG_CILV, "%s(%p)\n",__func__,_core_if);
+
+ /* Copy Params */
+
+ _core_if->params.dma_burst_size = _params->dma_burst_size;
+ _core_if->params.speed = _params->speed;
+ _core_if->params.max_transfer_size = _params->max_transfer_size;
+ _core_if->params.max_packet_count = _params->max_packet_count;
+ _core_if->params.phy_utmi_width = _params->phy_utmi_width;
+ _core_if->params.turn_around_time_hs = _params->turn_around_time_hs;
+ _core_if->params.turn_around_time_fs = _params->turn_around_time_fs;
+ _core_if->params.timeout_cal_hs = _params->timeout_cal_hs;
+ _core_if->params.timeout_cal_fs = _params->timeout_cal_fs;
+
+ /* Reset the Controller */
+ do
+ {
+ while(ifxusb_core_soft_reset( _core_if ))
+ ifxusb_hard_reset(_core_if);
+ } while (ifxusb_is_device_mode(_core_if));
+
+ usbcfg.d32 = ifxusb_rreg(&global_regs->gusbcfg);
+// usbcfg.b.ulpi_ext_vbus_drv = 1;
+ usbcfg.b.term_sel_dl_pulse = 0;
+ ifxusb_wreg (&global_regs->gusbcfg, usbcfg.d32);
+
+ /* This programming sequence needs to happen in FS mode before any other
+ * programming occurs */
+ /* High speed PHY. */
+ if (!_core_if->phy_init_done)
+ {
+ _core_if->phy_init_done = 1;
+ /* HS PHY parameters. These parameters are preserved
+ * during soft reset so only program the first time. Do
+ * a soft reset immediately after setting phyif. */
+ usbcfg.b.ulpi_utmi_sel = 0; //UTMI+
+ usbcfg.b.phyif = ( _core_if->params.phy_utmi_width == 16)?1:0;
+ ifxusb_wreg( &global_regs->gusbcfg, usbcfg.d32);
+ /* Reset after setting the PHY parameters */
+ ifxusb_core_soft_reset( _core_if );
+ }
+
+ usbcfg.d32 = ifxusb_rreg(&global_regs->gusbcfg);
+// usbcfg.b.ulpi_fsls = 0;
+// usbcfg.b.ulpi_clk_sus_m = 0;
+ ifxusb_wreg(&global_regs->gusbcfg, usbcfg.d32);
+
+ /* Program the GAHBCFG Register.*/
+ switch (_core_if->params.dma_burst_size)
+ {
+ case 0 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_SINGLE;
+ break;
+ case 1 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR;
+ break;
+ case 4 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR4;
+ break;
+ case 8 :
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR8;
+ break;
+ case 16:
+ ahbcfg.b.hburstlen = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR16;
+ break;
+ }
+ ahbcfg.b.dmaenable = 1;
+ ifxusb_wreg(&global_regs->gahbcfg, ahbcfg.d32);
+
+ /* Program the GUSBCFG register. */
+ usbcfg.d32 = ifxusb_rreg( &global_regs->gusbcfg );
+ usbcfg.b.hnpcap = 0;
+ usbcfg.b.srpcap = 0;
+ ifxusb_wreg( &global_regs->gusbcfg, usbcfg.d32);
+
+ /* Restart the Phy Clock */
+ ifxusb_wreg(_core_if->pcgcctl, 0);
+
+ /* Initialize Host Configuration Register */
+ {
+ hcfg_data_t hcfg;
+ hcfg.d32 = ifxusb_rreg(&_core_if->host_global_regs->hcfg);
+ hcfg.b.fslspclksel = IFXUSB_HCFG_30_60_MHZ;
+ if (_params->speed == IFXUSB_PARAM_SPEED_FULL)
+ hcfg.b.fslssupp = 1;
+ ifxusb_wreg(&_core_if->host_global_regs->hcfg, hcfg.d32);
+ }
+
+ _core_if->params.host_channels=(_core_if->hwcfg2.b.num_host_chan + 1);
+
+ if(_params->host_channels>0 && _params->host_channels < _core_if->params.host_channels)
+ _core_if->params.host_channels = _params->host_channels;
+
+ /* Configure data FIFO sizes */
+ _core_if->params.data_fifo_size = _core_if->hwcfg3.b.dfifo_depth;
+ _core_if->params.rx_fifo_size = ifxusb_rreg(&global_regs->grxfsiz);
+ _core_if->params.nperio_tx_fifo_size= ifxusb_rreg(&global_regs->gnptxfsiz) >> 16;
+ _core_if->params.perio_tx_fifo_size = ifxusb_rreg(&global_regs->hptxfsiz) >> 16;
+ IFX_DEBUGPL(DBG_CIL, "Initial: FIFO Size=0x%06X\n" , _core_if->params.data_fifo_size);
+ IFX_DEBUGPL(DBG_CIL, " Rx FIFO Size=0x%06X\n", _core_if->params.rx_fifo_size);
+ IFX_DEBUGPL(DBG_CIL, " NPTx FIFO Size=0x%06X\n", _core_if->params.nperio_tx_fifo_size);
+ IFX_DEBUGPL(DBG_CIL, " PTx FIFO Size=0x%06X\n", _core_if->params.perio_tx_fifo_size);
+
+ {
+ fifosize_data_t txfifosize;
+ if(_params->data_fifo_size >=0 && _params->data_fifo_size < _core_if->params.data_fifo_size)
+ _core_if->params.data_fifo_size = _params->data_fifo_size;
+
+ if( _params->rx_fifo_size >= 0 && _params->rx_fifo_size < _core_if->params.rx_fifo_size)
+ _core_if->params.rx_fifo_size = _params->rx_fifo_size;
+ if( _params->nperio_tx_fifo_size >=0 && _params->nperio_tx_fifo_size < _core_if->params.nperio_tx_fifo_size)
+ _core_if->params.nperio_tx_fifo_size = _params->nperio_tx_fifo_size;
+ if( _params->perio_tx_fifo_size >=0 && _params->perio_tx_fifo_size < _core_if->params.perio_tx_fifo_size)
+ _core_if->params.perio_tx_fifo_size = _params->perio_tx_fifo_size;
+
+ if(_core_if->params.data_fifo_size < _core_if->params.rx_fifo_size)
+ _core_if->params.rx_fifo_size = _core_if->params.data_fifo_size;
+ ifxusb_wreg( &global_regs->grxfsiz, _core_if->params.rx_fifo_size);
+ txfifosize.b.startaddr = _core_if->params.rx_fifo_size;
+
+ if(txfifosize.b.startaddr + _core_if->params.nperio_tx_fifo_size > _core_if->params.data_fifo_size)
+ _core_if->params.nperio_tx_fifo_size = _core_if->params.data_fifo_size - txfifosize.b.startaddr;
+ txfifosize.b.depth=_core_if->params.nperio_tx_fifo_size;
+ ifxusb_wreg( &global_regs->gnptxfsiz, txfifosize.d32);
+ txfifosize.b.startaddr += _core_if->params.nperio_tx_fifo_size;
+
+ if(txfifosize.b.startaddr + _core_if->params.perio_tx_fifo_size > _core_if->params.data_fifo_size)
+ _core_if->params.perio_tx_fifo_size = _core_if->params.data_fifo_size - txfifosize.b.startaddr;
+ txfifosize.b.depth=_core_if->params.perio_tx_fifo_size;
+ ifxusb_wreg( &global_regs->hptxfsiz, txfifosize.d32);
+ txfifosize.b.startaddr += _core_if->params.perio_tx_fifo_size;
+ }
+
+ #ifdef __DEBUG__
+ {
+ fifosize_data_t fifosize;
+ IFX_DEBUGPL(DBG_CIL, "Result : FIFO Size=0x%06X\n" , _core_if->params.data_fifo_size);
+
+ fifosize.d32=ifxusb_rreg(&global_regs->grxfsiz);
+ IFX_DEBUGPL(DBG_CIL, " Rx FIFO =0x%06X 0x%06X\n", fifosize.b.startaddr,fifosize.b.depth);
+ fifosize.d32=ifxusb_rreg(&global_regs->gnptxfsiz);
+ IFX_DEBUGPL(DBG_CIL, " NPTx FIFO =0x%06X 0x%06X\n", fifosize.b.startaddr,fifosize.b.depth);
+ fifosize.d32=ifxusb_rreg(&global_regs->hptxfsiz);
+ IFX_DEBUGPL(DBG_CIL, " PTx FIFO =0x%06X 0x%06X\n", fifosize.b.startaddr,fifosize.b.depth);
+ }
+ #endif
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ gotgctl.b.hstsethnpen = 1;
+ ifxusb_mreg( &global_regs->gotgctl, gotgctl.d32, 0);
+
+ /* Flush the FIFOs */
+ ifxusb_flush_tx_fifo(_core_if, 0x10); /* all Tx FIFOs */
+ ifxusb_flush_rx_fifo(_core_if);
+
+ for (i = 0; i < _core_if->hwcfg2.b.num_host_chan + 1; i++)
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = ifxusb_rreg(&_core_if->hc_regs[i]->hcchar);
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ ifxusb_wreg(&_core_if->hc_regs[i]->hcchar, hcchar.d32);
+ }
+ /* Halt all channels to put them into a known state. */
+ for (i = 0; i < _core_if->hwcfg2.b.num_host_chan + 1; i++)
+ {
+ hcchar_data_t hcchar;
+ int count = 0;
+
+ hcchar.d32 = ifxusb_rreg(&_core_if->hc_regs[i]->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ ifxusb_wreg(&_core_if->hc_regs[i]->hcchar, hcchar.d32);
+
+ IFX_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
+ do{
+ hcchar.d32 = ifxusb_rreg(&_core_if->hc_regs[i]->hcchar);
+ if (++count > 1000)
+ {
+ IFX_ERROR("%s: Unable to clear halt on channel %d\n", __func__, i);
+ break;
+ }
+ } while (hcchar.b.chen);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#if defined(__UEIP__)
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ int ifxusb_vbus_status =-1;
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS1) || defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ int ifxusb_vbus1_status =-1;
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS2) || defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ int ifxusb_vbus2_status =-1;
+ #endif
+
+ #if defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ static void *g_usb_vbus_trigger = NULL;
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ static void *g_usb_vbus1_trigger = NULL;
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ static void *g_usb_vbus2_trigger = NULL;
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ int ifxusb_vbus_gpio_inited=0;
+ #endif
+
+#else //defined(__UEIP__)
+ int ifxusb_vbus_gpio_inited=0;
+#endif
+
+//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+void ifxusb_vbus_init(ifxusb_core_if_t *_core_if)
+{
+ #if defined(__UEIP__)
+ #if defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ if ( !g_usb_vbus_trigger )
+ {
+ ifx_led_trigger_register("USB_VBUS", &g_usb_vbus_trigger);
+ if ( g_usb_vbus_trigger != NULL )
+ {
+ struct ifx_led_trigger_attrib attrib = {0};
+ attrib.delay_on = 0;
+ attrib.delay_off = 0;
+ attrib.timeout = 0;
+ attrib.def_value = 0;
+ attrib.flags = IFX_LED_TRIGGER_ATTRIB_DELAY_ON | IFX_LED_TRIGGER_ATTRIB_DELAY_OFF | IFX_LED_TRIGGER_ATTRIB_TIMEOUT | IFX_LED_TRIGGER_ATTRIB_DEF_VALUE;
+ IFX_DEBUGP("Reg USB power!!\n");
+ ifx_led_trigger_set_attrib(g_usb_vbus_trigger, &attrib);
+ ifxusb_vbus_status =0;
+ }
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ if(_core_if->core_no==0 && !g_usb_vbus1_trigger )
+ {
+ ifx_led_trigger_register("USB_VBUS1", &g_usb_vbus1_trigger);
+ if ( g_usb_vbus1_trigger != NULL )
+ {
+ struct ifx_led_trigger_attrib attrib = {0};
+ attrib.delay_on = 0;
+ attrib.delay_off = 0;
+ attrib.timeout = 0;
+ attrib.def_value = 0;
+ attrib.flags = IFX_LED_TRIGGER_ATTRIB_DELAY_ON | IFX_LED_TRIGGER_ATTRIB_DELAY_OFF | IFX_LED_TRIGGER_ATTRIB_TIMEOUT | IFX_LED_TRIGGER_ATTRIB_DEF_VALUE;
+ IFX_DEBUGP("Reg USB1 power!!\n");
+ ifx_led_trigger_set_attrib(g_usb_vbus1_trigger, &attrib);
+ ifxusb_vbus1_status =0;
+ }
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ if(_core_if->core_no==1 && !g_usb_vbus2_trigger )
+ {
+ ifx_led_trigger_register("USB_VBUS2", &g_usb_vbus2_trigger);
+ if ( g_usb_vbus2_trigger != NULL )
+ {
+ struct ifx_led_trigger_attrib attrib = {0};
+ attrib.delay_on = 0;
+ attrib.delay_off = 0;
+ attrib.timeout = 0;
+ attrib.def_value = 0;
+ attrib.flags = IFX_LED_TRIGGER_ATTRIB_DELAY_ON | IFX_LED_TRIGGER_ATTRIB_DELAY_OFF | IFX_LED_TRIGGER_ATTRIB_TIMEOUT | IFX_LED_TRIGGER_ATTRIB_DEF_VALUE;
+ IFX_DEBUGP("Reg USB2 power!!\n");
+ ifx_led_trigger_set_attrib(g_usb_vbus2_trigger, &attrib);
+ ifxusb_vbus2_status =0;
+ }
+ }
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ /* == 20100712 AVM/WK use gpio_inited as bitmask == */
+ if(ifxusb_vbus_gpio_inited == 0)
+ {
+ if(!ifx_gpio_register(IFX_GPIO_MODULE_USB))
+ {
+ IFX_DEBUGP("Register USB VBus through GPIO OK!!\n");
+ #ifdef IFX_GPIO_USB_VBUS
+ ifxusb_vbus_status =0;
+ #endif //IFX_GPIO_USB_VBUS
+ #ifdef IFX_GPIO_USB_VBUS1
+ ifxusb_vbus1_status=0;
+ #endif //IFX_GPIO_USB_VBUS1
+ #ifdef IFX_GPIO_USB_VBUS2
+ ifxusb_vbus2_status=0;
+ #endif //IFX_GPIO_USB_VBUS2
+ ifxusb_vbus_gpio_inited|= (1<<_core_if->core_no);
+ }
+ else
+ IFX_PRINT("Register USB VBus Failed!!\n");
+ } else {
+ ifxusb_vbus_gpio_inited|= (1<<_core_if->core_no);
+ }
+ #endif //defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ #endif //defined(__UEIP__)
+}
+
+void ifxusb_vbus_free(ifxusb_core_if_t *_core_if)
+{
+ #if defined(__UEIP__)
+ #if defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ if ( g_usb_vbus_trigger )
+ {
+ ifx_led_trigger_deregister(g_usb_vbus_trigger);
+ g_usb_vbus_trigger = NULL;
+ ifxusb_vbus_status =-1;
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ if(_core_if->core_no==0 && g_usb_vbus1_trigger )
+ {
+ ifx_led_trigger_deregister(g_usb_vbus1_trigger);
+ g_usb_vbus1_trigger = NULL;
+ ifxusb_vbus1_status =-1;
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ if(_core_if->core_no==1 && g_usb_vbus2_trigger )
+ {
+ ifx_led_trigger_deregister(g_usb_vbus2_trigger);
+ g_usb_vbus2_trigger = NULL;
+ ifxusb_vbus2_status =-1;
+ }
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ /* == 20100712 AVM/WK use gpio_inited as bitmask == */
+ if((ifxusb_vbus_gpio_inited & (1<<_core_if->core_no)) == ifxusb_vbus_gpio_inited)
+ {
+ ifx_gpio_deregister(IFX_GPIO_MODULE_USB);
+ #ifdef IFX_GPIO_USB_VBUS
+ ifxusb_vbus_status =-1;
+ #endif //IFX_GPIO_USB_VBUS
+ #ifdef IFX_GPIO_USB_VBUS1
+ ifxusb_vbus1_status=-1;
+ #endif //IFX_GPIO_USB_VBUS1
+ #ifdef IFX_GPIO_USB_VBUS2
+ ifxusb_vbus2_status=-1;
+ #endif //IFX_GPIO_USB_VBUS2
+ }
+ ifxusb_vbus_gpio_inited &= ~(1<<_core_if->core_no);
+ #endif //defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ #endif //defined(__UEIP__)
+}
+
+
+/*!
+ \brief Turn on the USB 5V VBus Power
+ \param _core_if Pointer of core_if structure
+ */
+void ifxusb_vbus_on(ifxusb_core_if_t *_core_if)
+{
+ IFX_DEBUGP("SENDING VBus POWER UP\n");
+ #if defined(__UEIP__)
+ #if defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ if ( g_usb_vbus_trigger && ifxusb_vbus_status==0)
+ {
+ ifx_led_trigger_activate(g_usb_vbus_trigger);
+ IFX_DEBUGP("Enable USB power!!\n");
+ ifxusb_vbus_status=1;
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ if(_core_if->core_no==0 && g_usb_vbus1_trigger && ifxusb_vbus1_status==0)
+ {
+ ifx_led_trigger_activate(g_usb_vbus1_trigger);
+ IFX_DEBUGP("Enable USB1 power!!\n");
+ ifxusb_vbus1_status=1;
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ if(_core_if->core_no==1 && g_usb_vbus2_trigger && ifxusb_vbus2_status==0)
+ {
+ ifx_led_trigger_activate(g_usb_vbus2_trigger);
+ IFX_DEBUGP("Enable USB2 power!!\n");
+ ifxusb_vbus2_status=1;
+ }
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ if(ifxusb_vbus_gpio_inited)
+ {
+ #if defined(IFX_GPIO_USB_VBUS)
+ if(ifxusb_vbus_status==0)
+ {
+ ifx_gpio_output_set(IFX_GPIO_USB_VBUS,IFX_GPIO_MODULE_USB);
+ ifxusb_vbus_status=1;
+ }
+ #endif
+ #if defined(IFX_GPIO_USB_VBUS1)
+ if(_core_if->core_no==0 && ifxusb_vbus1_status==0)
+ {
+ ifx_gpio_output_set(IFX_GPIO_USB_VBUS1,IFX_GPIO_MODULE_USB);
+ ifxusb_vbus1_status=1;
+ }
+ #endif
+ #if defined(IFX_GPIO_USB_VBUS2)
+ if(_core_if->core_no==1 && ifxusb_vbus2_status==0)
+ {
+ ifx_gpio_output_set(IFX_GPIO_USB_VBUS2,IFX_GPIO_MODULE_USB);
+ ifxusb_vbus2_status=1;
+ }
+ #endif
+ }
+ #endif //defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ #else
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_vbus_status=1;
+ //usb_set_vbus_on();
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ set_bit (4, (volatile unsigned long *)AMAZON_SE_GPIO_P0_OUT);
+ ifxusb_vbus_status=1;
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ if (bsp_port_reserve_pin(1, 13, PORT_MODULE_USB) != 0)
+ {
+ IFX_PRINT("Can't enable USB1 5.5V power!!\n");
+ return;
+ }
+ bsp_port_clear_altsel0(1, 13, PORT_MODULE_USB);
+ bsp_port_clear_altsel1(1, 13, PORT_MODULE_USB);
+ bsp_port_set_dir_out(1, 13, PORT_MODULE_USB);
+ bsp_port_set_pudsel(1, 13, PORT_MODULE_USB);
+ bsp_port_set_puden(1, 13, PORT_MODULE_USB);
+ bsp_port_set_output(1, 13, PORT_MODULE_USB);
+ IFX_DEBUGP("Enable USB1 power!!\n");
+ ifxusb_vbus1_status=1;
+ }
+ else
+ {
+ if (bsp_port_reserve_pin(3, 4, PORT_MODULE_USB) != 0)
+ {
+ IFX_PRINT("Can't enable USB2 5.5V power!!\n");
+ return;
+ }
+ bsp_port_clear_altsel0(3, 4, PORT_MODULE_USB);
+ bsp_port_clear_altsel1(3, 4, PORT_MODULE_USB);
+ bsp_port_set_dir_out(3, 4, PORT_MODULE_USB);
+ bsp_port_set_pudsel(3, 4, PORT_MODULE_USB);
+ bsp_port_set_puden(3, 4, PORT_MODULE_USB);
+ bsp_port_set_output(3, 4, PORT_MODULE_USB);
+ IFX_DEBUGP("Enable USB2 power!!\n");
+ ifxusb_vbus2_status=1;
+ }
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ {
+ ifxusb_vbus1_status=1;
+ }
+ else
+ {
+ ifxusb_vbus2_status=1;
+ }
+ #endif //defined(__IS_VR9__)
+ #endif //defined(__UEIP__)
+}
+
+
+/*!
+ \brief Turn off the USB 5V VBus Power
+ \param _core_if Pointer to the core_if structure
+ */
+void ifxusb_vbus_off(ifxusb_core_if_t *_core_if)
+{
+ IFX_DEBUGP("SENDING VBus POWER OFF\n");
+
+ #if defined(__UEIP__)
+ #if defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ if ( g_usb_vbus_trigger && ifxusb_vbus_status==1)
+ {
+ ifx_led_trigger_deactivate(g_usb_vbus_trigger);
+ IFX_DEBUGP("Disable USB power!!\n");
+ ifxusb_vbus_status=0;
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ if(_core_if->core_no==0 && g_usb_vbus1_trigger && ifxusb_vbus1_status==1)
+ {
+ ifx_led_trigger_deactivate(g_usb_vbus1_trigger);
+ IFX_DEBUGP("Disable USB1 power!!\n");
+ ifxusb_vbus1_status=0;
+ }
+ #endif
+ #if defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ if(_core_if->core_no==1 && g_usb_vbus2_trigger && ifxusb_vbus2_status==1)
+ {
+ ifx_led_trigger_deactivate(g_usb_vbus2_trigger);
+ IFX_DEBUGP("Disable USB2 power!!\n");
+ ifxusb_vbus2_status=0;
+ }
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ if(ifxusb_vbus_gpio_inited)
+ {
+ #if defined(IFX_GPIO_USB_VBUS)
+ if(ifxusb_vbus_status==1)
+ {
+ ifx_gpio_output_clear(IFX_GPIO_USB_VBUS,IFX_GPIO_MODULE_USB);
+ ifxusb_vbus_status=0;
+ }
+ #endif
+ #if defined(IFX_GPIO_USB_VBUS1)
+ if(_core_if->core_no==0 && ifxusb_vbus1_status==1)
+ {
+ ifx_gpio_output_clear(IFX_GPIO_USB_VBUS1,IFX_GPIO_MODULE_USB);
+ ifxusb_vbus1_status=0;
+ }
+ #endif
+ #if defined(IFX_GPIO_USB_VBUS2)
+ if(_core_if->core_no==1 && ifxusb_vbus2_status==1)
+ {
+ ifx_gpio_output_clear(IFX_GPIO_USB_VBUS2,IFX_GPIO_MODULE_USB);
+ ifxusb_vbus2_status=0;
+ }
+ #endif
+ }
+ #endif //defined(IFX_GPIO_USB_VBUS) || defined(IFX_GPIO_USB_VBUS1) || defined(IFX_GPIO_USB_VBUS2)
+ #else
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ ifxusb_vbus_status=0;
+ //usb_set_vbus_on();
+ #endif //defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+ #if defined(__IS_AMAZON_SE__)
+ clear_bit (4, (volatile unsigned long *)AMAZON_SE_GPIO_P0_OUT);
+ ifxusb_vbus_status=0;
+ #endif //defined(__IS_AMAZON_SE__)
+ #if defined(__IS_AR9__)
+ if(_core_if->core_no==0)
+ {
+ if (bsp_port_reserve_pin(1, 13, PORT_MODULE_USB) != 0) {
+ IFX_PRINT("Can't Disable USB1 5.5V power!!\n");
+ return;
+ }
+ bsp_port_clear_altsel0(1, 13, PORT_MODULE_USB);
+ bsp_port_clear_altsel1(1, 13, PORT_MODULE_USB);
+ bsp_port_set_dir_out(1, 13, PORT_MODULE_USB);
+ bsp_port_set_pudsel(1, 13, PORT_MODULE_USB);
+ bsp_port_set_puden(1, 13, PORT_MODULE_USB);
+ bsp_port_clear_output(1, 13, PORT_MODULE_USB);
+ IFX_DEBUGP("Disable USB1 power!!\n");
+ ifxusb_vbus1_status=0;
+ }
+ else
+ {
+ if (bsp_port_reserve_pin(3, 4, PORT_MODULE_USB) != 0) {
+ IFX_PRINT("Can't Disable USB2 5.5V power!!\n");
+ return;
+ }
+ bsp_port_clear_altsel0(3, 4, PORT_MODULE_USB);
+ bsp_port_clear_altsel1(3, 4, PORT_MODULE_USB);
+ bsp_port_set_dir_out(3, 4, PORT_MODULE_USB);
+ bsp_port_set_pudsel(3, 4, PORT_MODULE_USB);
+ bsp_port_set_puden(3, 4, PORT_MODULE_USB);
+ bsp_port_clear_output(3, 4, PORT_MODULE_USB);
+ IFX_DEBUGP("Disable USB2 power!!\n");
+
+ ifxusb_vbus2_status=0;
+ }
+ #endif //defined(__IS_AR9__)
+ #if defined(__IS_VR9__)
+ if(_core_if->core_no==0)
+ {
+ ifxusb_vbus1_status=0;
+ }
+ else
+ {
+ ifxusb_vbus2_status=0;
+ }
+ #endif //defined(__IS_VR9__)
+ #endif //defined(__UEIP__)
+}
+
+
+
+/*!
+ \brief Read Current VBus status
+ \param _core_if Pointer to the core_if structure
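+ \return 1 if VBus is currently driven, 0 if it is off, -1 if the state is unknown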
+ */
+int ifxusb_vbus(ifxusb_core_if_t *_core_if)
+{
+#if defined(__UEIP__)
+ #if defined(IFX_GPIO_USB_VBUS) || defined(IFX_LEDGPIO_USB_VBUS) || defined(IFX_LEDLED_USB_VBUS)
+ return (ifxusb_vbus_status);
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS1) || defined(IFX_LEDGPIO_USB_VBUS1) || defined(IFX_LEDLED_USB_VBUS1)
+ if(_core_if->core_no==0)
+ return (ifxusb_vbus1_status);
+ #endif
+
+ #if defined(IFX_GPIO_USB_VBUS2) || defined(IFX_LEDGPIO_USB_VBUS2) || defined(IFX_LEDLED_USB_VBUS2)
+ if(_core_if->core_no==1)
+ return (ifxusb_vbus2_status);
+ #endif
+#else //defined(__UEIP__)
+#endif
+ return -1;
+}
+
+#if defined(__UEIP__)
+#else
+ #if defined(__IS_TWINPASS__)
+ #define ADSL_BASE 0x20000
+ #define CRI_BASE 0x31F00
+ #define CRI_CCR0 CRI_BASE + 0x00
+ #define CRI_CCR1 CRI_BASE + 0x01*4
+ #define CRI_CDC0 CRI_BASE + 0x02*4
+ #define CRI_CDC1 CRI_BASE + 0x03*4
+ #define CRI_RST CRI_BASE + 0x04*4
+ #define CRI_MASK0 CRI_BASE + 0x05*4
+ #define CRI_MASK1 CRI_BASE + 0x06*4
+ #define CRI_MASK2 CRI_BASE + 0x07*4
+ #define CRI_STATUS0 CRI_BASE + 0x08*4
+ #define CRI_STATUS1 CRI_BASE + 0x09*4
+ #define CRI_STATUS2 CRI_BASE + 0x0A*4
+ #define CRI_AMASK0 CRI_BASE + 0x0B*4
+ #define CRI_AMASK1 CRI_BASE + 0x0C*4
+ #define CRI_UPDCTL CRI_BASE + 0x0D*4
+ #define CRI_MADST CRI_BASE + 0x0E*4
+ // 0x0f is missing
+ #define CRI_EVENT0 CRI_BASE + 0x10*4
+ #define CRI_EVENT1 CRI_BASE + 0x11*4
+ #define CRI_EVENT2 CRI_BASE + 0x12*4
+
+ #define IRI_I_ENABLE 0x32000
+ #define STY_SMODE 0x3c004
+ #define AFE_TCR_0 0x3c0dc
+ #define AFE_ADDR_ADDR 0x3c0e8
+ #define AFE_RDATA_ADDR 0x3c0ec
+ #define AFE_WDATA_ADDR 0x3c0f0
+ #define AFE_CONFIG 0x3c0f4
+ #define AFE_SERIAL_CFG 0x3c0fc
+
+ #define DFE_BASE_ADDR 0xBE116000
+ //#define DFE_BASE_ADDR 0x9E116000
+
+ #define MEI_FR_ARCINT_C (DFE_BASE_ADDR + 0x0000001C)
+ #define MEI_DBG_WADDR_C (DFE_BASE_ADDR + 0x00000024)
+ #define MEI_DBG_RADDR_C (DFE_BASE_ADDR + 0x00000028)
+ #define MEI_DBG_DATA_C (DFE_BASE_ADDR + 0x0000002C)
+ #define MEI_DBG_DECO_C (DFE_BASE_ADDR + 0x00000030)
+ #define MEI_DBG_MASTER_C (DFE_BASE_ADDR + 0x0000003C)
+
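+ /* Accesses to ARC memory go through the MEI debug port: take debug
+ * mastership, select the decoder, write the address (and data for a
+ * write), poll bit 0x20 of MEI_FR_ARCINT_C until the transaction has
+ * completed, then release mastership again.
+ */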
+ static void WriteARCmem(uint32_t addr, uint32_t data)
+ {
+ writel(1 ,(volatile uint32_t *)MEI_DBG_MASTER_C);
+ writel(1 ,(volatile uint32_t *)MEI_DBG_DECO_C );
+ writel(addr ,(volatile uint32_t *)MEI_DBG_WADDR_C );
+ writel(data ,(volatile uint32_t *)MEI_DBG_DATA_C );
+ while( (ifxusb_rreg((volatile uint32_t *)MEI_FR_ARCINT_C) & 0x20) != 0x20 ){};
+ writel(0 ,(volatile uint32_t *)MEI_DBG_MASTER_C);
+ IFX_DEBUGP("WriteARCmem %08x %08x\n",addr,data);
+ };
+
+ static uint32_t ReadARCmem(uint32_t addr)
+ {
+ u32 data;
+ writel(1 ,(volatile uint32_t *)MEI_DBG_MASTER_C);
+ writel(1 ,(volatile uint32_t *)MEI_DBG_DECO_C );
+ writel(addr ,(volatile uint32_t *)MEI_DBG_RADDR_C );
+ while( (ifxusb_rreg((volatile uint32_t *)MEI_FR_ARCINT_C) & 0x20) != 0x20 ){};
+ data = ifxusb_rreg((volatile uint32_t *)MEI_DBG_DATA_C );
+ writel(0 ,(volatile uint32_t *)MEI_DBG_MASTER_C);
+ IFX_DEBUGP("ReadARCmem %08x %08x\n",addr,data);
+ return data;
+ };
+
+ void ifxusb_enable_afe_oc(void)
+ {
+ /* Start the clock */
+ WriteARCmem(CRI_UPDCTL ,0x00000008);
+ WriteARCmem(CRI_CCR0 ,0x00000014);
+ WriteARCmem(CRI_CCR1 ,0x00000500);
+ WriteARCmem(AFE_CONFIG ,0x000001c8);
+ WriteARCmem(AFE_SERIAL_CFG,0x00000016); // AFE serial interface clock & data latch edge
+ WriteARCmem(AFE_TCR_0 ,0x00000002);
+ //Take afe out of reset
+ WriteARCmem(AFE_CONFIG ,0x000000c0);
+ WriteARCmem(IRI_I_ENABLE ,0x00000101);
+ WriteARCmem(STY_SMODE ,0x00001980);
+
+ ReadARCmem(CRI_UPDCTL );
+ ReadARCmem(CRI_CCR0 );
+ ReadARCmem(CRI_CCR1 );
+ ReadARCmem(AFE_CONFIG );
+ ReadARCmem(AFE_SERIAL_CFG); // AFE serial interface clock & data latch edge
+ ReadARCmem(AFE_TCR_0 );
+ ReadARCmem(AFE_CONFIG );
+ ReadARCmem(IRI_I_ENABLE );
+ ReadARCmem(STY_SMODE );
+ }
+ #endif //defined(__IS_TWINPASS__)
+#endif //defined(__UEIP__)
+
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_ctl.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_ctl.c
new file mode 100644
index 0000000000..ade8e13cb2
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_ctl.c
@@ -0,0 +1,1385 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_ctl.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : Implementing the procfs and sysfs for IFX USB driver
+ *****************************************************************************/
+
+/*! \file ifxusb_ctl.c
+ \ingroup IFXUSB_DRIVER_V3
+ \brief Implementing the procfs and sysfs for IFX USB driver
+*/
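+
+/* The control entries below are exported twice: as files under
+ * /proc/<ifxusb_driver_name>/ (ifxusb_hcd or ifxusb_pcd, see
+ * ifxusb_driver.c) and as sysfs attributes of the platform device.
+ * "dbglevel" prints the current debug mask in hex and accepts a new hex
+ * value on write; the other entries are documented at their show/store
+ * handlers below. Which entries exist depends on the __IS_HOST__,
+ * __IS_DEVICE__ and __IS_DUAL__ build flags.
+ */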
+
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+
+#include <linux/proc_fs.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+#include "ifxusb_plat.h"
+#include "ifxusb_regs.h"
+#include "ifxusb_cif.h"
+
+#ifdef __IS_DEVICE__
+ #include "ifxpcd.h"
+#endif
+
+#ifdef __IS_HOST__
+ #include "ifxhcd.h"
+#endif
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gfp.h>
+
+
+#ifdef __IS_HOST__
+ extern char ifxusb_driver_name[];
+
+ #ifdef __IS_DUAL__
+ extern ifxhcd_hcd_t ifxusb_hcd_1;
+ extern ifxhcd_hcd_t ifxusb_hcd_2;
+ extern char ifxusb_hcd_name_1[];
+ extern char ifxusb_hcd_name_2[];
+ #else
+ extern ifxhcd_hcd_t ifxusb_hcd;
+ extern char ifxusb_hcd_name[];
+ #endif
+
+#endif
+
+#ifdef __IS_DEVICE__
+ extern char ifxusb_driver_name[];
+
+ extern ifxpcd_pcd_t ifxusb_pcd;
+ extern char ifxusb_pcd_name[];
+#endif
+
+
+//Attributes for sysfs (for 2.6 only)
+
+extern struct device_attribute dev_attr_dbglevel;
+
+#ifdef __IS_DUAL__
+ extern struct device_attribute dev_attr_dump_params_1;
+ extern struct device_attribute dev_attr_dump_params_2;
+#else
+ extern struct device_attribute dev_attr_dump_params;
+#endif
+
+#ifdef __IS_DUAL__
+ extern struct device_attribute dev_attr_mode_1;
+ extern struct device_attribute dev_attr_mode_2;
+#else
+ extern struct device_attribute dev_attr_mode;
+#endif
+
+#ifdef __IS_HOST__
+ #ifdef __IS_DUAL__
+ extern struct device_attribute dev_attr_buspower_1;
+ extern struct device_attribute dev_attr_buspower_2;
+ extern struct device_attribute dev_attr_bussuspend_1;
+ extern struct device_attribute dev_attr_bussuspend_2;
+ extern struct device_attribute dev_attr_busconnected_1;
+ extern struct device_attribute dev_attr_busconnected_2;
+ extern struct device_attribute dev_attr_connectspeed_1;
+ extern struct device_attribute dev_attr_connectspeed_2;
+ #else
+ extern struct device_attribute dev_attr_buspower;
+ extern struct device_attribute dev_attr_bussuspend;
+ extern struct device_attribute dev_attr_busconnected;
+ extern struct device_attribute dev_attr_connectspeed;
+ #endif
+#endif //__IS_HOST__
+
+#ifdef __IS_DEVICE__
+ extern struct device_attribute dev_attr_devspeed;
+ extern struct device_attribute dev_attr_enumspeed;
+#endif //__IS_DEVICE__
+
+#ifdef __ENABLE_DUMP__
+ #ifdef __IS_DUAL__
+ extern struct device_attribute dev_attr_dump_reg_1;
+ extern struct device_attribute dev_attr_dump_reg_2;
+ extern struct device_attribute dev_attr_dump_spram_1;
+ extern struct device_attribute dev_attr_dump_spram_2;
+ #ifdef __IS_HOST__
+ extern struct device_attribute dev_attr_dump_host_state_1;
+ extern struct device_attribute dev_attr_dump_host_state_2;
+ #else
+ #endif
+ #else
+ extern struct device_attribute dev_attr_dump_reg;
+ extern struct device_attribute dev_attr_dump_spram;
+ #ifdef __IS_HOST__
+ extern struct device_attribute dev_attr_dump_host_state;
+ #else
+ #endif
+ #endif
+#endif //__ENABLE_DUMP__
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
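+// "dbglevel": read/write access to the global debug level mask
+// (h_dbg_lvl in host builds, d_dbg_lvl in device builds), given in hex.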
+static ssize_t procfs_dbglevel_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+{
+ #ifdef __IS_HOST__
+ return sprintf( buf, "%08X\n",h_dbg_lvl );
+ #else
+ return sprintf( buf, "%08X\n",d_dbg_lvl );
+ #endif
+}
+
+static ssize_t procfs_dbglevel_store(struct file *file, const char *buffer, unsigned long count, void *data)
+{
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ #ifdef __IS_HOST__
+ h_dbg_lvl =value;
+ #else
+ d_dbg_lvl =value;
+ #endif
+ //turn on and off power
+ return count;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dbglevel_show( struct device *_dev, struct device_attribute *attr,char *buf)
+#else
+ static ssize_t sysfs_dbglevel_show( struct device *_dev, char *buf)
+#endif
+{
+ #ifdef __IS_HOST__
+ return sprintf( buf, "%08X\n",h_dbg_lvl );
+ #else
+ return sprintf( buf, "%08X\n",d_dbg_lvl );
+ #endif
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dbglevel_store( struct device *_dev, struct device_attribute *attr,const char *buffer, size_t count )
+#else
+ static ssize_t sysfs_dbglevel_store( struct device *_dev, const char *buffer, size_t count )
+#endif
+{
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ #ifdef __IS_HOST__
+ h_dbg_lvl =value;
+ #else
+ d_dbg_lvl =value;
+ #endif
+ //turn on and off power
+ return count;
+}
+
+DEVICE_ATTR(dbglevel, S_IRUGO|S_IWUSR, sysfs_dbglevel_show, sysfs_dbglevel_store);
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+static void ifxusb_dump_params(ifxusb_core_if_t *_core_if);
+
+#ifdef __IS_DUAL__
+ static void dump_params_1(void)
+ {
+ ifxusb_dump_params(&ifxusb_hcd_1.core_if);
+ }
+ static void dump_params_2(void)
+ {
+ ifxusb_dump_params(&ifxusb_hcd_2.core_if);
+ }
+
+ static ssize_t procfs_dump_params_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_params_1();
+ return 0;
+ }
+ static ssize_t procfs_dump_params_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_params_2();
+ return 0;
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_params_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_params_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_params_1();
+ return 0;
+ }
+ DEVICE_ATTR(dump_params_1, S_IRUGO|S_IWUSR, sysfs_dump_params_show_1, NULL);
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_params_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_params_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_params_2();
+ return 0;
+ }
+
+ DEVICE_ATTR(dump_params_2, S_IRUGO|S_IWUSR, sysfs_dump_params_show_2, NULL);
+#else
+ static void dump_params(void)
+ {
+ #ifdef __IS_HOST__
+ ifxusb_dump_params(&ifxusb_hcd.core_if);
+ #else
+ ifxusb_dump_params(&ifxusb_pcd.core_if);
+ #endif
+ }
+
+ static ssize_t procfs_dump_params_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_params();
+ return 0;
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_params_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_params_show( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_params();
+ return 0;
+ }
+ DEVICE_ATTR(dump_params, S_IRUGO|S_IWUSR, sysfs_dump_params_show, NULL);
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
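+// "mode": reports whether the core currently operates in host or device
+// mode, based on the current-mode bit (bit 0) of GINTSTS.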
+#ifdef __IS_DUAL__
+ static ssize_t mode_show_1(char *buf)
+ {
+ if((ifxusb_rreg(&ifxusb_hcd_1.core_if.core_global_regs->gintsts ) & 0x1) == 1)
+ return sprintf( buf, "HOST\n" );
+ else
+ return sprintf( buf, "DEVICE(INCORRECT!)\n" );
+ }
+
+ static ssize_t mode_show_2(char *buf)
+ {
+ if((ifxusb_rreg(&ifxusb_hcd_2.core_if.core_global_regs->gintsts ) & 0x1) == 1)
+ return sprintf( buf, "HOST\n" );
+ else
+ return sprintf( buf, "DEVICE(INCORRECT!)\n" );
+ }
+
+ static ssize_t procfs_mode_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return mode_show_1(buf);
+ }
+ static ssize_t procfs_mode_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return mode_show_2(buf);
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_mode_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_mode_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ return mode_show_1(buf);
+ }
+
+ DEVICE_ATTR(mode_1, S_IRUGO|S_IWUSR, sysfs_mode_show_1, 0);
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_mode_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_mode_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ return mode_show_2(buf);
+ }
+ DEVICE_ATTR(mode_2, S_IRUGO|S_IWUSR, sysfs_mode_show_2, NULL);
+#else
+ static ssize_t mode_show(char *buf)
+ {
+ #ifdef __IS_HOST__
+ if((ifxusb_rreg(&ifxusb_hcd.core_if.core_global_regs->gintsts ) & 0x1) == 1)
+ return sprintf( buf, "HOST\n" );
+ else
+ return sprintf( buf, "DEVICE(INCORRECT!)\n" );
+ #else
+ if((ifxusb_rreg(&ifxusb_pcd.core_if.core_global_regs->gintsts ) & 0x1) != 1)
+ return sprintf( buf, "DEVICE\n" );
+ else
+ return sprintf( buf, "HOST(INCORRECT!)\n" );
+ #endif
+ }
+ static ssize_t procfs_mode_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return mode_show(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_mode_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_mode_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return mode_show(buf);
+ }
+ DEVICE_ATTR(mode, S_IRUGO|S_IWUSR, sysfs_mode_show, NULL);
+#endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __IS_HOST__
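+ // "buspower": shows the cached VBus state (1, 0 or UNKNOWN) and switches
+ // the 5V supply on or off when 1 or 0 is written.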
+ #ifdef __IS_DUAL__
+ static ssize_t buspower_show_1(char *buf)
+ {
+ if(ifxusb_vbus (&ifxusb_hcd_1.core_if)==1) return sprintf( buf, "1\n" );
+ if(ifxusb_vbus (&ifxusb_hcd_1.core_if)==0) return sprintf( buf, "0\n" );
+ return sprintf( buf, "UNKNOWN\n" );
+ }
+ static void buspower_store_1(uint32_t value)
+ {
+ if (value==1) ifxusb_vbus_on (&ifxusb_hcd_1.core_if);
+ else if(value==0) ifxusb_vbus_off(&ifxusb_hcd_1.core_if);
+ }
+ static ssize_t buspower_show_2(char *buf)
+ {
+ if(ifxusb_vbus (&ifxusb_hcd_2.core_if)==1) return sprintf( buf, "1\n" );
+ if(ifxusb_vbus (&ifxusb_hcd_2.core_if)==0) return sprintf( buf, "0\n" );
+ return sprintf( buf, "UNKNOWN\n" );
+ }
+ static void buspower_store_2(uint32_t value)
+ {
+ if (value==1) ifxusb_vbus_on (&ifxusb_hcd_2.core_if);
+ else if(value==0) ifxusb_vbus_off(&ifxusb_hcd_2.core_if);
+ }
+ static ssize_t procfs_buspower_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return buspower_show_1(buf);
+ }
+ static ssize_t procfs_buspower_store_1(struct file *file, const char *buffer, unsigned long count, void *data)
+ {
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ buspower_store_1(value);
+ return count;
+ }
+ static ssize_t procfs_buspower_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return buspower_show_2(buf);
+ }
+ static ssize_t procfs_buspower_store_2(struct file *file, const char *buffer, unsigned long count, void *data)
+ {
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ buspower_store_2(value);
+ return count;
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_buspower_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_buspower_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ return buspower_show_1(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_buspower_store_1( struct device *_dev, struct device_attribute *attr,const char *buffer, size_t count )
+ #else
+ static ssize_t sysfs_buspower_store_1( struct device *_dev, const char *buffer, size_t count )
+ #endif
+ {
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ buspower_store_1(value);
+ return count;
+ }
+ DEVICE_ATTR(buspower_1, S_IRUGO|S_IWUSR, sysfs_buspower_show_1, sysfs_buspower_store_1);
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_buspower_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_buspower_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ return buspower_show_2(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_buspower_store_2( struct device *_dev, struct device_attribute *attr,const char *buffer, size_t count )
+ #else
+ static ssize_t sysfs_buspower_store_2( struct device *_dev, const char *buffer, size_t count )
+ #endif
+ {
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ buspower_store_2(value);
+ return count;
+ }
+ DEVICE_ATTR(buspower_2, S_IRUGO|S_IWUSR, sysfs_buspower_show_2, sysfs_buspower_store_2);
+ #else
+ static ssize_t buspower_show(char *buf)
+ {
+ if(ifxusb_vbus (&ifxusb_hcd.core_if)==1) return sprintf( buf, "1\n" );
+ if(ifxusb_vbus (&ifxusb_hcd.core_if)==0) return sprintf( buf, "0\n" );
+ return sprintf( buf, "UNKNOWN\n" );
+ }
+ static void buspower_store(uint32_t value)
+ {
+ if (value==1) ifxusb_vbus_on (&ifxusb_hcd.core_if);
+ else if(value==0) ifxusb_vbus_off(&ifxusb_hcd.core_if);
+ }
+ static ssize_t procfs_buspower_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return buspower_show(buf);
+ }
+ static ssize_t procfs_buspower_store(struct file *file, const char *buffer, unsigned long count, void *data)
+ {
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ buspower_store(value);
+ return count;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_buspower_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_buspower_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return buspower_show(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_buspower_store( struct device *_dev, struct device_attribute *attr,const char *buffer, size_t count )
+ #else
+ static ssize_t sysfs_buspower_store( struct device *_dev, const char *buffer, size_t count )
+ #endif
+ {
+ char buf[16];
+ uint32_t value;
+ unsigned long len = count;
+ /* bound the copy so buf cannot overflow and NUL terminate before parsing */
+ if (len > sizeof(buf) - 1)
+ len = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+ buf[len] = 0;
+ value = simple_strtoul(buf, NULL, 16);
+ buspower_store(value);
+ return count;
+ }
+ DEVICE_ATTR(buspower, S_IRUGO|S_IWUSR, sysfs_buspower_show, sysfs_buspower_store);
+ #endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+
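+ // "bussuspend": reports the port suspend bit (prtsusp) of HPRT0.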
+ #ifdef __IS_DUAL__
+ static ssize_t bussuspend_show_1(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd_1.core_if.hprt0);
+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp);
+ }
+ static ssize_t bussuspend_show_2(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd_2.core_if.hprt0);
+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp);
+ }
+
+ static ssize_t procfs_bussuspend_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return bussuspend_show_1(buf);
+ }
+ static ssize_t procfs_bussuspend_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return bussuspend_show_2(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_bussuspend_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_bussuspend_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ return bussuspend_show_1(buf);
+ }
+ DEVICE_ATTR(bussuspend_1, S_IRUGO|S_IWUSR, sysfs_bussuspend_show_1, 0);
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_bussuspend_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_bussuspend_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ return bussuspend_show_2(buf);
+ }
+ DEVICE_ATTR(bussuspend_2, S_IRUGO|S_IWUSR, sysfs_bussuspend_show_2, 0);
+ #else
+ static ssize_t bussuspend_show(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd.core_if.hprt0);
+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp);
+ }
+ static ssize_t procfs_bussuspend_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return bussuspend_show(buf);
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_bussuspend_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_bussuspend_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return bussuspend_show(buf);
+ }
+ DEVICE_ATTR(bussuspend, S_IRUGO|S_IWUSR, sysfs_bussuspend_show, 0);
+ #endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
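+ // "busconnected": reports the port connect status bit (prtconnsts) of HPRT0.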
+ #ifdef __IS_DUAL__
+ static ssize_t busconnected_show_1(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd_1.core_if.hprt0);
+ return sprintf (buf, "Bus Connected = 0x%x\n", val.b.prtconnsts);
+ }
+ static ssize_t busconnected_show_2(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd_2.core_if.hprt0);
+ return sprintf (buf, "Bus Connected = 0x%x\n", val.b.prtconnsts);
+ }
+
+ static ssize_t procfs_busconnected_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return busconnected_show_1(buf);
+ }
+ static ssize_t procfs_busconnected_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return busconnected_show_2(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_busconnected_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_busconnected_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ return busconnected_show_1(buf);
+ }
+ DEVICE_ATTR(busconnected_1, S_IRUGO|S_IWUSR, sysfs_busconnected_show_1, 0);
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_busconnected_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_busconnected_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ return busconnected_show_2(buf);
+ }
+ DEVICE_ATTR(busconnected_2, S_IRUGO|S_IWUSR, sysfs_busconnected_show_2, 0);
+ #else
+ static ssize_t busconnected_show(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd.core_if.hprt0);
+ return sprintf (buf, "Bus Connected = 0x%x\n", val.b.prtconnsts);
+ }
+ static ssize_t procfs_busconnected_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return busconnected_show(buf);
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_busconnected_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_busconnected_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return busconnected_show(buf);
+ }
+ DEVICE_ATTR(busconnected, S_IRUGO|S_IWUSR, sysfs_busconnected_show, 0);
+ #endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
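+ // "connectspeed": decodes the port speed field (prtspd) of HPRT0
+ // (0 = high, 1 = full, 2 = low speed).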
+ #ifdef __IS_DUAL__
+ static ssize_t connectspeed_show_1(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd_1.core_if.hprt0);
+ if( val.b.prtspd ==0) return sprintf (buf, "Bus Speed = High (%d)\n", val.b.prtspd);
+ if( val.b.prtspd ==1) return sprintf (buf, "Bus Speed = Full (%d)\n", val.b.prtspd);
+ if( val.b.prtspd ==2) return sprintf (buf, "Bus Speed = Low (%d)\n", val.b.prtspd);
+ return sprintf (buf, "Bus Speed = Unknown (%d)\n", val.b.prtspd);
+ }
+ static ssize_t connectspeed_show_2(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd_2.core_if.hprt0);
+ if( val.b.prtspd ==0) return sprintf (buf, "Bus Speed = High (%d)\n", val.b.prtspd);
+ if( val.b.prtspd ==1) return sprintf (buf, "Bus Speed = Full (%d)\n", val.b.prtspd);
+ if( val.b.prtspd ==2) return sprintf (buf, "Bus Speed = Low (%d)\n", val.b.prtspd);
+ return sprintf (buf, "Bus Speed = Unknown (%d)\n", val.b.prtspd);
+ }
+
+ static ssize_t procfs_connectspeed_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return connectspeed_show_1(buf);
+ }
+ static ssize_t procfs_connectspeed_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return connectspeed_show_2(buf);
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_connectspeed_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_connectspeed_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ return connectspeed_show_1(buf);
+ }
+ DEVICE_ATTR(connectspeed_1, S_IRUGO|S_IWUSR, sysfs_connectspeed_show_1, 0);
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_connectspeed_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_connectspeed_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ return connectspeed_show_2(buf);
+ }
+ DEVICE_ATTR(connectspeed_2, S_IRUGO|S_IWUSR, sysfs_connectspeed_show_2, 0);
+ #else
+ static ssize_t connectspeed_show(char *buf)
+ {
+ hprt0_data_t val;
+ val.d32 = ifxusb_rreg(ifxusb_hcd.core_if.hprt0);
+ if( val.b.prtspd ==0) return sprintf (buf, "Bus Speed = High (%d)\n", val.b.prtspd);
+ if( val.b.prtspd ==1) return sprintf (buf, "Bus Speed = Full (%d)\n", val.b.prtspd);
+ if( val.b.prtspd ==2) return sprintf (buf, "Bus Speed = Low (%d)\n", val.b.prtspd);
+ return sprintf (buf, "Bus Speed = Unknown (%d)\n", val.b.prtspd);
+ }
+
+ static ssize_t procfs_connectspeed_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return connectspeed_show(buf);
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_connectspeed_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_connectspeed_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return connectspeed_show(buf);
+ }
+ DEVICE_ATTR(connectspeed, S_IRUGO|S_IWUSR, sysfs_connectspeed_show, 0);
+ #endif
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#endif
+
+
+#ifdef __IS_DEVICE__
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
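+ // "devspeed": decodes the configured device speed (DCFG.devspd);
+ // "enumspeed": decodes the speed negotiated at enumeration (DSTS.enumspd).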
+ static ssize_t devspeed_show(char *buf)
+ {
+ dcfg_data_t val;
+ val.d32 = ifxusb_rreg(&ifxusb_pcd.core_if.dev_global_regs->dcfg);
+ if( val.b.devspd ==0) return sprintf (buf, "Dev Speed = High (%d)\n", val.b.devspd);
+ if( val.b.devspd ==1) return sprintf (buf, "Dev Speed = Full (%d)\n", val.b.devspd);
+ if( val.b.devspd ==3) return sprintf (buf, "Dev Speed = Full (%d)\n", val.b.devspd);
+ return sprintf (buf, "Dev Speed = Unknown (%d)\n", val.b.devspd);
+ }
+
+ static ssize_t procfs_devspeed_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return devspeed_show(buf);
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_devspeed_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_devspeed_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return devspeed_show(buf);
+ }
+ DEVICE_ATTR(devspeed, S_IRUGO|S_IWUSR, sysfs_devspeed_show, 0);
+
+ static ssize_t enumspeed_show(char *buf)
+ {
+ dsts_data_t val;
+ val.d32 = ifxusb_rreg(&ifxusb_pcd.core_if.dev_global_regs->dsts);
+ if( val.b.enumspd ==0) return sprintf (buf, "Enum Speed = High (%d)\n", val.b.enumspd);
+ if( val.b.enumspd ==1) return sprintf (buf, "Enum Speed = Full (%d)\n", val.b.enumspd);
+ if( val.b.enumspd ==2) return sprintf (buf, "Enum Speed = Low (%d)\n", val.b.enumspd);
+ return sprintf (buf, "Enum Speed = invalid(%d)\n", val.b.enumspd);
+ }
+
+ static ssize_t procfs_enumspeed_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ return enumspeed_show(buf);
+ }
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_enumspeed_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_enumspeed_show( struct device *_dev, char *buf)
+ #endif
+ {
+ return enumspeed_show(buf);
+ }
+ DEVICE_ATTR(enumspeed, S_IRUGO|S_IWUSR, sysfs_enumspeed_show, 0);
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+#endif
+
+
+//////////////////////////////////////////////////////////////////////////////////
+#ifdef __ENABLE_DUMP__
+
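+ // The dump_* entries below print through the ifxusb_dump_*/ifxhcd_dump_*
+ // helpers rather than into the supplied buffer, so reading the
+ // corresponding proc/sysfs file returns no data of its own.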
+ #ifdef __IS_DUAL__
+ static void dump_reg_1(void)
+ {
+ ifxusb_dump_registers(&ifxusb_hcd_1.core_if);
+ }
+ static void dump_reg_2(void)
+ {
+ ifxusb_dump_registers(&ifxusb_hcd_2.core_if);
+ }
+
+ static ssize_t procfs_dump_reg_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_reg_1();
+ return 0;
+ }
+ static ssize_t procfs_dump_reg_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_reg_2();
+ return 0;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_reg_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_reg_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_reg_1();
+ return 0;
+ }
+ DEVICE_ATTR(dump_reg_1, S_IRUGO|S_IWUSR, sysfs_dump_reg_show_1, 0);
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_reg_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_reg_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_reg_2();
+ return 0;
+ }
+ DEVICE_ATTR(dump_reg_2, S_IRUGO|S_IWUSR, sysfs_dump_reg_show_2, 0);
+ #else
+ static void dump_reg(void)
+ {
+ #ifdef __IS_HOST__
+ ifxusb_dump_registers(&ifxusb_hcd.core_if);
+ #endif
+ #ifdef __IS_DEVICE__
+ ifxusb_dump_registers(&ifxusb_pcd.core_if);
+ #endif
+ }
+ static ssize_t procfs_dump_reg_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_reg();
+ return 0;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_reg_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_reg_show( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_reg();
+ return 0;
+ }
+ DEVICE_ATTR(dump_reg, S_IRUGO|S_IWUSR, sysfs_dump_reg_show, 0);
+ #endif
+
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #ifdef __IS_DUAL__
+ static void dump_spram_1(void)
+ {
+ ifxusb_dump_spram(&ifxusb_hcd_1.core_if);
+ }
+ static void dump_spram_2(void)
+ {
+ ifxusb_dump_spram(&ifxusb_hcd_2.core_if);
+ }
+
+ static ssize_t procfs_dump_spram_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_spram_1();
+ return 0;
+ }
+ static ssize_t procfs_dump_spram_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_spram_2();
+ return 0;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_spram_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_spram_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_spram_1();
+ return 0;
+ }
+ DEVICE_ATTR(dump_spram_1, S_IRUGO|S_IWUSR, sysfs_dump_spram_show_1, 0);
+
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_spram_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_spram_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_spram_2();
+ return 0;
+ }
+ DEVICE_ATTR(dump_spram_2, S_IRUGO|S_IWUSR, sysfs_dump_spram_show_2, 0);
+ #else
+ static void dump_spram(void)
+ {
+ #ifdef __IS_HOST__
+ ifxusb_dump_spram(&ifxusb_hcd.core_if);
+ #endif
+ #ifdef __IS_DEVICE__
+ ifxusb_dump_spram(&ifxusb_pcd.core_if);
+ #endif
+ }
+ static ssize_t procfs_dump_spram_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ dump_spram();
+ return 0;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_spram_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_spram_show( struct device *_dev,char *buf)
+ #endif
+ {
+ dump_spram();
+ return 0;
+ }
+ DEVICE_ATTR(dump_spram, S_IRUGO|S_IWUSR, sysfs_dump_spram_show, 0);
+ #endif
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #ifdef __IS_HOST__
+ #ifdef __IS_DUAL__
+ static ssize_t procfs_dump_host_state_show_1(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ ifxhcd_dump_state(&ifxusb_hcd_1);
+ return 0;
+ }
+ static ssize_t procfs_dump_host_state_show_2(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ ifxhcd_dump_state(&ifxusb_hcd_2);
+ return 0;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_host_state_show_1( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_host_state_show_1( struct device *_dev,char *buf)
+ #endif
+ {
+ ifxhcd_dump_state(&ifxusb_hcd_1);
+ return 0;
+ }
+ DEVICE_ATTR(dump_host_state_1, S_IRUGO|S_IWUSR, sysfs_dump_host_state_show_1, 0);
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_host_state_show_2( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_host_state_show_2( struct device *_dev,char *buf)
+ #endif
+ {
+ ifxhcd_dump_state(&ifxusb_hcd_2);
+ return 0;
+ }
+ DEVICE_ATTR(dump_host_state_2, S_IRUGO|S_IWUSR, sysfs_dump_host_state_show_2, 0);
+ #else
+ static ssize_t procfs_dump_host_state_show(char *buf, char **start, off_t offset, int count, int *eof, void *data)
+ {
+ ifxhcd_dump_state(&ifxusb_hcd);
+ return 0;
+ }
+ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ static ssize_t sysfs_dump_host_state_show( struct device *_dev, struct device_attribute *attr,char *buf)
+ #else
+ static ssize_t sysfs_dump_host_state_show( struct device *_dev,char *buf)
+ #endif
+ {
+ ifxhcd_dump_state(&ifxusb_hcd);
+ return 0;
+ }
+ DEVICE_ATTR(dump_host_state, S_IRUGO|S_IWUSR, sysfs_dump_host_state_show, 0);
+ #endif
+
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ #endif //__IS_HOST__
+
+#endif //__ENABLE_DUMP__
+
+//////////////////////////////////////////////////////////////////////////////////
+
+static int ifx_proc_addproc(char *funcname, read_proc_t *hookfuncr, write_proc_t *hookfuncw);
+static void ifx_proc_delproc(char *funcname);
+
+//////////////////////////////////////////////////////////////////////////////////
+
+/*!
+ \brief This function creates the sysfs and procfs entries
+ \param[in] _dev Pointer to the device structure, if applicable
+ */
+void ifxusb_attr_create (void *_dev)
+{
+ int error;
+
+ struct device *dev = (struct device *) _dev;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ error = ifx_proc_addproc("dbglevel", procfs_dbglevel_show, procfs_dbglevel_store);
+ error = device_create_file(dev, &dev_attr_dbglevel);
+
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("dump_params_1", procfs_dump_params_show_1, NULL);
+ error = ifx_proc_addproc("dump_params_2", procfs_dump_params_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_dump_params_1);
+ error = device_create_file(dev, &dev_attr_dump_params_2);
+ #else
+ error = ifx_proc_addproc("dump_params", procfs_dump_params_show, NULL);
+ error = device_create_file(dev, &dev_attr_dump_params);
+ #endif
+
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("mode_1", procfs_mode_show_1, NULL);
+ error = ifx_proc_addproc("mode_2", procfs_mode_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_mode_1);
+ error = device_create_file(dev, &dev_attr_mode_2);
+ #else
+ error = ifx_proc_addproc("mode", procfs_mode_show, NULL);
+ error = device_create_file(dev, &dev_attr_mode);
+ #endif
+
+ #ifdef __IS_HOST__
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("buspower_1", procfs_buspower_show_1, procfs_buspower_store_1);
+ error = ifx_proc_addproc("buspower_2", procfs_buspower_show_2, procfs_buspower_store_2);
+ error = device_create_file(dev, &dev_attr_buspower_1);
+ error = device_create_file(dev, &dev_attr_buspower_2);
+ #else
+ error = ifx_proc_addproc("buspower", procfs_buspower_show, procfs_buspower_store);
+ error = device_create_file(dev, &dev_attr_buspower);
+ #endif
+
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("bussuspend_1", procfs_bussuspend_show_1, NULL);
+ error = ifx_proc_addproc("bussuspend_2", procfs_bussuspend_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_bussuspend_1);
+ error = device_create_file(dev, &dev_attr_bussuspend_2);
+ #else
+ error = ifx_proc_addproc("bussuspend", procfs_bussuspend_show, NULL);
+ error = device_create_file(dev, &dev_attr_bussuspend);
+ #endif
+
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("busconnected_1", procfs_busconnected_show_1, NULL);
+ error = ifx_proc_addproc("busconnected_2", procfs_busconnected_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_busconnected_1);
+ error = device_create_file(dev, &dev_attr_busconnected_2);
+ #else
+ error = ifx_proc_addproc("busconnected", procfs_busconnected_show, NULL);
+ error = device_create_file(dev, &dev_attr_busconnected);
+ #endif
+
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("connectspeed_1", procfs_connectspeed_show_1, NULL);
+ error = ifx_proc_addproc("connectspeed_2", procfs_connectspeed_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_connectspeed_1);
+ error = device_create_file(dev, &dev_attr_connectspeed_2);
+ #else
+ error = ifx_proc_addproc("connectspeed", procfs_connectspeed_show, NULL);
+ error = device_create_file(dev, &dev_attr_connectspeed);
+ #endif
+ #endif
+
+ #ifdef __IS_DEVICE__
+ error = ifx_proc_addproc("devspeed", procfs_devspeed_show, NULL);
+ error = device_create_file(dev, &dev_attr_devspeed);
+ error = ifx_proc_addproc("enumspeed", procfs_enumspeed_show, NULL);
+ error = device_create_file(dev, &dev_attr_enumspeed);
+ #endif
+
+ //////////////////////////////////////////////////////
+ #ifdef __ENABLE_DUMP__
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("dump_reg_1", procfs_dump_reg_show_1, NULL);
+ error = ifx_proc_addproc("dump_reg_2", procfs_dump_reg_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_dump_reg_1);
+ error = device_create_file(dev, &dev_attr_dump_reg_2);
+ #else
+ error = ifx_proc_addproc("dump_reg", procfs_dump_reg_show, NULL);
+ error = device_create_file(dev, &dev_attr_dump_reg);
+ #endif
+
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("dump_spram_1", procfs_dump_spram_show_1, NULL);
+ error = ifx_proc_addproc("dump_spram_2", procfs_dump_spram_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_dump_spram_1);
+ error = device_create_file(dev, &dev_attr_dump_spram_2);
+ #else
+ error = ifx_proc_addproc("dump_spram", procfs_dump_spram_show, NULL);
+ error = device_create_file(dev, &dev_attr_dump_spram);
+ #endif
+
+ #ifdef __IS_HOST__
+ #ifdef __IS_DUAL__
+ error = ifx_proc_addproc("dump_host_state_1", procfs_dump_host_state_show_1, NULL);
+ error = ifx_proc_addproc("dump_host_state_2", procfs_dump_host_state_show_2, NULL);
+ error = device_create_file(dev, &dev_attr_dump_host_state_1);
+ error = device_create_file(dev, &dev_attr_dump_host_state_2);
+ #else
+ error = ifx_proc_addproc("dump_host_state", procfs_dump_host_state_show, NULL);
+ error = device_create_file(dev, &dev_attr_dump_host_state);
+ #endif
+ #endif
+ #endif //__ENABLE_DUMP__
+ //////////////////////////////////////////////////////
+}
+
+
+/*!
+ \brief This function removes the sysfs and procfs entries
+ \param[in] _dev Pointer to the device structure, if applicable
+ */
+void ifxusb_attr_remove (void *_dev)
+{
+ struct device *dev = (struct device *) _dev;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ ifx_proc_delproc("dbglevel");
+ device_remove_file(dev, &dev_attr_dbglevel);
+
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("dump_params_1");
+ ifx_proc_delproc("dump_params_2");
+ device_remove_file(dev, &dev_attr_dump_params_1);
+ device_remove_file(dev, &dev_attr_dump_params_2);
+ #else
+ ifx_proc_delproc("dump_params");
+ device_remove_file(dev, &dev_attr_dump_params);
+ #endif
+
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("mode_1");
+ ifx_proc_delproc("mode_2");
+ device_remove_file(dev, &dev_attr_mode_1);
+ device_remove_file(dev, &dev_attr_mode_2);
+ #else
+ ifx_proc_delproc("mode");
+ device_remove_file(dev, &dev_attr_mode);
+ #endif
+
+ #ifdef __IS_HOST__
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("buspower_1");
+ ifx_proc_delproc("buspower_2");
+ device_remove_file(dev, &dev_attr_buspower_1);
+ device_remove_file(dev, &dev_attr_buspower_2);
+ #else
+ ifx_proc_delproc("buspower");
+ device_remove_file(dev, &dev_attr_buspower);
+ #endif
+
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("bussuspend_1");
+ ifx_proc_delproc("bussuspend_2");
+ device_remove_file(dev, &dev_attr_bussuspend_1);
+ device_remove_file(dev, &dev_attr_bussuspend_2);
+ #else
+ ifx_proc_delproc("bussuspend");
+ device_remove_file(dev, &dev_attr_bussuspend);
+ #endif
+
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("busconnected_1");
+ ifx_proc_delproc("busconnected_2");
+ device_remove_file(dev, &dev_attr_busconnected_1);
+ device_remove_file(dev, &dev_attr_busconnected_2);
+ #else
+ ifx_proc_delproc("busconnected");
+ device_remove_file(dev, &dev_attr_busconnected);
+ #endif
+
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("connectspeed_1");
+ ifx_proc_delproc("connectspeed_2");
+ device_remove_file(dev, &dev_attr_connectspeed_1);
+ device_remove_file(dev, &dev_attr_connectspeed_2);
+ #else
+ ifx_proc_delproc("connectspeed");
+ device_remove_file(dev, &dev_attr_connectspeed);
+ #endif
+ #endif
+
+ #ifdef __IS_DEVICE__
+ ifx_proc_delproc("devspeed");
+ device_remove_file(dev, &dev_attr_devspeed);
+ ifx_proc_delproc("enumspeed");
+ device_remove_file(dev, &dev_attr_enumspeed);
+ #endif
+
+ #ifdef __ENABLE_DUMP__
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("dump_reg_1");
+ ifx_proc_delproc("dump_reg_2");
+ device_remove_file(dev, &dev_attr_dump_reg_1);
+ device_remove_file(dev, &dev_attr_dump_reg_2);
+ #else
+ ifx_proc_delproc("dump_reg");
+ device_remove_file(dev, &dev_attr_dump_reg);
+ #endif
+
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("dump_spram_1");
+ ifx_proc_delproc("dump_spram_2");
+ device_remove_file(dev, &dev_attr_dump_spram_1);
+ device_remove_file(dev, &dev_attr_dump_spram_2);
+ #else
+ ifx_proc_delproc("dump_spram");
+ device_remove_file(dev, &dev_attr_dump_spram);
+ #endif
+
+ #ifdef __IS_HOST__
+ #ifdef __IS_DUAL__
+ ifx_proc_delproc("dump_host_state_1");
+ ifx_proc_delproc("dump_host_state_2");
+ device_remove_file(dev, &dev_attr_dump_host_state_1);
+ device_remove_file(dev, &dev_attr_dump_host_state_2);
+ #else
+ ifx_proc_delproc("dump_host_state");
+ device_remove_file(dev, &dev_attr_dump_host_state);
+ #endif
+ #endif
+ #endif //__ENABLE_DUMP__
+ /* AVM/WK fix: del IFXUSB root dir*/
+ ifx_proc_delproc(NULL);
+}
+
+static struct proc_dir_entry * proc_ifx_root = NULL;
+
+/* initialize the proc file system and make a dir named /proc/[name] */
+static void ifx_proc_init(void)
+{
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ proc_ifx_root = proc_mkdir(ifxusb_driver_name, (void *)0);
+ if (!proc_ifx_root){
+ IFX_PRINT("%s proc initialization failed! \n", ifxusb_driver_name);
+ return;
+ }
+}
+
+/* proc file system add function for debugging. */
+static int ifx_proc_addproc(char *funcname, read_proc_t *hookfuncr, write_proc_t *hookfuncw)
+{
+ struct proc_dir_entry *pe;
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ if (!proc_ifx_root)
+ ifx_proc_init();
+
+ if (hookfuncw == NULL)
+ {
+ pe = create_proc_read_entry(funcname, S_IRUGO, proc_ifx_root, hookfuncr, NULL);
+ if (!pe)
+ {
+ IFX_PRINT("ERROR in creating read proc entry (%s)! \n", funcname);
+ return -1;
+ }
+ }
+ else
+ {
+ pe = create_proc_entry(funcname, S_IRUGO | S_IWUGO, proc_ifx_root);
+ if (pe)
+ {
+ pe->read_proc = hookfuncr;
+ pe->write_proc = hookfuncw;
+ }
+ else
+ {
+ IFX_PRINT("ERROR in creating proc entry (%s)! \n", funcname);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+
+/* proc file system del function for removing module. */
+static void ifx_proc_delproc(char *funcname)
+{
+/* AVM/WK Fix*/
+ if (funcname != NULL) {
+ remove_proc_entry(funcname, proc_ifx_root);
+ } else {
+ remove_proc_entry(ifxusb_driver_name, NULL);
+ proc_ifx_root = NULL;
+ }
+}
+
+static void ifxusb_dump_params(ifxusb_core_if_t *_core_if)
+{
+ ifxusb_params_t *params=&_core_if->params;
+
+ #ifdef __IS_HOST__
+ IFX_PRINT("IFXUSB Dump Parameters ( Host Mode) \n");
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+ IFX_PRINT("IFXUSB Dump Parameters ( Device Mode) \n");
+ #endif //__IS_DEVICE__
+
+ #ifdef __DESC_DMA__
+ IFX_PRINT("DMA: Hermes DMA\n");
+ #else
+ IFX_PRINT("DMA: Non-Desc DMA\n");
+ #endif
+ IFX_PRINT(" Burst size: %d\n",params->dma_burst_size);
+
+ if (params->speed==1)
+ IFX_PRINT("Full Speed only\n");
+ else if(params->speed==0)
+ IFX_PRINT("Full/Hign Speed\n");
+ else
+ IFX_PRINT("Unkonwn setting (%d) for Speed\n",params->speed);
+
+ IFX_PRINT("Total Data FIFO size: %d(0x%06X) DWord, %d(0x%06X) Bytes\n",
+ params->data_fifo_size,params->data_fifo_size,
+ params->data_fifo_size*4, params->data_fifo_size*4
+ );
+
+ #ifdef __IS_DEVICE__
+ IFX_PRINT("Rx FIFO size: %d(0x%06X) DWord, %d(0x%06X) Bytes\n",
+ params->rx_fifo_size,params->rx_fifo_size,
+ params->rx_fifo_size*4, params->rx_fifo_size*4
+ );
+ {
+ int i;
+ for(i=0;i<MAX_EPS_CHANNELS;i++)
+ {
+ IFX_PRINT("Tx FIFO #%d size: %d(0x%06X) DWord, %d(0x%06X) Bytes\n",i,
+ params->tx_fifo_size[i],params->tx_fifo_size[i],
+ params->tx_fifo_size[i]*4, params->tx_fifo_size[i]*4
+ );
+ }
+ }
+ #ifdef __DED_FIFO__
+ IFX_PRINT("Treshold : %s Rx:%d Tx:%d \n",
+ (params->thr_ctl)?"On":"Off",params->tx_thr_length,params->rx_thr_length);
+ #endif
+ #else //__IS_HOST__
+ IFX_PRINT("Host Channels: %d\n",params->host_channels);
+
+ IFX_PRINT("Rx FIFO size: %d(0x%06X) DWord, %d(0x%06X) Bytes\n",
+ params->data_fifo_size,params->data_fifo_size,
+ params->data_fifo_size*4, params->data_fifo_size*4
+ );
+
+ IFX_PRINT("NP Tx FIFO size: %d(0x%06X) DWord, %d(0x%06X) Bytes\n",
+ params->nperio_tx_fifo_size,params->nperio_tx_fifo_size,
+ params->nperio_tx_fifo_size*4, params->nperio_tx_fifo_size*4
+ );
+
+ IFX_PRINT(" P Tx FIFO size: %d(0x%06X) DWord, %d(0x%06X) Bytes\n",
+ params->perio_tx_fifo_size,params->perio_tx_fifo_size,
+ params->perio_tx_fifo_size*4, params->perio_tx_fifo_size*4
+ );
+ #endif //__IS_HOST__
+
+ IFX_PRINT("Max Transfer size: %d(0x%06X) Bytes\n",
+ params->max_transfer_size,params->max_transfer_size
+ );
+ IFX_PRINT("Max Packet Count: %d(0x%06X)\n",
+ params->max_packet_count,params->max_packet_count
+ );
+
+ IFX_PRINT("PHY UTMI Width: %d\n",params->phy_utmi_width);
+
+ IFX_PRINT("Turn Around Time: HS:%d FS:%d\n",params->turn_around_time_hs,params->turn_around_time_fs);
+ IFX_PRINT("Timeout Calibration: HS:%d FS:%d\n",params->timeout_cal_hs,params->timeout_cal_fs);
+
+
+ IFX_PRINT("==================================================\n");
+ IFX_PRINT("End of Parameters Dump\n");
+ IFX_PRINT("==================================================\n");
+}
+
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_driver.c b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_driver.c
new file mode 100644
index 0000000000..23349059f4
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_driver.c
@@ -0,0 +1,970 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_driver.c
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This module provides the initialization and cleanup entry
+ ** points for the IFX USB driver. This module can be
+ ** dynamically loaded with insmod command or built-in
+ ** with kernel. When loaded or executed the ifxusb_driver_init
+ ** function is called. When the module is removed (using rmmod),
+ ** the ifxusb_driver_cleanup function is called.
+ *****************************************************************************/
+
+/*!
+ \file ifxusb_driver.c
+ \brief This file contains the loading/unloading interface to the Linux driver.
+*/
+
+#include <linux/version.h>
+#include "ifxusb_version.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h> /* permission constants */
+#include <linux/gpio.h>
+#include <lantiq_soc.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+ #include <linux/irq.h>
+#endif
+
+#include <asm/io.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ #include <asm/irq.h>
+#endif
+
+#include "ifxusb_plat.h"
+
+#include "ifxusb_cif.h"
+
+#ifdef __IS_HOST__
+ #include "ifxhcd.h"
+
+ #define USB_DRIVER_DESC "IFX USB HCD driver"
+ const char ifxusb_driver_name[] = "ifxusb_hcd";
+
+ #ifdef __IS_DUAL__
+ ifxhcd_hcd_t ifxusb_hcd_1;
+ ifxhcd_hcd_t ifxusb_hcd_2;
+ const char ifxusb_hcd_name_1[] = "ifxusb_hcd_1";
+ const char ifxusb_hcd_name_2[] = "ifxusb_hcd_2";
+ #else
+ ifxhcd_hcd_t ifxusb_hcd;
+ const char ifxusb_hcd_name[] = "ifxusb_hcd";
+ #endif
+
+ #if defined(__DO_OC_INT__)
+ static unsigned int oc_int_installed=0;
+ static ifxhcd_hcd_t *oc_int_id=NULL;
+ #endif
+#endif
+
+#ifdef __IS_DEVICE__
+ #include "ifxpcd.h"
+
+ #define USB_DRIVER_DESC "IFX USB PCD driver"
+ const char ifxusb_driver_name[] = "ifxusb_pcd";
+
+ ifxpcd_pcd_t ifxusb_pcd;
+ const char ifxusb_pcd_name[] = "ifxusb_pcd";
+#endif
+
+/* Global Debug Level Mask. */
+#ifdef __IS_HOST__
+ uint32_t h_dbg_lvl = 0x00;
+#endif
+
+#ifdef __IS_DEVICE__
+ uint32_t d_dbg_lvl = 0x00;
+#endif
+
+ifxusb_params_t ifxusb_module_params;
+
+static void parse_parms(void);
+
+
+#include <lantiq_irq.h>
+#define IFX_USB0_IR (INT_NUM_IM1_IRL0 + 22)
+#define IFX_USB1_IR (INT_NUM_IM2_IRL0 + 19)
+
+/*!
+ \brief This function is called when a driver is unregistered. This happens when
+ the rmmod command is executed. The device may or may not be electrically
+ present. If it is present, the driver stops device processing. Any resources
+ used on behalf of this device are freed.
+*/
+static int ifxusb_driver_remove(struct platform_device *_dev)
+{
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ #ifdef __IS_HOST__
+ #if defined(__DO_OC_INT__)
+ #if defined(__DO_OC_INT_ENABLE__)
+ ifxusb_oc_int_off();
+ #endif
+
+ if(oc_int_installed && oc_int_id)
+ free_irq((unsigned int)IFXUSB_OC_IRQ, oc_int_id );
+ oc_int_installed=0;
+ oc_int_id=NULL;
+ #endif
+
+ #if defined(__IS_DUAL__)
+ ifxhcd_remove(&ifxusb_hcd_1);
+ ifxusb_core_if_remove(&ifxusb_hcd_1.core_if );
+ ifxhcd_remove(&ifxusb_hcd_2);
+ ifxusb_core_if_remove(&ifxusb_hcd_2.core_if );
+ #else
+ ifxhcd_remove(&ifxusb_hcd);
+ ifxusb_core_if_remove(&ifxusb_hcd.core_if );
+ #endif
+ #endif
+
+ #ifdef __IS_DEVICE__
+ ifxpcd_remove();
+ ifxusb_core_if_remove(&ifxusb_pcd.core_if );
+ #endif
+
+ /* Remove the device attributes */
+
+ ifxusb_attr_remove(&_dev->dev);
+
+ return 0;
+}
+
+
+/* Function to set up the structures to control one USB core running as host */
+#ifdef __IS_HOST__
+/*!
+ \brief Inlined by ifxusb_driver_probe() to handle host-mode probing. Run once for each host core.
+*/
+ static inline int ifxusb_driver_probe_h(ifxhcd_hcd_t *_hcd,
+ int _irq,
+ uint32_t _iobase,
+ uint32_t _fifomem,
+ uint32_t _fifodbg
+ )
+ {
+ int retval = 0;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+
+#ifdef __DEV_NEW__
+ ifxusb_power_off (&_hcd->core_if);
+ ifxusb_phy_power_off (&_hcd->core_if); // Test
+ mdelay(500);
+#endif //__DEV_NEW__
+ ifxusb_power_on (&_hcd->core_if);
+ mdelay(50);
+ ifxusb_phy_power_on (&_hcd->core_if); // Test
+ mdelay(50);
+ ifxusb_hard_reset(&_hcd->core_if);
+ retval =ifxusb_core_if_init(&_hcd->core_if,
+ _irq,
+ _iobase,
+ _fifomem,
+ _fifodbg);
+ if(retval)
+ return retval;
+
+ ifxusb_host_core_init(&_hcd->core_if,&ifxusb_module_params);
+
+ ifxusb_disable_global_interrupts( &_hcd->core_if);
+
+		/* The driver is now initialized and needs to be registered with the Linux USB sub-system */
+
+ retval = ifxhcd_init(_hcd); // hook the hcd into usb ss
+
+ if (retval != 0)
+ {
+ IFX_ERROR("_hcd_init failed\n");
+ return retval;
+ }
+
+		//ifxusb_enable_global_interrupts( _hcd->core_if ); // this should be done at hcd_start, including hcd_interrupt
+ return 0;
+ }
+#endif //__IS_HOST__
+
+#ifdef __IS_DEVICE__
+/*!
+ \brief Inlined by ifxusb_driver_probe() to handle device-mode probing.
+*/
+ static inline int ifxusb_driver_probe_d(ifxpcd_pcd_t *_pcd,
+ int _irq,
+ uint32_t _iobase,
+ uint32_t _fifomem,
+ uint32_t _fifodbg
+ )
+ {
+ int retval = 0;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+#ifdef __DEV_NEW__
+ ifxusb_power_off (&_pcd->core_if);
+ ifxusb_phy_power_off (&_pcd->core_if); // Test
+ mdelay(500);
+#endif // __DEV_NEW__
+ ifxusb_power_on (&_pcd->core_if);
+ mdelay(50);
+ ifxusb_phy_power_on (&_pcd->core_if); // Test
+ mdelay(50);
+ ifxusb_hard_reset(&_pcd->core_if);
+ retval =ifxusb_core_if_init(&_pcd->core_if,
+ _irq,
+ _iobase,
+ _fifomem,
+ _fifodbg);
+ if(retval)
+ return retval;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ ifxusb_dev_core_init(&_pcd->core_if,&ifxusb_module_params);
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ ifxusb_disable_global_interrupts( &_pcd->core_if);
+
+		/* The driver is now initialized and needs to be registered with the
+		   Linux USB Gadget sub-system
+		 */
+ retval = ifxpcd_init();
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+
+ if (retval != 0)
+ {
+ IFX_ERROR("_pcd_init failed\n");
+ return retval;
+ }
+ //ifxusb_enable_global_interrupts( _pcd->core_if ); // this should be done at gadget bind or start
+ return 0;
+ }
+#endif //__IS_DEVICE__
+
+
+
+/*!
+ \brief This function is called by the module management code in 2.6 kernels, or by ifxusb_driver_init in 2.4 kernels.
+ It probes and sets up the IFXUSB core(s).
+*/
+static int ifxusb_driver_probe(struct platform_device *_dev)
+{
+ int retval = 0;
+ int *pins = _dev->dev.platform_data;
+ if (ltq_is_vr9()) {
+ gpio_request(6, "id1");
+ gpio_request(9, "id2");
+ gpio_direction_input(6);
+ gpio_direction_input(9);
+ }
+ if (pins) {
+ if (pins[0]) {
+ gpio_request(pins[0], "vbus1");
+ gpio_direction_output(pins[0], 1);
+ }
+ if (pins[1] && ltq_is_vr9()) {
+ gpio_request(pins[1], "vbus2");
+ gpio_direction_output(pins[1], 1);
+ }
+ }
+	// Parse and store the module parameters
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ parse_parms();
+
+ #ifdef __IS_HOST__
+ #if defined(__IS_DUAL__)
+ memset(&ifxusb_hcd_1, 0, sizeof(ifxhcd_hcd_t));
+ memset(&ifxusb_hcd_2, 0, sizeof(ifxhcd_hcd_t));
+
+ ifxusb_hcd_1.core_if.core_no=0;
+ ifxusb_hcd_2.core_if.core_no=1;
+ ifxusb_hcd_1.core_if.core_name=(char *)ifxusb_hcd_name_1;
+ ifxusb_hcd_2.core_if.core_name=(char *)ifxusb_hcd_name_2;
+
+ ifxusb_hcd_1.dev=&_dev->dev;
+ ifxusb_hcd_2.dev=&_dev->dev;
+
+ retval = ifxusb_driver_probe_h(&ifxusb_hcd_1,
+ IFX_USB0_IR,
+ IFXUSB1_IOMEM_BASE,
+ IFXUSB1_FIFOMEM_BASE,
+ IFXUSB1_FIFODBG_BASE
+ );
+ if(retval)
+ goto ifxusb_driver_probe_fail;
+
+ retval = ifxusb_driver_probe_h(&ifxusb_hcd_2,
+ IFX_USB1_IR,
+ IFXUSB2_IOMEM_BASE,
+ IFXUSB2_FIFOMEM_BASE,
+ IFXUSB2_FIFODBG_BASE
+ );
+ if(retval)
+ goto ifxusb_driver_probe_fail;
+
+ #elif defined(__IS_FIRST__)
+ memset(&ifxusb_hcd, 0, sizeof(ifxhcd_hcd_t));
+
+ ifxusb_hcd.core_if.core_no=0;
+ ifxusb_hcd.core_if.core_name=(char *)ifxusb_hcd_name;
+
+ ifxusb_hcd.dev=&_dev->dev;
+
+ retval = ifxusb_driver_probe_h(&ifxusb_hcd,
+ IFX_USB0_IR,
+ IFXUSB1_IOMEM_BASE,
+ IFXUSB1_FIFOMEM_BASE,
+ IFXUSB1_FIFODBG_BASE
+ );
+ if(retval)
+ goto ifxusb_driver_probe_fail;
+
+ #elif defined(__IS_SECOND__)
+ memset(&ifxusb_hcd, 0, sizeof(ifxhcd_hcd_t));
+
+ ifxusb_hcd.core_if.core_no=1;
+ ifxusb_hcd.core_if.core_name=(char *)ifxusb_hcd_name;
+
+ ifxusb_hcd.dev=&_dev->dev;
+
+ retval = ifxusb_driver_probe_h(&ifxusb_hcd,
+ IFX_USB1_IR,
+ IFXUSB2_IOMEM_BASE,
+ IFXUSB2_FIFOMEM_BASE,
+ IFXUSB2_FIFODBG_BASE
+ );
+ if(retval)
+ goto ifxusb_driver_probe_fail;
+
+ #else
+ memset(&ifxusb_hcd, 0, sizeof(ifxhcd_hcd_t));
+
+ ifxusb_hcd.core_if.core_no=0;
+ ifxusb_hcd.core_if.core_name=(char *)ifxusb_hcd_name;
+
+ ifxusb_hcd.dev=&_dev->dev;
+
+ retval = ifxusb_driver_probe_h(&ifxusb_hcd,
+ IFXUSB_IRQ,
+ IFXUSB_IOMEM_BASE,
+ IFXUSB_FIFOMEM_BASE,
+ IFXUSB_FIFODBG_BASE
+ );
+ if(retval)
+ goto ifxusb_driver_probe_fail;
+ #endif
+
+ #if defined(__DO_OC_INT__)
+ IFXUSB_DEBUGPL( DBG_CIL, "registering (overcurrent) handler for irq%d\n", IFXUSB_OC_IRQ);
+ #if defined(__IS_DUAL__)
+ request_irq((unsigned int)IFXUSB_OC_IRQ, &ifx_hcd_oc_irq,
+// SA_INTERRUPT|SA_SHIRQ, "ifxusb_oc", (void *)&ifxusb_hcd_1);
+ IRQF_DISABLED | IRQF_SHARED, "ifxusb_oc", (void *)&ifxusb_hcd_1);
+ oc_int_id=&ifxusb_hcd_1;
+ #else
+ request_irq((unsigned int)IFXUSB_OC_IRQ, &ifx_hcd_oc_irq,
+// SA_INTERRUPT|SA_SHIRQ, "ifxusb_oc", (void *)&ifxusb_hcd);
+ IRQF_DISABLED | IRQF_SHARED, "ifxusb_oc", (void *)&ifxusb_hcd);
+ oc_int_id=&ifxusb_hcd;
+ #endif
+ oc_int_installed=1;
+
+ #if defined(__DO_OC_INT_ENABLE__)
+ ifxusb_oc_int_on();
+ #endif
+ #endif
+
+ #endif
+
+ #ifdef __IS_DEVICE__
+ memset(&ifxusb_pcd, 0, sizeof(ifxpcd_pcd_t));
+ ifxusb_pcd.core_if.core_name=(char *)&ifxusb_pcd_name[0];
+
+ ifxusb_pcd.dev=&_dev->dev;
+
+ #if defined(__IS_FIRST__)
+ ifxusb_pcd.core_if.core_no=0;
+ retval = ifxusb_driver_probe_d(&ifxusb_pcd,
+ IFXUSB1_IRQ,
+ IFXUSB1_IOMEM_BASE,
+ IFXUSB1_FIFOMEM_BASE,
+ IFXUSB1_FIFODBG_BASE
+ );
+ #elif defined(__IS_SECOND__)
+ ifxusb_pcd.core_if.core_no=1;
+ retval = ifxusb_driver_probe_d(&ifxusb_pcd,
+ IFXUSB2_IRQ,
+ IFXUSB2_IOMEM_BASE,
+ IFXUSB2_FIFOMEM_BASE,
+ IFXUSB2_FIFODBG_BASE
+ );
+ #else
+ ifxusb_pcd.core_if.core_no=0;
+ retval = ifxusb_driver_probe_d(&ifxusb_pcd,
+ IFXUSB_IRQ,
+ IFXUSB_IOMEM_BASE,
+ IFXUSB_FIFOMEM_BASE,
+ IFXUSB_FIFODBG_BASE
+ );
+ #endif
+ if(retval)
+ goto ifxusb_driver_probe_fail;
+ #endif
+
+ ifxusb_attr_create(&_dev->dev);
+
+ return 0;
+
+ifxusb_driver_probe_fail:
+ ifxusb_driver_remove(_dev);
+ return retval;
+}
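+
+/* Illustrative only (not part of this driver): a minimal sketch of how a
+ * board file could hand the vbus GPIO numbers to the probe above. The probe
+ * treats platform_data as an int array: pins[0] is the vbus GPIO of the
+ * first port, pins[1] that of the second port (only used on VR9); a value of
+ * 0 means "no vbus GPIO". The GPIO numbers below are made-up placeholders;
+ * the device name must match ifxusb_driver_name ("ifxusb_hcd" in host builds).
+ *
+ *   static int ifxusb_vbus_gpios[2] = { 14, 5 };
+ *
+ *   static struct platform_device ifxusb_platform_dev = {
+ *       .name = "ifxusb_hcd",
+ *       .id   = -1,
+ *       .dev  = { .platform_data = ifxusb_vbus_gpios },
+ *   };
+ *
+ *   platform_device_register(&ifxusb_platform_dev);
+ */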
+
+
+
+/*!
+ \brief This function is called when the ifxusb_driver is installed with the insmod command.
+*/
+
+
+static struct platform_driver ifxusb_driver = {
+ .driver = {
+ .name = ifxusb_driver_name,
+ .owner = THIS_MODULE,
+ },
+ .probe = ifxusb_driver_probe,
+ .remove = ifxusb_driver_remove,
+};
+
+int __init ifxusb_driver_init(void)
+{
+ int retval = 0;
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ IFX_PRINT("%s: version %s\n", ifxusb_driver_name, IFXUSB_VERSION);
+
+ retval = platform_driver_register(&ifxusb_driver);
+
+ if (retval < 0) {
+ IFX_ERROR("%s retval=%d\n", __func__, retval);
+ return retval;
+ }
+ return retval;
+}
+
+#if 0 // 2.4
+ int __init ifxusb_driver_init(void)
+ {
+ int retval = 0;
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ IFX_PRINT("%s: version %s\n", ifxusb_driver_name, IFXUSB_VERSION);
+ retval = ifxusb_driver_probe();
+
+ if (retval < 0) {
+ IFX_ERROR("%s retval=%d\n", __func__, retval);
+ return retval;
+ }
+
+ return retval;
+ }
+#endif
+
+module_init(ifxusb_driver_init);
+
+
+/*!
+ \brief This function is called when the driver is removed from the kernel
+ with the rmmod command. The driver unregisters itself with its bus
+ driver.
+*/
+
+void __exit ifxusb_driver_cleanup(void)
+{
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+
+ platform_driver_unregister(&ifxusb_driver);
+
+ IFX_PRINT("%s module removed\n", ifxusb_driver_name);
+}
+#if 0
+ void __exit ifxusb_driver_cleanup(void)
+ {
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ ifxusb_driver_remove();
+ IFX_PRINT("%s module removed\n", ifxusb_driver_name);
+ }
+#endif
+module_exit(ifxusb_driver_cleanup);
+
+
+
+MODULE_DESCRIPTION(USB_DRIVER_DESC);
+MODULE_AUTHOR("Infineon");
+MODULE_LICENSE("GPL");
+
+
+
+// Parameters set when loaded
+//static long dbg_lvl =0xFFFFFFFF;
+static long dbg_lvl =0;
+static short dma_burst_size =-1;
+static short speed =-1;
+static long data_fifo_size =-1;
+#ifdef __IS_DEVICE__
+ static long rx_fifo_size =-1;
+ #ifdef __DED_FIFO__
+ static long tx_fifo_size_00 =-1;
+ static long tx_fifo_size_01 =-1;
+ static long tx_fifo_size_02 =-1;
+ static long tx_fifo_size_03 =-1;
+ static long tx_fifo_size_04 =-1;
+ static long tx_fifo_size_05 =-1;
+ static long tx_fifo_size_06 =-1;
+ static long tx_fifo_size_07 =-1;
+ static long tx_fifo_size_08 =-1;
+ static long tx_fifo_size_09 =-1;
+ static long tx_fifo_size_10 =-1;
+ static long tx_fifo_size_11 =-1;
+ static long tx_fifo_size_12 =-1;
+ static long tx_fifo_size_13 =-1;
+ static long tx_fifo_size_14 =-1;
+ static long tx_fifo_size_15 =-1;
+ static short thr_ctl=-1;
+ static long tx_thr_length =-1;
+ static long rx_thr_length =-1;
+ #else
+ static long nperio_tx_fifo_size =-1;
+ static long perio_tx_fifo_size_01 =-1;
+ static long perio_tx_fifo_size_02 =-1;
+ static long perio_tx_fifo_size_03 =-1;
+ static long perio_tx_fifo_size_04 =-1;
+ static long perio_tx_fifo_size_05 =-1;
+ static long perio_tx_fifo_size_06 =-1;
+ static long perio_tx_fifo_size_07 =-1;
+ static long perio_tx_fifo_size_08 =-1;
+ static long perio_tx_fifo_size_09 =-1;
+ static long perio_tx_fifo_size_10 =-1;
+ static long perio_tx_fifo_size_11 =-1;
+ static long perio_tx_fifo_size_12 =-1;
+ static long perio_tx_fifo_size_13 =-1;
+ static long perio_tx_fifo_size_14 =-1;
+ static long perio_tx_fifo_size_15 =-1;
+ #endif
+ static short dev_endpoints =-1;
+#endif
+
+#ifdef __IS_HOST__
+ static long rx_fifo_size =-1;
+ static long nperio_tx_fifo_size =-1;
+ static long perio_tx_fifo_size =-1;
+ static short host_channels =-1;
+#endif
+
+static long max_transfer_size =-1;
+static long max_packet_count =-1;
+static long phy_utmi_width =-1;
+static long turn_around_time_hs =-1;
+static long turn_around_time_fs =-1;
+static long timeout_cal_hs =-1;
+static long timeout_cal_fs =-1;
+
+/*!
+ \brief Parse the parameters supplied at module load time
+*/
+static void parse_parms(void)
+{
+
+ IFX_DEBUGPL(DBG_ENTRY, "%s() %d\n", __func__, __LINE__ );
+ #ifdef __IS_HOST__
+ h_dbg_lvl=dbg_lvl;
+ #endif
+ #ifdef __IS_DEVICE__
+ d_dbg_lvl=dbg_lvl;
+ #endif
+
+ switch(dma_burst_size)
+ {
+ case 0:
+ case 1:
+ case 4:
+ case 8:
+ case 16:
+ ifxusb_module_params.dma_burst_size=dma_burst_size;
+ break;
+ default:
+ ifxusb_module_params.dma_burst_size=default_param_dma_burst_size;
+ }
+
+ if(speed==0 || speed==1)
+ ifxusb_module_params.speed=speed;
+ else
+ ifxusb_module_params.speed=default_param_speed;
+
+ if(max_transfer_size>=2048 && max_transfer_size<=65535)
+ ifxusb_module_params.max_transfer_size=max_transfer_size;
+ else
+ ifxusb_module_params.max_transfer_size=default_param_max_transfer_size;
+
+ if(max_packet_count>=15 && max_packet_count<=511)
+ ifxusb_module_params.max_packet_count=max_packet_count;
+ else
+ ifxusb_module_params.max_packet_count=default_param_max_packet_count;
+
+ switch(phy_utmi_width)
+ {
+ case 8:
+ case 16:
+ ifxusb_module_params.phy_utmi_width=phy_utmi_width;
+ break;
+ default:
+ ifxusb_module_params.phy_utmi_width=default_param_phy_utmi_width;
+ }
+
+ if(turn_around_time_hs>=0 && turn_around_time_hs<=7)
+ ifxusb_module_params.turn_around_time_hs=turn_around_time_hs;
+ else
+ ifxusb_module_params.turn_around_time_hs=default_param_turn_around_time_hs;
+
+ if(turn_around_time_fs>=0 && turn_around_time_fs<=7)
+ ifxusb_module_params.turn_around_time_fs=turn_around_time_fs;
+ else
+ ifxusb_module_params.turn_around_time_fs=default_param_turn_around_time_fs;
+
+ if(timeout_cal_hs>=0 && timeout_cal_hs<=7)
+ ifxusb_module_params.timeout_cal_hs=timeout_cal_hs;
+ else
+ ifxusb_module_params.timeout_cal_hs=default_param_timeout_cal_hs;
+
+ if(timeout_cal_fs>=0 && timeout_cal_fs<=7)
+ ifxusb_module_params.timeout_cal_fs=timeout_cal_fs;
+ else
+ ifxusb_module_params.timeout_cal_fs=default_param_timeout_cal_fs;
+
+ if(data_fifo_size>=32 && data_fifo_size<=32768)
+ ifxusb_module_params.data_fifo_size=data_fifo_size;
+ else
+ ifxusb_module_params.data_fifo_size=default_param_data_fifo_size;
+
+ #ifdef __IS_HOST__
+ if(host_channels>=1 && host_channels<=16)
+ ifxusb_module_params.host_channels=host_channels;
+ else
+ ifxusb_module_params.host_channels=default_param_host_channels;
+
+ if(rx_fifo_size>=16 && rx_fifo_size<=32768)
+ ifxusb_module_params.rx_fifo_size=rx_fifo_size;
+ else
+ ifxusb_module_params.rx_fifo_size=default_param_rx_fifo_size;
+
+ if(nperio_tx_fifo_size>=16 && nperio_tx_fifo_size<=32768)
+ ifxusb_module_params.nperio_tx_fifo_size=nperio_tx_fifo_size;
+ else
+ ifxusb_module_params.nperio_tx_fifo_size=default_param_nperio_tx_fifo_size;
+
+ if(perio_tx_fifo_size>=16 && perio_tx_fifo_size<=32768)
+ ifxusb_module_params.perio_tx_fifo_size=perio_tx_fifo_size;
+ else
+ ifxusb_module_params.perio_tx_fifo_size=default_param_perio_tx_fifo_size;
+ #endif //__IS_HOST__
+
+ #ifdef __IS_DEVICE__
+ if(rx_fifo_size>=16 && rx_fifo_size<=32768)
+ ifxusb_module_params.rx_fifo_size=rx_fifo_size;
+ else
+ ifxusb_module_params.rx_fifo_size=default_param_rx_fifo_size;
+ #ifdef __DED_FIFO__
+ if(tx_fifo_size_00>=16 && tx_fifo_size_00<=32768)
+ ifxusb_module_params.tx_fifo_size[ 0]=tx_fifo_size_00;
+ else
+ ifxusb_module_params.tx_fifo_size[ 0]=default_param_tx_fifo_size_00;
+ if(tx_fifo_size_01>=0 && tx_fifo_size_01<=32768)
+ ifxusb_module_params.tx_fifo_size[ 1]=tx_fifo_size_01;
+ else
+ ifxusb_module_params.tx_fifo_size[ 1]=default_param_tx_fifo_size_01;
+ if(tx_fifo_size_02>=0 && tx_fifo_size_02<=32768)
+ ifxusb_module_params.tx_fifo_size[ 2]=tx_fifo_size_02;
+ else
+ ifxusb_module_params.tx_fifo_size[ 2]=default_param_tx_fifo_size_02;
+ if(tx_fifo_size_03>=0 && tx_fifo_size_03<=32768)
+ ifxusb_module_params.tx_fifo_size[ 3]=tx_fifo_size_03;
+ else
+ ifxusb_module_params.tx_fifo_size[ 3]=default_param_tx_fifo_size_03;
+ if(tx_fifo_size_04>=0 && tx_fifo_size_04<=32768)
+ ifxusb_module_params.tx_fifo_size[ 4]=tx_fifo_size_04;
+ else
+ ifxusb_module_params.tx_fifo_size[ 4]=default_param_tx_fifo_size_04;
+ if(tx_fifo_size_05>=0 && tx_fifo_size_05<=32768)
+ ifxusb_module_params.tx_fifo_size[ 5]=tx_fifo_size_05;
+ else
+ ifxusb_module_params.tx_fifo_size[ 5]=default_param_tx_fifo_size_05;
+ if(tx_fifo_size_06>=0 && tx_fifo_size_06<=32768)
+ ifxusb_module_params.tx_fifo_size[ 6]=tx_fifo_size_06;
+ else
+ ifxusb_module_params.tx_fifo_size[ 6]=default_param_tx_fifo_size_06;
+ if(tx_fifo_size_07>=0 && tx_fifo_size_07<=32768)
+ ifxusb_module_params.tx_fifo_size[ 7]=tx_fifo_size_07;
+ else
+ ifxusb_module_params.tx_fifo_size[ 7]=default_param_tx_fifo_size_07;
+ if(tx_fifo_size_08>=0 && tx_fifo_size_08<=32768)
+ ifxusb_module_params.tx_fifo_size[ 8]=tx_fifo_size_08;
+ else
+ ifxusb_module_params.tx_fifo_size[ 8]=default_param_tx_fifo_size_08;
+ if(tx_fifo_size_09>=0 && tx_fifo_size_09<=32768)
+ ifxusb_module_params.tx_fifo_size[ 9]=tx_fifo_size_09;
+ else
+ ifxusb_module_params.tx_fifo_size[ 9]=default_param_tx_fifo_size_09;
+ if(tx_fifo_size_10>=0 && tx_fifo_size_10<=32768)
+ ifxusb_module_params.tx_fifo_size[10]=tx_fifo_size_10;
+ else
+ ifxusb_module_params.tx_fifo_size[10]=default_param_tx_fifo_size_10;
+ if(tx_fifo_size_11>=0 && tx_fifo_size_11<=32768)
+ ifxusb_module_params.tx_fifo_size[11]=tx_fifo_size_11;
+ else
+ ifxusb_module_params.tx_fifo_size[11]=default_param_tx_fifo_size_11;
+ if(tx_fifo_size_12>=0 && tx_fifo_size_12<=32768)
+ ifxusb_module_params.tx_fifo_size[12]=tx_fifo_size_12;
+ else
+ ifxusb_module_params.tx_fifo_size[12]=default_param_tx_fifo_size_12;
+ if(tx_fifo_size_13>=0 && tx_fifo_size_13<=32768)
+ ifxusb_module_params.tx_fifo_size[13]=tx_fifo_size_13;
+ else
+ ifxusb_module_params.tx_fifo_size[13]=default_param_tx_fifo_size_13;
+ if(tx_fifo_size_14>=0 && tx_fifo_size_14<=32768)
+ ifxusb_module_params.tx_fifo_size[14]=tx_fifo_size_14;
+ else
+ ifxusb_module_params.tx_fifo_size[14]=default_param_tx_fifo_size_14;
+ if(tx_fifo_size_15>=0 && tx_fifo_size_15<=32768)
+ ifxusb_module_params.tx_fifo_size[15]=tx_fifo_size_15;
+ else
+ ifxusb_module_params.tx_fifo_size[15]=default_param_tx_fifo_size_15;
+ if(thr_ctl==0 || thr_ctl==1)
+ ifxusb_module_params.thr_ctl=thr_ctl;
+ else
+ ifxusb_module_params.thr_ctl=default_param_thr_ctl;
+ if(tx_thr_length>=16 && tx_thr_length<=511)
+ ifxusb_module_params.tx_thr_length=tx_thr_length;
+ else
+ ifxusb_module_params.tx_thr_length=default_param_tx_thr_length;
+ if(rx_thr_length>=16 && rx_thr_length<=511)
+ ifxusb_module_params.rx_thr_length=rx_thr_length;
+ else
+ ifxusb_module_params.rx_thr_length=default_param_rx_thr_length;
+ #else //__DED_FIFO__
+ if(nperio_tx_fifo_size>=16 && nperio_tx_fifo_size<=32768)
+ ifxusb_module_params.tx_fifo_size[ 0]=nperio_tx_fifo_size;
+ else
+ ifxusb_module_params.tx_fifo_size[ 0]=default_param_nperio_tx_fifo_size;
+ if(perio_tx_fifo_size_01>=0 && perio_tx_fifo_size_01<=32768)
+ ifxusb_module_params.tx_fifo_size[ 1]=perio_tx_fifo_size_01;
+ else
+ ifxusb_module_params.tx_fifo_size[ 1]=default_param_perio_tx_fifo_size_01;
+ if(perio_tx_fifo_size_02>=0 && perio_tx_fifo_size_02<=32768)
+ ifxusb_module_params.tx_fifo_size[ 2]=perio_tx_fifo_size_02;
+ else
+ ifxusb_module_params.tx_fifo_size[ 2]=default_param_perio_tx_fifo_size_02;
+ if(perio_tx_fifo_size_03>=0 && perio_tx_fifo_size_03<=32768)
+ ifxusb_module_params.tx_fifo_size[ 3]=perio_tx_fifo_size_03;
+ else
+ ifxusb_module_params.tx_fifo_size[ 3]=default_param_perio_tx_fifo_size_03;
+ if(perio_tx_fifo_size_04>=0 && perio_tx_fifo_size_04<=32768)
+ ifxusb_module_params.tx_fifo_size[ 4]=perio_tx_fifo_size_04;
+ else
+ ifxusb_module_params.tx_fifo_size[ 4]=default_param_perio_tx_fifo_size_04;
+ if(perio_tx_fifo_size_05>=0 && perio_tx_fifo_size_05<=32768)
+ ifxusb_module_params.tx_fifo_size[ 5]=perio_tx_fifo_size_05;
+ else
+ ifxusb_module_params.tx_fifo_size[ 5]=default_param_perio_tx_fifo_size_05;
+ if(perio_tx_fifo_size_06>=0 && perio_tx_fifo_size_06<=32768)
+ ifxusb_module_params.tx_fifo_size[ 6]=perio_tx_fifo_size_06;
+ else
+ ifxusb_module_params.tx_fifo_size[ 6]=default_param_perio_tx_fifo_size_06;
+ if(perio_tx_fifo_size_07>=0 && perio_tx_fifo_size_07<=32768)
+ ifxusb_module_params.tx_fifo_size[ 7]=perio_tx_fifo_size_07;
+ else
+ ifxusb_module_params.tx_fifo_size[ 7]=default_param_perio_tx_fifo_size_07;
+ if(perio_tx_fifo_size_08>=0 && perio_tx_fifo_size_08<=32768)
+ ifxusb_module_params.tx_fifo_size[ 8]=perio_tx_fifo_size_08;
+ else
+ ifxusb_module_params.tx_fifo_size[ 8]=default_param_perio_tx_fifo_size_08;
+ if(perio_tx_fifo_size_09>=0 && perio_tx_fifo_size_09<=32768)
+ ifxusb_module_params.tx_fifo_size[ 9]=perio_tx_fifo_size_09;
+ else
+ ifxusb_module_params.tx_fifo_size[ 9]=default_param_perio_tx_fifo_size_09;
+ if(perio_tx_fifo_size_10>=0 && perio_tx_fifo_size_10<=32768)
+ ifxusb_module_params.tx_fifo_size[10]=perio_tx_fifo_size_10;
+ else
+ ifxusb_module_params.tx_fifo_size[10]=default_param_perio_tx_fifo_size_10;
+ if(perio_tx_fifo_size_11>=0 && perio_tx_fifo_size_11<=32768)
+ ifxusb_module_params.tx_fifo_size[11]=perio_tx_fifo_size_11;
+ else
+ ifxusb_module_params.tx_fifo_size[11]=default_param_perio_tx_fifo_size_11;
+ if(perio_tx_fifo_size_12>=0 && perio_tx_fifo_size_12<=32768)
+ ifxusb_module_params.tx_fifo_size[12]=perio_tx_fifo_size_12;
+ else
+ ifxusb_module_params.tx_fifo_size[12]=default_param_perio_tx_fifo_size_12;
+ if(perio_tx_fifo_size_13>=0 && perio_tx_fifo_size_13<=32768)
+ ifxusb_module_params.tx_fifo_size[13]=perio_tx_fifo_size_13;
+ else
+ ifxusb_module_params.tx_fifo_size[13]=default_param_perio_tx_fifo_size_13;
+ if(perio_tx_fifo_size_14>=0 && perio_tx_fifo_size_14<=32768)
+ ifxusb_module_params.tx_fifo_size[14]=perio_tx_fifo_size_14;
+ else
+ ifxusb_module_params.tx_fifo_size[14]=default_param_perio_tx_fifo_size_14;
+ if(perio_tx_fifo_size_15>=0 && perio_tx_fifo_size_15<=32768)
+ ifxusb_module_params.tx_fifo_size[15]=perio_tx_fifo_size_15;
+ else
+ ifxusb_module_params.tx_fifo_size[15]=default_param_perio_tx_fifo_size_15;
+ #endif //__DED_FIFO__
+ #endif //__IS_DEVICE__
+}
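+
+/* Every parameter above follows the same pattern: a value inside the
+ * documented range is taken as-is, anything else (including the initial -1,
+ * meaning "not given at load time") falls back to the corresponding
+ * default_param_* constant selected per platform in ifxusb_plat.h. */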
+
+
+
+
+
+
+
+module_param(dbg_lvl, long, 0444);
+MODULE_PARM_DESC(dbg_lvl, "Debug level.");
+
+module_param(dma_burst_size, short, 0444);
+MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 0, 1, 4, 8, 16");
+
+module_param(speed, short, 0444);
+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed");
+
+module_param(data_fifo_size, long, 0444);
+MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768");
+
+#ifdef __IS_DEVICE__
+ module_param(rx_fifo_size, long, 0444);
+ MODULE_PARM_DESC(rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+
+ #ifdef __DED_FIFO__
+ module_param(tx_fifo_size_00, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_00, "Number of words in the Tx FIFO #00 16-32768");
+ module_param(tx_fifo_size_01, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_01, "Number of words in the Tx FIFO #01 0-32768");
+ module_param(tx_fifo_size_02, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_02, "Number of words in the Tx FIFO #02 0-32768");
+ module_param(tx_fifo_size_03, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_03, "Number of words in the Tx FIFO #03 0-32768");
+ module_param(tx_fifo_size_04, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_04, "Number of words in the Tx FIFO #04 0-32768");
+ module_param(tx_fifo_size_05, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_05, "Number of words in the Tx FIFO #05 0-32768");
+ module_param(tx_fifo_size_06, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_06, "Number of words in the Tx FIFO #06 0-32768");
+ module_param(tx_fifo_size_07, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_07, "Number of words in the Tx FIFO #07 0-32768");
+ module_param(tx_fifo_size_08, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_08, "Number of words in the Tx FIFO #08 0-32768");
+ module_param(tx_fifo_size_09, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_09, "Number of words in the Tx FIFO #09 0-32768");
+ module_param(tx_fifo_size_10, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_10, "Number of words in the Tx FIFO #10 0-32768");
+ module_param(tx_fifo_size_11, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_11, "Number of words in the Tx FIFO #11 0-32768");
+ module_param(tx_fifo_size_12, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_12, "Number of words in the Tx FIFO #12 0-32768");
+ module_param(tx_fifo_size_13, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_13, "Number of words in the Tx FIFO #13 0-32768");
+ module_param(tx_fifo_size_14, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_14, "Number of words in the Tx FIFO #14 0-32768");
+ module_param(tx_fifo_size_15, long, 0444);
+ MODULE_PARM_DESC(tx_fifo_size_15, "Number of words in the Tx FIFO #15 0-32768");
+
+ module_param(thr_ctl, short, 0444);
+	 MODULE_PARM_DESC(thr_ctl, "0=Without 1=With Threshold Ctrl");
+
+ module_param(tx_thr_length, long, 0444);
+ MODULE_PARM_DESC(tx_thr_length, "TX Threshold length");
+
+ module_param(rx_thr_length, long, 0444);
+ MODULE_PARM_DESC(rx_thr_length, "RX Threshold length");
+
+ #else
+ module_param(nperio_tx_fifo_size, long, 0444);
+ MODULE_PARM_DESC(nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768");
+
+ module_param(perio_tx_fifo_size_01, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_01, "Number of words in the periodic Tx FIFO #01 0-32768");
+ module_param(perio_tx_fifo_size_02, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_02, "Number of words in the periodic Tx FIFO #02 0-32768");
+ module_param(perio_tx_fifo_size_03, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_03, "Number of words in the periodic Tx FIFO #03 0-32768");
+ module_param(perio_tx_fifo_size_04, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_04, "Number of words in the periodic Tx FIFO #04 0-32768");
+ module_param(perio_tx_fifo_size_05, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_05, "Number of words in the periodic Tx FIFO #05 0-32768");
+ module_param(perio_tx_fifo_size_06, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_06, "Number of words in the periodic Tx FIFO #06 0-32768");
+ module_param(perio_tx_fifo_size_07, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_07, "Number of words in the periodic Tx FIFO #07 0-32768");
+ module_param(perio_tx_fifo_size_08, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_08, "Number of words in the periodic Tx FIFO #08 0-32768");
+ module_param(perio_tx_fifo_size_09, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_09, "Number of words in the periodic Tx FIFO #09 0-32768");
+ module_param(perio_tx_fifo_size_10, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO #10 0-32768");
+ module_param(perio_tx_fifo_size_11, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO #11 0-32768");
+ module_param(perio_tx_fifo_size_12, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO #12 0-32768");
+ module_param(perio_tx_fifo_size_13, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO #13 0-32768");
+ module_param(perio_tx_fifo_size_14, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO #14 0-32768");
+ module_param(perio_tx_fifo_size_15, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO #15 0-32768");
+ #endif//__DED_FIFO__
+ module_param(dev_endpoints, short, 0444);
+ MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15");
+#endif
+
+#ifdef __IS_HOST__
+ module_param(rx_fifo_size, long, 0444);
+ MODULE_PARM_DESC(rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+
+ module_param(nperio_tx_fifo_size, long, 0444);
+ MODULE_PARM_DESC(nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768");
+
+ module_param(perio_tx_fifo_size, long, 0444);
+ MODULE_PARM_DESC(perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768");
+
+ module_param(host_channels, short, 0444);
+ MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16");
+#endif
+
+module_param(max_transfer_size, long, 0444);
+MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535");
+
+module_param(max_packet_count, long, 0444);
+MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511");
+
+module_param(phy_utmi_width, long, 0444);
+MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits");
+
+module_param(turn_around_time_hs, long, 0444);
+MODULE_PARM_DESC(turn_around_time_hs, "Turn-Around time for HS");
+
+module_param(turn_around_time_fs, long, 0444);
+MODULE_PARM_DESC(turn_around_time_fs, "Turn-Around time for FS");
+
+module_param(timeout_cal_hs, long, 0444);
+MODULE_PARM_DESC(timeout_cal_hs, "Timeout Cal for HS");
+
+module_param(timeout_cal_fs, long, 0444);
+MODULE_PARM_DESC(timeout_cal_fs, "Timeout Cal for FS");
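+
+/* Example invocation (illustrative only; the parameter values are arbitrary
+ * and the module name corresponds to a host build):
+ *
+ *   insmod ifxusb_hcd.ko dbg_lvl=0 dma_burst_size=4 speed=0 \
+ *          rx_fifo_size=240 nperio_tx_fifo_size=240 perio_tx_fifo_size=32
+ *
+ * Values outside the ranges listed in the MODULE_PARM_DESC strings above are
+ * silently replaced by the platform defaults in parse_parms(). */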
+
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_plat.h b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_plat.h
new file mode 100644
index 0000000000..a50294fdcf
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_plat.h
@@ -0,0 +1,1018 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_plat.h
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the Platform Specific constants, interfaces
+ ** (functions and macros).
+ ** FUNCTIONS :
+ ** COMPILER : gcc
+ ** REFERENCE : IFX hardware reference handbook for each platform
+ ** COPYRIGHT :
+ ** Version Control Section **
+ ** $Author$
+ ** $Date$
+ ** $Revisions$
+ ** $Log$ Revision history
+ *****************************************************************************/
+
+
+/*!
+ \defgroup IFXUSB_PLATEFORM_DEFINITION Platform Specific constants, interfaces (functions and macros).
+ \ingroup IFXUSB_DRIVER_V3
+ \brief Maintain platform-specific definitions and macros in this file.
+ Each platform has its own definition zone.
+ */
+
+/*!
+ \defgroup IFXUSB_PLATEFORM_MEM_ADDR Definitions of memory addresses, sizes and default parameters
+ \ingroup IFXUSB_PLATEFORM_DEFINITION
+ */
+
+/*!
+ \defgroup IFXUSB_DBG_ROUTINE Routines for debug messages
+ \ingroup IFXUSB_PLATEFORM_DEFINITION
+ */
+
+
+/*! \file ifxusb_plat.h
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the Platform Specific constants, interfaces (functions and macros).
+*/
+
+#if !defined(__IFXUSB_PLAT_H__)
+#define __IFXUSB_PLAT_H__
+
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+
+#define IFXUSB_IOMEM_SIZE 0x00001000
+#define IFXUSB_FIFOMEM_SIZE 0x00010000
+#define IFXUSB_FIFODBG_SIZE 0x00020000
+
+
+
+/*!
+ \addtogroup IFXUSB_PLATEFORM_MEM_ADDR
+ */
+/*@{*/
+#if defined(__UEIP__)
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+// #define IFXUSB_IRQ 54
+ #define IFXUSB_IOMEM_BASE 0x1e101000
+ #define IFXUSB_FIFOMEM_BASE 0x1e120000
+ #define IFXUSB_FIFODBG_BASE 0x1e140000
+// #define IFXUSB_OC_IRQ 151
+
+ #ifndef DANUBE_RCU_BASE_ADDR
+ #define DANUBE_RCU_BASE_ADDR (0xBF203000)
+ #endif
+
+ #ifndef DANUBE_CGU
+ #define DANUBE_CGU (0xBF103000)
+ #endif
+ #ifndef DANUBE_CGU_IFCCR
+ #define DANUBE_CGU_IFCCR ((volatile unsigned long *)(DANUBE_CGU+ 0x0018))
+ #endif
+ #ifndef DANUBE_PMU
+ #define DANUBE_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef DANUBE_PMU_PWDCR
+ #define DANUBE_PMU_PWDCR ((volatile unsigned long *)(DANUBE_PMU+0x001C))
+ #endif
+
+ #ifndef DANUBE_GPIO_P0_OUT
+ #define DANUBE_GPIO_P0_OUT (0xBF103000+0x10)
+ #define DANUBE_GPIO_P0_DIR (0xBF103000+0x18)
+ #define DANUBE_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define DANUBE_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define DANUBE_GPIO_P0_OD (0xBF103000+0x24)
+ #define DANUBE_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define DANUBE_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define DANUBE_GPIO_P1_OUT (0xBF103000+0x40)
+ #define DANUBE_GPIO_P1_DIR (0xBF103000+0x48)
+ #define DANUBE_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define DANUBE_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define DANUBE_GPIO_P1_OD (0xBF103000+0x54)
+ #define DANUBE_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define DANUBE_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+ #define DANUBE_RCU_USBCFG ((volatile unsigned long *)(DANUBE_RCU_BASE_ADDR + 0x18))
+ #define DANUBE_RCU_RESET ((volatile unsigned long *)(DANUBE_RCU_BASE_ADDR + 0x10))
+ #define DANUBE_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define DANUBE_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define DANUBE_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end
+
+ #define default_param_dma_burst_size 4
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 4
+ #define default_param_turn_around_time_fs 4
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 640
+ #define default_param_nperio_tx_fifo_size 640
+ #define default_param_perio_tx_fifo_size 768
+ #endif //__IS_HOST__
+
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_INTR__
+ #define default_param_rx_fifo_size 1024
+ #define default_param_nperio_tx_fifo_size 1016
+ #define default_param_perio_tx_fifo_size_01 8
+ #else
+ #define default_param_rx_fifo_size 1024
+ #define default_param_nperio_tx_fifo_size 1024
+ #define default_param_perio_tx_fifo_size_01 0
+ #endif
+ #define default_param_perio_tx_fifo_size_02 0
+ #define default_param_perio_tx_fifo_size_03 0
+ #define default_param_perio_tx_fifo_size_04 0
+ #define default_param_perio_tx_fifo_size_05 0
+ #define default_param_perio_tx_fifo_size_06 0
+ #define default_param_perio_tx_fifo_size_07 0
+ #define default_param_perio_tx_fifo_size_08 0
+ #define default_param_perio_tx_fifo_size_09 0
+ #define default_param_perio_tx_fifo_size_10 0
+ #define default_param_perio_tx_fifo_size_11 0
+ #define default_param_perio_tx_fifo_size_12 0
+ #define default_param_perio_tx_fifo_size_13 0
+ #define default_param_perio_tx_fifo_size_14 0
+ #define default_param_perio_tx_fifo_size_15 0
+ #endif //__IS_DEVICE__
+
+ #elif defined(__IS_AMAZON_SE__)
+ //#include <asm/amazon_se/amazon_se.h>
+ //#include <asm/amazon_se/irq.h>
+
+// #define IFXUSB_IRQ 31
+ #define IFXUSB_IOMEM_BASE 0x1e101000
+ #define IFXUSB_FIFOMEM_BASE 0x1e120000
+ #define IFXUSB_FIFODBG_BASE 0x1e140000
+// #define IFXUSB_OC_IRQ 20
+
+ #ifndef AMAZON_SE_RCU_BASE_ADDR
+ #define AMAZON_SE_RCU_BASE_ADDR (0xBF203000)
+ #endif
+ #define AMAZON_SE_RCU_USBCFG ((volatile unsigned long *)(AMAZON_SE_RCU_BASE_ADDR + 0x18))
+ #define AMAZON_SE_RCU_RESET ((volatile unsigned long *)(AMAZON_SE_RCU_BASE_ADDR + 0x10))
+ #define AMAZON_SE_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define AMAZON_SE_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define AMAZON_SE_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end
+
+ #ifndef AMAZON_SE_GPIO_P0_OUT
+ #define AMAZON_SE_GPIO_P0_OUT (0xBF103000+0x10)
+ #define AMAZON_SE_GPIO_P0_DIR (0xBF103000+0x18)
+ #define AMAZON_SE_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define AMAZON_SE_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define AMAZON_SE_GPIO_P0_OD (0xBF103000+0x24)
+ #define AMAZON_SE_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define AMAZON_SE_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define AMAZON_SE_GPIO_P1_OUT (0xBF103000+0x40)
+ #define AMAZON_SE_GPIO_P1_DIR (0xBF103000+0x48)
+ #define AMAZON_SE_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define AMAZON_SE_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define AMAZON_SE_GPIO_P1_OD (0xBF103000+0x54)
+ #define AMAZON_SE_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define AMAZON_SE_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+ #ifndef AMAZON_SE_CGU
+ #define AMAZON_SE_CGU (0xBF103000)
+ #endif
+ #ifndef AMAZON_SE_CGU_IFCCR
+ #define AMAZON_SE_CGU_IFCCR ((volatile unsigned long *)(AMAZON_SE_CGU+ 0x0018))
+ #endif
+ #ifndef AMAZON_SE_PMU
+ #define AMAZON_SE_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef AMAZON_SE_PMU_PWDCR
+ #define AMAZON_SE_PMU_PWDCR ((volatile unsigned long *)(AMAZON_SE_PMU+0x001C))
+ #endif
+
+ #define default_param_dma_burst_size 4
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 4 //(NoChange)
+ #define default_param_turn_around_time_fs 4 //(NoChange)
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 240
+ #define default_param_nperio_tx_fifo_size 240
+ #define default_param_perio_tx_fifo_size 32
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_INTR__
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 248
+ #define default_param_perio_tx_fifo_size_01 8
+ #else
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 256
+ #define default_param_perio_tx_fifo_size_01 0
+ #endif
+ #define default_param_perio_tx_fifo_size_02 0
+ #define default_param_perio_tx_fifo_size_03 0
+ #define default_param_perio_tx_fifo_size_04 0
+ #define default_param_perio_tx_fifo_size_05 0
+ #define default_param_perio_tx_fifo_size_06 0
+ #define default_param_perio_tx_fifo_size_07 0
+ #define default_param_perio_tx_fifo_size_08 0
+ #define default_param_perio_tx_fifo_size_09 0
+ #define default_param_perio_tx_fifo_size_10 0
+ #define default_param_perio_tx_fifo_size_11 0
+ #define default_param_perio_tx_fifo_size_12 0
+ #define default_param_perio_tx_fifo_size_13 0
+ #define default_param_perio_tx_fifo_size_14 0
+ #define default_param_perio_tx_fifo_size_15 0
+ #endif //__IS_DEVICE__
+
+ #elif defined(__IS_AR9__)
+// #define IFXUSB1_IRQ 54
+ #define IFXUSB1_IOMEM_BASE 0x1E101000
+ #define IFXUSB1_FIFOMEM_BASE 0x1E120000
+ #define IFXUSB1_FIFODBG_BASE 0x1E140000
+
+// #define IFXUSB2_IRQ 83
+ #define IFXUSB2_IOMEM_BASE 0x1E106000
+ #define IFXUSB2_FIFOMEM_BASE 0x1E1E0000
+ #define IFXUSB2_FIFODBG_BASE 0x1E1C0000
+
+// #define IFXUSB_OC_IRQ 60
+
+ #ifndef AR9_RCU_BASE_ADDR
+ #define AR9_RCU_BASE_ADDR (0xBF203000)
+ #endif
+
+ #ifndef AR9_CGU
+ #define AR9_CGU (0xBF103000)
+ #endif
+ #ifndef AR9_CGU_IFCCR
+ #define AR9_CGU_IFCCR ((volatile unsigned long *)(AR9_CGU+ 0x0018))
+ #endif
+
+ #ifndef AR9_PMU
+ #define AR9_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef AR9_PMU_PWDCR
+ #define AR9_PMU_PWDCR ((volatile unsigned long *)(AR9_PMU+0x001C))
+ #endif
+
+ #ifndef AR9_GPIO_P0_OUT
+ #define AR9_GPIO_P0_OUT (0xBF103000+0x10)
+ #define AR9_GPIO_P0_DIR (0xBF103000+0x18)
+ #define AR9_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define AR9_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define AR9_GPIO_P0_OD (0xBF103000+0x24)
+ #define AR9_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define AR9_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define AR9_GPIO_P1_OUT (0xBF103000+0x40)
+ #define AR9_GPIO_P1_DIR (0xBF103000+0x48)
+ #define AR9_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define AR9_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define AR9_GPIO_P1_OD (0xBF103000+0x54)
+ #define AR9_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define AR9_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+ #define AR9_RCU_USB1CFG ((volatile unsigned long *)(AR9_RCU_BASE_ADDR + 0x18))
+ #define AR9_RCU_USB2CFG ((volatile unsigned long *)(AR9_RCU_BASE_ADDR + 0x34))
+ #define AR9_RCU_USBRESET ((volatile unsigned long *)(AR9_RCU_BASE_ADDR + 0x10))
+ #define AR9_USBCFG_ARB 7 //
+ #define AR9_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define AR9_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define AR9_USBCFG_SLV_END_BIT 17 // 0:little_end, 1:big_end
+
+ #define default_param_dma_burst_size 4
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 4 //(NoChange)
+ #define default_param_turn_around_time_fs 4 //(NoChange)
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 240
+ #define default_param_nperio_tx_fifo_size 240
+ #define default_param_perio_tx_fifo_size 32
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_INTR__
+ #define default_param_rx_fifo_size 256
+// #define default_param_nperio_tx_fifo_size 248
+// #define default_param_perio_tx_fifo_size_01 8
+ #define default_param_nperio_tx_fifo_size 252
+ #define default_param_perio_tx_fifo_size_01 4
+ #else
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 256
+ #define default_param_perio_tx_fifo_size_01 0
+ #endif
+ #define default_param_perio_tx_fifo_size_02 0
+ #define default_param_perio_tx_fifo_size_03 0
+ #define default_param_perio_tx_fifo_size_04 0
+ #define default_param_perio_tx_fifo_size_05 0
+ #define default_param_perio_tx_fifo_size_06 0
+ #define default_param_perio_tx_fifo_size_07 0
+ #define default_param_perio_tx_fifo_size_08 0
+ #define default_param_perio_tx_fifo_size_09 0
+ #define default_param_perio_tx_fifo_size_10 0
+ #define default_param_perio_tx_fifo_size_11 0
+ #define default_param_perio_tx_fifo_size_12 0
+ #define default_param_perio_tx_fifo_size_13 0
+ #define default_param_perio_tx_fifo_size_14 0
+ #define default_param_perio_tx_fifo_size_15 0
+ #endif //__IS_DEVICE__
+
+ #elif defined(__IS_VR9__)
+// #define IFXUSB1_IRQ 54
+ #define IFXUSB1_IOMEM_BASE 0x1E101000
+ #define IFXUSB1_FIFOMEM_BASE 0x1E120000
+ #define IFXUSB1_FIFODBG_BASE 0x1E140000
+
+// #define IFXUSB2_IRQ 83
+ #define IFXUSB2_IOMEM_BASE 0x1E106000
+ #define IFXUSB2_FIFOMEM_BASE 0x1E1E0000
+ #define IFXUSB2_FIFODBG_BASE 0x1E1C0000
+// #define IFXUSB_OC_IRQ 60
+
+ #ifndef VR9_RCU_BASE_ADDR
+ #define VR9_RCU_BASE_ADDR (0xBF203000)
+ #endif
+
+ #ifndef VR9_CGU
+ #define VR9_CGU (0xBF103000)
+ #endif
+ #ifndef VR9_CGU_IFCCR
+ #define VR9_CGU_IFCCR ((volatile unsigned long *)(VR9_CGU+ 0x0018))
+ #endif
+
+ #ifndef VR9_PMU
+ #define VR9_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef VR9_PMU_PWDCR
+ #define VR9_PMU_PWDCR ((volatile unsigned long *)(VR9_PMU+0x001C))
+ #endif
+
+ #ifndef VR9_GPIO_P0_OUT
+ #define VR9_GPIO_P0_OUT (0xBF103000+0x10)
+ #define VR9_GPIO_P0_DIR (0xBF103000+0x18)
+ #define VR9_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define VR9_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define VR9_GPIO_P0_OD (0xBF103000+0x24)
+ #define VR9_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define VR9_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define VR9_GPIO_P1_OUT (0xBF103000+0x40)
+ #define VR9_GPIO_P1_DIR (0xBF103000+0x48)
+ #define VR9_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define VR9_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define VR9_GPIO_P1_OD (0xBF103000+0x54)
+ #define VR9_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define VR9_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+ #define VR9_RCU_USB1CFG ((volatile unsigned long *)(VR9_RCU_BASE_ADDR + 0x18))
+ #define VR9_RCU_USB2CFG ((volatile unsigned long *)(VR9_RCU_BASE_ADDR + 0x34))
+		#define VR9_RCU_USB_ANA_CFG1A ((volatile unsigned long *)(VR9_RCU_BASE_ADDR + 0x38))
+		#define VR9_RCU_USB_ANA_CFG1B ((volatile unsigned long *)(VR9_RCU_BASE_ADDR + 0x3C))
+ #define VR9_RCU_USBRESET ((volatile unsigned long *)(VR9_RCU_BASE_ADDR + 0x10))
+ #define VR9_RCU_USBRESET2 ((volatile unsigned long *)(VR9_RCU_BASE_ADDR + 0x48))
+ #define VR9_USBCFG_ARB 7 //
+ #define VR9_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define VR9_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define VR9_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end
+
+ /*== AVM/BC 20101220 Workaround VR9 DMA burst size ==
+	 * Using 2 devices on different ports causes a general USB host error.
+ * Workaround found in UGW4.3
+ */
+// #define default_param_dma_burst_size 4 //(ALL)
+ //WA for AHB
+ #define default_param_dma_burst_size 0 //(ALL)
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 6 //(NoChange) snpsid >= 0x4f54260a
+ #define default_param_turn_around_time_fs 6 //(NoChange) snpsid >= 0x4f54260a
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 240
+ #define default_param_nperio_tx_fifo_size 240
+ #define default_param_perio_tx_fifo_size 32
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+#if 0
+ #define default_param_rx_fifo_size 256
+ #define default_param_tx_fifo_size_00 -1
+ #define default_param_tx_fifo_size_01 -1
+ #define default_param_tx_fifo_size_02 -1
+#else
+ #define default_param_rx_fifo_size 256
+ #define default_param_tx_fifo_size_00 32
+ #define default_param_tx_fifo_size_01 200
+ #define default_param_tx_fifo_size_02 8
+#endif
+ #define default_param_tx_fifo_size_03 -1
+ #define default_param_tx_fifo_size_04 -1
+ #define default_param_tx_fifo_size_05 -1
+ #define default_param_tx_fifo_size_06 -1
+ #define default_param_tx_fifo_size_07 -1
+ #define default_param_tx_fifo_size_08 -1
+ #define default_param_tx_fifo_size_09 -1
+ #define default_param_tx_fifo_size_10 -1
+ #define default_param_tx_fifo_size_11 -1
+ #define default_param_tx_fifo_size_12 -1
+ #define default_param_tx_fifo_size_13 -1
+ #define default_param_tx_fifo_size_14 -1
+ #define default_param_tx_fifo_size_15 -1
+ #define default_param_dma_unalgned_tx -1
+ #define default_param_dma_unalgned_rx -1
+ #define default_param_thr_ctl -1
+ #define default_param_tx_thr_length -1
+ #define default_param_rx_thr_length -1
+ #endif //__IS_DEVICE__
+ #else // __IS_VR9__
+ #error "Please choose one platform!!"
+ #endif // __IS_VR9__
+
+#else //UEIP
+ #if defined(__IS_TWINPASS__) || defined(__IS_DANUBE__)
+// #define IFXUSB_IRQ 54
+ #define IFXUSB_IOMEM_BASE 0x1e101000
+ #define IFXUSB_FIFOMEM_BASE 0x1e120000
+ #define IFXUSB_FIFODBG_BASE 0x1e140000
+// #define IFXUSB_OC_IRQ 151
+
+
+ #ifndef DANUBE_RCU_BASE_ADDR
+ #define DANUBE_RCU_BASE_ADDR (0xBF203000)
+ #endif
+
+ #ifndef DANUBE_CGU
+ #define DANUBE_CGU (0xBF103000)
+ #endif
+ #ifndef DANUBE_CGU_IFCCR
+ #define DANUBE_CGU_IFCCR ((volatile unsigned long *)(DANUBE_CGU+ 0x0018))
+ #endif
+ #ifndef DANUBE_PMU
+ #define DANUBE_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef DANUBE_PMU_PWDCR
+ #define DANUBE_PMU_PWDCR ((volatile unsigned long *)(DANUBE_PMU+0x001C))
+ #endif
+
+ #ifndef DANUBE_GPIO_P0_OUT
+ #define DANUBE_GPIO_P0_OUT (0xBF103000+0x10)
+ #define DANUBE_GPIO_P0_DIR (0xBF103000+0x18)
+ #define DANUBE_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define DANUBE_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define DANUBE_GPIO_P0_OD (0xBF103000+0x24)
+ #define DANUBE_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define DANUBE_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define DANUBE_GPIO_P1_OUT (0xBF103000+0x40)
+ #define DANUBE_GPIO_P1_DIR (0xBF103000+0x48)
+ #define DANUBE_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define DANUBE_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define DANUBE_GPIO_P1_OD (0xBF103000+0x54)
+ #define DANUBE_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define DANUBE_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+
+ #define DANUBE_RCU_USBCFG ((volatile unsigned long *)(DANUBE_RCU_BASE_ADDR + 0x18))
+ #define DANUBE_RCU_RESET ((volatile unsigned long *)(DANUBE_RCU_BASE_ADDR + 0x10))
+ #define DANUBE_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define DANUBE_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define DANUBE_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end
+
+ #define default_param_dma_burst_size 4
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 4 //(NoChange)
+ #define default_param_turn_around_time_fs 4 //(NoChange)
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 640
+ #define default_param_nperio_tx_fifo_size 640
+ #define default_param_perio_tx_fifo_size 768
+ #endif //__IS_HOST__
+
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_INTR__
+ #define default_param_rx_fifo_size 1024
+ #define default_param_nperio_tx_fifo_size 1016
+ #define default_param_perio_tx_fifo_size_01 8
+ #else
+ #define default_param_rx_fifo_size 1024
+ #define default_param_nperio_tx_fifo_size 1024
+ #define default_param_perio_tx_fifo_size_01 0
+ #endif
+ #define default_param_perio_tx_fifo_size_02 0
+ #define default_param_perio_tx_fifo_size_03 0
+ #define default_param_perio_tx_fifo_size_04 0
+ #define default_param_perio_tx_fifo_size_05 0
+ #define default_param_perio_tx_fifo_size_06 0
+ #define default_param_perio_tx_fifo_size_07 0
+ #define default_param_perio_tx_fifo_size_08 0
+ #define default_param_perio_tx_fifo_size_09 0
+ #define default_param_perio_tx_fifo_size_10 0
+ #define default_param_perio_tx_fifo_size_11 0
+ #define default_param_perio_tx_fifo_size_12 0
+ #define default_param_perio_tx_fifo_size_13 0
+ #define default_param_perio_tx_fifo_size_14 0
+ #define default_param_perio_tx_fifo_size_15 0
+ #endif //__IS_DEVICE__
+
+ #elif defined(__IS_AMAZON_SE__)
+ #include <asm/amazon_se/amazon_se.h>
+ //#include <asm/amazon_se/irq.h>
+
+// #define IFXUSB_IRQ 31
+ #define IFXUSB_IOMEM_BASE 0x1e101000
+ #define IFXUSB_FIFOMEM_BASE 0x1e120000
+ #define IFXUSB_FIFODBG_BASE 0x1e140000
+// #define IFXUSB_OC_IRQ 20
+
+ #define AMAZON_SE_RCU_USBCFG ((volatile unsigned long *)(AMAZON_SE_RCU_BASE_ADDR + 0x18))
+ #define AMAZON_SE_RCU_RESET ((volatile unsigned long *)(AMAZON_SE_RCU_BASE_ADDR + 0x10))
+ #define AMAZON_SE_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define AMAZON_SE_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define AMAZON_SE_USBCFG_SLV_END_BIT 9 // 0:little_end, 1:big_end
+
+ #ifndef AMAZON_SE_GPIO_P0_OUT
+ #define AMAZON_SE_GPIO_P0_OUT (0xBF103000+0x10)
+ #define AMAZON_SE_GPIO_P0_DIR (0xBF103000+0x18)
+ #define AMAZON_SE_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define AMAZON_SE_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define AMAZON_SE_GPIO_P0_OD (0xBF103000+0x24)
+ #define AMAZON_SE_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define AMAZON_SE_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define AMAZON_SE_GPIO_P1_OUT (0xBF103000+0x40)
+ #define AMAZON_SE_GPIO_P1_DIR (0xBF103000+0x48)
+ #define AMAZON_SE_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define AMAZON_SE_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define AMAZON_SE_GPIO_P1_OD (0xBF103000+0x54)
+ #define AMAZON_SE_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define AMAZON_SE_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+
+ #ifndef AMAZON_SE_CGU
+ #define AMAZON_SE_CGU (0xBF103000)
+ #endif
+ #ifndef AMAZON_SE_CGU_IFCCR
+ #define AMAZON_SE_CGU_IFCCR ((volatile unsigned long *)(AMAZON_SE_CGU+ 0x0018))
+ #endif
+ #ifndef AMAZON_SE_PMU
+ #define AMAZON_SE_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef AMAZON_SE_PMU_PWDCR
+ #define AMAZON_SE_PMU_PWDCR ((volatile unsigned long *)(AMAZON_SE_PMU+0x001C))
+ #endif
+
+ #define default_param_dma_burst_size 4
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 4 //(NoChange)
+ #define default_param_turn_around_time_fs 4 //(NoChange)
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 240
+ #define default_param_nperio_tx_fifo_size 240
+ #define default_param_perio_tx_fifo_size 32
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_INTR__
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 248
+ #define default_param_perio_tx_fifo_size_01 8
+ #else
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 256
+ #define default_param_perio_tx_fifo_size_01 0
+ #endif
+ #define default_param_perio_tx_fifo_size_02 0
+ #define default_param_perio_tx_fifo_size_03 0
+ #define default_param_perio_tx_fifo_size_04 0
+ #define default_param_perio_tx_fifo_size_05 0
+ #define default_param_perio_tx_fifo_size_06 0
+ #define default_param_perio_tx_fifo_size_07 0
+ #define default_param_perio_tx_fifo_size_08 0
+ #define default_param_perio_tx_fifo_size_09 0
+ #define default_param_perio_tx_fifo_size_10 0
+ #define default_param_perio_tx_fifo_size_11 0
+ #define default_param_perio_tx_fifo_size_12 0
+ #define default_param_perio_tx_fifo_size_13 0
+ #define default_param_perio_tx_fifo_size_14 0
+ #define default_param_perio_tx_fifo_size_15 0
+ #endif //__IS_DEVICE__
+
+ #elif defined(__IS_AR9__)
+// #define IFXUSB1_IRQ 54
+ #define IFXUSB1_IOMEM_BASE 0x1E101000
+ #define IFXUSB1_FIFOMEM_BASE 0x1E120000
+ #define IFXUSB1_FIFODBG_BASE 0x1E140000
+
+// #define IFXUSB2_IRQ 83
+ #define IFXUSB2_IOMEM_BASE 0x1E106000
+ #define IFXUSB2_FIFOMEM_BASE 0x1E1E0000
+ #define IFXUSB2_FIFODBG_BASE 0x1E1C0000
+
+// #define IFXUSB_OC_IRQ 60
+
+ #ifndef AMAZON_S_RCU_BASE_ADDR
+ #define AMAZON_S_RCU_BASE_ADDR (0xBF203000)
+ #endif
+
+ #ifndef AMAZON_S_CGU
+ #define AMAZON_S_CGU (0xBF103000)
+ #endif
+ #ifndef AMAZON_S_CGU_IFCCR
+ #define AMAZON_S_CGU_IFCCR ((volatile unsigned long *)(AMAZON_S_CGU+ 0x0018))
+ #endif
+
+ #ifndef AMAZON_S_PMU
+ #define AMAZON_S_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef AMAZON_S_PMU_PWDCR
+ #define AMAZON_S_PMU_PWDCR ((volatile unsigned long *)(AMAZON_S_PMU+0x001C))
+ #endif
+
+ #ifndef AMAZON_S_GPIO_P0_OUT
+ #define AMAZON_S_GPIO_P0_OUT (0xBF103000+0x10)
+ #define AMAZON_S_GPIO_P0_DIR (0xBF103000+0x18)
+ #define AMAZON_S_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define AMAZON_S_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define AMAZON_S_GPIO_P0_OD (0xBF103000+0x24)
+ #define AMAZON_S_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define AMAZON_S_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define AMAZON_S_GPIO_P1_OUT (0xBF103000+0x40)
+ #define AMAZON_S_GPIO_P1_DIR (0xBF103000+0x48)
+ #define AMAZON_S_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define AMAZON_S_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define AMAZON_S_GPIO_P1_OD (0xBF103000+0x54)
+ #define AMAZON_S_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define AMAZON_S_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+ #define AMAZON_S_RCU_USB1CFG ((volatile unsigned long *)(AMAZON_S_RCU_BASE_ADDR + 0x18))
+ #define AMAZON_S_RCU_USB2CFG ((volatile unsigned long *)(AMAZON_S_RCU_BASE_ADDR + 0x34))
+ #define AMAZON_S_RCU_USBRESET ((volatile unsigned long *)(AMAZON_S_RCU_BASE_ADDR + 0x10))
+ #define AMAZON_S_USBCFG_ARB 7 //
+ #define AMAZON_S_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define AMAZON_S_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define AMAZON_S_USBCFG_SLV_END_BIT 17 // 0:little_end, 1:big_end
+
+ #define default_param_dma_burst_size 4
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 4 //(NoChange)
+ #define default_param_turn_around_time_fs 4 //(NoChange)
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 240
+ #define default_param_nperio_tx_fifo_size 240
+ #define default_param_perio_tx_fifo_size 32
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+ #ifdef __DED_INTR__
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 248
+ #define default_param_perio_tx_fifo_size_01 8
+ #else
+ #define default_param_rx_fifo_size 256
+ #define default_param_nperio_tx_fifo_size 256
+ #define default_param_perio_tx_fifo_size_01 0
+ #endif
+ #define default_param_perio_tx_fifo_size_02 0
+ #define default_param_perio_tx_fifo_size_03 0
+ #define default_param_perio_tx_fifo_size_04 0
+ #define default_param_perio_tx_fifo_size_05 0
+ #define default_param_perio_tx_fifo_size_06 0
+ #define default_param_perio_tx_fifo_size_07 0
+ #define default_param_perio_tx_fifo_size_08 0
+ #define default_param_perio_tx_fifo_size_09 0
+ #define default_param_perio_tx_fifo_size_10 0
+ #define default_param_perio_tx_fifo_size_11 0
+ #define default_param_perio_tx_fifo_size_12 0
+ #define default_param_perio_tx_fifo_size_13 0
+ #define default_param_perio_tx_fifo_size_14 0
+ #define default_param_perio_tx_fifo_size_15 0
+ #endif //__IS_DEVICE__
+
+ #elif defined(__IS_VR9__)
+// #define IFXUSB1_IRQ 54
+ #define IFXUSB1_IOMEM_BASE 0x1E101000
+ #define IFXUSB1_FIFOMEM_BASE 0x1E120000
+ #define IFXUSB1_FIFODBG_BASE 0x1E140000
+
+// #define IFXUSB2_IRQ 83
+ #define IFXUSB2_IOMEM_BASE 0x1E106000
+ #define IFXUSB2_FIFOMEM_BASE 0x1E1E0000
+ #define IFXUSB2_FIFODBG_BASE 0x1E1C0000
+// #define IFXUSB_OC_IRQ 60
+
+ #ifndef AMAZON_S_RCU_BASE_ADDR
+ #define AMAZON_S_RCU_BASE_ADDR (0xBF203000)
+ #endif
+
+ #ifndef AMAZON_S_CGU
+ #define AMAZON_S_CGU (0xBF103000)
+ #endif
+ #ifndef AMAZON_S_CGU_IFCCR
+ #define AMAZON_S_CGU_IFCCR ((volatile unsigned long *)(AMAZON_S_CGU+ 0x0018))
+ #endif
+
+ #ifndef AMAZON_S_PMU
+ #define AMAZON_S_PMU (KSEG1+0x1F102000)
+ #endif
+ #ifndef AMAZON_S_PMU_PWDCR
+ #define AMAZON_S_PMU_PWDCR ((volatile unsigned long *)(AMAZON_S_PMU+0x001C))
+ #endif
+
+ #ifndef AMAZON_S_GPIO_P0_OUT
+ #define AMAZON_S_GPIO_P0_OUT (0xBF103000+0x10)
+ #define AMAZON_S_GPIO_P0_DIR (0xBF103000+0x18)
+ #define AMAZON_S_GPIO_P0_ALTSEL0 (0xBF103000+0x1C)
+ #define AMAZON_S_GPIO_P0_ALTSEL1 (0xBF103000+0x20)
+ #define AMAZON_S_GPIO_P0_OD (0xBF103000+0x24)
+ #define AMAZON_S_GPIO_P0_PUDSEL (0xBF103000+0x2C)
+ #define AMAZON_S_GPIO_P0_PUDEN (0xBF103000+0x30)
+ #define AMAZON_S_GPIO_P1_OUT (0xBF103000+0x40)
+ #define AMAZON_S_GPIO_P1_DIR (0xBF103000+0x48)
+ #define AMAZON_S_GPIO_P1_ALTSEL0 (0xBF103000+0x4C)
+ #define AMAZON_S_GPIO_P1_ALTSEL1 (0xBF103000+0x50)
+ #define AMAZON_S_GPIO_P1_OD (0xBF103000+0x54)
+ #define AMAZON_S_GPIO_P1_PUDSEL (0xBF103000+0x5C)
+ #define AMAZON_S_GPIO_P1_PUDEN (0xBF103000+0x60)
+ #endif
+
+ #define AMAZON_S_RCU_USB1CFG ((volatile unsigned long *)(AMAZON_S_RCU_BASE_ADDR + 0x18))
+ #define AMAZON_S_RCU_USB2CFG ((volatile unsigned long *)(AMAZON_S_RCU_BASE_ADDR + 0x34))
+ #define AMAZON_S_RCU_USBRESET ((volatile unsigned long *)(AMAZON_S_RCU_BASE_ADDR + 0x10))
+ #define AMAZON_S_USBCFG_ARB 7 //
+ #define AMAZON_S_USBCFG_HDSEL_BIT 11 // 0:host, 1:device
+ #define AMAZON_S_USBCFG_HOST_END_BIT 10 // 0:little_end, 1:big_end
+ #define AMAZON_S_USBCFG_SLV_END_BIT 17 // 0:little_end, 1:big_end
+
+ #define default_param_dma_burst_size 4 //(ALL)
+
+ #define default_param_speed IFXUSB_PARAM_SPEED_HIGH
+
+ #define default_param_max_transfer_size -1 //(Max, hwcfg)
+ #define default_param_max_packet_count -1 //(Max, hwcfg)
+ #define default_param_phy_utmi_width 16
+
+ #define default_param_turn_around_time_hs 6 //(NoChange) snpsid >= 0x4f54260a
+ #define default_param_turn_around_time_fs 6 //(NoChange) snpsid >= 0x4f54260a
+ #define default_param_timeout_cal_hs -1 //(NoChange)
+ #define default_param_timeout_cal_fs -1 //(NoChange)
+
+ #define default_param_data_fifo_size -1 //(Max, hwcfg)
+
+ #ifdef __IS_HOST__
+ #define default_param_host_channels -1 //(Max, hwcfg)
+ #define default_param_rx_fifo_size 240
+ #define default_param_nperio_tx_fifo_size 240
+ #define default_param_perio_tx_fifo_size 32
+ #endif //__IS_HOST__
+ #ifdef __IS_DEVICE__
+ #define default_param_rx_fifo_size 256
+ #define default_param_tx_fifo_size_00 -1
+ #define default_param_tx_fifo_size_01 -1
+ #define default_param_tx_fifo_size_02 -1
+ #define default_param_tx_fifo_size_03 -1
+ #define default_param_tx_fifo_size_04 -1
+ #define default_param_tx_fifo_size_05 -1
+ #define default_param_tx_fifo_size_06 -1
+ #define default_param_tx_fifo_size_07 -1
+ #define default_param_tx_fifo_size_08 -1
+ #define default_param_tx_fifo_size_09 -1
+ #define default_param_tx_fifo_size_10 -1
+ #define default_param_tx_fifo_size_11 -1
+ #define default_param_tx_fifo_size_12 -1
+ #define default_param_tx_fifo_size_13 -1
+ #define default_param_tx_fifo_size_14 -1
+ #define default_param_tx_fifo_size_15 -1
+ #define default_param_dma_unalgned_tx -1
+ #define default_param_dma_unalgned_rx -1
+ #define default_param_thr_ctl -1
+ #define default_param_tx_thr_length -1
+ #define default_param_rx_thr_length -1
+ #endif //__IS_DEVICE__
+ #else // __IS_VR9__
+ #error "Please choose one platform!!"
+ #endif // __IS_VR9__
+#endif //UEIP
+
+/*@}*//*IFXUSB_PLATEFORM_MEM_ADDR*/
+
+/////////////////////////////////////////////////////////////////////////
+
+#ifdef __IS_HOST__
+ #ifdef CONFIG_USB_HOST_IFX_FORCE_USB11
+ #undef default_param_speed
+ #define default_param_speed IFXUSB_PARAM_SPEED_FULL
+ #endif
+#endif
+#ifdef __IS_DEVICE__
+ #ifndef CONFIG_USB_GADGET_DUALSPEED
+ #undef default_param_speed
+ #define default_param_speed IFXUSB_PARAM_SPEED_FULL
+ #endif
+#endif
+
+/////////////////////////////////////////////////////////////////////////
+
+static __inline__ void UDELAY( const uint32_t _usecs )
+{
+ udelay( _usecs );
+}
+
+static __inline__ void MDELAY( const uint32_t _msecs )
+{
+ mdelay( _msecs );
+}
+
+static __inline__ void SPIN_LOCK( spinlock_t *_lock )
+{
+ spin_lock(_lock);
+}
+
+static __inline__ void SPIN_UNLOCK( spinlock_t *_lock )
+{
+ spin_unlock(_lock);
+}
+
+#define SPIN_LOCK_IRQSAVE( _l, _f ) \
+ { \
+ spin_lock_irqsave(_l,_f); \
+ }
+
+#define SPIN_UNLOCK_IRQRESTORE( _l,_f ) \
+ { \
+ spin_unlock_irqrestore(_l,_f); \
+ }
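+
+/* Illustrative sketch (not part of the original driver): how the wrapper
+ * macros above are typically used around a short critical section. The
+ * spinlock "my_lock" and the flags variable are assumptions made for this
+ * example only.
+ */
+#if 0 /* example only */
+static DEFINE_SPINLOCK(my_lock);
+
+static void example_critical_section(void)
+{
+	unsigned long flags;
+
+	SPIN_LOCK_IRQSAVE(&my_lock, flags);     /* disable IRQs and take the lock */
+	/* ... touch shared driver state here ... */
+	SPIN_UNLOCK_IRQRESTORE(&my_lock, flags);
+
+	MDELAY(1); /* busy-wait roughly one millisecond, e.g. after a PHY write */
+}
+#endif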
+
+/////////////////////////////////////////////////////////////////////////
+/*!
+ \addtogroup IFXUSB_DBG_ROUTINE
+ */
+/*@{*/
+#ifdef __IS_HOST__
+ extern uint32_t h_dbg_lvl;
+#endif
+
+#ifdef __IS_DEVICE__
+ extern uint32_t d_dbg_lvl;
+#endif
+
+/*! \brief When debug level has the DBG_CIL bit set, display CIL Debug messages. */
+#define DBG_CIL (0x2)
+/*! \brief When debug level has the DBG_CILV bit set, display CIL Verbose debug messages */
+#define DBG_CILV (0x20)
+/*! \brief When debug level has the DBG_PCD bit set, display PCD (Device) debug messages */
+#define DBG_PCD (0x4)
+/*! \brief When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug messages */
+#define DBG_PCDV (0x40)
+/*! \brief When debug level has the DBG_HCD bit set, display Host debug messages */
+#define DBG_HCD (0x8)
+/*! \brief When debug level has the DBG_HCDV bit set, display Verbose Host debug messages */
+#define DBG_HCDV (0x80)
+/*! \brief When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host mode. */
+#define DBG_HCD_URB (0x800)
+/*! \brief When debug level has any bit set, display debug messages */
+#define DBG_ANY (0xFF)
+/*! \brief All debug messages off */
+#define DBG_OFF 0
+
+#define DBG_ENTRY (0x8000)
+
+#define IFXUSB "IFXUSB: "
+
+/*!
+ \fn inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
+ \brief Set the Debug Level variable.
+ \param _new 32-bit mask of debug level.
+ \return previous debug level
+ */
+static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
+{
+ #ifdef __IS_HOST__
+ uint32_t old = h_dbg_lvl;
+ h_dbg_lvl = _new;
+ #endif
+
+ #ifdef __IS_DEVICE__
+ uint32_t old = d_dbg_lvl;
+ d_dbg_lvl = _new;
+ #endif
+ return old;
+}
+
+#ifdef __DEBUG__
+ #ifdef __IS_HOST__
+ # define IFX_DEBUGPL(lvl, x...) do{ if ((lvl)&h_dbg_lvl)printk( KERN_DEBUG IFXUSB x ); }while(0)
+ # define CHK_DEBUG_LEVEL(level) ((level) & h_dbg_lvl)
+ #endif
+
+ #ifdef __IS_DEVICE__
+ # define IFX_DEBUGPL(lvl, x...) do{ if ((lvl)&d_dbg_lvl)printk( KERN_DEBUG IFXUSB x ); }while(0)
+ # define CHK_DEBUG_LEVEL(level) ((level) & d_dbg_lvl)
+ #endif
+
+ # define IFX_DEBUGP(x...) IFX_DEBUGPL(DBG_ANY, x )
+#else
+ # define IFX_DEBUGPL(lvl, x...) do{}while(0)
+ # define IFX_DEBUGP(x...)
+ # define CHK_DEBUG_LEVEL(level) (0)
+#endif //__DEBUG__
+
+/* Print an Error message. */
+#define IFX_ERROR(x...) printk( KERN_ERR IFXUSB x )
+/* Print a Warning message. */
+#define IFX_WARN(x...) printk( KERN_WARNING IFXUSB x )
+/* Print a notice (normal but significant message). */
+#define IFX_NOTICE(x...) printk( KERN_NOTICE IFXUSB x )
+/* Basic message printing. */
+#define IFX_PRINT(x...) printk( KERN_INFO IFXUSB x )
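+
+/* Illustrative sketch (not part of the original driver): enabling a couple of
+ * debug classes at runtime and emitting a conditional trace. Only meaningful
+ * when the driver is built with __DEBUG__; the function name is an assumption
+ * for this example.
+ */
+#if 0 /* example only */
+static void example_debug_usage(void)
+{
+	uint32_t previous;
+
+	/* turn on CIL and Host debug messages, remember the old mask */
+	previous = SET_DEBUG_LEVEL(DBG_CIL | DBG_HCD);
+
+	IFX_DEBUGPL(DBG_HCD, "host debug enabled (old mask %08x)\n", previous);
+
+	if (!CHK_DEBUG_LEVEL(DBG_CILV))
+		IFX_PRINT("verbose CIL tracing is not enabled\n");
+
+	SET_DEBUG_LEVEL(previous); /* restore the previous mask */
+}
+#endif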
+
+/*@}*//*IFXUSB_DBG_ROUTINE*/
+
+
+#endif //__IFXUSB_PLAT_H__
+
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_regs.h b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_regs.h
new file mode 100644
index 0000000000..014c6dbcfd
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_regs.h
@@ -0,0 +1,1420 @@
+/*****************************************************************************
+ ** FILE NAME : ifxusb_regs.h
+ ** PROJECT : IFX USB sub-system V3
+ ** MODULES : IFX USB sub-system Host and Device driver
+ ** SRC VERSION : 1.0
+ ** DATE : 1/Jan/2009
+ ** AUTHOR : Chen, Howard
+ ** DESCRIPTION : This file contains the data structures for accessing the IFXUSB core
+ ** registers.
+ ** The application interfaces with the USB core by reading from and
+ ** writing to the Control and Status Register (CSR) space through the
+ ** AHB Slave interface. These registers are 32 bits wide, and the
+ ** addresses are 32-bit-block aligned.
+ ** CSRs are classified as follows:
+ ** - Core Global Registers
+ ** - Device Mode Registers
+ ** - Device Global Registers
+ ** - Device Endpoint Specific Registers
+ ** - Host Mode Registers
+ ** - Host Global Registers
+ ** - Host Port CSRs
+ ** - Host Channel Specific Registers
+ **
+ ** Only the Core Global registers can be accessed in both Device and
+ ** Host modes. When the USB core is operating in one mode, either
+ ** Device or Host, the application must not access registers from the
+ ** other mode. When the core switches from one mode to another, the
+ ** registers in the new mode of operation must be reprogrammed as they
+ ** would be after a power-on reset.
+ ** FUNCTIONS :
+ ** COMPILER : gcc
+ ** REFERENCE : Synopsys DWC-OTG Driver 2.7
+ ** COPYRIGHT :
+ ** Version Control Section **
+ ** $Author$
+ ** $Date$
+ ** $Revisions$
+ ** $Log$ Revision history
+*****************************************************************************/
+
+
+
+/*!
+ \defgroup IFXUSB_CSR_DEFINITION Control and Status Register bit-map definition
+ \ingroup IFXUSB_DRIVER_V3
+ \brief Data structures for accessing the IFXUSB core registers.
+ The application interfaces with the USB core by reading from and
+ writing to the Control and Status Register (CSR) space through the
+ AHB Slave interface. These registers are 32 bits wide, and the
+ addresses are 32-bit-block aligned.
+ CSRs are classified as follows:
+ - Core Global Registers
+ - Device Mode Registers
+ - Device Global Registers
+ - Device Endpoint Specific Registers
+ - Host Mode Registers
+ - Host Global Registers
+ - Host Port CSRs
+ - Host Channel Specific Registers
+
+ Only the Core Global registers can be accessed in both Device and Host modes.
+ When the USB core is operating in one mode, either Device or Host, the
+ application must not access registers from the other mode. When the core
+ switches from one mode to another, the registers in the new mode of operation
+ must be reprogrammed as they would be after a power-on reset.
+ */
+
+/*!
+ \defgroup IFXUSB_CSR_DEVICE_GLOBAL_REG Device Mode Registers
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access Device Mode Global Registers
+ */
+
+/*!
+ \defgroup IFXUSB_CSR_DEVICE_EP_REG Device Mode EP Registers
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access Device Mode EP Registers
+ There will be one set of endpoint registers per logical endpoint
+ implemented.
+ These registers are visible only in Device mode and must not be
+ accessed in Host mode, as the results are unknown.
+ */
+
+/*!
+ \defgroup IFXUSB_CSR_DEVICE_DMA_DESC Device mode scatter/gather DMA descriptor structure
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access the DMA descriptor
+ */
+
+
+/*!
+ \defgroup IFXUSB_CSR_HOST_GLOBAL_REG Host Mode Registers
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access Host Mode Global Registers
+ */
+
+/*!
+ \defgroup IFXUSB_CSR_HOST_HC_REG Host Mode HC Registers
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access Host Mode Host Channel Registers
+ There will be one set of endpoint registers per host channel
+ implemented.
+ These registers are visible only in Host mode and must not be
+ accessed in Device mode, as the results are unknown.
+ */
+
+/*!
+ \defgroup IFXUSB_CSR_PWR_CLK_GATING_REG Power and Clock Gating Control Register
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access the Power and Clock Gating Control Register
+ */
+
+
+
+
+
+
+
+
+/*!
+ \defgroup IFXUSB_CSR_CORE_GLOBAL_REG Core Global Registers
+ \ingroup IFXUSB_CSR_DEFINITION
+ \brief Bit-mapped structure to access Core Global Registers
+ */
+
+
+
+
+
+
+
+
+
+/*!
+ \file ifxusb_regs.h
+ \ingroup IFXUSB_DRIVER_V3
+ \brief This file contains the data structures for accessing the IFXUSB core registers.
+ */
+
+
+#ifndef __IFXUSB_REGS_H__
+#define __IFXUSB_REGS_H__
+
+/****************************************************************************/
+
+#define MAX_PERIO_FIFOS 15 /** Maximum number of Periodic FIFOs */
+ #define MAX_TX_FIFOS       15   /** Maximum number of Tx FIFOs */
+#define MAX_EPS_CHANNELS 16 /** Maximum number of Endpoints/HostChannels */
+
+/****************************************************************************/
+
+/*!
+ \addtogroup IFXUSB_CSR_ACCESS_MACROS
+ */
+/*@{*/
+
+//#define RecordRegRW
+
+/*!
+ \fn static __inline__ uint32_t ifxusb_rreg( volatile uint32_t *_reg)
+ \brief Reads the content of a register.
+ \param _reg address of register to read.
+ \return contents of the register.
+ */
+static __inline__ uint32_t ifxusb_rreg( volatile uint32_t *_reg)
+{
+ #ifdef RecordRegRW
+ uint32_t r;
+ r=*(_reg);
+ return (r);
+ #else
+ return (*(_reg));
+ #endif
+};
+
+
+/*!
+ \fn static __inline__ void ifxusb_wreg( volatile uint32_t *_reg, const uint32_t _value)
+ \brief Writes a register with a 32 bit value.
+ \param _reg address of register to write.
+ \param _value value to write to _reg.
+ */
+static __inline__ void ifxusb_wreg( volatile uint32_t *_reg, const uint32_t _value)
+{
+ #ifdef RecordRegRW
+ printk(KERN_INFO "[W %p<-%08X]\n",_reg,_value);
+ #else
+ *(_reg)=_value;
+ #endif
+};
+
+/*!
+ \fn static __inline__ void ifxusb_mreg( volatile uint32_t *_reg, const uint32_t _clear_mask, const uint32_t _set_mask)
+ \brief Modifies bit values in a register using the
+        algorithm: (reg_contents & ~clear_mask) | set_mask.
+ \param _reg address of register to modify.
+ \param _clear_mask bit mask to be cleared.
+ \param _set_mask bit mask to be set.
+ */
+static __inline__ void ifxusb_mreg( volatile uint32_t *_reg, const uint32_t _clear_mask, const uint32_t _set_mask)
+{
+ uint32_t v;
+ #ifdef RecordRegRW
+ uint32_t r;
+ v= *(_reg);
+ r=v;
+ r&=(~_clear_mask);
+ r|= _set_mask;
+ *(_reg)=r ;
+ printk(KERN_INFO "[M %p->%08X+%08X/%08X<-%08X]\n",_reg,r,_clear_mask,_set_mask,r);
+ #else
+ v= *(_reg);
+ v&=(~_clear_mask);
+ v|= _set_mask;
+ *(_reg)=v ;
+ #endif
+};
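+
+/* Illustrative sketch (not part of the original driver): the three accessors
+ * above cover plain reads, plain writes and read-modify-write. "reg" is an
+ * assumed pointer to some 32-bit core register. Note that when RecordRegRW is
+ * defined, ifxusb_wreg() only logs the access instead of performing it.
+ */
+#if 0 /* example only */
+static void example_register_access(volatile uint32_t *reg)
+{
+	uint32_t value;
+
+	value = ifxusb_rreg(reg);              /* read the current contents   */
+	ifxusb_wreg(reg, value | 0x00000001);  /* write back with bit 0 set   */
+
+	/* same effect in one step: clear nothing, set bit 0 */
+	ifxusb_mreg(reg, 0x00000000, 0x00000001);
+}
+#endif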
+
+/*@}*//*IFXUSB_CSR_ACCESS_MACROS*/
+/****************************************************************************/
+
+/*!
+ \addtogroup IFXUSB_CSR_CORE_GLOBAL_REG
+ */
+/*@{*/
+
+/*!
+ \struct ifxusb_core_global_regs
+ \brief IFXUSB Core Global registers.
+ The ifxusb_core_global_regs structure defines the size
+ and relative field offsets for the Core Global registers.
+ */
+typedef struct ifxusb_core_global_regs
+{
+ volatile uint32_t gotgctl; /*!< 000h OTG Control and Status Register. */
+ volatile uint32_t gotgint; /*!< 004h OTG Interrupt Register. */
+ volatile uint32_t gahbcfg; /*!< 008h Core AHB Configuration Register. */
+ volatile uint32_t gusbcfg; /*!< 00Ch Core USB Configuration Register. */
+ volatile uint32_t grstctl; /*!< 010h Core Reset Register. */
+ volatile uint32_t gintsts; /*!< 014h Core Interrupt Register. */
+ volatile uint32_t gintmsk; /*!< 018h Core Interrupt Mask Register. */
+ volatile uint32_t grxstsr; /*!< 01Ch Receive Status Queue Read Register (Read Only). */
+ volatile uint32_t grxstsp; /*!< 020h Receive Status Queue Read & POP Register (Read Only). */
+ volatile uint32_t grxfsiz; /*!< 024h Receive FIFO Size Register. */
+ volatile uint32_t gnptxfsiz; /*!< 028h Non Periodic Transmit FIFO Size Register. */
+ volatile uint32_t gnptxsts; /*!< 02Ch Non Periodic Transmit FIFO/Queue Status Register (Read Only). */
+ volatile uint32_t gi2cctl; /*!< 030h I2C Access Register. */
+ volatile uint32_t gpvndctl; /*!< 034h PHY Vendor Control Register. */
+ volatile uint32_t ggpio; /*!< 038h General Purpose Input/Output Register. */
+ volatile uint32_t guid; /*!< 03Ch User ID Register. */
+ volatile uint32_t gsnpsid; /*!< 040h Synopsys ID Register (Read Only). */
+ volatile uint32_t ghwcfg1; /*!< 044h User HW Config1 Register (Read Only). */
+ volatile uint32_t ghwcfg2; /*!< 048h User HW Config2 Register (Read Only). */
+ volatile uint32_t ghwcfg3; /*!< 04Ch User HW Config3 Register (Read Only). */
+ volatile uint32_t ghwcfg4; /*!< 050h User HW Config4 Register (Read Only). */
+ volatile uint32_t reserved[43]; /*!< 054h Reserved 054h-0FFh */
+ volatile uint32_t hptxfsiz; /*!< 100h Host Periodic Transmit FIFO Size Register. */
+ volatile uint32_t dptxfsiz_dieptxf[15];/*!< 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15.
+ Device Periodic Transmit FIFO#n Register if dedicated
+ fifos are disabled, otherwise Device Transmit FIFO#n
+ Register.
+ */
+} ifxusb_core_global_regs_t;
+
+/*!
+ \brief Bits of the Core OTG Control and Status Register (GOTGCTL).
+ */
+typedef union gotgctl_data
+{
+ uint32_t d32;
+ struct{
+ unsigned reserved21_31 : 11;
+ unsigned currmod : 1 ; /*!< 20 */
+ unsigned bsesvld : 1 ; /*!< 19 */
+ unsigned asesvld : 1 ; /*!< 18 */
+ unsigned reserved17 : 1 ;
+ unsigned conidsts : 1 ; /*!< 16 */
+ unsigned reserved12_15 : 4 ;
+ unsigned devhnpen : 1 ; /*!< 11 */
+ unsigned hstsethnpen : 1 ; /*!< 10 */
+ unsigned hnpreq : 1 ; /*!< 09 */
+ unsigned hstnegscs : 1 ; /*!< 08 */
+ unsigned reserved2_7 : 6 ;
+ unsigned sesreq : 1 ; /*!< 01 */
+ unsigned sesreqscs : 1 ; /*!< 00 */
+ } b;
+} gotgctl_data_t;
+
+/*!
+ \brief Bit fields of the Core OTG Interrupt Register (GOTGINT).
+ */
+typedef union gotgint_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved31_20 : 12;
+ unsigned debdone : 1 ; /*!< 19 Debounce Done */
+ unsigned adevtoutchng : 1 ; /*!< 18 A-Device Timeout Change */
+ unsigned hstnegdet : 1 ; /*!< 17 Host Negotiation Detected */
+ unsigned reserver10_16 : 7 ;
+ unsigned hstnegsucstschng : 1 ; /*!< 09 Host Negotiation Success Status Change */
+ unsigned sesreqsucstschng : 1 ; /*!< 08 Session Request Success Status Change */
+ unsigned reserved3_7 : 5 ;
+ unsigned sesenddet : 1 ; /*!< 02 Session End Detected */
+ unsigned reserved0_1 : 2 ;
+ } b;
+} gotgint_data_t;
+
+/*!
+ \brief Bit fields of the Core AHB Configuration Register (GAHBCFG).
+ */
+typedef union gahbcfg_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved9_31 : 23;
+ unsigned ptxfemplvl : 1 ; /*!< 08 Periodic FIFO empty level trigger condition*/
+ unsigned nptxfemplvl : 1 ; /*!< 07 Non-Periodic FIFO empty level trigger condition*/
+ #define IFXUSB_GAHBCFG_TXFEMPTYLVL_EMPTY 1
+ #define IFXUSB_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0
+ unsigned reserved : 1 ;
+ unsigned dmaenable : 1 ; /*!< 05 DMA enable*/
+ #define IFXUSB_GAHBCFG_DMAENABLE 1
+ unsigned hburstlen : 4 ; /*!< 01-04 DMA Burst-length*/
+ #define IFXUSB_GAHBCFG_INT_DMA_BURST_SINGLE 0
+ #define IFXUSB_GAHBCFG_INT_DMA_BURST_INCR 1
+ #define IFXUSB_GAHBCFG_INT_DMA_BURST_INCR4 3
+ #define IFXUSB_GAHBCFG_INT_DMA_BURST_INCR8 5
+ #define IFXUSB_GAHBCFG_INT_DMA_BURST_INCR16 7
+ unsigned glblintrmsk : 1 ; /*!< 00 USB Global Interrupt Enable */
+ #define IFXUSB_GAHBCFG_GLBINT_ENABLE 1
+ } b;
+} gahbcfg_data_t;
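+
+/* Illustrative sketch (not part of the original driver): programming GAHBCFG
+ * through the d32/b union, which is the access pattern used for all the
+ * bit-mapped register types in this file. "core_global_regs" is an assumed
+ * pointer to the mapped Core Global register block.
+ */
+#if 0 /* example only */
+static void example_setup_gahbcfg(ifxusb_core_global_regs_t *core_global_regs)
+{
+	gahbcfg_data_t gahbcfg;
+
+	gahbcfg.d32 = ifxusb_rreg(&core_global_regs->gahbcfg);
+	gahbcfg.b.dmaenable   = IFXUSB_GAHBCFG_DMAENABLE;           /* internal DMA    */
+	gahbcfg.b.hburstlen   = IFXUSB_GAHBCFG_INT_DMA_BURST_INCR4; /* INCR4 bursts    */
+	gahbcfg.b.glblintrmsk = IFXUSB_GAHBCFG_GLBINT_ENABLE;       /* unmask core irq */
+	ifxusb_wreg(&core_global_regs->gahbcfg, gahbcfg.d32);
+}
+#endif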
+
+/*!
+ \brief Bit fields of the Core USB Configuration Register (GUSBCFG).
+*/
+typedef union gusbcfg_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved31 : 1;
+ unsigned ForceDevMode : 1; /*!< 30 Force Device Mode */
+ unsigned ForceHstMode : 1; /*!< 29 Force Host Mode */
+ unsigned TxEndDelay : 1; /*!< 28 Tx End Delay */
+ unsigned reserved2723 : 5;
+ unsigned term_sel_dl_pulse : 1; /*!< 22 TermSel DLine Pulsing Selection */
+ unsigned reserved2117 : 5;
+ unsigned otgutmifssel : 1; /*!< 16 UTMIFS Select */
+ unsigned phylpwrclksel : 1; /*!< 15 PHY Low-Power Clock Select */
+ unsigned reserved14 : 1;
+ unsigned usbtrdtim : 4; /*!< 13-10 USB Turnaround Time */
+ unsigned hnpcap : 1; /*!< 09 HNP-Capable */
+ unsigned srpcap : 1; /*!< 08 SRP-Capable */
+ unsigned reserved07 : 1;
+ unsigned physel : 1; /*!< 06 USB 2.0 High-Speed PHY or
+ USB 1.1 Full-Speed Serial
+ Transceiver Select */
+ unsigned fsintf : 1; /*!< 05 Full-Speed Serial Interface Select */
+ unsigned ulpi_utmi_sel : 1; /*!< 04 ULPI or UTMI+ Select */
+ unsigned phyif : 1; /*!< 03 PHY Interface */
+ unsigned toutcal : 3; /*!< 00-02 HS/FS Timeout Calibration */
+ }b;
+} gusbcfg_data_t;
+
+/*!
+ \brief Bit fields of the Core Reset Register (GRSTCTL).
+ */
+typedef union grstctl_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned ahbidle : 1; /*!< 31 AHB Master Idle. Indicates the AHB Master State
+ Machine is in IDLE condition. */
+		unsigned dmareq		: 1;	/*!< 30 DMA Request Signal. Indicates a DMA request is in
+						       progress. Used for debug purposes. */
+ unsigned reserved11_29 :19;
+ unsigned txfnum : 5; /*!< 10-06 TxFIFO Number (TxFNum) to be flushed.
+ 0x00: Non Periodic TxFIFO Flush or TxFIFO 0
+ 0x01-0x0F: Periodic TxFIFO Flush or TxFIFO n
+ 0x10: Flush all TxFIFO
+ */
+ unsigned txfflsh : 1; /*!< 05 TxFIFO Flush */
+ unsigned rxfflsh : 1; /*!< 04 RxFIFO Flush */
+ unsigned intknqflsh : 1; /*!< 03 In Token Sequence Learning Queue Flush (Device Only) */
+ unsigned hstfrm : 1; /*!< 02 Host Frame Counter Reset (Host Only) */
+ unsigned hsftrst : 1; /*!< 01 Hclk Soft Reset */
+
+ unsigned csftrst : 1; /*!< 00 Core Soft Reset
+ The application can flush the control logic in the
+ entire core using this bit. This bit resets the
+ pipelines in the AHB Clock domain as well as the
+ PHY Clock domain.
+ The state machines are reset to an IDLE state, the
+ control bits in the CSRs are cleared, all the
+ transmit FIFOs and the receive FIFO are flushed.
+ The status mask bits that control the generation of
+ the interrupt, are cleared, to clear the
+ interrupt. The interrupt status bits are not
+ cleared, so the application can get the status of
+ any events that occurred in the core after it has
+ set this bit.
+ Any transactions on the AHB are terminated as soon
+ as possible following the protocol. Any
+ transactions on the USB are terminated immediately.
+ The configuration settings in the CSRs are
+ unchanged, so the software doesn't have to
+ reprogram these registers (Device
+ Configuration/Host Configuration/Core System
+ Configuration/Core PHY Configuration).
+ The application can write to this bit, any time it
+ wants to reset the core. This is a self clearing
+ bit and the core clears this bit after all the
+ necessary logic is reset in the core, which may
+ take several clocks, depending on the current state
+ of the core.
+ */
+ }b;
+} grstctl_t;
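+
+/* Illustrative sketch (not part of the original driver): the soft-reset
+ * sequence described for csftrst above - wait for the AHB master to go idle,
+ * set the self-clearing reset bit, then poll until the core clears it.
+ * "core_global_regs", the retry limits and the use of UDELAY() from
+ * ifxusb_plat.h are assumptions for this example.
+ */
+#if 0 /* example only */
+static int example_core_soft_reset(ifxusb_core_global_regs_t *core_global_regs)
+{
+	grstctl_t grstctl = { .d32 = 0 };
+	int count;
+
+	/* wait until the AHB master state machine is idle */
+	for (count = 0; count < 100000; count++) {
+		grstctl.d32 = ifxusb_rreg(&core_global_regs->grstctl);
+		if (grstctl.b.ahbidle)
+			break;
+		UDELAY(10);
+	}
+	if (!grstctl.b.ahbidle)
+		return -1;
+
+	/* request the core soft reset and poll the self-clearing bit */
+	grstctl.b.csftrst = 1;
+	ifxusb_wreg(&core_global_regs->grstctl, grstctl.d32);
+	for (count = 0; count < 100000; count++) {
+		grstctl.d32 = ifxusb_rreg(&core_global_regs->grstctl);
+		if (!grstctl.b.csftrst)
+			return 0;
+		UDELAY(10);
+	}
+	return -1;
+}
+#endif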
+
+/*!
+ \brief Bit fields of the Core Interrupt Mask Register (GINTMSK) and
+ Core Interrupt Register (GINTSTS).
+ */
+typedef union gint_data
+{
+ uint32_t d32;
+ #define IFXUSB_SOF_INTR_MASK 0x0008
+ struct
+ {
+ unsigned wkupintr : 1; /*!< 31 Resume/Remote Wakeup Detected Interrupt */
+ unsigned sessreqintr : 1; /*!< 30 Session Request/New Session Detected Interrupt */
+ unsigned disconnect : 1; /*!< 29 Disconnect Detected Interrupt */
+ unsigned conidstschng : 1; /*!< 28 Connector ID Status Change */
+ unsigned reserved27 : 1;
+ unsigned ptxfempty : 1; /*!< 26 Periodic TxFIFO Empty */
+ unsigned hcintr : 1; /*!< 25 Host Channels Interrupt */
+ unsigned portintr : 1; /*!< 24 Host Port Interrupt */
+ unsigned reserved23 : 1;
+ unsigned fetsuspmsk : 1; /*!< 22 Data Fetch Suspended */
+		unsigned incomplisoout		: 1;	/*!< 21 Incomplete Isochronous OUT/Periodic Transfer */
+ unsigned incomplisoin : 1; /*!< 20 Incomplete Isochronous IN Transfer */
+ unsigned outepintr : 1; /*!< 19 OUT Endpoints Interrupt */
+ unsigned inepintr : 1; /*!< 18 IN Endpoints Interrupt */
+ unsigned epmismatch : 1; /*!< 17 Endpoint Mismatch Interrupt */
+ unsigned reserved16 : 1;
+ unsigned eopframe : 1; /*!< 15 End of Periodic Frame Interrupt */
+ unsigned isooutdrop : 1; /*!< 14 Isochronous OUT Packet Dropped Interrupt */
+ unsigned enumdone : 1; /*!< 13 Enumeration Done */
+ unsigned usbreset : 1; /*!< 12 USB Reset */
+ unsigned usbsuspend : 1; /*!< 11 USB Suspend */
+ unsigned erlysuspend : 1; /*!< 10 Early Suspend */
+ unsigned i2cintr : 1; /*!< 09 I2C Interrupt */
+ unsigned reserved8 : 1;
+ unsigned goutnakeff : 1; /*!< 07 Global OUT NAK Effective */
+ unsigned ginnakeff : 1; /*!< 06 Global Non-periodic IN NAK Effective */
+ unsigned nptxfempty : 1; /*!< 05 Non-periodic TxFIFO Empty */
+ unsigned rxstsqlvl : 1; /*!< 04 Receive FIFO Non-Empty */
+ unsigned sofintr : 1; /*!< 03 Start of (u)Frame */
+ unsigned otgintr : 1; /*!< 02 OTG Interrupt */
+ unsigned modemismatch : 1; /*!< 01 Mode Mismatch Interrupt */
+ unsigned reserved0 : 1;
+ } b;
+} gint_data_t;
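+
+/* Illustrative sketch (not part of the original driver): a top-level interrupt
+ * check. Only bits that are both pending in GINTSTS and unmasked in GINTMSK
+ * are handled; most GINTSTS bits are cleared by writing 1 back to them.
+ * "core_global_regs" is an assumed pointer to the mapped Core Global block.
+ */
+#if 0 /* example only */
+static void example_handle_core_interrupts(ifxusb_core_global_regs_t *core_global_regs)
+{
+	gint_data_t gintsts, gintmsk, pending;
+
+	gintsts.d32 = ifxusb_rreg(&core_global_regs->gintsts);
+	gintmsk.d32 = ifxusb_rreg(&core_global_regs->gintmsk);
+	pending.d32 = gintsts.d32 & gintmsk.d32;
+
+	if (pending.b.sofintr) {
+		gint_data_t clear = { .d32 = 0 };
+		/* ... start-of-(u)frame work ... */
+		clear.b.sofintr = 1;
+		ifxusb_wreg(&core_global_regs->gintsts, clear.d32); /* write 1 to clear */
+	}
+	if (pending.b.portintr) {
+		/* ... host port status change handling ... */
+	}
+}
+#endif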
+
+/*!
+ \brief Bit fields in the Receive Status Read and Pop Registers (GRXSTSR, GRXSTSP)
+ */
+typedef union grxsts_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved : 7;
+ unsigned fn : 4; /*!< 24-21 Frame Number */
+ unsigned pktsts : 4; /*!< 20-17 Packet Status */
+ #define IFXUSB_DSTS_DATA_UPDT 0x2 // OUT Data Packet
+ #define IFXUSB_DSTS_XFER_COMP 0x3 // OUT Data Transfer Complete
+ #define IFXUSB_DSTS_GOUT_NAK 0x1 // Global OUT NAK
+ #define IFXUSB_DSTS_SETUP_COMP 0x4 // Setup Phase Complete
+ #define IFXUSB_DSTS_SETUP_UPDT 0x6 // SETUP Packet
+ unsigned dpid : 2; /*!< 16-15 Data PID */
+ unsigned bcnt :11; /*!< 14-04 Byte Count */
+ unsigned epnum : 4; /*!< 03-00 Endpoint Number */
+ } db;
+ struct
+ {
+ unsigned reserved :11;
+ unsigned pktsts : 4; /*!< 20-17 Packet Status */
+ #define IFXUSB_HSTS_DATA_UPDT 0x2 // OUT Data Packet
+ #define IFXUSB_HSTS_XFER_COMP 0x3 // OUT Data Transfer Complete
+ #define IFXUSB_HSTS_DATA_TOGGLE_ERR 0x5 // DATA TOGGLE Error
+ #define IFXUSB_HSTS_CH_HALTED 0x7 // Channel Halted
+ unsigned dpid : 2; /*!< 16-15 Data PID */
+ unsigned bcnt :11; /*!< 14-04 Byte Count */
+ unsigned chnum : 4; /*!< 03-00 Channel Number */
+ } hb;
+} grxsts_data_t;
+
+/*!
+ \brief Bit fields in the FIFO Size Registers (HPTXFSIZ, GNPTXFSIZ, DPTXFSIZn).
+ */
+typedef union fifosize_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned depth : 16; /*!< 31-16 TxFIFO Depth (in DWord)*/
+ unsigned startaddr : 16; /*!< 15-00 RAM Starting address */
+ } b;
+} fifosize_data_t;
+
+/*!
+ \brief Bit fields in the Non-Periodic Transmit FIFO/Queue Status Register (GNPTXSTS).
+ */
+
+typedef union gnptxsts_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved : 1;
+ unsigned nptxqtop_chnep : 4; /*!< 30-27 Channel/EP Number of top of the Non-Periodic
+ Transmit Request Queue
+ */
+ unsigned nptxqtop_token : 2; /*!< 26-25 Token Type top of the Non-Periodic
+ Transmit Request Queue
+ 0 - IN/OUT
+ 1 - Zero Length OUT
+ 2 - PING/Complete Split
+ 3 - Channel Halt
+ */
+ unsigned nptxqtop_terminate : 1; /*!< 24 Terminate (Last entry for the selected
+ channel/EP)*/
+ unsigned nptxqspcavail : 8; /*!< 23-16 Transmit Request Queue Space Available */
+ unsigned nptxfspcavail :16; /*!< 15-00 TxFIFO Space Avail (in DWord)*/
+ }b;
+} gnptxsts_data_t;
+
+
+/*!
+ \brief Bit fields in the Transmit FIFO Status Register (DTXFSTS).
+ */
+typedef union dtxfsts_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved : 16;
+ unsigned txfspcavail : 16; /*!< 15-00 TxFIFO Space Avail (in DWord)*/
+ }b;
+} dtxfsts_data_t;
+
+
+/*!
+ \brief Bit fields in the I2C Control Register (I2CCTL).
+ */
+typedef union gi2cctl_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned bsydne : 1; /*!< 31 I2C Busy/Done*/
+ unsigned rw : 1; /*!< 30 Read/Write Indicator */
+ unsigned reserved : 2;
+ unsigned i2cdevaddr : 2; /*!< 27-26 I2C Device Address */
+ unsigned i2csuspctl : 1; /*!< 25 I2C Suspend Control */
+ unsigned ack : 1; /*!< 24 I2C ACK */
+ unsigned i2cen : 1; /*!< 23 I2C Enable */
+ unsigned addr : 7; /*!< 22-16 I2C Address */
+ unsigned regaddr : 8; /*!< 15-08 I2C Register Addr */
+		unsigned rwdata			: 8;	/*!< 07-00 I2C Read/Write Data */
+ } b;
+} gi2cctl_data_t;
+
+
+/*!
+ \brief Bit fields in the User HW Config1 Register.
+ */
+typedef union hwcfg1_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned ep_dir15 : 2; /*!< Direction of each EP
+ 0: BIDIR (IN and OUT) endpoint
+ 1: IN endpoint
+ 2: OUT endpoint
+ 3: Reserved
+ */
+ unsigned ep_dir14 : 2;
+ unsigned ep_dir13 : 2;
+ unsigned ep_dir12 : 2;
+ unsigned ep_dir11 : 2;
+ unsigned ep_dir10 : 2;
+ unsigned ep_dir09 : 2;
+ unsigned ep_dir08 : 2;
+ unsigned ep_dir07 : 2;
+ unsigned ep_dir06 : 2;
+ unsigned ep_dir05 : 2;
+ unsigned ep_dir04 : 2;
+ unsigned ep_dir03 : 2;
+ unsigned ep_dir02 : 2;
+ unsigned ep_dir01 : 2;
+ unsigned ep_dir00 : 2;
+ }b;
+} hwcfg1_data_t;
+
+/*!
+ \brief Bit fields in the User HW Config2 Register.
+ */
+typedef union hwcfg2_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved31 : 1;
+ unsigned dev_token_q_depth : 5; /*!< 30-26 Device Mode IN Token Sequence Learning Queue Depth */
+ unsigned host_perio_tx_q_depth : 2; /*!< 25-24 Host Mode Periodic Request Queue Depth */
+ unsigned nonperio_tx_q_depth : 2; /*!< 23-22 Non-periodic Request Queue Depth */
+		unsigned rx_status_q_depth		: 2;	/*!< 21-20 Receive Status Queue Depth */
+ unsigned dynamic_fifo : 1; /*!< 19 Dynamic FIFO Sizing Enabled */
+ unsigned perio_ep_supported : 1; /*!< 18 Periodic OUT Channels Supported in Host Mode */
+ unsigned num_host_chan : 4; /*!< 17-14 Number of Host Channels */
+ unsigned num_dev_ep : 4; /*!< 13-10 Number of Device Endpoints */
+ unsigned fs_phy_type : 2; /*!< 09-08 Full-Speed PHY Interface Type */
+ #define IFXUSB_HWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
+ #define IFXUSB_HWCFG2_FS_PHY_TYPE_DEDICATE 1
+ #define IFXUSB_HWCFG2_FS_PHY_TYPE_UTMI 2
+ #define IFXUSB_HWCFG2_FS_PHY_TYPE_ULPI 3
+ unsigned hs_phy_type : 2; /*!< 07-06 High-Speed PHY Interface Type */
+ #define IFXUSB_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+ #define IFXUSB_HWCFG2_HS_PHY_TYPE_UTMI 1
+ #define IFXUSB_HWCFG2_HS_PHY_TYPE_ULPI 2
+ #define IFXUSB_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+ unsigned point2point : 1; /*!< 05 Point-to-Point */
+ unsigned architecture : 2; /*!< 04-03 Architecture */
+ #define IFXUSB_HWCFG2_ARCH_SLAVE_ONLY 0
+ #define IFXUSB_HWCFG2_ARCH_EXT_DMA 1
+ #define IFXUSB_HWCFG2_ARCH_INT_DMA 2
+ unsigned op_mode : 3; /*!< 02-00 Mode of Operation */
+ #define IFXUSB_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
+ #define IFXUSB_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
+ #define IFXUSB_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
+ #define IFXUSB_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+ #define IFXUSB_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+ #define IFXUSB_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+ #define IFXUSB_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+ } b;
+} hwcfg2_data_t;
+
+/*!
+ \brief Bit fields in the User HW Config3 Register.
+ */
+typedef union hwcfg3_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned dfifo_depth :16; /*!< 31-16 DFIFO Depth */
+ unsigned reserved15_12 : 4;
+ unsigned synch_reset_type : 1; /*!< 11 Reset Style for Clocked always Blocks in RTL */
+ unsigned optional_features : 1; /*!< 10 Optional Features Removed */
+ unsigned vendor_ctrl_if : 1; /*!< 09 Vendor Control Interface Support */
+ unsigned i2c : 1; /*!< 08 I2C Selection */
+ unsigned otg_func : 1; /*!< 07 OTG Function Enabled */
+ unsigned packet_size_cntr_width : 3; /*!< 06-04 Width of Packet Size Counters */
+ unsigned xfer_size_cntr_width : 4; /*!< 03-00 Width of Transfer Size Counters */
+ } b;
+} hwcfg3_data_t;
+
+/*!
+ \brief Bit fields in the User HW Config4 Register.
+ Read the register into the <i>d32</i> element, then read
+ out the bits using the <i>b</i> element.
+ */
+typedef union hwcfg4_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned desc_dma_dyn : 1; /*!< 31 Scatter/Gather DMA */
+ unsigned desc_dma : 1; /*!< 30 Scatter/Gather DMA configuration */
+ unsigned num_in_eps : 4; /*!< 29-26 Number of Device Mode IN Endpoints Including Control Endpoints */
+ unsigned ded_fifo_en : 1; /*!< 25 Enable Dedicated Transmit FIFO for device IN Endpoints */
+ unsigned session_end_filt_en : 1; /*!< 24 session_end Filter Enabled */
+ unsigned b_valid_filt_en : 1; /*!< 23 b_valid Filter Enabled */
+ unsigned a_valid_filt_en : 1; /*!< 22 a_valid Filter Enabled */
+ unsigned vbus_valid_filt_en : 1; /*!< 21 vbus_valid Filter Enabled */
+ unsigned iddig_filt_en : 1; /*!< 20 iddig Filter Enable */
+ unsigned num_dev_mode_ctrl_ep : 4; /*!< 19-16 Number of Device Mode Control Endpoints in Addition to Endpoint 0 */
+ unsigned utmi_phy_data_width : 2; /*!< 15-14 UTMI+ PHY/ULPI-to-Internal UTMI+ Wrapper Data Width */
+ unsigned reserved13_06 : 8;
+ unsigned min_ahb_freq : 1; /*!< 05 Minimum AHB Frequency Less Than 60 MHz */
+ unsigned power_optimiz : 1; /*!< 04 Enable Power Optimization? */
+ unsigned num_dev_perio_in_ep : 4; /*!< 03-00 Number of Device Mode Periodic IN Endpoints */
+ } b;
+} hwcfg4_data_t;
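+
+/* Illustrative sketch (not part of the original driver): dumping a few fields
+ * from the read-only GHWCFGx registers, e.g. when sanity-checking the
+ * default_param_* settings. "core_global_regs" is an assumed pointer; the
+ * values are printed raw - the hardware encodes some of them as value-1.
+ */
+#if 0 /* example only */
+static void example_read_hwcfg(ifxusb_core_global_regs_t *core_global_regs)
+{
+	hwcfg2_data_t hwcfg2;
+	hwcfg3_data_t hwcfg3;
+	hwcfg4_data_t hwcfg4;
+
+	hwcfg2.d32 = ifxusb_rreg(&core_global_regs->ghwcfg2);
+	hwcfg3.d32 = ifxusb_rreg(&core_global_regs->ghwcfg3);
+	hwcfg4.d32 = ifxusb_rreg(&core_global_regs->ghwcfg4);
+
+	printk(KERN_INFO "hwcfg2.num_host_chan (raw) : %d\n", hwcfg2.b.num_host_chan);
+	printk(KERN_INFO "hwcfg2.num_dev_ep    (raw) : %d\n", hwcfg2.b.num_dev_ep);
+	printk(KERN_INFO "hwcfg3.dfifo_depth         : %d DWORDs\n", hwcfg3.b.dfifo_depth);
+	printk(KERN_INFO "hwcfg4.ded_fifo_en         : %s\n", hwcfg4.b.ded_fifo_en ? "yes" : "no");
+}
+#endif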
+
+/*@}*//*IFXUSB_CSR_CORE_GLOBAL_REG*/
+
+/****************************************************************************/
+/*!
+ \addtogroup IFXUSB_CSR_DEVICE_GLOBAL_REG
+ */
+/*@{*/
+
+/*!
+ \struct ifxusb_dev_global_regs
+ \brief IFXUSB Device Mode Global registers. Offsets 800h-BFFh
+ The ifxusb_dev_global_regs structure defines the size
+ and relative field offsets for the Device Global registers.
+ These registers are visible only in Device mode and must not be
+ accessed in Host mode, as the results are unknown.
+ */
+typedef struct ifxusb_dev_global_regs
+{
+ volatile uint32_t dcfg; /*!< 800h Device Configuration Register. */
+ volatile uint32_t dctl; /*!< 804h Device Control Register. */
+ volatile uint32_t dsts; /*!< 808h Device Status Register (Read Only). */
+ uint32_t unused;
+ volatile uint32_t diepmsk; /*!< 810h Device IN Endpoint Common Interrupt Mask Register. */
+ volatile uint32_t doepmsk; /*!< 814h Device OUT Endpoint Common Interrupt Mask Register. */
+ volatile uint32_t daint; /*!< 818h Device All Endpoints Interrupt Register. */
+ volatile uint32_t daintmsk; /*!< 81Ch Device All Endpoints Interrupt Mask Register. */
+ volatile uint32_t dtknqr1; /*!< 820h Device IN Token Queue Read Register-1 (Read Only). */
+ volatile uint32_t dtknqr2; /*!< 824h Device IN Token Queue Read Register-2 (Read Only). */
+ volatile uint32_t dvbusdis; /*!< 828h Device VBUS discharge Register.*/
+ volatile uint32_t dvbuspulse; /*!< 82Ch Device VBUS Pulse Register. */
+ volatile uint32_t dtknqr3_dthrctl; /*!< 830h Device IN Token Queue Read Register-3 (Read Only).
+ Device Thresholding control register (Read/Write)
+ */
+ volatile uint32_t dtknqr4_fifoemptymsk; /*!< 834h Device IN Token Queue Read Register-4 (Read Only).
+                                                 Device IN EPs Empty Interrupt Mask Register (Read/Write)
+ */
+} ifxusb_device_global_regs_t;
+
+/*!
+ \brief Bit fields in the Device Configuration Register.
+ */
+
+typedef union dcfg_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved31_26 : 6;
+ unsigned perschintvl : 2; /*!< 25-24 Periodic Scheduling Interval */
+ unsigned descdma : 1; /*!< 23 Enable Descriptor DMA in Device mode */
+		unsigned epmscnt		: 5;	/*!< 22-18 IN Endpoint Mismatch count */
+ unsigned reserved13_17 : 5;
+ unsigned perfrint : 2; /*!< 12-11 Periodic Frame Interval */
+ #define IFXUSB_DCFG_FRAME_INTERVAL_80 0
+ #define IFXUSB_DCFG_FRAME_INTERVAL_85 1
+ #define IFXUSB_DCFG_FRAME_INTERVAL_90 2
+ #define IFXUSB_DCFG_FRAME_INTERVAL_95 3
+ unsigned devaddr : 7; /*!< 10-04 Device Addresses */
+ unsigned reserved3 : 1;
+ unsigned nzstsouthshk : 1; /*!< 02 Non Zero Length Status OUT Handshake */
+ #define IFXUSB_DCFG_SEND_STALL 1
+ unsigned devspd : 2; /*!< 01-00 Device Speed */
+ } b;
+} dcfg_data_t;
+
+/*!
+ \brief Bit fields in the Device Control Register.
+ */
+typedef union dctl_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved16_31 :16;
+ unsigned ifrmnum : 1; /*!< 15 Ignore Frame Number for ISOC EPs */
+ unsigned gmc : 2; /*!< 14-13 Global Multi Count */
+ unsigned gcontbna : 1; /*!< 12 Global Continue on BNA */
+ unsigned pwronprgdone : 1; /*!< 11 Power-On Programming Done */
+ unsigned cgoutnak : 1; /*!< 10 Clear Global OUT NAK */
+ unsigned sgoutnak : 1; /*!< 09 Set Global OUT NAK */
+ unsigned cgnpinnak : 1; /*!< 08 Clear Global Non-Periodic IN NAK */
+ unsigned sgnpinnak : 1; /*!< 07 Set Global Non-Periodic IN NAK */
+ unsigned tstctl : 3; /*!< 06-04 Test Control */
+ unsigned goutnaksts : 1; /*!< 03 Global OUT NAK Status */
+ unsigned gnpinnaksts : 1; /*!< 02 Global Non-Periodic IN NAK Status */
+ unsigned sftdiscon : 1; /*!< 01 Soft Disconnect */
+ unsigned rmtwkupsig : 1; /*!< 00 Remote Wakeup */
+ } b;
+} dctl_data_t;
+
+
+/*!
+ \brief Bit fields in the Device Status Register.
+ */
+typedef union dsts_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved22_31 :10;
+ unsigned soffn :14; /*!< 21-08 Frame or Microframe Number of the received SOF */
+ unsigned reserved4_7 : 4;
+ unsigned errticerr : 1; /*!< 03 Erratic Error */
+ unsigned enumspd : 2; /*!< 02-01 Enumerated Speed */
+ #define IFXUSB_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
+ #define IFXUSB_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
+ #define IFXUSB_DSTS_ENUMSPD_LS_PHY_6MHZ 2
+ #define IFXUSB_DSTS_ENUMSPD_FS_PHY_48MHZ 3
+ unsigned suspsts : 1; /*!< 00 Suspend Status */
+ } b;
+} dsts_data_t;
+
+/*!
+ \brief Bit fields in the Device IN EP Interrupt Register
+ and the Device IN EP Common Mask Register.
+ */
+typedef union diepint_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved14_31 :18;
+ unsigned nakmsk : 1; /*!< 13 NAK interrupt Mask */
+ unsigned reserved10_12 : 3;
+ unsigned bna : 1; /*!< 09 BNA Interrupt mask */
+ unsigned txfifoundrn : 1; /*!< 08 Fifo Underrun Mask */
+		unsigned emptyintr		: 1;	/*!< 07 Transmit FIFO Empty mask */
+		unsigned inepnakeff		: 1;	/*!< 06 IN Endpoint NAK Effective mask */
+ unsigned intknepmis : 1; /*!< 05 IN Token Received with EP mismatch mask */
+ unsigned intktxfemp : 1; /*!< 04 IN Token received with TxF Empty mask */
+ unsigned timeout : 1; /*!< 03 TimeOUT Handshake mask (non-ISOC EPs) */
+ unsigned ahberr : 1; /*!< 02 AHB Error mask */
+ unsigned epdisabled : 1; /*!< 01 Endpoint disable mask */
+ unsigned xfercompl : 1; /*!< 00 Transfer complete mask */
+ } b;
+} diepint_data_t;
+
+
+/*!
+ \brief Bit fields in the Device OUT EP Interrupt Register and
+ Device OUT EP Common Interrupt Mask Register.
+ */
+typedef union doepint_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved15_31 :17;
+ unsigned nyetmsk : 1; /*!< 14 NYET Interrupt */
+ unsigned nakmsk : 1; /*!< 13 NAK Interrupt */
+ unsigned bbleerrmsk : 1; /*!< 12 Babble Interrupt */
+ unsigned reserved10_11 : 2;
+ unsigned bna : 1; /*!< 09 BNA Interrupt */
+ unsigned outpkterr : 1; /*!< 08 OUT packet Error */
+ unsigned reserved07 : 1;
+ unsigned back2backsetup : 1; /*!< 06 Back-to-Back SETUP Packets Received */
+		unsigned stsphsercvd		: 1;	/*!< 05 Status Phase Received (control EPs) */
+ unsigned outtknepdis : 1; /*!< 04 OUT Token Received when Endpoint Disabled */
+		unsigned setup			: 1;	/*!< 03 Setup Phase Done (control EPs) */
+ unsigned ahberr : 1; /*!< 02 AHB Error */
+ unsigned epdisabled : 1; /*!< 01 Endpoint disable */
+ unsigned xfercompl : 1; /*!< 00 Transfer complete */
+ } b;
+} doepint_data_t;
+
+
+/*!
+ \brief Bit fields in the Device All EP Interrupt Registers.
+ */
+typedef union daint_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned out : 16; /*!< 31-16 OUT Endpoint bits */
+ unsigned in : 16; /*!< 15-00 IN Endpoint bits */
+ } eps;
+ struct
+ {
+ /** OUT Endpoint bits */
+ unsigned outep15 : 1;
+ unsigned outep14 : 1;
+ unsigned outep13 : 1;
+ unsigned outep12 : 1;
+ unsigned outep11 : 1;
+ unsigned outep10 : 1;
+ unsigned outep09 : 1;
+ unsigned outep08 : 1;
+ unsigned outep07 : 1;
+ unsigned outep06 : 1;
+ unsigned outep05 : 1;
+ unsigned outep04 : 1;
+ unsigned outep03 : 1;
+ unsigned outep02 : 1;
+ unsigned outep01 : 1;
+ unsigned outep00 : 1;
+ /** IN Endpoint bits */
+ unsigned inep15 : 1;
+ unsigned inep14 : 1;
+ unsigned inep13 : 1;
+ unsigned inep12 : 1;
+ unsigned inep11 : 1;
+ unsigned inep10 : 1;
+ unsigned inep09 : 1;
+ unsigned inep08 : 1;
+ unsigned inep07 : 1;
+ unsigned inep06 : 1;
+ unsigned inep05 : 1;
+ unsigned inep04 : 1;
+ unsigned inep03 : 1;
+ unsigned inep02 : 1;
+ unsigned inep01 : 1;
+ unsigned inep00 : 1;
+ } ep;
+} daint_data_t;
+
+
+/*!
+ \brief Bit fields in the Device IN Token Queue Read Registers.
+ */
+typedef union dtknq1_data
+{
+ uint32_t d32;
+ struct
+ {
+		unsigned epnums0_5		:24;	/*!< 31-08 EP Numbers of IN Tokens 0 ... 5 */
+ unsigned wrap_bit : 1; /*!< 07 write pointer has wrapped */
+ unsigned reserved05_06 : 2;
+ unsigned intknwptr : 5; /*!< 04-00 In Token Queue Write Pointer */
+ }b;
+} dtknq1_data_t;
+
+
+/*!
+ \brief Bit fields in Threshold control Register
+ */
+typedef union dthrctl_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved26_31 : 6;
+ unsigned rx_thr_len : 9; /*!< 25-17 Rx Thr. Length */
+ unsigned rx_thr_en : 1; /*!< 16 Rx Thr. Enable */
+ unsigned reserved11_15 : 5;
+ unsigned tx_thr_len : 9; /*!< 10-02 Tx Thr. Length */
+ unsigned iso_thr_en : 1; /*!< 01 ISO Tx Thr. Enable */
+ unsigned non_iso_thr_en : 1; /*!< 00 non ISO Tx Thr. Enable */
+ } b;
+} dthrctl_data_t;
+
+/*@}*//*IFXUSB_CSR_DEVICE_GLOBAL_REG*/
+
+/****************************************************************************/
+
+/*!
+ \addtogroup IFXUSB_CSR_DEVICE_EP_REG
+ */
+/*@{*/
+
+/*!
+ \struct ifxusb_dev_in_ep_regs
+ \brief Device Logical IN Endpoint-Specific Registers.
+ There will be one set of endpoint registers per logical endpoint
+ implemented.
+           Each EP's IN EP registers are offset at:
+             900h + (ep_num * 20h)
+ */
+
+typedef struct ifxusb_dev_in_ep_regs
+{
+ volatile uint32_t diepctl; /*!< 00h: Endpoint Control Register */
+ uint32_t reserved04; /*!< 04h: */
+ volatile uint32_t diepint; /*!< 08h: Endpoint Interrupt Register */
+ uint32_t reserved0C; /*!< 0Ch: */
+ volatile uint32_t dieptsiz; /*!< 10h: Endpoint Transfer Size Register.*/
+ volatile uint32_t diepdma; /*!< 14h: Endpoint DMA Address Register. */
+ volatile uint32_t dtxfsts; /*!< 18h: Endpoint Transmit FIFO Status Register. */
+ volatile uint32_t diepdmab; /*!< 1Ch: Endpoint DMA Buffer Register. */
+} ifxusb_dev_in_ep_regs_t;
+
+/*!
+ \brief Device Logical OUT Endpoint-Specific Registers.
+ There will be one set of endpoint registers per logical endpoint
+ implemented.
+           Each EP's OUT EP registers are offset at:
+             B00h + (ep_num * 20h)
+ */
+typedef struct ifxusb_dev_out_ep_regs
+{
+ volatile uint32_t doepctl; /*!< 00h: Endpoint Control Register */
+ volatile uint32_t doepfn; /*!< 04h: Endpoint Frame number Register */
+ volatile uint32_t doepint; /*!< 08h: Endpoint Interrupt Register */
+ uint32_t reserved0C; /*!< 0Ch: */
+ volatile uint32_t doeptsiz; /*!< 10h: Endpoint Transfer Size Register.*/
+ volatile uint32_t doepdma; /*!< 14h: Endpoint DMA Address Register. */
+ uint32_t reserved18; /*!< 18h: */
+ volatile uint32_t doepdmab; /*!< 1Ch: Endpoint DMA Buffer Register. */
+} ifxusb_dev_out_ep_regs_t;
+
+
+/*!
+ \brief Bit fields in the Device EP Control
+ Register.
+ */
+typedef union depctl_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned epena : 1; /*!< 31 Endpoint Enable */
+ unsigned epdis : 1; /*!< 30 Endpoint Disable */
+ unsigned setd1pid : 1; /*!< 29 Set DATA1 PID (INTR/Bulk IN and OUT endpoints) */
+ unsigned setd0pid : 1; /*!< 28 Set DATA0 PID (INTR/Bulk IN and OUT endpoints) */
+ unsigned snak : 1; /*!< 27 Set NAK */
+ unsigned cnak : 1; /*!< 26 Clear NAK */
+ unsigned txfnum : 4; /*!< 25-22 Tx Fifo Number */
+ unsigned stall : 1; /*!< 21 Stall Handshake */
+ unsigned snp : 1; /*!< 20 Snoop Mode */
+ unsigned eptype : 2; /*!< 19-18 Endpoint Type
+ 0: Control
+ 1: Isochronous
+ 2: Bulk
+ 3: Interrupt
+ */
+ unsigned naksts : 1; /*!< 17 NAK Status */
+ unsigned dpid : 1; /*!< 16 Endpoint DPID (INTR/Bulk IN and OUT endpoints) */
+ unsigned usbactep : 1; /*!< 15 USB Active Endpoint */
+ unsigned nextep : 4; /*!< 14-11 Next Endpoint */
+ unsigned mps :11; /*!< 10-00 Maximum Packet Size */
+ #define IFXUSB_DEP0CTL_MPS_64 0
+ #define IFXUSB_DEP0CTL_MPS_32 1
+ #define IFXUSB_DEP0CTL_MPS_16 2
+ #define IFXUSB_DEP0CTL_MPS_8 3
+ } b;
+} depctl_data_t;
+
+
+/*!
+ \brief Bit fields in the Device EP Transfer Size Register. (EP0 and EPn)
+ */
+typedef union deptsiz_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved31 : 1;
+ unsigned supcnt : 2; /*!< 30-29 Setup Packet Count */
+ unsigned reserved20_28 : 9;
+ unsigned pktcnt : 1; /*!< 19 Packet Count */
+ unsigned reserved7_18 :12;
+ unsigned xfersize : 7; /*!< 06-00 Transfer size */
+ }b0;
+ struct
+ {
+ unsigned reserved : 1;
+ unsigned mc : 2; /*!< 30-29 Multi Count */
+ unsigned pktcnt :10; /*!< 28-19 Packet Count */
+ unsigned xfersize :19; /*!< 18-00 Transfer size */
+ } b;
+} deptsiz_data_t;
+
+/*@}*//*IFXUSB_CSR_DEVICE_EP_REG*/
+/****************************************************************************/
+
+/*!
+ \addtogroup IFXUSB_CSR_DEVICE_DMA_DESC
+ */
+/*@{*/
+/*!
+ \struct desc_sts_data
+ \brief Bit fields in the DMA Descriptor status quadlet.
+ */
+typedef union desc_sts_data
+{
+ struct
+ {
+ unsigned bs : 2; /*!< 31-30 Buffer Status */
+ #define BS_HOST_READY 0x0
+ #define BS_DMA_BUSY 0x1
+ #define BS_DMA_DONE 0x2
+ #define BS_HOST_BUSY 0x3
+		unsigned sts			: 2;	/*!< 29-28 Receive/Transmit Status */
+ #define RTS_SUCCESS 0x0
+ #define RTS_BUFFLUSH 0x1
+ #define RTS_RESERVED 0x2
+ #define RTS_BUFERR 0x3
+ unsigned l : 1; /*!< 27 Last */
+ unsigned sp : 1; /*!< 26 Short Packet */
+ unsigned ioc : 1; /*!< 25 Interrupt On Complete */
+ unsigned sr : 1; /*!< 24 Setup Packet received */
+ unsigned mtrf : 1; /*!< 23 Multiple Transfer */
+ unsigned reserved16_22 : 7;
+ unsigned bytes :16; /*!< 15-00 Transfer size in bytes */
+ } b;
+	uint32_t d32;	/*!< raw value of the DMA Descriptor status quadlet */
+} desc_sts_data_t;
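+
+/* Illustrative sketch (not part of the original driver): filling in the status
+ * quadlet of a scatter/gather DMA descriptor before handing it to the core.
+ * The rest of the descriptor layout (e.g. the buffer pointer that usually
+ * follows this quadlet) is not defined in this header, so only the status
+ * word is shown; "nbytes" and "last" are assumptions for this example.
+ */
+#if 0 /* example only */
+static uint32_t example_build_desc_status(uint16_t nbytes, int last)
+{
+	desc_sts_data_t sts = { .d32 = 0 };
+
+	sts.b.bs    = BS_HOST_READY;	/* buffer is ready for the DMA engine       */
+	sts.b.ioc   = 1;		/* interrupt when this descriptor completes */
+	sts.b.l     = last ? 1 : 0;	/* mark the last descriptor in the chain    */
+	sts.b.bytes = nbytes;		/* transfer size in bytes                   */
+
+	return sts.d32;
+}
+#endif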
+
+/*@}*//*IFXUSB_CSR_DEVICE_DMA_DESC*/
+/****************************************************************************/
+
+/*!
+ \addtogroup IFXUSB_CSR_HOST_GLOBAL_REG
+ */
+/*@{*/
+/*!
+ \struct ifxusb_host_global_regs
+ \brief IFXUSB Host Mode Global registers. Offsets 400h-7FFh
+ The ifxusb_host_global_regs structure defines the size
+ and relative field offsets for the Host Global registers.
+ These registers are visible only in Host mode and must not be
+ accessed in Device mode, as the results are unknown.
+ */
+typedef struct ifxusb_host_global_regs
+{
+ volatile uint32_t hcfg; /*!< 400h Host Configuration Register. */
+ volatile uint32_t hfir; /*!< 404h Host Frame Interval Register. */
+ volatile uint32_t hfnum; /*!< 408h Host Frame Number / Frame Remaining Register. */
+ uint32_t reserved40C;
+ volatile uint32_t hptxsts; /*!< 410h Host Periodic Transmit FIFO/ Queue Status Register. */
+ volatile uint32_t haint; /*!< 414h Host All Channels Interrupt Register. */
+ volatile uint32_t haintmsk; /*!< 418h Host All Channels Interrupt Mask Register. */
+} ifxusb_host_global_regs_t;
+
+/*!
+ \brief Bit fields in the Host Configuration Register.
+ */
+typedef union hcfg_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved31_03 :29;
+ unsigned fslssupp : 1; /*!< 02 FS/LS Only Support */
+ unsigned fslspclksel : 2; /*!< 01-00 FS/LS Phy Clock Select */
+ #define IFXUSB_HCFG_30_60_MHZ 0
+ #define IFXUSB_HCFG_48_MHZ 1
+ #define IFXUSB_HCFG_6_MHZ 2
+ } b;
+} hcfg_data_t;
+
+/*!
+ \brief Bit fields in the Host Frame Interval Register.
+ */
+typedef union hfir_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved : 16;
+ unsigned frint : 16; /*!< 15-00 Frame Interval */
+ } b;
+} hfir_data_t;
+
+/*!
+ \brief Bit fields in the Host Frame Time Remaining/Number Register.
+ */
+typedef union hfnum_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned frrem : 16; /*!< 31-16 Frame Time Remaining */
+ unsigned frnum : 16; /*!< 15-00 Frame Number*/
+ #define IFXUSB_HFNUM_MAX_FRNUM 0x3FFF
+ } b;
+} hfnum_data_t;
+
+/*!
+ \brief Bit fields in the Host Periodic Transmit FIFO/Queue Status Register
+ */
+typedef union hptxsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ struct
+ {
+ /** Top of the Periodic Transmit Request Queue
+ * - bit 24 - Terminate (last entry for the selected channel)
+ */
+ unsigned ptxqtop_odd : 1; /*!< 31 Top of the Periodic Transmit Request
+ Queue Odd/even microframe*/
+ unsigned ptxqtop_chnum : 4; /*!< 30-27 Top of the Periodic Transmit Request
+ Channel Number */
+ unsigned ptxqtop_token : 2; /*!< 26-25 Top of the Periodic Transmit Request
+ Token Type
+ 0 - Zero length
+ 1 - Ping
+ 2 - Disable
+ */
+ unsigned ptxqtop_terminate : 1; /*!< 24 Top of the Periodic Transmit Request
+ Terminate (last entry for the selected channel)*/
+ unsigned ptxqspcavail : 8; /*!< 23-16 Periodic Transmit Request Queue Space Available */
+ unsigned ptxfspcavail :16; /*!< 15-00 Periodic Transmit Data FIFO Space Available */
+ } b;
+} hptxsts_data_t;
+
+/*!
+ \brief Bit fields in the Host Port Control and Status Register.
+ */
+typedef union hprt0_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved19_31 :13;
+ unsigned prtspd : 2; /*!< 18-17 Port Speed */
+ #define IFXUSB_HPRT0_PRTSPD_HIGH_SPEED 0
+ #define IFXUSB_HPRT0_PRTSPD_FULL_SPEED 1
+ #define IFXUSB_HPRT0_PRTSPD_LOW_SPEED 2
+ unsigned prttstctl : 4; /*!< 16-13 Port Test Control */
+ unsigned prtpwr : 1; /*!< 12 Port Power */
+ unsigned prtlnsts : 2; /*!< 11-10 Port Line Status */
+ unsigned reserved9 : 1;
+ unsigned prtrst : 1; /*!< 08 Port Reset */
+ unsigned prtsusp : 1; /*!< 07 Port Suspend */
+ unsigned prtres : 1; /*!< 06 Port Resume */
+ unsigned prtovrcurrchng : 1; /*!< 05 Port Overcurrent Change */
+ unsigned prtovrcurract : 1; /*!< 04 Port Overcurrent Active */
+ unsigned prtenchng : 1; /*!< 03 Port Enable/Disable Change */
+ unsigned prtena : 1; /*!< 02 Port Enable */
+ unsigned prtconndet : 1; /*!< 01 Port Connect Detected */
+ unsigned prtconnsts : 1; /*!< 00 Port Connect Status */
+ }b;
+} hprt0_data_t;
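+
+/* Illustrative sketch (not part of the original driver): issuing a port reset
+ * via HPRT0. The change/detect bits (prtena, prtconndet, prtenchng,
+ * prtovrcurrchng) are cleared by writing 1, so they are zeroed before
+ * rewriting the register to avoid clearing or disabling anything by accident.
+ * "hprt0" is an assumed pointer to the mapped HPRT0 register; the 60 ms hold
+ * time is an assumption.
+ */
+#if 0 /* example only */
+static void example_port_reset(volatile uint32_t *hprt0)
+{
+	hprt0_data_t port;
+
+	port.d32 = ifxusb_rreg(hprt0);
+	port.b.prtena         = 0;	/* writing 1 here would disable the port */
+	port.b.prtconndet     = 0;
+	port.b.prtenchng      = 0;
+	port.b.prtovrcurrchng = 0;
+
+	port.b.prtrst = 1;		/* assert the port reset   */
+	ifxusb_wreg(hprt0, port.d32);
+	mdelay(60);			/* hold reset for a while  */
+	port.b.prtrst = 0;		/* and release it again    */
+	ifxusb_wreg(hprt0, port.d32);
+}
+#endif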
+
+/*!
+ \brief Bit fields in the Host All Interrupt Register.
+ */
+typedef union haint_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved : 16;
+ unsigned ch15 : 1;
+ unsigned ch14 : 1;
+ unsigned ch13 : 1;
+ unsigned ch12 : 1;
+ unsigned ch11 : 1;
+ unsigned ch10 : 1;
+ unsigned ch09 : 1;
+ unsigned ch08 : 1;
+ unsigned ch07 : 1;
+ unsigned ch06 : 1;
+ unsigned ch05 : 1;
+ unsigned ch04 : 1;
+ unsigned ch03 : 1;
+ unsigned ch02 : 1;
+ unsigned ch01 : 1;
+ unsigned ch00 : 1;
+ } b;
+ struct
+ {
+ unsigned reserved : 16;
+ unsigned chint : 16;
+ } b2;
+} haint_data_t;
+/*@}*//*IFXUSB_CSR_HOST_GLOBAL_REG*/
+/****************************************************************************/
+/*!
+ \addtogroup IFXUSB_CSR_HOST_HC_REG
+ */
+/*@{*/
+/*!
+ \brief Host Channel Specific Registers
+   There will be one set of hc registers per host channel implemented.
+   Each HC's registers are offset at:
+     500h + (hc_num * 20h)
+ */
+typedef struct ifxusb_hc_regs
+{
+ volatile uint32_t hcchar; /*!< 00h Host Channel Characteristic Register.*/
+ volatile uint32_t hcsplt; /*!< 04h Host Channel Split Control Register.*/
+ volatile uint32_t hcint; /*!< 08h Host Channel Interrupt Register. */
+ volatile uint32_t hcintmsk; /*!< 0Ch Host Channel Interrupt Mask Register. */
+ volatile uint32_t hctsiz; /*!< 10h Host Channel Transfer Size Register. */
+ volatile uint32_t hcdma; /*!< 14h Host Channel DMA Address Register. */
+ uint32_t reserved[2]; /*!< 18h Reserved. */
+} ifxusb_hc_regs_t;
+
+
+/*!
+ \brief Bit fields in the Host Channel Characteristics Register.
+ */
+typedef union hcchar_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned chen : 1; /*!< 31 Channel enable */
+ unsigned chdis : 1; /*!< 30 Channel disable */
+ unsigned oddfrm : 1; /*!< 29 Frame to transmit periodic transaction */
+ unsigned devaddr : 7; /*!< 28-22 Device address */
+ unsigned multicnt : 2; /*!< 21-20 Packets per frame for periodic transfers */
+ unsigned eptype : 2; /*!< 19-18 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
+ unsigned lspddev : 1; /*!< 17 0: Full/high speed device, 1: Low speed device */
+ unsigned reserved : 1;
+ unsigned epdir : 1; /*!< 15 0: OUT, 1: IN */
+ unsigned epnum : 4; /*!< 14-11 Endpoint number */
+ unsigned mps :11; /*!< 10-00 Maximum packet size in bytes */
+ } b;
+} hcchar_data_t;
+
+/*!
+ \brief Bit fields in the Host Channel Split Control Register
+ */
+typedef union hcsplt_data
+{
+ uint32_t d32;
+ struct
+ {
+		unsigned spltena		: 1;	/*!< 31 Split Enable */
+ unsigned reserved :14;
+ unsigned compsplt : 1; /*!< 16 Do Complete Split */
+ unsigned xactpos : 2; /*!< 15-14 Transaction Position */
+ #define IFXUSB_HCSPLIT_XACTPOS_MID 0
+ #define IFXUSB_HCSPLIT_XACTPOS_END 1
+ #define IFXUSB_HCSPLIT_XACTPOS_BEGIN 2
+ #define IFXUSB_HCSPLIT_XACTPOS_ALL 3
+ unsigned hubaddr : 7; /*!< 13-07 Hub Address */
+ unsigned prtaddr : 7; /*!< 06-00 Port Address */
+ } b;
+} hcsplt_data_t;
+
+/*!
+ \brief Bit fields in the Host Interrupt Register.
+ */
+typedef union hcint_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved :21;
+ unsigned datatglerr : 1; /*!< 10 Data Toggle Error */
+ unsigned frmovrun : 1; /*!< 09 Frame Overrun */
+ unsigned bblerr : 1; /*!< 08 Babble Error */
+ unsigned xacterr : 1; /*!< 07 Transaction Err */
+ unsigned nyet : 1; /*!< 06 NYET Response Received */
+ unsigned ack : 1; /*!< 05 ACK Response Received */
+ unsigned nak : 1; /*!< 04 NAK Response Received */
+ unsigned stall : 1; /*!< 03 STALL Response Received */
+ unsigned ahberr : 1; /*!< 02 AHB Error */
+ unsigned chhltd : 1; /*!< 01 Channel Halted */
+		unsigned xfercomp		: 1;	/*!< 00 Transfer Completed */
+ }b;
+} hcint_data_t;
+
+
+/*!
+ \brief Bit fields in the Host Channel Transfer Size
+ Register.
+ */
+typedef union hctsiz_data
+{
+ uint32_t d32;
+ struct
+ {
+		unsigned dopng		: 1;	/*!< 31 Do PING protocol when 1 */
+ unsigned pid : 2; /*!< 30-29 Packet ID for next data packet
+ 0: DATA0
+ 1: DATA2
+ 2: DATA1
+ 3: MDATA (non-Control), SETUP (Control)
+ */
+ #define IFXUSB_HCTSIZ_DATA0 0
+ #define IFXUSB_HCTSIZ_DATA1 2
+ #define IFXUSB_HCTSIZ_DATA2 1
+ #define IFXUSB_HCTSIZ_MDATA 3
+ #define IFXUSB_HCTSIZ_SETUP 3
+ unsigned pktcnt :10; /*!< 28-19 Data packets to transfer */
+ unsigned xfersize :19; /*!< 18-00 Total transfer size in bytes */
+ } b;
+} hctsiz_data_t;
+
+/*@}*//*IFXUSB_CSR_HOST_HC_REG*/
+
+/****************************************************************************/
+
+/*!
+ \addtogroup IFXUSB_CSR_PWR_CLK_GATING_REG
+ */
+/*@{*/
+/*!
+ \brief Bit fields in the Power and Clock Gating Control Register
+ */
+typedef union pcgcctl_data
+{
+ uint32_t d32;
+ struct
+ {
+ unsigned reserved : 27;
+ unsigned physuspended : 1; /*!< 04 PHY Suspended */
+ unsigned rstpdwnmodule : 1; /*!< 03 Reset Power Down Modules */
+ unsigned pwrclmp : 1; /*!< 02 Power Clamp */
+ unsigned gatehclk : 1; /*!< 01 Gate Hclk */
+ unsigned stoppclk : 1; /*!< 00 Stop Pclk */
+ } b;
+} pcgcctl_data_t;
+/*@}*//*IFXUSB_CSR_PWR_CLK_GATING_REG*/
+
+/****************************************************************************/
+
+#endif //__IFXUSB_REGS_H__
diff --git a/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_version.h b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_version.h
new file mode 100644
index 0000000000..2dff7354e5
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/drivers/usb/ifxhcd/ifxusb_version.h
@@ -0,0 +1,5 @@
+
+#ifndef IFXUSB_VERSION
+#define IFXUSB_VERSION "3.0alpha B100312"
+#endif
+
diff --git a/target/linux/lantiq/files-3.3/include/linux/svip_nat.h b/target/linux/lantiq/files-3.3/include/linux/svip_nat.h
new file mode 100644
index 0000000000..6563c38d58
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/include/linux/svip_nat.h
@@ -0,0 +1,37 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ Am Campeon 1-12; 81726 Munich, Germany
+
+ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE,
+ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS
+ SOFTWARE IS FREE OF CHARGE.
+
+ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS
+ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING
+ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP,
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE
+ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD
+ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL
+ PROPERTY INFRINGEMENT.
+
+ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT
+ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM
+ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+#ifndef _SVIP_NAT_H
+#define _SVIP_NAT_H
+
+/* The declarations here have to be in a header file, because
+ * they need to be known both to the kernel module
+ * (in chardev.c) and the process calling ioctl (ioctl.c)
+ */
+#include <linux/svip_nat_io.h>
+
+#define SVIP_NAT_VERSION "3.1"
+extern int do_SVIP_NAT(struct sk_buff *);
+
+#endif
diff --git a/target/linux/lantiq/files-3.3/include/linux/svip_nat_io.h b/target/linux/lantiq/files-3.3/include/linux/svip_nat_io.h
new file mode 100644
index 0000000000..bf42dd4ee3
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/include/linux/svip_nat_io.h
@@ -0,0 +1,103 @@
+/******************************************************************************
+
+ Copyright (c) 2007
+ Infineon Technologies AG
+ Am Campeon 1-12; 81726 Munich, Germany
+
+ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE,
+ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS
+ SOFTWARE IS FREE OF CHARGE.
+
+ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS
+ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING
+ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP,
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE
+ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD
+ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL
+ PROPERTY INFRINGEMENT.
+
+ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT
+ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM
+ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+ *******************************************************************************/
+#ifndef _SVIP_NAT_IO_H_
+#define _SVIP_NAT_IO_H_
+
+#include <asm/ioctl.h>
+
+#define SVIP_NAT_DEVICE_NAME "svip_nat"
+#define PATH_SVIP_NAT_DEVICE_NAME "/dev/"SVIP_NAT_DEVICE_NAME
+
+#define MAJOR_NUM_SVIP_NAT 10
+#define MINOR_NUM_SVIP_NAT 120
+
+/** maximum SVIP devices supported on a Line card system */
+#define SVIP_SYS_NUM 12
+
+/** maximum voice packet channels possible per SVIP device */
+#define SVIP_CODEC_NUM 16
+
+/** start UDP port number of the SVIP Linecard System */
+#define SVIP_UDP_FROM 50000
+
+/** @defgroup SVIP_NATAPI SVIP Custom NAT ioctl interface.
+ An ioctl interface is provided to add a rule to the SVIP NAT table and,
+ respectively, to remove a rule from it. The ioctl interface is accessible
+ through the fd issued upon opening the special device node /dev/svip_nat.
+ @{ */
+
+/** Used to add a new rule to the SVIP Custom NAT table. If a rule already
+ exists for the target UDP port, that rule shall be overwritten.
+
+ \param SVIP_NAT_IO_Rule_t* The parameter points to a
+ \ref SVIP_NAT_IO_Rule_t structure.
+ */
+#define FIO_SVIP_NAT_RULE_ADD \
+ _IOW(MAJOR_NUM_SVIP_NAT, 1, SVIP_NAT_IO_Rule_t)
+
+/** Used to remove a rule from the SVIP Custom NAT table. No check is
+ performed whether the rule already exists or not. The remove operation is
+ performed as long as the target UDP port is within the defined port range.
+
+ \param SVIP_NAT_IO_Rule_t* The parameter points to a
+ \ref SVIP_NAT_IO_Rule_t structure.
+ */
+#define FIO_SVIP_NAT_RULE_REMOVE \
+ _IOW(MAJOR_NUM_SVIP_NAT, 2, SVIP_NAT_IO_Rule_t)
+
+/** Used to list all rules in the SVIP Custom NAT table.
+
+ \param <none>
+ */
+#define FIO_SVIP_NAT_RULE_LIST \
+ _IO(MAJOR_NUM_SVIP_NAT, 3)
+
+/** IP address in network-byte order */
+typedef u32 SVIP_IP_ADDR_t;
+/** UDP port in network-byte order */
+typedef u16 SVIP_UDP_PORT_t;
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6 /* Octets in one ethernet address */
+#endif
+
+/** NAT parameters part of the NAT table.
+ These parameters are configurable through the NAT API. */
+typedef struct SVIP_NAT_IO_Rule
+{
+ /** Remote peer, IP address */
+ SVIP_IP_ADDR_t remIP;
+ /** Remote peer, MAC address */
+ u8 remMAC[ETH_ALEN];
+ /** Target SVIP, IP address (local peer) */
+ SVIP_IP_ADDR_t locIP;
+ /** Target SVIP, MAC address */
+ u8 locMAC[ETH_ALEN];
+ /** Target SVIP, UDP port number */
+ SVIP_UDP_PORT_t locUDP;
+} SVIP_NAT_IO_Rule_t;
+
+/** @} */
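+
+/* Editorial example (not part of the original sources): a minimal user-space
+ sketch, guarded by "#if 0" so it is never compiled, showing how a process
+ could add a rule through the ioctl interface declared above. The IP and MAC
+ addresses are made-up values and error handling is kept to a minimum. */
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <arpa/inet.h>
+#include <linux/svip_nat_io.h>
+
+static int svip_nat_add_example_rule(void)
+{
+ SVIP_NAT_IO_Rule_t rule;
+ int fd, ret;
+
+ memset(&rule, 0, sizeof(rule));
+ rule.remIP = htonl(0xC0A80164); /* remote peer 192.168.1.100 */
+ memcpy(rule.remMAC, "\x00\x03\x19\x00\x15\xd1", ETH_ALEN);
+ rule.locIP = htonl(0xC0A80102); /* target SVIP 192.168.1.2 */
+ memcpy(rule.locMAC, "\x00\x03\x19\x00\x20\x02", ETH_ALEN);
+ rule.locUDP = htons(SVIP_UDP_FROM); /* first voice channel port */
+
+ fd = open(PATH_SVIP_NAT_DEVICE_NAME, O_RDWR);
+ if (fd < 0)
+ return -1;
+ ret = ioctl(fd, FIO_SVIP_NAT_RULE_ADD, &rule);
+ close(fd);
+ return ret;
+}
+#endif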
+#endif
diff --git a/target/linux/lantiq/files-3.3/net/ipv4/svip_nat.c b/target/linux/lantiq/files-3.3/net/ipv4/svip_nat.c
new file mode 100644
index 0000000000..04a0d223a2
--- /dev/null
+++ b/target/linux/lantiq/files-3.3/net/ipv4/svip_nat.c
@@ -0,0 +1,1569 @@
+/******************************************************************************
+
+ Copyright (c) 2009
+ Lantiq Deutschland GmbH
+ Am Campeon 3; 81726 Munich, Germany
+
+ THE DELIVERY OF THIS SOFTWARE AS WELL AS THE HEREBY GRANTED NON-EXCLUSIVE,
+ WORLDWIDE LICENSE TO USE, COPY, MODIFY, DISTRIBUTE AND SUBLICENSE THIS
+ SOFTWARE IS FREE OF CHARGE.
+
+ THE LICENSED SOFTWARE IS PROVIDED "AS IS" AND INFINEON EXPRESSLY DISCLAIMS
+ ALL REPRESENTATIONS AND WARRANTIES, WHETHER EXPRESS OR IMPLIED, INCLUDING
+ WITHOUT LIMITATION, WARRANTIES OR REPRESENTATIONS OF WORKMANSHIP,
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, DURABILITY, THAT THE
+ OPERATING OF THE LICENSED SOFTWARE WILL BE ERROR FREE OR FREE OF ANY THIRD
+ PARTY CLAIMS, INCLUDING WITHOUT LIMITATION CLAIMS OF THIRD PARTY INTELLECTUAL
+ PROPERTY INFRINGEMENT.
+
+ EXCEPT FOR ANY LIABILITY DUE TO WILFUL ACTS OR GROSS NEGLIGENCE AND EXCEPT
+ FOR ANY PERSONAL INJURY INFINEON SHALL IN NO EVENT BE LIABLE FOR ANY CLAIM
+ OR DAMAGES OF ANY KIND, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+
+ ****************************************************************************
+
+Description : This file contains implementation of Custom NAT function
+for Infineon's VINETIC-SVIP16
+ *******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/proc_fs.h>
+#include <linux/in6.h> /* just to shut up a warning */
+#include <linux/miscdevice.h>
+#include <asm/checksum.h>
+
+#include <linux/svip_nat.h>
+
+MODULE_AUTHOR("Lantiq Deutschland GmbH");
+MODULE_DESCRIPTION("SVIP Network Address Translation module");
+MODULE_LICENSE("GPL");
+
+#define SVIP_NAT_INFO_STR "@(#)SVIP NAT, version "SVIP_NAT_VERSION
+
+/** maximum voice packet channels possible on the SVIP LC system
+ (equals maximum number of Codec channels possible) */
+#define SVIP_SYS_CODEC_NUM ((SVIP_SYS_NUM) * (SVIP_CODEC_NUM))
+
+/** end UDP port number of the SVIP Linecard System */
+#define SVIP_UDP_TO ((SVIP_UDP_FROM) + (SVIP_SYS_CODEC_NUM) - 1)
+
+/** end UDP port number of the Master SVIP in SVIP Linecard System */
+#define SVIP_UDP_TO_VOFW0 ((SVIP_UDP_FROM) + (SVIP_CODEC_NUM) - 1)
+
+#define SVIP_PORT_INRANGE(nPort) \
+ ((nPort) >= (SVIP_UDP_FROM) && (nPort) <= (SVIP_UDP_TO))
+
+#define SVIP_PORT_INDEX(nPort) (nPort - SVIP_UDP_FROM)
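+
+/* Worked instance of the mapping above, using the default values from
+ svip_nat_io.h (SVIP_SYS_NUM = 12, SVIP_CODEC_NUM = 16, SVIP_UDP_FROM = 50000):
+ 192 voice channels are covered, the valid UDP port range is 50000..50191,
+ and SVIP_PORT_INDEX() maps port 50000 to NAT table index 0 and port 50191
+ to index 191. */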
+
+#define SVIP_NET_DEV_ETH0_IDX 0
+#define SVIP_NET_DEV_VETH0_IDX 1
+#define SVIP_NET_DEV_LO_IDX 2
+
+#define SVIP_NET_DEV_ETH0_NAME "eth0"
+#define SVIP_NET_DEV_ETH1_NAME "eth1"
+#define SVIP_NET_DEV_VETH1_NAME "veth0"
+#define SVIP_NET_DEV_LO_NAME "lo"
+
+#define SVIP_NAT_STATS_LOC2REM 0
+#define SVIP_NAT_STATS_REM2LOC 1
+#define SVIP_NAT_STATS_TYPES 2
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#define SVIP_NAT_FOR_EACH_NETDEV(d) for_each_netdev(&init_net, d)
+#define SVIP_NAT_IP_HDR(ethhdr) ip_hdr(ethhdr)
+#else
+#define SVIP_NAT_FOR_EACH_NETDEV(d) for ((d) = dev_base; (d); (d) = (d)->next)
+#define SVIP_NAT_IP_HDR(ethhdr) (ethhdr)->nh.iph
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define SVIP_NAT_SKB_MAC_HEADER(ethhdr) (ethhdr)->mac.ethernet
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+#define SVIP_NAT_SKB_MAC_HEADER(ethhdr) (ethhdr)->mac.raw
+#else
+#define SVIP_NAT_SKB_MAC_HEADER(ethhdr) skb_mac_header(ethhdr)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#define VLAN_DEV_REAL_DEV(dev) vlan_dev_real_dev(dev)
+#define VLAN_DEV_VLAN_ID(dev) vlan_dev_vlan_id(dev)
+#else
+#define VLAN_DEV_REAL_DEV(dev) (VLAN_DEV_INFO(dev)->real_dev)
+#define VLAN_DEV_VLAN_ID(dev) (VLAN_DEV_INFO(dev)->vlan_id)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+
+#if ! ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)) && \
+ (defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)))
+#define VLAN_8021Q_UNUSED
+#endif
+
+
+extern spinlock_t vlan_group_lock;
+extern struct net_device *__vlan_find_dev_deep(struct net_device *real_dev, unsigned short VID);
+
+typedef struct SVIP_NAT_stats
+{
+ unsigned long inPackets;
+ unsigned long outPackets;
+ unsigned long outErrors;
+} SVIP_NAT_stats_t;
+
+typedef struct SVIP_NAT_table_entry
+{
+ SVIP_NAT_IO_Rule_t natRule;
+ SVIP_NAT_stats_t natStats[SVIP_NAT_STATS_TYPES];
+} SVIP_NAT_table_entry_t;
+
+/* pointer to the SVIP NAT table */
+static SVIP_NAT_table_entry_t *pNatTable = NULL;
+
+struct net_device *net_devs[3];
+static u32 *paddr_eth0;
+static u32 *paddr_eth0_0;
+static u32 *paddr_veth0;
+static u32 *pmask_veth0;
+
+static struct semaphore *sem_nat_tbl_access;
+static int proc_read_in_progress = 0;
+
+static int nDeviceOpen = 0;
+
+/* saves the NAT table index between subsequent invocations */
+static int nProcReadIdx = 0;
+
+static long SVIP_NAT_device_ioctl(struct file *,unsigned int ,unsigned long);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+static int SVIP_NAT_device_release (struct inode *,struct file *);
+#else
+static void SVIP_NAT_device_release (struct inode *,struct file *);
+#endif
+static int SVIP_NAT_device_open (struct inode *,struct file *);
+
+/* This structure holds the interface functions supported by
+ the SVIP NAT configuration device. */
+struct file_operations SVIP_NAT_Fops = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
+owner: THIS_MODULE,
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0) */
+ llseek: NULL, /* seek */
+ read: NULL,
+ write: NULL,
+ readdir: NULL, /* readdir */
+ poll: NULL, /* select */
+ unlocked_ioctl: SVIP_NAT_device_ioctl, /* ioctl */
+ mmap: NULL, /* mmap */
+ open: SVIP_NAT_device_open, /* open, */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+ flush: NULL, /* flush */
+#endif
+ release: SVIP_NAT_device_release /* close */
+};
+
+/** Structure holding MISC module operations */
+static struct miscdevice SVIP_NAT_miscdev =
+{
+minor: MINOR_NUM_SVIP_NAT,
+ name: SVIP_NAT_DEVICE_NAME,
+ fops: &SVIP_NAT_Fops
+};
+
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
+int nSVIP_NAT_Sniffer;
+unsigned char pSVIP_NAT_SnifferMAC[ETH_ALEN];
+int nSVIP_NAT_SnifferMacSet;
+#endif
+
+/******************************************************************************/
+/**
+ Function to read /proc/net/svip_nat/nat proc entry
+
+ \arguments
+ page - pointer to page buffer
+ start - pointer to start address pointer
+ off - offset
+ count - maximum data length to read
+ eof - end of file flag
+ data - proc read data (provided by the function
+ pointed to by data)
+
+ \return
+ length of read data
+
+ \remarks:
+ Each call of this routine forces a copy_to_user of the data returned by
+ 'fn'. This routine will be called by the user until 'len = 0'.
+ ****************************************************************************/
+static int SVIP_NAT_ProcRead (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ unsigned long flags;
+ int (*fn)(char *buf, int size);
+ int len;
+
+ /* If the NAT table index is negative, the reading has completed */
+ if (nProcReadIdx < 0)
+ {
+ nProcReadIdx = 0;
+ *eof = 1;
+ proc_read_in_progress = 0;
+ up(sem_nat_tbl_access);
+ return 0;
+ }
+
+ local_irq_save(flags);
+ if (!proc_read_in_progress)
+ {
+ proc_read_in_progress = 1;
+ local_irq_restore(flags);
+ /* we use this semaphore to ensure that no other party (e.g. the ioctl
+ FIO_SVIP_NAT_RULE_LIST) uses SVIP_NAT_ProcReadNAT() while the read
+ of the proc file takes place */
+ down(sem_nat_tbl_access);
+ }
+ else
+ {
+ local_irq_restore(flags);
+ }
+
+ if (data != NULL)
+ {
+ fn = data;
+ len = fn (page, count);
+ /* In this setup each read of the proc entries returns the read data by
+ 'fn' to the user. The user keeps issuing read requests as long as the
+ returned value of 'len' is greater than zero. */
+ *eof = 1;
+ *start = page;
+ }
+ else
+ {
+ len = 0;
+ }
+
+ return len;
+}
+
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
+/**
+ Function to read remaining proc entries
+ */
+static int SVIP_NAT_ProcReadGen (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int (*fn)(char *buf, int size);
+ int len = 0;
+
+ MOD_INC_USE_COUNT;
+
+ if (data == NULL)
+ {
+ MOD_DEC_USE_COUNT;
+ return 0;
+ }
+
+ fn = data;
+ len = fn (page, count);
+
+ if (len <= off + count)
+ {
+ *eof = 1;
+ }
+ *start = page + off;
+ len -= off;
+ if (len > count)
+ {
+ len = count;
+ }
+ if (len < 0)
+ {
+ len = 0;
+ }
+
+ MOD_DEC_USE_COUNT;
+
+ return len;
+}
+#endif
+
+/******************************************************************************/
+/**
+ Function for setting up /proc/net/svip_nat read data
+
+ \arguments
+ buf - pointer to read buffer
+ count - size of read buffer
+
+ \return
+ length of read data into buffer
+
+ \remarks:
+ The global variable 'nProcReadIdx' is used to save the table index where
+ the reading of the NAT table stopped. Reading is stopped when the end of
+ the read buffer is approached. On the next iteration the reading continues
+ from the saved index.
+ *******************************************************************************/
+static int SVIP_NAT_ProcReadNAT(char *buf, int count)
+{
+ int i, j;
+ int len = 0;
+ SVIP_NAT_IO_Rule_t *pNatRule;
+
+ if (nProcReadIdx == -1)
+ {
+ nProcReadIdx = 0;
+ return 0;
+ }
+
+ if (nProcReadIdx == 0)
+ {
+ len = sprintf(buf+len,
+ "Remote host IP " /* 16 char */
+ "Remote host MAC " /* 19 char */
+ "Local host IP " /* 15 char */
+ "Local host MAC " /* 19 char */
+ "Local host UDP " /* 16 char */
+ "Loc->Rem(in/out/err) " /* 22 char */
+ "Rem->Loc(in/out/err)\n\r");
+ }
+
+ for (i = nProcReadIdx; i < SVIP_SYS_CODEC_NUM; i++)
+ {
+ int slen;
+
+ pNatRule = &pNatTable[i].natRule;
+
+ if (pNatRule->remIP != 0)
+ {
+ /* make sure not to overrun the buffer */
+ if (count < len+120)
+ break;
+
+ /* remIP */
+ slen = sprintf(buf+len, "%d.%d.%d.%d",
+ (int)((pNatRule->remIP >> 24) & 0xff),
+ (int)((pNatRule->remIP >> 16) & 0xff),
+ (int)((pNatRule->remIP >> 8) & 0xff),
+ (int)((pNatRule->remIP >> 0) & 0xff));
+ len += slen;
+ for (j = 0; j < (16-slen); j++)
+ len += sprintf(buf+len, " ");
+
+ /* remMAC */
+ slen = 0;
+ for (j = 0; j < ETH_ALEN; j++)
+ {
+ slen += sprintf(buf+len+slen, "%02x%s",
+ pNatRule->remMAC[j], j < ETH_ALEN-1 ? ":" : " ");
+ }
+ len += slen;
+ for (j = 0; j < (19-slen); j++)
+ len += sprintf(buf+len, " ");
+
+ /* locIP */
+ slen = sprintf(buf+len, "%d.%d.%d.%d",
+ (int)((pNatRule->locIP >> 24) & 0xff),
+ (int)((pNatRule->locIP >> 16) & 0xff),
+ (int)((pNatRule->locIP >> 8) & 0xff),
+ (int)((pNatRule->locIP >> 0) & 0xff));
+ len += slen;
+ for (j = 0; j < (15-slen); j++)
+ len += sprintf(buf+len, " ");
+
+ /* locMAC */
+ slen = 0;
+ for (j = 0; j < ETH_ALEN; j++)
+ {
+ slen += sprintf(buf+len+slen, "%02x%s",
+ pNatRule->locMAC[j], j < ETH_ALEN-1 ? ":" : " ");
+ }
+ len += slen;
+ for (j = 0; j < (19-slen); j++)
+ len += sprintf(buf+len, " ");
+
+ /* locUDP */
+ slen = sprintf(buf+len, "%d", pNatRule->locUDP);
+ len += slen;
+ for (j = 0; j < (16-slen); j++)
+ len += sprintf(buf+len, " ");
+
+ /* NAT statistics, Local to Remote translation */
+ slen = sprintf(buf+len, "(%ld/%ld/%ld)",
+ pNatTable[i].natStats[SVIP_NAT_STATS_LOC2REM].inPackets,
+ pNatTable[i].natStats[SVIP_NAT_STATS_LOC2REM].outPackets,
+ pNatTable[i].natStats[SVIP_NAT_STATS_LOC2REM].outErrors);
+ len += slen;
+ for (j = 0; j < (22-slen); j++)
+ len += sprintf(buf+len, " ");
+
+ /* NAT statistics, Remote to Local translation */
+ len += sprintf(buf+len, "(%ld/%ld/%ld)\n\r",
+ pNatTable[i].natStats[SVIP_NAT_STATS_REM2LOC].inPackets,
+ pNatTable[i].natStats[SVIP_NAT_STATS_REM2LOC].outPackets,
+ pNatTable[i].natStats[SVIP_NAT_STATS_REM2LOC].outErrors);
+ }
+ }
+ if (i == SVIP_SYS_CODEC_NUM)
+ nProcReadIdx = -1; /* reading completed */
+ else
+ nProcReadIdx = i; /* reading still in process, buffer was full */
+
+ return len;
+}
+
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
+/**
+ Converts a MAC address from ASCII to hex representation
+ */
+static int SVIP_NAT_MacAsciiToHex(const char *pMacStr, unsigned char *pMacHex)
+{
+ int i=0, c=0, b=0, n=0;
+
+ memset(pMacHex, 0, ETH_ALEN);
+ while (pMacStr[i] != '\0')
+ {
+ if (n >= 0)
+ {
+ unsigned char nToHex = 0;
+
+ /* check for hex digit */
+ if (pMacStr[i] >= '0' && pMacStr[i] <= '9')
+ nToHex = 0x30;
+ else if (pMacStr[i] >= 'a' && pMacStr[i] <= 'f')
+ nToHex = 0x57;
+ else if (pMacStr[i] >= 'A' && pMacStr[i] <= 'F')
+ nToHex = 0x37;
+ else
+ {
+ if (n != 0)
+ {
+ printk(KERN_ERR "SVIP NAT: invalid MAC address format[%s]\n", pMacStr);
+ return -1;
+ }
+ i++;
+ continue;
+ }
+ n^=1;
+ pMacHex[b] |= ((pMacStr[i] - nToHex)&0xf) << (4*n);
+ if (n == 0)
+ {
+ /* advance to next byte, check if complete */
+ if (++b >= ETH_ALEN)
+ return 0;
+ /* byte completed, next we expect a colon... */
+ c = 1;
+ /* and, do not check for hex digit */
+ n = -1;
+ }
+ i++;
+ continue;
+ }
+ if (c == 1)
+ {
+ if (pMacStr[i] == ':')
+ {
+ /* next we expect hex digit, again */
+ n = 0;
+ }
+ else
+ {
+ printk(KERN_ERR "SVIP NAT: invalid MAC address format[%s]\n", pMacStr);
+ return -1;
+ }
+ }
+ i++;
+ }
+ return 0;
+}
+
+/**
+ Used to set the destination MAC address of a host to which incoming
+ SVIP VoFW packets are to be addressed. In case the address is set
+ to 00:00:00:00:00:00 (the default case), the packets will be written
+ out to eth0 with their original MAC address.
+
+ \remark
+usage: 'echo "00:03:19:00:15:D1" > /proc/net/svip_nat/snifferMAC'
+*/
+int SVIP_NAT_ProcWriteSnifferMAC (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ /* at least strlen("xx:xx:xx:xx:xx:xx") characters, followed by '\0' */
+ if (count >= 18)
+ {
+ int ret;
+
+ ret = SVIP_NAT_MacAsciiToHex(buffer, pSVIP_NAT_SnifferMAC);
+
+ if (ret != 0)
+ return 0;
+
+ if (!(pSVIP_NAT_SnifferMAC[0]==0 && pSVIP_NAT_SnifferMAC[1]==0 &&
+ pSVIP_NAT_SnifferMAC[2]==0 && pSVIP_NAT_SnifferMAC[3]==0 &&
+ pSVIP_NAT_SnifferMAC[4]==0 && pSVIP_NAT_SnifferMAC[5]==0))
+ {
+ nSVIP_NAT_SnifferMacSet = 1;
+ }
+ }
+ return count;
+}
+
+/**
+ Used to read the destination MAC address of a sniffer host
+ */
+int SVIP_NAT_ProcReadSnifferMAC (char *buf, int count)
+{
+ int len = 0;
+
+ len = snprintf(buf, count, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ pSVIP_NAT_SnifferMAC[0], pSVIP_NAT_SnifferMAC[1],
+ pSVIP_NAT_SnifferMAC[2], pSVIP_NAT_SnifferMAC[3],
+ pSVIP_NAT_SnifferMAC[4], pSVIP_NAT_SnifferMAC[5]);
+
+ if (len > count)
+ {
+ printk(KERN_ERR "SVIP NAT: Only part of the text could be put into the buffer\n");
+ return count;
+ }
+
+ return len;
+}
+
+/**
+ Used to switch VoFW message sniffer on/off
+
+ \remark
+usage: 'echo "1" > cat /proc/net/svip_nat/snifferOnOff'
+*/
+int SVIP_NAT_ProcWriteSnifferOnOff (struct file *file, const char *buffer,
+ unsigned long count, void *data)
+{
+ /* at least one digit expected, followed by '\0' */
+ if (count >= 2)
+ {
+ int ret, nSnifferOnOff;
+
+ ret = sscanf(buffer, "%d", &nSnifferOnOff);
+
+ if (ret != 1)
+ return count;
+
+ if (nSnifferOnOff > 0)
+ nSnifferOnOff = 1;
+
+ nSVIP_NAT_Sniffer = nSnifferOnOff;
+ }
+ return count;
+}
+
+/**
+ Used to read the VoFW message sniffer configuration (on/off)
+ */
+int SVIP_NAT_ProcReadSnifferOnOff (char *buf, int count)
+{
+ int len = 0;
+
+ len = snprintf(buf, count, "%d\n", nSVIP_NAT_Sniffer);
+
+ if (len > count)
+ {
+ printk(KERN_ERR "SVIP NAT: Only part of the text could be put into the buffer\n");
+ return count;
+ }
+
+ return len;
+}
+#endif
+
+/******************************************************************************/
+/**
+ Creates proc read/write entries
+
+ \return
+ 0 on success, -1 on error
+ */
+/******************************************************************************/
+static int SVIP_NAT_ProcInstall(void)
+{
+ struct proc_dir_entry *pProcParentDir, *pProcDir;
+ struct proc_dir_entry *pProcNode;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ pProcParentDir = proc_net;
+#else
+ pProcParentDir = init_net.proc_net;
+#endif
+ pProcDir = proc_mkdir(SVIP_NAT_DEVICE_NAME, pProcParentDir);
+ if (pProcDir == NULL)
+ {
+ printk(KERN_ERR "SVIP NAT: cannot create proc dir %s/%s\n\r",
+ pProcParentDir->name, SVIP_NAT_DEVICE_NAME);
+ return -1;
+ }
+
+ pProcNode = create_proc_read_entry("nat", S_IFREG|S_IRUGO, pProcDir,
+ SVIP_NAT_ProcRead, (void *)SVIP_NAT_ProcReadNAT);
+ if (pProcNode == NULL)
+ {
+ printk(KERN_ERR "SVIP NAT: cannot create proc entry %s/%s",
+ pProcDir->name, "nat");
+ return -1;
+ }
+
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
+ nSVIP_NAT_Sniffer = 0;
+ /* creates proc entry for switching on/off sniffer to VoFW messages */
+ pProcNode = create_proc_read_entry("snifferOnOff", S_IFREG|S_IRUGO|S_IWUGO,
+ pProcDir, SVIP_NAT_ProcReadGen, (void *)SVIP_NAT_ProcReadSnifferOnOff);
+ if (pProcNode == NULL)
+ {
+ printk(KERN_ERR "SVIP NAT: cannot create proc entry %s/%s\n\r",
+ pProcDir->name, "snifferOnOff");
+ return -1;
+ }
+ pProcNode->write_proc = SVIP_NAT_ProcWriteSnifferOnOff;
+
+ memset (pSVIP_NAT_SnifferMAC, 0, ETH_ALEN);
+ nSVIP_NAT_SnifferMacSet = 0;
+ /* creates proc entry for setting MAC address of sniffer host to VoFW messages */
+ pProcNode = create_proc_read_entry("snifferMAC", S_IFREG|S_IRUGO|S_IWUGO,
+ pProcDir, SVIP_NAT_ProcReadGen, (void *)SVIP_NAT_ProcReadSnifferMAC);
+ if (pProcNode == NULL)
+ {
+ printk(KERN_ERR "SVIP NAT: cannot create proc entry %s/%s\n\r",
+ pProcDir->name, "snifferMAC");
+ return -1;
+ }
+ pProcNode->write_proc = SVIP_NAT_ProcWriteSnifferMAC;
+#endif
+
+ return 0;
+}
+
+/******************************************************************************/
+/**
+ On the first open, pointers to the IP addresses assigned to eth0 and veth0
+ are looked up and cached. Subsequent opens merely increment the usage
+ count, as it is sufficient to have a single process configuring the
+ SVIP NAT at one time.
+
+ \arguments
+ inode - pointer to disk file data
+ file - pointer to device file data
+
+ \return
+ 0 on success, else -1
+ */
+/******************************************************************************/
+static int SVIP_NAT_device_open(struct inode *inode, struct file *file)
+{
+ unsigned long flags;
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ local_irq_save(flags);
+#else
+ local_save_flags(flags);
+#endif
+
+ if (nDeviceOpen)
+ {
+ MOD_INC_USE_COUNT;
+ local_irq_restore(flags);
+ nDeviceOpen++;
+ return 0;
+ }
+
+ /* find pointer to IP address of eth0 */
+ if ((in_dev=in_dev_get(net_devs[SVIP_NET_DEV_ETH0_IDX])) != NULL)
+ {
+ for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next)
+ {
+ if (!paddr_eth0 && ifa->ifa_address != 0)
+ {
+ paddr_eth0 = &ifa->ifa_address;
+ continue;
+ }
+ if (paddr_eth0 && ifa->ifa_address != 0)
+ {
+ paddr_eth0_0 = &ifa->ifa_address;
+ break;
+ }
+ }
+ in_dev_put(in_dev);
+ }
+ if (paddr_eth0 == NULL || paddr_eth0_0 == NULL)
+ {
+ local_irq_restore(flags);
+ return -ENODATA;
+ }
+
+ /* find pointer to IP address of veth0 */
+ if ((in_dev=in_dev_get(net_devs[SVIP_NET_DEV_VETH0_IDX])) != NULL)
+ {
+ for (ifa = in_dev->ifa_list; ifa != NULL; ifa = ifa->ifa_next)
+ {
+ if (ifa->ifa_address != 0)
+ {
+ paddr_veth0 = &ifa->ifa_address;
+ pmask_veth0 = &ifa->ifa_mask;
+ break;
+ }
+ }
+ in_dev_put(in_dev);
+ }
+ if (paddr_veth0 == NULL)
+ {
+ local_irq_restore(flags);
+ return -ENODATA;
+ }
+
+ MOD_INC_USE_COUNT;
+ nDeviceOpen++;
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+
+/******************************************************************************/
+/**
+ This function is called when a process closes the SVIP NAT device file
+
+ \arguments
+ inode - pointer to disk file data
+ file - pointer to device file data
+
+ \return
+ 0 on success, else -1
+
+*/
+/******************************************************************************/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+static int SVIP_NAT_device_release(struct inode *inode,
+ struct file *file)
+#else
+static void SVIP_NAT_device_release(struct inode *inode,
+ struct file *file)
+#endif
+{
+ unsigned long flags;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ save_flags(flags);
+ cli();
+#else
+ local_save_flags(flags);
+#endif
+
+ /* The device can now be opened by the next caller */
+ nDeviceOpen--;
+
+ MOD_DEC_USE_COUNT;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ restore_flags(flags);
+#else
+ local_irq_restore(flags);
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,2,0)
+ return 0;
+#endif
+}
+
+
+/******************************************************************************/
+/**
+ This function handles ioctl requests issued on the SVIP NAT device file
+
+ \arguments
+ inode - pointer to disk file data
+ file - pointer to device file data
+ ioctl_num - ioctl number requested
+ ioctl_param - pointer to data related to the ioctl number
+
+ \return
+ 0 on success, else -1
+
+*/
+/******************************************************************************/
+long SVIP_NAT_device_ioctl (struct file *file,
+ unsigned int ioctl_num, unsigned long ioctl_param)
+{
+ int ret = 0;
+ SVIP_NAT_IO_Rule_t *pNatRule, *pNatRuleIn;
+ SVIP_UDP_PORT_t nPort;
+ int nNatIdx;
+ int bWrite = 0;
+ int bRead = 0;
+ unsigned char *pData = 0;
+ int nSize;
+
+ if (_IOC_DIR(ioctl_num) & _IOC_WRITE)
+ bWrite = 1;
+ if (_IOC_DIR(ioctl_num) & _IOC_READ)
+ bRead = 1;
+ nSize = _IOC_SIZE(ioctl_num);
+
+ if (nSize > sizeof(int))
+ {
+ if (bRead || bWrite)
+ {
+ pData = kmalloc (nSize, GFP_KERNEL);
+ if (pData == NULL)
+ {
+ ret = -1;
+ goto error;
+ }
+ if (bWrite)
+ {
+ if (copy_from_user ((void *)pData, (void *)ioctl_param, nSize) != 0)
+ {
+ printk(KERN_ERR "SVIP NAT: ioctl %x: copy_from_user() failed!\n", ioctl_num);
+ ret = -1;
+ goto error;
+ }
+ }
+ }
+ }
+
+ switch (ioctl_num)
+ {
+ case FIO_SVIP_NAT_RULE_ADD:
+
+ pNatRuleIn = (SVIP_NAT_IO_Rule_t *)pData;
+
+ /* check if destination UDP port is within range */
+ nPort = ntohs(pNatRuleIn->locUDP);
+
+ if (!SVIP_PORT_INRANGE(nPort))
+ {
+ printk(KERN_ERR "SVIP NAT: Error, UDP port(%d) is out of range(%d..%d)\n",
+ nPort, SVIP_UDP_FROM, SVIP_UDP_TO);
+ ret = -1;
+ goto error;
+ }
+ nNatIdx = SVIP_PORT_INDEX(nPort);
+
+ down(sem_nat_tbl_access);
+ pNatRule = &pNatTable[nNatIdx].natRule;
+
+ /* add rule to the NAT table */
+ pNatRule->remIP = pNatRuleIn->remIP;
+ memcpy((char *)pNatRule->remMAC, (char *)pNatRuleIn->remMAC, ETH_ALEN);
+ pNatRule->locIP = pNatRuleIn->locIP;
+ memcpy((char *)pNatRule->locMAC, (char *)pNatRuleIn->locMAC, ETH_ALEN);
+ pNatRule->locUDP = pNatRuleIn->locUDP;
+
+ memset(pNatTable[nNatIdx].natStats, 0,
+ sizeof(SVIP_NAT_stats_t)*SVIP_NAT_STATS_TYPES);
+ up(sem_nat_tbl_access);
+ break;
+
+ case FIO_SVIP_NAT_RULE_REMOVE:
+
+ pNatRuleIn = (SVIP_NAT_IO_Rule_t *)pData;
+
+ /* check if destination UDP port is within range */
+ nPort = ntohs(pNatRuleIn->locUDP);
+ if (!SVIP_PORT_INRANGE(nPort))
+ {
+ printk(KERN_ERR "SVIP NAT: Error, UDP port(%d) is out of range(%d..%d)\n",
+ nPort, SVIP_UDP_FROM, SVIP_UDP_TO);
+ ret = -1;
+ goto error;
+ }
+ nNatIdx = SVIP_PORT_INDEX(nPort);
+ down(sem_nat_tbl_access);
+ /* remove rule from the NAT table */
+ memset(&pNatTable[nNatIdx], 0, sizeof(SVIP_NAT_table_entry_t));
+ up(sem_nat_tbl_access);
+ break;
+
+ case FIO_SVIP_NAT_RULE_LIST:
+ {
+ int len;
+ char buf[256];
+
+ down(sem_nat_tbl_access);
+ while (nProcReadIdx != -1)
+ {
+ len = SVIP_NAT_ProcReadNAT(buf, 256);
+ if (len > 0)
+ printk("%s", buf);
+ }
+ nProcReadIdx = 0;
+ up(sem_nat_tbl_access);
+ break;
+ }
+
+ default:
+ printk(KERN_ERR "SVIP NAT: unsupported ioctl (%x) command for device %s\n",
+ ioctl_num, PATH_SVIP_NAT_DEVICE_NAME);
+ ret = -1;
+ goto error;
+ }
+
+ if (nSize > sizeof(int))
+ {
+ if (bRead)
+ {
+ if (copy_to_user ((void *)ioctl_param, (void *)pData, nSize) != 0)
+ {
+ printk(KERN_ERR "SVIP NAT: ioctl %x: copy_to_user() failed!\n", ioctl_num);
+ ret = -1;
+ goto error;
+ }
+ }
+ }
+
+error:
+ if (pData)
+ kfree(pData);
+
+ return ret;
+}
+
+#if 0
+void dump_msg(unsigned char *pData, unsigned int nLen)
+{
+ int i;
+
+ for (i=0; i<nLen; i++)
+ {
+ if (!i || !(i%16))
+ printk("\n ");
+ else if (i && !(i%4))
+ printk(" ");
+ printk("%02x", pData[i]);
+ }
+ if (--i%16)
+ printk("\n");
+}
+#endif
+
+/******************************************************************************/
+/**
+ Used to recalculate IP/UDP checksum using the original IP/UDP checksum
+ coming with the packet. The original source and destination IP addresses
+ are accounted for, and, the checksum is updated using the new source and
+ destination IP addresses.
+
+ \arguments
+ skb - pointer to the receiving socket buffer
+ csum_old - original checksum
+ saddr_old - pointer to original source IP address
+ saddr_new - pointer to new source IP address
+ daddr_old - pointer to original destination IP address
+ daddr_new - pointer to new destination IP address
+
+ \return
+ recalculated IP/UDP checksum
+ */
+/******************************************************************************/
+static inline u16 ip_udp_quick_csum(u16 csum_old, u16 *saddr_old, u16 *saddr_new,
+ u16 *daddr_old, u16 *daddr_new)
+{
+ u32 sum;
+
+ sum = csum_old;
+
+ /* convert back from one's complement */
+ sum = ~sum & 0xffff;
+
+ if (sum < saddr_old[0]) sum += 0xffff;
+ sum -= saddr_old[0];
+ if (sum < saddr_old[1]) sum += 0xffff;
+ sum -= saddr_old[1];
+ if (sum < daddr_old[0]) sum += 0xffff;
+ sum -= daddr_old[0];
+ if (sum < daddr_old[1]) sum += 0xffff;
+ sum -= daddr_old[1];
+
+ sum += saddr_new[0];
+ sum += saddr_new[1];
+ sum += daddr_new[0];
+ sum += daddr_new[1];
+
+ /* take only 16 bits out of the 32 bit sum and add up the carries */
+ while (sum >> 16)
+ sum = (sum & 0xffff)+((sum >> 16) & 0xffff);
+
+ /* one's complement the result */
+ sum = ~sum;
+
+ return (u16)(sum & 0xffff);
+}
+
+
+/******************************************************************************/
+/**
+ Returns a pointer to an ipv4 address assigned to device dev. The ipv4
+ instance checked is pointed to by ifa_start. The function is suited for
+ iterative calls.
+
+ \arguments
+ dev - pointer to network interface
+ ifa_start - pointer to ipv4 instance to return ipv4 address assigned
+ to, NULL for the first one
+ ppifa_addr - output parameter
+
+ \return
+ pointer to the next ipv4 instance, which can be null if ifa_start was
+ the last instance present
+ */
+/******************************************************************************/
+static struct in_ifaddr *get_ifaddr(struct net_device *dev,
+ struct in_ifaddr *ifa_start, unsigned int **ppifa_addr)
+{
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa = NULL;
+
+ if ((in_dev=in_dev_get(dev)) != NULL)
+ {
+ if (ifa_start == NULL)
+ ifa = in_dev->ifa_list;
+ else
+ ifa = ifa_start;
+ if (ifa)
+ {
+ *ppifa_addr = &ifa->ifa_address;
+ ifa = ifa->ifa_next;
+ }
+ in_dev_put(in_dev);
+ return ifa;
+ }
+ *ppifa_addr = NULL;
+ return NULL;
+}
+
+/******************************************************************************/
+/**
+ This function performs IP NAT for received packets satisfying the
+ following requirements:
+
+ - packet is destined to local IP host
+ - transport protocol type is UDP
+ - destination UDP port is within range
+
+ \arguments
+ skb - pointer to the receiving socket buffer
+
+ \return
+ returns 1 on performed SVIP NAT, else returns 0
+
+ \remarks
+ When function returns 0, it indicates the caller to pass the
+ packet up the IP stack to make further decision about it
+ */
+/******************************************************************************/
+int do_SVIP_NAT (struct sk_buff *skb)
+{
+ struct net_device *real_dev;
+ struct iphdr *iph;
+ struct udphdr *udph;
+ SVIP_NAT_IO_Rule_t *pNatRule;
+ int nNatIdx, in_eth0, nDir;
+#ifndef VLAN_8021Q_UNUSED
+ int vlan;
+ unsigned short vid;
+#endif /* ! VLAN_8021Q_UNUSED */
+ SVIP_UDP_PORT_t nPort;
+ u32 orgSrcIp, orgDstIp, *pSrcIp, *pDstIp;
+ struct ethhdr *ethh;
+
+ /* do not consider if SVIP NAT device not open. */
+ if (!nDeviceOpen)
+ {
+ return 0;
+ }
+
+ /* consider only UDP packets. */
+ iph = SVIP_NAT_IP_HDR(skb);
+ if (iph->protocol != IPPROTO_UDP)
+ {
+ return 0;
+ }
+
+ udph = (struct udphdr *)((u_int32_t *)iph + iph->ihl);
+ /* consider only packets which UDP port numbers reside within
+ the predefined SVIP NAT UDP port range. */
+ if ((!SVIP_PORT_INRANGE(ntohs(udph->dest))) &&
+ (!SVIP_PORT_INRANGE(ntohs(udph->source))))
+ {
+ return 0;
+ }
+
+#ifndef VLAN_8021Q_UNUSED
+ /* check if packet delivered over VLAN. VLAN packets will be routed over
+ the VLAN interfaces of the respective real Ethernet interface, if one
+ exists(VIDs must match). Else, the packet will be send out as IEEE 802.3
+ Ethernet frame */
+ if (skb->dev->priv_flags & IFF_802_1Q_VLAN)
+ {
+ vlan = 1;
+ vid = VLAN_DEV_VLAN_ID(skb->dev);
+ real_dev = VLAN_DEV_REAL_DEV(skb->dev);
+ }
+ else
+ {
+ vlan = 0;
+ vid = 0;
+ real_dev = skb->dev;
+ }
+#else
+ /* no VLAN support compiled in: the receiving device is the real device */
+ real_dev = skb->dev;
+#endif /* ! VLAN_8021Q_UNUSED */
+
+#ifdef CONFIG_SVIP_FW_PKT_SNIFFER
+ /** Debugging feature which can be enabled by writing,
+ 'echo 1 > /proc/net/svip_nat/snifferOnOff'.
+ It copies all packets received on veth0 and sends them out over eth0.
+ When a destination MAC address is specified through
+ /proc/net/svip_nat/snifferMAC, this MAC address will substitute the
+ original MAC address of the packet.
+ It is recommended to specify a MAC address of some host where Wireshark
+ runs and sniffs for this traffic, else you may flood your LAN with
+ undeliverable traffic.
+
+NOTE: In case of VLAN traffic the VLAN header information is lost. */
+ if (nSVIP_NAT_Sniffer)
+ {
+ if (real_dev == net_devs[SVIP_NET_DEV_VETH0_IDX])
+ {
+ struct sk_buff *copied_skb;
+
+ /* gain the Ethernet header from the skb */
+ skb_push(skb, ETH_HLEN);
+
+ copied_skb = skb_copy (skb, GFP_ATOMIC);
+
+ if (nSVIP_NAT_SnifferMacSet == 1)
+ {
+ ethh = (struct ethhdr *)SVIP_NAT_SKB_MAC_HEADER(copied_skb);
+ memcpy((char *)ethh->h_dest, (char *)pSVIP_NAT_SnifferMAC, ETH_ALEN);
+ }
+ copied_skb->dev = net_devs[SVIP_NET_DEV_ETH0_IDX];
+ dev_queue_xmit(copied_skb);
+
+ /* skip the ETH header again */
+ skb_pull(skb, ETH_HLEN);
+ }
+ }
+#endif
+
+
+ /* check if packet arrived on eth0 */
+ if (real_dev == net_devs[SVIP_NET_DEV_ETH0_IDX])
+ {
+ /* check if destination IP address equals the primary assigned IP address
+ of interface eth0. This is the case of packets originating from a
+ remote peer that are to be delivered to a channel residing on THIS
+ voice linecard system. This is typical SVIP NAT case, therefore this
+ rule is placed on top. */
+ if (iph->daddr == *paddr_eth0)
+ {
+ nPort = ntohs(udph->dest);
+ nDir = SVIP_NAT_STATS_REM2LOC;
+ }
+ /* check if destination IP address equals the secondary assigned IP address
+ of interface eth0. This is not a typical SVIP NAT case; it mainly exists
+ so that, for debugging purposes, the LCC can be used to route Slave SVIP
+ packets which are part of voice/fax streaming. */
+ else if (iph->daddr == *paddr_eth0_0)
+ {
+ nPort = ntohs(udph->source);
+ nDir = SVIP_NAT_STATS_LOC2REM;
+ }
+#ifndef VLAN_8021Q_UNUSED
+ /* if the packet did not hit the top two rules, check here whether it is
+ addressed to any of the IP addresses assigned to the VLAN interface attached
+ to eth0. This approach is not recommended because of the CPU cost incurred. */
+ else if (vlan)
+ {
+ unsigned int *pifa_addr;
+ struct in_ifaddr *ifa_start = NULL;
+ int i = 0;
+
+ do
+ {
+ ifa_start = get_ifaddr(skb->dev, ifa_start, &pifa_addr);
+ if (!pifa_addr)
+ {
+ /* VLAN packet received on vlan interface attached to eth0,
+ however no IP address assigned to the interface.
+ The packet is ignored. */
+ return 0;
+ }
+ if (iph->daddr == *pifa_addr)
+ {
+ /* packet destined to... */
+ break;
+ }
+ if (!ifa_start)
+ {
+ return 0;
+ }
+ i++;
+ } while (ifa_start);
+ if (!i)
+ {
+ /* ...primary assigned IP address to the VLAN interface. */
+ nPort = ntohs(udph->dest);
+ nDir = SVIP_NAT_STATS_REM2LOC;
+ }
+ else
+ {
+ /* ...secondary assigned IP address to the VLAN interface. */
+ nPort = ntohs(udph->source);
+ nDir = SVIP_NAT_STATS_LOC2REM;
+ }
+ }
+#endif /* ! VLAN_8021Q_UNUSED */
+ else
+ {
+ return 0;
+ }
+ in_eth0 = 1;
+ }
+ /* check if packet arrived on veth0 */
+ else if (real_dev == net_devs[SVIP_NET_DEV_VETH0_IDX])
+ {
+ nPort = ntohs(udph->source);
+ nDir = SVIP_NAT_STATS_LOC2REM;
+ in_eth0 = 0;
+ }
+ else
+ {
+ /* packet arrived neither on eth0, nor veth0 */
+ return 0;
+ }
+
+ /* calculate the respective index of the NAT table */
+ nNatIdx = SVIP_PORT_INDEX(nPort);
+ /* process the packet if a respective NAT rule exists */
+ pNatRule = &pNatTable[nNatIdx].natRule;
+
+ ethh = (struct ethhdr *)SVIP_NAT_SKB_MAC_HEADER(skb);
+
+ /* copy packet's original source and destination IP addresses to use
+ later on to perform efficient checksum recalculation */
+ orgSrcIp = iph->saddr;
+ orgDstIp = iph->daddr;
+
+ if (in_eth0)
+ {
+ u8 *pDstMac;
+
+ /* Process packet arrived on eth0 */
+
+ if (nDir == SVIP_NAT_STATS_REM2LOC && iph->saddr == pNatRule->remIP)
+ {
+ pDstIp = &pNatRule->locIP;
+ pDstMac = pNatRule->locMAC;
+ }
+ else if (nDir == SVIP_NAT_STATS_LOC2REM && iph->saddr == pNatRule->locIP)
+ {
+ pDstIp = &pNatRule->remIP;
+ pDstMac = pNatRule->remMAC;
+ }
+ else
+ {
+ /* Rule check failed. The packet is passed up the layers,
+ it will be dropped by UDP */
+ return 0;
+ }
+
+ if ((*pDstIp & *pmask_veth0) == (*paddr_veth0 & *pmask_veth0))
+ {
+#ifndef VLAN_8021Q_UNUSED
+ if (vlan)
+ {
+ struct net_device *vlan_dev;
+
+ spin_lock_bh(&vlan_group_lock);
+ vlan_dev = __vlan_find_dev_deep(net_devs[SVIP_NET_DEV_VETH0_IDX], vid);
+ spin_unlock_bh(&vlan_group_lock);
+ if (vlan_dev)
+ {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ struct vlan_ethhdr *vethh;
+
+ skb_push(skb, VLAN_ETH_HLEN);
+ /* reconstruct the VLAN header.
+NOTE: priority information is lost */
+ vethh = (struct vlan_ethhdr *)skb->data;
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
+ vethh->h_vlan_TCI = htons(vid);
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ ethh = (struct ethhdr *)vethh;
+#else
+ skb_push(skb, ETH_HLEN);
+#endif
+ skb->dev = vlan_dev;
+ }
+ else
+ {
+ skb->dev = net_devs[SVIP_NET_DEV_VETH0_IDX];
+ skb_push(skb, ETH_HLEN);
+ }
+ }
+ else
+#endif /* ! VLAN_8021Q_UNUSED */
+ {
+ skb->dev = net_devs[SVIP_NET_DEV_VETH0_IDX];
+ skb_push(skb, ETH_HLEN);
+ }
+ pSrcIp = paddr_veth0;
+ }
+ else
+ {
+#ifndef VLAN_8021Q_UNUSED
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ if (vlan)
+ {
+ struct vlan_ethhdr *vethh;
+
+ /* reconstruct the VLAN header.
+NOTE: priority information is lost */
+ skb_push(skb, VLAN_ETH_HLEN);
+ vethh = (struct vlan_ethhdr *)skb->data;
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
+ vethh->h_vlan_TCI = htons(vid);
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ ethh = (struct ethhdr *)vethh;
+ }
+ else
+#endif
+#endif /* ! VLAN_8021Q_UNUSED */
+ {
+ skb_push(skb, ETH_HLEN);
+ }
+ /* source IP address equals the destination IP address
+ of the incoming packet */
+ pSrcIp = &iph->daddr;
+ }
+ iph->saddr = *pSrcIp;
+ memcpy((char *)ethh->h_source, (char *)skb->dev->dev_addr, ETH_ALEN);
+ iph->daddr = *pDstIp;
+ memcpy((char *)ethh->h_dest, (char *)pDstMac, ETH_ALEN);
+ }
+ else
+ {
+ /* Process packet arrived on veth0 */
+
+ if (iph->saddr != pNatRule->locIP)
+ {
+ /* Rule check failed. The packet is passed up the layers,
+ it will be dropped by UDP */
+ return 0;
+ }
+
+ if (!((pNatRule->remIP & *pmask_veth0) == (*paddr_veth0 & *pmask_veth0)))
+ {
+#ifndef VLAN_8021Q_UNUSED
+ if (vlan)
+ {
+ struct net_device *vlan_dev;
+
+ spin_lock_bh(&vlan_group_lock);
+ vlan_dev = __vlan_find_dev_deep(net_devs[SVIP_NET_DEV_ETH0_IDX], vid);
+ spin_unlock_bh(&vlan_group_lock);
+ if (vlan_dev)
+ {
+ unsigned int *pifa_addr;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ struct vlan_ethhdr *vethh;
+
+ skb_push(skb, VLAN_ETH_HLEN);
+ /* construct the VLAN header, note priority information is lost */
+ vethh = (struct vlan_ethhdr *)skb->data;
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
+ vethh->h_vlan_TCI = htons(vid);
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ ethh = (struct ethhdr *)vethh;
+#else
+ skb_push(skb, ETH_HLEN);
+#endif
+ skb->dev = vlan_dev;
+
+ get_ifaddr(skb->dev, NULL, &pifa_addr);
+ if (pifa_addr)
+ {
+ pSrcIp = pifa_addr;
+ }
+ else
+ {
+ pSrcIp = paddr_eth0;
+ }
+ }
+ else
+ {
+ skb->dev = net_devs[SVIP_NET_DEV_ETH0_IDX];
+ pSrcIp = paddr_eth0;
+ skb_push(skb, ETH_HLEN);
+ }
+ }
+ else
+#endif /* ! VLAN_8021Q_UNUSED */
+ {
+ skb->dev = net_devs[SVIP_NET_DEV_ETH0_IDX];
+ pSrcIp = paddr_eth0;
+ skb_push(skb, ETH_HLEN);
+ }
+ }
+ else
+ {
+ pSrcIp = paddr_veth0;
+#ifndef VLAN_8021Q_UNUSED
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ if (vlan)
+ {
+ struct vlan_ethhdr *vethh;
+
+ skb_push(skb, VLAN_ETH_HLEN);
+ /* reconstruct the VLAN header.
+NOTE: priority information is lost. */
+ vethh = (struct vlan_ethhdr *)skb->data;
+ vethh->h_vlan_proto = htons(ETH_P_8021Q);
+ vethh->h_vlan_TCI = htons(vid);
+ vethh->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ ethh = (struct ethhdr *)vethh;
+ }
+ else
+#endif
+#endif /* ! VLAN_8021Q_UNUSED */
+ {
+ skb_push(skb, ETH_HLEN);
+ }
+ }
+ iph->saddr = *pSrcIp;
+ memcpy((char *)ethh->h_source, (char *)skb->dev->dev_addr, ETH_ALEN);
+ iph->daddr = pNatRule->remIP;
+ memcpy((char *)ethh->h_dest, (char *)pNatRule->remMAC, ETH_ALEN);
+ }
+ pNatTable[nNatIdx].natStats[nDir].inPackets++;
+
+ iph->check = ip_udp_quick_csum(iph->check, (u16 *)&orgSrcIp, (u16 *)&iph->saddr,
+ (u16 *)&orgDstIp, (u16 *)&iph->daddr);
+ if (udph->check != 0)
+ {
+ udph->check = ip_udp_quick_csum(udph->check, (u16 *)&orgSrcIp, (u16 *)&iph->saddr,
+ (u16 *)&orgDstIp, (u16 *)&iph->daddr);
+ }
+
+ /* write the packet out, directly to the network device */
+ if (dev_queue_xmit(skb) < 0)
+ pNatTable[nNatIdx].natStats[nDir].outErrors++;
+ else
+ pNatTable[nNatIdx].natStats[nDir].outPackets++;
+
+ return 1;
+}
+
+/******************************************************************************/
+/**
+ Function executed upon loading of the SVIP NAT module. It registers the
+ SVIP NAT configuration device, allocates the memory used for the NAT table,
+ installs the proc entries and looks up the eth0 and veth0 network devices.
+ *******************************************************************************/
+static int __init init(void)
+{
+ int ret = 0;
+ struct net_device *dev;
+
+ if (misc_register(&SVIP_NAT_miscdev) != 0)
+ {
+ printk(KERN_ERR "%s: cannot register SVIP NAT device node.\n",
+ SVIP_NAT_miscdev.name);
+ return -EIO;
+ }
+
+ /* allocation of memory for NAT table */
+ pNatTable = (SVIP_NAT_table_entry_t *)kmalloc(
+ sizeof(SVIP_NAT_table_entry_t) * SVIP_SYS_CODEC_NUM, GFP_ATOMIC);
+ if (pNatTable == NULL)
+ {
+ printk (KERN_ERR "SVIP NAT: Error(%d), allocating memory for NAT table\n", ret);
+ return -1;
+ }
+
+ /* clear the NAT table */
+ memset((void *)pNatTable, 0, sizeof(SVIP_NAT_table_entry_t) * SVIP_SYS_CODEC_NUM);
+
+ sem_nat_tbl_access = kmalloc(sizeof(struct semaphore), GFP_KERNEL);
+ if (sem_nat_tbl_access == NULL)
+ {
+ printk (KERN_ERR "SVIP NAT: Error allocating memory for semaphore\n");
+ kfree((void *)pNatTable);
+ return -1;
+ }
+ sema_init(sem_nat_tbl_access, 1);
+
+ SVIP_NAT_ProcInstall();
+
+ /* find pointers to 'struct net_device' of eth0 and veth0, respectively */
+ read_lock(&dev_base_lock);
+ SVIP_NAT_FOR_EACH_NETDEV(dev)
+ {
+ if (!strcmp(dev->name, SVIP_NET_DEV_ETH0_NAME))
+ {
+ net_devs[SVIP_NET_DEV_ETH0_IDX] = dev;
+ }
+ if (!strcmp(dev->name, SVIP_NET_DEV_VETH1_NAME))
+ {
+ net_devs[SVIP_NET_DEV_VETH0_IDX] = dev;
+ }
+ else if (!strcmp(dev->name, SVIP_NET_DEV_ETH1_NAME))
+ {
+ net_devs[SVIP_NET_DEV_VETH0_IDX] = dev;
+ }
+ }
+ read_unlock(&dev_base_lock);
+
+ if (net_devs[SVIP_NET_DEV_ETH0_IDX] == NULL ||
+ net_devs[SVIP_NET_DEV_VETH0_IDX] == NULL)
+ {
+ printk (KERN_ERR "SVIP NAT: Error, unable to locate eth0 and veth0 interfaces\n");
+ return -1;
+ }
+
+ printk ("%s, (c) 2009, Lantiq Deutschland GmbH\n", &SVIP_NAT_INFO_STR[4]);
+
+ return ret;
+}
+
+/******************************************************************************/
+/**
+ Function executed upon unloading of the SVIP NAT module. It unregisters the
+ SVIP NAT configuration device and frees the memory used for the NAT table.
+
+ \remarks:
+ Currently the SVIP NAT module is statically linked into the Linux kernel
+ therefore this routine cannot be executed.
+ *******************************************************************************/
+static void __exit fini(void)
+{
+ MOD_DEC_USE_COUNT;
+
+ /* unregister SVIP NAT configuration device */
+ misc_deregister(&SVIP_NAT_miscdev);
+
+ /* release memory of SVIP NAT table */
+ if (pNatTable != NULL)
+ {
+ kfree (pNatTable);
+ }
+}
+
+module_init(init);
+module_exit(fini);