Diffstat (limited to 'target/linux/ramips/patches-4.14')
-rw-r--r--  target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch | 861
-rw-r--r--  target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch | 82
-rw-r--r--  target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch | 200
-rw-r--r--  target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch | 21
-rw-r--r--  target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch | 28
-rw-r--r--  target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch | 28
-rw-r--r--  target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch | 25
-rw-r--r--  target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch | 166
-rw-r--r--  target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch | 524
-rw-r--r--  target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch | 59
-rw-r--r--  target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch | 430
-rw-r--r--  target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch | 405
-rw-r--r--  target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch | 246
-rw-r--r--  target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch | 29
-rw-r--r--  target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch | 59
-rw-r--r--  target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch | 29
-rw-r--r--  target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch | 70
-rw-r--r--  target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch | 4480
-rw-r--r--  target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch | 42
-rw-r--r--  target/linux/ramips/patches-4.14/0040-nand-hack.patch | 65
-rw-r--r--  target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch | 44
-rw-r--r--  target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch | 574
-rw-r--r--  target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch | 524
-rw-r--r--  target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch | 507
-rw-r--r--  target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch | 473
-rw-r--r--  target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch | 4831
-rw-r--r--  target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch | 1757
-rw-r--r--  target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch | 1046
-rw-r--r--  target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch | 22
-rw-r--r--  target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch | 217
-rw-r--r--  target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch | 214
-rw-r--r--  target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch | 124
-rw-r--r--  target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch | 15
-rw-r--r--  target/linux/ramips/patches-4.14/0070-weak_reordering.patch | 10
-rw-r--r--  target/linux/ramips/patches-4.14/0090-ethernet.patch | 41
-rw-r--r--  target/linux/ramips/patches-4.14/0098-disable_cm.patch | 19
-rw-r--r--  target/linux/ramips/patches-4.14/0099-pci-mt7620.patch | 10
-rw-r--r--  target/linux/ramips/patches-4.14/0100-prom_fixes.patch | 66
-rw-r--r--  target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch | 97
-rw-r--r--  target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch | 61
-rw-r--r--  target/linux/ramips/patches-4.14/101-mt7621-timer.patch | 98
-rw-r--r--  target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch | 14
-rw-r--r--  target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch | 64
-rw-r--r--  target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch | 10
-rw-r--r--  target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch | 21
45 files changed, 18708 insertions, 0 deletions
diff --git a/target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch b/target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch
new file mode 100644
index 0000000000..0c96d6b984
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch
@@ -0,0 +1,861 @@
+From fec11d4e8dc5cc79bcd7c8fd55038ac21ac39965 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 16 Mar 2014 05:22:39 +0000
+Subject: [PATCH 04/53] MIPS: ralink: add MT7621 pcie driver
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/pci/Makefile | 1 +
+ arch/mips/pci/pci-mt7621.c | 813 ++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 814 insertions(+)
+ create mode 100644 arch/mips/pci/pci-mt7621.c
+
+--- a/arch/mips/pci/Makefile
++++ b/arch/mips/pci/Makefile
+@@ -47,6 +47,7 @@ obj-$(CONFIG_SNI_RM) += fixup-sni.o ops
+ obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
+ obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
+ obj-$(CONFIG_SOC_MT7620) += pci-mt7620.o
++obj-$(CONFIG_SOC_MT7621) += pci-mt7621.o
+ obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o
+ obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
+ obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
+--- /dev/null
++++ b/arch/mips/pci/pci-mt7621.c
+@@ -0,0 +1,836 @@
++/**************************************************************************
++ *
++ * BRIEF MODULE DESCRIPTION
++ * PCI init for Ralink RT2880 solution
++ *
++ * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw)
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ *
++ **************************************************************************
++ * May 2007 Bruce Chang
++ * Initial Release
++ *
++ * May 2009 Bruce Chang
++ * support RT2880/RT3883 PCIe
++ *
++ * May 2011 Bruce Chang
++ * support RT6855/MT7620 PCIe
++ *
++ **************************************************************************
++ */
++
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/version.h>
++#include <asm/pci.h>
++#include <asm/io.h>
++#include <asm/mips-cm.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/of.h>
++#include <linux/of_pci.h>
++#include <linux/of_irq.h>
++#include <linux/platform_device.h>
++
++#include <ralink_regs.h>
++
++extern void pcie_phy_init(void);
++extern void chk_phy_pll(void);
++
++/*
++ * These functions and structures provide the BIOS scan and mapping of the PCI
++ * devices.
++ */
++
++#define CONFIG_PCIE_PORT0
++#define CONFIG_PCIE_PORT1
++#define CONFIG_PCIE_PORT2
++#define RALINK_PCIE0_CLK_EN (1<<24)
++#define RALINK_PCIE1_CLK_EN (1<<25)
++#define RALINK_PCIE2_CLK_EN (1<<26)
++
++#define RALINK_PCI_CONFIG_ADDR 0x20
++#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24
++#define RALINK_INT_PCIE0 pcie_irq[0]
++#define RALINK_INT_PCIE1 pcie_irq[1]
++#define RALINK_INT_PCIE2 pcie_irq[2]
++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028)
++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C)
++#define RALINK_PCIE0_RST (1<<24)
++#define RALINK_PCIE1_RST (1<<25)
++#define RALINK_PCIE2_RST (1<<26)
++#define RALINK_SYSCTL_BASE 0xBE000000
++
++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000)
++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C)
++#define RALINK_PCI_BASE 0xBE140000
++
++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
++#define RT6855_PCIE0_OFFSET 0x2000
++#define RT6855_PCIE1_OFFSET 0x3000
++#define RT6855_PCIE2_OFFSET 0x4000
++
++#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010)
++#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018)
++#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030)
++#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034)
++#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038)
++#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050)
++#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060)
++#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064)
++
++#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010)
++#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018)
++#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030)
++#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034)
++#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038)
++#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050)
++#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060)
++#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064)
++
++#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010)
++#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018)
++#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030)
++#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034)
++#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038)
++#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050)
++#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060)
++#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064)
++
++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000)
++#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000)
++
++
++#define MV_WRITE(ofs, data) \
++ *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data)
++#define MV_READ(ofs, data) \
++ *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
++#define MV_READ_DATA(ofs) \
++ le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs)))
++
++#define MV_WRITE_16(ofs, data) \
++ *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data)
++#define MV_READ_16(ofs, data) \
++ *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs)))
++
++#define MV_WRITE_8(ofs, data) \
++ *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data
++#define MV_READ_8(ofs, data) \
++ *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs))
++
++
++
++#define RALINK_PCI_MM_MAP_BASE 0x60000000
++#define RALINK_PCI_IO_MAP_BASE 0x1e160000
++
++#define RALINK_SYSTEM_CONTROL_BASE 0xbe000000
++#define GPIO_PERST
++#define ASSERT_SYSRST_PCIE(val) do { \
++ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
++ RALINK_RSTCTRL |= val; \
++ else \
++ RALINK_RSTCTRL &= ~val; \
++ } while(0)
++#define DEASSERT_SYSRST_PCIE(val) do { \
++ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \
++ RALINK_RSTCTRL &= ~val; \
++ else \
++ RALINK_RSTCTRL |= val; \
++ } while(0)
++#define RALINK_SYSCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x14)
++#define RALINK_CLKCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x30)
++#define RALINK_RSTCTRL *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x34)
++#define RALINK_GPIOMODE *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x60)
++#define RALINK_PCIE_CLK_GEN *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x7c)
++#define RALINK_PCIE_CLK_GEN1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x80)
++#define PPLL_CFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x9c)
++#define PPLL_DRV *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0xa0)
++//RALINK_SYSCFG1 bit
++#define RALINK_PCI_HOST_MODE_EN (1<<7)
++#define RALINK_PCIE_RC_MODE_EN (1<<8)
++//RALINK_RSTCTRL bit
++#define RALINK_PCIE_RST (1<<23)
++#define RALINK_PCI_RST (1<<24)
++//RALINK_CLKCFG1 bit
++#define RALINK_PCI_CLK_EN (1<<19)
++#define RALINK_PCIE_CLK_EN (1<<21)
++//RALINK_GPIOMODE bit
++#define PCI_SLOTx2 (1<<11)
++#define PCI_SLOTx1 (2<<11)
++//MTK PCIE PLL bit
++#define PDRV_SW_SET (1<<31)
++#define LC_CKDRVPD_ (1<<19)
++
++#define MEMORY_BASE 0x0
++static int pcie_link_status = 0;
++
++#define PCI_ACCESS_READ_1 0
++#define PCI_ACCESS_READ_2 1
++#define PCI_ACCESS_READ_4 2
++#define PCI_ACCESS_WRITE_1 3
++#define PCI_ACCESS_WRITE_2 4
++#define PCI_ACCESS_WRITE_4 5
++
++static int pcie_irq[3];
++
++static int config_access(unsigned char access_type, struct pci_bus *bus,
++ unsigned int devfn, unsigned int where, u32 * data)
++{
++ unsigned int slot = PCI_SLOT(devfn);
++ u8 func = PCI_FUNC(devfn);
++ uint32_t address_reg, data_reg;
++ unsigned int address;
++
++ address_reg = RALINK_PCI_CONFIG_ADDR;
++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
++
++ address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000;
++ MV_WRITE(address_reg, address);
++
++ switch(access_type) {
++ case PCI_ACCESS_WRITE_1:
++ MV_WRITE_8(data_reg+(where&0x3), *data);
++ break;
++ case PCI_ACCESS_WRITE_2:
++ MV_WRITE_16(data_reg+(where&0x3), *data);
++ break;
++ case PCI_ACCESS_WRITE_4:
++ MV_WRITE(data_reg, *data);
++ break;
++ case PCI_ACCESS_READ_1:
++ MV_READ_8( data_reg+(where&0x3), data);
++ break;
++ case PCI_ACCESS_READ_2:
++ MV_READ_16(data_reg+(where&0x3), data);
++ break;
++ case PCI_ACCESS_READ_4:
++ MV_READ(data_reg, data);
++ break;
++ default:
++ printk("no specify access type\n");
++ break;
++ }
++ return 0;
++}
++
++static int
++read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val)
++{
++ return config_access(PCI_ACCESS_READ_1, bus, devfn, (unsigned int)where, (u32 *)val);
++}
++
++static int
++read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val)
++{
++ return config_access(PCI_ACCESS_READ_2, bus, devfn, (unsigned int)where, (u32 *)val);
++}
++
++static int
++read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val)
++{
++ return config_access(PCI_ACCESS_READ_4, bus, devfn, (unsigned int)where, (u32 *)val);
++}
++
++static int
++write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
++{
++ if (config_access(PCI_ACCESS_WRITE_1, bus, devfn, (unsigned int)where, (u32 *)&val))
++ return -1;
++
++ return PCIBIOS_SUCCESSFUL;
++}
++
++static int
++write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val)
++{
++ if (config_access(PCI_ACCESS_WRITE_2, bus, devfn, where, (u32 *)&val))
++ return -1;
++
++ return PCIBIOS_SUCCESSFUL;
++}
++
++static int
++write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
++{
++ if (config_access(PCI_ACCESS_WRITE_4, bus, devfn, where, &val))
++ return -1;
++
++ return PCIBIOS_SUCCESSFUL;
++}
++
++
++static int
++pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val)
++{
++ switch (size) {
++ case 1:
++ return read_config_byte(bus, devfn, where, (u8 *) val);
++ case 2:
++ return read_config_word(bus, devfn, where, (u16 *) val);
++ default:
++ return read_config_dword(bus, devfn, where, val);
++ }
++}
++
++static int
++pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
++{
++ switch (size) {
++ case 1:
++ return write_config_byte(bus, devfn, where, (u8) val);
++ case 2:
++ return write_config_word(bus, devfn, where, (u16) val);
++ default:
++ return write_config_dword(bus, devfn, where, val);
++ }
++}
++
++struct pci_ops mt7621_pci_ops= {
++ .read = pci_config_read,
++ .write = pci_config_write,
++};
++
++static struct resource mt7621_res_pci_mem1 = {
++ .name = "PCI MEM1",
++ .start = RALINK_PCI_MM_MAP_BASE,
++ .end = (u32)((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)),
++ .flags = IORESOURCE_MEM,
++};
++static struct resource mt7621_res_pci_io1 = {
++ .name = "PCI I/O1",
++ .start = RALINK_PCI_IO_MAP_BASE,
++ .end = (u32)((RALINK_PCI_IO_MAP_BASE + (unsigned char *)0x0ffff)),
++ .flags = IORESOURCE_IO,
++};
++
++static struct pci_controller mt7621_controller = {
++ .pci_ops = &mt7621_pci_ops,
++ .mem_resource = &mt7621_res_pci_mem1,
++ .io_resource = &mt7621_res_pci_io1,
++ .mem_offset = 0x00000000UL,
++ .io_offset = 0x00000000UL,
++ .io_map_base = 0xa0000000,
++};
++
++static void
++read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val)
++{
++ unsigned int address_reg, data_reg, address;
++
++ address_reg = RALINK_PCI_CONFIG_ADDR;
++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
++ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ;
++ MV_WRITE(address_reg, address);
++ MV_READ(data_reg, val);
++ return;
++}
++
++static void
++write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val)
++{
++ unsigned int address_reg, data_reg, address;
++
++ address_reg = RALINK_PCI_CONFIG_ADDR;
++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG;
++ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ;
++ MV_WRITE(address_reg, address);
++ MV_WRITE(data_reg, val);
++ return;
++}
++
++
++int __init
++pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
++{
++ u16 cmd;
++ u32 val;
++ int irq = 0;
++
++ if ((dev->bus->number == 0) && (slot == 0)) {
++ write_config(0, 0, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
++ read_config(0, 0, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
++ printk("BAR0 at slot 0 = %x\n", val);
++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
++ } else if((dev->bus->number == 0) && (slot == 0x1)) {
++ write_config(0, 1, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
++ read_config(0, 1, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
++ printk("BAR0 at slot 1 = %x\n", val);
++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
++ } else if((dev->bus->number == 0) && (slot == 0x2)) {
++ write_config(0, 2, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE);
++ read_config(0, 2, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val);
++ printk("BAR0 at slot 2 = %x\n", val);
++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
++ } else if ((dev->bus->number == 1) && (slot == 0x0)) {
++ switch (pcie_link_status) {
++ case 2:
++ case 6:
++ irq = RALINK_INT_PCIE1;
++ break;
++ case 4:
++ irq = RALINK_INT_PCIE2;
++ break;
++ default:
++ irq = RALINK_INT_PCIE0;
++ }
++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
++ } else if ((dev->bus->number == 2) && (slot == 0x0)) {
++ switch (pcie_link_status) {
++ case 5:
++ case 6:
++ irq = RALINK_INT_PCIE2;
++ break;
++ default:
++ irq = RALINK_INT_PCIE1;
++ }
++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
++ } else if ((dev->bus->number == 2) && (slot == 0x1)) {
++ switch (pcie_link_status) {
++ case 5:
++ case 6:
++ irq = RALINK_INT_PCIE2;
++ break;
++ default:
++ irq = RALINK_INT_PCIE1;
++ }
++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
++ } else if ((dev->bus->number ==3) && (slot == 0x0)) {
++ irq = RALINK_INT_PCIE2;
++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
++ } else if ((dev->bus->number ==3) && (slot == 0x1)) {
++ irq = RALINK_INT_PCIE2;
++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
++ } else if ((dev->bus->number ==3) && (slot == 0x2)) {
++ irq = RALINK_INT_PCIE2;
++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq);
++ } else {
++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot);
++ return 0;
++ }
++
++ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14
++ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
++ pci_write_config_word(dev, PCI_COMMAND, cmd);
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
++ return irq;
++}
++
++void
++set_pcie_phy(u32 *addr, int start_b, int bits, int val)
++{
++// printk("0x%p:", addr);
++// printk(" %x", *addr);
++ *(unsigned int *)(addr) &= ~(((1<<bits) - 1)<<start_b);
++ *(unsigned int *)(addr) |= val << start_b;
++// printk(" -> %x\n", *addr);
++}
++
++void
++bypass_pipe_rst(void)
++{
++#if defined (CONFIG_PCIE_PORT0)
++ /* PCIe Port 0 */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
++#endif
++#if defined (CONFIG_PCIE_PORT1)
++ /* PCIe Port 1 */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ /* PCIe Port 2 */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4]
++#endif
++}
++
++void
++set_phy_for_ssc(void)
++{
++ unsigned long reg = (*(volatile u32 *)(RALINK_SYSCTL_BASE + 0x10));
++
++ reg = (reg >> 6) & 0x7;
++#if defined (CONFIG_PCIE_PORT0) || defined (CONFIG_PCIE_PORT1)
++ /* Set PCIe Port0 & Port1 PHY to disable SSC */
++ /* Debug Xtal Type */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable
++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
++ printk("***** Xtal 40MHz *****\n");
++ } else { // 25MHz | 20MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
++ if (reg >= 6) {
++ printk("***** Xtal 25MHz *****\n");
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
++ } else {
++ printk("***** Xtal 20MHz *****\n");
++ }
++ }
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
++ }
++ /* Enable PHY and disable force mode */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ /* Set PCIe Port2 PHY to disable SSC */
++ /* Debug Xtal Type */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable
++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
++ } else { // 25MHz | 20MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode)
++ if (reg >= 6) { // 25MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode)
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial
++ }
++ }
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN
++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv
++ }
++ /* Enable PHY and disable force mode */
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable
++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control
++#endif
++}
++
++void setup_cm_memory_region(struct resource *mem_resource)
++{
++ resource_size_t mask;
++ if (mips_cps_numiocu(0)) {
++ /* FIXME: hardware doesn't accept mask values with 1s after
++ 0s (e.g. 0xffef), so it would be great to warn if that's
++ about to happen */
++ mask = ~(mem_resource->end - mem_resource->start);
++
++ write_gcr_reg1_base(mem_resource->start);
++ write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
++ printk("PCI coherence region base: 0x%08lx, mask/settings: 0x%08lx\n",
++ read_gcr_reg1_base(),
++ read_gcr_reg1_mask());
++ }
++}
++
++static int mt7621_pci_probe(struct platform_device *pdev)
++{
++ unsigned long val = 0;
++ int i;
++
++ for (i = 0; i < 3; i++)
++ pcie_irq[i] = irq_of_parse_and_map(pdev->dev.of_node, i);
++
++ iomem_resource.start = 0;
++ iomem_resource.end= ~0;
++ ioport_resource.start= 0;
++ ioport_resource.end = ~0;
++
++#if defined (CONFIG_PCIE_PORT0)
++ val = RALINK_PCIE0_RST;
++#endif
++#if defined (CONFIG_PCIE_PORT1)
++ val |= RALINK_PCIE1_RST;
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ val |= RALINK_PCIE2_RST;
++#endif
++ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST | RALINK_PCIE1_RST | RALINK_PCIE2_RST);
++ printk("pull PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL);
++#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/
++ *(unsigned int *)(0xbe000060) &= ~(0x3<<10 | 0x3<<3);
++ *(unsigned int *)(0xbe000060) |= 0x1<<10 | 0x1<<3;
++ mdelay(100);
++ *(unsigned int *)(0xbe000600) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // use GPIO19/GPIO8/GPIO7 (PERST_N/UART_RXD3/UART_TXD3)
++ mdelay(100);
++ *(unsigned int *)(0xbe000620) &= ~(0x1<<19 | 0x1<<8 | 0x1<<7); // clear DATA
++
++ mdelay(100);
++#else
++ *(unsigned int *)(0xbe000060) &= ~0x00000c00;
++#endif
++#if defined (CONFIG_PCIE_PORT0)
++ val = RALINK_PCIE0_RST;
++#endif
++#if defined (CONFIG_PCIE_PORT1)
++ val |= RALINK_PCIE1_RST;
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ val |= RALINK_PCIE2_RST;
++#endif
++ DEASSERT_SYSRST_PCIE(val);
++ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL);
++
++ if ((*(unsigned int *)(0xbe00000c)&0xFFFF) == 0x0101) // MT7621 E2
++ bypass_pipe_rst();
++ set_phy_for_ssc();
++ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL);
++
++#if defined (CONFIG_PCIE_PORT0)
++ read_config(0, 0, 0, 0x70c, &val);
++ printk("Port 0 N_FTS = %x\n", (unsigned int)val);
++#endif
++#if defined (CONFIG_PCIE_PORT1)
++ read_config(0, 1, 0, 0x70c, &val);
++ printk("Port 1 N_FTS = %x\n", (unsigned int)val);
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ read_config(0, 2, 0, 0x70c, &val);
++ printk("Port 2 N_FTS = %x\n", (unsigned int)val);
++#endif
++
++ RALINK_RSTCTRL = (RALINK_RSTCTRL | RALINK_PCIE_RST);
++ RALINK_SYSCFG1 &= ~(0x30);
++ RALINK_SYSCFG1 |= (2<<4);
++ RALINK_PCIE_CLK_GEN &= 0x7fffffff;
++ RALINK_PCIE_CLK_GEN1 &= 0x80ffffff;
++ RALINK_PCIE_CLK_GEN1 |= 0xa << 24;
++ RALINK_PCIE_CLK_GEN |= 0x80000000;
++ mdelay(50);
++ RALINK_RSTCTRL = (RALINK_RSTCTRL & ~RALINK_PCIE_RST);
++
++
++#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/
++ *(unsigned int *)(0xbe000620) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // set DATA
++ mdelay(100);
++#else
++ RALINK_PCI_PCICFG_ADDR &= ~(1<<1); //de-assert PERST
++#endif
++ mdelay(500);
++
++
++ mdelay(500);
++#if defined (CONFIG_PCIE_PORT0)
++ if(( RALINK_PCI0_STATUS & 0x1) == 0)
++ {
++ printk("PCIE0 no card, disable it(RST&CLK)\n");
++ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST);
++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE0_CLK_EN);
++ pcie_link_status &= ~(1<<0);
++ } else {
++ pcie_link_status |= 1<<0;
++ RALINK_PCI_PCIMSK_ADDR |= (1<<20); // enable pcie1 interrupt
++ }
++#endif
++#if defined (CONFIG_PCIE_PORT1)
++ if(( RALINK_PCI1_STATUS & 0x1) == 0)
++ {
++ printk("PCIE1 no card, disable it(RST&CLK)\n");
++ ASSERT_SYSRST_PCIE(RALINK_PCIE1_RST);
++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE1_CLK_EN);
++ pcie_link_status &= ~(1<<1);
++ } else {
++ pcie_link_status |= 1<<1;
++ RALINK_PCI_PCIMSK_ADDR |= (1<<21); // enable pcie1 interrupt
++ }
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ if (( RALINK_PCI2_STATUS & 0x1) == 0) {
++ printk("PCIE2 no card, disable it(RST&CLK)\n");
++ ASSERT_SYSRST_PCIE(RALINK_PCIE2_RST);
++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE2_CLK_EN);
++ pcie_link_status &= ~(1<<2);
++ } else {
++ pcie_link_status |= 1<<2;
++ RALINK_PCI_PCIMSK_ADDR |= (1<<22); // enable pcie2 interrupt
++ }
++#endif
++ if (pcie_link_status == 0)
++ return 0;
++
++/*
++pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num
++3'b000 x x x
++3'b001 x x 0
++3'b010 x 0 x
++3'b011 x 1 0
++3'b100 0 x x
++3'b101 1 x 0
++3'b110 1 0 x
++3'b111 2 1 0
++*/
++ switch(pcie_link_status) {
++ case 2:
++ RALINK_PCI_PCICFG_ADDR &= ~0x00ff0000;
++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0
++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1
++ break;
++ case 4:
++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000;
++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0
++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1
++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 24; //port2
++ break;
++ case 5:
++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000;
++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 16; //port0
++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1
++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2
++ break;
++ case 6:
++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000;
++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 16; //port0
++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1
++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2
++ break;
++ }
++ printk(" -> %x\n", RALINK_PCI_PCICFG_ADDR);
++ //printk(" RALINK_PCI_ARBCTL = %x\n", RALINK_PCI_ARBCTL);
++
++/*
++ ioport_resource.start = mt7621_res_pci_io1.start;
++ ioport_resource.end = mt7621_res_pci_io1.end;
++*/
++
++ RALINK_PCI_MEMBASE = 0xffffffff; //RALINK_PCI_MM_MAP_BASE;
++ RALINK_PCI_IOBASE = RALINK_PCI_IO_MAP_BASE;
++
++#if defined (CONFIG_PCIE_PORT0)
++ //PCIe0
++ if((pcie_link_status & 0x1) != 0) {
++ RALINK_PCI0_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
++ RALINK_PCI0_IMBASEBAR0_ADDR = MEMORY_BASE;
++ RALINK_PCI0_CLASS = 0x06040001;
++ printk("PCIE0 enabled\n");
++ }
++#endif
++#if defined (CONFIG_PCIE_PORT1)
++ //PCIe1
++ if ((pcie_link_status & 0x2) != 0) {
++ RALINK_PCI1_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
++ RALINK_PCI1_IMBASEBAR0_ADDR = MEMORY_BASE;
++ RALINK_PCI1_CLASS = 0x06040001;
++ printk("PCIE1 enabled\n");
++ }
++#endif
++#if defined (CONFIG_PCIE_PORT2)
++ //PCIe2
++ if ((pcie_link_status & 0x4) != 0) {
++ RALINK_PCI2_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE
++ RALINK_PCI2_IMBASEBAR0_ADDR = MEMORY_BASE;
++ RALINK_PCI2_CLASS = 0x06040001;
++ printk("PCIE2 enabled\n");
++ }
++#endif
++
++
++ switch(pcie_link_status) {
++ case 7:
++ read_config(0, 2, 0, 0x4, &val);
++ write_config(0, 2, 0, 0x4, val|0x4);
++ // write_config(0, 1, 0, 0x4, val|0x7);
++ read_config(0, 2, 0, 0x70c, &val);
++ val &= ~(0xff)<<8;
++ val |= 0x50<<8;
++ write_config(0, 2, 0, 0x70c, val);
++ case 3:
++ case 5:
++ case 6:
++ read_config(0, 1, 0, 0x4, &val);
++ write_config(0, 1, 0, 0x4, val|0x4);
++ // write_config(0, 1, 0, 0x4, val|0x7);
++ read_config(0, 1, 0, 0x70c, &val);
++ val &= ~(0xff)<<8;
++ val |= 0x50<<8;
++ write_config(0, 1, 0, 0x70c, val);
++ default:
++ read_config(0, 0, 0, 0x4, &val);
++ write_config(0, 0, 0, 0x4, val|0x4); //bus master enable
++ // write_config(0, 0, 0, 0x4, val|0x7); //bus master enable
++ read_config(0, 0, 0, 0x70c, &val);
++ val &= ~(0xff)<<8;
++ val |= 0x50<<8;
++ write_config(0, 0, 0, 0x70c, val);
++ }
++
++ pci_load_of_ranges(&mt7621_controller, pdev->dev.of_node);
++ setup_cm_memory_region(mt7621_controller.mem_resource);
++ register_pci_controller(&mt7621_controller);
++ return 0;
++
++}
++
++int pcibios_plat_dev_init(struct pci_dev *dev)
++{
++ return 0;
++}
++
++static const struct of_device_id mt7621_pci_ids[] = {
++ { .compatible = "mediatek,mt7621-pci" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
++
++static struct platform_driver mt7621_pci_driver = {
++ .probe = mt7621_pci_probe,
++ .driver = {
++ .name = "mt7621-pci",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(mt7621_pci_ids),
++ },
++};
++
++static int __init mt7621_pci_init(void)
++{
++ return platform_driver_register(&mt7621_pci_driver);
++}
++
++arch_initcall(mt7621_pci_init);
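Editor's note on the patch above: config_access(), read_config() and write_config() all compose the same Type-1-style configuration address, with the extended register bits [11:8] of "where" placed in bits [27:24] and the enable bit in bit 31. The following standalone C sketch (illustrative only, not part of the patch) reproduces that encoding so the field placement can be checked outside the kernel.

#include <stdio.h>

/* Sketch of the config-address encoding used by config_access() above:
 * bit 31 = enable, bits [27:24] = extended register bits [11:8],
 * [23:16] = bus, [15:11] = device/slot, [10:8] = function,
 * [7:2] = dword-aligned register offset. */
static unsigned int mt7621_cfg_addr(unsigned int bus, unsigned int slot,
                                    unsigned int func, unsigned int where)
{
	return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) |
	       (func << 8) | (where & 0xFC) | 0x80000000u;
}

int main(void)
{
	/* e.g. bus 0, slot 1, func 0, PCI_BASE_ADDRESS_0 (offset 0x10) */
	printf("0x%08x\n", mt7621_cfg_addr(0, 1, 0, 0x10));
	return 0;
}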
diff --git a/target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch b/target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch
new file mode 100644
index 0000000000..773c74f4e9
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch
@@ -0,0 +1,82 @@
+From ce3d4a4111a5f7e6b4e74bceae5faa6ce388e8ec Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 14 Jul 2013 23:08:11 +0200
+Subject: [PATCH 05/53] MIPS: use set_mode() to enable/disable the cevt-r4k
+ irq
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/ralink/Kconfig | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -1,12 +1,17 @@
+ # SPDX-License-Identifier: GPL-2.0
+ if RALINK
+
++config CEVT_SYSTICK_QUIRK
++ bool
++ default n
++
+ config CLKEVT_RT3352
+ bool
+ depends on SOC_RT305X || SOC_MT7620
+ default y
+ select TIMER_OF
+ select CLKSRC_MMIO
++ select CEVT_SYSTICK_QUIRK
+
+ config RALINK_ILL_ACC
+ bool
+--- a/arch/mips/kernel/cevt-r4k.c
++++ b/arch/mips/kernel/cevt-r4k.c
+@@ -15,6 +15,26 @@
+ #include <asm/time.h>
+ #include <asm/cevt-r4k.h>
+
++static int mips_state_oneshot(struct clock_event_device *evt)
++{
++ if (!cp0_timer_irq_installed) {
++ cp0_timer_irq_installed = 1;
++ setup_irq(evt->irq, &c0_compare_irqaction);
++ }
++
++ return 0;
++}
++
++static int mips_state_shutdown(struct clock_event_device *evt)
++{
++ if (cp0_timer_irq_installed) {
++ cp0_timer_irq_installed = 0;
++ remove_irq(evt->irq, &c0_compare_irqaction);
++ }
++
++ return 0;
++}
++
+ static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+ {
+@@ -281,17 +301,21 @@ int r4k_clockevent_init(void)
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
++ cd->set_state_shutdown = mips_state_shutdown;
++ cd->set_state_oneshot = mips_state_oneshot;
+ cd->set_next_event = mips_next_event;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
+
++#ifndef CONFIG_CEVT_SYSTICK_QUIRK
+ if (cp0_timer_irq_installed)
+ return 0;
+
+ cp0_timer_irq_installed = 1;
+
+ setup_irq(irq, &c0_compare_irqaction);
++#endif
+
+ return 0;
+ }
diff --git a/target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch b/target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch
new file mode 100644
index 0000000000..90215fbf86
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch
@@ -0,0 +1,200 @@
+From bd30f19a006fb52bac80c6463c49dd2f4159f4ac Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 28 Jul 2013 16:26:41 +0200
+Subject: [PATCH 06/53] MIPS: ralink: add cpu frequency scaling
+
+This feature will break udelay() and cause the delay loop to have longer delays
+when the frequency is scaled causing a performance hit.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/ralink/cevt-rt3352.c | 38 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+
+--- a/arch/mips/ralink/cevt-rt3352.c
++++ b/arch/mips/ralink/cevt-rt3352.c
+@@ -29,6 +29,10 @@
+ /* enable the counter */
+ #define CFG_CNT_EN 0x1
+
++/* mt7620 frequency scaling defines */
++#define CLK_LUT_CFG 0x40
++#define SLEEP_EN BIT(31)
++
+ struct systick_device {
+ void __iomem *membase;
+ struct clock_event_device dev;
+@@ -36,21 +40,53 @@ struct systick_device {
+ int freq_scale;
+ };
+
++static void (*systick_freq_scaling)(struct systick_device *sdev, int status);
++
+ static int systick_set_oneshot(struct clock_event_device *evt);
+ static int systick_shutdown(struct clock_event_device *evt);
+
++static inline void mt7620_freq_scaling(struct systick_device *sdev, int status)
++{
++ if (sdev->freq_scale == status)
++ return;
++
++ sdev->freq_scale = status;
++
++ pr_info("%s: %s autosleep mode\n", sdev->dev.name,
++ (status) ? ("enable") : ("disable"));
++ if (status)
++ rt_sysc_w32(rt_sysc_r32(CLK_LUT_CFG) | SLEEP_EN, CLK_LUT_CFG);
++ else
++ rt_sysc_w32(rt_sysc_r32(CLK_LUT_CFG) & ~SLEEP_EN, CLK_LUT_CFG);
++}
++
++static inline unsigned int read_count(struct systick_device *sdev)
++{
++ return ioread32(sdev->membase + SYSTICK_COUNT);
++}
++
++static inline unsigned int read_compare(struct systick_device *sdev)
++{
++ return ioread32(sdev->membase + SYSTICK_COMPARE);
++}
++
++static inline void write_compare(struct systick_device *sdev, unsigned int val)
++{
++ iowrite32(val, sdev->membase + SYSTICK_COMPARE);
++}
++
+ static int systick_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+ {
+ struct systick_device *sdev;
+- u32 count;
++ int res;
+
+ sdev = container_of(evt, struct systick_device, dev);
+- count = ioread32(sdev->membase + SYSTICK_COUNT);
+- count = (count + delta) % SYSTICK_FREQ;
+- iowrite32(count, sdev->membase + SYSTICK_COMPARE);
++ delta += read_count(sdev);
++ write_compare(sdev, delta);
++ res = ((int)(read_count(sdev) - delta) >= 0) ? -ETIME : 0;
+
+- return 0;
++ return res;
+ }
+
+ static void systick_event_handler(struct clock_event_device *dev)
+@@ -60,20 +96,25 @@ static void systick_event_handler(struct
+
+ static irqreturn_t systick_interrupt(int irq, void *dev_id)
+ {
+- struct clock_event_device *dev = (struct clock_event_device *) dev_id;
++ int ret = 0;
++ struct clock_event_device *cdev;
++ struct systick_device *sdev;
+
+- dev->event_handler(dev);
++ if (read_c0_cause() & STATUSF_IP7) {
++ cdev = (struct clock_event_device *) dev_id;
++ sdev = container_of(cdev, struct systick_device, dev);
++
++ /* Clear Count/Compare Interrupt */
++ write_compare(sdev, read_compare(sdev));
++ cdev->event_handler(cdev);
++ ret = 1;
++ }
+
+- return IRQ_HANDLED;
++ return IRQ_RETVAL(ret);
+ }
+
+ static struct systick_device systick = {
+ .dev = {
+- /*
+- * cevt-r4k uses 300, make sure systick
+- * gets used if available
+- */
+- .rating = 310,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = systick_next_event,
+ .set_state_shutdown = systick_shutdown,
+@@ -95,9 +136,15 @@ static int systick_shutdown(struct clock
+ sdev = container_of(evt, struct systick_device, dev);
+
+ if (sdev->irq_requested)
+- free_irq(systick.dev.irq, &systick_irqaction);
++ remove_irq(systick.dev.irq, &systick_irqaction);
+ sdev->irq_requested = 0;
+- iowrite32(0, systick.membase + SYSTICK_CONFIG);
++ iowrite32(CFG_CNT_EN, systick.membase + SYSTICK_CONFIG);
++
++ if (systick_freq_scaling)
++ systick_freq_scaling(sdev, 0);
++
++ if (systick_freq_scaling)
++ systick_freq_scaling(sdev, 1);
+
+ return 0;
+ }
+@@ -117,34 +164,48 @@ static int systick_set_oneshot(struct cl
+ return 0;
+ }
+
++static const struct of_device_id systick_match[] = {
++ { .compatible = "ralink,mt7620a-systick", .data = mt7620_freq_scaling},
++ {},
++};
++
+ static int __init ralink_systick_init(struct device_node *np)
+ {
++ const struct of_device_id *match;
++ int rating = 200;
+ int ret;
+
+ systick.membase = of_iomap(np, 0);
+ if (!systick.membase)
+ return -ENXIO;
+
+- systick_irqaction.name = np->name;
+- systick.dev.name = np->name;
+- clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
+- systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
+- systick.dev.max_delta_ticks = 0x7fff;
+- systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
+- systick.dev.min_delta_ticks = 0x3;
++ match = of_match_node(systick_match, np);
++ if (match) {
++ systick_freq_scaling = match->data;
++ /*
++ * cevt-r4k uses 300, make sure systick
++ * gets used if available
++ */
++ rating = 310;
++ }
++
++ /* enable counter than register clock source */
++ iowrite32(CFG_CNT_EN, systick.membase + SYSTICK_CONFIG);
++ clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
++ SYSTICK_FREQ, rating, 16, clocksource_mmio_readl_up);
++
++ /* register clock event */
+ systick.dev.irq = irq_of_parse_and_map(np, 0);
+ if (!systick.dev.irq) {
+ pr_err("%s: request_irq failed", np->name);
+ return -EINVAL;
+ }
+
+- ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+- SYSTICK_FREQ, 301, 16,
+- clocksource_mmio_readl_up);
+- if (ret)
+- return ret;
+-
+- clockevents_register_device(&systick.dev);
++ systick_irqaction.name = np->name;
++ systick.dev.name = np->name;
++ systick.dev.rating = rating;
++ systick.dev.cpumask = cpumask_of(0);
++ clockevents_config_and_register(&systick.dev, SYSTICK_FREQ, 0x3, 0x7fff);
+
+ pr_info("%s: running - mult: %d, shift: %d\n",
+ np->name, systick.dev.mult, systick.dev.shift);
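Editor's note: the reworked systick_next_event() above no longer wraps the compare value modulo SYSTICK_FREQ; it programs count+delta and returns -ETIME if the counter has already passed the new compare value, using a signed difference so the check stays correct across counter wrap-around. A minimal standalone sketch of that idiom (for illustration only):

#include <stdint.h>
#include <errno.h>

/* Returns -ETIME if 'now' has already reached or passed 'deadline'.
 * The signed evaluation of (now - deadline) keeps the comparison
 * correct even after the free-running counter wraps. */
static int deadline_missed(uint32_t now, uint32_t deadline)
{
	return ((int32_t)(now - deadline) >= 0) ? -ETIME : 0;
}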
diff --git a/target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch b/target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch
new file mode 100644
index 0000000000..c05ee8018d
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch
@@ -0,0 +1,21 @@
+From 67b7bff0fd364c194e653f69baa623ba2141bd4c Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 4 Aug 2014 18:46:02 +0200
+Subject: [PATCH 07/53] MIPS: ralink: copy the commandline from the devicetree
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/ralink/of.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -82,6 +82,8 @@ void __init plat_mem_setup(void)
+
+ __dt_setup_arch(dtb);
+
++ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
++
+ of_scan_flat_dt(early_init_dt_find_memory, NULL);
+ if (memory_dtb)
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
diff --git a/target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch b/target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch
new file mode 100644
index 0000000000..6ca15fe32e
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch
@@ -0,0 +1,28 @@
+From 7768798964eb0e4f95eaecffb93b5d0ca28a38af Mon Sep 17 00:00:00 2001
+From: Daniel Golle <daniel@makrotopia.org>
+Date: Sat, 3 Jun 2017 20:00:03 +0200
+Subject: [PATCH] MIPS: pci-mt7620: enabled PCIe on MT7688
+To: linux-mips@linux-mips.org,
+ John Crispin <john@phrozen.org>
+Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>,
+ Ralf Baechle <ralf@linux-mips.org>,
+ linux-mediatek@lists.infradead.org
+
+Use PCIe support for MT7628AN also on MT7688.
+Tested on WRTNODE2R.
+
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+---
+ arch/mips/pci/pci-mt7620.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/mips/pci/pci-mt7620.c
++++ b/arch/mips/pci/pci-mt7620.c
+@@ -316,6 +316,7 @@ static int mt7620_pci_probe(struct platf
+ break;
+
+ case MT762X_SOC_MT7628AN:
++ case MT762X_SOC_MT7688:
+ if (mt7628_pci_hw_init(pdev))
+ return -1;
+ break;
diff --git a/target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch b/target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch
new file mode 100644
index 0000000000..528c629649
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch
@@ -0,0 +1,28 @@
+From 5ede027f6c4a57ed25da872420508b7f1168b36b Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 7 Dec 2015 17:15:32 +0100
+Subject: [PATCH 13/53] owrt: hack: fix mt7688 cache issue
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/kernel/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -902,7 +902,6 @@ static void __init arch_mem_init(char **
+ crashk_res.end - crashk_res.start + 1,
+ BOOTMEM_DEFAULT);
+ #endif
+- device_tree_init();
+ sparse_init();
+ plat_swiotlb_setup();
+
+@@ -1018,6 +1017,7 @@ void __init setup_arch(char **cmdline_p)
+
+ cpu_cache_init();
+ paging_init();
++ device_tree_init();
+ }
+
+ unsigned long kernelsp[NR_CPUS];
diff --git a/target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch b/target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch
new file mode 100644
index 0000000000..1dc54ccf23
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch
@@ -0,0 +1,25 @@
+From 9e6ce539092a1dd605a20bf73c655a9de58d8641 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 7 Dec 2015 17:18:05 +0100
+Subject: [PATCH 15/53] arch: mips: do not select illegal access driver by
+ default
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/ralink/Kconfig | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -14,9 +14,9 @@ config CLKEVT_RT3352
+ select CEVT_SYSTICK_QUIRK
+
+ config RALINK_ILL_ACC
+- bool
++ bool "illegal access irq"
+ depends on SOC_RT305X
+- default y
++ default n
+
+ config IRQ_INTC
+ bool
diff --git a/target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch b/target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch
new file mode 100644
index 0000000000..0c1bc73926
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch
@@ -0,0 +1,166 @@
+From 4267880319bc1a2270d352e0ded6d6386242a7ef Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Tue, 12 Aug 2014 20:49:27 +0200
+Subject: [PATCH 24/53] GPIO: add named gpio exports
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++++++++++++++++++++
+ drivers/gpio/gpiolib-sysfs.c | 10 +++++-
+ include/asm-generic/gpio.h | 6 ++++
+ include/linux/gpio/consumer.h | 8 +++++
+ 4 files changed, 91 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib-of.c
++++ b/drivers/gpio/gpiolib-of.c
+@@ -23,6 +23,8 @@
+ #include <linux/pinctrl/pinctrl.h>
+ #include <linux/slab.h>
+ #include <linux/gpio/machine.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
+
+ #include "gpiolib.h"
+
+@@ -506,3 +508,69 @@ void of_gpiochip_remove(struct gpio_chip
+ gpiochip_remove_pin_ranges(chip);
+ of_node_put(chip->of_node);
+ }
++
++static struct of_device_id gpio_export_ids[] = {
++ { .compatible = "gpio-export" },
++ { /* sentinel */ }
++};
++
++static int __init of_gpio_export_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct device_node *cnp;
++ u32 val;
++ int nb = 0;
++
++ for_each_child_of_node(np, cnp) {
++ const char *name = NULL;
++ int gpio;
++ bool dmc;
++ int max_gpio = 1;
++ int i;
++
++ of_property_read_string(cnp, "gpio-export,name", &name);
++
++ if (!name)
++ max_gpio = of_gpio_count(cnp);
++
++ for (i = 0; i < max_gpio; i++) {
++ unsigned flags = 0;
++ enum of_gpio_flags of_flags;
++
++ gpio = of_get_gpio_flags(cnp, i, &of_flags);
++
++ if (of_flags == OF_GPIO_ACTIVE_LOW)
++ flags |= GPIOF_ACTIVE_LOW;
++
++ if (!of_property_read_u32(cnp, "gpio-export,output", &val))
++ flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
++ else
++ flags |= GPIOF_IN;
++
++ if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np)))
++ continue;
++
++ dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change");
++ gpio_export_with_name(gpio, dmc, name);
++ nb++;
++ }
++ }
++
++ dev_info(&pdev->dev, "%d gpio(s) exported\n", nb);
++
++ return 0;
++}
++
++static struct platform_driver gpio_export_driver = {
++ .driver = {
++ .name = "gpio-export",
++ .owner = THIS_MODULE,
++ .of_match_table = of_match_ptr(gpio_export_ids),
++ },
++};
++
++static int __init of_gpio_export_init(void)
++{
++ return platform_driver_probe(&gpio_export_driver, of_gpio_export_probe);
++}
++device_initcall(of_gpio_export_init);
+--- a/drivers/gpio/gpiolib-sysfs.c
++++ b/drivers/gpio/gpiolib-sysfs.c
+@@ -553,7 +553,7 @@ static struct class gpio_class = {
+ *
+ * Returns zero on success, else an error.
+ */
+-int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name)
+ {
+ struct gpio_chip *chip;
+ struct gpio_device *gdev;
+@@ -615,6 +615,8 @@ int gpiod_export(struct gpio_desc *desc,
+ offset = gpio_chip_hwgpio(desc);
+ if (chip->names && chip->names[offset])
+ ioname = chip->names[offset];
++ if (name)
++ ioname = name;
+
+ dev = device_create_with_groups(&gpio_class, &gdev->dev,
+ MKDEV(0, 0), data, gpio_groups,
+@@ -636,6 +638,12 @@ err_unlock:
+ gpiod_dbg(desc, "%s: status %d\n", __func__, status);
+ return status;
+ }
++EXPORT_SYMBOL_GPL(__gpiod_export);
++
++int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
++{
++ return __gpiod_export(desc, direction_may_change, NULL);
++}
+ EXPORT_SYMBOL_GPL(gpiod_export);
+
+ static int match_export(struct device *dev, const void *desc)
+--- a/include/asm-generic/gpio.h
++++ b/include/asm-generic/gpio.h
+@@ -127,6 +127,12 @@ static inline int gpio_export(unsigned g
+ return gpiod_export(gpio_to_desc(gpio), direction_may_change);
+ }
+
++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
++static inline int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name)
++{
++ return __gpiod_export(gpio_to_desc(gpio), direction_may_change, name);
++}
++
+ static inline int gpio_export_link(struct device *dev, const char *name,
+ unsigned gpio)
+ {
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -451,6 +451,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_
+
+ #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
+
++int _gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
+ int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
+ int gpiod_export_link(struct device *dev, const char *name,
+ struct gpio_desc *desc);
+@@ -458,6 +459,13 @@ void gpiod_unexport(struct gpio_desc *de
+
+ #else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
+
++static inline int _gpiod_export(struct gpio_desc *desc,
++ bool direction_may_change,
++ const char *name)
++{
++ return -ENOSYS;
++}
++
+ static inline int gpiod_export(struct gpio_desc *desc,
+ bool direction_may_change)
+ {
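Editor's note: the helper added above can be used either through the new "gpio-export" platform driver (which reads the gpio-export,name, gpio-export,output and gpio-export,direction_may_change properties from child nodes) or called directly from kernel code. A hedged in-kernel usage sketch of the direct call follows; the GPIO number and sysfs name are hypothetical and chosen only for illustration.

#include <linux/gpio.h>

/* Hypothetical example: request line 14 as a low output and export it
 * to sysfs under the name "demo-led" via gpio_export_with_name(). */
static int demo_export_led(void)
{
	int gpio = 14;		/* illustrative GPIO number */
	int err;

	err = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "demo-led");
	if (err)
		return err;

	/* second argument false: direction may not be changed from sysfs */
	return gpio_export_with_name(gpio, false, "demo-led");
}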
diff --git a/target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch b/target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch
new file mode 100644
index 0000000000..3e5dac5376
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch
@@ -0,0 +1,524 @@
+From 7adbe9a88c33c6e362a10b109d963b5500a21f00 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:34:05 +0100
+Subject: [PATCH 25/53] pinctrl: ralink: add pinctrl driver
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/Kconfig | 2 +
+ drivers/pinctrl/Kconfig | 5 +
+ drivers/pinctrl/Makefile | 1 +
+ drivers/pinctrl/pinctrl-rt2880.c | 474 ++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 482 insertions(+)
+ create mode 100644 drivers/pinctrl/pinctrl-rt2880.c
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -628,6 +628,8 @@ config RALINK
+ select CLKDEV_LOOKUP
+ select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
++ select PINCTRL
++ select PINCTRL_RT2880
+
+ config SGI_IP22
+ bool "SGI IP22 (Indy/Indigo2)"
+--- a/drivers/pinctrl/Kconfig
++++ b/drivers/pinctrl/Kconfig
+@@ -143,6 +143,11 @@ config PINCTRL_LPC18XX
+ help
+ Pinctrl driver for NXP LPC18xx/43xx System Control Unit (SCU).
+
++config PINCTRL_RT2880
++ bool
++ depends on RALINK
++ select PINMUX
++
+ config PINCTRL_FALCON
+ bool
+ depends on SOC_FALCON
+--- a/drivers/pinctrl/Makefile
++++ b/drivers/pinctrl/Makefile
+@@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-
+ obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
+ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
+ obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
++obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o
+ obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o
+ obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
+ obj-$(CONFIG_PINCTRL_SIRF) += sirf/
+--- /dev/null
++++ b/drivers/pinctrl/pinctrl-rt2880.c
+@@ -0,0 +1,472 @@
++/*
++ * linux/drivers/pinctrl/pinctrl-rt2880.c
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/of.h>
++#include <linux/pinctrl/pinctrl.h>
++#include <linux/pinctrl/pinconf.h>
++#include <linux/pinctrl/pinmux.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/pinctrl/machine.h>
++
++#include <asm/mach-ralink/ralink_regs.h>
++#include <asm/mach-ralink/pinmux.h>
++#include <asm/mach-ralink/mt7620.h>
++
++#include "core.h"
++
++#define SYSC_REG_GPIO_MODE 0x60
++#define SYSC_REG_GPIO_MODE2 0x64
++
++struct rt2880_priv {
++ struct device *dev;
++
++ struct pinctrl_pin_desc *pads;
++ struct pinctrl_desc *desc;
++
++ struct rt2880_pmx_func **func;
++ int func_count;
++
++ struct rt2880_pmx_group *groups;
++ const char **group_names;
++ int group_count;
++
++ uint8_t *gpio;
++ int max_pins;
++};
++
++static int rt2880_get_group_count(struct pinctrl_dev *pctrldev)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return p->group_count;
++}
++
++static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev,
++ unsigned group)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (group >= p->group_count)
++ return NULL;
++
++ return p->group_names[group];
++}
++
++static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev,
++ unsigned group,
++ const unsigned **pins,
++ unsigned *num_pins)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (group >= p->group_count)
++ return -EINVAL;
++
++ *pins = p->groups[group].func[0].pins;
++ *num_pins = p->groups[group].func[0].pin_count;
++
++ return 0;
++}
++
++static void rt2880_pinctrl_dt_free_map(struct pinctrl_dev *pctrldev,
++ struct pinctrl_map *map, unsigned num_maps)
++{
++ int i;
++
++ for (i = 0; i < num_maps; i++)
++ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
++ map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
++ kfree(map[i].data.configs.configs);
++ kfree(map);
++}
++
++static void rt2880_pinctrl_pin_dbg_show(struct pinctrl_dev *pctrldev,
++ struct seq_file *s,
++ unsigned offset)
++{
++ seq_printf(s, "ralink pio");
++}
++
++static void rt2880_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctrldev,
++ struct device_node *np,
++ struct pinctrl_map **map)
++{
++ const char *function;
++ int func = of_property_read_string(np, "ralink,function", &function);
++ int grps = of_property_count_strings(np, "ralink,group");
++ int i;
++
++ if (func || !grps)
++ return;
++
++ for (i = 0; i < grps; i++) {
++ const char *group;
++
++ of_property_read_string_index(np, "ralink,group", i, &group);
++
++ (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
++ (*map)->name = function;
++ (*map)->data.mux.group = group;
++ (*map)->data.mux.function = function;
++ (*map)++;
++ }
++}
++
++static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
++ struct device_node *np_config,
++ struct pinctrl_map **map,
++ unsigned *num_maps)
++{
++ int max_maps = 0;
++ struct pinctrl_map *tmp;
++ struct device_node *np;
++
++ for_each_child_of_node(np_config, np) {
++ int ret = of_property_count_strings(np, "ralink,group");
++
++ if (ret >= 0)
++ max_maps += ret;
++ }
++
++ if (!max_maps)
++ return -EINVAL;
++
++ *map = kzalloc(max_maps * sizeof(struct pinctrl_map), GFP_KERNEL);
++ if (!*map)
++ return -ENOMEM;
++
++ tmp = *map;
++
++ for_each_child_of_node(np_config, np)
++ rt2880_pinctrl_dt_subnode_to_map(pctrldev, np, &tmp);
++ *num_maps = max_maps;
++
++ return 0;
++}
++
++static const struct pinctrl_ops rt2880_pctrl_ops = {
++ .get_groups_count = rt2880_get_group_count,
++ .get_group_name = rt2880_get_group_name,
++ .get_group_pins = rt2880_get_group_pins,
++ .pin_dbg_show = rt2880_pinctrl_pin_dbg_show,
++ .dt_node_to_map = rt2880_pinctrl_dt_node_to_map,
++ .dt_free_map = rt2880_pinctrl_dt_free_map,
++};
++
++static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return p->func_count;
++}
++
++static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev,
++ unsigned func)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ return p->func[func]->name;
++}
++
++static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev,
++ unsigned func,
++ const char * const **groups,
++ unsigned * const num_groups)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (p->func[func]->group_count == 1)
++ *groups = &p->group_names[p->func[func]->groups[0]];
++ else
++ *groups = p->group_names;
++
++ *num_groups = p->func[func]->group_count;
++
++ return 0;
++}
++
++static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
++ unsigned func,
++ unsigned group)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++ u32 mode = 0;
++ u32 reg = SYSC_REG_GPIO_MODE;
++ int i;
++ int shift;
++
++ /* don't allow double use */
++ if (p->groups[group].enabled) {
++ dev_err(p->dev, "%s is already enabled\n", p->groups[group].name);
++ return -EBUSY;
++ }
++
++ p->groups[group].enabled = 1;
++ p->func[func]->enabled = 1;
++
++ shift = p->groups[group].shift;
++ if (shift >= 32) {
++ shift -= 32;
++ reg = SYSC_REG_GPIO_MODE2;
++ }
++ mode = rt_sysc_r32(reg);
++ mode &= ~(p->groups[group].mask << shift);
++
++ /* mark the pins as gpio */
++ for (i = 0; i < p->groups[group].func[0].pin_count; i++)
++ p->gpio[p->groups[group].func[0].pins[i]] = 1;
++
++ /* function 0 is gpio and needs special handling */
++ if (func == 0) {
++ mode |= p->groups[group].gpio << shift;
++ } else {
++ for (i = 0; i < p->func[func]->pin_count; i++)
++ p->gpio[p->func[func]->pins[i]] = 0;
++ mode |= p->func[func]->value << shift;
++ }
++ rt_sysc_w32(mode, reg);
++
++ return 0;
++}
++
++static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev,
++ struct pinctrl_gpio_range *range,
++ unsigned pin)
++{
++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev);
++
++ if (!p->gpio[pin]) {
++ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static const struct pinmux_ops rt2880_pmx_group_ops = {
++ .get_functions_count = rt2880_pmx_func_count,
++ .get_function_name = rt2880_pmx_func_name,
++ .get_function_groups = rt2880_pmx_group_get_groups,
++ .set_mux = rt2880_pmx_group_enable,
++ .gpio_request_enable = rt2880_pmx_group_gpio_request_enable,
++};
++
++static struct pinctrl_desc rt2880_pctrl_desc = {
++ .owner = THIS_MODULE,
++ .name = "rt2880-pinmux",
++ .pctlops = &rt2880_pctrl_ops,
++ .pmxops = &rt2880_pmx_group_ops,
++};
++
++static struct rt2880_pmx_func gpio_func = {
++ .name = "gpio",
++};
++
++static int rt2880_pinmux_index(struct rt2880_priv *p)
++{
++ struct rt2880_pmx_func **f;
++ struct rt2880_pmx_group *mux = p->groups;
++ int i, j, c = 0;
++
++ /* count the mux functions */
++ while (mux->name) {
++ p->group_count++;
++ mux++;
++ }
++
++ /* allocate the group names array needed by the gpio function */
++ p->group_names = devm_kzalloc(p->dev, sizeof(char *) * p->group_count, GFP_KERNEL);
++ if (!p->group_names)
++ return -1;
++
++ for (i = 0; i < p->group_count; i++) {
++ p->group_names[i] = p->groups[i].name;
++ p->func_count += p->groups[i].func_count;
++ }
++
++ /* we have a dummy function[0] for gpio */
++ p->func_count++;
++
++ /* allocate our function and group mapping index buffers */
++ f = p->func = devm_kzalloc(p->dev, sizeof(struct rt2880_pmx_func) * p->func_count, GFP_KERNEL);
++ gpio_func.groups = devm_kzalloc(p->dev, sizeof(int) * p->group_count, GFP_KERNEL);
++ if (!f || !gpio_func.groups)
++ return -1;
++
++ /* add a backpointer to the function so it knows its group */
++ gpio_func.group_count = p->group_count;
++ for (i = 0; i < gpio_func.group_count; i++)
++ gpio_func.groups[i] = i;
++
++ f[c] = &gpio_func;
++ c++;
++
++ /* add remaining functions */
++ for (i = 0; i < p->group_count; i++) {
++ for (j = 0; j < p->groups[i].func_count; j++) {
++ f[c] = &p->groups[i].func[j];
++ f[c]->groups = devm_kzalloc(p->dev, sizeof(int), GFP_KERNEL);
++ f[c]->groups[0] = i;
++ f[c]->group_count = 1;
++ c++;
++ }
++ }
++ return 0;
++}
++
++static int rt2880_pinmux_pins(struct rt2880_priv *p)
++{
++ int i, j;
++
++ /* loop over the functions and initialize the pins array. also work out the highest pin used */
++ for (i = 0; i < p->func_count; i++) {
++ int pin;
++
++ if (!p->func[i]->pin_count)
++ continue;
++
++ p->func[i]->pins = devm_kzalloc(p->dev, sizeof(int) * p->func[i]->pin_count, GFP_KERNEL);
++ for (j = 0; j < p->func[i]->pin_count; j++)
++ p->func[i]->pins[j] = p->func[i]->pin_first + j;
++
++ pin = p->func[i]->pin_first + p->func[i]->pin_count;
++ if (pin > p->max_pins)
++ p->max_pins = pin;
++ }
++
++ /* the buffer that tells us which pins are gpio */
++ p->gpio = devm_kzalloc(p->dev,sizeof(uint8_t) * p->max_pins,
++ GFP_KERNEL);
++ /* the pads needed to tell pinctrl about our pins */
++ p->pads = devm_kzalloc(p->dev,
++ sizeof(struct pinctrl_pin_desc) * p->max_pins,
++ GFP_KERNEL);
++ if (!p->pads || !p->gpio ) {
++ dev_err(p->dev, "Failed to allocate gpio data\n");
++ return -ENOMEM;
++ }
++
++ memset(p->gpio, 1, sizeof(uint8_t) * p->max_pins);
++ for (i = 0; i < p->func_count; i++) {
++ if (!p->func[i]->pin_count)
++ continue;
++
++ for (j = 0; j < p->func[i]->pin_count; j++)
++ p->gpio[p->func[i]->pins[j]] = 0;
++ }
++
++ /* pin 0 is always a gpio */
++ p->gpio[0] = 1;
++
++ /* set the pads */
++ for (i = 0; i < p->max_pins; i++) {
++ /* strlen("ioXY") + 1 = 5 */
++ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL);
++
++ if (!name) {
++ dev_err(p->dev, "Failed to allocate pad name\n");
++ return -ENOMEM;
++ }
++ snprintf(name, 5, "io%d", i);
++ p->pads[i].number = i;
++ p->pads[i].name = name;
++ }
++ p->desc->pins = p->pads;
++ p->desc->npins = p->max_pins;
++
++ return 0;
++}
++
++static int rt2880_pinmux_probe(struct platform_device *pdev)
++{
++ struct rt2880_priv *p;
++ struct pinctrl_dev *dev;
++ struct device_node *np;
++
++ if (!rt2880_pinmux_data)
++ return -ENOSYS;
++
++ /* setup the private data */
++ p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL);
++ if (!p)
++ return -ENOMEM;
++
++ p->dev = &pdev->dev;
++ p->desc = &rt2880_pctrl_desc;
++ p->groups = rt2880_pinmux_data;
++ platform_set_drvdata(pdev, p);
++
++ /* init the device */
++ if (rt2880_pinmux_index(p)) {
++ dev_err(&pdev->dev, "failed to load index\n");
++ return -EINVAL;
++ }
++ if (rt2880_pinmux_pins(p)) {
++ dev_err(&pdev->dev, "failed to load pins\n");
++ return -EINVAL;
++ }
++ dev = pinctrl_register(p->desc, &pdev->dev, p);
++ if (IS_ERR(dev))
++ return PTR_ERR(dev);
++
++ /* finalize by adding gpio ranges for enabled gpio controllers */
++ for_each_compatible_node(np, NULL, "ralink,rt2880-gpio") {
++ const __be32 *ngpio, *gpiobase;
++ struct pinctrl_gpio_range *range;
++ char *name;
++
++ if (!of_device_is_available(np))
++ continue;
++
++ ngpio = of_get_property(np, "ralink,num-gpios", NULL);
++ gpiobase = of_get_property(np, "ralink,gpio-base", NULL);
++ if (!ngpio || !gpiobase) {
++ dev_err(&pdev->dev, "failed to load chip info\n");
++ return -EINVAL;
++ }
++
++ range = devm_kzalloc(p->dev, sizeof(struct pinctrl_gpio_range) + 4, GFP_KERNEL);
++ range->name = name = (char *) &range[1];
++ sprintf(name, "pio");
++ range->npins = __be32_to_cpu(*ngpio);
++ range->base = __be32_to_cpu(*gpiobase);
++ range->pin_base = range->base;
++ pinctrl_add_gpio_range(dev, range);
++ }
++
++ return 0;
++}
++
++static const struct of_device_id rt2880_pinmux_match[] = {
++ { .compatible = "ralink,rt2880-pinmux" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, rt2880_pinmux_match);
++
++static struct platform_driver rt2880_pinmux_driver = {
++ .probe = rt2880_pinmux_probe,
++ .driver = {
++ .name = "rt2880-pinmux",
++ .owner = THIS_MODULE,
++ .of_match_table = rt2880_pinmux_match,
++ },
++};
++
++int __init rt2880_pinmux_init(void)
++{
++ return platform_driver_register(&rt2880_pinmux_driver);
++}
++
++core_initcall_sync(rt2880_pinmux_init);
diff --git a/target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch b/target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch
new file mode 100644
index 0000000000..7d5f98f647
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch
@@ -0,0 +1,59 @@
+From d410e5478c622c01fcf31427533df5f433df9146 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 28 Jul 2013 19:45:30 +0200
+Subject: [PATCH 26/53] DT: Add documentation for gpio-ralink
+
+Describe gpio-ralink binding.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+Cc: linux-mips@linux-mips.org
+Cc: devicetree@vger.kernel.org
+Cc: linux-gpio@vger.kernel.org
+---
+ .../devicetree/bindings/gpio/gpio-ralink.txt | 40 ++++++++++++++++++++
+ 1 file changed, 40 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/gpio/gpio-ralink.txt
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/gpio/gpio-ralink.txt
+@@ -0,0 +1,40 @@
++Ralink SoC GPIO controller bindings
++
++Required properties:
++- compatible:
++ - "ralink,rt2880-gpio" for Ralink controllers
++- #gpio-cells : Should be two.
++ - first cell is the pin number
++ - second cell is used to specify optional parameters (unused)
++- gpio-controller : Marks the device node as a GPIO controller
++- reg : Physical base address and length of the controller's registers
++- interrupt-parent: phandle to the INTC device node
++- interrupts : Specify the INTC interrupt number
++- ralink,num-gpios : Specify the number of GPIOs
++- ralink,register-map : The register layout depends on the GPIO bank and actual
++ SoC type. Register offsets need to be in this order.
++ [ INT, EDGE, RENA, FENA, DATA, DIR, POL, SET, RESET, TOGGLE ]
++
++Optional properties:
++- ralink,gpio-base : Specify the GPIO chips base number
++
++Example:
++
++ gpio0: gpio@600 {
++ compatible = "ralink,rt5350-gpio", "ralink,rt2880-gpio";
++
++ #gpio-cells = <2>;
++ gpio-controller;
++
++ reg = <0x600 0x34>;
++
++ interrupt-parent = <&intc>;
++ interrupts = <6>;
++
++ ralink,gpio-base = <0>;
++ ralink,num-gpios = <24>;
++ ralink,register-map = [ 00 04 08 0c
++ 20 24 28 2c
++ 30 34 ];
++
++ };
diff --git a/target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch b/target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch
new file mode 100644
index 0000000000..35cedf61fd
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch
@@ -0,0 +1,430 @@
+From 69fdd2c4f937796b934e89c33acde9d082e27bfd Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 4 Aug 2014 20:36:29 +0200
+Subject: [PATCH 27/53] GPIO: MIPS: ralink: add gpio driver for ralink SoC
+
+Add gpio driver for Ralink SoC. This driver makes the gpio core on
+RT2880, RT305x, rt3352, rt3662, rt3883, rt5350 and mt7620 work.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+Cc: linux-mips@linux-mips.org
+Cc: linux-gpio@vger.kernel.org
+---
+ arch/mips/include/asm/mach-ralink/gpio.h | 24 ++
+ drivers/gpio/Kconfig | 6 +
+ drivers/gpio/Makefile | 1 +
+ drivers/gpio/gpio-ralink.c | 355 ++++++++++++++++++++++++++++++
+ 4 files changed, 386 insertions(+)
+ create mode 100644 arch/mips/include/asm/mach-ralink/gpio.h
+ create mode 100644 drivers/gpio/gpio-ralink.c
+
+--- /dev/null
++++ b/arch/mips/include/asm/mach-ralink/gpio.h
+@@ -0,0 +1,24 @@
++/*
++ * Ralink SoC GPIO API support
++ *
++ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
++ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ *
++ */
++
++#ifndef __ASM_MACH_RALINK_GPIO_H
++#define __ASM_MACH_RALINK_GPIO_H
++
++#define ARCH_NR_GPIOS 128
++#include <asm-generic/gpio.h>
++
++#define gpio_get_value __gpio_get_value
++#define gpio_set_value __gpio_set_value
++#define gpio_cansleep __gpio_cansleep
++#define gpio_to_irq __gpio_to_irq
++
++#endif /* __ASM_MACH_RALINK_GPIO_H */
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -398,6 +398,12 @@ config GPIO_REG
+ A 32-bit single register GPIO fixed in/out implementation. This
+ can be used to represent any register as a set of GPIO signals.
+
++config GPIO_RALINK
++ bool "Ralink GPIO Support"
++ depends on RALINK
++ help
++ Say yes here to support the Ralink SoC GPIO device
++
+ config GPIO_SPEAR_SPICS
+ bool "ST SPEAr13xx SPI Chip Select as GPIO support"
+ depends on PLAT_SPEAR
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -98,6 +98,7 @@ obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-p
+ obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
+ obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+ obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
++obj-$(CONFIG_GPIO_RALINK) += gpio-ralink.o
+ obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
+ obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+ obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
+--- /dev/null
++++ b/drivers/gpio/gpio-ralink.c
+@@ -0,0 +1,355 @@
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ *
++ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
++ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
++ */
++
++#include <linux/module.h>
++#include <linux/io.h>
++#include <linux/gpio.h>
++#include <linux/spinlock.h>
++#include <linux/platform_device.h>
++#include <linux/of_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/interrupt.h>
++
++enum ralink_gpio_reg {
++ GPIO_REG_INT = 0,
++ GPIO_REG_EDGE,
++ GPIO_REG_RENA,
++ GPIO_REG_FENA,
++ GPIO_REG_DATA,
++ GPIO_REG_DIR,
++ GPIO_REG_POL,
++ GPIO_REG_SET,
++ GPIO_REG_RESET,
++ GPIO_REG_TOGGLE,
++ GPIO_REG_MAX
++};
++
++struct ralink_gpio_chip {
++ struct gpio_chip chip;
++ u8 regs[GPIO_REG_MAX];
++
++ spinlock_t lock;
++ void __iomem *membase;
++ struct irq_domain *domain;
++ int irq;
++
++ u32 rising;
++ u32 falling;
++};
++
++#define MAP_MAX 4
++static struct irq_domain *irq_map[MAP_MAX];
++static int irq_map_count;
++static atomic_t irq_refcount = ATOMIC_INIT(0);
++
++static inline struct ralink_gpio_chip *to_ralink_gpio(struct gpio_chip *chip)
++{
++ struct ralink_gpio_chip *rg;
++
++ rg = container_of(chip, struct ralink_gpio_chip, chip);
++
++ return rg;
++}
++
++static inline void rt_gpio_w32(struct ralink_gpio_chip *rg, u8 reg, u32 val)
++{
++ iowrite32(val, rg->membase + rg->regs[reg]);
++}
++
++static inline u32 rt_gpio_r32(struct ralink_gpio_chip *rg, u8 reg)
++{
++ return ioread32(rg->membase + rg->regs[reg]);
++}
++
++static void ralink_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip);
++
++ rt_gpio_w32(rg, (value) ? GPIO_REG_SET : GPIO_REG_RESET, BIT(offset));
++}
++
++static int ralink_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip);
++
++ return !!(rt_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset));
++}
++
++static int ralink_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
++{
++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip);
++ unsigned long flags;
++ u32 t;
++
++ spin_lock_irqsave(&rg->lock, flags);
++ t = rt_gpio_r32(rg, GPIO_REG_DIR);
++ t &= ~BIT(offset);
++ rt_gpio_w32(rg, GPIO_REG_DIR, t);
++ spin_unlock_irqrestore(&rg->lock, flags);
++
++ return 0;
++}
++
++static int ralink_gpio_direction_output(struct gpio_chip *chip,
++ unsigned offset, int value)
++{
++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip);
++ unsigned long flags;
++ u32 t;
++
++ spin_lock_irqsave(&rg->lock, flags);
++ ralink_gpio_set(chip, offset, value);
++ t = rt_gpio_r32(rg, GPIO_REG_DIR);
++ t |= BIT(offset);
++ rt_gpio_w32(rg, GPIO_REG_DIR, t);
++ spin_unlock_irqrestore(&rg->lock, flags);
++
++ return 0;
++}
++
++static int ralink_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
++{
++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip);
++
++ if (rg->irq < 1)
++ return -1;
++
++ return irq_create_mapping(rg->domain, pin);
++}
++
++static void ralink_gpio_irq_handler(struct irq_desc *desc)
++{
++ int i;
++
++ for (i = 0; i < irq_map_count; i++) {
++ struct irq_domain *domain = irq_map[i];
++ struct ralink_gpio_chip *rg;
++ unsigned long pending;
++ int bit;
++
++ rg = (struct ralink_gpio_chip *) domain->host_data;
++ pending = rt_gpio_r32(rg, GPIO_REG_INT);
++
++ for_each_set_bit(bit, &pending, rg->chip.ngpio) {
++ u32 map = irq_find_mapping(domain, bit);
++ generic_handle_irq(map);
++ rt_gpio_w32(rg, GPIO_REG_INT, BIT(bit));
++ }
++ }
++}
++
++static void ralink_gpio_irq_unmask(struct irq_data *d)
++{
++ struct ralink_gpio_chip *rg;
++ unsigned long flags;
++ u32 rise, fall;
++
++ rg = (struct ralink_gpio_chip *) d->domain->host_data;
++ rise = rt_gpio_r32(rg, GPIO_REG_RENA);
++ fall = rt_gpio_r32(rg, GPIO_REG_FENA);
++
++ spin_lock_irqsave(&rg->lock, flags);
++ rt_gpio_w32(rg, GPIO_REG_RENA, rise | (BIT(d->hwirq) & rg->rising));
++ rt_gpio_w32(rg, GPIO_REG_FENA, fall | (BIT(d->hwirq) & rg->falling));
++ spin_unlock_irqrestore(&rg->lock, flags);
++}
++
++static void ralink_gpio_irq_mask(struct irq_data *d)
++{
++ struct ralink_gpio_chip *rg;
++ unsigned long flags;
++ u32 rise, fall;
++
++ rg = (struct ralink_gpio_chip *) d->domain->host_data;
++ rise = rt_gpio_r32(rg, GPIO_REG_RENA);
++ fall = rt_gpio_r32(rg, GPIO_REG_FENA);
++
++ spin_lock_irqsave(&rg->lock, flags);
++ rt_gpio_w32(rg, GPIO_REG_FENA, fall & ~BIT(d->hwirq));
++ rt_gpio_w32(rg, GPIO_REG_RENA, rise & ~BIT(d->hwirq));
++ spin_unlock_irqrestore(&rg->lock, flags);
++}
++
++static int ralink_gpio_irq_type(struct irq_data *d, unsigned int type)
++{
++ struct ralink_gpio_chip *rg;
++ u32 mask = BIT(d->hwirq);
++
++ rg = (struct ralink_gpio_chip *) d->domain->host_data;
++
++ if (type == IRQ_TYPE_PROBE) {
++ if ((rg->rising | rg->falling) & mask)
++ return 0;
++
++ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
++ }
++
++ if (type & IRQ_TYPE_EDGE_RISING)
++ rg->rising |= mask;
++ else
++ rg->rising &= ~mask;
++
++ if (type & IRQ_TYPE_EDGE_FALLING)
++ rg->falling |= mask;
++ else
++ rg->falling &= ~mask;
++
++ return 0;
++}
++
++static struct irq_chip ralink_gpio_irq_chip = {
++ .name = "GPIO",
++ .irq_unmask = ralink_gpio_irq_unmask,
++ .irq_mask = ralink_gpio_irq_mask,
++ .irq_mask_ack = ralink_gpio_irq_mask,
++ .irq_set_type = ralink_gpio_irq_type,
++};
++
++static int gpio_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
++{
++ irq_set_chip_and_handler(irq, &ralink_gpio_irq_chip, handle_level_irq);
++ irq_set_handler_data(irq, d);
++
++ return 0;
++}
++
++static const struct irq_domain_ops irq_domain_ops = {
++ .xlate = irq_domain_xlate_onecell,
++ .map = gpio_map,
++};
++
++static void ralink_gpio_irq_init(struct device_node *np,
++ struct ralink_gpio_chip *rg)
++{
++ if (irq_map_count >= MAP_MAX)
++ return;
++
++ rg->irq = irq_of_parse_and_map(np, 0);
++ if (!rg->irq)
++ return;
++
++ rg->domain = irq_domain_add_linear(np, rg->chip.ngpio,
++ &irq_domain_ops, rg);
++ if (!rg->domain) {
++ dev_err(rg->chip.parent, "irq_domain_add_linear failed\n");
++ return;
++ }
++
++ irq_map[irq_map_count++] = rg->domain;
++
++ rt_gpio_w32(rg, GPIO_REG_RENA, 0x0);
++ rt_gpio_w32(rg, GPIO_REG_FENA, 0x0);
++
++ if (!atomic_read(&irq_refcount))
++ irq_set_chained_handler(rg->irq, ralink_gpio_irq_handler);
++ atomic_inc(&irq_refcount);
++
++ dev_info(rg->chip.parent, "registering %d irq handlers\n", rg->chip.ngpio);
++}
++
++static int ralink_gpio_request(struct gpio_chip *chip, unsigned offset)
++{
++ int gpio = chip->base + offset;
++
++ return pinctrl_request_gpio(gpio);
++}
++
++static void ralink_gpio_free(struct gpio_chip *chip, unsigned offset)
++{
++ int gpio = chip->base + offset;
++
++ pinctrl_free_gpio(gpio);
++}
++
++static int ralink_gpio_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ struct ralink_gpio_chip *rg;
++ const __be32 *ngpio, *gpiobase;
++
++ if (!res) {
++ dev_err(&pdev->dev, "failed to find resource\n");
++ return -ENOMEM;
++ }
++
++ rg = devm_kzalloc(&pdev->dev,
++ sizeof(struct ralink_gpio_chip), GFP_KERNEL);
++ if (!rg)
++ return -ENOMEM;
++
++ rg->membase = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(rg->membase)) {
++ dev_err(&pdev->dev, "cannot remap I/O memory region\n");
++ return PTR_ERR(rg->membase);
++ }
++
++ if (of_property_read_u8_array(np, "ralink,register-map",
++ rg->regs, GPIO_REG_MAX)) {
++ dev_err(&pdev->dev, "failed to read register definition\n");
++ return -EINVAL;
++ }
++
++ ngpio = of_get_property(np, "ralink,num-gpios", NULL);
++ if (!ngpio) {
++ dev_err(&pdev->dev, "failed to read number of pins\n");
++ return -EINVAL;
++ }
++
++ gpiobase = of_get_property(np, "ralink,gpio-base", NULL);
++ if (gpiobase)
++ rg->chip.base = be32_to_cpu(*gpiobase);
++ else
++ rg->chip.base = -1;
++
++ spin_lock_init(&rg->lock);
++
++ rg->chip.parent = &pdev->dev;
++ rg->chip.label = dev_name(&pdev->dev);
++ rg->chip.of_node = np;
++ rg->chip.ngpio = be32_to_cpu(*ngpio);
++ rg->chip.direction_input = ralink_gpio_direction_input;
++ rg->chip.direction_output = ralink_gpio_direction_output;
++ rg->chip.get = ralink_gpio_get;
++ rg->chip.set = ralink_gpio_set;
++ rg->chip.request = ralink_gpio_request;
++ rg->chip.to_irq = ralink_gpio_to_irq;
++ rg->chip.free = ralink_gpio_free;
++
++ /* set polarity to low for all lines */
++ rt_gpio_w32(rg, GPIO_REG_POL, 0);
++
++ dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio);
++
++ ralink_gpio_irq_init(np, rg);
++
++ return gpiochip_add(&rg->chip);
++}
++
++static const struct of_device_id ralink_gpio_match[] = {
++ { .compatible = "ralink,rt2880-gpio" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, ralink_gpio_match);
++
++static struct platform_driver ralink_gpio_driver = {
++ .probe = ralink_gpio_probe,
++ .driver = {
++ .name = "rt2880_gpio",
++ .owner = THIS_MODULE,
++ .of_match_table = ralink_gpio_match,
++ },
++};
++
++static int __init ralink_gpio_init(void)
++{
++ return platform_driver_register(&ralink_gpio_driver);
++}
++
++subsys_initcall(ralink_gpio_init);
diff --git a/target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch b/target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch
new file mode 100644
index 0000000000..d657274efe
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch
@@ -0,0 +1,405 @@
+From 61ac7d9b4228de8c332900902c2b93189b042eab Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 11:00:32 +0100
+Subject: [PATCH 28/53] GPIO: ralink: add mt7621 gpio controller
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/Kconfig | 3 +
+ drivers/gpio/Kconfig | 6 +
+ drivers/gpio/Makefile | 1 +
+ drivers/gpio/gpio-mt7621.c | 354 ++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 364 insertions(+)
+ create mode 100644 drivers/gpio/gpio-mt7621.c
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -630,6 +630,9 @@ config RALINK
+ select RESET_CONTROLLER
+ select PINCTRL
+ select PINCTRL_RT2880
++ select ARCH_HAS_RESET_CONTROLLER
++ select RESET_CONTROLLER
++ select ARCH_REQUIRE_GPIOLIB
+
+ config SGI_IP22
+ bool "SGI IP22 (Indy/Indigo2)"
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -298,6 +298,12 @@ config GPIO_MENZ127
+ help
+ Say yes here to support the MEN 16Z127 GPIO Controller
+
++config GPIO_MT7621
++ bool "Mediatek GPIO Support"
++ depends on SOC_MT7620 || SOC_MT7621
++ help
++ Say yes here to support the Mediatek SoC GPIO device
++
+ config GPIO_MM_LANTIQ
+ bool "Lantiq Memory mapped GPIOs"
+ depends on LANTIQ && SOC_XWAY
+--- a/drivers/gpio/Makefile
++++ b/drivers/gpio/Makefile
+@@ -152,3 +152,4 @@ obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o
+ obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o
+ obj-$(CONFIG_GPIO_ZX) += gpio-zx.o
+ obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o
++obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
+--- /dev/null
++++ b/drivers/gpio/gpio-mt7621.c
+@@ -0,0 +1,354 @@
++/*
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ *
++ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
++ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
++ */
++
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/gpio.h>
++#include <linux/module.h>
++#include <linux/of_irq.h>
++#include <linux/spinlock.h>
++#include <linux/irqdomain.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++
++#define MTK_MAX_BANK 3
++#define MTK_BANK_WIDTH 32
++
++enum mediatek_gpio_reg {
++ GPIO_REG_CTRL = 0,
++ GPIO_REG_POL,
++ GPIO_REG_DATA,
++ GPIO_REG_DSET,
++ GPIO_REG_DCLR,
++ GPIO_REG_REDGE,
++ GPIO_REG_FEDGE,
++ GPIO_REG_HLVL,
++ GPIO_REG_LLVL,
++ GPIO_REG_STAT,
++ GPIO_REG_EDGE,
++};
++
++static void __iomem *mediatek_gpio_membase;
++static int mediatek_gpio_irq;
++static struct irq_domain *mediatek_gpio_irq_domain;
++static atomic_t irq_refcount = ATOMIC_INIT(0);
++
++struct mtk_gc {
++ struct gpio_chip chip;
++ spinlock_t lock;
++ int bank;
++ u32 rising;
++ u32 falling;
++} *gc_map[MTK_MAX_BANK];
++
++static inline struct mtk_gc
++*to_mediatek_gpio(struct gpio_chip *chip)
++{
++ struct mtk_gc *mgc;
++
++ mgc = container_of(chip, struct mtk_gc, chip);
++
++ return mgc;
++}
++
++static inline void
++mtk_gpio_w32(struct mtk_gc *rg, u8 reg, u32 val)
++{
++ iowrite32(val, mediatek_gpio_membase + (reg * 0x10) + (rg->bank * 0x4));
++}
++
++static inline u32
++mtk_gpio_r32(struct mtk_gc *rg, u8 reg)
++{
++ return ioread32(mediatek_gpio_membase + (reg * 0x10) + (rg->bank * 0x4));
++}
++
++static void
++mediatek_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
++{
++ struct mtk_gc *rg = to_mediatek_gpio(chip);
++
++ mtk_gpio_w32(rg, (value) ? GPIO_REG_DSET : GPIO_REG_DCLR, BIT(offset));
++}
++
++static int
++mediatek_gpio_get(struct gpio_chip *chip, unsigned offset)
++{
++ struct mtk_gc *rg = to_mediatek_gpio(chip);
++
++ return !!(mtk_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset));
++}
++
++static int
++mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
++{
++ struct mtk_gc *rg = to_mediatek_gpio(chip);
++ unsigned long flags;
++ u32 t;
++
++ spin_lock_irqsave(&rg->lock, flags);
++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
++ t &= ~BIT(offset);
++ mtk_gpio_w32(rg, GPIO_REG_CTRL, t);
++ spin_unlock_irqrestore(&rg->lock, flags);
++
++ return 0;
++}
++
++static int
++mediatek_gpio_direction_output(struct gpio_chip *chip,
++ unsigned offset, int value)
++{
++ struct mtk_gc *rg = to_mediatek_gpio(chip);
++ unsigned long flags;
++ u32 t;
++
++ spin_lock_irqsave(&rg->lock, flags);
++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
++ t |= BIT(offset);
++ mtk_gpio_w32(rg, GPIO_REG_CTRL, t);
++ mediatek_gpio_set(chip, offset, value);
++ spin_unlock_irqrestore(&rg->lock, flags);
++
++ return 0;
++}
++
++static int
++mediatek_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
++{
++ struct mtk_gc *rg = to_mediatek_gpio(chip);
++ unsigned long flags;
++ u32 t;
++
++ spin_lock_irqsave(&rg->lock, flags);
++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL);
++ spin_unlock_irqrestore(&rg->lock, flags);
++
++ if (t & BIT(offset))
++ return 0;
++
++ return 1;
++}
++
++static int
++mediatek_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
++{
++ struct mtk_gc *rg = to_mediatek_gpio(chip);
++
++ return irq_create_mapping(mediatek_gpio_irq_domain, pin + (rg->bank * MTK_BANK_WIDTH));
++}
++
++static int
++mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank)
++{
++ const __be32 *id = of_get_property(bank, "reg", NULL);
++ struct mtk_gc *rg = devm_kzalloc(&pdev->dev,
++ sizeof(struct mtk_gc), GFP_KERNEL);
++
++ if (!rg || !id || be32_to_cpu(*id) >= MTK_MAX_BANK)
++ return -ENOMEM;
++
++ gc_map[be32_to_cpu(*id)] = rg;
++
++ memset(rg, 0, sizeof(struct mtk_gc));
++
++ spin_lock_init(&rg->lock);
++
++ rg->chip.parent = &pdev->dev;
++ rg->chip.label = dev_name(&pdev->dev);
++ rg->chip.of_node = bank;
++ rg->chip.base = MTK_BANK_WIDTH * be32_to_cpu(*id);
++ rg->chip.ngpio = MTK_BANK_WIDTH;
++ rg->chip.direction_input = mediatek_gpio_direction_input;
++ rg->chip.direction_output = mediatek_gpio_direction_output;
++ rg->chip.get_direction = mediatek_gpio_get_direction;
++ rg->chip.get = mediatek_gpio_get;
++ rg->chip.set = mediatek_gpio_set;
++ if (mediatek_gpio_irq_domain)
++ rg->chip.to_irq = mediatek_gpio_to_irq;
++ rg->bank = be32_to_cpu(*id);
++
++ /* set polarity to low for all gpios */
++ mtk_gpio_w32(rg, GPIO_REG_POL, 0);
++
++ dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio);
++
++ return gpiochip_add(&rg->chip);
++}
++
++static void
++mediatek_gpio_irq_handler(struct irq_desc *desc)
++{
++ int i;
++
++ for (i = 0; i < MTK_MAX_BANK; i++) {
++ struct mtk_gc *rg = gc_map[i];
++ unsigned long pending;
++ int bit;
++
++ if (!rg)
++ continue;
++
++ pending = mtk_gpio_r32(rg, GPIO_REG_STAT);
++
++ for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) {
++ u32 map = irq_find_mapping(mediatek_gpio_irq_domain, (MTK_BANK_WIDTH * i) + bit);
++
++ generic_handle_irq(map);
++ mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit));
++ }
++ }
++}
++
++static void
++mediatek_gpio_irq_unmask(struct irq_data *d)
++{
++ int pin = d->hwirq;
++ int bank = pin / 32;
++ struct mtk_gc *rg = gc_map[bank];
++ unsigned long flags;
++ u32 rise, fall;
++
++ if (!rg)
++ return;
++
++ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
++ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
++
++ spin_lock_irqsave(&rg->lock, flags);
++ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (BIT(d->hwirq) & rg->rising));
++ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(d->hwirq) & rg->falling));
++ spin_unlock_irqrestore(&rg->lock, flags);
++}
++
++static void
++mediatek_gpio_irq_mask(struct irq_data *d)
++{
++ int pin = d->hwirq;
++ int bank = pin / 32;
++ struct mtk_gc *rg = gc_map[bank];
++ unsigned long flags;
++ u32 rise, fall;
++
++ if (!rg)
++ return;
++
++ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
++ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
++
++ spin_lock_irqsave(&rg->lock, flags);
++ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(d->hwirq));
++ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(d->hwirq));
++ spin_unlock_irqrestore(&rg->lock, flags);
++}
++
++static int
++mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
++{
++ int pin = d->hwirq;
++ int bank = pin / 32;
++ struct mtk_gc *rg = gc_map[bank];
++ u32 mask = BIT(d->hwirq);
++
++ if (!rg)
++ return -1;
++
++ if (type == IRQ_TYPE_PROBE) {
++ if ((rg->rising | rg->falling) & mask)
++ return 0;
++
++ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
++ }
++
++ if (type & IRQ_TYPE_EDGE_RISING)
++ rg->rising |= mask;
++ else
++ rg->rising &= ~mask;
++
++ if (type & IRQ_TYPE_EDGE_FALLING)
++ rg->falling |= mask;
++ else
++ rg->falling &= ~mask;
++
++ return 0;
++}
++
++static struct irq_chip mediatek_gpio_irq_chip = {
++ .name = "GPIO",
++ .irq_unmask = mediatek_gpio_irq_unmask,
++ .irq_mask = mediatek_gpio_irq_mask,
++ .irq_mask_ack = mediatek_gpio_irq_mask,
++ .irq_set_type = mediatek_gpio_irq_type,
++};
++
++static int
++mediatek_gpio_gpio_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
++{
++ irq_set_chip_and_handler(irq, &mediatek_gpio_irq_chip, handle_level_irq);
++ irq_set_handler_data(irq, d);
++
++ return 0;
++}
++
++static const struct irq_domain_ops irq_domain_ops = {
++ .xlate = irq_domain_xlate_onecell,
++ .map = mediatek_gpio_gpio_map,
++};
++
++static int
++mediatek_gpio_probe(struct platform_device *pdev)
++{
++ struct device_node *bank, *np = pdev->dev.of_node;
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ mediatek_gpio_membase = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(mediatek_gpio_membase))
++ return PTR_ERR(mediatek_gpio_membase);
++
++ mediatek_gpio_irq = irq_of_parse_and_map(np, 0);
++ if (mediatek_gpio_irq) {
++ mediatek_gpio_irq_domain = irq_domain_add_linear(np,
++ MTK_MAX_BANK * MTK_BANK_WIDTH,
++ &irq_domain_ops, NULL);
++ if (!mediatek_gpio_irq_domain)
++ dev_err(&pdev->dev, "irq_domain_add_linear failed\n");
++ }
++
++ for_each_child_of_node(np, bank)
++ if (of_device_is_compatible(bank, "mtk,mt7621-gpio-bank"))
++ mediatek_gpio_bank_probe(pdev, bank);
++
++ if (mediatek_gpio_irq_domain)
++ irq_set_chained_handler(mediatek_gpio_irq, mediatek_gpio_irq_handler);
++
++ return 0;
++}
++
++static const struct of_device_id mediatek_gpio_match[] = {
++ { .compatible = "mtk,mt7621-gpio" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, mediatek_gpio_match);
++
++static struct platform_driver mediatek_gpio_driver = {
++ .probe = mediatek_gpio_probe,
++ .driver = {
++ .name = "mt7621_gpio",
++ .owner = THIS_MODULE,
++ .of_match_table = mediatek_gpio_match,
++ },
++};
++
++static int __init
++mediatek_gpio_init(void)
++{
++ return platform_driver_register(&mediatek_gpio_driver);
++}
++
++subsys_initcall(mediatek_gpio_init);
diff --git a/target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch b/target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch
new file mode 100644
index 0000000000..5829b2fdd0
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch
@@ -0,0 +1,246 @@
+From 975e76214cd2516eb6cfff4c3eec581872645e88 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Thu, 19 Sep 2013 01:50:59 +0200
+Subject: [PATCH 31/53] uvc: add iPassion iP2970 support
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/media/usb/uvc/uvc_driver.c | 12 +++
+ drivers/media/usb/uvc/uvc_status.c | 2 +
+ drivers/media/usb/uvc/uvc_video.c | 147 ++++++++++++++++++++++++++++++++++++
+ drivers/media/usb/uvc/uvcvideo.h | 5 +-
+ 4 files changed, 165 insertions(+), 1 deletion(-)
+
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2734,6 +2734,18 @@ static const struct usb_device_id uvc_id
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_QUIRK_FORCE_Y8 },
++ /* iPassion iP2970 */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x1B3B,
++ .idProduct = 0x2970,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = 0,
++ .driver_info = UVC_QUIRK_PROBE_MINMAX
++ | UVC_QUIRK_STREAM_NO_FID
++ | UVC_QUIRK_MOTION
++ | UVC_QUIRK_SINGLE_ISO },
+ /* Generic USB Video Class */
+ { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) },
+ { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) },
+--- a/drivers/media/usb/uvc/uvc_status.c
++++ b/drivers/media/usb/uvc/uvc_status.c
+@@ -139,6 +139,7 @@ static void uvc_status_complete(struct u
+ switch (dev->status[0] & 0x0f) {
+ case UVC_STATUS_TYPE_CONTROL:
+ uvc_event_control(dev, dev->status, len);
++ dev->motion = 1;
+ break;
+
+ case UVC_STATUS_TYPE_STREAMING:
+@@ -182,6 +183,7 @@ int uvc_status_init(struct uvc_device *d
+ }
+
+ pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress);
++ dev->motion = 0;
+
+ /* For high-speed interrupt endpoints, the bInterval value is used as
+ * an exponent of two. Some developers forgot about it.
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -21,6 +21,11 @@
+ #include <linux/wait.h>
+ #include <linux/atomic.h>
+ #include <asm/unaligned.h>
++#include <linux/skbuff.h>
++#include <linux/kobject.h>
++#include <linux/netlink.h>
++#include <linux/kobject.h>
++#include <linux/workqueue.h>
+
+ #include <media/v4l2-common.h>
+
+@@ -1081,9 +1086,149 @@ static void uvc_video_decode_data(struct
+ }
+ }
+
++struct bh_priv {
++ unsigned long seen;
++};
++
++struct bh_event {
++ const char *name;
++ struct sk_buff *skb;
++ struct work_struct work;
++};
++
++#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, "webcam", ##args )
++#define BH_DBG(fmt, args...) do {} while (0)
++#define BH_SKB_SIZE 2048
++
++extern u64 uevent_next_seqnum(void);
++static int seen = 0;
++
++static int bh_event_add_var(struct bh_event *event, int argv,
++ const char *format, ...)
++{
++ static char buf[128];
++ char *s;
++ va_list args;
++ int len;
++
++ if (argv)
++ return 0;
++
++ va_start(args, format);
++ len = vsnprintf(buf, sizeof(buf), format, args);
++ va_end(args);
++
++ if (len >= sizeof(buf)) {
++ BH_ERR("buffer size too small\n");
++ WARN_ON(1);
++ return -ENOMEM;
++ }
++
++ s = skb_put(event->skb, len + 1);
++ strcpy(s, buf);
++
++ BH_DBG("added variable '%s'\n", s);
++
++ return 0;
++}
++
++static int motion_hotplug_fill_event(struct bh_event *event)
++{
++ int s = jiffies;
++ int ret;
++
++ if (!seen)
++ seen = jiffies;
++
++ ret = bh_event_add_var(event, 0, "HOME=%s", "/");
++ if (ret)
++ return ret;
++
++ ret = bh_event_add_var(event, 0, "PATH=%s",
++ "/sbin:/bin:/usr/sbin:/usr/bin");
++ if (ret)
++ return ret;
++
++ ret = bh_event_add_var(event, 0, "SUBSYSTEM=usb");
++ if (ret)
++ return ret;
++
++ ret = bh_event_add_var(event, 0, "ACTION=motion");
++ if (ret)
++ return ret;
++
++ ret = bh_event_add_var(event, 0, "SEEN=%d", s - seen);
++ if (ret)
++ return ret;
++ seen = s;
++
++ ret = bh_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum());
++
++ return ret;
++}
++
++static void motion_hotplug_work(struct work_struct *work)
++{
++ struct bh_event *event = container_of(work, struct bh_event, work);
++ int ret = 0;
++
++ event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL);
++ if (!event->skb)
++ goto out_free_event;
++
++ ret = bh_event_add_var(event, 0, "%s@", "add");
++ if (ret)
++ goto out_free_skb;
++
++ ret = motion_hotplug_fill_event(event);
++ if (ret)
++ goto out_free_skb;
++
++ NETLINK_CB(event->skb).dst_group = 1;
++ broadcast_uevent(event->skb, 0, 1, GFP_KERNEL);
++
++out_free_skb:
++ if (ret) {
++ BH_ERR("work error %d\n", ret);
++ kfree_skb(event->skb);
++ }
++out_free_event:
++ kfree(event);
++}
++
++static int motion_hotplug_create_event(void)
++{
++ struct bh_event *event;
++
++ event = kzalloc(sizeof(*event), GFP_KERNEL);
++ if (!event)
++ return -ENOMEM;
++
++ event->name = "motion";
++
++ INIT_WORK(&event->work, (void *)(void *)motion_hotplug_work);
++ schedule_work(&event->work);
++
++ return 0;
++}
++
++#define MOTION_FLAG_OFFSET 4
+ static void uvc_video_decode_end(struct uvc_streaming *stream,
+ struct uvc_buffer *buf, const __u8 *data, int len)
+ {
++ if ((stream->dev->quirks & UVC_QUIRK_MOTION) &&
++ (data[len - 2] == 0xff) && (data[len - 1] == 0xd9)) {
++ u8 *mem;
++ buf->state = UVC_BUF_STATE_READY;
++ mem = (u8 *) (buf->mem + MOTION_FLAG_OFFSET);
++ if ( stream->dev->motion ) {
++ stream->dev->motion = 0;
++ motion_hotplug_create_event();
++ } else {
++ *mem &= 0x7f;
++ }
++ }
++
+ /* Mark the buffer as done if the EOF marker is set. */
+ if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) {
+ uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
+@@ -1498,6 +1643,8 @@ static int uvc_init_video_isoc(struct uv
+ if (npackets == 0)
+ return -ENOMEM;
+
++ if (stream->dev->quirks & UVC_QUIRK_SINGLE_ISO)
++ npackets = 1;
+ size = npackets * psize;
+
+ for (i = 0; i < UVC_URBS; ++i) {
+--- a/drivers/media/usb/uvc/uvcvideo.h
++++ b/drivers/media/usb/uvc/uvcvideo.h
+@@ -186,7 +186,9 @@
+ #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200
+ #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
+ #define UVC_QUIRK_FORCE_Y8 0x00000800
+-
++#define UVC_QUIRK_MOTION 0x00001000
++#define UVC_QUIRK_SINGLE_ISO 0x00002000
++
+ /* Format flags */
+ #define UVC_FMT_FLAG_COMPRESSED 0x00000001
+ #define UVC_FMT_FLAG_STREAM 0x00000002
+@@ -584,6 +586,7 @@ struct uvc_device {
+ __u8 *status;
+ struct input_dev *input;
+ char input_phys[64];
++ int motion;
+ };
+
+ enum uvc_handle_state {
diff --git a/target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch b/target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch
new file mode 100644
index 0000000000..623eaf7291
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch
@@ -0,0 +1,29 @@
+From a758e0870c6d1e4b0272f6e7f9efa9face5534bb Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:49:07 +0100
+Subject: [PATCH 32/53] USB: dwc2: add device_reset()
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/usb/dwc2/hcd.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -48,6 +48,7 @@
+ #include <linux/io.h>
+ #include <linux/slab.h>
+ #include <linux/usb.h>
++#include <linux/reset.h>
+
+ #include <linux/usb/hcd.h>
+ #include <linux/usb/ch11.h>
+@@ -5072,6 +5073,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hso
+
+ retval = -ENOMEM;
+
++ device_reset(hsotg->dev);
++
+ hcfg = dwc2_readl(hsotg->regs + HCFG);
+ dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
+
diff --git a/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch b/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch
new file mode 100644
index 0000000000..0bcb617ba1
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch
@@ -0,0 +1,59 @@
+From 0b6eb1e68290243d439ee330ea8d0b239a5aec69 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:38:50 +0100
+Subject: [PATCH 34/53] NET: multi phy support
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/net/phy/phy.c | 9 ++++++---
+ include/linux/phy.h | 1 +
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -984,7 +984,10 @@ void phy_state_machine(struct work_struc
+ /* If the link is down, give up on negotiation for now */
+ if (!phydev->link) {
+ phydev->state = PHY_NOLINK;
+- phy_link_down(phydev, true);
++ if (!phydev->no_auto_carrier_off)
++ phy_link_down(phydev, true);
++ else
++ phy_link_down(phydev, false);
+ break;
+ }
+
+@@ -1071,7 +1074,10 @@ void phy_state_machine(struct work_struc
+ phy_link_up(phydev);
+ } else {
+ phydev->state = PHY_NOLINK;
+- phy_link_down(phydev, true);
++ if (!phydev->no_auto_carrier_off)
++ phy_link_down(phydev, true);
++ else
++ phy_link_down(phydev, false);
+ }
+
+ if (phy_interrupt_is_valid(phydev))
+@@ -1081,7 +1087,10 @@ void phy_state_machine(struct work_struc
+ case PHY_HALTED:
+ if (phydev->link) {
+ phydev->link = 0;
+- phy_link_down(phydev, true);
++ if (!phydev->no_auto_carrier_off)
++ phy_link_down(phydev, true);
++ else
++ phy_link_down(phydev, false);
+ do_suspend = true;
+ }
+ break;
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -412,6 +412,7 @@ struct phy_device {
+ bool suspended;
+ bool sysfs_links;
+ bool loopback_enabled;
++ bool no_auto_carrier_off;
+
+ enum phy_state state;
+
diff --git a/target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch b/target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch
new file mode 100644
index 0000000000..0b3a6c40e4
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch
@@ -0,0 +1,29 @@
+From 8e72a3a1be8f6328bd7ef491332ba541547b6086 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 15 Jul 2013 00:38:51 +0200
+Subject: [PATCH 36/53] mtd: fix cfi cmdset 0002 erase status check
+
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -2293,7 +2293,7 @@ static int __xipram do_erase_chip(struct
+ chip->erase_suspended = 0;
+ }
+
+- if (chip_ready(map, adr))
++ if (chip_good(map, adr, map_word_ff(map)))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+@@ -2382,7 +2382,7 @@ static int __xipram do_erase_oneblock(st
+ chip->erase_suspended = 0;
+ }
+
+- if (chip_ready(map, adr)) {
++ if (chip_good(map, adr, map_word_ff(map))) {
+ xip_enable(map, chip, adr);
+ break;
+ }
diff --git a/target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch b/target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch
new file mode 100644
index 0000000000..8ac6964cd1
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch
@@ -0,0 +1,70 @@
+From ee9081b2726a5ca8cde5497afdc5425e21ff8f8b Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 15 Jul 2013 00:39:21 +0200
+Subject: [PATCH 37/53] mtd: cfi cmdset 0002 force word write
+
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -40,7 +40,7 @@
+ #include <linux/mtd/xip.h>
+
+ #define AMD_BOOTLOC_BUG
+-#define FORCE_WORD_WRITE 0
++#define FORCE_WORD_WRITE 1
+
+ #define MAX_WORD_RETRIES 3
+
+@@ -51,7 +51,9 @@
+
+ static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
+ static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
++#if !FORCE_WORD_WRITE
+ static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
++#endif
+ static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
+ static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
+ static void cfi_amdstd_sync (struct mtd_info *);
+@@ -202,6 +204,7 @@ static void fixup_amd_bootblock(struct m
+ }
+ #endif
+
++#if !FORCE_WORD_WRITE
+ static void fixup_use_write_buffers(struct mtd_info *mtd)
+ {
+ struct map_info *map = mtd->priv;
+@@ -211,6 +214,7 @@ static void fixup_use_write_buffers(stru
+ mtd->_write = cfi_amdstd_write_buffers;
+ }
+ }
++#endif /* !FORCE_WORD_WRITE */
+
+ /* Atmel chips don't use the same PRI format as AMD chips */
+ static void fixup_convert_atmel_pri(struct mtd_info *mtd)
+@@ -1791,6 +1795,7 @@ static int cfi_amdstd_write_words(struct
+ /*
+ * FIXME: interleaved mode not tested, and probably not supported!
+ */
++#if !FORCE_WORD_WRITE
+ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ unsigned long adr, const u_char *buf,
+ int len)
+@@ -1919,7 +1924,6 @@ static int __xipram do_write_buffer(stru
+ return ret;
+ }
+
+-
+ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+ {
+@@ -1994,6 +1998,7 @@ static int cfi_amdstd_write_buffers(stru
+
+ return 0;
+ }
++#endif /* !FORCE_WORD_WRITE */
+
+ /*
+ * Wait for the flash chip to become ready to write data
diff --git a/target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch b/target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch
new file mode 100644
index 0000000000..dae115dd13
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch
@@ -0,0 +1,4480 @@
+From 0e1c4e3c97b83b4e7da65b1c56f0a7d40736ac53 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 11:05:17 +0100
+Subject: [PATCH 39/53] mtd: add mt7621 nand support
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/mtd/nand/Kconfig | 6 +
+ drivers/mtd/nand/Makefile | 1 +
+ drivers/mtd/nand/bmt.c | 750 ++++++++++++
+ drivers/mtd/nand/bmt.h | 80 ++
+ drivers/mtd/nand/dev-nand.c | 63 +
+ drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++
+ drivers/mtd/nand/mtk_nand2.c | 2304 +++++++++++++++++++++++++++++++++++
+ drivers/mtd/nand/mtk_nand2.h | 452 +++++++
+ drivers/mtd/nand/nand_base.c | 6 +-
+ drivers/mtd/nand/nand_bbt.c | 19 +
+ drivers/mtd/nand/nand_def.h | 123 ++
+ drivers/mtd/nand/nand_device_list.h | 55 +
+ drivers/mtd/nand/partition.h | 115 ++
+ 13 files changed, 4311 insertions(+), 3 deletions(-)
+ create mode 100644 drivers/mtd/nand/bmt.c
+ create mode 100644 drivers/mtd/nand/bmt.h
+ create mode 100644 drivers/mtd/nand/dev-nand.c
+ create mode 100644 drivers/mtd/nand/mt6575_typedefs.h
+ create mode 100644 drivers/mtd/nand/mtk_nand2.c
+ create mode 100644 drivers/mtd/nand/mtk_nand2.h
+ create mode 100644 drivers/mtd/nand/nand_def.h
+ create mode 100644 drivers/mtd/nand/nand_device_list.h
+ create mode 100644 drivers/mtd/nand/partition.h
+
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -563,4 +563,10 @@ config MTD_NAND_MTK
+ Enables support for NAND controller on MTK SoCs.
+ This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
++config MTK_MTD_NAND
++ tristate "Support for MTK SoC NAND controller"
++ depends on SOC_MT7621
++ select MTD_NAND_IDS
++ select MTD_NAND_ECC
++
+ endif # MTD_NAND
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -60,6 +60,7 @@ obj-$(CONFIG_MTD_NAND_HISI504) +
+ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
+ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
+ obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o
++obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand2.o bmt.o
+
+ nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
+ nand-objs += nand_amd.o
+--- /dev/null
++++ b/drivers/mtd/nand/bmt.c
+@@ -0,0 +1,750 @@
++#include "bmt.h"
++
++typedef struct
++{
++ char signature[3];
++ u8 version;
++ u8 bad_count; // bad block count in pool
++ u8 mapped_count; // mapped block count in pool
++ u8 checksum;
++ u8 reseverd[13];
++} phys_bmt_header;
++
++typedef struct
++{
++ phys_bmt_header header;
++ bmt_entry table[MAX_BMT_SIZE];
++} phys_bmt_struct;
++
++typedef struct
++{
++ char signature[3];
++} bmt_oob_data;
++
++static char MAIN_SIGNATURE[] = "BMT";
++static char OOB_SIGNATURE[] = "bmt";
++#define SIGNATURE_SIZE (3)
++
++#define MAX_DAT_SIZE 0x1000
++#define MAX_OOB_SIZE 0x80
++
++static struct mtd_info *mtd_bmt;
++static struct nand_chip *nand_chip_bmt;
++#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
++#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
++
++#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
++#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
++
++/*********************************************************************
++* Flash is split into 2 parts, system part is for normal system *
++* system usage, size is system_block_count, another is replace pool *
++* +-------------------------------------------------+ *
++* | system_block_count | bmt_block_count | *
++* +-------------------------------------------------+ *
++*********************************************************************/
++static u32 total_block_count; // block number in flash
++static u32 system_block_count;
++static int bmt_block_count; // bmt table size
++// static int bmt_count; // block used in bmt
++static int page_per_block; // pages per block
++
++static u32 bmt_block_index; // bmt block index
++static bmt_struct bmt; // dynamic created global bmt table
++
++static u8 dat_buf[MAX_DAT_SIZE];
++static u8 oob_buf[MAX_OOB_SIZE];
++static bool pool_erased;
++
++/***************************************************************
++*
++* Interface adaptor for preloader/uboot/kernel
++* These interfaces operate on physical address, read/write
++* physical data.
++*
++***************************************************************/
++int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob)
++{
++ return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
++}
++
++bool nand_block_bad_bmt(u32 offset)
++{
++ return mtk_nand_block_bad_hw(mtd_bmt, offset);
++}
++
++bool nand_erase_bmt(u32 offset)
++{
++ int status;
++ if (offset < 0x20000)
++ {
++ MSG(INIT, "erase offset: 0x%x\n", offset);
++ }
++
++ status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as the nand_chip structure doesn't have an erase function defined
++ if (status & NAND_STATUS_FAIL)
++ return false;
++ else
++ return true;
++}
++
++int mark_block_bad_bmt(u32 offset)
++{
++ return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset);
++}
++
++bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob)
++{
++ if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
++ return false;
++ else
++ return true;
++}
++
++/***************************************************************
++* *
++* static internal function *
++* *
++***************************************************************/
++static void dump_bmt_info(bmt_struct * bmt)
++{
++ int i;
++
++ MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
++ for (i = 0; i < bmt->mapped_count; i++)
++ {
++ MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
++ }
++}
++
++static bool match_bmt_signature(u8 * dat, u8 * oob)
++{
++
++ if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
++ {
++ return false;
++ }
++
++ if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
++ {
++ MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n");
++ }
++ return true;
++}
++
++static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size)
++{
++ int i;
++ u8 checksum = 0;
++ u8 *dat = (u8 *) phys_table;
++
++ checksum += phys_table->header.version;
++ checksum += phys_table->header.mapped_count;
++
++ dat += sizeof(phys_bmt_header);
++ for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
++ {
++ checksum += dat[i];
++ }
++
++ return checksum;
++}
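++/*
++ * Checksum example (hypothetical values): for version 1,
++ * mapped_count 1 and a single entry mapping bad block 0x35 to pool
++ * block 0x3FA, the table bytes sum to 0x35 + 0x03 + 0xFA = 0x132
++ * (a byte-wise sum is endian-independent), so the stored checksum is
++ * (1 + 1 + 0x132) & 0xFF = 0x34. Entries beyond mapped_count are
++ * zero-initialised in init_bmt() and add nothing.
++ */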
++
++
++static int is_block_mapped(int index)
++{
++ int i;
++ for (i = 0; i < bmt.mapped_count; i++)
++ {
++ if (index == bmt.table[i].mapped_index)
++ return i;
++ }
++ return -1;
++}
++
++static bool is_page_used(u8 * dat, u8 * oob)
++{
++ return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF));
++}
++
++static bool valid_bmt_data(phys_bmt_struct * phys_table)
++{
++ int i;
++ u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);
++
++ // checksum correct?
++ if (phys_table->header.checksum != checksum)
++ {
++ MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum);
++ return false;
++ }
++
++ MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum);
++
++ // block index correct?
++ for (i = 0; i < phys_table->header.mapped_count; i++)
++ {
++ if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count)
++ {
++ MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
++ return false;
++ }
++ }
++
++ // pass check, valid bmt.
++ MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
++ return true;
++}
++
++static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob)
++{
++ phys_bmt_struct phys_bmt;
++
++ dump_bmt_info(bmt);
++
++ // fill phys_bmt_struct structure with bmt_struct
++ memset(&phys_bmt, 0xFF, sizeof(phys_bmt));
++
++ memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
++ phys_bmt.header.version = BMT_VERSION;
++ // phys_bmt.header.bad_count = bmt->bad_count;
++ phys_bmt.header.mapped_count = bmt->mapped_count;
++ memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
++
++ phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count);
++
++ memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt));
++ memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
++}
++
++// return the block index where a valid BMT was found, else return 0
++static int load_bmt_data(int start, int pool_size)
++{
++ int bmt_index = start + pool_size - 1; // find from the end
++ phys_bmt_struct phys_table;
++ int i;
++
++ MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index);
++
++ for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--)
++ {
++ if (nand_block_bad_bmt(OFFSET(bmt_index)))
++ {
++ MSG(INIT, "Skip bad block: %d\n", bmt_index);
++ continue;
++ }
++
++ if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf))
++ {
++ MSG(INIT, "Error found when read block %d\n", bmt_index);
++ continue;
++ }
++
++ if (!match_bmt_signature(dat_buf, oob_buf))
++ {
++ continue;
++ }
++
++ MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index);
++
++ memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table));
++
++ if (!valid_bmt_data(&phys_table))
++ {
++ MSG(INIT, "BMT data is not correct %d\n", bmt_index);
++ continue;
++ } else
++ {
++ bmt.mapped_count = phys_table.header.mapped_count;
++ bmt.version = phys_table.header.version;
++ // bmt.bad_count = phys_table.header.bad_count;
++ memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry));
++
++ MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count);
++
++ for (i = 0; i < bmt.mapped_count; i++)
++ {
++ if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index)))
++ {
++ MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index);
++ mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
++ }
++ }
++
++ return bmt_index;
++ }
++ }
++
++ MSG(INIT, "bmt block not found!\n");
++ return 0;
++}
++
++/*************************************************************************
++* Find an available block in the replace pool and erase it.             *
++* start_from_end: if true, search from the end of flash;                *
++*                 otherwise search from the beginning of the pool.      *
++* On the first call, all unmapped blocks in the pool are erased.        *
++*************************************************************************/
++static int find_available_block(bool start_from_end)
++{
++ int i; // , j;
++ int block = system_block_count;
++ int direction;
++ // int avail_index = 0;
++ MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased);
++
++ // erase all un-mapped blocks in pool when finding an available block
++ if (!pool_erased)
++ {
++ MSG(INIT, "Erase all un-mapped blocks in pool\n");
++ for (i = 0; i < bmt_block_count; i++)
++ {
++ if (block == bmt_block_index)
++ {
++ MSG(INIT, "Skip bmt block 0x%x\n", block);
++ continue;
++ }
++
++ if (nand_block_bad_bmt(OFFSET(block + i)))
++ {
++ MSG(INIT, "Skip bad block 0x%x\n", block + i);
++ continue;
++ }
++//if(block==4095)
++//{
++// continue;
++//}
++
++ if (is_block_mapped(block + i) >= 0)
++ {
++ MSG(INIT, "Skip mapped block 0x%x\n", block + i);
++ continue;
++ }
++
++ if (!nand_erase_bmt(OFFSET(block + i)))
++ {
++ MSG(INIT, "Erase block 0x%x failed\n", block + i);
++ mark_block_bad_bmt(OFFSET(block + i));
++ }
++ }
++
++ pool_erased = 1;
++ }
++
++ if (start_from_end)
++ {
++ block = total_block_count - 1;
++ direction = -1;
++ } else
++ {
++ block = system_block_count;
++ direction = 1;
++ }
++
++ for (i = 0; i < bmt_block_count; i++, block += direction)
++ {
++ if (block == bmt_block_index)
++ {
++ MSG(INIT, "Skip bmt block 0x%x\n", block);
++ continue;
++ }
++
++ if (nand_block_bad_bmt(OFFSET(block)))
++ {
++ MSG(INIT, "Skip bad block 0x%x\n", block);
++ continue;
++ }
++
++ if (is_block_mapped(block) >= 0)
++ {
++ MSG(INIT, "Skip mapped block 0x%x\n", block);
++ continue;
++ }
++
++ MSG(INIT, "Find block 0x%x available\n", block);
++ return block;
++ }
++
++ return 0;
++}
++
++static unsigned short get_bad_index_from_oob(u8 * oob_buf)
++{
++ unsigned short index;
++ memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
++
++ return index;
++}
++
++void set_bad_index_to_oob(u8 * oob, u16 index)
++{
++ memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
++}
++
++static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
++{
++ int page;
++ int error_block = offset / BLOCK_SIZE_BMT;
++ int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
++ int to_index;
++
++ memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
++
++ to_index = find_available_block(false);
++
++ if (!to_index)
++ {
++ MSG(INIT, "Cannot find an available block for BMT\n");
++ return 0;
++ }
++
++ { // migrate error page first
++ MSG(INIT, "Write error page: 0x%x\n", error_page);
++ if (!write_dat)
++ {
++ nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
++ write_dat = dat_buf;
++ }
++ // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
++
++ if (error_block < system_block_count)
++ set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
++
++ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
++ {
++ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
++ mark_block_bad_bmt(to_index);
++ return migrate_from_bad(offset, write_dat, write_oob);
++ }
++ }
++
++ for (page = 0; page < page_per_block; page++)
++ {
++ if (page != error_page)
++ {
++ nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
++ if (is_page_used(dat_buf, oob_buf))
++ {
++ if (error_block < system_block_count)
++ {
++ set_bad_index_to_oob(oob_buf, error_block);
++ }
++ MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
++ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
++ {
++ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
++ mark_block_bad_bmt(to_index);
++ return migrate_from_bad(offset, write_dat, write_oob);
++ }
++ }
++ }
++ }
++
++ MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
++
++ return to_index;
++}
++
++static bool write_bmt_to_flash(u8 * dat, u8 * oob)
++{
++ bool need_erase = true;
++ MSG(INIT, "Try to write BMT\n");
++
++ if (bmt_block_index == 0)
++ {
++ // no index yet: the block returned by find_available_block() was already erased there, so skip the erase
++ need_erase = false;
++ if (!(bmt_block_index = find_available_block(true)))
++ {
++ MSG(INIT, "Cannot find an available block for BMT\n");
++ return false;
++ }
++ }
++
++ MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
++
++ // write bmt to flash
++ if (need_erase)
++ {
++ if (!nand_erase_bmt(OFFSET(bmt_block_index)))
++ {
++ MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
++ mark_block_bad_bmt(OFFSET(bmt_block_index));
++ // bmt.bad_count++;
++
++ bmt_block_index = 0;
++ return write_bmt_to_flash(dat, oob); // recursive call
++ }
++ }
++
++ if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
++ {
++ MSG(INIT, "Write BMT data fail, need to write again\n");
++ mark_block_bad_bmt(OFFSET(bmt_block_index));
++ // bmt.bad_count++;
++
++ bmt_block_index = 0;
++ return write_bmt_to_flash(dat, oob); // recursive call
++ }
++
++ MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
++ return true;
++}
++
++/*******************************************************************
++* Reconstruct bmt, called when found bmt info doesn't match bad
++* block info in flash.
++*
++* Return NULL for failure
++*******************************************************************/
++bmt_struct *reconstruct_bmt(bmt_struct * bmt)
++{
++ int i;
++ int index = system_block_count;
++ unsigned short bad_index;
++ int mapped;
++
++ // init everything in BMT struct
++ bmt->version = BMT_VERSION;
++ bmt->bad_count = 0;
++ bmt->mapped_count = 0;
++
++ memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
++
++ for (i = 0; i < bmt_block_count; i++, index++)
++ {
++ if (nand_block_bad_bmt(OFFSET(index)))
++ {
++ MSG(INIT, "Skip bad block: 0x%x\n", index);
++ // bmt->bad_count++;
++ continue;
++ }
++
++ MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
++ nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
++ /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
++ {
++ MSG(INIT, "Error when read block %d\n", bmt_block_index);
++ continue;
++ } */
++
++ if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count)
++ {
++ MSG(INIT, "get bad index: 0x%x\n", bad_index);
++ if (bad_index != 0xFFFF)
++ MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
++ continue;
++ }
++
++ MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
++
++ if (!nand_block_bad_bmt(OFFSET(bad_index)))
++ {
++ MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
++ continue; // no need to erase here, it will be erased later when trying to write BMT
++ }
++
++ if ((mapped = is_block_mapped(bad_index)) >= 0)
++ {
++ MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
++ bmt->table[mapped].mapped_index = index; // use new one instead.
++ } else
++ {
++ // add mapping to BMT
++ bmt->table[bmt->mapped_count].bad_index = bad_index;
++ bmt->table[bmt->mapped_count].mapped_index = index;
++ bmt->mapped_count++;
++ }
++
++ MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
++
++ }
++
++ MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
++ // dump_bmt_info(bmt);
++
++ // fill NAND BMT buffer
++ memset(oob_buf, 0xFF, sizeof(oob_buf));
++ fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
++
++ // write BMT back
++ if (!write_bmt_to_flash(dat_buf, oob_buf))
++ {
++ MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
++ }
++
++ return bmt;
++}
++
++/*******************************************************************
++* [BMT Interface]
++*
++* Description:
++* Init bmt from nand. Reconstruct if not found or data error
++*
++* Parameter:
++* size: size of bmt and replace pool
++*
++* Return:
++* NULL for failure, and a bmt struct for success
++*******************************************************************/
++bmt_struct *init_bmt(struct nand_chip * chip, int size)
++{
++ struct mtk_nand_host *host;
++
++ if (size > 0 && size < MAX_BMT_SIZE)
++ {
++ MSG(INIT, "Init bmt table, size: %d\n", size);
++ bmt_block_count = size;
++ } else
++ {
++ MSG(INIT, "Invalid bmt table size: %d\n", size);
++ return NULL;
++ }
++ nand_chip_bmt = chip;
++ system_block_count = chip->chipsize >> chip->phys_erase_shift;
++ total_block_count = bmt_block_count + system_block_count;
++ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
++ host = (struct mtk_nand_host *)chip->priv;
++ mtd_bmt = host->mtd;
++
++ MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
++ MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
++
++ // clear this flag so unmapped blocks in the pool get erased on the first find_available_block() call
++ pool_erased = 0;
++ memset(bmt.table, 0, size * sizeof(bmt_entry));
++ if ((bmt_block_index = load_bmt_data(system_block_count, size)))
++ {
++ MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
++ dump_bmt_info(&bmt);
++ return &bmt;
++ } else
++ {
++ MSG(INIT, "Load bmt data fail, need re-construct!\n");
++#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT.
++ if (reconstruct_bmt(&bmt))
++ return &bmt;
++ else
++#endif
++ return NULL;
++ }
++}
++
++/*******************************************************************
++* [BMT Interface]
++*
++* Description:
++* Update BMT.
++*
++* Parameter:
++* offset: update block/page offset.
++* reason: update reason, see update_reason_t for reason.
++* dat/oob: data and oob buffer for write fail.
++*
++* Return:
++* Return true for success, and false for failure.
++*******************************************************************/
++bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob)
++{
++ int map_index;
++ int orig_bad_block = -1;
++ // int bmt_update_index;
++ int i;
++ int bad_index = offset / BLOCK_SIZE_BMT;
++
++#ifndef MTK_NAND_BMT
++ return false;
++#endif
++ if (reason == UPDATE_WRITE_FAIL)
++ {
++ MSG(INIT, "Write fail, need to migrate\n");
++ if (!(map_index = migrate_from_bad(offset, dat, oob)))
++ {
++ MSG(INIT, "migrate fail\n");
++ return false;
++ }
++ } else
++ {
++ if (!(map_index = find_available_block(false)))
++ {
++ MSG(INIT, "Cannot find block in pool\n");
++ return false;
++ }
++ }
++
++ // now let's update BMT
++ if (bad_index >= system_block_count) // a mapped block went bad, find the original bad block
++ {
++ for (i = 0; i < bmt_block_count; i++)
++ {
++ if (bmt.table[i].mapped_index == bad_index)
++ {
++ orig_bad_block = bmt.table[i].bad_index;
++ break;
++ }
++ }
++ // bmt.bad_count++;
++ MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
++
++ bmt.table[i].mapped_index = map_index;
++ } else
++ {
++ bmt.table[bmt.mapped_count].mapped_index = map_index;
++ bmt.table[bmt.mapped_count].bad_index = bad_index;
++ bmt.mapped_count++;
++ }
++
++ memset(oob_buf, 0xFF, sizeof(oob_buf));
++ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
++ if (!write_bmt_to_flash(dat_buf, oob_buf))
++ return false;
++
++ mark_block_bad_bmt(offset);
++
++ return true;
++}
++
++/*******************************************************************
++* [BMT Interface]
++*
++* Description:
++* Given a block index, return the mapped index if the block is
++* mapped, else return the given index.
++*
++* Parameter:
++* index: a block index; it should not exceed
++* system_block_count.
++*
++* Return: the mapped index, or the given index when no mapping exists
++*******************************************************************/
++u16 get_mapping_block_index(int index)
++{
++ int i;
++#ifndef MTK_NAND_BMT
++ return index;
++#endif
++ if (index > system_block_count)
++ {
++ return index;
++ }
++
++ for (i = 0; i < bmt.mapped_count; i++)
++ {
++ if (bmt.table[i].bad_index == index)
++ {
++ return bmt.table[i].mapped_index;
++ }
++ }
++
++ return index;
++}
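++/*
++ * Address-translation sketch (illustrative; "pages_per_block" is the
++ * caller's own value, not this file's static): a page-based I/O path
++ * would remap the block before building the physical page address,
++ * e.g.
++ *
++ * u32 blk = page / pages_per_block;
++ * u32 real_page = get_mapping_block_index(blk) * pages_per_block
++ * + page % pages_per_block;
++ */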
++#ifdef __KERNEL_NAND__
++EXPORT_SYMBOL_GPL(init_bmt);
++EXPORT_SYMBOL_GPL(update_bmt);
++EXPORT_SYMBOL_GPL(get_mapping_block_index);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("MediaTek");
++MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
++#endif
+--- /dev/null
++++ b/drivers/mtd/nand/bmt.h
+@@ -0,0 +1,80 @@
++#ifndef __BMT_H__
++#define __BMT_H__
++
++#include "nand_def.h"
++
++#if defined(__PRELOADER_NAND__)
++
++#include "nand.h"
++
++#elif defined(__UBOOT_NAND__)
++
++#include <linux/mtd/nand.h>
++#include "mtk_nand2.h"
++
++#elif defined(__KERNEL_NAND__)
++
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/rawnand.h>
++#include <linux/module.h>
++#include "mtk_nand2.h"
++
++#endif
++
++
++#define MAX_BMT_SIZE (0x80)
++#define BMT_VERSION (1) // initial version
++
++#define MAIN_SIGNATURE_OFFSET (0)
++#define OOB_SIGNATURE_OFFSET (1)
++#define OOB_INDEX_OFFSET (29)
++#define OOB_INDEX_SIZE (2)
++#define FAKE_INDEX (0xAAAA)
++
++typedef struct _bmt_entry_
++{
++ u16 bad_index; // bad block index
++ u16 mapped_index; // mapping block index in the replace pool
++} bmt_entry;
++
++typedef enum
++{
++ UPDATE_ERASE_FAIL,
++ UPDATE_WRITE_FAIL,
++ UPDATE_UNMAPPED_BLOCK,
++ UPDATE_REASON_COUNT,
++} update_reason_t;
++
++typedef struct
++{
++ bmt_entry table[MAX_BMT_SIZE];
++ u8 version;
++ u8 mapped_count; // mapped block count in pool
++ u8 bad_count; // bad block count in pool. Not used in V1
++} bmt_struct;
++
++/***************************************************************
++* *
++* Interface BMT need to use *
++* *
++***************************************************************/
++extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
++extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs);
++extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page);
++extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs);
++extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob);
++
++
++/***************************************************************
++* *
++* Different function interface for preloader/uboot/kernel *
++* *
++***************************************************************/
++void set_bad_index_to_oob(u8 * oob, u16 index);
++
++
++bmt_struct *init_bmt(struct nand_chip *nand, int size);
++bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob);
++unsigned short get_mapping_block_index(int index);
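++/*
++ * Typical call sequence in a kernel build (sketch; pool_blocks and the
++ * error-path variables are hypothetical names):
++ *
++ * g_bmt = init_bmt(chip, pool_blocks); // pool_blocks < MAX_BMT_SIZE
++ * ...
++ * if (write_failed) // on a failed page program
++ * update_bmt(page << chip->page_shift, UPDATE_WRITE_FAIL,
++ * data_buf, oob_buf);
++ */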
++
++#endif // #ifndef __BMT_H__
+--- /dev/null
++++ b/drivers/mtd/nand/dev-nand.c
+@@ -0,0 +1,63 @@
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/platform_device.h>
++
++#include "mt6575_typedefs.h"
++
++#define RALINK_NAND_CTRL_BASE 0xBE003000
++#define NFI_base RALINK_NAND_CTRL_BASE
++#define RALINK_NANDECC_CTRL_BASE 0xBE003800
++#define NFIECC_base RALINK_NANDECC_CTRL_BASE
++#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND
++#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC
++
++#define SURFBOARDINT_NAND 22
++#define SURFBOARDINT_NAND_ECC 23
++
++static struct resource MT7621_resource_nand[] = {
++ {
++ .start = NFI_base,
++ .end = NFI_base + 0x1A0,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = NFIECC_base,
++ .end = NFIECC_base + 0x150,
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .start = MT7621_NFI_IRQ_ID,
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .start = MT7621_NFIECC_IRQ_ID,
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device MT7621_nand_dev = {
++ .name = "MT7621-NAND",
++ .id = 0,
++ .num_resources = ARRAY_SIZE(MT7621_resource_nand),
++ .resource = MT7621_resource_nand,
++ .dev = {
++ .platform_data = &mt7621_nand_hw,
++ },
++};
++
++
++int __init mtk_nand_register(void)
++{
++
++ int retval = 0;
++
++ retval = platform_device_register(&MT7621_nand_dev);
++ if (retval != 0) {
++ printk(KERN_ERR "register nand device fail\n");
++ return retval;
++ }
++
++
++ return retval;
++}
++arch_initcall(mtk_nand_register);
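++/*
++ * Binding note (sketch): the device above is matched by name, so the
++ * controller driver is expected to register a platform_driver whose
++ * .driver.name is "MT7621-NAND"; a minimal hypothetical skeleton:
++ *
++ * static struct platform_driver mtk_nand_driver = {
++ * .probe = mtk_nand_probe,
++ * .remove = mtk_nand_remove,
++ * .driver = { .name = "MT7621-NAND" },
++ * };
++ * module_platform_driver(mtk_nand_driver);
++ */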
+--- /dev/null
++++ b/drivers/mtd/nand/mt6575_typedefs.h
+@@ -0,0 +1,340 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ */
++/* MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++/*****************************************************************************
++* Copyright Statement:
++* --------------------
++* This software is protected by Copyright and the information contained
++* herein is confidential. The software may not be copied and the information
++* contained herein may not be used or disclosed except with the written
++* permission of MediaTek Inc. (C) 2008
++*
++* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON
++* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH
++* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO
++* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S
++* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM.
++*
++* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE
++* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO
++* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++*
++* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE
++* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF
++* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND
++* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER
++* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC).
++*
++*****************************************************************************/
++
++#ifndef _MT6575_TYPEDEFS_H
++#define _MT6575_TYPEDEFS_H
++
++#if defined (__KERNEL_NAND__)
++#include <linux/bug.h>
++#else
++#define true 1
++#define false 0
++#define bool u8
++#endif
++
++// ---------------------------------------------------------------------------
++// Basic Type Definitions
++// ---------------------------------------------------------------------------
++
++typedef volatile unsigned char *P_kal_uint8;
++typedef volatile unsigned short *P_kal_uint16;
++typedef volatile unsigned int *P_kal_uint32;
++
++typedef long LONG;
++typedef unsigned char UBYTE;
++typedef short SHORT;
++
++typedef signed char kal_int8;
++typedef signed short kal_int16;
++typedef signed int kal_int32;
++typedef long long kal_int64;
++typedef unsigned char kal_uint8;
++typedef unsigned short kal_uint16;
++typedef unsigned int kal_uint32;
++typedef unsigned long long kal_uint64;
++typedef char kal_char;
++
++typedef unsigned int *UINT32P;
++typedef volatile unsigned short *UINT16P;
++typedef volatile unsigned char *UINT8P;
++typedef unsigned char *U8P;
++
++typedef volatile unsigned char *P_U8;
++typedef volatile signed char *P_S8;
++typedef volatile unsigned short *P_U16;
++typedef volatile signed short *P_S16;
++typedef volatile unsigned int *P_U32;
++typedef volatile signed int *P_S32;
++typedef unsigned long long *P_U64;
++typedef signed long long *P_S64;
++
++typedef unsigned char U8;
++typedef signed char S8;
++typedef unsigned short U16;
++typedef signed short S16;
++typedef unsigned int U32;
++typedef signed int S32;
++typedef unsigned long long U64;
++typedef signed long long S64;
++//typedef unsigned char bool;
++
++typedef unsigned char UINT8;
++typedef unsigned short UINT16;
++typedef unsigned int UINT32;
++typedef unsigned short USHORT;
++typedef signed char INT8;
++typedef signed short INT16;
++typedef signed int INT32;
++typedef unsigned int DWORD;
++typedef void VOID;
++typedef unsigned char BYTE;
++typedef float FLOAT;
++
++typedef char *LPCSTR;
++typedef short *LPWSTR;
++
++
++// ---------------------------------------------------------------------------
++// Constants
++// ---------------------------------------------------------------------------
++
++#define IMPORT EXTERN
++#ifndef __cplusplus
++ #define EXTERN extern
++#else
++ #define EXTERN extern "C"
++#endif
++#define LOCAL static
++#define GLOBAL
++#define EXPORT GLOBAL
++
++#define EQ ==
++#define NEQ !=
++#define AND &&
++#define OR ||
++#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B)))
++
++#ifndef FALSE
++ #define FALSE (0)
++#endif
++
++#ifndef TRUE
++ #define TRUE (1)
++#endif
++
++#ifndef NULL
++ #define NULL (0)
++#endif
++
++//enum boolean {false, true};
++enum {RX, TX, NONE};
++
++#ifndef BOOL
++typedef unsigned char BOOL;
++#endif
++
++typedef enum {
++ KAL_FALSE = 0,
++ KAL_TRUE = 1,
++} kal_bool;
++
++
++// ---------------------------------------------------------------------------
++// Type Casting
++// ---------------------------------------------------------------------------
++
++#define AS_INT32(x) (*(INT32 *)((void*)x))
++#define AS_INT16(x) (*(INT16 *)((void*)x))
++#define AS_INT8(x) (*(INT8 *)((void*)x))
++
++#define AS_UINT32(x) (*(UINT32 *)((void*)x))
++#define AS_UINT16(x) (*(UINT16 *)((void*)x))
++#define AS_UINT8(x) (*(UINT8 *)((void*)x))
++
++
++// ---------------------------------------------------------------------------
++// Register Manipulations
++// ---------------------------------------------------------------------------
++
++#define READ_REGISTER_UINT32(reg) \
++ (*(volatile UINT32 * const)(reg))
++
++#define WRITE_REGISTER_UINT32(reg, val) \
++ (*(volatile UINT32 * const)(reg)) = (val)
++
++#define READ_REGISTER_UINT16(reg) \
++ (*(volatile UINT16 * const)(reg))
++
++#define WRITE_REGISTER_UINT16(reg, val) \
++ (*(volatile UINT16 * const)(reg)) = (val)
++
++#define READ_REGISTER_UINT8(reg) \
++ (*(volatile UINT8 * const)(reg))
++
++#define WRITE_REGISTER_UINT8(reg, val) \
++ (*(volatile UINT8 * const)(reg)) = (val)
++
++#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x)))
++#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y))
++#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y))
++#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y))
++#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z))
++
++#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x)))
++#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y))
++#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y))
++#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y))
++#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z))
++
++#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x)))
++#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y))
++#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y))
++#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y))
++#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z))
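++/*
++ * Example (hypothetical address): SETREG16(0xBE003010, 0x4) performs a
++ * 16-bit read-modify-write that ORs in bit 2, i.e.
++ * OUTREG16(0xBE003010, INREG16(0xBE003010) | 0x4); CLRREG16 clears the
++ * given bits and MASKREG16(addr, mask, val) replaces only the bits in
++ * mask.
++ */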
++
++
++#define DRV_Reg8(addr) INREG8(addr)
++#define DRV_WriteReg8(addr, data) OUTREG8(addr, data)
++#define DRV_SetReg8(addr, data) SETREG8(addr, data)
++#define DRV_ClrReg8(addr, data) CLRREG8(addr, data)
++
++#define DRV_Reg16(addr) INREG16(addr)
++#define DRV_WriteReg16(addr, data) OUTREG16(addr, data)
++#define DRV_SetReg16(addr, data) SETREG16(addr, data)
++#define DRV_ClrReg16(addr, data) CLRREG16(addr, data)
++
++#define DRV_Reg32(addr) INREG32(addr)
++#define DRV_WriteReg32(addr, data) OUTREG32(addr, data)
++#define DRV_SetReg32(addr, data) SETREG32(addr, data)
++#define DRV_ClrReg32(addr, data) CLRREG32(addr, data)
++
++// !!! DEPRECATED, WILL BE REMOVED LATER !!!
++#define DRV_Reg(addr) DRV_Reg16(addr)
++#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data)
++#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data)
++#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data)
++
++
++// ---------------------------------------------------------------------------
++// Compiler Time Deduction Macros
++// ---------------------------------------------------------------------------
++
++#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? (n) :
++#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1)
++#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2)
++#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4)
++#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8)
++#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16)
++
++#define MASK_OFFSET_ERROR (0xFFFFFFFF)
++
++#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR)
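++/* Example: MASK_OFFSET(0x10) evaluates at compile time to 4, the index
++ * of the lowest set bit; MASK_OFFSET(0) yields MASK_OFFSET_ERROR. */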
++
++
++// ---------------------------------------------------------------------------
++// Assertions
++// ---------------------------------------------------------------------------
++
++#ifndef ASSERT
++ #define ASSERT(expr) BUG_ON(!(expr))
++#endif
++
++#ifndef NOT_IMPLEMENTED
++ #define NOT_IMPLEMENTED() BUG_ON(1)
++#endif
++
++#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__)
++#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line)
++#define STATIC_ASSERT_XX(pred, line) \
++ extern char assertion_failed_at_##line[(pred) ? 1 : -1]
++
++// ---------------------------------------------------------------------------
++// Resolve Compiler Warnings
++// ---------------------------------------------------------------------------
++
++#define NOT_REFERENCED(x) { (x) = (x); }
++
++
++// ---------------------------------------------------------------------------
++// Utilities
++// ---------------------------------------------------------------------------
++
++#define MAXIMUM(A,B) (((A)>(B))?(A):(B))
++#define MINIMUM(A,B) (((A)<(B))?(A):(B))
++
++#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0])))
++#define DVT_DELAYMACRO(u4Num) \
++{ \
++ UINT32 u4Count = 0 ; \
++ for (u4Count = 0; u4Count < u4Num; u4Count++ ); \
++} \
++
++#define A68351B 0
++#define B68351B 1
++#define B68351D 2
++#define B68351E 3
++#define UNKNOWN_IC_VERSION 0xFF
++
++/* NAND driver */
++struct mtk_nand_host_hw {
++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
++ unsigned int nfi_cs_num; /* NFI_CS_NUM */
++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
++ unsigned int nand_ecc_size;
++ unsigned int nand_ecc_bytes;
++ unsigned int nand_ecc_mode;
++};
++extern struct mtk_nand_host_hw mt7621_nand_hw;
++extern unsigned int CFG_BLOCKSIZE;
++
++#endif // _MT6575_TYPEDEFS_H
++
+--- /dev/null
++++ b/drivers/mtd/nand/mtk_nand2.c
+@@ -0,0 +1,2363 @@
++/******************************************************************************
++* mtk_nand2.c - MTK NAND Flash Device Driver
++ *
++* Copyright 2009-2012 MediaTek Co.,Ltd.
++ *
++* DESCRIPTION:
++* This file provides NAND-related functions to the other drivers
++ *
++* modification history
++* ----------------------------------------
++* v3.0, 11 Feb 2010, mtk
++* ----------------------------------------
++******************************************************************************/
++#include "nand_def.h"
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/wait.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/rawnand.h>
++#include <linux/mtd/partitions.h>
++#include <linux/mtd/nand_ecc.h>
++#include <linux/dma-mapping.h>
++#include <linux/jiffies.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/time.h>
++#include <linux/mm.h>
++#include <asm/io.h>
++#include <asm/cacheflush.h>
++#include <asm/uaccess.h>
++#include <linux/miscdevice.h>
++#include "mtk_nand2.h"
++#include "nand_device_list.h"
++
++#include "bmt.h"
++#include "partition.h"
++
++unsigned int CFG_BLOCKSIZE;
++
++static int shift_on_bbt = 0;
++extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag);
++extern int nand_bbt_get(struct mtd_info *mtd, int page);
++int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page);
++
++static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
++
++#define NAND_CMD_STATUS_MULTI 0x71
++
++void show_stack(struct task_struct *tsk, unsigned long *sp);
++extern void mt_irq_set_sens(unsigned int irq, unsigned int sens);
++extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity);
++
++struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */
++struct mtk_nand_host_hw mt7621_nand_hw = {
++ .nfi_bus_width = 8,
++ .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING,
++ .nfi_cs_num = NFI_CS_NUM,
++ .nand_sec_size = 512,
++ .nand_sec_shift = 9,
++ .nand_ecc_size = 2048,
++ .nand_ecc_bytes = 32,
++ .nand_ecc_mode = NAND_ECC_HW,
++};
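++/*
++ * Geometry note (derived from the values above): nand_sec_size is
++ * 1 << nand_sec_shift (512 = 1 << 9), so a 2048-byte page is handled
++ * as four 512-byte sectors, each with 8 FDM spare bytes and its own
++ * hardware BCH parity.
++ */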
++
++
++/*******************************************************************************
++ * Global Variable Definitions
++ *******************************************************************************/
++
++#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \
++ do { \
++ DRV_WriteReg(NFI_CMD_REG16,cmd);\
++ while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\
++ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\
++ DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\
++ DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\
++ while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\
++ } while (0)
++
++//-------------------------------------------------------------------------------
++static struct NAND_CMD g_kCMD;
++static u32 g_u4ChipVer;
++bool g_bInitDone;
++static bool g_bcmdstatus;
++static u32 g_value = 0;
++static int g_page_size;
++
++BOOL g_bHwEcc = true;
++
++
++static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue
++static u8 local_buffer[4096 + 512];
++
++extern void nand_release_device(struct mtd_info *mtd);
++extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state);
++
++#if defined(MTK_NAND_BMT)
++static bmt_struct *g_bmt;
++#endif
++struct mtk_nand_host *host;
++extern struct mtd_partition g_pasStatic_Partition[];
++int part_num = NUM_PARTITIONS;
++int manu_id;
++int dev_id;
++
++/* This constant was taken from linux/nand/nand.h as of v3.14;
++ * later kernel versions appear to have removed it to save a bit of space.
++ */
++#define NAND_MAX_OOBSIZE 774
++static u8 local_oob_buf[NAND_MAX_OOBSIZE];
++
++static u8 nand_badblock_offset = 0;
++
++void nand_enable_clock(void)
++{
++ //enable_clock(MT65XX_PDN_PERI_NFI, "NAND");
++}
++
++void nand_disable_clock(void)
++{
++ //disable_clock(MT65XX_PDN_PERI_NFI, "NAND");
++}
++
++struct nand_ecclayout {
++ __u32 eccbytes;
++ __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE];
++ __u32 oobavail;
++ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE];
++};
++
++static struct nand_ecclayout *layout;
++
++static struct nand_ecclayout nand_oob_16 = {
++ .eccbytes = 8,
++ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15},
++ .oobfree = {{1, 6}, {0, 0}}
++};
++
++struct nand_ecclayout nand_oob_64 = {
++ .eccbytes = 32,
++ .eccpos = {32, 33, 34, 35, 36, 37, 38, 39,
++ 40, 41, 42, 43, 44, 45, 46, 47,
++ 48, 49, 50, 51, 52, 53, 54, 55,
++ 56, 57, 58, 59, 60, 61, 62, 63},
++ .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}}
++};
++
++struct nand_ecclayout nand_oob_128 = {
++ .eccbytes = 64,
++ .eccpos = {
++ 64, 65, 66, 67, 68, 69, 70, 71,
++ 72, 73, 74, 75, 76, 77, 78, 79,
++ 80, 81, 82, 83, 84, 85, 86, 87,
++ 88, 89, 90, 91, 92, 93, 94, 95,
++ 96, 97, 98, 99, 100, 101, 102, 103,
++ 104, 105, 106, 107, 108, 109, 110, 111,
++ 112, 113, 114, 115, 116, 117, 118, 119,
++ 120, 121, 122, 123, 124, 125, 126, 127},
++ .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}}
++};
++
++flashdev_info devinfo;
++
++void dump_nfi(void)
++{
++}
++
++void dump_ecc(void)
++{
++}
++
++u32
++nand_virt_to_phys_add(u32 va)
++{
++ u32 pageOffset = (va & (PAGE_SIZE - 1));
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ u32 pa;
++
++ if (virt_addr_valid(va))
++ return __virt_to_phys(va);
++
++ if (NULL == current) {
++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR ,current is NULL! \n");
++ return 0;
++ }
++
++ if (NULL == current->mm) {
++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm);
++ return 0;
++ }
++
++ pgd = pgd_offset(current->mm, va); /* what is tsk->mm */
++ if (pgd_none(*pgd) || pgd_bad(*pgd)) {
++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va);
++ return 0;
++ }
++
++ pmd = pmd_offset((pud_t *)pgd, va);
++ if (pmd_none(*pmd) || pmd_bad(*pmd)) {
++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va);
++ return 0;
++ }
++
++ pte = pte_offset_map(pmd, va);
++ if (pte_present(*pte)) {
++ pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset;
++ return pa;
++ }
++
++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! \n", va);
++ return 0;
++}
++EXPORT_SYMBOL(nand_virt_to_phys_add);
++
++bool
++get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo)
++{
++ u32 index;
++ for (index = 0; gen_FlashTable[index].id != 0; index++) {
++ if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) {
++ pdevinfo->id = gen_FlashTable[index].id;
++ pdevinfo->ext_id = gen_FlashTable[index].ext_id;
++ pdevinfo->blocksize = gen_FlashTable[index].blocksize;
++ pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle;
++ pdevinfo->iowidth = gen_FlashTable[index].iowidth;
++ pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting;
++ pdevinfo->advancedmode = gen_FlashTable[index].advancedmode;
++ pdevinfo->pagesize = gen_FlashTable[index].pagesize;
++ pdevinfo->sparesize = gen_FlashTable[index].sparesize;
++ pdevinfo->totalsize = gen_FlashTable[index].totalsize;
++ memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename));
++ printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id);
++
++ goto find;
++ }
++ }
++
++find:
++ if (0 == pdevinfo->id) {
++ printk(KERN_INFO "Device not found, ID: %x\n", id);
++ return false;
++ } else {
++ return true;
++ }
++}
++
++static void
++ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit)
++{
++ u32 u4ENCODESize;
++ u32 u4DECODESize;
++ u32 ecc_bit_cfg = ECC_CNFG_ECC4;
++
++ switch(ecc_bit){
++ case 4:
++ ecc_bit_cfg = ECC_CNFG_ECC4;
++ break;
++ case 8:
++ ecc_bit_cfg = ECC_CNFG_ECC8;
++ break;
++ case 10:
++ ecc_bit_cfg = ECC_CNFG_ECC10;
++ break;
++ case 12:
++ ecc_bit_cfg = ECC_CNFG_ECC12;
++ break;
++ default:
++ break;
++ }
++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
++ do {
++ } while (!DRV_Reg16(ECC_DECIDLE_REG16));
++
++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
++ do {
++ } while (!DRV_Reg32(ECC_ENCIDLE_REG32));
++
++ /* setup FDM register base */
++ DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32);
++
++ /* Sector + FDM */
++ u4ENCODESize = (hw->nand_sec_size + 8) << 3;
++ /* Sector + FDM + YAFFS2 meta data bits */
++ u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13;
++
++ /* configure ECC decoder && encoder */
++ DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT));
++
++ DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT));
++ NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL);
++}
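++/*
++ * Worked example: with nand_sec_size = 512 and ecc_bit = 4,
++ * u4ENCODESize = (512 + 8) * 8 = 4160 message bits and
++ * u4DECODESize = 4160 + 4 * 13 = 4212 bits, since this BCH
++ * configuration budgets 13 parity bits per correctable bit.
++ */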
++
++static void
++ECC_Decode_Start(void)
++{
++ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
++ ;
++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN);
++}
++
++static void
++ECC_Decode_End(void)
++{
++ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE))
++ ;
++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE);
++}
++
++static void
++ECC_Encode_Start(void)
++{
++ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE))
++ ;
++ mb();
++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN);
++}
++
++static void
++ECC_Encode_End(void)
++{
++ /* wait for device returning idle */
++ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ;
++ mb();
++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE);
++}
++
++static bool
++mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr)
++{
++ bool bRet = true;
++ u16 u2SectorDoneMask = 1 << u4SecIndex;
++ u32 u4ErrorNumDebug, i, u4ErrNum;
++ u32 timeout = 0xFFFF;
++ // int el;
++ u32 au4ErrBitLoc[6];
++ u32 u4ErrByteLoc, u4BitOffset;
++ u32 u4ErrBitLoc1th, u4ErrBitLoc2nd;
++
++ // wait for decode done
++ while (0 == (u2SectorDoneMask & DRV_Reg16(ECC_DECDONE_REG16))) {
++ timeout--;
++ if (0 == timeout)
++ return false;
++ }
++ /* We will manually correct the error bits in the last sector, not all the sectors of the page! */
++ memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc));
++ u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32);
++ u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2);
++ u4ErrNum &= 0xF;
++
++ if (u4ErrNum) {
++ if (0xF == u4ErrNum) {
++ mtd->ecc_stats.failed++;
++ bRet = false;
++ printk(KERN_ERR"mtk_nand: UnCorrectable at PageAddr=%d\n", u4PageAddr);
++ } else {
++ for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) {
++ au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i);
++ u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF;
++ if (u4ErrBitLoc1th < 0x1000) {
++ u4ErrByteLoc = u4ErrBitLoc1th / 8;
++ u4BitOffset = u4ErrBitLoc1th % 8;
++ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
++ mtd->ecc_stats.corrected++;
++ } else {
++ mtd->ecc_stats.failed++;
++ }
++ u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF;
++ if (0 != u4ErrBitLoc2nd) {
++ if (u4ErrBitLoc2nd < 0x1000) {
++ u4ErrByteLoc = u4ErrBitLoc2nd / 8;
++ u4BitOffset = u4ErrBitLoc2nd % 8;
++ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset);
++ mtd->ecc_stats.corrected++;
++ } else {
++ mtd->ecc_stats.failed++;
++ //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]);
++ }
++ }
++ }
++ }
++ if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex)))
++ bRet = false;
++ }
++ return bRet;
++}
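++/*
++ * Decoding example (hypothetical register value): an ECC_DECEL word of
++ * 0x00230012 packs two 13-bit locations, 0x012 and 0x023, i.e. bit 18
++ * (byte 2, bit 2) and bit 35 (byte 4, bit 3) of the sector are flipped
++ * and corrected in place; a location at or above 0x1000 (4096 bits =
++ * 512 bytes) falls outside the sector data and is only counted in
++ * ecc_stats.failed.
++ */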
++
++static bool
++mtk_nand_RFIFOValidSize(u16 u2Size)
++{
++ u32 timeout = 0xFFFF;
++ while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) {
++ timeout--;
++ if (0 == timeout)
++ return false;
++ }
++ return true;
++}
++
++static bool
++mtk_nand_WFIFOValidSize(u16 u2Size)
++{
++ u32 timeout = 0xFFFF;
++
++ while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) {
++ timeout--;
++ if (0 == timeout)
++ return false;
++ }
++ return true;
++}
++
++static bool
++mtk_nand_status_ready(u32 u4Status)
++{
++ u32 timeout = 0xFFFF;
++
++ while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) {
++ timeout--;
++ if (0 == timeout)
++ return false;
++ }
++ return true;
++}
++
++static bool
++mtk_nand_reset(void)
++{
++ int timeout = 0xFFFF;
++ if (DRV_Reg16(NFI_MASTERSTA_REG16)) {
++ mb();
++ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
++ while (DRV_Reg16(NFI_MASTERSTA_REG16)) {
++ timeout--;
++ if (!timeout)
++ MSG(INIT, "Wait for NFI_MASTERSTA timeout\n");
++ }
++ }
++ /* issue reset operation */
++ mb();
++ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST);
++
++ return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0);
++}
++
++static void
++mtk_nand_set_mode(u16 u2OpMode)
++{
++ u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16);
++ u2Mode &= ~CNFG_OP_MODE_MASK;
++ u2Mode |= u2OpMode;
++ DRV_WriteReg16(NFI_CNFG_REG16, u2Mode);
++}
++
++static void
++mtk_nand_set_autoformat(bool bEnable)
++{
++ if (bEnable)
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
++ else
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN);
++}
++
++static void
++mtk_nand_configure_fdm(u16 u2FDMSize)
++{
++ NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK);
++ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT);
++ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT);
++}
++
++static void
++mtk_nand_configure_lock(void)
++{
++ u32 u4WriteColNOB = 2;
++ u32 u4WriteRowNOB = 3;
++ u32 u4EraseColNOB = 0;
++ u32 u4EraseRowNOB = 3;
++ DRV_WriteReg16(NFI_LOCKANOB_REG16,
++ (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT));
++
++ if (CHIPVER_ECO_1 == g_u4ChipVer) {
++ int i;
++ for (i = 0; i < 16; ++i) {
++ DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF);
++ DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF);
++ }
++ //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0);
++ DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF);
++ DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON);
++ }
++}
++
++static bool
++mtk_nand_pio_ready(void)
++{
++ int count = 0;
++ while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) {
++ count++;
++ if (count > 0xffff) {
++ printk("PIO_DIRDY timeout\n");
++ return false;
++ }
++ }
++
++ return true;
++}
++
++static bool
++mtk_nand_set_command(u16 command)
++{
++ mb();
++ DRV_WriteReg16(NFI_CMD_REG16, command);
++ return mtk_nand_status_ready(STA_CMD_STATE);
++}
++
++static bool
++mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB)
++{
++ mb();
++ DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr);
++ DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr);
++ DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT));
++ return mtk_nand_status_ready(STA_ADDR_STATE);
++}
++
++static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
++{
++ if (ctrl & NAND_ALE) {
++ mtk_nand_set_address(dat, 0, 1, 0);
++ } else if (ctrl & NAND_CLE) {
++ mtk_nand_reset();
++ mtk_nand_set_mode(0x6000);
++ mtk_nand_set_command(dat);
++ }
++}
++
++static bool
++mtk_nand_check_RW_count(u16 u2WriteSize)
++{
++ u32 timeout = 0xFFFF;
++ u16 u2SecNum = u2WriteSize >> 9;
++
++ while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) {
++ timeout--;
++ if (0 == timeout) {
++ printk(KERN_INFO "[%s] timeout\n", __FUNCTION__);
++ return false;
++ }
++ }
++ return true;
++}
++
++static bool
++mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf)
++{
++ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
++ bool bRet = false;
++ u16 sec_num = 1 << (nand->page_shift - 9);
++ u32 col_addr = u4ColAddr;
++ u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
++ if (nand->options & NAND_BUSWIDTH_16)
++ col_addr /= 2;
++
++ if (!mtk_nand_reset())
++ goto cleanup;
++ if (g_bHwEcc) {
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ } else {
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ }
++
++ mtk_nand_set_mode(CNFG_OP_READ);
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
++ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
++
++ if (full) {
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++
++ if (g_bHwEcc)
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ else
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ } else {
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++ }
++
++ mtk_nand_set_autoformat(full);
++ if (full)
++ if (g_bHwEcc)
++ ECC_Decode_Start();
++ if (!mtk_nand_set_command(NAND_CMD_READ0))
++ goto cleanup;
++ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
++ goto cleanup;
++ if (!mtk_nand_set_command(NAND_CMD_READSTART))
++ goto cleanup;
++ if (!mtk_nand_status_ready(STA_NAND_BUSY))
++ goto cleanup;
++
++ bRet = true;
++
++cleanup:
++ return bRet;
++}
++
++static bool
++mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf)
++{
++ bool bRet = false;
++ u32 sec_num = 1 << (nand->page_shift - 9);
++ u32 colnob = 2, rownob = devinfo.addr_cycle - 2;
++ if (nand->options & NAND_BUSWIDTH_16)
++ col_addr /= 2;
++
++ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */
++ if (!mtk_nand_reset())
++ return false;
++
++ mtk_nand_set_mode(CNFG_OP_PRGM);
++
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
++
++ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT);
++
++ if (full) {
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++ if (g_bHwEcc)
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ else
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ } else {
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++ }
++
++ mtk_nand_set_autoformat(full);
++
++ if (full)
++ if (g_bHwEcc)
++ ECC_Encode_Start();
++
++ if (!mtk_nand_set_command(NAND_CMD_SEQIN))
++ goto cleanup;
++ // FIXME: handle any kind of address cycle
++ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob))
++ goto cleanup;
++
++ if (!mtk_nand_status_ready(STA_NAND_BUSY))
++ goto cleanup;
++
++ bRet = true;
++
++cleanup:
++ return bRet;
++}
++
++static bool
++mtk_nand_check_dececc_done(u32 u4SecNum)
++{
++ u32 timeout, dec_mask;
++
++ timeout = 0xffff;
++ dec_mask = (1 << u4SecNum) - 1;
++ while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0)
++ timeout--;
++ if (timeout == 0) {
++ MSG(VERIFY, "ECC_DECDONE: timeout\n");
++ return false;
++ }
++ return true;
++}
++
++static bool
++mtk_nand_mcu_read_data(u8 * buf, u32 length)
++{
++ int timeout = 0xffff;
++ u32 i;
++ u32 *buf32 = (u32 *) buf;
++ if ((u32) buf % 4 || length % 4)
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
++ else
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
++
++ //DRV_WriteReg32(NFI_STRADDR_REG32, 0);
++ mb();
++ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD);
++
++ if ((u32) buf % 4 || length % 4) {
++ for (i = 0; (i < (length)) && (timeout > 0);) {
++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
++ *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32);
++ i++;
++ } else {
++ timeout--;
++ }
++ if (0 == timeout) {
++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
++ dump_nfi();
++ return false;
++ }
++ }
++ } else {
++ for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
++ *buf32++ = DRV_Reg32(NFI_DATAR_REG32);
++ i++;
++ } else {
++ timeout--;
++ }
++ if (0 == timeout) {
++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
++ dump_nfi();
++ return false;
++ }
++ }
++ }
++ return true;
++}
++
++static bool
++mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size)
++{
++ return mtk_nand_mcu_read_data(pDataBuf, u4Size);
++}
++
++static bool
++mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length)
++{
++ u32 timeout = 0xFFFF;
++ u32 i;
++ u32 *pBuf32;
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
++ mb();
++ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR);
++ pBuf32 = (u32 *) buf;
++
++ if ((u32) buf % 4 || length % 4)
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
++ else
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
++
++ if ((u32) buf % 4 || length % 4) {
++ for (i = 0; (i < (length)) && (timeout > 0);) {
++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
++ DRV_WriteReg32(NFI_DATAW_REG32, *buf++);
++ i++;
++ } else {
++ timeout--;
++ }
++ if (0 == timeout) {
++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
++ dump_nfi();
++ return false;
++ }
++ }
++ } else {
++ for (i = 0; (i < (length >> 2)) && (timeout > 0);) {
++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) {
++ DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++);
++ i++;
++ } else {
++ timeout--;
++ }
++ if (0 == timeout) {
++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__);
++ dump_nfi();
++ return false;
++ }
++ }
++ }
++
++ return true;
++}
++
++static bool
++mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size)
++{
++ return mtk_nand_mcu_write_data(mtd, buf, size);
++}
++
++static void
++mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum)
++{
++ u32 i;
++ u32 *pBuf32 = (u32 *) pDataBuf;
++
++ if (pBuf32) {
++ for (i = 0; i < u4SecNum; ++i) {
++ *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1));
++ *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1));
++ }
++ }
++}
++
++static u8 fdm_buf[64];
++static void
++mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum)
++{
++ u32 i, j;
++ u8 checksum = 0;
++ bool empty = true;
++ struct nand_oobfree *free_entry;
++ u32 *pBuf32;
++
++ memcpy(fdm_buf, pDataBuf, u4SecNum * 8);
++
++ free_entry = layout->oobfree;
++ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) {
++ for (j = 0; j < free_entry[i].length; j++) {
++ if (pDataBuf[free_entry[i].offset + j] != 0xFF)
++ empty = false;
++ checksum ^= pDataBuf[free_entry[i].offset + j];
++ }
++ }
++
++ if (!empty) {
++ fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum;
++ }
++
++ pBuf32 = (u32 *) fdm_buf;
++ for (i = 0; i < u4SecNum; ++i) {
++ DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++);
++ DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++);
++ }
++}
++
++static void
++mtk_nand_stop_read(void)
++{
++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
++ mtk_nand_reset();
++ if (g_bHwEcc)
++ ECC_Decode_End();
++ DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
++}
++
++static void
++mtk_nand_stop_write(void)
++{
++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
++ if (g_bHwEcc)
++ ECC_Encode_End();
++ DRV_WriteReg16(NFI_INTR_EN_REG16, 0);
++}
++
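++/* Read one page: transfer each 512-byte sector over PIO, check the HW ECC decode result per sector, then fetch the FDM (OOB) data and stop the read. */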
++bool
++mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
++{
++ u8 *buf;
++ bool bRet = true;
++ struct nand_chip *nand = mtd->priv;
++ u32 u4SecNum = u4PageSize >> 9;
++
++ if (((u32) pPageBuf % 16) && local_buffer_16_align)
++ buf = local_buffer_16_align;
++ else
++ buf = pPageBuf;
++ if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) {
++ int j;
++ for (j = 0 ; j < u4SecNum; j++) {
++ if (!mtk_nand_read_page_data(mtd, buf+j*512, 512))
++ bRet = false;
++ if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1))
++ bRet = false;
++ if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr))
++ bRet = false;
++ }
++ if (!mtk_nand_status_ready(STA_NAND_BUSY))
++ bRet = false;
++
++ mtk_nand_read_fdm_data(pFDMBuf, u4SecNum);
++ mtk_nand_stop_read();
++ }
++
++ if (buf == local_buffer_16_align)
++ memcpy(pPageBuf, buf, u4PageSize);
++
++ return bRet;
++}
++
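++/* Program one page: load the FDM (OOB) registers, push the page data, issue PAGEPROG, wait for the device to go idle and check the status. */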
++int
++mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf)
++{
++ struct nand_chip *chip = mtd->priv;
++ u32 u4SecNum = u4PageSize >> 9;
++ u8 *buf;
++ u8 status;
++
++ MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr);
++
++ if (((u32) pPageBuf % 16) && local_buffer_16_align) {
++ printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf);
++ memcpy(local_buffer_16_align, pPageBuf, mtd->writesize);
++ buf = local_buffer_16_align;
++ } else
++ buf = pPageBuf;
++
++ if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) {
++ mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum);
++ (void)mtk_nand_write_page_data(mtd, buf, u4PageSize);
++ (void)mtk_nand_check_RW_count(u4PageSize);
++ mtk_nand_stop_write();
++ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ;
++ }
++
++ status = chip->waitfunc(mtd, chip);
++ if (status & NAND_STATUS_FAIL)
++ return -EIO;
++ return 0;
++}
++
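++/* Find the first and last physical block of the static partition containing 'block'; the space after the last listed partition is treated as a final partition. */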
++static int
++get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk)
++{
++ struct nand_chip *chip = mtd->priv;
++ int i;
++
++ *start_blk = 0;
++ for (i = 0; i <= part_num; i++)
++ {
++ if (i == part_num)
++ {
++			// try the remaining space after the last listed partition
++ *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1;
++ if (*start_blk <= *end_blk)
++ {
++ if ((block >= *start_blk) && (block <= *end_blk))
++ break;
++ }
++ }
++		// skip the full-size "ALL" partition entry
++ else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL)
++ {
++ continue;
++ }
++ *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1;
++ if ((block >= *start_blk) && (block <= *end_blk))
++ break;
++ *start_blk = *end_blk + 1;
++ }
++ if (*start_blk > *end_blk)
++ {
++ return -1;
++ }
++ return 0;
++}
++
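++/* Remap a logical block to a physical block within the same partition by skipping blocks marked bad in the in-memory BBT (two bits per block). */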
++static int
++block_remap(struct mtd_info *mtd, int block)
++{
++ struct nand_chip *chip = mtd->priv;
++ int start_blk, end_blk;
++ int j, block_offset;
++ int bad_block = 0;
++
++ if (chip->bbt == NULL) {
++ printk("ERROR!! no bbt table for block_remap\n");
++ return -1;
++ }
++
++ if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) {
++ printk("ERROR!! can not find start_blk and end_blk\n");
++ return -1;
++ }
++
++ block_offset = block - start_blk;
++ for (j = start_blk; j <= end_blk;j++) {
++ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) {
++ if (!block_offset)
++ break;
++ block_offset--;
++ } else {
++ bad_block++;
++ }
++ }
++ if (j <= end_blk) {
++ return j;
++ } else {
++		// no good block left in range; fall back to a bad block from the end
++ for (j = end_blk; bad_block > 0; j--)
++ {
++ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0)
++ {
++ bad_block--;
++ if (bad_block <= block_offset)
++ return j;
++ }
++ }
++ }
++
++ printk("Error!! block_remap error\n");
++ return -1;
++}
++
++int
++check_block_remap(struct mtd_info *mtd, int block)
++{
++ if (shift_on_bbt)
++ return block_remap(mtd, block);
++ else
++ return block;
++}
++EXPORT_SYMBOL(check_block_remap);
++
++
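++/* After a write failure, erase the next good block in the same partition and migrate the failed block's pages into it, recursing if the new block fails as well. */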
++static int
++write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk)
++{
++ struct nand_chip *chip = mtd->priv;
++ int i, j, to_page = 0, first_page;
++ char *buf, *oob;
++ int start_blk = 0, end_blk;
++ int mapped_block;
++ int page_per_block_bit = chip->phys_erase_shift - chip->page_shift;
++ int block = page >> page_per_block_bit;
++
++ // find next available block in the same MTD partition
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++
++ get_start_end_block(mtd, block, &start_blk, &end_blk);
++
++ buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA);
++ if (buf == NULL)
++ return -1;
++
++ oob = buf + mtd->writesize;
++ for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) {
++ if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) {
++ int status;
++ status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit);
++ if (status & NAND_STATUS_FAIL) {
++ mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift);
++ nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3);
++ } else {
++ /* good block */
++ to_page = (*to_blk) << page_per_block_bit;
++ break;
++ }
++ }
++ }
++
++ if (!to_page) {
++ kfree(buf);
++ return -1;
++ }
++
++ first_page = (page >> page_per_block_bit) << page_per_block_bit;
++ for (i = 0; i < (1 << page_per_block_bit); i++) {
++ if ((first_page + i) != page) {
++ mtk_nand_read_oob_hw(mtd, chip, (first_page+i));
++ for (j = 0; j < mtd->oobsize; j++)
++ if (chip->oob_poi[j] != (unsigned char)0xff)
++ break;
++ if (j < mtd->oobsize) {
++ mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob);
++ memset(oob, 0xff, mtd->oobsize);
++ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) {
++ int ret, new_blk = 0;
++ nand_bbt_set(mtd, to_page, 0x3);
++ ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk);
++ if (ret) {
++ kfree(buf);
++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
++ return ret;
++ }
++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
++ *to_blk = new_blk;
++ to_page = ((*to_blk) << page_per_block_bit);
++ }
++ }
++ } else {
++ memset(chip->oob_poi, 0xff, mtd->oobsize);
++ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) {
++ int ret, new_blk = 0;
++ nand_bbt_set(mtd, to_page, 0x3);
++ ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk);
++ if (ret) {
++ kfree(buf);
++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
++ return ret;
++ }
++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift);
++ *to_blk = new_blk;
++ to_page = ((*to_blk) << page_per_block_bit);
++ }
++ }
++ }
++
++ kfree(buf);
++
++ return 0;
++}
++
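++/* Write a page through the block-remapping layer (BMT or BBT shift); on failure the block is marked bad and the data is moved to the next good block. */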
++static int
++mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset,
++ int data_len, const u8 * buf, int oob_required, int page, int raw)
++{
++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
++ int block = page / page_per_block;
++ u16 page_in_block = page % page_per_block;
++ int mapped_block = block;
++
++#if defined(MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++ // write bad index into oob
++ if (mapped_block != block)
++ set_bad_index_to_oob(chip->oob_poi, block);
++ else
++ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
++#else
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
++ return NAND_STATUS_FAIL;
++ }
++#endif
++ do {
++ if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) {
++ MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
++#if defined(MTK_NAND_BMT)
++ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) {
++ MSG(INIT, "Update BMT success\n");
++ return 0;
++ } else {
++ MSG(INIT, "Update BMT fail\n");
++ return -EIO;
++ }
++#else
++ {
++ int new_blk;
++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
++ if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0)
++ {
++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
++ return NAND_STATUS_FAIL;
++ }
++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
++ break;
++ }
++#endif
++ } else
++ break;
++ } while(1);
++
++ return 0;
++}
++
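++/* Minimal cmdfunc: translate the generic NAND commands issued by the MTD core into NFI register programming. */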
++static void
++mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr)
++{
++ struct nand_chip *nand = mtd->priv;
++
++ switch (command) {
++ case NAND_CMD_SEQIN:
++ memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB));
++ g_kCMD.pDataBuf = NULL;
++ g_kCMD.u4RowAddr = page_addr;
++ g_kCMD.u4ColAddr = column;
++ break;
++
++ case NAND_CMD_PAGEPROG:
++ if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) {
++ u8 *pDataBuf = g_kCMD.pDataBuf ? g_kCMD.pDataBuf : nand->buffers->databuf;
++ mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB);
++ g_kCMD.u4RowAddr = (u32) - 1;
++ g_kCMD.u4OOBRowAddr = (u32) - 1;
++ }
++ break;
++
++ case NAND_CMD_READOOB:
++ g_kCMD.u4RowAddr = page_addr;
++ g_kCMD.u4ColAddr = column + mtd->writesize;
++ break;
++
++ case NAND_CMD_READ0:
++ g_kCMD.u4RowAddr = page_addr;
++ g_kCMD.u4ColAddr = column;
++ break;
++
++ case NAND_CMD_ERASE1:
++ nand->state=FL_ERASING;
++ (void)mtk_nand_reset();
++ mtk_nand_set_mode(CNFG_OP_ERASE);
++ (void)mtk_nand_set_command(NAND_CMD_ERASE1);
++ (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2);
++ break;
++
++ case NAND_CMD_ERASE2:
++ (void)mtk_nand_set_command(NAND_CMD_ERASE2);
++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
++ ;
++ break;
++
++ case NAND_CMD_STATUS:
++ (void)mtk_nand_reset();
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW);
++ mtk_nand_set_mode(CNFG_OP_SRD);
++ mtk_nand_set_mode(CNFG_READ_EN);
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ (void)mtk_nand_set_command(NAND_CMD_STATUS);
++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
++ mb();
++ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT));
++ g_bcmdstatus = true;
++ break;
++
++ case NAND_CMD_RESET:
++ (void)mtk_nand_reset();
++ DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN);
++ (void)mtk_nand_set_command(NAND_CMD_RESET);
++ DRV_WriteReg16(NFI_BASE+0x44, 0xF1);
++ while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN))
++ ;
++ break;
++
++ case NAND_CMD_READID:
++ mtk_nand_reset();
++ /* Disable HW ECC */
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW);
++ (void)mtk_nand_reset();
++ mb();
++ mtk_nand_set_mode(CNFG_OP_SRD);
++ (void)mtk_nand_set_command(NAND_CMD_READID);
++ (void)mtk_nand_set_address(0, 0, 1, 0);
++ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD);
++ while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE)
++ ;
++ break;
++
++ default:
++ BUG();
++ break;
++ }
++}
++
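++/* On the first chip select, derive spare-per-sector and ECC strength from the detected OOB size and program the page format accordingly. */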
++static void
++mtk_nand_select_chip(struct mtd_info *mtd, int chip)
++{
++ if ((chip == -1) && (false == g_bInitDone)) {
++ struct nand_chip *nand = mtd->priv;
++ struct mtk_nand_host *host = nand->priv;
++ struct mtk_nand_host_hw *hw = host->hw;
++ u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512);
++ u32 ecc_bit = 4;
++ u32 spare_bit = PAGEFMT_SPARE_16;
++
++ if (spare_per_sector >= 28) {
++ spare_bit = PAGEFMT_SPARE_28;
++ ecc_bit = 12;
++ spare_per_sector = 28;
++ } else if (spare_per_sector >= 27) {
++ spare_bit = PAGEFMT_SPARE_27;
++ ecc_bit = 8;
++ spare_per_sector = 27;
++ } else if (spare_per_sector >= 26) {
++ spare_bit = PAGEFMT_SPARE_26;
++ ecc_bit = 8;
++ spare_per_sector = 26;
++ } else if (spare_per_sector >= 16) {
++ spare_bit = PAGEFMT_SPARE_16;
++ ecc_bit = 4;
++ spare_per_sector = 16;
++ } else {
++ MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector);
++ ASSERT(0);
++ }
++ mtd->oobsize = spare_per_sector*(mtd->writesize/512);
++ MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector);
++ /* Setup PageFormat */
++ if (4096 == mtd->writesize) {
++ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K);
++ nand->cmdfunc = mtk_nand_command_bp;
++ } else if (2048 == mtd->writesize) {
++ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K);
++ nand->cmdfunc = mtk_nand_command_bp;
++ }
++ ECC_Config(hw,ecc_bit);
++ g_bInitDone = true;
++ }
++ switch (chip) {
++ case -1:
++ break;
++ case 0:
++ case 1:
++ /* Jun Shen, 2011.04.13 */
++ /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */
++ DRV_WriteReg16(NFI_CSEL_REG16, chip);
++ /* Jun Shen, 2011.04.13 */
++ break;
++ }
++}
++
++static uint8_t
++mtk_nand_read_byte(struct mtd_info *mtd)
++{
++ uint8_t retval = 0;
++
++ if (!mtk_nand_pio_ready()) {
++ printk("pio ready timeout\n");
++ retval = false;
++ }
++
++ if (g_bcmdstatus) {
++ retval = DRV_Reg8(NFI_DATAR_REG32);
++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK);
++ mtk_nand_reset();
++ if (g_bHwEcc) {
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ } else {
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ }
++ g_bcmdstatus = false;
++ } else
++ retval = DRV_Reg8(NFI_DATAR_REG32);
++
++ return retval;
++}
++
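++/* read_buf callback: serve data and OOB reads from the page shadowed by the current command state, re-reading the page when necessary. */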
++static void
++mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len)
++{
++ struct nand_chip *nand = (struct nand_chip *)mtd->priv;
++ struct NAND_CMD *pkCMD = &g_kCMD;
++ u32 u4ColAddr = pkCMD->u4ColAddr;
++ u32 u4PageSize = mtd->writesize;
++
++ if (u4ColAddr < u4PageSize) {
++ if ((u4ColAddr == 0) && (len >= u4PageSize)) {
++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB);
++ if (len > u4PageSize) {
++ u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB));
++ memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size);
++ }
++ } else {
++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
++ memcpy(buf, nand->buffers->databuf + u4ColAddr, len);
++ }
++ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
++ } else {
++ u32 u4Offset = u4ColAddr - u4PageSize;
++ u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB));
++ if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) {
++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB);
++ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr;
++ }
++ memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size);
++ }
++ pkCMD->u4ColAddr += len;
++}
++
++static void
++mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
++{
++ struct NAND_CMD *pkCMD = &g_kCMD;
++ u32 u4ColAddr = pkCMD->u4ColAddr;
++ u32 u4PageSize = mtd->writesize;
++ int i4Size, i;
++
++ if (u4ColAddr >= u4PageSize) {
++ u32 u4Offset = u4ColAddr - u4PageSize;
++ u8 *pOOB = pkCMD->au1OOB + u4Offset;
++ i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset));
++ for (i = 0; i < i4Size; i++) {
++ pOOB[i] &= buf[i];
++ }
++ } else {
++ pkCMD->pDataBuf = (u8 *) buf;
++ }
++
++ pkCMD->u4ColAddr += len;
++}
++
++static int
++mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required, int page)
++{
++ mtk_nand_write_buf(mtd, buf, mtd->writesize);
++ mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
++ return 0;
++}
++
++static int
++mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page)
++{
++ struct NAND_CMD *pkCMD = &g_kCMD;
++ u32 u4ColAddr = pkCMD->u4ColAddr;
++ u32 u4PageSize = mtd->writesize;
++
++ if (u4ColAddr == 0) {
++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi);
++ pkCMD->u4ColAddr += u4PageSize + mtd->oobsize;
++ }
++
++ return 0;
++}
++
++static int
++mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page)
++{
++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
++ int block = page / page_per_block;
++ u16 page_in_block = page % page_per_block;
++ int mapped_block = block;
++
++#if defined (MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block,
++ mtd->writesize, buf, chip->oob_poi))
++ return 0;
++#else
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
++ return NAND_STATUS_FAIL;
++ }
++
++ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi))
++ return 0;
++ else
++ return -EIO;
++#endif
++}
++
++int
++mtk_nand_erase_hw(struct mtd_info *mtd, int page)
++{
++ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
++
++ chip->erase(mtd, page);
++
++ return chip->waitfunc(mtd, chip);
++}
++
++static int
++mtk_nand_erase(struct mtd_info *mtd, int page)
++{
++ // get mapping
++ struct nand_chip *chip = mtd->priv;
++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
++ int page_in_block = page % page_per_block;
++ int block = page / page_per_block;
++ int mapped_block = block;
++
++#if defined(MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++#else
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
++ return NAND_STATUS_FAIL;
++ }
++#endif
++
++ do {
++ int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block);
++
++ if (status & NAND_STATUS_FAIL) {
++#if defined (MTK_NAND_BMT)
++ if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift,
++ UPDATE_ERASE_FAIL, NULL, NULL))
++ {
++ MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block);
++ return 0;
++ } else {
++ MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block);
++ return NAND_STATUS_FAIL;
++ }
++#else
++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
++ return NAND_STATUS_FAIL;
++ } else
++ return NAND_STATUS_FAIL;
++#endif
++ } else
++ break;
++ } while(1);
++
++ return 0;
++}
++
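++/* Read the raw spare area sector by sector using column addressing; when supported, the random-output (0x05/0xE0) sequence avoids re-issuing a full page read. */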
++static int
++mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len)
++{
++ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
++ u32 col_addr = 0;
++ u32 sector = 0;
++ int res = 0;
++ u32 colnob = 2, rawnob = devinfo.addr_cycle - 2;
++ int randomread = 0;
++ int read_len = 0;
++ int sec_num = 1<<(chip->page_shift-9);
++ int spare_per_sector = mtd->oobsize/sec_num;
++
++ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
++ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
++ return -EINVAL;
++ }
++ if (len > spare_per_sector)
++ randomread = 1;
++ if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) {
++ while (len > 0) {
++ read_len = min(len, spare_per_sector);
++ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // TODO: Fix this hard-code 16
++ if (!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) {
++ printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n");
++ res = -EIO;
++ goto error;
++ }
++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n");
++ res = -EIO;
++ goto error;
++ }
++ mtk_nand_check_RW_count(read_len);
++ mtk_nand_stop_read();
++ sector++;
++ len -= read_len;
++ }
++ } else {
++ col_addr = NAND_SECTOR_SIZE;
++ if (chip->options & NAND_BUSWIDTH_16)
++ col_addr /= 2;
++ if (!mtk_nand_reset())
++ goto error;
++ mtk_nand_set_mode(0x6000);
++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN);
++ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
++
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB);
++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++
++ mtk_nand_set_autoformat(false);
++
++ if (!mtk_nand_set_command(NAND_CMD_READ0))
++ goto error;
++		// FIXME: handle any number of address cycles
++ if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob))
++ goto error;
++ if (!mtk_nand_set_command(NAND_CMD_READSTART))
++ goto error;
++ if (!mtk_nand_status_ready(STA_NAND_BUSY))
++ goto error;
++ read_len = min(len, spare_per_sector);
++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
++ res = -EIO;
++ goto error;
++ }
++ sector++;
++ len -= read_len;
++ mtk_nand_stop_read();
++ while (len > 0) {
++ read_len = min(len, spare_per_sector);
++ if (!mtk_nand_set_command(0x05))
++ goto error;
++ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector);
++ if (chip->options & NAND_BUSWIDTH_16)
++ col_addr /= 2;
++ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);
++ DRV_WriteReg16(NFI_ADDRNOB_REG16, 2);
++ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT);
++ if (!mtk_nand_status_ready(STA_ADDR_STATE))
++ goto error;
++ if (!mtk_nand_set_command(0xE0))
++ goto error;
++ if (!mtk_nand_status_ready(STA_NAND_BUSY))
++ goto error;
++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) {
++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n");
++ res = -EIO;
++ goto error;
++ }
++ mtk_nand_stop_read();
++ sector++;
++ len -= read_len;
++ }
++ }
++error:
++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD);
++ return res;
++}
++
++static int
++mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len)
++{
++ struct nand_chip *chip = mtd->priv;
++ u32 col_addr = 0;
++ u32 sector = 0;
++ int write_len = 0;
++ int status;
++ int sec_num = 1<<(chip->page_shift-9);
++ int spare_per_sector = mtd->oobsize/sec_num;
++
++ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) {
++ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf);
++ return -EINVAL;
++ }
++
++ while (len > 0) {
++ write_len = min(len, spare_per_sector);
++ col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE;
++ if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL))
++ return -EIO;
++ if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len))
++ return -EIO;
++ (void)mtk_nand_check_RW_count(write_len);
++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR);
++ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG);
++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY)
++ ;
++ status = chip->waitfunc(mtd, chip);
++ if (status & NAND_STATUS_FAIL) {
++ printk(KERN_INFO "status: %d\n", status);
++ return -EIO;
++ }
++ len -= write_len;
++ sector++;
++ }
++
++ return 0;
++}
++
++static int
++mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
++{
++ int i, iter;
++ int sec_num = 1<<(chip->page_shift-9);
++ int spare_per_sector = mtd->oobsize/sec_num;
++
++ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
++
++ // copy ecc data
++ for (i = 0; i < layout->eccbytes; i++) {
++ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
++ local_oob_buf[iter] = chip->oob_poi[layout->eccpos[i]];
++ }
++
++ // copy FDM data
++ for (i = 0; i < sec_num; i++)
++ memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR);
++
++ return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize);
++}
++
++static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
++{
++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
++ int block = page / page_per_block;
++ u16 page_in_block = page % page_per_block;
++ int mapped_block = block;
++
++#if defined(MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++ // write bad index into oob
++ if (mapped_block != block)
++ set_bad_index_to_oob(chip->oob_poi, block);
++ else
++ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX);
++#else
++ if (shift_on_bbt)
++ {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
++ return NAND_STATUS_FAIL;
++ }
++#endif
++ do {
++ if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) {
++ MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block);
++#if defined(MTK_NAND_BMT)
++ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift,
++ UPDATE_WRITE_FAIL, NULL, chip->oob_poi))
++ {
++ MSG(INIT, "Update BMT success\n");
++ return 0;
++ } else {
++ MSG(INIT, "Update BMT fail\n");
++ return -EIO;
++ }
++#else
++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift);
++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3);
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, mapped_block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0)
++ return NAND_STATUS_FAIL;
++ } else {
++ return NAND_STATUS_FAIL;
++ }
++#endif
++ } else
++ break;
++ } while (1);
++
++ return 0;
++}
++
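++/* Mark a block bad in flash by clearing the first OOB byte of its first page. */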
++int
++mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset)
++{
++ struct nand_chip *chip = mtd->priv;
++ int block = (int)offset >> chip->phys_erase_shift;
++ int page = block * (1 << (chip->phys_erase_shift - chip->page_shift));
++ u8 buf[8];
++
++ memset(buf, 0xFF, 8);
++ buf[0] = 0;
++ return mtk_nand_write_oob_raw(mtd, buf, page, 8);
++}
++
++static int
++mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset)
++{
++ struct nand_chip *chip = mtd->priv;
++ int block = (int)offset >> chip->phys_erase_shift;
++ int ret;
++ int mapped_block = block;
++
++ nand_get_device(chip, mtd, FL_WRITING);
++
++#if defined(MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
++#else
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1) {
++ printk("NAND mark bad failed\n");
++ nand_release_device(mtd);
++ return NAND_STATUS_FAIL;
++ }
++ }
++ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift);
++#endif
++ nand_release_device(mtd);
++
++ return ret;
++}
++
++int
++mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page)
++{
++ int i;
++ u8 iter = 0;
++
++ int sec_num = 1<<(chip->page_shift-9);
++ int spare_per_sector = mtd->oobsize/sec_num;
++
++ if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) {
++ printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__);
++ return -EIO;
++ }
++
++	// adjust from the ECC physical layout to the memory layout
++ /*********************************************************/
++ /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */
++ /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */
++ /*********************************************************/
++
++ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize);
++ // copy ecc data
++ for (i = 0; i < layout->eccbytes; i++) {
++ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR);
++ chip->oob_poi[layout->eccpos[i]] = local_oob_buf[iter];
++ }
++
++ // copy FDM data
++ for (i = 0; i < sec_num; i++) {
++ memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR);
++ }
++
++ return 0;
++}
++
++static int
++mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
++{
++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
++ int block = page / page_per_block;
++ u16 page_in_block = page % page_per_block;
++ int mapped_block = block;
++
++#if defined (MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++ mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block);
++#else
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ if (mapped_block == -1)
++ return NAND_STATUS_FAIL;
++ // allow to read oob even if the block is bad
++ }
++ if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0)
++ return -1;
++#endif
++ return 0;
++}
++
++int
++mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs)
++{
++ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
++ int page_addr = (int)(ofs >> chip->page_shift);
++ unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
++ unsigned char oob_buf[8];
++
++ page_addr &= ~(page_per_block - 1);
++ if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) {
++ printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n");
++ return 1;
++ }
++
++ if (oob_buf[0] != 0xff) {
++ printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]);
++ // dump_nfi();
++ return 1;
++ }
++
++ return 0;
++}
++
++static int
++mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
++{
++ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
++ int block = (int)ofs >> chip->phys_erase_shift;
++ int mapped_block = block;
++ int ret;
++
++#if defined(MTK_NAND_BMT)
++ mapped_block = get_mapping_block_index(block);
++#else
++ if (shift_on_bbt) {
++ mapped_block = block_remap(mtd, block);
++ }
++#endif
++
++ ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift);
++#if defined (MTK_NAND_BMT)
++ if (ret) {
++ MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block);
++ if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) {
++ MSG(INIT, "Update BMT success\n");
++ ret = 0;
++ } else {
++ MSG(INIT, "Update BMT fail\n");
++ ret = 1;
++ }
++ }
++#endif
++
++ return ret;
++}
++
++#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
++char gacBuf[4096 + 288];
++
++static int
++mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len)
++{
++ struct nand_chip *chip = (struct nand_chip *)mtd->priv;
++ struct NAND_CMD *pkCMD = &g_kCMD;
++ u32 u4PageSize = mtd->writesize;
++ u32 *pSrc, *pDst;
++ int i;
++
++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize);
++
++ pSrc = (u32 *) buf;
++ pDst = (u32 *) gacBuf;
++ len = len / sizeof(u32);
++ for (i = 0; i < len; ++i) {
++ if (*pSrc != *pDst) {
++ MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr);
++ return -1;
++ }
++ pSrc++;
++ pDst++;
++ }
++
++ pSrc = (u32 *) chip->oob_poi;
++ pDst = (u32 *) (gacBuf + u4PageSize);
++
++ if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) {
++ // TODO: Ask Designer Why?
++ //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7]))
++ MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr);
++ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]);
++ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]);
++ return -1;
++ }
++ return 0;
++}
++#endif
++
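++/* One-time controller setup: program the system-control register at offset 0x60, enable the NFI clock, load the default access timing and, for HW ECC, configure the ECC engine, FDM size and lock. */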
++static void
++mtk_nand_init_hw(struct mtk_nand_host *host) {
++ struct mtk_nand_host_hw *hw = host->hw;
++ u32 data;
++
++ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
++ data &= ~((0x3<<18)|(0x3<<16));
++ data |= ((0x2<<18) |(0x2<<16));
++ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
++
++ MSG(INIT, "Enable NFI Clock\n");
++ nand_enable_clock();
++
++ g_bInitDone = false;
++ g_kCMD.u4OOBRowAddr = (u32) - 1;
++
++ /* Set default NFI access timing control */
++ DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing);
++ DRV_WriteReg16(NFI_CNFG_REG16, 0);
++ DRV_WriteReg16(NFI_PAGEFMT_REG16, 0);
++
++	/* Reset the state machine and data FIFO by flushing the FIFO */
++ (void)mtk_nand_reset();
++
++ /* Set the ECC engine */
++ if (hw->nand_ecc_mode == NAND_ECC_HW) {
++ MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME);
++ if (g_bHwEcc)
++ NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN);
++ ECC_Config(host->hw,4);
++ mtk_nand_configure_fdm(8);
++ mtk_nand_configure_lock();
++ }
++
++ NFI_SET_REG16(NFI_IOCON_REG16, 0x47);
++}
++
++static int mtk_nand_dev_ready(struct mtd_info *mtd)
++{
++ return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY);
++}
++
++#define FACT_BBT_BLOCK_NUM	32	// search the last 32 blocks for the factory bbt table
++#define FACT_BBT_OOB_SIGNATURE 1
++#define FACT_BBT_SIGNATURE_LEN 7
++const u8 oob_signature[] = "mtknand";
++static u8 *fact_bbt = 0;
++static u32 bbt_size = 0;
++
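++/* Check a page for the factory bad-block-table signature in its OOB area and, if it matches, load the table into fact_bbt. */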
++static int
++read_fact_bbt(struct mtd_info *mtd, unsigned int page)
++{
++ struct nand_chip *chip = mtd->priv;
++
++ // read oob
++ if (mtk_nand_read_oob_hw(mtd, chip, page)==0)
++ {
++ if (chip->oob_poi[nand_badblock_offset] != 0xFF)
++ {
++ printk("Bad Block on Page %x\n", page);
++ return -1;
++ }
++ if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0)
++ {
++ printk("compare signature failed %x\n", page);
++ return -1;
++ }
++ if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi))
++ {
++ printk("Signature matched and data read!\n");
++ memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize);
++ return 0;
++ }
++
++ }
++ printk("failed at page %x\n", page);
++ return -1;
++}
++
++static int
++load_fact_bbt(struct mtd_info *mtd)
++{
++ struct nand_chip *chip = mtd->priv;
++ int i;
++ u32 total_block;
++
++ total_block = 1 << (chip->chip_shift - chip->phys_erase_shift);
++ bbt_size = total_block >> 2;
++
++ if ((!fact_bbt) && (bbt_size))
++ fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL);
++ if (!fact_bbt)
++ return -1;
++
++ for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--)
++ {
++ if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0)
++ {
++ printk("load_fact_bbt success %d\n", i);
++ return 0;
++ }
++
++ }
++ printk("load_fact_bbt failed\n");
++ return -1;
++}
++
++static int oob_mtk_ooblayout_ecc(struct mtd_info *mtd, int section,
++				struct mtd_oob_region *oobregion)
++{
++	if (section >= (layout->eccbytes / 8)) {
++		return -ERANGE;
++	}
++	oobregion->length = 8;
++	oobregion->offset = layout->eccpos[section * 8];
++
++	return 0;
++}
++
++static int oob_mtk_ooblayout_free(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *oobregion)
++{
++ if (section >= (layout->eccbytes / 8)) {
++ return -ERANGE;
++ }
++ oobregion->offset = layout->oobfree[section].offset;
++ oobregion->length = layout->oobfree[section].length;
++
++ return 0;
++}
++
++
++static const struct mtd_ooblayout_ops oob_mtk_ops = {
++ .ecc = oob_mtk_ooblayout_ecc,
++ .free = oob_mtk_ooblayout_free,
++};
++
++static int
++mtk_nand_probe(struct platform_device *pdev)
++{
++ struct mtd_part_parser_data ppdata;
++ struct mtk_nand_host_hw *hw;
++ struct nand_chip *nand_chip;
++ struct mtd_info *mtd;
++ u8 ext_id1, ext_id2, ext_id3;
++ int err = 0;
++ int id;
++ u32 ext_id;
++ int i;
++ u32 data;
++
++ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60);
++ data &= ~((0x3<<18)|(0x3<<16));
++ data |= ((0x2<<18) |(0x2<<16));
++ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data);
++
++ hw = &mt7621_nand_hw;
++ BUG_ON(!hw);
++ /* Allocate memory for the device structure (and zero it) */
++ host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL);
++ if (!host) {
++ MSG(INIT, "mtk_nand: failed to allocate device structure.\n");
++ return -ENOMEM;
++ }
++
++	/* Derive a 16-byte-aligned pointer within the static local buffer */
++ local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16);
++ printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align);
++ host->hw = hw;
++
++ /* init mtd data structure */
++ nand_chip = &host->nand_chip;
++ nand_chip->priv = host; /* link the private data structures */
++
++ mtd = host->mtd = &nand_chip->mtd;
++ mtd->priv = nand_chip;
++ mtd->owner = THIS_MODULE;
++ mtd->name = "MT7621-NAND";
++
++ hw->nand_ecc_mode = NAND_ECC_HW;
++
++ /* Set address of NAND IO lines */
++ nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32;
++ nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32;
++ nand_chip->chip_delay = 20; /* 20us command delay time */
++ nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */
++ nand_chip->ecc.strength = 1;
++ nand_chip->read_byte = mtk_nand_read_byte;
++ nand_chip->read_buf = mtk_nand_read_buf;
++ nand_chip->write_buf = mtk_nand_write_buf;
++#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
++ nand_chip->verify_buf = mtk_nand_verify_buf;
++#endif
++ nand_chip->select_chip = mtk_nand_select_chip;
++ nand_chip->dev_ready = mtk_nand_dev_ready;
++ nand_chip->cmdfunc = mtk_nand_command_bp;
++ nand_chip->ecc.read_page = mtk_nand_read_page_hwecc;
++ nand_chip->ecc.write_page = mtk_nand_write_page_hwecc;
++
++ mtd_set_ooblayout(mtd, &oob_mtk_ops);
++ nand_chip->ecc.size = hw->nand_ecc_size; //2048
++ nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32
++
++ // For BMT, we need to revise driver architecture
++ nand_chip->write_page = mtk_nand_write_page;
++ nand_chip->ecc.write_oob = mtk_nand_write_oob;
++ nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device().
++ nand_chip->erase_mtk = mtk_nand_erase;
++ nand_chip->read_page = mtk_nand_read_page;
++ nand_chip->ecc.read_oob = mtk_nand_read_oob;
++ nand_chip->block_bad = mtk_nand_block_bad;
++ nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
++
++ //Qwert:Add for Uboot
++ mtk_nand_init_hw(host);
++ /* Select the device */
++ nand_chip->select_chip(mtd, NFI_DEFAULT_CS);
++
++ /*
++ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
++ * after power-up
++ */
++ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
++
++ memset(&devinfo, 0 , sizeof(flashdev_info));
++
++ /* Send the command for reading device ID */
++
++ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
++
++ /* Read manufacturer and device IDs */
++ manu_id = nand_chip->read_byte(mtd);
++ dev_id = nand_chip->read_byte(mtd);
++ id = dev_id | (manu_id << 8);
++ ext_id1 = nand_chip->read_byte(mtd);
++ ext_id2 = nand_chip->read_byte(mtd);
++ ext_id3 = nand_chip->read_byte(mtd);
++ ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3;
++ if (!get_device_info(id, ext_id, &devinfo)) {
++ u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F;
++ MSG(INIT, "Not Support this Device! \r\n");
++ memset(&devinfo, 0 , sizeof(flashdev_info));
++ MSG(INIT, "chip_mode=%08X\n",chip_mode);
++
++ /* apply bootstrap first */
++ devinfo.addr_cycle = 5;
++ devinfo.iowidth = 8;
++
++ switch (chip_mode) {
++ case 10:
++ devinfo.pagesize = 2048;
++ devinfo.sparesize = 128;
++ devinfo.totalsize = 128;
++ devinfo.blocksize = 128;
++ break;
++ case 11:
++ devinfo.pagesize = 4096;
++ devinfo.sparesize = 128;
++ devinfo.totalsize = 1024;
++ devinfo.blocksize = 256;
++ break;
++ case 12:
++ devinfo.pagesize = 4096;
++ devinfo.sparesize = 224;
++ devinfo.totalsize = 2048;
++ devinfo.blocksize = 512;
++ break;
++ default:
++ case 1:
++ devinfo.pagesize = 2048;
++ devinfo.sparesize = 64;
++ devinfo.totalsize = 128;
++ devinfo.blocksize = 128;
++ break;
++ }
++
++ devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING;
++ devinfo.devciename[0] = 'U';
++ devinfo.advancedmode = 0;
++ }
++ mtd->writesize = devinfo.pagesize;
++ mtd->erasesize = (devinfo.blocksize<<10);
++ mtd->oobsize = devinfo.sparesize;
++
++ nand_chip->chipsize = (devinfo.totalsize<<20);
++ nand_chip->page_shift = ffs(mtd->writesize) - 1;
++ nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1;
++ nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1;
++ nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;//0x1C;//ffs(nand_chip->chipsize) - 1;
++ nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl;
++
++ /* allocate buffers or call select_chip here or a bit earlier*/
++ {
++ struct nand_buffers *nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize + mtd->oobsize * 3, GFP_KERNEL);
++ if (!nbuf) {
++ return -ENOMEM;
++ }
++ nbuf->ecccalc = (uint8_t *)(nbuf + 1);
++ nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
++ nbuf->databuf = nbuf->ecccode + mtd->oobsize;
++
++ nand_chip->buffers = nbuf;
++ nand_chip->options |= NAND_OWN_BUFFERS;
++ }
++
++ nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize;
++ nand_chip->badblockpos = 0;
++
++ if (devinfo.pagesize == 4096)
++ layout = &nand_oob_128;
++ else if (devinfo.pagesize == 2048)
++ layout = &nand_oob_64;
++ else if (devinfo.pagesize == 512)
++ layout = &nand_oob_16;
++
++ layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE);
++ for (i = 0; i < layout->eccbytes; i++)
++ layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i;
++
++ MSG(INIT, "Support this Device in MTK table! %x \r\n", id);
++ hw->nfi_bus_width = devinfo.iowidth;
++ DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting);
++
++ /* 16-bit bus width */
++ if (hw->nfi_bus_width == 16) {
++ MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME);
++ nand_chip->options |= NAND_BUSWIDTH_16;
++ }
++ mtd->oobsize = devinfo.sparesize;
++ hw->nfi_cs_num = 1;
++
++	/* Scan to find existence of the device */
++ if (nand_scan(mtd, hw->nfi_cs_num)) {
++ MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME);
++ err = -ENXIO;
++ goto out;
++ }
++
++ g_page_size = mtd->writesize;
++ platform_set_drvdata(pdev, host);
++ if (hw->nfi_bus_width == 16) {
++ NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN);
++ }
++
++ nand_chip->select_chip(mtd, 0);
++#if defined(MTK_NAND_BMT)
++ nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift;
++#endif
++ mtd->size = nand_chip->chipsize;
++
++ CFG_BLOCKSIZE = mtd->erasesize;
++
++#if defined(MTK_NAND_BMT)
++ if (!g_bmt) {
++ if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) {
++ MSG(INIT, "Error: init bmt failed\n");
++ return 0;
++ }
++ }
++#endif
++
++ nand_set_flash_node(nand_chip, pdev->dev.of_node);
++ err = mtd_device_parse_register(mtd, probe_types, &ppdata,
++ NULL, 0);
++ if (!err) {
++ MSG(INIT, "[mtk_nand] probe successfully!\n");
++ nand_disable_clock();
++ shift_on_bbt = 1;
++ if (load_fact_bbt(mtd) == 0) {
++ int i;
++ for (i = 0; i < 0x100; i++)
++ nand_chip->bbt[i] |= fact_bbt[i];
++ }
++
++ return err;
++ }
++
++out:
++ MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err);
++ nand_release(mtd);
++ platform_set_drvdata(pdev, NULL);
++ if ( NULL != nand_chip->buffers) {
++ kfree(nand_chip->buffers);
++ }
++ kfree(host);
++ nand_disable_clock();
++ return err;
++}
++
++static int
++mtk_nand_remove(struct platform_device *pdev)
++{
++ struct mtk_nand_host *host = platform_get_drvdata(pdev);
++ struct mtd_info *mtd = host->mtd;
++ struct nand_chip *nand_chip = &host->nand_chip;
++
++ nand_release(mtd);
++ if ( NULL != nand_chip->buffers) {
++ kfree(nand_chip->buffers);
++ }
++ kfree(host);
++ nand_disable_clock();
++
++ return 0;
++}
++
++static const struct of_device_id mt7621_nand_match[] = {
++ { .compatible = "mtk,mt7621-nand" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, mt7621_nand_match);
++
++static struct platform_driver mtk_nand_driver = {
++ .probe = mtk_nand_probe,
++ .remove = mtk_nand_remove,
++ .driver = {
++ .name = "MT7621-NAND",
++ .owner = THIS_MODULE,
++ .of_match_table = mt7621_nand_match,
++ },
++};
++
++static int __init
++mtk_nand_init(void)
++{
++ printk("MediaTek Nand driver init, version %s\n", VERSION);
++
++ return platform_driver_register(&mtk_nand_driver);
++}
++
++static void __exit
++mtk_nand_exit(void)
++{
++ platform_driver_unregister(&mtk_nand_driver);
++}
++
++module_init(mtk_nand_init);
++module_exit(mtk_nand_exit);
++MODULE_LICENSE("GPL");
+--- /dev/null
++++ b/drivers/mtd/nand/mtk_nand2.h
+@@ -0,0 +1,452 @@
++#ifndef __MTK_NAND_H
++#define __MTK_NAND_H
++
++#define RALINK_NAND_CTRL_BASE 0xBE003000
++#define RALINK_SYSCTL_BASE 0xBE000000
++#define RALINK_NANDECC_CTRL_BASE 0xBE003800
++/*******************************************************************************
++ * NFI Register Definition
++ *******************************************************************************/
++
++#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000))
++#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004))
++#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008))
++#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C))
++#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010))
++#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014))
++
++#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020))
++
++#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030))
++#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034))
++#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038))
++
++#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040))
++
++#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050))
++#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054))
++#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058))
++
++#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060))
++#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064))
++#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068))
++
++#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070))
++
++#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080))
++#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084))
++
++#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090))
++#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094))
++
++#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0))
++#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4))
++
++#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100))
++#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104))
++#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108))
++#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110))
++#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114))
++#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118))
++#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C))
++#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120))
++#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124))
++#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128))
++#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C))
++#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130))
++#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134))
++#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138))
++#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C))
++#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140))
++#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144))
++#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148))
++#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C))
++#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150))
++#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154))
++#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158))
++#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C))
++#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160))
++#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164))
++#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168))
++#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C))
++#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170))
++#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174))
++#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178))
++#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C))
++#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180))
++#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184))
++#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188))
++#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C))
++
++#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190))
++#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194))
++#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198))
++#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C))
++#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210))
++
++
++/*******************************************************************************
++ * NFI Register Field Definition
++ *******************************************************************************/
++
++/* NFI_CNFG */
++#define CNFG_AHB (0x0001)
++#define CNFG_READ_EN (0x0002)
++#define CNFG_DMA_BURST_EN (0x0004)
++#define CNFG_BYTE_RW (0x0040)
++#define CNFG_HW_ECC_EN (0x0100)
++#define CNFG_AUTO_FMT_EN (0x0200)
++#define CNFG_OP_IDLE (0x0000)
++#define CNFG_OP_READ (0x1000)
++#define CNFG_OP_SRD (0x2000)
++#define CNFG_OP_PRGM (0x3000)
++#define CNFG_OP_ERASE (0x4000)
++#define CNFG_OP_RESET (0x5000)
++#define CNFG_OP_CUST (0x6000)
++#define CNFG_OP_MODE_MASK (0x7000)
++#define CNFG_OP_MODE_SHIFT (12)
++
++/* NFI_PAGEFMT */
++#define PAGEFMT_512 (0x0000)
++#define PAGEFMT_2K (0x0001)
++#define PAGEFMT_4K (0x0002)
++
++#define PAGEFMT_PAGE_MASK (0x0003)
++
++#define PAGEFMT_DBYTE_EN (0x0008)
++
++#define PAGEFMT_SPARE_16 (0x0000)
++#define PAGEFMT_SPARE_26 (0x0001)
++#define PAGEFMT_SPARE_27 (0x0002)
++#define PAGEFMT_SPARE_28 (0x0003)
++#define PAGEFMT_SPARE_MASK (0x0030)
++#define PAGEFMT_SPARE_SHIFT (4)
++
++#define PAGEFMT_FDM_MASK (0x0F00)
++#define PAGEFMT_FDM_SHIFT (8)
++
++#define PAGEFMT_FDM_ECC_MASK (0xF000)
++#define PAGEFMT_FDM_ECC_SHIFT (12)
++
++/* NFI_CON */
++#define CON_FIFO_FLUSH (0x0001)
++#define CON_NFI_RST (0x0002)
++#define CON_NFI_SRD (0x0010)
++
++#define CON_NFI_NOB_MASK (0x0060)
++#define CON_NFI_NOB_SHIFT (5)
++
++#define CON_NFI_BRD (0x0100)
++#define CON_NFI_BWR (0x0200)
++
++#define CON_NFI_SEC_MASK (0xF000)
++#define CON_NFI_SEC_SHIFT (12)
++
++/* NFI_ACCCON */
++#define ACCCON_SETTING ()
++
++/* NFI_INTR_EN */
++#define INTR_RD_DONE_EN (0x0001)
++#define INTR_WR_DONE_EN (0x0002)
++#define INTR_RST_DONE_EN (0x0004)
++#define INTR_ERASE_DONE_EN (0x0008)
++#define INTR_BSY_RTN_EN (0x0010)
++#define INTR_ACC_LOCK_EN (0x0020)
++#define INTR_AHB_DONE_EN (0x0040)
++#define INTR_ALL_INTR_DE (0x0000)
++#define INTR_ALL_INTR_EN (0x007F)
++
++/* NFI_INTR */
++#define INTR_RD_DONE (0x0001)
++#define INTR_WR_DONE (0x0002)
++#define INTR_RST_DONE (0x0004)
++#define INTR_ERASE_DONE (0x0008)
++#define INTR_BSY_RTN (0x0010)
++#define INTR_ACC_LOCK (0x0020)
++#define INTR_AHB_DONE (0x0040)
++
++/* NFI_ADDRNOB */
++#define ADDR_COL_NOB_MASK (0x0003)
++#define ADDR_COL_NOB_SHIFT (0)
++#define ADDR_ROW_NOB_MASK (0x0030)
++#define ADDR_ROW_NOB_SHIFT (4)
++
++/* NFI_STA */
++#define STA_READ_EMPTY (0x00001000)
++#define STA_ACC_LOCK (0x00000010)
++#define STA_CMD_STATE (0x00000001)
++#define STA_ADDR_STATE (0x00000002)
++#define STA_DATAR_STATE (0x00000004)
++#define STA_DATAW_STATE (0x00000008)
++
++#define STA_NAND_FSM_MASK (0x1F000000)
++#define STA_NAND_BUSY (0x00000100)
++#define STA_NAND_BUSY_RETURN (0x00000200)
++#define STA_NFI_FSM_MASK (0x000F0000)
++#define STA_NFI_OP_MASK (0x0000000F)
++
++/* NFI_FIFOSTA */
++#define FIFO_RD_EMPTY (0x0040)
++#define FIFO_RD_FULL (0x0080)
++#define FIFO_WR_FULL (0x8000)
++#define FIFO_WR_EMPTY (0x4000)
++#define FIFO_RD_REMAIN(x) (0x1F&(x))
++#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8)
++
++/* NFI_ADDRCNTR */
++#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12)
++#define ADDRCNTR_OFFSET(x) (0x03FF&(x))
++
++/* NFI_LOCK */
++#define NFI_LOCK_ON (0x0001)
++
++/* NFI_LOCKANOB */
++#define PROG_RADD_NOB_MASK (0x7000)
++#define PROG_RADD_NOB_SHIFT (12)
++#define PROG_CADD_NOB_MASK (0x0300)
++#define PROG_CADD_NOB_SHIFT (8)
++#define ERASE_RADD_NOB_MASK (0x0070)
++#define ERASE_RADD_NOB_SHIFT (4)
++#define ERASE_CADD_NOB_MASK (0x0007)
++#define ERASE_CADD_NOB_SHIFT (0)
++
++/*******************************************************************************
++ * ECC Register Definition
++ *******************************************************************************/
++
++#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000))
++#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004))
++#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008))
++#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C))
++#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010))
++#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014))
++#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018))
++#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C))
++#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020))
++#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024))
++#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028))
++#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C))
++
++#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100))
++#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104))
++#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108))
++#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C))
++#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110))
++#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114))
++#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118))
++#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C))
++#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120))
++#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124))
++#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128))
++#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C))
++#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130))
++#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134))
++#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138))
++#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C))
++#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140))
++#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144))
++#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148))
++#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C))
++
++/*******************************************************************************
++ * ECC register definition
++ *******************************************************************************/
++/* ECC_ENCON */
++#define ENC_EN (0x0001)
++#define ENC_DE (0x0000)
++
++/* ECC_ENCCNFG */
++#define ECC_CNFG_ECC4 (0x0000)
++#define ECC_CNFG_ECC6 (0x0001)
++#define ECC_CNFG_ECC8 (0x0002)
++#define ECC_CNFG_ECC10 (0x0003)
++#define ECC_CNFG_ECC12 (0x0004)
++#define ECC_CNFG_ECC_MASK (0x00000007)
++
++#define ENC_CNFG_NFI (0x0010)
++#define ENC_CNFG_MODE_MASK (0x0010)
++
++#define ENC_CNFG_META6 (0x10300000)
++#define ENC_CNFG_META8 (0x10400000)
++
++#define ENC_CNFG_MSG_MASK (0x1FFF0000)
++#define ENC_CNFG_MSG_SHIFT (0x10)
++
++/* ECC_ENCIDLE */
++#define ENC_IDLE (0x0001)
++
++/* ECC_ENCSTA */
++#define STA_FSM (0x001F)
++#define STA_COUNT_PS (0xFF10)
++#define STA_COUNT_MS (0x3FFF0000)
++
++/* ECC_ENCIRQEN */
++#define ENC_IRQEN (0x0001)
++
++/* ECC_ENCIRQSTA */
++#define ENC_IRQSTA (0x0001)
++
++/* ECC_DECCON */
++#define DEC_EN (0x0001)
++#define DEC_DE (0x0000)
++
++/* ECC_DECCNFG */
++#define DEC_CNFG_ECC4 (0x0000)
++//#define DEC_CNFG_ECC6 (0x0001)
++//#define DEC_CNFG_ECC12 (0x0002)
++#define DEC_CNFG_NFI (0x0010)
++//#define DEC_CNFG_META6 (0x10300000)
++//#define DEC_CNFG_META8 (0x10400000)
++
++#define DEC_CNFG_FER (0x01000)
++#define DEC_CNFG_EL (0x02000)
++#define DEC_CNFG_CORRECT (0x03000)
++#define DEC_CNFG_TYPE_MASK (0x03000)
++
++#define DEC_CNFG_EMPTY_EN (0x80000000)
++
++#define DEC_CNFG_CODE_MASK (0x1FFF0000)
++#define DEC_CNFG_CODE_SHIFT (0x10)
++
++/* ECC_DECIDLE */
++#define DEC_IDLE (0x0001)
++
++/* ECC_DECFER */
++#define DEC_FER0 (0x0001)
++#define DEC_FER1 (0x0002)
++#define DEC_FER2 (0x0004)
++#define DEC_FER3 (0x0008)
++#define DEC_FER4 (0x0010)
++#define DEC_FER5 (0x0020)
++#define DEC_FER6 (0x0040)
++#define DEC_FER7 (0x0080)
++
++/* ECC_DECENUM */
++#define ERR_NUM0 (0x0000000F)
++#define ERR_NUM1 (0x000000F0)
++#define ERR_NUM2 (0x00000F00)
++#define ERR_NUM3 (0x0000F000)
++#define ERR_NUM4 (0x000F0000)
++#define ERR_NUM5 (0x00F00000)
++#define ERR_NUM6 (0x0F000000)
++#define ERR_NUM7 (0xF0000000)
++
++/* ECC_DECDONE */
++#define DEC_DONE0 (0x0001)
++#define DEC_DONE1 (0x0002)
++#define DEC_DONE2 (0x0004)
++#define DEC_DONE3 (0x0008)
++#define DEC_DONE4 (0x0010)
++#define DEC_DONE5 (0x0020)
++#define DEC_DONE6 (0x0040)
++#define DEC_DONE7 (0x0080)
++
++/* ECC_DECIRQEN */
++#define DEC_IRQEN (0x0001)
++
++/* ECC_DECIRQSTA */
++#define DEC_IRQSTA (0x0001)
++
++#define CHIPVER_ECO_1 (0x8a00)
++#define CHIPVER_ECO_2 (0x8a01)
++
++//#define NAND_PFM
++
++/*******************************************************************************
++ * Data Structure Definition
++ *******************************************************************************/
++struct mtk_nand_host
++{
++ struct nand_chip nand_chip;
++ struct mtd_info *mtd;
++ struct mtk_nand_host_hw *hw;
++};
++
++struct NAND_CMD
++{
++ u32 u4ColAddr;
++ u32 u4RowAddr;
++ u32 u4OOBRowAddr;
++ u8 au1OOB[288];
++ u8* pDataBuf;
++#ifdef NAND_PFM
++ u32 pureReadOOB;
++ u32 pureReadOOBNum;
++#endif
++};
++
++/*
++ * ECC layout control structure. Exported to userspace for
++ * diagnosis and to allow creation of raw images
++struct nand_ecclayout {
++ uint32_t eccbytes;
++ uint32_t eccpos[64];
++ uint32_t oobavail;
++ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
++};
++*/
++#define __DEBUG_NAND 1 /* Debug information on/off */
++
++/* Debug message event */
++#define DBG_EVT_NONE 0x00000000 /* No event */
++#define DBG_EVT_INIT 0x00000001 /* Initial related event */
++#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */
++#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */
++#define DBG_EVT_READ 0x00000008 /* Read related event */
++#define DBG_EVT_WRITE 0x00000010 /* Write related event */
++#define DBG_EVT_ERASE 0x00000020 /* Erase related event */
++#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */
++#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */
++
++#define DBG_EVT_ALL 0xffffffff
++
++#define DBG_EVT_MASK (DBG_EVT_INIT)
++
++#if __DEBUG_NAND
++#define MSG(evt, fmt, args...) \
++do { \
++ if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \
++ printk(fmt, ##args); \
++ } \
++} while(0)
++
++#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__)
++#else
++#define MSG(evt, fmt, args...) do{}while(0)
++#define MSG_FUNC_ENTRY(f) do{}while(0)
++#endif
++
++#define RAMDOM_READ 1<<0
++#define CACHE_READ 1<<1
++
++typedef struct
++{
++	u16 id;		// device id + manufacturer id
++ u32 ext_id;
++ u8 addr_cycle;
++ u8 iowidth;
++ u16 totalsize;
++ u16 blocksize;
++ u16 pagesize;
++ u16 sparesize;
++ u32 timmingsetting;
++ char devciename[14];
++ u32 advancedmode; //
++}flashdev_info,*pflashdev_info;
++
++/* NAND driver */
++#if 0
++struct mtk_nand_host_hw {
++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
++ unsigned int nfi_cs_num; /* NFI_CS_NUM */
++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
++ unsigned int nand_ecc_size;
++ unsigned int nand_ecc_bytes;
++ unsigned int nand_ecc_mode;
++};
++extern struct mtk_nand_host_hw mt7621_nand_hw;
++extern u32 CFG_BLOCKSIZE;
++#endif
++#endif
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -48,7 +48,7 @@
+ #include <linux/mtd/partitions.h>
+ #include <linux/of.h>
+
+-static int nand_get_device(struct mtd_info *mtd, int new_state);
++int nand_get_device(struct mtd_info *mtd, int new_state);
+
+ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops);
+@@ -240,7 +240,7 @@ static int check_offs_len(struct mtd_inf
+ *
+ * Release chip lock and wake up anyone waiting on the device.
+ */
+-static void nand_release_device(struct mtd_info *mtd)
++void nand_release_device(struct mtd_info *mtd)
+ {
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+@@ -963,7 +963,7 @@ static void panic_nand_get_device(struct
+ *
+ * Get the device and lock it for exclusive access
+ */
+-static int
++int
+ nand_get_device(struct mtd_info *mtd, int new_state)
+ {
+ struct nand_chip *chip = mtd_to_nand(mtd);
+--- a/drivers/mtd/nand/nand_bbt.c
++++ b/drivers/mtd/nand/nand_bbt.c
+@@ -1215,6 +1215,25 @@ err:
+ return res;
+ }
+
++void nand_bbt_set(struct mtd_info *mtd, int page, int flag)
++{
++ struct nand_chip *this = mtd->priv;
++ int block;
++
++ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
++ this->bbt[block >> 3] &= ~(0x03 << (block & 0x6));
++ this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6);
++}
++
++int nand_bbt_get(struct mtd_info *mtd, int page)
++{
++ struct nand_chip *this = mtd->priv;
++ int block;
++
++ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1));
++ return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
++}
++
+ /**
+ * nand_update_bbt - update bad block table(s)
+ * @mtd: MTD device structure
+--- /dev/null
++++ b/drivers/mtd/nand/nand_def.h
+@@ -0,0 +1,123 @@
++#ifndef __NAND_DEF_H__
++#define __NAND_DEF_H__
++
++#define VERSION "v2.1 Fix AHB virt2phys error"
++#define MODULE_NAME "# MTK NAND #"
++#define PROCNAME "driver/nand"
++
++#undef TESTTIME
++//#define __UBOOT_NAND__ 1
++#define __KERNEL_NAND__ 1
++//#define __PRELOADER_NAND__ 1
++//#define PMT 1
++//#define _MTK_NAND_DUMMY_DRIVER
++//#define CONFIG_BADBLOCK_CHECK 1
++//#ifdef CONFIG_BADBLOCK_CHECK
++//#define MTK_NAND_BMT 1
++//#endif
++#define ECC_ENABLE 1
++#define MANUAL_CORRECT 1
++//#define __INTERNAL_USE_AHB_MODE__ (0)
++#define SKIP_BAD_BLOCK
++#define FACT_BBT
++
++#ifndef NAND_OTP_SUPPORT
++#define NAND_OTP_SUPPORT 0
++#endif
++
++/*******************************************************************************
++ * Macro definition
++ *******************************************************************************/
++//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value)))
++//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value)))
++//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value))))
++//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value))))
++
++#if defined (__KERNEL_NAND__)
++#define NFI_SET_REG32(reg, value) \
++do { \
++ g_value = (DRV_Reg32(reg) | (value));\
++ DRV_WriteReg32(reg, g_value); \
++} while(0)
++
++#define NFI_SET_REG16(reg, value) \
++do { \
++ g_value = (DRV_Reg16(reg) | (value));\
++ DRV_WriteReg16(reg, g_value); \
++} while(0)
++
++#define NFI_CLN_REG32(reg, value) \
++do { \
++ g_value = (DRV_Reg32(reg) & (~(value)));\
++ DRV_WriteReg32(reg, g_value); \
++} while(0)
++
++#define NFI_CLN_REG16(reg, value) \
++do { \
++ g_value = (DRV_Reg16(reg) & (~(value)));\
++ DRV_WriteReg16(reg, g_value); \
++} while(0)
++#endif
++
++#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state)
++#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY))
++
++
++#define NAND_SECTOR_SIZE (512)
++#define OOB_PER_SECTOR (16)
++#define OOB_AVAI_PER_SECTOR (8)
++
++#ifndef PART_SIZE_BMTPOOL
++#define BMT_POOL_SIZE (80)
++#else
++#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL)
++#endif
++
++#define PMT_POOL_SIZE (2)
++
++#define TIMEOUT_1 0x1fff
++#define TIMEOUT_2 0x8ff
++#define TIMEOUT_3 0xffff
++#define TIMEOUT_4 0xffff//5000 //PIO
++
++
++/* temporary definitions */
++#if !defined (__KERNEL_NAND__)
++#define KERN_INFO
++#define KERN_WARNING
++#define KERN_ERR
++#define PAGE_SIZE (4096)
++#endif
++#define AddStorageTrace //AddStorageTrace
++#define STORAGE_LOGGER_MSG_NAND 0
++#define NFI_BASE RALINK_NAND_CTRL_BASE
++#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE
++
++#ifdef __INTERNAL_USE_AHB_MODE__
++#define MT65xx_POLARITY_LOW 0
++#define MT65XX_PDN_PERI_NFI 0
++#define MT65xx_EDGE_SENSITIVE 0
++#define MT6575_NFI_IRQ_ID (58)
++#endif
++
++#if defined (__KERNEL_NAND__)
++#define RALINK_REG(x) (*((volatile u32 *)(x)))
++#define __virt_to_phys(x) virt_to_phys((volatile void*)x)
++#else
++#define CONFIG_MTD_NAND_VERIFY_WRITE (1)
++#define printk printf
++#define ra_dbg printf
++#define BUG() //BUG()
++#define BUG_ON(x) //BUG_ON()
++#define NUM_PARTITIONS 1
++#endif
++
++#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333)
++
++// U-Boot only supports 1 CS
++#define NFI_CS_NUM (1)
++#define NFI_DEFAULT_CS (0)
++
++#include "mt6575_typedefs.h"
++
++#endif /* __NAND_DEF_H__ */
+--- /dev/null
++++ b/drivers/mtd/nand/nand_device_list.h
+@@ -0,0 +1,56 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ */
++/* MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#ifndef __NAND_DEVICE_LIST_H__
++#define __NAND_DEVICE_LIST_H__
++
++static const flashdev_info gen_FlashTable[]={
++ {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0},
++ {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0},
++ {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0},
++ {0x2CDA, 0x909506, 5, 8, 256, 128, 2048, 64, 0x30C77fff, "MT29F2G08ABAE", 0},
++ {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
++ {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
++ {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
++ {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
++ {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
++ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
++ {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0},
++ {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0},
++ {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0},
++};
++
++
++#endif
+--- /dev/null
++++ b/drivers/mtd/nand/partition.h
+@@ -0,0 +1,115 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ */
++/* MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/rawnand.h>
++#include <linux/mtd/partitions.h>
++
++#define RECONFIG_PARTITION_SIZE 1
++
++#define MTD_BOOT_PART_SIZE 0x80000
++#define MTD_CONFIG_PART_SIZE 0x20000
++#define MTD_FACTORY_PART_SIZE 0x20000
++
++extern unsigned int CFG_BLOCKSIZE;
++#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2)
++#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2)
++#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1)
++
++/*=======================================================================*/
++/* NAND PARTITION Mapping */
++/*=======================================================================*/
++//#ifdef CONFIG_MTD_PARTITIONS
++static struct mtd_partition g_pasStatic_Partition[] = {
++ {
++ name: "ALL",
++ size: MTDPART_SIZ_FULL,
++ offset: 0,
++ },
++ /* Put your own partition definitions here */
++ {
++ name: "Bootloader",
++ size: MTD_BOOT_PART_SIZE,
++ offset: 0,
++ }, {
++ name: "Config",
++ size: MTD_CONFIG_PART_SIZE,
++ offset: MTDPART_OFS_APPEND
++ }, {
++ name: "Factory",
++ size: MTD_FACTORY_PART_SIZE,
++ offset: MTDPART_OFS_APPEND
++#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
++ }, {
++ name: "Kernel",
++ size: MTD_KERN_PART_SIZE,
++ offset: MTDPART_OFS_APPEND,
++ }, {
++ name: "RootFS",
++ size: MTD_ROOTFS_PART_SIZE,
++ offset: MTDPART_OFS_APPEND,
++#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING
++ }, {
++ name: "Kernel_RootFS",
++ size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE,
++ offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE,
++#endif
++#else //CONFIG_RT2880_ROOTFS_IN_RAM
++ }, {
++ name: "Kernel",
++ size: 0x10000,
++ offset: MTDPART_OFS_APPEND,
++#endif
++#ifdef CONFIG_DUAL_IMAGE
++ }, {
++ name: "Kernel2",
++ size: MTD_KERN2_PART_SIZE,
++ offset: MTD_KERN2_PART_OFFSET,
++#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH
++ }, {
++ name: "RootFS2",
++ size: MTD_ROOTFS2_PART_SIZE,
++ offset: MTD_ROOTFS2_PART_OFFSET,
++#endif
++#endif
++ }
++
++};
++
++#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition)
++extern int part_num; // = NUM_PARTITIONS;
++//#endif
++#undef RECONFIG_PARTITION_SIZE
++
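
The nand_bbt_set()/nand_bbt_get() helpers added to nand_bbt.c above keep two status bits per eraseblock in chip->bbt; the page-to-block conversion there yields twice the block index, which is why the byte is picked with ">> 3" and the bit pair with "& 0x6". Below is a minimal stand-alone sketch of the same 2-bits-per-block packing, indexed directly by block number; the buffer size and flag values are illustrative only, not taken from the patch.

	#include <stdio.h>
	#include <stdint.h>

	#define BLOCK_GOOD	0x0	/* illustrative flag values */
	#define BLOCK_BAD	0x3

	static uint8_t bbt[16];		/* 16 bytes -> 64 blocks, 2 bits each */

	static void bbt_set(unsigned int block, unsigned int flag)
	{
		unsigned int shift = (block & 0x3) * 2;	/* bit pair in the byte */

		bbt[block >> 2] &= ~(0x3 << shift);
		bbt[block >> 2] |= (flag & 0x3) << shift;
	}

	static unsigned int bbt_get(unsigned int block)
	{
		return (bbt[block >> 2] >> ((block & 0x3) * 2)) & 0x3;
	}

	int main(void)
	{
		bbt_set(5, BLOCK_BAD);
		printf("block 5 -> %u, block 6 -> %u\n", bbt_get(5), bbt_get(6));
		return 0;
	}
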
diff --git a/target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch b/target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch
new file mode 100644
index 0000000000..80136b89a6
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch
@@ -0,0 +1,42 @@
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -885,6 +885,9 @@ struct nand_chip {
+ int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
+ int (*erase)(struct mtd_info *mtd, int page);
+ int (*scan_bbt)(struct mtd_info *mtd);
++ int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
++ uint32_t offset, int data_len, const uint8_t *buf,
++ int oob_required, int page, int raw);
+ int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip,
+ int feature_addr, uint8_t *subfeature_para);
+ int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip,
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2753,9 +2753,14 @@ static int nand_do_write_ops(struct mtd_
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+
+- ret = nand_write_page(mtd, chip, column, bytes, wbuf,
+- oob_required, page,
+- (ops->mode == MTD_OPS_RAW));
++// if (chip->write_page)
++ ret = chip->write_page(mtd, chip, column, bytes, wbuf,
++ oob_required, page,
++ (ops->mode == MTD_OPS_RAW));
++// else
++// ret = nand_write_page(mtd, chip, column, bytes, wbuf,
++// oob_required, page,
++// (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+
+@@ -4711,6 +4716,9 @@ int nand_scan_tail(struct mtd_info *mtd)
+ }
+ }
+
++// if (!chip->write_page)
++// chip->write_page = nand_write_page;
++
+ /*
+ * Check ECC mode, default to software if 3byte/512byte hardware ECC is
+ * selected and we have 256 byte pagesize fallback to software ECC
diff --git a/target/linux/ramips/patches-4.14/0040-nand-hack.patch b/target/linux/ramips/patches-4.14/0040-nand-hack.patch
new file mode 100644
index 0000000000..a9d16840f0
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0040-nand-hack.patch
@@ -0,0 +1,65 @@
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -1903,6 +1903,9 @@ static int nand_do_read_ops(struct mtd_i
+ __func__, buf);
+
+ read_retry:
++#ifdef CONFIG_MTK_MTD_NAND
++ ret = chip->read_page(mtd, chip, bufpoi, page);
++#else
+ if (nand_standard_page_accessors(&chip->ecc))
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+@@ -1922,6 +1925,7 @@ read_retry:
+ else
+ ret = chip->ecc.read_page(mtd, chip, bufpoi,
+ oob_required, page);
++#endif
+ if (ret < 0) {
+ if (use_bufpoi)
+ /* Invalidate page cache */
+@@ -3076,8 +3080,11 @@ int nand_erase_nand(struct mtd_info *mtd
+ (page + pages_per_block))
+ chip->pagebuf = -1;
+
++#ifdef CONFIG_MTK_MTD_NAND
++ status = chip->erase_mtk(mtd, page & chip->pagemask);
++#else
+ status = chip->erase(mtd, page & chip->pagemask);
+-
++#endif
+ /* See if block erase succeeded */
+ if (status & NAND_STATUS_FAIL) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+@@ -4207,6 +4214,7 @@ int nand_scan_ident(struct mtd_info *mtd
+ * cmdfunc() both expect cmd_ctrl() to be populated,
+ * so we need to check that that's the case
+ */
++ printk("%s:%s[%d]%p %p %p\n", __FILE__, __func__, __LINE__, chip->cmdfunc, chip->select_chip, chip->cmd_ctrl);
+ pr_err("chip.cmd_ctrl() callback is not provided");
+ return -EINVAL;
+ }
+--- a/drivers/mtd/nand/nand_device_list.h
++++ b/drivers/mtd/nand/nand_device_list.h
+@@ -44,6 +44,8 @@ static const flashdev_info gen_FlashTabl
+ {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0},
+ {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0},
+ {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0},
++ {0xC8DA, 0x909544, 5, 8, 256, 128, 2048, 64, 0x30C77fff, "F59L2G81A", 0},
++ {0xC8DC, 0x909554, 5, 8, 512, 128, 2048, 64, 0x30C77fff, "F59L4G81A", 0},
+ {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0},
+ {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0},
+ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0},
+--- a/include/linux/mtd/rawnand.h
++++ b/include/linux/mtd/rawnand.h
+@@ -896,6 +896,10 @@ struct nand_chip {
+ int (*setup_data_interface)(struct mtd_info *mtd, int chipnr,
+ const struct nand_data_interface *conf);
+
++#ifdef CONFIG_MTK_MTD_NAND
++ int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf, int page);
++ int (*erase_mtk)(struct mtd_info *mtd, int page);
++#endif /* CONFIG_MTK_MTD_NAND */
+
+ int chip_delay;
+ unsigned int options;
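
The hack above routes nand_do_read_ops() and nand_erase_nand() through two new per-chip hooks, read_page() and erase_mtk(), whenever CONFIG_MTK_MTD_NAND is set. A hypothetical sketch of how a controller driver might wire those hooks up is shown below; mtk_nand_read_page() and mtk_nand_erase() are placeholder names for illustration, not symbols provided by this patch.

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/rawnand.h>

	#ifdef CONFIG_MTK_MTD_NAND
	static int mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				      u8 *buf, int page)
	{
		/* issue READ0 for "page", run the HW ECC engine, copy into buf */
		return 0;
	}

	static int mtk_nand_erase(struct mtd_info *mtd, int page)
	{
		/* issue ERASE1/ERASE2 for the block containing "page" */
		return 0;
	}

	static void mtk_nand_attach_hooks(struct nand_chip *chip)
	{
		chip->read_page = mtk_nand_read_page;
		chip->erase_mtk = mtk_nand_erase;
	}
	#endif
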
diff --git a/target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch b/target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch
new file mode 100644
index 0000000000..e2643e3f25
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch
@@ -0,0 +1,44 @@
+From da6015e7f19d749f135f7ac55c4ec47b06faa868 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Fri, 9 Aug 2013 20:12:59 +0200
+Subject: [PATCH 41/53] DT: Add documentation for spi-rt2880
+
+Describe the SPI master found on the MIPS based Ralink RT2880 SoC.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ .../devicetree/bindings/spi/spi-rt2880.txt | 28 ++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/spi/spi-rt2880.txt
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/spi/spi-rt2880.txt
+@@ -0,0 +1,28 @@
++Ralink SoC RT2880 SPI master controller.
++
++This SPI controller is found on most wireless SoCs made by Ralink.
++
++Required properties:
++- compatible : "ralink,rt2880-spi"
++- reg : Address and length of the register set for the controller.
++- #address-cells : <1>, as required by generic SPI binding.
++- #size-cells : <0>, also as required by generic SPI binding.
++
++Child nodes as per the generic SPI binding.
++
++Example:
++
++ spi@b00 {
++ compatible = "ralink,rt2880-spi";
++ reg = <0xb00 0x100>;
++
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ m25p80@0 {
++ compatible = "m25p80";
++ reg = <0>;
++ spi-max-frequency = <10000000>;
++ };
++ };
++
diff --git a/target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch b/target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch
new file mode 100644
index 0000000000..1dd9f40c39
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch
@@ -0,0 +1,574 @@
+From 683af4ebb91a1600df1946ac4769d916b8a1be65 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 11:15:12 +0100
+Subject: [PATCH 42/53] SPI: ralink: add Ralink SoC spi driver
+
+Add the driver needed to make SPI work on Ralink SoC.
+
+Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
+Acked-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/spi/Kconfig | 6 +
+ drivers/spi/Makefile | 1 +
+ drivers/spi/spi-rt2880.c | 530 ++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 537 insertions(+)
+ create mode 100644 drivers/spi/spi-rt2880.c
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -563,6 +563,12 @@ config SPI_QUP
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
+
++config SPI_RT2880
++ tristate "Ralink RT288x SPI Controller"
++ depends on RALINK
++ help
++ This selects a driver for the Ralink RT288x/RT305x SPI Controller.
++
+ config SPI_S3C24XX
+ tristate "Samsung S3C24XX series SPI"
+ depends on ARCH_S3C24XX
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -81,6 +81,7 @@ obj-$(CONFIG_SPI_QUP) += spi-qup.o
+ obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
+ obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
+ obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
++obj-$(CONFIG_SPI_RT2880) += spi-rt2880.o
+ obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
+ spi-s3c24xx-hw-y := spi-s3c24xx.o
+ spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
+--- /dev/null
++++ b/drivers/spi/spi-rt2880.c
+@@ -0,0 +1,530 @@
++/*
++ * spi-rt2880.c -- Ralink RT288x/RT305x SPI controller driver
++ *
++ * Copyright (C) 2011 Sergiy <piratfm@gmail.com>
++ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
++ *
++ * Some parts are based on spi-orion.c:
++ * Author: Shadi Ammouri <shadi@marvell.com>
++ * Copyright (C) 2007-2008 Marvell Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/reset.h>
++#include <linux/spi/spi.h>
++#include <linux/platform_device.h>
++#include <linux/gpio.h>
++
++#define DRIVER_NAME "spi-rt2880"
++
++#define RAMIPS_SPI_STAT 0x00
++#define RAMIPS_SPI_CFG 0x10
++#define RAMIPS_SPI_CTL 0x14
++#define RAMIPS_SPI_DATA 0x20
++#define RAMIPS_SPI_ADDR 0x24
++#define RAMIPS_SPI_BS 0x28
++#define RAMIPS_SPI_USER 0x2C
++#define RAMIPS_SPI_TXFIFO 0x30
++#define RAMIPS_SPI_RXFIFO 0x34
++#define RAMIPS_SPI_FIFO_STAT 0x38
++#define RAMIPS_SPI_MODE 0x3C
++#define RAMIPS_SPI_DEV_OFFSET 0x40
++#define RAMIPS_SPI_DMA 0x80
++#define RAMIPS_SPI_DMASTAT 0x84
++#define RAMIPS_SPI_ARBITER 0xF0
++
++/* SPISTAT register bit field */
++#define SPISTAT_BUSY BIT(0)
++
++/* SPICFG register bit field */
++#define SPICFG_ADDRMODE BIT(12)
++#define SPICFG_RXENVDIS BIT(11)
++#define SPICFG_RXCAP BIT(10)
++#define SPICFG_SPIENMODE BIT(9)
++#define SPICFG_MSBFIRST BIT(8)
++#define SPICFG_SPICLKPOL BIT(6)
++#define SPICFG_RXCLKEDGE_FALLING BIT(5)
++#define SPICFG_TXCLKEDGE_FALLING BIT(4)
++#define SPICFG_HIZSPI BIT(3)
++#define SPICFG_SPICLK_PRESCALE_MASK 0x7
++#define SPICFG_SPICLK_DIV2 0
++#define SPICFG_SPICLK_DIV4 1
++#define SPICFG_SPICLK_DIV8 2
++#define SPICFG_SPICLK_DIV16 3
++#define SPICFG_SPICLK_DIV32 4
++#define SPICFG_SPICLK_DIV64 5
++#define SPICFG_SPICLK_DIV128 6
++#define SPICFG_SPICLK_DISABLE 7
++
++/* SPICTL register bit field */
++#define SPICTL_START BIT(4)
++#define SPICTL_HIZSDO BIT(3)
++#define SPICTL_STARTWR BIT(2)
++#define SPICTL_STARTRD BIT(1)
++#define SPICTL_SPIENA BIT(0)
++
++/* SPIUSER register bit field */
++#define SPIUSER_USERMODE BIT(21)
++#define SPIUSER_INSTR_PHASE BIT(20)
++#define SPIUSER_ADDR_PHASE_MASK 0x7
++#define SPIUSER_ADDR_PHASE_OFFSET 17
++#define SPIUSER_MODE_PHASE BIT(16)
++#define SPIUSER_DUMMY_PHASE_MASK 0x3
++#define SPIUSER_DUMMY_PHASE_OFFSET 14
++#define SPIUSER_DATA_PHASE_MASK 0x3
++#define SPIUSER_DATA_PHASE_OFFSET 12
++#define SPIUSER_DATA_READ (BIT(0) << SPIUSER_DATA_PHASE_OFFSET)
++#define SPIUSER_DATA_WRITE (BIT(1) << SPIUSER_DATA_PHASE_OFFSET)
++#define SPIUSER_ADDR_TYPE_OFFSET 9
++#define SPIUSER_MODE_TYPE_OFFSET 6
++#define SPIUSER_DUMMY_TYPE_OFFSET 3
++#define SPIUSER_DATA_TYPE_OFFSET 0
++#define SPIUSER_TRANSFER_MASK 0x7
++#define SPIUSER_TRANSFER_SINGLE BIT(0)
++#define SPIUSER_TRANSFER_DUAL BIT(1)
++#define SPIUSER_TRANSFER_QUAD BIT(2)
++
++#define SPIUSER_TRANSFER_TYPE(type) ( \
++ (type << SPIUSER_ADDR_TYPE_OFFSET) | \
++ (type << SPIUSER_MODE_TYPE_OFFSET) | \
++ (type << SPIUSER_DUMMY_TYPE_OFFSET) | \
++ (type << SPIUSER_DATA_TYPE_OFFSET) \
++)
++
++/* SPIFIFOSTAT register bit field */
++#define SPIFIFOSTAT_TXEMPTY BIT(19)
++#define SPIFIFOSTAT_RXEMPTY BIT(18)
++#define SPIFIFOSTAT_TXFULL BIT(17)
++#define SPIFIFOSTAT_RXFULL BIT(16)
++#define SPIFIFOSTAT_FIFO_MASK 0xff
++#define SPIFIFOSTAT_TX_OFFSET 8
++#define SPIFIFOSTAT_RX_OFFSET 0
++
++#define SPI_FIFO_DEPTH 16
++
++/* SPIMODE register bit field */
++#define SPIMODE_MODE_OFFSET 24
++#define SPIMODE_DUMMY_OFFSET 0
++
++/* SPIARB register bit field */
++#define SPICTL_ARB_EN BIT(31)
++#define SPICTL_CSCTL1 BIT(16)
++#define SPI1_POR BIT(1)
++#define SPI0_POR BIT(0)
++
++#define RT2880_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | \
++ SPI_CS_HIGH)
++
++static atomic_t hw_reset_count = ATOMIC_INIT(0);
++
++struct rt2880_spi {
++ struct spi_master *master;
++ void __iomem *base;
++ u32 speed;
++ u16 wait_loops;
++ u16 mode;
++ struct clk *clk;
++};
++
++static inline struct rt2880_spi *spidev_to_rt2880_spi(struct spi_device *spi)
++{
++ return spi_master_get_devdata(spi->master);
++}
++
++static inline u32 rt2880_spi_read(struct rt2880_spi *rs, u32 reg)
++{
++ return ioread32(rs->base + reg);
++}
++
++static inline void rt2880_spi_write(struct rt2880_spi *rs, u32 reg,
++ const u32 val)
++{
++ iowrite32(val, rs->base + reg);
++}
++
++static inline void rt2880_spi_setbits(struct rt2880_spi *rs, u32 reg, u32 mask)
++{
++ void __iomem *addr = rs->base + reg;
++
++ iowrite32((ioread32(addr) | mask), addr);
++}
++
++static inline void rt2880_spi_clrbits(struct rt2880_spi *rs, u32 reg, u32 mask)
++{
++ void __iomem *addr = rs->base + reg;
++
++ iowrite32((ioread32(addr) & ~mask), addr);
++}
++
++static u32 rt2880_spi_baudrate_get(struct spi_device *spi, unsigned int speed)
++{
++ struct rt2880_spi *rs = spidev_to_rt2880_spi(spi);
++ u32 rate;
++ u32 prescale;
++
++	/*
++	 * the supported clock divisors are: 2, 4, 8, ... 128
++	 * round up, as we look for a speed equal to or below the one requested
++	 */
++ rate = DIV_ROUND_UP(clk_get_rate(rs->clk), speed);
++ rate = roundup_pow_of_two(rate);
++
++ /* Convert the rate to SPI clock divisor value. */
++ prescale = ilog2(rate / 2);
++
++	/* add some tolerance: double the loop count and add 100 */
++ rs->wait_loops = (8 * HZ * loops_per_jiffy) /
++ (clk_get_rate(rs->clk) / rate);
++ rs->wait_loops = (rs->wait_loops << 1) + 100;
++ rs->speed = speed;
++
++	dev_dbg(&spi->dev, "speed: %lu/%u, rate: %u, prescale: %u, loops: %hu\n",
++ clk_get_rate(rs->clk) / rate, speed, rate, prescale,
++ rs->wait_loops);
++
++ return prescale;
++}
++
++static u32 get_arbiter_offset(struct spi_master *master)
++{
++ u32 offset;
++
++ offset = RAMIPS_SPI_ARBITER;
++ if (master->bus_num == 1)
++ offset -= RAMIPS_SPI_DEV_OFFSET;
++
++ return offset;
++}
++
++static void rt2880_spi_set_cs(struct spi_device *spi, bool enable)
++{
++ struct rt2880_spi *rs = spidev_to_rt2880_spi(spi);
++
++ if (enable)
++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_SPIENA);
++ else
++ rt2880_spi_clrbits(rs, RAMIPS_SPI_CTL, SPICTL_SPIENA);
++}
++
++static int rt2880_spi_wait_ready(struct rt2880_spi *rs, int len)
++{
++ int loop = rs->wait_loops * len;
++
++ while ((rt2880_spi_read(rs, RAMIPS_SPI_STAT) & SPISTAT_BUSY) && --loop)
++ cpu_relax();
++
++ if (loop)
++ return 0;
++
++ return -ETIMEDOUT;
++}
++
++static void rt2880_dump_reg(struct spi_master *master)
++{
++ struct rt2880_spi *rs = spi_master_get_devdata(master);
++
++ dev_dbg(&master->dev, "stat: %08x, cfg: %08x, ctl: %08x, " \
++ "data: %08x, arb: %08x\n",
++ rt2880_spi_read(rs, RAMIPS_SPI_STAT),
++ rt2880_spi_read(rs, RAMIPS_SPI_CFG),
++ rt2880_spi_read(rs, RAMIPS_SPI_CTL),
++ rt2880_spi_read(rs, RAMIPS_SPI_DATA),
++ rt2880_spi_read(rs, get_arbiter_offset(master)));
++}
++
++static int rt2880_spi_transfer_one(struct spi_master *master,
++ struct spi_device *spi, struct spi_transfer *xfer)
++{
++ struct rt2880_spi *rs = spi_master_get_devdata(master);
++ unsigned len;
++ const u8 *tx = xfer->tx_buf;
++ u8 *rx = xfer->rx_buf;
++ int err = 0;
++
++ /* change clock speed */
++ if (unlikely(rs->speed != xfer->speed_hz)) {
++ u32 reg;
++ reg = rt2880_spi_read(rs, RAMIPS_SPI_CFG);
++ reg &= ~SPICFG_SPICLK_PRESCALE_MASK;
++ reg |= rt2880_spi_baudrate_get(spi, xfer->speed_hz);
++ rt2880_spi_write(rs, RAMIPS_SPI_CFG, reg);
++ }
++
++ if (tx) {
++ len = xfer->len;
++ while (len-- > 0) {
++ rt2880_spi_write(rs, RAMIPS_SPI_DATA, *tx++);
++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_STARTWR);
++ err = rt2880_spi_wait_ready(rs, 1);
++ if (err) {
++ dev_err(&spi->dev, "TX failed, err=%d\n", err);
++ goto out;
++ }
++ }
++ }
++
++ if (rx) {
++ len = xfer->len;
++ while (len-- > 0) {
++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_STARTRD);
++ err = rt2880_spi_wait_ready(rs, 1);
++ if (err) {
++ dev_err(&spi->dev, "RX failed, err=%d\n", err);
++ goto out;
++ }
++ *rx++ = (u8) rt2880_spi_read(rs, RAMIPS_SPI_DATA);
++ }
++ }
++
++out:
++ return err;
++}
++
++/* copy from spi.c */
++static void spi_set_cs(struct spi_device *spi, bool enable)
++{
++ if (spi->mode & SPI_CS_HIGH)
++ enable = !enable;
++
++ if (spi->cs_gpio >= 0)
++ gpio_set_value(spi->cs_gpio, !enable);
++ else if (spi->master->set_cs)
++ spi->master->set_cs(spi, !enable);
++}
++
++static int rt2880_spi_setup(struct spi_device *spi)
++{
++ struct spi_master *master = spi->master;
++ struct rt2880_spi *rs = spi_master_get_devdata(master);
++ u32 reg, old_reg, arbit_off;
++
++ if ((spi->max_speed_hz > master->max_speed_hz) ||
++ (spi->max_speed_hz < master->min_speed_hz)) {
++		dev_err(&spi->dev, "invalid requested speed %d Hz\n",
++ spi->max_speed_hz);
++ return -EINVAL;
++ }
++
++ if (!(master->bits_per_word_mask &
++ BIT(spi->bits_per_word - 1))) {
++		dev_err(&spi->dev, "invalid bits_per_word %d\n",
++ spi->bits_per_word);
++ return -EINVAL;
++ }
++
++	/* the hardware does not seem to work in mode 0, force mode 3 */
++ if ((spi->mode & (SPI_CPOL | SPI_CPHA)) == SPI_MODE_0) {
++ dev_warn(&spi->dev, "force spi mode3\n");
++ spi->mode |= SPI_MODE_3;
++ }
++
++ /* chip polarity */
++ arbit_off = get_arbiter_offset(master);
++ reg = old_reg = rt2880_spi_read(rs, arbit_off);
++ if (spi->mode & SPI_CS_HIGH) {
++ switch (master->bus_num) {
++ case 1:
++ reg |= SPI1_POR;
++ break;
++ default:
++ reg |= SPI0_POR;
++ break;
++ }
++ } else {
++ switch (master->bus_num) {
++ case 1:
++ reg &= ~SPI1_POR;
++ break;
++ default:
++ reg &= ~SPI0_POR;
++ break;
++ }
++ }
++
++ /* enable spi1 */
++ if (master->bus_num == 1)
++ reg |= SPICTL_ARB_EN;
++
++ if (reg != old_reg)
++ rt2880_spi_write(rs, arbit_off, reg);
++
++	/* deselect the spi device */
++ spi_set_cs(spi, false);
++
++ rt2880_dump_reg(master);
++
++ return 0;
++}
++
++static int rt2880_spi_prepare_message(struct spi_master *master,
++ struct spi_message *msg)
++{
++ struct rt2880_spi *rs = spi_master_get_devdata(master);
++ struct spi_device *spi = msg->spi;
++ u32 reg;
++
++ if ((rs->mode == spi->mode) && (rs->speed == spi->max_speed_hz))
++ return 0;
++
++#if 0
++ /* set spido to tri-state */
++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_HIZSDO);
++#endif
++
++ reg = rt2880_spi_read(rs, RAMIPS_SPI_CFG);
++
++ reg &= ~(SPICFG_MSBFIRST | SPICFG_SPICLKPOL |
++ SPICFG_RXCLKEDGE_FALLING |
++ SPICFG_TXCLKEDGE_FALLING |
++ SPICFG_SPICLK_PRESCALE_MASK);
++
++ /* MSB */
++ if (!(spi->mode & SPI_LSB_FIRST))
++ reg |= SPICFG_MSBFIRST;
++
++ /* spi mode */
++ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
++ case SPI_MODE_0:
++ reg |= SPICFG_TXCLKEDGE_FALLING;
++ break;
++ case SPI_MODE_1:
++ reg |= SPICFG_RXCLKEDGE_FALLING;
++ break;
++ case SPI_MODE_2:
++ reg |= SPICFG_SPICLKPOL | SPICFG_RXCLKEDGE_FALLING;
++ break;
++ case SPI_MODE_3:
++ reg |= SPICFG_SPICLKPOL | SPICFG_TXCLKEDGE_FALLING;
++ break;
++ }
++ rs->mode = spi->mode;
++
++#if 0
++ /* set spiclk and spiena to tri-state */
++ reg |= SPICFG_HIZSPI;
++#endif
++
++ /* clock divide */
++ reg |= rt2880_spi_baudrate_get(spi, spi->max_speed_hz);
++
++ rt2880_spi_write(rs, RAMIPS_SPI_CFG, reg);
++
++ return 0;
++}
++
++static int rt2880_spi_probe(struct platform_device *pdev)
++{
++ struct spi_master *master;
++ struct rt2880_spi *rs;
++ void __iomem *base;
++ struct resource *r;
++ struct clk *clk;
++ int ret;
++
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, r);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(clk)) {
++ dev_err(&pdev->dev, "unable to get SYS clock\n");
++ return PTR_ERR(clk);
++ }
++
++ ret = clk_prepare_enable(clk);
++ if (ret)
++ goto err_clk;
++
++ master = spi_alloc_master(&pdev->dev, sizeof(*rs));
++ if (master == NULL) {
++ dev_dbg(&pdev->dev, "master allocation failed\n");
++ ret = -ENOMEM;
++ goto err_clk;
++ }
++
++ master->dev.of_node = pdev->dev.of_node;
++ master->mode_bits = RT2880_SPI_MODE_BITS;
++ master->bits_per_word_mask = SPI_BPW_MASK(8);
++ master->min_speed_hz = clk_get_rate(clk) / 128;
++ master->max_speed_hz = clk_get_rate(clk) / 2;
++ master->flags = SPI_MASTER_HALF_DUPLEX;
++ master->setup = rt2880_spi_setup;
++ master->prepare_message = rt2880_spi_prepare_message;
++ master->set_cs = rt2880_spi_set_cs;
++	master->transfer_one = rt2880_spi_transfer_one;
++
++ dev_set_drvdata(&pdev->dev, master);
++
++ rs = spi_master_get_devdata(master);
++ rs->master = master;
++ rs->base = base;
++ rs->clk = clk;
++
++ if (atomic_inc_return(&hw_reset_count) == 1)
++ device_reset(&pdev->dev);
++
++ ret = devm_spi_register_master(&pdev->dev, master);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "devm_spi_register_master error.\n");
++ goto err_master;
++ }
++
++ return ret;
++
++err_master:
++ spi_master_put(master);
++ kfree(master);
++err_clk:
++ clk_disable_unprepare(clk);
++
++ return ret;
++}
++
++static int rt2880_spi_remove(struct platform_device *pdev)
++{
++ struct spi_master *master;
++ struct rt2880_spi *rs;
++
++ master = dev_get_drvdata(&pdev->dev);
++ rs = spi_master_get_devdata(master);
++
++ clk_disable_unprepare(rs->clk);
++ atomic_dec(&hw_reset_count);
++
++ return 0;
++}
++
++MODULE_ALIAS("platform:" DRIVER_NAME);
++
++static const struct of_device_id rt2880_spi_match[] = {
++ { .compatible = "ralink,rt2880-spi" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, rt2880_spi_match);
++
++static struct platform_driver rt2880_spi_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = rt2880_spi_match,
++ },
++ .probe = rt2880_spi_probe,
++ .remove = rt2880_spi_remove,
++};
++
++module_platform_driver(rt2880_spi_driver);
++
++MODULE_DESCRIPTION("Ralink SPI driver");
++MODULE_AUTHOR("Sergiy <piratfm@gmail.com>");
++MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
++MODULE_LICENSE("GPL");
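
For reference, rt2880_spi_baudrate_get() above selects the clock divisor by rounding the requested ratio up to the next power of two and then taking ilog2(rate / 2) as the SPICFG prescale field. The stand-alone sketch below reproduces that arithmetic in plain C; the 40 MHz system clock and 10 MHz requested speed are assumed example values, not figures from the patch.

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t roundup_pow_of_two(uint32_t x)
	{
		uint32_t r = 1;

		while (r < x)
			r <<= 1;
		return r;
	}

	static uint32_t ilog2(uint32_t x)
	{
		uint32_t r = 0;

		while (x >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		uint32_t sys_clk = 40000000;	/* assumed SoC SPI clock */
		uint32_t speed = 10000000;	/* requested SCK rate */
		uint32_t rate, prescale;

		rate = (sys_clk + speed - 1) / speed;	/* DIV_ROUND_UP */
		rate = roundup_pow_of_two(rate);	/* divisors: 2, 4, ... 128 */
		prescale = ilog2(rate / 2);		/* 0 = /2, 1 = /4, ... 6 = /128 */

		printf("divisor %u -> prescale field %u\n", rate, prescale);
		return 0;
	}
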
diff --git a/target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch b/target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch
new file mode 100644
index 0000000000..bc5418bab2
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch
@@ -0,0 +1,524 @@
+From 87a5fcd57c577cd94b5b080deb98885077c13a42 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:49:07 +0100
+Subject: [PATCH 43/53] spi: add mt7621 support
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/spi/Kconfig | 6 +
+ drivers/spi/Makefile | 1 +
+ drivers/spi/spi-mt7621.c | 480 ++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 487 insertions(+)
+ create mode 100644 drivers/spi/spi-mt7621.c
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -569,6 +569,12 @@ config SPI_RT2880
+ help
+ This selects a driver for the Ralink RT288x/RT305x SPI Controller.
+
++config SPI_MT7621
++ tristate "MediaTek MT7621 SPI Controller"
++ depends on RALINK
++ help
++ This selects a driver for the MediaTek MT7621 SPI Controller.
++
+ config SPI_S3C24XX
+ tristate "Samsung S3C24XX series SPI"
+ depends on ARCH_S3C24XX
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mp
+ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+ obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
++obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
+ obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+ obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
+ obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+--- /dev/null
++++ b/drivers/spi/spi-mt7621.c
+@@ -0,0 +1,483 @@
++/*
++ * spi-mt7621.c -- MediaTek MT7621 SPI controller driver
++ *
++ * Copyright (C) 2011 Sergiy <piratfm@gmail.com>
++ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org>
++ * Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name>
++ *
++ * Some parts are based on spi-orion.c:
++ * Author: Shadi Ammouri <shadi@marvell.com>
++ * Copyright (C) 2007-2008 Marvell Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/delay.h>
++#include <linux/io.h>
++#include <linux/reset.h>
++#include <linux/spi/spi.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/swab.h>
++
++#include <ralink_regs.h>
++
++#define SPI_BPW_MASK(bits) BIT((bits) - 1)
++
++#define DRIVER_NAME "spi-mt7621"
++/* in usec */
++#define RALINK_SPI_WAIT_MAX_LOOP 2000
++
++/* SPISTAT register bit field */
++#define SPISTAT_BUSY BIT(0)
++
++#define MT7621_SPI_TRANS 0x00
++#define SPITRANS_BUSY BIT(16)
++
++#define MT7621_SPI_OPCODE 0x04
++#define MT7621_SPI_DATA0 0x08
++#define MT7621_SPI_DATA4 0x18
++#define SPI_CTL_TX_RX_CNT_MASK 0xff
++#define SPI_CTL_START BIT(8)
++
++#define MT7621_SPI_POLAR 0x38
++#define MT7621_SPI_MASTER 0x28
++#define MT7621_SPI_MOREBUF 0x2c
++#define MT7621_SPI_SPACE 0x3c
++
++#define MT7621_CPHA BIT(5)
++#define MT7621_CPOL BIT(4)
++#define MT7621_LSB_FIRST BIT(3)
++
++#define RT2880_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH)
++
++struct mt7621_spi;
++
++struct mt7621_spi {
++ struct spi_master *master;
++ void __iomem *base;
++ unsigned int sys_freq;
++ unsigned int speed;
++ struct clk *clk;
++ spinlock_t lock;
++
++ struct mt7621_spi_ops *ops;
++};
++
++static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi)
++{
++ return spi_master_get_devdata(spi->master);
++}
++
++static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg)
++{
++ return ioread32(rs->base + reg);
++}
++
++static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
++{
++ iowrite32(val, rs->base + reg);
++}
++
++static void mt7621_spi_reset(struct mt7621_spi *rs, int duplex)
++{
++ u32 master = mt7621_spi_read(rs, MT7621_SPI_MASTER);
++
++ master |= 7 << 29;
++ master |= 1 << 2;
++ if (duplex)
++ master |= 1 << 10;
++ else
++ master &= ~(1 << 10);
++
++ mt7621_spi_write(rs, MT7621_SPI_MASTER, master);
++}
++
++static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
++{
++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
++ int cs = spi->chip_select;
++ u32 polar = 0;
++
++ mt7621_spi_reset(rs, cs);
++ if (enable)
++ polar = BIT(cs);
++ mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
++}
++
++static int mt7621_spi_prepare(struct spi_device *spi, unsigned int speed)
++{
++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
++ u32 rate;
++ u32 reg;
++
++ dev_dbg(&spi->dev, "speed:%u\n", speed);
++
++ rate = DIV_ROUND_UP(rs->sys_freq, speed);
++ dev_dbg(&spi->dev, "rate-1:%u\n", rate);
++
++ if (rate > 4097)
++ return -EINVAL;
++
++ if (rate < 2)
++ rate = 2;
++
++ reg = mt7621_spi_read(rs, MT7621_SPI_MASTER);
++ reg &= ~(0xfff << 16);
++ reg |= (rate - 2) << 16;
++ rs->speed = speed;
++
++ reg &= ~MT7621_LSB_FIRST;
++ if (spi->mode & SPI_LSB_FIRST)
++ reg |= MT7621_LSB_FIRST;
++
++ reg &= ~(MT7621_CPHA | MT7621_CPOL);
++ switch(spi->mode & (SPI_CPOL | SPI_CPHA)) {
++ case SPI_MODE_0:
++ break;
++ case SPI_MODE_1:
++ reg |= MT7621_CPHA;
++ break;
++ case SPI_MODE_2:
++ reg |= MT7621_CPOL;
++ break;
++ case SPI_MODE_3:
++ reg |= MT7621_CPOL | MT7621_CPHA;
++ break;
++ }
++ mt7621_spi_write(rs, MT7621_SPI_MASTER, reg);
++
++ return 0;
++}
++
++static inline int mt7621_spi_wait_till_ready(struct spi_device *spi)
++{
++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
++ int i;
++
++ for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) {
++ u32 status;
++
++ status = mt7621_spi_read(rs, MT7621_SPI_TRANS);
++ if ((status & SPITRANS_BUSY) == 0) {
++ return 0;
++ }
++ cpu_relax();
++ udelay(1);
++ }
++
++ return -ETIMEDOUT;
++}
++
++static int mt7621_spi_transfer_half_duplex(struct spi_master *master,
++ struct spi_message *m)
++{
++ struct mt7621_spi *rs = spi_master_get_devdata(master);
++ struct spi_device *spi = m->spi;
++ unsigned int speed = spi->max_speed_hz;
++ struct spi_transfer *t = NULL;
++ int status = 0;
++ int i, len = 0;
++ int rx_len = 0;
++ u32 data[9] = { 0 };
++ u32 val;
++
++ mt7621_spi_wait_till_ready(spi);
++
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ const u8 *buf = t->tx_buf;
++
++ if (t->rx_buf)
++ rx_len += t->len;
++
++ if (!buf)
++ continue;
++
++ if (t->speed_hz < speed)
++ speed = t->speed_hz;
++
++ if (WARN_ON(len + t->len > 36)) {
++ status = -EIO;
++ goto msg_done;
++ }
++
++ for (i = 0; i < t->len; i++, len++)
++ data[len / 4] |= buf[i] << (8 * (len & 3));
++ }
++
++ if (WARN_ON(rx_len > 32)) {
++ status = -EIO;
++ goto msg_done;
++ }
++
++ if (mt7621_spi_prepare(spi, speed)) {
++ status = -EIO;
++ goto msg_done;
++ }
++ data[0] = swab32(data[0]);
++ if (len < 4)
++ data[0] >>= (4 - len) * 8;
++
++ for (i = 0; i < len; i += 4)
++ mt7621_spi_write(rs, MT7621_SPI_OPCODE + i, data[i / 4]);
++
++ val = (min_t(int, len, 4) * 8) << 24;
++ if (len > 4)
++ val |= (len - 4) * 8;
++ val |= (rx_len * 8) << 12;
++ mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val);
++
++ mt7621_spi_set_cs(spi, 1);
++
++ val = mt7621_spi_read(rs, MT7621_SPI_TRANS);
++ val |= SPI_CTL_START;
++ mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
++
++ mt7621_spi_wait_till_ready(spi);
++
++ mt7621_spi_set_cs(spi, 0);
++
++ for (i = 0; i < rx_len; i += 4)
++ data[i / 4] = mt7621_spi_read(rs, MT7621_SPI_DATA0 + i);
++
++ m->actual_length = len + rx_len;
++
++ len = 0;
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ u8 *buf = t->rx_buf;
++
++ if (!buf)
++ continue;
++
++ for (i = 0; i < t->len; i++, len++)
++ buf[i] = data[len / 4] >> (8 * (len & 3));
++ }
++
++msg_done:
++ m->status = status;
++ spi_finalize_current_message(master);
++
++ return 0;
++}
++
++static int mt7621_spi_transfer_full_duplex(struct spi_master *master,
++ struct spi_message *m)
++{
++ struct mt7621_spi *rs = spi_master_get_devdata(master);
++ struct spi_device *spi = m->spi;
++ unsigned int speed = spi->max_speed_hz;
++ struct spi_transfer *t = NULL;
++ int status = 0;
++ int i, len = 0;
++ int rx_len = 0;
++ u32 data[9] = { 0 };
++ u32 val = 0;
++
++ mt7621_spi_wait_till_ready(spi);
++
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ const u8 *buf = t->tx_buf;
++
++ if (t->rx_buf)
++ rx_len += t->len;
++
++ if (!buf)
++ continue;
++
++ if (WARN_ON(len + t->len > 16)) {
++ status = -EIO;
++ goto msg_done;
++ }
++
++ for (i = 0; i < t->len; i++, len++)
++ data[len / 4] |= buf[i] << (8 * (len & 3));
++ if (speed > t->speed_hz)
++ speed = t->speed_hz;
++ }
++
++ if (WARN_ON(rx_len > 16)) {
++ status = -EIO;
++ goto msg_done;
++ }
++
++ if (mt7621_spi_prepare(spi, speed)) {
++ status = -EIO;
++ goto msg_done;
++ }
++
++ for (i = 0; i < len; i += 4)
++ mt7621_spi_write(rs, MT7621_SPI_DATA0 + i, data[i / 4]);
++
++ val |= len * 8;
++ val |= (rx_len * 8) << 12;
++ mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val);
++
++ mt7621_spi_set_cs(spi, 1);
++
++ val = mt7621_spi_read(rs, MT7621_SPI_TRANS);
++ val |= SPI_CTL_START;
++ mt7621_spi_write(rs, MT7621_SPI_TRANS, val);
++
++ mt7621_spi_wait_till_ready(spi);
++
++ mt7621_spi_set_cs(spi, 0);
++
++ for (i = 0; i < rx_len; i += 4)
++ data[i / 4] = mt7621_spi_read(rs, MT7621_SPI_DATA4 + i);
++
++ m->actual_length = rx_len;
++
++ len = 0;
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ u8 *buf = t->rx_buf;
++
++ if (!buf)
++ continue;
++
++ for (i = 0; i < t->len; i++, len++)
++ buf[i] = data[len / 4] >> (8 * (len & 3));
++ }
++
++msg_done:
++ m->status = status;
++ spi_finalize_current_message(master);
++
++ return 0;
++}
++
++static int mt7621_spi_transfer_one_message(struct spi_master *master,
++ struct spi_message *m)
++{
++ struct spi_device *spi = m->spi;
++ int cs = spi->chip_select;
++
++ if (cs)
++ return mt7621_spi_transfer_full_duplex(master, m);
++ return mt7621_spi_transfer_half_duplex(master, m);
++}
++
++static int mt7621_spi_setup(struct spi_device *spi)
++{
++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
++
++ if ((spi->max_speed_hz == 0) ||
++ (spi->max_speed_hz > (rs->sys_freq / 2)))
++ spi->max_speed_hz = (rs->sys_freq / 2);
++
++ if (spi->max_speed_hz < (rs->sys_freq / 4097)) {
++ dev_err(&spi->dev, "setup: requested speed is too low %d Hz\n",
++ spi->max_speed_hz);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static const struct of_device_id mt7621_spi_match[] = {
++ { .compatible = "ralink,mt7621-spi" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, mt7621_spi_match);
++
++static int mt7621_spi_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *match;
++ struct spi_master *master;
++ struct mt7621_spi *rs;
++ unsigned long flags;
++ void __iomem *base;
++ struct resource *r;
++ int status = 0;
++ struct clk *clk;
++ struct mt7621_spi_ops *ops;
++
++ match = of_match_device(mt7621_spi_match, &pdev->dev);
++ if (!match)
++ return -EINVAL;
++ ops = (struct mt7621_spi_ops *)match->data;
++
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, r);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(clk)) {
++ dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n",
++ status);
++ return PTR_ERR(clk);
++ }
++
++ status = clk_prepare_enable(clk);
++ if (status)
++ return status;
++
++ master = spi_alloc_master(&pdev->dev, sizeof(*rs));
++ if (master == NULL) {
++ dev_info(&pdev->dev, "master allocation failed\n");
++ return -ENOMEM;
++ }
++
++ master->mode_bits = RT2880_SPI_MODE_BITS;
++
++ master->setup = mt7621_spi_setup;
++ master->transfer_one_message = mt7621_spi_transfer_one_message;
++ master->bits_per_word_mask = SPI_BPW_MASK(8);
++ master->dev.of_node = pdev->dev.of_node;
++ master->num_chipselect = 2;
++
++ dev_set_drvdata(&pdev->dev, master);
++
++ rs = spi_master_get_devdata(master);
++ rs->base = base;
++ rs->clk = clk;
++ rs->master = master;
++ rs->sys_freq = clk_get_rate(rs->clk);
++ rs->ops = ops;
++ dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
++ spin_lock_irqsave(&rs->lock, flags);
++
++ device_reset(&pdev->dev);
++
++ mt7621_spi_reset(rs, 0);
++
++ return spi_register_master(master);
++}
++
++static int mt7621_spi_remove(struct platform_device *pdev)
++{
++ struct spi_master *master;
++ struct mt7621_spi *rs;
++
++ master = dev_get_drvdata(&pdev->dev);
++ rs = spi_master_get_devdata(master);
++
++ clk_disable(rs->clk);
++ spi_unregister_master(master);
++
++ return 0;
++}
++
++MODULE_ALIAS("platform:" DRIVER_NAME);
++
++static struct platform_driver mt7621_spi_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = mt7621_spi_match,
++ },
++ .probe = mt7621_spi_probe,
++ .remove = mt7621_spi_remove,
++};
++
++module_platform_driver(mt7621_spi_driver);
++
++MODULE_DESCRIPTION("MT7621 SPI driver");
++MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>");
++MODULE_LICENSE("GPL");
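
The half-duplex path in spi-mt7621.c above packs the outgoing bytes into the OPCODE/DATA registers and then describes the transfer through MT7621_SPI_MOREBUF: the command bit count goes in the top byte, any extra transmit bits in the low bits, and the receive bit count shifted left by 12, exactly as mt7621_spi_transfer_half_duplex() does. The short sketch below computes that register value for an assumed 1-byte opcode plus 3-byte address command reading back 4 bytes; the byte counts are illustrative only.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int len = 4;		/* TX bytes: opcode + 3 address bytes */
		int rx_len = 4;		/* RX bytes expected back */
		uint32_t val;

		val = ((len < 4 ? len : 4) * 8) << 24;	/* command bits, max 32 */
		if (len > 4)
			val |= (len - 4) * 8;		/* additional TX data bits */
		val |= (rx_len * 8) << 12;		/* RX bits */

		printf("MT7621_SPI_MOREBUF = 0x%08x\n", val);	/* 0x20020000 */
		return 0;
	}
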
diff --git a/target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch b/target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch
new file mode 100644
index 0000000000..471c8f40e0
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch
@@ -0,0 +1,507 @@
+From 723b8beaabf3c3c4b1ce69480141f1e926f3f3b2 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:52:56 +0100
+Subject: [PATCH 44/53] i2c: MIPS: adds ralink I2C driver
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ .../devicetree/bindings/i2c/i2c-ralink.txt | 27 ++
+ drivers/i2c/busses/Kconfig | 4 +
+ drivers/i2c/busses/Makefile | 1 +
+ drivers/i2c/busses/i2c-ralink.c | 327 ++++++++++++++++++++
+ 4 files changed, 359 insertions(+)
+ create mode 100644 Documentation/devicetree/bindings/i2c/i2c-ralink.txt
+ create mode 100644 drivers/i2c/busses/i2c-ralink.c
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/i2c/i2c-ralink.txt
+@@ -0,0 +1,27 @@
++I2C for Ralink platforms
++
++Required properties :
++- compatible : Must be "link,rt3052-i2c"
++- reg: physical base address of the controller and length of memory mapped
++ region.
++- #address-cells = <1>;
++- #size-cells = <0>;
++
++Optional properties:
++- Child nodes conforming to i2c bus binding
++
++Example :
++
++palmbus@10000000 {
++ i2c@900 {
++ compatible = "link,rt3052-i2c";
++ reg = <0x900 0x100>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ hwmon@4b {
++ compatible = "national,lm92";
++ reg = <0x4b>;
++ };
++ };
++};
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -863,6 +863,11 @@ config I2C_RK3X
+ This driver can also be built as a module. If so, the module will
+ be called i2c-rk3x.
+
++config I2C_RALINK
++ tristate "Ralink I2C Controller"
++ depends on RALINK && !SOC_MT7621
++ select OF_I2C
++
+ config HAVE_S3C2410_I2C
+ bool
+ help
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -84,6 +84,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o
+ obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
+ obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
+ obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
++obj-$(CONFIG_I2C_RALINK) += i2c-ralink.o
+ obj-$(CONFIG_I2C_QUP) += i2c-qup.o
+ obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
+ obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-ralink.c
+@@ -0,0 +1,435 @@
++/*
++ * drivers/i2c/busses/i2c-ralink.c
++ *
++ * Copyright (C) 2013 Steven Liu <steven_liu@mediatek.com>
++ * Copyright (C) 2016 Michael Lee <igvtee@gmail.com>
++ *
++ * Improved so that i2cdetect from i2c-tools can detect i2c devices on the bus.
++ * (C) 2014 Sittisak <sittisaks@hotmail.com>
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/reset.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/platform_device.h>
++#include <linux/of_platform.h>
++#include <linux/i2c.h>
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++
++#define REG_CONFIG_REG 0x00
++#define REG_CLKDIV_REG 0x04
++#define REG_DEVADDR_REG 0x08
++#define REG_ADDR_REG 0x0C
++#define REG_DATAOUT_REG 0x10
++#define REG_DATAIN_REG 0x14
++#define REG_STATUS_REG 0x18
++#define REG_STARTXFR_REG 0x1C
++#define REG_BYTECNT_REG 0x20
++
++/* REG_CONFIG_REG */
++#define I2C_ADDRLEN_OFFSET 5
++#define I2C_DEVADLEN_OFFSET 2
++#define I2C_ADDRLEN_MASK 0x3
++#define I2C_ADDR_DIS BIT(1)
++#define I2C_DEVADDR_DIS BIT(0)
++#define I2C_ADDRLEN_8 (7 << I2C_ADDRLEN_OFFSET)
++#define I2C_DEVADLEN_7 (6 << I2C_DEVADLEN_OFFSET)
++#define I2C_CONF_DEFAULT (I2C_ADDRLEN_8 | I2C_DEVADLEN_7)
++
++/* REG_CLKDIV_REG */
++#define I2C_CLKDIV_MASK 0xffff
++
++/* REG_DEVADDR_REG */
++#define I2C_DEVADDR_MASK 0x7f
++
++/* REG_ADDR_REG */
++#define I2C_ADDR_MASK 0xff
++
++/* REG_STATUS_REG */
++#define I2C_STARTERR BIT(4)
++#define I2C_ACKERR BIT(3)
++#define I2C_DATARDY BIT(2)
++#define I2C_SDOEMPTY BIT(1)
++#define I2C_BUSY BIT(0)
++
++/* REG_STARTXFR_REG */
++#define NOSTOP_CMD BIT(2)
++#define NODATA_CMD BIT(1)
++#define READ_CMD BIT(0)
++
++/* REG_BYTECNT_REG */
++#define BYTECNT_MAX 64
++#define SET_BYTECNT(x) ((x) - 1)
++
++/* timeout waiting for I2C devices to respond (clock stretching) */
++#define TIMEOUT_MS 1000
++#define DELAY_INTERVAL_US 100
++
++struct rt_i2c {
++ void __iomem *base;
++ struct clk *clk;
++ struct device *dev;
++ struct i2c_adapter adap;
++ u32 cur_clk;
++ u32 clk_div;
++ u32 flags;
++};
++
++static void rt_i2c_w32(struct rt_i2c *i2c, u32 val, unsigned reg)
++{
++ iowrite32(val, i2c->base + reg);
++}
++
++static u32 rt_i2c_r32(struct rt_i2c *i2c, unsigned reg)
++{
++ return ioread32(i2c->base + reg);
++}
++
++static int poll_down_timeout(void __iomem *addr, u32 mask)
++{
++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);
++
++ do {
++ if (!(readl_relaxed(addr) & mask))
++ return 0;
++
++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
++ } while (time_before(jiffies, timeout));
++
++ return (readl_relaxed(addr) & mask) ? -EAGAIN : 0;
++}
++
++static int rt_i2c_wait_idle(struct rt_i2c *i2c)
++{
++ int ret;
++
++ ret = poll_down_timeout(i2c->base + REG_STATUS_REG, I2C_BUSY);
++ if (ret < 0)
++ dev_dbg(i2c->dev, "idle err(%d)\n", ret);
++
++ return ret;
++}
++
++static int poll_up_timeout(void __iomem *addr, u32 mask)
++{
++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);
++ u32 status;
++
++ do {
++ status = readl_relaxed(addr);
++
++ /* check error status */
++ if (status & I2C_STARTERR)
++ return -EAGAIN;
++ else if (status & I2C_ACKERR)
++ return -ENXIO;
++ else if (status & mask)
++ return 0;
++
++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
++ } while (time_before(jiffies, timeout));
++
++ return -ETIMEDOUT;
++}
++
++static int rt_i2c_wait_rx_done(struct rt_i2c *i2c)
++{
++ int ret;
++
++ ret = poll_up_timeout(i2c->base + REG_STATUS_REG, I2C_DATARDY);
++ if (ret < 0)
++ dev_dbg(i2c->dev, "rx err(%d)\n", ret);
++
++ return ret;
++}
++
++static int rt_i2c_wait_tx_done(struct rt_i2c *i2c)
++{
++ int ret;
++
++ ret = poll_up_timeout(i2c->base + REG_STATUS_REG, I2C_SDOEMPTY);
++ if (ret < 0)
++ dev_dbg(i2c->dev, "tx err(%d)\n", ret);
++
++ return ret;
++}
++
++static void rt_i2c_reset(struct rt_i2c *i2c)
++{
++ device_reset(i2c->adap.dev.parent);
++ barrier();
++ rt_i2c_w32(i2c, i2c->clk_div, REG_CLKDIV_REG);
++}
++
++static void rt_i2c_dump_reg(struct rt_i2c *i2c)
++{
++ dev_dbg(i2c->dev, "conf %08x, clkdiv %08x, devaddr %08x, " \
++ "addr %08x, dataout %08x, datain %08x, " \
++ "status %08x, startxfr %08x, bytecnt %08x\n",
++ rt_i2c_r32(i2c, REG_CONFIG_REG),
++ rt_i2c_r32(i2c, REG_CLKDIV_REG),
++ rt_i2c_r32(i2c, REG_DEVADDR_REG),
++ rt_i2c_r32(i2c, REG_ADDR_REG),
++ rt_i2c_r32(i2c, REG_DATAOUT_REG),
++ rt_i2c_r32(i2c, REG_DATAIN_REG),
++ rt_i2c_r32(i2c, REG_STATUS_REG),
++ rt_i2c_r32(i2c, REG_STARTXFR_REG),
++ rt_i2c_r32(i2c, REG_BYTECNT_REG));
++}
++
++static int rt_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
++ int num)
++{
++ struct rt_i2c *i2c;
++ struct i2c_msg *pmsg;
++ unsigned char addr;
++ int i, j, ret;
++ u32 cmd;
++
++ i2c = i2c_get_adapdata(adap);
++
++ for (i = 0; i < num; i++) {
++ pmsg = &msgs[i];
++ if (i == (num - 1))
++ cmd = 0;
++ else
++ cmd = NOSTOP_CMD;
++
++ dev_dbg(i2c->dev, "addr: 0x%x, len: %d, flags: 0x%x, stop: %d\n",
++ pmsg->addr, pmsg->len, pmsg->flags,
++ (cmd == 0)? 1 : 0);
++
++ /* wait hardware idle */
++ if ((ret = rt_i2c_wait_idle(i2c)))
++ goto err_timeout;
++
++ if (pmsg->flags & I2C_M_TEN) {
++ rt_i2c_w32(i2c, I2C_CONF_DEFAULT, REG_CONFIG_REG);
++ /* 10 bits address */
++ addr = 0x78 | ((pmsg->addr >> 8) & 0x03);
++ rt_i2c_w32(i2c, addr & I2C_DEVADDR_MASK,
++ REG_DEVADDR_REG);
++ rt_i2c_w32(i2c, pmsg->addr & I2C_ADDR_MASK,
++ REG_ADDR_REG);
++ } else {
++ rt_i2c_w32(i2c, I2C_CONF_DEFAULT | I2C_ADDR_DIS,
++ REG_CONFIG_REG);
++ /* 7 bits address */
++ rt_i2c_w32(i2c, pmsg->addr & I2C_DEVADDR_MASK,
++ REG_DEVADDR_REG);
++ }
++
++ /* buffer length */
++ if (pmsg->len == 0)
++ cmd |= NODATA_CMD;
++ else
++ rt_i2c_w32(i2c, SET_BYTECNT(pmsg->len),
++ REG_BYTECNT_REG);
++
++ j = 0;
++ if (pmsg->flags & I2C_M_RD) {
++ cmd |= READ_CMD;
++ /* start transfer */
++ barrier();
++ rt_i2c_w32(i2c, cmd, REG_STARTXFR_REG);
++ do {
++ /* wait */
++ if ((ret = rt_i2c_wait_rx_done(i2c)))
++ goto err_timeout;
++ /* read data */
++ if (pmsg->len)
++ pmsg->buf[j] = rt_i2c_r32(i2c,
++ REG_DATAIN_REG);
++ j++;
++ } while (j < pmsg->len);
++ } else {
++ do {
++ /* write data */
++ if (pmsg->len)
++ rt_i2c_w32(i2c, pmsg->buf[j],
++ REG_DATAOUT_REG);
++ /* start transfer */
++ if (j == 0) {
++ barrier();
++ rt_i2c_w32(i2c, cmd, REG_STARTXFR_REG);
++ }
++ /* wait */
++ if ((ret = rt_i2c_wait_tx_done(i2c)))
++ goto err_timeout;
++ j++;
++ } while (j < pmsg->len);
++ }
++ }
++ /* the return value is number of executed messages */
++ ret = i;
++
++ return ret;
++
++err_timeout:
++ rt_i2c_dump_reg(i2c);
++ rt_i2c_reset(i2c);
++ return ret;
++}
++
++static u32 rt_i2c_func(struct i2c_adapter *a)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++}
++
++static const struct i2c_algorithm rt_i2c_algo = {
++ .master_xfer = rt_i2c_master_xfer,
++ .functionality = rt_i2c_func,
++};
++
++static const struct of_device_id i2c_rt_dt_ids[] = {
++ { .compatible = "ralink,rt2880-i2c" },
++ { /* sentinel */ }
++};
++
++MODULE_DEVICE_TABLE(of, i2c_rt_dt_ids);
++
++static struct i2c_adapter_quirks rt_i2c_quirks = {
++ .max_write_len = BYTECNT_MAX,
++ .max_read_len = BYTECNT_MAX,
++};
++
++static int rt_i2c_init(struct rt_i2c *i2c)
++{
++ u32 reg;
++
++ /* i2c_sclk = periph_clk / ((2 * clk_div) + 5) */
++ i2c->clk_div = (clk_get_rate(i2c->clk) - (5 * i2c->cur_clk)) /
++ (2 * i2c->cur_clk);
++ if (i2c->clk_div < 8)
++ i2c->clk_div = 8;
++ if (i2c->clk_div > I2C_CLKDIV_MASK)
++ i2c->clk_div = I2C_CLKDIV_MASK;
++
++ /* check support for combined/repeated start messages */
++ rt_i2c_w32(i2c, NOSTOP_CMD, REG_STARTXFR_REG);
++ reg = rt_i2c_r32(i2c, REG_STARTXFR_REG) & NOSTOP_CMD;
++
++ rt_i2c_reset(i2c);
++
++ return reg;
++}
++
++static int rt_i2c_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct rt_i2c *i2c;
++ struct i2c_adapter *adap;
++ const struct of_device_id *match;
++ int ret, restart;
++
++ match = of_match_device(i2c_rt_dt_ids, &pdev->dev);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "no memory resource found\n");
++ return -ENODEV;
++ }
++
++ i2c = devm_kzalloc(&pdev->dev, sizeof(struct rt_i2c), GFP_KERNEL);
++ if (!i2c) {
++ dev_err(&pdev->dev, "failed to allocate i2c_adapter\n");
++ return -ENOMEM;
++ }
++
++ i2c->base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(i2c->base))
++ return PTR_ERR(i2c->base);
++
++ i2c->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(i2c->clk)) {
++ dev_err(&pdev->dev, "no clock defined\n");
++ return -ENODEV;
++ }
++ clk_prepare_enable(i2c->clk);
++ i2c->dev = &pdev->dev;
++
++ if (of_property_read_u32(pdev->dev.of_node,
++ "clock-frequency", &i2c->cur_clk))
++ i2c->cur_clk = 100000;
++
++ adap = &i2c->adap;
++ adap->owner = THIS_MODULE;
++ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
++ adap->algo = &rt_i2c_algo;
++ adap->retries = 3;
++ adap->dev.parent = &pdev->dev;
++ i2c_set_adapdata(adap, i2c);
++ adap->dev.of_node = pdev->dev.of_node;
++ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
++ adap->quirks = &rt_i2c_quirks;
++
++ platform_set_drvdata(pdev, i2c);
++
++ restart = rt_i2c_init(i2c);
++
++ ret = i2c_add_adapter(adap);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to add adapter\n");
++ clk_disable_unprepare(i2c->clk);
++ return ret;
++ }
++
++ dev_info(&pdev->dev, "clock %ukHz, re-start %ssupported\n",
++ i2c->cur_clk/1000, restart ? "" : "not ");
++
++ return ret;
++}
++
++static int rt_i2c_remove(struct platform_device *pdev)
++{
++ struct rt_i2c *i2c = platform_get_drvdata(pdev);
++
++ i2c_del_adapter(&i2c->adap);
++ clk_disable_unprepare(i2c->clk);
++
++ return 0;
++}
++
++static struct platform_driver rt_i2c_driver = {
++ .probe = rt_i2c_probe,
++ .remove = rt_i2c_remove,
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "i2c-ralink",
++ .of_match_table = i2c_rt_dt_ids,
++ },
++};
++
++static int __init i2c_rt_init(void)
++{
++ return platform_driver_register(&rt_i2c_driver);
++}
++subsys_initcall(i2c_rt_init);
++
++static void __exit i2c_rt_exit(void)
++{
++ platform_driver_unregister(&rt_i2c_driver);
++}
++module_exit(i2c_rt_exit);
++
++MODULE_AUTHOR("Steven Liu <steven_liu@mediatek.com>");
++MODULE_DESCRIPTION("Ralink I2c host driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:Ralink-I2C");
diff --git a/target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch b/target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch
new file mode 100644
index 0000000000..9cd8c4b5cf
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch
@@ -0,0 +1,473 @@
+From d5c54ff3d1db0a4348fa04d8e78f3bf6063e3afc Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 7 Dec 2015 17:21:27 +0100
+Subject: [PATCH 45/53] i2c: add mt7621 driver
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/i2c/busses/Kconfig | 4 +
+ drivers/i2c/busses/Makefile | 1 +
+ drivers/i2c/busses/i2c-mt7621.c | 303 +++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 308 insertions(+)
+ create mode 100644 drivers/i2c/busses/i2c-mt7621.c
+
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -868,6 +868,11 @@ config I2C_RALINK
+ depends on RALINK && !SOC_MT7621
+ select OF_I2C
+
++config I2C_MT7621
++ tristate "MT7621/MT7628 I2C Controller"
++ depends on RALINK && (SOC_MT7620 || SOC_MT7621)
++ select OF_I2C
++
+ config HAVE_S3C2410_I2C
+ bool
+ help
+--- a/drivers/i2c/busses/Makefile
++++ b/drivers/i2c/busses/Makefile
+@@ -85,6 +85,7 @@ obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o
+ obj-$(CONFIG_I2C_PXA) += i2c-pxa.o
+ obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o
+ obj-$(CONFIG_I2C_RALINK) += i2c-ralink.o
++obj-$(CONFIG_I2C_MT7621) += i2c-mt7621.o
+ obj-$(CONFIG_I2C_QUP) += i2c-qup.o
+ obj-$(CONFIG_I2C_RIIC) += i2c-riic.o
+ obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o
+--- /dev/null
++++ b/drivers/i2c/busses/i2c-mt7621.c
+@@ -0,0 +1,433 @@
++/*
++ * drivers/i2c/busses/i2c-mt7621.c
++ *
++ * Copyright (C) 2013 Steven Liu <steven_liu@mediatek.com>
++ * Copyright (C) 2016 Michael Lee <igvtee@gmail.com>
++ *
++ * Improved the driver so that i2cdetect from i2c-tools can detect i2c devices on the bus.
++ * (C) 2014 Sittisak <sittisaks@hotmail.com>
++ *
++ * This software is licensed under the terms of the GNU General Public
++ * License version 2, as published by the Free Software Foundation, and
++ * may be copied, distributed, and modified under those terms.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#include <linux/interrupt.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/reset.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/errno.h>
++#include <linux/platform_device.h>
++#include <linux/of_platform.h>
++#include <linux/i2c.h>
++#include <linux/io.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++
++#define REG_SM0CFG0 0x08
++#define REG_SM0DOUT 0x10
++#define REG_SM0DIN 0x14
++#define REG_SM0ST 0x18
++#define REG_SM0AUTO 0x1C
++#define REG_SM0CFG1 0x20
++#define REG_SM0CFG2 0x28
++#define REG_SM0CTL0 0x40
++#define REG_SM0CTL1 0x44
++#define REG_SM0D0 0x50
++#define REG_SM0D1 0x54
++#define REG_PINTEN 0x5C
++#define REG_PINTST 0x60
++#define REG_PINTCL 0x64
++
++/* REG_SM0CFG0 */
++#define I2C_DEVADDR_MASK 0x7f
++
++/* REG_SM0ST */
++#define I2C_DATARDY BIT(2)
++#define I2C_SDOEMPTY BIT(1)
++#define I2C_BUSY BIT(0)
++
++/* REG_SM0AUTO */
++#define READ_CMD BIT(0)
++
++/* REG_SM0CFG1 */
++#define BYTECNT_MAX 64
++#define SET_BYTECNT(x) ((x) - 1)
++
++/* REG_SM0CFG2 */
++#define AUTOMODE_EN BIT(0)
++
++/* REG_SM0CTL0 */
++#define ODRAIN_HIGH_SM0 BIT(31)
++#define VSYNC_SHIFT 28
++#define VSYNC_MASK 0x3
++#define VSYNC_PULSE (0x1 << VSYNC_SHIFT)
++#define VSYNC_RISING (0x2 << VSYNC_SHIFT)
++#define CLK_DIV_SHIFT 16
++#define CLK_DIV_MASK 0xfff
++#define DEG_CNT_SHIFT 8
++#define DEG_CNT_MASK 0xff
++#define WAIT_HIGH BIT(6)
++#define DEG_EN BIT(5)
++#define CS_STATUA BIT(4)
++#define SCL_STATUS BIT(3)
++#define SDA_STATUS BIT(2)
++#define SM0_EN BIT(1)
++#define SCL_STRECH BIT(0)
++
++/* REG_SM0CTL1 */
++#define ACK_SHIFT 16
++#define ACK_MASK 0xff
++#define PGLEN_SHIFT 8
++#define PGLEN_MASK 0x7
++#define SM0_MODE_SHIFT 4
++#define SM0_MODE_MASK 0x7
++#define SM0_MODE_START 0x1
++#define SM0_MODE_WRITE 0x2
++#define SM0_MODE_STOP 0x3
++#define SM0_MODE_READ_NACK 0x4
++#define SM0_MODE_READ_ACK 0x5
++#define SM0_TRI_BUSY BIT(0)
++
++/* timeout waiting for I2C devices to respond (clock stretching) */
++#define TIMEOUT_MS 1000
++#define DELAY_INTERVAL_US 100
++
++struct mtk_i2c {
++ void __iomem *base;
++ struct clk *clk;
++ struct device *dev;
++ struct i2c_adapter adap;
++ u32 cur_clk;
++ u32 clk_div;
++ u32 flags;
++};
++
++static void mtk_i2c_w32(struct mtk_i2c *i2c, u32 val, unsigned reg)
++{
++ iowrite32(val, i2c->base + reg);
++}
++
++static u32 mtk_i2c_r32(struct mtk_i2c *i2c, unsigned reg)
++{
++ return ioread32(i2c->base + reg);
++}
++
++static int poll_down_timeout(void __iomem *addr, u32 mask)
++{
++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);
++
++ do {
++ if (!(readl_relaxed(addr) & mask))
++ return 0;
++
++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
++ } while (time_before(jiffies, timeout));
++
++ return (readl_relaxed(addr) & mask) ? -EAGAIN : 0;
++}
++
++static int mtk_i2c_wait_idle(struct mtk_i2c *i2c)
++{
++ int ret;
++
++ ret = poll_down_timeout(i2c->base + REG_SM0ST, I2C_BUSY);
++ if (ret < 0)
++ dev_dbg(i2c->dev, "idle err(%d)\n", ret);
++
++ return ret;
++}
++
++static int poll_up_timeout(void __iomem *addr, u32 mask)
++{
++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);
++ u32 status;
++
++ do {
++ status = readl_relaxed(addr);
++ if (status & mask)
++ return 0;
++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
++ } while (time_before(jiffies, timeout));
++
++ return -ETIMEDOUT;
++}
++
++static int mtk_i2c_wait_rx_done(struct mtk_i2c *i2c)
++{
++ int ret;
++
++ ret = poll_up_timeout(i2c->base + REG_SM0ST, I2C_DATARDY);
++ if (ret < 0)
++ dev_dbg(i2c->dev, "rx err(%d)\n", ret);
++
++ return ret;
++}
++
++static int mtk_i2c_wait_tx_done(struct mtk_i2c *i2c)
++{
++ int ret;
++
++ ret = poll_up_timeout(i2c->base + REG_SM0ST, I2C_SDOEMPTY);
++ if (ret < 0)
++ dev_dbg(i2c->dev, "tx err(%d)\n", ret);
++
++ return ret;
++}
++
++static void mtk_i2c_reset(struct mtk_i2c *i2c)
++{
++ u32 reg;
++ device_reset(i2c->adap.dev.parent);
++ barrier();
++
++ /* ctrl0 */
++ reg = ODRAIN_HIGH_SM0 | VSYNC_PULSE | (i2c->clk_div << CLK_DIV_SHIFT) |
++ WAIT_HIGH | SM0_EN;
++ mtk_i2c_w32(i2c, reg, REG_SM0CTL0);
++
++ /* auto mode */
++ mtk_i2c_w32(i2c, AUTOMODE_EN, REG_SM0CFG2);
++}
++
++static void mtk_i2c_dump_reg(struct mtk_i2c *i2c)
++{
++ dev_dbg(i2c->dev, "cfg0 %08x, dout %08x, din %08x, " \
++ "status %08x, auto %08x, cfg1 %08x, " \
++ "cfg2 %08x, ctl0 %08x, ctl1 %08x\n",
++ mtk_i2c_r32(i2c, REG_SM0CFG0),
++ mtk_i2c_r32(i2c, REG_SM0DOUT),
++ mtk_i2c_r32(i2c, REG_SM0DIN),
++ mtk_i2c_r32(i2c, REG_SM0ST),
++ mtk_i2c_r32(i2c, REG_SM0AUTO),
++ mtk_i2c_r32(i2c, REG_SM0CFG1),
++ mtk_i2c_r32(i2c, REG_SM0CFG2),
++ mtk_i2c_r32(i2c, REG_SM0CTL0),
++ mtk_i2c_r32(i2c, REG_SM0CTL1));
++}
++
++static int mtk_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
++ int num)
++{
++ struct mtk_i2c *i2c;
++ struct i2c_msg *pmsg;
++ int i, j, ret;
++ u32 cmd;
++
++ i2c = i2c_get_adapdata(adap);
++
++ for (i = 0; i < num; i++) {
++ pmsg = &msgs[i];
++ cmd = 0;
++
++ dev_dbg(i2c->dev, "addr: 0x%x, len: %d, flags: 0x%x\n",
++ pmsg->addr, pmsg->len, pmsg->flags);
++
++ /* wait hardware idle */
++ if ((ret = mtk_i2c_wait_idle(i2c)))
++ goto err_timeout;
++
++ if (pmsg->flags & I2C_M_TEN) {
++ dev_dbg(i2c->dev, "10 bits addr not supported\n");
++ return -EINVAL;
++ } else {
++ /* 7 bits address */
++ mtk_i2c_w32(i2c, pmsg->addr & I2C_DEVADDR_MASK,
++ REG_SM0CFG0);
++ }
++
++ /* buffer length */
++ if (pmsg->len == 0) {
++ dev_dbg(i2c->dev, "length is 0\n");
++ return -EINVAL;
++ } else
++ mtk_i2c_w32(i2c, SET_BYTECNT(pmsg->len),
++ REG_SM0CFG1);
++
++ j = 0;
++ if (pmsg->flags & I2C_M_RD) {
++ cmd |= READ_CMD;
++ /* start transfer */
++ barrier();
++ mtk_i2c_w32(i2c, cmd, REG_SM0AUTO);
++ do {
++ /* wait */
++ if ((ret = mtk_i2c_wait_rx_done(i2c)))
++ goto err_timeout;
++ /* read data */
++ if (pmsg->len)
++ pmsg->buf[j] = mtk_i2c_r32(i2c,
++ REG_SM0DIN);
++ j++;
++ } while (j < pmsg->len);
++ } else {
++ do {
++ /* write data */
++ if (pmsg->len)
++ mtk_i2c_w32(i2c, pmsg->buf[j],
++ REG_SM0DOUT);
++ /* start transfer */
++ if (j == 0) {
++ barrier();
++ mtk_i2c_w32(i2c, cmd, REG_SM0AUTO);
++ }
++ /* wait */
++ if ((ret = mtk_i2c_wait_tx_done(i2c)))
++ goto err_timeout;
++ j++;
++ } while (j < pmsg->len);
++ }
++ }
++ /* the return value is number of executed messages */
++ ret = i;
++
++ return ret;
++
++err_timeout:
++ mtk_i2c_dump_reg(i2c);
++ mtk_i2c_reset(i2c);
++ return ret;
++}
++
++static u32 mtk_i2c_func(struct i2c_adapter *a)
++{
++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
++}
++
++static const struct i2c_algorithm mtk_i2c_algo = {
++ .master_xfer = mtk_i2c_master_xfer,
++ .functionality = mtk_i2c_func,
++};
++
++static const struct of_device_id i2c_mtk_dt_ids[] = {
++ { .compatible = "mediatek,mt7621-i2c" },
++ { /* sentinel */ }
++};
++
++MODULE_DEVICE_TABLE(of, i2c_mtk_dt_ids);
++
++static struct i2c_adapter_quirks mtk_i2c_quirks = {
++ .max_write_len = BYTECNT_MAX,
++ .max_read_len = BYTECNT_MAX,
++};
++
++static void mtk_i2c_init(struct mtk_i2c *i2c)
++{
++ i2c->clk_div = clk_get_rate(i2c->clk) / i2c->cur_clk;
++ if (i2c->clk_div > CLK_DIV_MASK)
++ i2c->clk_div = CLK_DIV_MASK;
++
++ mtk_i2c_reset(i2c);
++}
++
++static int mtk_i2c_probe(struct platform_device *pdev)
++{
++ struct resource *res;
++ struct mtk_i2c *i2c;
++ struct i2c_adapter *adap;
++ const struct of_device_id *match;
++ int ret;
++
++ match = of_match_device(i2c_mtk_dt_ids, &pdev->dev);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "no memory resource found\n");
++ return -ENODEV;
++ }
++
++ i2c = devm_kzalloc(&pdev->dev, sizeof(struct mtk_i2c), GFP_KERNEL);
++ if (!i2c) {
++ dev_err(&pdev->dev, "failed to allocate i2c_adapter\n");
++ return -ENOMEM;
++ }
++
++ i2c->base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(i2c->base))
++ return PTR_ERR(i2c->base);
++
++ i2c->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(i2c->clk)) {
++ dev_err(&pdev->dev, "no clock defined\n");
++ return -ENODEV;
++ }
++ clk_prepare_enable(i2c->clk);
++ i2c->dev = &pdev->dev;
++
++ if (of_property_read_u32(pdev->dev.of_node,
++ "clock-frequency", &i2c->cur_clk))
++ i2c->cur_clk = 100000;
++
++ adap = &i2c->adap;
++ adap->owner = THIS_MODULE;
++ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
++ adap->algo = &mtk_i2c_algo;
++ adap->retries = 3;
++ adap->dev.parent = &pdev->dev;
++ i2c_set_adapdata(adap, i2c);
++ adap->dev.of_node = pdev->dev.of_node;
++ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name));
++ adap->quirks = &mtk_i2c_quirks;
++
++ platform_set_drvdata(pdev, i2c);
++
++ mtk_i2c_init(i2c);
++
++ ret = i2c_add_adapter(adap);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to add adapter\n");
++ clk_disable_unprepare(i2c->clk);
++ return ret;
++ }
++
++ dev_info(&pdev->dev, "clock %ukHz, re-start not supported\n",
++ i2c->cur_clk/1000);
++
++ return ret;
++}
++
++static int mtk_i2c_remove(struct platform_device *pdev)
++{
++ struct mtk_i2c *i2c = platform_get_drvdata(pdev);
++
++ i2c_del_adapter(&i2c->adap);
++ clk_disable_unprepare(i2c->clk);
++
++ return 0;
++}
++
++static struct platform_driver mtk_i2c_driver = {
++ .probe = mtk_i2c_probe,
++ .remove = mtk_i2c_remove,
++ .driver = {
++ .owner = THIS_MODULE,
++ .name = "i2c-mt7621",
++ .of_match_table = i2c_mtk_dt_ids,
++ },
++};
++
++static int __init i2c_mtk_init(void)
++{
++ return platform_driver_register(&mtk_i2c_driver);
++}
++subsys_initcall(i2c_mtk_init);
++
++static void __exit i2c_mtk_exit(void)
++{
++ platform_driver_unregister(&mtk_i2c_driver);
++}
++module_exit(i2c_mtk_exit);
++
++MODULE_AUTHOR("Steven Liu <steven_liu@mediatek.com>");
++MODULE_DESCRIPTION("MT7621 I2c host driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:MT7621-I2C");
diff --git a/target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch b/target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch
new file mode 100644
index 0000000000..d3a088b736
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch
@@ -0,0 +1,4831 @@
+From 23147af14531cbdada194b94120ef8774f46292d Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Thu, 13 Nov 2014 19:08:40 +0100
+Subject: [PATCH 46/53] mmc: MIPS: ralink: add sdhci for mt7620a SoC
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/mmc/host/Kconfig | 2 +
+ drivers/mmc/host/Makefile | 1 +
+ drivers/mmc/host/mtk-mmc/Kconfig | 16 +
+ drivers/mmc/host/mtk-mmc/Makefile | 42 +
+ drivers/mmc/host/mtk-mmc/board.h | 137 ++
+ drivers/mmc/host/mtk-mmc/dbg.c | 347 ++++
+ drivers/mmc/host/mtk-mmc/dbg.h | 156 ++
+ drivers/mmc/host/mtk-mmc/mt6575_sd.h | 1001 +++++++++++
+ drivers/mmc/host/mtk-mmc/sd.c | 3060 ++++++++++++++++++++++++++++++++++
+ 9 files changed, 4762 insertions(+)
+ create mode 100644 drivers/mmc/host/mtk-mmc/Kconfig
+ create mode 100644 drivers/mmc/host/mtk-mmc/Makefile
+ create mode 100644 drivers/mmc/host/mtk-mmc/board.h
+ create mode 100644 drivers/mmc/host/mtk-mmc/dbg.c
+ create mode 100644 drivers/mmc/host/mtk-mmc/dbg.h
+ create mode 100644 drivers/mmc/host/mtk-mmc/mt6575_sd.h
+ create mode 100644 drivers/mmc/host/mtk-mmc/sd.c
+
+--- a/drivers/mmc/host/Kconfig
++++ b/drivers/mmc/host/Kconfig
+@@ -899,3 +899,5 @@ config MMC_SDHCI_XENON
+ This selects Marvell Xenon eMMC/SD/SDIO SDHCI.
+ If you have a controller with this interface, say Y or M here.
+ If unsure, say N.
++
++source "drivers/mmc/host/mtk-mmc/Kconfig"
+--- a/drivers/mmc/host/Makefile
++++ b/drivers/mmc/host/Makefile
+@@ -3,6 +3,7 @@
+ # Makefile for MMC/SD host controller drivers
+ #
+
++obj-$(CONFIG_MTK_MMC) += mtk-mmc/
+ obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o
+ armmmci-y := mmci.o
+ armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/Kconfig
+@@ -0,0 +1,16 @@
++config MTK_MMC
++ tristate "MTK SD/MMC"
++ depends on !MTD_NAND_RALINK
++
++config MTK_AEE_KDUMP
++ bool "MTK AEE KDUMP"
++ depends on MTK_MMC
++
++config MTK_MMC_CD_POLL
++ bool "Card Detect with Polling"
++ depends on MTK_MMC
++
++config MTK_MMC_EMMC_8BIT
++ bool "eMMC 8-bit support"
++ depends on MTK_MMC && RALINK_MT7628
++
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/Makefile
+@@ -0,0 +1,42 @@
++# Copyright Statement:
++#
++# This software/firmware and related documentation ("MediaTek Software") are
++# protected under relevant copyright laws. The information contained herein
++# is confidential and proprietary to MediaTek Inc. and/or its licensors.
++# Without the prior written permission of MediaTek inc. and/or its licensors,
++# any reproduction, modification, use or disclosure of MediaTek Software,
++# and information contained herein, in whole or in part, shall be strictly prohibited.
++#
++# MediaTek Inc. (C) 2010. All rights reserved.
++#
++# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++#
++# The following software/firmware and/or related documentation ("MediaTek Software")
++# have been modified by MediaTek Inc. All revisions are subject to any receiver's
++# applicable license agreements with MediaTek Inc.
++
++obj-$(CONFIG_MTK_MMC) += mtk_sd.o
++mtk_sd-objs := sd.o dbg.o
++ifeq ($(CONFIG_MTK_AEE_KDUMP),y)
++EXTRA_CFLAGS += -DMT6575_SD_DEBUG
++endif
++
++clean:
++ @rm -f *.o modules.order .*.cmd
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/board.h
+@@ -0,0 +1,137 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ */
++/* MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#ifndef __ARCH_ARM_MACH_BOARD_H
++#define __ARCH_ARM_MACH_BOARD_H
++
++#include <generated/autoconf.h>
++#include <linux/pm.h>
++/* --- chhung */
++// #include <mach/mt6575.h>
++// #include <board-custom.h>
++/* end of chhung */
++
++typedef void (*sdio_irq_handler_t)(void*); /* external irq handler */
++typedef void (*pm_callback_t)(pm_message_t state, void *data);
++
++#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */
++#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */
++#define MSDC_RST_PIN_EN (1 << 2) /* emmc reset pin is wired */
++#define MSDC_SDIO_IRQ (1 << 3) /* use internal sdio irq (bus) */
++#define MSDC_EXT_SDIO_IRQ (1 << 4) /* use external sdio irq */
++#define MSDC_REMOVABLE (1 << 5) /* removable slot */
++#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */
++#define MSDC_HIGHSPEED (1 << 7) /* high-speed mode support */
++#define MSDC_UHS1 (1 << 8) /* uhs-1 mode support */
++#define MSDC_DDR (1 << 9) /* ddr mode support */
++
++
++#define MSDC_SMPL_RISING (0)
++#define MSDC_SMPL_FALLING (1)
++
++#define MSDC_CMD_PIN (0)
++#define MSDC_DAT_PIN (1)
++#define MSDC_CD_PIN (2)
++#define MSDC_WP_PIN (3)
++#define MSDC_RST_PIN (4)
++
++enum {
++ MSDC_CLKSRC_48MHZ = 0,
++// MSDC_CLKSRC_26MHZ = 0,
++// MSDC_CLKSRC_197MHZ = 1,
++// MSDC_CLKSRC_208MHZ = 2
++};
++
++struct msdc_hw {
++ unsigned char clk_src; /* host clock source */
++ unsigned char cmd_edge; /* command latch edge */
++ unsigned char data_edge; /* data latch edge */
++ unsigned char clk_drv; /* clock pad driving */
++ unsigned char cmd_drv; /* command pad driving */
++ unsigned char dat_drv; /* data pad driving */
++ unsigned long flags; /* hardware capability flags */
++ unsigned long data_pins; /* data pins */
++ unsigned long data_offset; /* data address offset */
++
++ /* config gpio pull mode */
++ void (*config_gpio_pin)(int type, int pull);
++
++ /* external power control for card */
++ void (*ext_power_on)(void);
++ void (*ext_power_off)(void);
++
++ /* external sdio irq operations */
++ void (*request_sdio_eirq)(sdio_irq_handler_t sdio_irq_handler, void *data);
++ void (*enable_sdio_eirq)(void);
++ void (*disable_sdio_eirq)(void);
++
++ /* external cd irq operations */
++ void (*request_cd_eirq)(sdio_irq_handler_t cd_irq_handler, void *data);
++ void (*enable_cd_eirq)(void);
++ void (*disable_cd_eirq)(void);
++ int (*get_cd_status)(void);
++
++ /* power management callback for external module */
++ void (*register_pm)(pm_callback_t pm_cb, void *data);
++};
++
++extern struct msdc_hw msdc0_hw;
++extern struct msdc_hw msdc1_hw;
++extern struct msdc_hw msdc2_hw;
++extern struct msdc_hw msdc3_hw;
++
++/*GPS driver*/
++#define GPS_FLAG_FORCE_OFF 0x0001
++struct mt3326_gps_hardware {
++ int (*ext_power_on)(int);
++ int (*ext_power_off)(int);
++};
++extern struct mt3326_gps_hardware mt3326_gps_hw;
++
++/* NAND driver */
++struct mt6575_nand_host_hw {
++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */
++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */
++ unsigned int nfi_cs_num; /* NFI_CS_NUM */
++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */
++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */
++ unsigned int nand_ecc_size;
++ unsigned int nand_ecc_bytes;
++ unsigned int nand_ecc_mode;
++};
++extern struct mt6575_nand_host_hw mt6575_nand_hw;
++
++#endif /* __ARCH_ARM_MACH_BOARD_H */
++
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/dbg.c
+@@ -0,0 +1,347 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ *
++ * MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/kthread.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++// #include <mach/mt6575_gpt.h> /* --- by chhung */
++#include "dbg.h"
++#include "mt6575_sd.h"
++#include <linux/seq_file.h>
++
++static char cmd_buf[256];
++
++/* for debug zone */
++unsigned int sd_debug_zone[4]={
++ 0,
++ 0,
++ 0,
++ 0
++};
++
++/* mode select */
++u32 dma_size[4]={
++ 512,
++ 512,
++ 512,
++ 512
++};
++msdc_mode drv_mode[4]={
++ MODE_SIZE_DEP, /* whether to use DMA depends on the transfer size */
++ MODE_SIZE_DEP,
++ MODE_SIZE_DEP,
++ MODE_SIZE_DEP
++};
++
++#if defined (MT6575_SD_DEBUG)
++/* for driver profile */
++#define TICKS_ONE_MS (13000)
++u32 gpt_enable = 0;
++u32 sdio_pro_enable = 0; /* make sure gpt is enabled */
++u32 sdio_pro_time = 0; /* no more than 30s */
++struct sdio_profile sdio_perfomance = {0};
++
++#if 0 /* --- chhung */
++void msdc_init_gpt(void)
++{
++ GPT_CONFIG config;
++
++ config.num = GPT6;
++ config.mode = GPT_FREE_RUN;
++ config.clkSrc = GPT_CLK_SRC_SYS;
++ config.clkDiv = GPT_CLK_DIV_1; /* 13MHz GPT6 */
++
++ if (GPT_Config(config) == FALSE )
++ return;
++
++ GPT_Start(GPT6);
++}
++#endif /* end of --- */
++
++u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32)
++{
++ u32 ret = 0;
++
++ if (new_H32 == old_H32) {
++ ret = new_L32 - old_L32;
++ } else if(new_H32 == (old_H32 + 1)) {
++ if (new_L32 > old_L32) {
++ printk("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32);
++ }
++ ret = (0xffffffff - old_L32);
++ ret += new_L32;
++ } else {
++ printk("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32);
++ }
++
++ return ret;
++}
++
++void msdc_sdio_profile(struct sdio_profile* result)
++{
++ struct cmd_profile* cmd;
++ u32 i;
++
++ printk("sdio === performance dump ===\n");
++ printk("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n",
++ result->total_tc, result->total_tc / TICKS_ONE_MS,
++ result->total_tx_bytes, result->total_rx_bytes);
++
++ /* CMD52 Dump */
++ cmd = &result->cmd52_rx;
++ printk("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count);
++ cmd = &result->cmd52_tx;
++ printk("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc,
++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count);
++
++ /* CMD53 Rx bytes + block mode */
++ for (i=0; i<512; i++) {
++ cmd = &result->cmd53_rx_byte[i];
++ if (cmd->count) {
++ printk("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
++ }
++ }
++ for (i=0; i<100; i++) {
++ cmd = &result->cmd53_rx_blk[i];
++ if (cmd->count) {
++ printk("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
++ }
++ }
++
++ /* CMD53 Tx bytes + block mode */
++ for (i=0; i<512; i++) {
++ cmd = &result->cmd53_tx_byte[i];
++ if (cmd->count) {
++ printk("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
++ }
++ }
++ for (i=0; i<100; i++) {
++ cmd = &result->cmd53_tx_blk[i];
++ if (cmd->count) {
++ printk("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc,
++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count,
++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10));
++ }
++ }
++
++ printk("sdio === performance dump done ===\n");
++}
++
++//========= sdio command table ===========
++void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks)
++{
++ struct sdio_profile* result = &sdio_perfomance;
++ struct cmd_profile* cmd;
++ u32 block;
++
++ if (sdio_pro_enable == 0) {
++ return;
++ }
++
++ if (opcode == 52) {
++ cmd = bRx ? &result->cmd52_rx : &result->cmd52_tx;
++ } else if (opcode == 53) {
++ if (sizes < 512) {
++ cmd = bRx ? &result->cmd53_rx_byte[sizes] : &result->cmd53_tx_byte[sizes];
++ } else {
++ block = sizes / 512;
++ if (block >= 99) {
++ printk("cmd53 error blocks\n");
++ while(1);
++ }
++ cmd = bRx ? &result->cmd53_rx_blk[block] : &result->cmd53_tx_blk[block];
++ }
++ } else {
++ return;
++ }
++
++ /* update the members */
++ if (ticks > cmd->max_tc){
++ cmd->max_tc = ticks;
++ }
++ if (cmd->min_tc == 0 || ticks < cmd->min_tc) {
++ cmd->min_tc = ticks;
++ }
++ cmd->tot_tc += ticks;
++ cmd->tot_bytes += sizes;
++ cmd->count ++;
++
++ if (bRx) {
++ result->total_rx_bytes += sizes;
++ } else {
++ result->total_tx_bytes += sizes;
++ }
++ result->total_tc += ticks;
++
++ /* dump when total_tc > 30s */
++ if (result->total_tc >= sdio_pro_time * TICKS_ONE_MS * 1000) {
++ msdc_sdio_profile(result);
++ memset(result, 0 , sizeof(struct sdio_profile));
++ }
++}
++
++//========== driver proc interface ===========
++static int msdc_debug_proc_read(struct seq_file *s, void *p)
++{
++ seq_printf(s, "\n=========================================\n");
++ seq_printf(s, "Index<0> + Id + Zone\n");
++ seq_printf(s, "-> PWR<9> WRN<8> | FIO<7> OPS<6> FUN<5> CFG<4> | INT<3> RSP<2> CMD<1> DMA<0>\n");
++ seq_printf(s, "-> echo 0 3 0x3ff >msdc_debug -> host[3] debug zone set to 0x3ff\n");
++ seq_printf(s, "-> MSDC[0] Zone: 0x%.8x\n", sd_debug_zone[0]);
++ seq_printf(s, "-> MSDC[1] Zone: 0x%.8x\n", sd_debug_zone[1]);
++ seq_printf(s, "-> MSDC[2] Zone: 0x%.8x\n", sd_debug_zone[2]);
++ seq_printf(s, "-> MSDC[3] Zone: 0x%.8x\n", sd_debug_zone[3]);
++
++ seq_printf(s, "Index<1> + ID:4|Mode:4 + DMA_SIZE\n");
++ seq_printf(s, "-> 0)PIO 1)DMA 2)SIZE\n");
++ seq_printf(s, "-> echo 1 22 0x200 >msdc_debug -> host[2] size mode, dma when >= 512\n");
++ seq_printf(s, "-> MSDC[0] mode<%d> size<%d>\n", drv_mode[0], dma_size[0]);
++ seq_printf(s, "-> MSDC[1] mode<%d> size<%d>\n", drv_mode[1], dma_size[1]);
++ seq_printf(s, "-> MSDC[2] mode<%d> size<%d>\n", drv_mode[2], dma_size[2]);
++ seq_printf(s, "-> MSDC[3] mode<%d> size<%d>\n", drv_mode[3], dma_size[3]);
++
++ seq_printf(s, "Index<3> + SDIO_PROFILE + TIME\n");
++ seq_printf(s, "-> echo 3 1 0x1E >msdc_debug -> enable sdio_profile, 30s\n");
++ seq_printf(s, "-> SDIO_PROFILE<%d> TIME<%ds>\n", sdio_pro_enable, sdio_pro_time);
++ seq_printf(s, "=========================================\n\n");
++
++ return 0;
++}
++
++static ssize_t msdc_debug_proc_write(struct file *file,
++ const char __user *buf, size_t count, loff_t *data)
++{
++ int ret;
++
++ int cmd, p1, p2;
++ int id, zone;
++ int mode, size;
++
++ if (count == 0)return -1;
++ if(count > 255)count = 255;
++
++ ret = copy_from_user(cmd_buf, buf, count);
++ if (ret < 0)return -1;
++
++ cmd_buf[count] = '\0';
++ printk("msdc Write %s\n", cmd_buf);
++
++ sscanf(cmd_buf, "%x %x %x", &cmd, &p1, &p2);
++
++ if(cmd == SD_TOOL_ZONE) {
++ id = p1; zone = p2; zone &= 0x3ff;
++ printk("msdc host_id<%d> zone<0x%.8x>\n", id, zone);
++ if(id >=0 && id<=3){
++ sd_debug_zone[id] = zone;
++ }
++ else if(id == 4){
++ sd_debug_zone[0] = sd_debug_zone[1] = zone;
++ sd_debug_zone[2] = sd_debug_zone[3] = zone;
++ }
++ else{
++ printk("msdc host_id error when set debug zone\n");
++ }
++ } else if (cmd == SD_TOOL_DMA_SIZE) {
++ id = p1>>4; mode = (p1&0xf); size = p2;
++ if(id >=0 && id<=3){
++ drv_mode[id] = mode;
++ dma_size[id] = p2;
++ }
++ else if(id == 4){
++ drv_mode[0] = drv_mode[1] = mode;
++ drv_mode[2] = drv_mode[3] = mode;
++ dma_size[0] = dma_size[1] = p2;
++ dma_size[2] = dma_size[3] = p2;
++ }
++ else{
++ printk("msdc host_id error when select mode\n");
++ }
++ } else if (cmd == SD_TOOL_SDIO_PROFILE) {
++ if (p1 == 1) { /* enable profile */
++ if (gpt_enable == 0) {
++ // msdc_init_gpt(); /* --- by chhung */
++ gpt_enable = 1;
++ }
++ sdio_pro_enable = 1;
++ if (p2 == 0) p2 = 1; if (p2 >= 30) p2 = 30;
++ sdio_pro_time = p2 ;
++ } else if (p1 == 0) {
++ /* todo */
++ sdio_pro_enable = 0;
++ }
++ }
++
++ return count;
++}
++
++static int msdc_debug_show(struct inode *inode, struct file *file)
++{
++ return single_open(file, msdc_debug_proc_read, NULL);
++}
++
++static const struct file_operations msdc_debug_fops = {
++ .owner = THIS_MODULE,
++ .open = msdc_debug_show,
++ .read = seq_read,
++ .write = msdc_debug_proc_write,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++int msdc_debug_proc_init(void)
++{
++ struct proc_dir_entry *de = proc_create("msdc_debug", 0667, NULL, &msdc_debug_fops);
++
++ if (!de || IS_ERR(de))
++ printk("!! Create MSDC debug PROC fail !!\n");
++
++ return 0 ;
++}
++EXPORT_SYMBOL_GPL(msdc_debug_proc_init);
++#endif
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/dbg.h
+@@ -0,0 +1,156 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ *
++ * MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++#ifndef __MT_MSDC_DEUBG__
++#define __MT_MSDC_DEUBG__
++
++//==========================
++extern u32 sdio_pro_enable;
++/* for a type command, e.g. CMD53, 2 blocks */
++struct cmd_profile {
++ u32 max_tc; /* Max tick count */
++ u32 min_tc;
++ u32 tot_tc; /* total tick count */
++ u32 tot_bytes;
++ u32 count; /* the counts of the command */
++};
++
++/* accumulated counters, dumped when total_tc exceeds the profiling time */
++struct sdio_profile {
++ u32 total_tc; /* total tick count of CMD52 and CMD53 */
++ u32 total_tx_bytes; /* total bytes of CMD53 Tx */
++ u32 total_rx_bytes; /* total bytes of CMD53 Rx */
++
++ /*CMD52*/
++ struct cmd_profile cmd52_tx;
++ struct cmd_profile cmd52_rx;
++
++ /*CMD53 in byte unit */
++ struct cmd_profile cmd53_tx_byte[512];
++ struct cmd_profile cmd53_rx_byte[512];
++
++ /*CMD53 in block unit */
++ struct cmd_profile cmd53_tx_blk[100];
++ struct cmd_profile cmd53_rx_blk[100];
++};
++
++//==========================
++typedef enum {
++ SD_TOOL_ZONE = 0,
++ SD_TOOL_DMA_SIZE = 1,
++ SD_TOOL_PM_ENABLE = 2,
++ SD_TOOL_SDIO_PROFILE = 3,
++} msdc_dbg;
++
++typedef enum {
++ MODE_PIO = 0,
++ MODE_DMA = 1,
++ MODE_SIZE_DEP = 2,
++} msdc_mode;
++extern msdc_mode drv_mode[4];
++extern u32 dma_size[4];
++
++/* Debug message event */
++#define DBG_EVT_NONE (0) /* No event */
++#define DBG_EVT_DMA (1 << 0) /* DMA related event */
++#define DBG_EVT_CMD (1 << 1) /* MSDC CMD related event */
++#define DBG_EVT_RSP (1 << 2) /* MSDC CMD RSP related event */
++#define DBG_EVT_INT (1 << 3) /* MSDC INT event */
++#define DBG_EVT_CFG (1 << 4) /* MSDC CFG event */
++#define DBG_EVT_FUC (1 << 5) /* Function event */
++#define DBG_EVT_OPS (1 << 6) /* Read/Write operation event */
++#define DBG_EVT_FIO (1 << 7) /* FIFO operation event */
++#define DBG_EVT_WRN (1 << 8) /* Warning event */
++#define DBG_EVT_PWR (1 << 9) /* Power event */
++#define DBG_EVT_ALL (0xffffffff)
++
++#define DBG_EVT_MASK (DBG_EVT_ALL)
++
++extern unsigned int sd_debug_zone[4];
++#define TAG "msdc"
++#if 0 /* +++ chhung */
++#define BUG_ON(x) \
++do { \
++ if (x) { \
++ printk("[BUG] %s LINE:%d FILE:%s\n", #x, __LINE__, __FILE__); \
++ while(1); \
++ } \
++}while(0)
++#endif /* end of +++ */
++
++#define N_MSG(evt, fmt, args...)
++/*
++do { \
++ if ((DBG_EVT_##evt) & sd_debug_zone[host->id]) { \
++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
++ host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \
++ } \
++} while(0)
++*/
++
++#define ERR_MSG(fmt, args...) \
++do { \
++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
++ host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \
++} while(0);
++
++#if 1
++//defined CONFIG_MTK_MMC_CD_POLL
++#define INIT_MSG(fmt, args...)
++#define IRQ_MSG(fmt, args...)
++#else
++#define INIT_MSG(fmt, args...) \
++do { \
++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \
++ host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \
++} while(0);
++
++/* PID in ISR is not correct */
++#define IRQ_MSG(fmt, args...) \
++do { \
++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d>\n", \
++ host->id, ##args , __FUNCTION__, __LINE__); \
++} while(0);
++#endif
++
++int msdc_debug_proc_init(void);
++
++#if 0 /* --- chhung */
++void msdc_init_gpt(void);
++extern void GPT_GetCounter64(UINT32 *cntL32, UINT32 *cntH32);
++#endif /* end of --- */
++u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32);
++void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks);
++
++#endif
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/mt6575_sd.h
+@@ -0,0 +1,1002 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ */
++/* MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#ifndef MT6575_SD_H
++#define MT6575_SD_H
++
++#include <linux/bitops.h>
++#include <linux/interrupt.h>
++#include <linux/mmc/host.h>
++
++// #include <mach/mt6575_reg_base.h> /* --- by chhung */
++
++/*--------------------------------------------------------------------------*/
++/* Common Macro */
++/*--------------------------------------------------------------------------*/
++#define REG_ADDR(x) ((volatile u32*)(base + OFFSET_##x))
++
++/*--------------------------------------------------------------------------*/
++/* Common Definition */
++/*--------------------------------------------------------------------------*/
++#define MSDC_FIFO_SZ (128)
++#define MSDC_FIFO_THD (64) // (128)
++#define MSDC_NUM (4)
++
++#define MSDC_MS (0)
++#define MSDC_SDMMC (1)
++
++#define MSDC_MODE_UNKNOWN (0)
++#define MSDC_MODE_PIO (1)
++#define MSDC_MODE_DMA_BASIC (2)
++#define MSDC_MODE_DMA_DESC (3)
++#define MSDC_MODE_DMA_ENHANCED (4)
++#define MSDC_MODE_MMC_STREAM (5)
++
++#define MSDC_BUS_1BITS (0)
++#define MSDC_BUS_4BITS (1)
++#define MSDC_BUS_8BITS (2)
++
++#define MSDC_BRUST_8B (3)
++#define MSDC_BRUST_16B (4)
++#define MSDC_BRUST_32B (5)
++#define MSDC_BRUST_64B (6)
++
++#define MSDC_PIN_PULL_NONE (0)
++#define MSDC_PIN_PULL_DOWN (1)
++#define MSDC_PIN_PULL_UP (2)
++#define MSDC_PIN_KEEP (3)
++
++#define MSDC_MAX_SCLK (48000000) /* +/- by chhung */
++#define MSDC_MIN_SCLK (260000)
++
++#define MSDC_AUTOCMD12 (0x0001)
++#define MSDC_AUTOCMD23 (0x0002)
++#define MSDC_AUTOCMD19 (0x0003)
++
++#define MSDC_EMMC_BOOTMODE0 (0) /* Pull low CMD mode */
++#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */
++
++enum {
++ RESP_NONE = 0,
++ RESP_R1,
++ RESP_R2,
++ RESP_R3,
++ RESP_R4,
++ RESP_R5,
++ RESP_R6,
++ RESP_R7,
++ RESP_R1B
++};
++
++/*--------------------------------------------------------------------------*/
++/* Register Offset */
++/*--------------------------------------------------------------------------*/
++#define OFFSET_MSDC_CFG (0x0)
++#define OFFSET_MSDC_IOCON (0x04)
++#define OFFSET_MSDC_PS (0x08)
++#define OFFSET_MSDC_INT (0x0c)
++#define OFFSET_MSDC_INTEN (0x10)
++#define OFFSET_MSDC_FIFOCS (0x14)
++#define OFFSET_MSDC_TXDATA (0x18)
++#define OFFSET_MSDC_RXDATA (0x1c)
++#define OFFSET_SDC_CFG (0x30)
++#define OFFSET_SDC_CMD (0x34)
++#define OFFSET_SDC_ARG (0x38)
++#define OFFSET_SDC_STS (0x3c)
++#define OFFSET_SDC_RESP0 (0x40)
++#define OFFSET_SDC_RESP1 (0x44)
++#define OFFSET_SDC_RESP2 (0x48)
++#define OFFSET_SDC_RESP3 (0x4c)
++#define OFFSET_SDC_BLK_NUM (0x50)
++#define OFFSET_SDC_CSTS (0x58)
++#define OFFSET_SDC_CSTS_EN (0x5c)
++#define OFFSET_SDC_DCRC_STS (0x60)
++#define OFFSET_EMMC_CFG0 (0x70)
++#define OFFSET_EMMC_CFG1 (0x74)
++#define OFFSET_EMMC_STS (0x78)
++#define OFFSET_EMMC_IOCON (0x7c)
++#define OFFSET_SDC_ACMD_RESP (0x80)
++#define OFFSET_SDC_ACMD19_TRG (0x84)
++#define OFFSET_SDC_ACMD19_STS (0x88)
++#define OFFSET_MSDC_DMA_SA (0x90)
++#define OFFSET_MSDC_DMA_CA (0x94)
++#define OFFSET_MSDC_DMA_CTRL (0x98)
++#define OFFSET_MSDC_DMA_CFG (0x9c)
++#define OFFSET_MSDC_DBG_SEL (0xa0)
++#define OFFSET_MSDC_DBG_OUT (0xa4)
++#define OFFSET_MSDC_PATCH_BIT (0xb0)
++#define OFFSET_MSDC_PATCH_BIT1 (0xb4)
++#define OFFSET_MSDC_PAD_CTL0 (0xe0)
++#define OFFSET_MSDC_PAD_CTL1 (0xe4)
++#define OFFSET_MSDC_PAD_CTL2 (0xe8)
++#define OFFSET_MSDC_PAD_TUNE (0xec)
++#define OFFSET_MSDC_DAT_RDDLY0 (0xf0)
++#define OFFSET_MSDC_DAT_RDDLY1 (0xf4)
++#define OFFSET_MSDC_HW_DBG (0xf8)
++#define OFFSET_MSDC_VERSION (0x100)
++#define OFFSET_MSDC_ECO_VER (0x104)
++
++/*--------------------------------------------------------------------------*/
++/* Register Address */
++/*--------------------------------------------------------------------------*/
++
++/* common register */
++#define MSDC_CFG REG_ADDR(MSDC_CFG)
++#define MSDC_IOCON REG_ADDR(MSDC_IOCON)
++#define MSDC_PS REG_ADDR(MSDC_PS)
++#define MSDC_INT REG_ADDR(MSDC_INT)
++#define MSDC_INTEN REG_ADDR(MSDC_INTEN)
++#define MSDC_FIFOCS REG_ADDR(MSDC_FIFOCS)
++#define MSDC_TXDATA REG_ADDR(MSDC_TXDATA)
++#define MSDC_RXDATA REG_ADDR(MSDC_RXDATA)
++#define MSDC_PATCH_BIT0 REG_ADDR(MSDC_PATCH_BIT)
++
++/* sdmmc register */
++#define SDC_CFG REG_ADDR(SDC_CFG)
++#define SDC_CMD REG_ADDR(SDC_CMD)
++#define SDC_ARG REG_ADDR(SDC_ARG)
++#define SDC_STS REG_ADDR(SDC_STS)
++#define SDC_RESP0 REG_ADDR(SDC_RESP0)
++#define SDC_RESP1 REG_ADDR(SDC_RESP1)
++#define SDC_RESP2 REG_ADDR(SDC_RESP2)
++#define SDC_RESP3 REG_ADDR(SDC_RESP3)
++#define SDC_BLK_NUM REG_ADDR(SDC_BLK_NUM)
++#define SDC_CSTS REG_ADDR(SDC_CSTS)
++#define SDC_CSTS_EN REG_ADDR(SDC_CSTS_EN)
++#define SDC_DCRC_STS REG_ADDR(SDC_DCRC_STS)
++
++/* emmc register*/
++#define EMMC_CFG0 REG_ADDR(EMMC_CFG0)
++#define EMMC_CFG1 REG_ADDR(EMMC_CFG1)
++#define EMMC_STS REG_ADDR(EMMC_STS)
++#define EMMC_IOCON REG_ADDR(EMMC_IOCON)
++
++/* auto command register */
++#define SDC_ACMD_RESP REG_ADDR(SDC_ACMD_RESP)
++#define SDC_ACMD19_TRG REG_ADDR(SDC_ACMD19_TRG)
++#define SDC_ACMD19_STS REG_ADDR(SDC_ACMD19_STS)
++
++/* dma register */
++#define MSDC_DMA_SA REG_ADDR(MSDC_DMA_SA)
++#define MSDC_DMA_CA REG_ADDR(MSDC_DMA_CA)
++#define MSDC_DMA_CTRL REG_ADDR(MSDC_DMA_CTRL)
++#define MSDC_DMA_CFG REG_ADDR(MSDC_DMA_CFG)
++
++/* pad ctrl register */
++#define MSDC_PAD_CTL0 REG_ADDR(MSDC_PAD_CTL0)
++#define MSDC_PAD_CTL1 REG_ADDR(MSDC_PAD_CTL1)
++#define MSDC_PAD_CTL2 REG_ADDR(MSDC_PAD_CTL2)
++
++/* data read delay */
++#define MSDC_DAT_RDDLY0 REG_ADDR(MSDC_DAT_RDDLY0)
++#define MSDC_DAT_RDDLY1 REG_ADDR(MSDC_DAT_RDDLY1)
++
++/* debug register */
++#define MSDC_DBG_SEL REG_ADDR(MSDC_DBG_SEL)
++#define MSDC_DBG_OUT REG_ADDR(MSDC_DBG_OUT)
++
++/* misc register */
++#define MSDC_PATCH_BIT REG_ADDR(MSDC_PATCH_BIT)
++#define MSDC_PATCH_BIT1 REG_ADDR(MSDC_PATCH_BIT1)
++#define MSDC_PAD_TUNE REG_ADDR(MSDC_PAD_TUNE)
++#define MSDC_HW_DBG REG_ADDR(MSDC_HW_DBG)
++#define MSDC_VERSION REG_ADDR(MSDC_VERSION)
++#define MSDC_ECO_VER REG_ADDR(MSDC_ECO_VER) /* ECO Version */
++
++/*--------------------------------------------------------------------------*/
++/* Register Mask */
++/*--------------------------------------------------------------------------*/
++
++/* MSDC_CFG mask */
++#define MSDC_CFG_MODE (0x1 << 0) /* RW */
++#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */
++#define MSDC_CFG_RST (0x1 << 2) /* RW */
++#define MSDC_CFG_PIO (0x1 << 3) /* RW */
++#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */
++#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */
++#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */
++#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
++#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
++#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
++
++/* MSDC_IOCON mask */
++#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
++#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */
++#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */
++#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */
++#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */
++#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */
++#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */
++#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */
++#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */
++#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */
++#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */
++#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */
++#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */
++#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */
++#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */
++
++/* MSDC_PS mask */
++#define MSDC_PS_CDEN (0x1 << 0) /* RW */
++#define MSDC_PS_CDSTS (0x1 << 1) /* R */
++#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */
++#define MSDC_PS_DAT (0xff << 16) /* R */
++#define MSDC_PS_CMD (0x1 << 24) /* R */
++#define MSDC_PS_WP (0x1UL<< 31) /* R */
++
++/* MSDC_INT mask */
++#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */
++#define MSDC_INT_CDSC (0x1 << 1) /* W1C */
++#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */
++#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */
++#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */
++#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */
++#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */
++#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */
++#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */
++#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */
++#define MSDC_INT_CSTA (0x1 << 11) /* R */
++#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */
++#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */
++#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */
++#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */
++#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */
++
++/* MSDC_INTEN mask */
++#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */
++#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */
++#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */
++#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */
++#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */
++#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */
++#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */
++#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */
++#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */
++#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */
++#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */
++#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */
++#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */
++#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */
++#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */
++#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */
++
++/* MSDC_FIFOCS mask */
++#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */
++#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */
++#define MSDC_FIFOCS_CLR (0x1UL<< 31) /* RW */
++
++/* SDC_CFG mask */
++#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */
++#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */
++#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */
++#define SDC_CFG_SDIO (0x1 << 19) /* RW */
++#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */
++#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */
++#define SDC_CFG_DTOC (0xffUL << 24) /* RW */
++
++/* SDC_CMD mask */
++#define SDC_CMD_OPC (0x3f << 0) /* RW */
++#define SDC_CMD_BRK (0x1 << 6) /* RW */
++#define SDC_CMD_RSPTYP (0x7 << 7) /* RW */
++#define SDC_CMD_DTYP            (0x3  << 11)    /* RW */
++#define SDC_CMD_RW (0x1 << 13) /* RW */
++#define SDC_CMD_STOP (0x1 << 14) /* RW */
++#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */
++#define SDC_CMD_BLKLEN (0xfff<< 16) /* RW */
++#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */
++#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */
++
++/* SDC_STS mask */
++#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */
++#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */
++#define SDC_STS_SWR_COMPL       (0x1UL << 31)   /* RW */
++
++/* SDC_DCRC_STS mask */
++#define SDC_DCRC_STS_NEG (0xf << 8) /* RO */
++#define SDC_DCRC_STS_POS (0xff << 0) /* RO */
++
++/* EMMC_CFG0 mask */
++#define EMMC_CFG0_BOOTSTART (0x1 << 0) /* W */
++#define EMMC_CFG0_BOOTSTOP (0x1 << 1) /* W */
++#define EMMC_CFG0_BOOTMODE (0x1 << 2) /* RW */
++#define EMMC_CFG0_BOOTACKDIS (0x1 << 3) /* RW */
++#define EMMC_CFG0_BOOTWDLY (0x7 << 12) /* RW */
++#define EMMC_CFG0_BOOTSUPP (0x1 << 15) /* RW */
++
++/* EMMC_CFG1 mask */
++#define EMMC_CFG1_BOOTDATTMC (0xfffff << 0) /* RW */
++#define EMMC_CFG1_BOOTACKTMC (0xfffUL << 20) /* RW */
++
++/* EMMC_STS mask */
++#define EMMC_STS_BOOTCRCERR (0x1 << 0) /* W1C */
++#define EMMC_STS_BOOTACKERR (0x1 << 1) /* W1C */
++#define EMMC_STS_BOOTDATTMO (0x1 << 2) /* W1C */
++#define EMMC_STS_BOOTACKTMO (0x1 << 3) /* W1C */
++#define EMMC_STS_BOOTUPSTATE (0x1 << 4) /* R */
++#define EMMC_STS_BOOTACKRCV (0x1 << 5) /* W1C */
++#define EMMC_STS_BOOTDATRCV (0x1 << 6) /* R */
++
++/* EMMC_IOCON mask */
++#define EMMC_IOCON_BOOTRST (0x1 << 0) /* RW */
++
++/* SDC_ACMD19_TRG mask */
++#define SDC_ACMD19_TRG_TUNESEL (0xf << 0) /* RW */
++
++/* MSDC_DMA_CTRL mask */
++#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */
++#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */
++#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */
++#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */
++#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */
++#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */
++#define MSDC_DMA_CTRL_XFERSZ (0xffffUL << 16)/* RW */
++
++/* MSDC_DMA_CFG mask */
++#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */
++#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */
++#define MSDC_DMA_CFG_BDCSERR (0x1 << 4) /* R */
++#define MSDC_DMA_CFG_GPDCSERR (0x1 << 5) /* R */
++
++/* MSDC_PATCH_BIT mask */
++#define MSDC_PATCH_BIT_WFLSMODE (0x1 << 0) /* RW */
++#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */
++#define MSDC_PATCH_BIT_CKGEN_CK (0x1 << 6) /* E2: Fixed to 1 */
++#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */
++#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */
++#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */
++#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */
++#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */
++#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */
++#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */
++#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
++#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
++
++/* MSDC_PATCH_BIT1 mask */
++#define MSDC_PATCH_BIT1_WRDAT_CRCS (0x7 << 3)
++#define MSDC_PATCH_BIT1_CMD_RSP (0x7 << 0)
++
++/* MSDC_PAD_CTL0 mask */
++#define MSDC_PAD_CTL0_CLKDRVN (0x7 << 0) /* RW */
++#define MSDC_PAD_CTL0_CLKDRVP (0x7 << 4) /* RW */
++#define MSDC_PAD_CTL0_CLKSR (0x1 << 8) /* RW */
++#define MSDC_PAD_CTL0_CLKPD (0x1 << 16) /* RW */
++#define MSDC_PAD_CTL0_CLKPU (0x1 << 17) /* RW */
++#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */
++#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */
++#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */
++#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL<< 24) /* RW */
++
++/* MSDC_PAD_CTL1 mask */
++#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */
++#define MSDC_PAD_CTL1_CMDDRVP (0x7 << 4) /* RW */
++#define MSDC_PAD_CTL1_CMDSR (0x1 << 8) /* RW */
++#define MSDC_PAD_CTL1_CMDPD (0x1 << 16) /* RW */
++#define MSDC_PAD_CTL1_CMDPU (0x1 << 17) /* RW */
++#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */
++#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */
++#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */
++#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL<< 24) /* RW */
++
++/* MSDC_PAD_CTL2 mask */
++#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */
++#define MSDC_PAD_CTL2_DATDRVP (0x7 << 4) /* RW */
++#define MSDC_PAD_CTL2_DATSR (0x1 << 8) /* RW */
++#define MSDC_PAD_CTL2_DATPD (0x1 << 16) /* RW */
++#define MSDC_PAD_CTL2_DATPU (0x1 << 17) /* RW */
++#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */
++#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */
++#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */
++#define MSDC_PAD_CTL2_DATRDSEL (0xffUL<< 24) /* RW */
++
++/* MSDC_PAD_TUNE mask */
++#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */
++#define MSDC_PAD_TUNE_DATRRDLY (0x1F << 8) /* RW */
++#define MSDC_PAD_TUNE_CMDRDLY (0x1F << 16) /* RW */
++#define MSDC_PAD_TUNE_CMDRRDLY (0x1FUL << 22) /* RW */
++#define MSDC_PAD_TUNE_CLKTXDLY (0x1FUL << 27) /* RW */
++
++/* MSDC_DAT_RDDLY0/1 mask */
++#define MSDC_DAT_RDDLY0_D0 (0x1F << 0) /* RW */
++#define MSDC_DAT_RDDLY0_D1 (0x1F << 8) /* RW */
++#define MSDC_DAT_RDDLY0_D2 (0x1F << 16) /* RW */
++#define MSDC_DAT_RDDLY0_D3 (0x1F << 24) /* RW */
++
++#define MSDC_DAT_RDDLY1_D4 (0x1F << 0) /* RW */
++#define MSDC_DAT_RDDLY1_D5 (0x1F << 8) /* RW */
++#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */
++#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */
++
++#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F<<10)
++#define MSDC_INT_DAT_LATCH_CK_SEL (0x7<<7)
++#define MSDC_CKGEN_MSDC_CK_SEL (0x1<<6)
++#define CARD_READY_FOR_DATA (1<<8)
++#define CARD_CURRENT_STATE(x) ((x&0x00001E00)>>9)
++
++/*--------------------------------------------------------------------------*/
++/* Descriptor Structure */
++/*--------------------------------------------------------------------------*/
++typedef struct {
++ u32 hwo:1; /* could be changed by hw */
++ u32 bdp:1;
++ u32 rsv0:6;
++ u32 chksum:8;
++ u32 intr:1;
++ u32 rsv1:15;
++ void *next;
++ void *ptr;
++ u32 buflen:16;
++ u32 extlen:8;
++ u32 rsv2:8;
++ u32 arg;
++ u32 blknum;
++ u32 cmd;
++} gpd_t;
++
++typedef struct {
++ u32 eol:1;
++ u32 rsv0:7;
++ u32 chksum:8;
++ u32 rsv1:1;
++ u32 blkpad:1;
++ u32 dwpad:1;
++ u32 rsv2:13;
++ void *next;
++ void *ptr;
++ u32 buflen:16;
++ u32 rsv3:16;
++} bd_t;
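++
++/* The two structures above describe the DMA descriptors: gpd_t appears to
++ * head a transfer (hwo marks a descriptor still owned by the hardware, bdp
++ * that a BD chain is attached via ptr), while each bd_t describes one buffer
++ * of at most 0xffff bytes (buflen is 16 bits) with eol marking the last BD.
++ */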
++
++/*--------------------------------------------------------------------------*/
++/* Register Debugging Structure */
++/*--------------------------------------------------------------------------*/
++
++typedef struct {
++ u32 msdc:1;
++ u32 ckpwn:1;
++ u32 rst:1;
++ u32 pio:1;
++ u32 ckdrven:1;
++ u32 start18v:1;
++ u32 pass18v:1;
++ u32 ckstb:1;
++ u32 ckdiv:8;
++ u32 ckmod:2;
++ u32 pad:14;
++} msdc_cfg_reg;
++typedef struct {
++ u32 sdr104cksel:1;
++ u32 rsmpl:1;
++ u32 dsmpl:1;
++ u32 ddlysel:1;
++ u32 ddr50ckd:1;
++ u32 dsplsel:1;
++ u32 pad1:10;
++ u32 d0spl:1;
++ u32 d1spl:1;
++ u32 d2spl:1;
++ u32 d3spl:1;
++ u32 d4spl:1;
++ u32 d5spl:1;
++ u32 d6spl:1;
++ u32 d7spl:1;
++ u32 riscsz:1;
++ u32 pad2:7;
++} msdc_iocon_reg;
++typedef struct {
++ u32 cden:1;
++ u32 cdsts:1;
++ u32 pad1:10;
++ u32 cddebounce:4;
++ u32 dat:8;
++ u32 cmd:1;
++ u32 pad2:6;
++ u32 wp:1;
++} msdc_ps_reg;
++typedef struct {
++ u32 mmcirq:1;
++ u32 cdsc:1;
++ u32 pad1:1;
++ u32 atocmdrdy:1;
++ u32 atocmdtmo:1;
++ u32 atocmdcrc:1;
++ u32 dmaqempty:1;
++ u32 sdioirq:1;
++ u32 cmdrdy:1;
++ u32 cmdtmo:1;
++ u32 rspcrc:1;
++ u32 csta:1;
++ u32 xfercomp:1;
++ u32 dxferdone:1;
++ u32 dattmo:1;
++ u32 datcrc:1;
++ u32 atocmd19done:1;
++ u32 pad2:15;
++} msdc_int_reg;
++typedef struct {
++ u32 mmcirq:1;
++ u32 cdsc:1;
++ u32 pad1:1;
++ u32 atocmdrdy:1;
++ u32 atocmdtmo:1;
++ u32 atocmdcrc:1;
++ u32 dmaqempty:1;
++ u32 sdioirq:1;
++ u32 cmdrdy:1;
++ u32 cmdtmo:1;
++ u32 rspcrc:1;
++ u32 csta:1;
++ u32 xfercomp:1;
++ u32 dxferdone:1;
++ u32 dattmo:1;
++ u32 datcrc:1;
++ u32 atocmd19done:1;
++ u32 pad2:15;
++} msdc_inten_reg;
++typedef struct {
++ u32 rxcnt:8;
++ u32 pad1:8;
++ u32 txcnt:8;
++ u32 pad2:7;
++ u32 clr:1;
++} msdc_fifocs_reg;
++typedef struct {
++ u32 val;
++} msdc_txdat_reg;
++typedef struct {
++ u32 val;
++} msdc_rxdat_reg;
++typedef struct {
++ u32 sdiowkup:1;
++ u32 inswkup:1;
++ u32 pad1:14;
++ u32 buswidth:2;
++ u32 pad2:1;
++ u32 sdio:1;
++ u32 sdioide:1;
++ u32 intblkgap:1;
++ u32 pad4:2;
++ u32 dtoc:8;
++} sdc_cfg_reg;
++typedef struct {
++ u32 cmd:6;
++ u32 brk:1;
++ u32 rsptyp:3;
++ u32 pad1:1;
++ u32 dtype:2;
++ u32 rw:1;
++ u32 stop:1;
++ u32 goirq:1;
++ u32 blklen:12;
++ u32 atocmd:2;
++ u32 volswth:1;
++ u32 pad2:1;
++} sdc_cmd_reg;
++typedef struct {
++ u32 arg;
++} sdc_arg_reg;
++typedef struct {
++ u32 sdcbusy:1;
++ u32 cmdbusy:1;
++ u32 pad:29;
++ u32 swrcmpl:1;
++} sdc_sts_reg;
++typedef struct {
++ u32 val;
++} sdc_resp0_reg;
++typedef struct {
++ u32 val;
++} sdc_resp1_reg;
++typedef struct {
++ u32 val;
++} sdc_resp2_reg;
++typedef struct {
++ u32 val;
++} sdc_resp3_reg;
++typedef struct {
++ u32 num;
++} sdc_blknum_reg;
++typedef struct {
++ u32 sts;
++} sdc_csts_reg;
++typedef struct {
++ u32 sts;
++} sdc_cstsen_reg;
++typedef struct {
++ u32 datcrcsts:8;
++ u32 ddrcrcsts:4;
++ u32 pad:20;
++} sdc_datcrcsts_reg;
++typedef struct {
++ u32 bootstart:1;
++ u32 bootstop:1;
++ u32 bootmode:1;
++ u32 pad1:9;
++ u32 bootwaidly:3;
++ u32 bootsupp:1;
++ u32 pad2:16;
++} emmc_cfg0_reg;
++typedef struct {
++ u32 bootcrctmc:16;
++ u32 pad:4;
++ u32 bootacktmc:12;
++} emmc_cfg1_reg;
++typedef struct {
++ u32 bootcrcerr:1;
++ u32 bootackerr:1;
++ u32 bootdattmo:1;
++ u32 bootacktmo:1;
++ u32 bootupstate:1;
++ u32 bootackrcv:1;
++ u32 bootdatrcv:1;
++ u32 pad:25;
++} emmc_sts_reg;
++typedef struct {
++ u32 bootrst:1;
++ u32 pad:31;
++} emmc_iocon_reg;
++typedef struct {
++ u32 val;
++} msdc_acmd_resp_reg;
++typedef struct {
++ u32 tunesel:4;
++ u32 pad:28;
++} msdc_acmd19_trg_reg;
++typedef struct {
++ u32 val;
++} msdc_acmd19_sts_reg;
++typedef struct {
++ u32 addr;
++} msdc_dma_sa_reg;
++typedef struct {
++ u32 addr;
++} msdc_dma_ca_reg;
++typedef struct {
++ u32 start:1;
++ u32 stop:1;
++ u32 resume:1;
++ u32 pad1:5;
++ u32 mode:1;
++ u32 pad2:1;
++ u32 lastbuf:1;
++ u32 pad3:1;
++ u32 brustsz:3;
++ u32 pad4:1;
++ u32 xfersz:16;
++} msdc_dma_ctrl_reg;
++typedef struct {
++ u32 status:1;
++ u32 decsen:1;
++ u32 pad1:2;
++ u32 bdcsen:1;
++ u32 gpdcsen:1;
++ u32 pad2:26;
++} msdc_dma_cfg_reg;
++typedef struct {
++ u32 sel:16;
++ u32 pad2:16;
++} msdc_dbg_sel_reg;
++typedef struct {
++ u32 val;
++} msdc_dbg_out_reg;
++typedef struct {
++ u32 clkdrvn:3;
++ u32 rsv0:1;
++ u32 clkdrvp:3;
++ u32 rsv1:1;
++ u32 clksr:1;
++ u32 rsv2:7;
++ u32 clkpd:1;
++ u32 clkpu:1;
++ u32 clksmt:1;
++ u32 clkies:1;
++ u32 clktdsel:4;
++ u32 clkrdsel:8;
++} msdc_pad_ctl0_reg;
++typedef struct {
++ u32 cmddrvn:3;
++ u32 rsv0:1;
++ u32 cmddrvp:3;
++ u32 rsv1:1;
++ u32 cmdsr:1;
++ u32 rsv2:7;
++ u32 cmdpd:1;
++ u32 cmdpu:1;
++ u32 cmdsmt:1;
++ u32 cmdies:1;
++ u32 cmdtdsel:4;
++ u32 cmdrdsel:8;
++} msdc_pad_ctl1_reg;
++typedef struct {
++ u32 datdrvn:3;
++ u32 rsv0:1;
++ u32 datdrvp:3;
++ u32 rsv1:1;
++ u32 datsr:1;
++ u32 rsv2:7;
++ u32 datpd:1;
++ u32 datpu:1;
++ u32 datsmt:1;
++ u32 daties:1;
++ u32 dattdsel:4;
++ u32 datrdsel:8;
++} msdc_pad_ctl2_reg;
++typedef struct {
++ u32 wrrxdly:3;
++ u32 pad1:5;
++ u32 rdrxdly:8;
++ u32 pad2:16;
++} msdc_pad_tune_reg;
++typedef struct {
++ u32 dat0:5;
++ u32 rsv0:3;
++ u32 dat1:5;
++ u32 rsv1:3;
++ u32 dat2:5;
++ u32 rsv2:3;
++ u32 dat3:5;
++ u32 rsv3:3;
++} msdc_dat_rddly0;
++typedef struct {
++ u32 dat4:5;
++ u32 rsv4:3;
++ u32 dat5:5;
++ u32 rsv5:3;
++ u32 dat6:5;
++ u32 rsv6:3;
++ u32 dat7:5;
++ u32 rsv7:3;
++} msdc_dat_rddly1;
++typedef struct {
++ u32 dbg0sel:8;
++ u32 dbg1sel:6;
++ u32 pad1:2;
++ u32 dbg2sel:6;
++ u32 pad2:2;
++ u32 dbg3sel:6;
++ u32 pad3:2;
++} msdc_hw_dbg_reg;
++typedef struct {
++ u32 val;
++} msdc_version_reg;
++typedef struct {
++ u32 val;
++} msdc_eco_ver_reg;
++
++struct msdc_regs {
++ msdc_cfg_reg msdc_cfg; /* base+0x00h */
++ msdc_iocon_reg msdc_iocon; /* base+0x04h */
++ msdc_ps_reg msdc_ps; /* base+0x08h */
++ msdc_int_reg msdc_int; /* base+0x0ch */
++ msdc_inten_reg msdc_inten; /* base+0x10h */
++ msdc_fifocs_reg msdc_fifocs; /* base+0x14h */
++ msdc_txdat_reg msdc_txdat; /* base+0x18h */
++ msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */
++ u32 rsv1[4];
++ sdc_cfg_reg sdc_cfg; /* base+0x30h */
++ sdc_cmd_reg sdc_cmd; /* base+0x34h */
++ sdc_arg_reg sdc_arg; /* base+0x38h */
++ sdc_sts_reg sdc_sts; /* base+0x3ch */
++ sdc_resp0_reg sdc_resp0; /* base+0x40h */
++ sdc_resp1_reg sdc_resp1; /* base+0x44h */
++ sdc_resp2_reg sdc_resp2; /* base+0x48h */
++ sdc_resp3_reg sdc_resp3; /* base+0x4ch */
++ sdc_blknum_reg sdc_blknum; /* base+0x50h */
++ u32 rsv2[1];
++ sdc_csts_reg sdc_csts; /* base+0x58h */
++ sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */
++ sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */
++ u32 rsv3[3];
++ emmc_cfg0_reg emmc_cfg0; /* base+0x70h */
++ emmc_cfg1_reg emmc_cfg1; /* base+0x74h */
++ emmc_sts_reg emmc_sts; /* base+0x78h */
++ emmc_iocon_reg emmc_iocon; /* base+0x7ch */
++ msdc_acmd_resp_reg acmd_resp; /* base+0x80h */
++ msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */
++ msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */
++ u32 rsv4[1];
++ msdc_dma_sa_reg dma_sa; /* base+0x90h */
++ msdc_dma_ca_reg dma_ca; /* base+0x94h */
++ msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */
++ msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */
++ msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */
++ msdc_dbg_out_reg dbg_out; /* base+0xa4h */
++ u32 rsv5[2];
++ u32 patch0; /* base+0xb0h */
++ u32 patch1; /* base+0xb4h */
++ u32 rsv6[10];
++ msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */
++ msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */
++ msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */
++ msdc_pad_tune_reg pad_tune; /* base+0xech */
++ msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */
++ msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */
++ msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */
++ u32 rsv7[1];
++ msdc_version_reg version; /* base+0x100h */
++ msdc_eco_ver_reg eco_ver; /* base+0x104h */
++};
++
++struct scatterlist_ex {
++ u32 cmd;
++ u32 arg;
++ u32 sglen;
++ struct scatterlist *sg;
++};
++
++#define DMA_FLAG_NONE (0x00000000)
++#define DMA_FLAG_EN_CHKSUM (0x00000001)
++#define DMA_FLAG_PAD_BLOCK (0x00000002)
++#define DMA_FLAG_PAD_DWORD (0x00000004)
++
++struct msdc_dma {
++ u32 flags; /* flags */
++ u32 xfersz; /* xfer size in bytes */
++ u32 sglen; /* size of scatter list */
++ u32 blklen; /* block size */
++ struct scatterlist *sg; /* I/O scatter list */
++ struct scatterlist_ex *esg; /* extended I/O scatter list */
++ u8 mode; /* dma mode */
++ u8 burstsz; /* burst size */
++ u8 intr; /* dma done interrupt */
++ u8 padding; /* padding */
++ u32 cmd; /* enhanced mode command */
++ u32 arg; /* enhanced mode arg */
++ u32 rsp; /* enhanced mode command response */
++ u32 autorsp; /* auto command response */
++
++ gpd_t *gpd; /* pointer to gpd array */
++ bd_t *bd; /* pointer to bd array */
++ dma_addr_t gpd_addr; /* the physical address of gpd array */
++ dma_addr_t bd_addr; /* the physical address of bd array */
++ u32 used_gpd; /* the number of used gpd elements */
++ u32 used_bd; /* the number of used bd elements */
++};
++
++struct msdc_host
++{
++ struct msdc_hw *hw;
++
++ struct mmc_host *mmc; /* mmc structure */
++ struct mmc_command *cmd;
++ struct mmc_data *data;
++ struct mmc_request *mrq;
++ int cmd_rsp;
++ int cmd_rsp_done;
++ int cmd_r1b_done;
++
++ int error;
++ spinlock_t lock; /* mutex */
++ struct semaphore sem;
++
++ u32 blksz; /* host block size */
++ u32 base; /* host base address */
++ int id; /* host id */
++ int pwr_ref; /* core power reference count */
++
++ u32 xfer_size; /* total transferred size */
++
++ struct msdc_dma dma; /* dma channel */
++ u32 dma_addr; /* dma transfer address */
++ u32 dma_left_size; /* dma transfer left size */
++ u32 dma_xfer_size; /* dma transfer size in bytes */
++ int dma_xfer; /* dma transfer mode */
++
++ u32 timeout_ns; /* data timeout ns */
++ u32 timeout_clks; /* data timeout clks */
++
++ atomic_t abort; /* abort transfer */
++
++ int irq; /* host interrupt */
++
++ struct tasklet_struct card_tasklet;
++#if 0
++ struct work_struct card_workqueue;
++#else
++ struct delayed_work card_delaywork;
++#endif
++
++ struct completion cmd_done;
++ struct completion xfer_done;
++ struct pm_message pm_state;
++
++ u32 mclk; /* mmc subsystem clock */
++ u32 hclk; /* host clock speed */
++ u32 sclk; /* SD/MS clock speed */
++ u8 core_clkon; /* Host core clock on ? */
++ u8 card_clkon; /* Card clock on ? */
++ u8 core_power; /* core power */
++ u8 power_mode; /* host power mode */
++ u8 card_inserted; /* card inserted ? */
++ u8 suspend; /* host suspended ? */
++ u8 reserved;
++ u8 app_cmd; /* for app command */
++ u32 app_cmd_arg;
++ u64 starttime;
++};
++
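++/* uffs(): 1-based index of the least significant set bit, like ffs().
++ * For example, uffs(0x30) == 5 and uffs(0) == 0. It is used below to turn a
++ * register field mask into its shift amount.
++ */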
++static inline unsigned int uffs(unsigned int x)
++{
++ unsigned int r = 1;
++
++ if (!x)
++ return 0;
++ if (!(x & 0xffff)) {
++ x >>= 16;
++ r += 16;
++ }
++ if (!(x & 0xff)) {
++ x >>= 8;
++ r += 8;
++ }
++ if (!(x & 0xf)) {
++ x >>= 4;
++ r += 4;
++ }
++ if (!(x & 3)) {
++ x >>= 2;
++ r += 2;
++ }
++ if (!(x & 1)) {
++ x >>= 1;
++ r += 1;
++ }
++ return r;
++}
++#define sdr_read8(reg) __raw_readb(reg)
++#define sdr_read16(reg) __raw_readw(reg)
++#define sdr_read32(reg) __raw_readl(reg)
++#define sdr_write8(reg,val) __raw_writeb(val,reg)
++#define sdr_write16(reg,val) __raw_writew(val,reg)
++#define sdr_write32(reg,val) __raw_writel(val,reg)
++
++#define sdr_set_bits(reg,bs) ((*(volatile u32*)(reg)) |= (u32)(bs))
++#define sdr_clr_bits(reg,bs) ((*(volatile u32*)(reg)) &= ~((u32)(bs)))
++
++#define sdr_set_field(reg,field,val) \
++ do { \
++ volatile unsigned int tv = sdr_read32(reg); \
++ tv &= ~(field); \
++ tv |= ((val) << (uffs((unsigned int)field) - 1)); \
++ sdr_write32(reg,tv); \
++ } while(0)
++#define sdr_get_field(reg,field,val) \
++ do { \
++ volatile unsigned int tv = sdr_read32(reg); \
++ val = ((tv & (field)) >> (uffs((unsigned int)field) - 1)); \
++ } while(0)
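++
++/* For example, sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, 4) reads MSDC_CFG,
++ * clears bits 15..8 (the 0xff << 8 mask) and ORs in 4 << (uffs(mask) - 1),
++ * i.e. 4 << 8, a plain read-modify-write of the clock divider field.
++ */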
++
++#endif
++
+--- /dev/null
++++ b/drivers/mmc/host/mtk-mmc/sd.c
+@@ -0,0 +1,3067 @@
++/* Copyright Statement:
++ *
++ * This software/firmware and related documentation ("MediaTek Software") are
++ * protected under relevant copyright laws. The information contained herein
++ * is confidential and proprietary to MediaTek Inc. and/or its licensors.
++ * Without the prior written permission of MediaTek inc. and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ *
++ * MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/timer.h>
++#include <linux/ioport.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++#include <linux/mmc/host.h>
++#include <linux/mmc/card.h>
++#include <linux/mmc/core.h>
++#include <linux/mmc/mmc.h>
++#include <linux/mmc/sd.h>
++#include <linux/mmc/sdio.h>
++#include <linux/dma-mapping.h>
++
++/* +++ by chhung */
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/pm.h>
++#include <linux/of.h>
++
++#define MSDC_SMPL_FALLING (1)
++#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */
++#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */
++#define MSDC_REMOVABLE (1 << 5) /* removable slot */
++#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */
++#define MSDC_HIGHSPEED (1 << 7)
++
++//#define IRQ_SDC 14 //MT7620 /*FIXME*/
++#ifdef CONFIG_SOC_MT7621
++#define RALINK_SYSCTL_BASE 0xbe000000
++#define RALINK_MSDC_BASE 0xbe130000
++#else
++#define RALINK_SYSCTL_BASE 0xb0000000
++#define RALINK_MSDC_BASE 0xb0130000
++#endif
++#define IRQ_SDC 22 /*FIXME*/
++
++#include <asm/dma.h>
++/* end of +++ */
++
++
++#include <asm/mach-ralink/ralink_regs.h>
++
++#if 0 /* --- by chhung */
++#include <mach/board.h>
++#include <mach/mt6575_devs.h>
++#include <mach/mt6575_typedefs.h>
++#include <mach/mt6575_clock_manager.h>
++#include <mach/mt6575_pm_ldo.h>
++//#include <mach/mt6575_pll.h>
++//#include <mach/mt6575_gpio.h>
++//#include <mach/mt6575_gpt_sw.h>
++#include <asm/tcm.h>
++// #include <mach/mt6575_gpt.h>
++#endif /* end of --- */
++
++#include "mt6575_sd.h"
++#include "dbg.h"
++
++/* +++ by chhung */
++#include "board.h"
++/* end of +++ */
++
++#if 0 /* --- by chhung */
++#define isb() __asm__ __volatile__ ("" : : : "memory")
++#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
++ : : "r" (0) : "memory")
++#define dmb() __asm__ __volatile__ ("" : : : "memory")
++#endif /* end of --- */
++
++#define DRV_NAME "mtk-sd"
++
++#define HOST_MAX_NUM (1) /* +/- by chhung */
++
++#if defined (CONFIG_SOC_MT7620)
++#define HOST_MAX_MCLK (48000000) /* +/- by chhung */
++#elif defined (CONFIG_SOC_MT7621)
++#define HOST_MAX_MCLK (50000000) /* +/- by chhung */
++#endif
++#define HOST_MIN_MCLK (260000)
++
++#define HOST_MAX_BLKSZ (2048)
++
++#define MSDC_OCR_AVAIL (MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33)
++
++#define GPIO_PULL_DOWN (0)
++#define GPIO_PULL_UP (1)
++
++#if 0 /* --- by chhung */
++#define MSDC_CLKSRC_REG (0xf100000C)
++#define PDN_REG (0xF1000010)
++#endif /* end of --- */
++
++#define DEFAULT_DEBOUNCE (8) /* 8 cycles */
++#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. */
++
++#define CMD_TIMEOUT (HZ/10) /* 100ms */
++#define DAT_TIMEOUT (HZ/2 * 5) /* 500ms x5 */
++
++#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/
++
++#define MAX_GPD_NUM (1 + 1) /* one null gpd */
++#define MAX_BD_NUM (1024)
++#define MAX_BD_PER_GPD (MAX_BD_NUM)
++
++#define MAX_HW_SGMTS (MAX_BD_NUM)
++#define MAX_PHY_SGMTS (MAX_BD_NUM)
++#define MAX_SGMT_SZ (MAX_DMA_CNT)
++#define MAX_REQ_SZ (MAX_SGMT_SZ * 8)
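++/* i.e. at most 8 segments of (64 KB - 512) bytes each, about 508 KB per
++ * request */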
++
++#ifdef MT6575_SD_DEBUG
++static struct msdc_regs *msdc_reg[HOST_MAX_NUM];
++#endif
++
++static int mtk_sw_poll;
++
++static int cd_active_low = 1;
++
++//=================================
++#define PERI_MSDC0_PDN (15)
++//#define PERI_MSDC1_PDN (16)
++//#define PERI_MSDC2_PDN (17)
++//#define PERI_MSDC3_PDN (18)
++
++struct msdc_host *msdc_6575_host[] = {NULL,NULL,NULL,NULL};
++#if 0 /* --- by chhung */
++/* gate means clock power down */
++static int g_clk_gate = 0;
++#define msdc_gate_clock(id) \
++ do { \
++ g_clk_gate &= ~(1 << ((id) + PERI_MSDC0_PDN)); \
++ } while(0)
++/* not like power down register. 1 means clock on. */
++#define msdc_ungate_clock(id) \
++ do { \
++ g_clk_gate |= 1 << ((id) + PERI_MSDC0_PDN); \
++ } while(0)
++
++// do we need sync object or not
++void msdc_clk_status(int * status)
++{
++ *status = g_clk_gate;
++}
++#endif /* end of --- */
++
++/* +++ by chhung */
++struct msdc_hw msdc0_hw = {
++ .clk_src = 0,
++ .cmd_edge = MSDC_SMPL_FALLING,
++ .data_edge = MSDC_SMPL_FALLING,
++ .clk_drv = 4,
++ .cmd_drv = 4,
++ .dat_drv = 4,
++ .data_pins = 4,
++ .data_offset = 0,
++ .flags = MSDC_SYS_SUSPEND | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED,
++// .flags = MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE,
++};
++
++static struct resource mtk_sd_resources[] = {
++ [0] = {
++ .start = RALINK_MSDC_BASE,
++ .end = RALINK_MSDC_BASE+0x3fff,
++ .flags = IORESOURCE_MEM,
++ },
++ [1] = {
++ .start = IRQ_SDC, /*FIXME*/
++ .end = IRQ_SDC, /*FIXME*/
++ .flags = IORESOURCE_IRQ,
++ },
++};
++
++static struct platform_device mtk_sd_device = {
++ .name = "mtk-sd",
++ .id = 0,
++ .num_resources = ARRAY_SIZE(mtk_sd_resources),
++ .resource = mtk_sd_resources,
++};
++/* end of +++ */
++
++static int msdc_rsp[] = {
++ 0, /* RESP_NONE */
++ 1, /* RESP_R1 */
++ 2, /* RESP_R2 */
++ 3, /* RESP_R3 */
++ 4, /* RESP_R4 */
++ 1, /* RESP_R5 */
++ 1, /* RESP_R6 */
++ 1, /* RESP_R7 */
++ 7, /* RESP_R1b */
++};
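++
++/* These values are what msdc_command_start() shifts into SDC_CMD_RSPTYP
++ * (msdc_rsp[resp] << 7): R5/R6/R7 reuse the R1 encoding and R1b gets its
++ * own value, 7.
++ */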
++
++/* For Enhanced DMA */
++#define msdc_init_gpd_ex(gpd,extlen,cmd,arg,blknum) \
++ do { \
++ ((gpd_t*)gpd)->extlen = extlen; \
++ ((gpd_t*)gpd)->cmd = cmd; \
++ ((gpd_t*)gpd)->arg = arg; \
++ ((gpd_t*)gpd)->blknum = blknum; \
++ }while(0)
++
++#define msdc_init_bd(bd, blkpad, dwpad, dptr, dlen) \
++ do { \
++ BUG_ON(dlen > 0xFFFFUL); \
++ ((bd_t*)bd)->blkpad = blkpad; \
++ ((bd_t*)bd)->dwpad = dwpad; \
++ ((bd_t*)bd)->ptr = (void*)dptr; \
++ ((bd_t*)bd)->buflen = dlen; \
++ }while(0)
++
++#define msdc_txfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16)
++#define msdc_rxfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0)
++#define msdc_fifo_write32(v) sdr_write32(MSDC_TXDATA, (v))
++#define msdc_fifo_write8(v) sdr_write8(MSDC_TXDATA, (v))
++#define msdc_fifo_read32() sdr_read32(MSDC_RXDATA)
++#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA)
++
++
++#define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO)
++#define msdc_dma_off() sdr_set_bits(MSDC_CFG, MSDC_CFG_PIO)
++
++#define msdc_retry(expr,retry,cnt) \
++ do { \
++ int backup = cnt; \
++ while (retry) { \
++ if (!(expr)) break; \
++ if (cnt-- == 0) { \
++ retry--; mdelay(1); cnt = backup; \
++ } \
++ } \
++ WARN_ON(retry == 0); \
++ } while(0)
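++
++/* msdc_retry(expr, retry, cnt): poll until 'expr' becomes false; each round
++ * makes roughly 'cnt' polls, then sleeps 1 ms, and after 'retry' exhausted
++ * rounds the WARN_ON fires.
++ */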
++
++#if 0 /* --- by chhung */
++#define msdc_reset() \
++ do { \
++ int retry = 3, cnt = 1000; \
++ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \
++ dsb(); \
++ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \
++ } while(0)
++#else
++#define msdc_reset() \
++ do { \
++ int retry = 3, cnt = 1000; \
++ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \
++ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \
++ } while(0)
++#endif /* end of +/- */
++
++#define msdc_clr_int() \
++ do { \
++ volatile u32 val = sdr_read32(MSDC_INT); \
++ sdr_write32(MSDC_INT, val); \
++ } while(0)
++
++#define msdc_clr_fifo() \
++ do { \
++ int retry = 3, cnt = 1000; \
++ sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \
++ msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \
++ } while(0)
++
++#define msdc_irq_save(val) \
++ do { \
++ val = sdr_read32(MSDC_INTEN); \
++ sdr_clr_bits(MSDC_INTEN, val); \
++ } while(0)
++
++#define msdc_irq_restore(val) \
++ do { \
++ sdr_set_bits(MSDC_INTEN, val); \
++ } while(0)
++
++/* clock source for host: global */
++#if defined (CONFIG_SOC_MT7620)
++static u32 hclks[] = {48000000}; /* +/- by chhung */
++#elif defined (CONFIG_SOC_MT7621)
++static u32 hclks[] = {50000000}; /* +/- by chhung */
++#endif
++
++//============================================
++// the power for msdc host controller: global
++// always keep the VMC on.
++//============================================
++#define msdc_vcore_on(host) \
++ do { \
++ INIT_MSG("[+]VMC ref. count<%d>", ++host->pwr_ref); \
++ (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \
++ } while (0)
++#define msdc_vcore_off(host) \
++ do { \
++ INIT_MSG("[-]VMC ref. count<%d>", --host->pwr_ref); \
++ (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \
++ } while (0)
++
++//====================================
++// the vdd output for card: global
++// always keep the VMCH on.
++//====================================
++#define msdc_vdd_on(host) \
++ do { \
++ (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \
++ } while (0)
++#define msdc_vdd_off(host) \
++ do { \
++ (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \
++ } while (0)
++
++#define sdc_is_busy() (sdr_read32(SDC_STS) & SDC_STS_SDCBUSY)
++#define sdc_is_cmd_busy() (sdr_read32(SDC_STS) & SDC_STS_CMDBUSY)
++
++#define sdc_send_cmd(cmd,arg) \
++ do { \
++ sdr_write32(SDC_ARG, (arg)); \
++ sdr_write32(SDC_CMD, (cmd)); \
++ } while(0)
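++
++/* SDC_ARG is written before SDC_CMD; presumably the write to SDC_CMD is what
++ * actually starts the command, so the argument has to be in place first.
++ */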
++
++// could be modified to read the h/w register instead.
++//#define is_card_present(h) ((sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1);
++#define is_card_present(h) (((struct msdc_host*)(h))->card_inserted)
++
++/* +++ by chhung */
++#ifndef __ASSEMBLY__
++#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff)
++#else
++#define PHYSADDR(a) ((a) & 0x1fffffff)
++#endif
++/* end of +++ */
++static unsigned int msdc_do_command(struct msdc_host *host,
++ struct mmc_command *cmd,
++ int tune,
++ unsigned long timeout);
++
++static int msdc_tune_cmdrsp(struct msdc_host*host,struct mmc_command *cmd);
++
++#ifdef MT6575_SD_DEBUG
++static void msdc_dump_card_status(struct msdc_host *host, u32 status)
++{
++ static char *state[] = {
++ "Idle", /* 0 */
++ "Ready", /* 1 */
++ "Ident", /* 2 */
++ "Stby", /* 3 */
++ "Tran", /* 4 */
++ "Data", /* 5 */
++ "Rcv", /* 6 */
++ "Prg", /* 7 */
++ "Dis", /* 8 */
++ "Reserved", /* 9 */
++ "Reserved", /* 10 */
++ "Reserved", /* 11 */
++ "Reserved", /* 12 */
++ "Reserved", /* 13 */
++ "Reserved", /* 14 */
++ "I/O mode", /* 15 */
++ };
++ if (status & R1_OUT_OF_RANGE)
++ N_MSG(RSP, "[CARD_STATUS] Out of Range");
++ if (status & R1_ADDRESS_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] Address Error");
++ if (status & R1_BLOCK_LEN_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] Block Len Error");
++ if (status & R1_ERASE_SEQ_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] Erase Seq Error");
++ if (status & R1_ERASE_PARAM)
++ N_MSG(RSP, "[CARD_STATUS] Erase Param");
++ if (status & R1_WP_VIOLATION)
++ N_MSG(RSP, "[CARD_STATUS] WP Violation");
++ if (status & R1_CARD_IS_LOCKED)
++ N_MSG(RSP, "[CARD_STATUS] Card is Locked");
++ if (status & R1_LOCK_UNLOCK_FAILED)
++ N_MSG(RSP, "[CARD_STATUS] Lock/Unlock Failed");
++ if (status & R1_COM_CRC_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] Command CRC Error");
++ if (status & R1_ILLEGAL_COMMAND)
++ N_MSG(RSP, "[CARD_STATUS] Illegal Command");
++ if (status & R1_CARD_ECC_FAILED)
++ N_MSG(RSP, "[CARD_STATUS] Card ECC Failed");
++ if (status & R1_CC_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] CC Error");
++ if (status & R1_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] Error");
++ if (status & R1_UNDERRUN)
++ N_MSG(RSP, "[CARD_STATUS] Underrun");
++ if (status & R1_OVERRUN)
++ N_MSG(RSP, "[CARD_STATUS] Overrun");
++ if (status & R1_CID_CSD_OVERWRITE)
++ N_MSG(RSP, "[CARD_STATUS] CID/CSD Overwrite");
++ if (status & R1_WP_ERASE_SKIP)
++		N_MSG(RSP, "[CARD_STATUS] WP Erase Skip");
++ if (status & R1_CARD_ECC_DISABLED)
++ N_MSG(RSP, "[CARD_STATUS] Card ECC Disabled");
++ if (status & R1_ERASE_RESET)
++ N_MSG(RSP, "[CARD_STATUS] Erase Reset");
++ if (status & R1_READY_FOR_DATA)
++ N_MSG(RSP, "[CARD_STATUS] Ready for Data");
++ if (status & R1_SWITCH_ERROR)
++ N_MSG(RSP, "[CARD_STATUS] Switch error");
++ if (status & R1_APP_CMD)
++ N_MSG(RSP, "[CARD_STATUS] App Command");
++
++ N_MSG(RSP, "[CARD_STATUS] '%s' State", state[R1_CURRENT_STATE(status)]);
++}
++
++static void msdc_dump_ocr_reg(struct msdc_host *host, u32 resp)
++{
++ if (resp & (1 << 7))
++ N_MSG(RSP, "[OCR] Low Voltage Range");
++ if (resp & (1 << 15))
++ N_MSG(RSP, "[OCR] 2.7-2.8 volt");
++ if (resp & (1 << 16))
++ N_MSG(RSP, "[OCR] 2.8-2.9 volt");
++ if (resp & (1 << 17))
++ N_MSG(RSP, "[OCR] 2.9-3.0 volt");
++ if (resp & (1 << 18))
++ N_MSG(RSP, "[OCR] 3.0-3.1 volt");
++ if (resp & (1 << 19))
++ N_MSG(RSP, "[OCR] 3.1-3.2 volt");
++ if (resp & (1 << 20))
++ N_MSG(RSP, "[OCR] 3.2-3.3 volt");
++ if (resp & (1 << 21))
++ N_MSG(RSP, "[OCR] 3.3-3.4 volt");
++ if (resp & (1 << 22))
++ N_MSG(RSP, "[OCR] 3.4-3.5 volt");
++ if (resp & (1 << 23))
++ N_MSG(RSP, "[OCR] 3.5-3.6 volt");
++ if (resp & (1 << 24))
++ N_MSG(RSP, "[OCR] Switching to 1.8V Accepted (S18A)");
++ if (resp & (1 << 30))
++ N_MSG(RSP, "[OCR] Card Capacity Status (CCS)");
++ if (resp & (1 << 31))
++ N_MSG(RSP, "[OCR] Card Power Up Status (Idle)");
++ else
++ N_MSG(RSP, "[OCR] Card Power Up Status (Busy)");
++}
++
++static void msdc_dump_rca_resp(struct msdc_host *host, u32 resp)
++{
++ u32 status = (((resp >> 15) & 0x1) << 23) |
++ (((resp >> 14) & 0x1) << 22) |
++ (((resp >> 13) & 0x1) << 19) |
++ (resp & 0x1fff);
++
++ N_MSG(RSP, "[RCA] 0x%.4x", resp >> 16);
++ msdc_dump_card_status(host, status);
++}
++
++static void msdc_dump_io_resp(struct msdc_host *host, u32 resp)
++{
++ u32 flags = (resp >> 8) & 0xFF;
++ char *state[] = {"DIS", "CMD", "TRN", "RFU"};
++
++ if (flags & (1 << 7))
++ N_MSG(RSP, "[IO] COM_CRC_ERR");
++ if (flags & (1 << 6))
++		N_MSG(RSP, "[IO] Illegal command");
++ if (flags & (1 << 3))
++ N_MSG(RSP, "[IO] Error");
++ if (flags & (1 << 2))
++ N_MSG(RSP, "[IO] RFU");
++ if (flags & (1 << 1))
++ N_MSG(RSP, "[IO] Function number error");
++ if (flags & (1 << 0))
++ N_MSG(RSP, "[IO] Out of range");
++
++ N_MSG(RSP, "[IO] State: %s, Data:0x%x", state[(resp >> 12) & 0x3], resp & 0xFF);
++}
++#endif
++
++static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks)
++{
++ u32 base = host->base;
++ u32 timeout, clk_ns;
++
++ host->timeout_ns = ns;
++ host->timeout_clks = clks;
++
++ clk_ns = 1000000000UL / host->sclk;
++ timeout = ns / clk_ns + clks;
++ timeout = timeout >> 16; /* in 65536 sclk cycle unit */
++ timeout = timeout > 1 ? timeout - 1 : 0;
++ timeout = timeout > 255 ? 255 : timeout;
++
++ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout);
++
++ N_MSG(OPS, "Set read data timeout: %dns %dclks -> %d x 65536 cycles",
++ ns, clks, timeout + 1);
++}
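++
++/* For example, with sclk = 50 MHz (clk_ns = 20), ns = 100 ms and clks = 0
++ * give timeout = 5000000 >> 16 = 76, stored as 75, i.e. the hardware waits
++ * about (75 + 1) * 65536 sclk cycles, roughly 100 ms.
++ */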
++
++/* msdc_eirq_sdio() will be called when an EIRQ (for WIFI) fires */
++static void msdc_eirq_sdio(void *data)
++{
++ struct msdc_host *host = (struct msdc_host *)data;
++
++ N_MSG(INT, "SDIO EINT");
++
++ mmc_signal_sdio_irq(host->mmc);
++}
++
++/* msdc_eirq_cd will not be used! We are not using EINT for card detection. */
++static void msdc_eirq_cd(void *data)
++{
++ struct msdc_host *host = (struct msdc_host *)data;
++
++ N_MSG(INT, "CD EINT");
++
++#if 0
++ tasklet_hi_schedule(&host->card_tasklet);
++#else
++ schedule_delayed_work(&host->card_delaywork, HZ);
++#endif
++}
++
++#if 0
++static void msdc_tasklet_card(unsigned long arg)
++{
++ struct msdc_host *host = (struct msdc_host *)arg;
++#else
++static void msdc_tasklet_card(struct work_struct *work)
++{
++ struct msdc_host *host = (struct msdc_host *)container_of(work,
++ struct msdc_host, card_delaywork.work);
++#endif
++ struct msdc_hw *hw = host->hw;
++ u32 base = host->base;
++ u32 inserted;
++ u32 status = 0;
++ //u32 change = 0;
++
++ spin_lock(&host->lock);
++
++ if (hw->get_cd_status) { // NULL
++ inserted = hw->get_cd_status();
++ } else {
++ status = sdr_read32(MSDC_PS);
++ if (cd_active_low)
++ inserted = (status & MSDC_PS_CDSTS) ? 0 : 1;
++ else
++ inserted = (status & MSDC_PS_CDSTS) ? 1 : 0;
++ }
++ if (host->mmc->caps & MMC_CAP_NEEDS_POLL)
++ inserted = 1;
++
++#if 0
++ change = host->card_inserted ^ inserted;
++ host->card_inserted = inserted;
++
++ if (change && !host->suspend) {
++ if (inserted) {
++ host->mmc->f_max = HOST_MAX_MCLK; // work around
++ }
++ mmc_detect_change(host->mmc, msecs_to_jiffies(20));
++ }
++#else /* Make sure: handle the last interrupt */
++ host->card_inserted = inserted;
++
++ if (!host->suspend) {
++ host->mmc->f_max = HOST_MAX_MCLK;
++ mmc_detect_change(host->mmc, msecs_to_jiffies(20));
++ }
++
++ IRQ_MSG("card found<%s>", inserted ? "inserted" : "removed");
++#endif
++
++ spin_unlock(&host->lock);
++}
++
++#if 0 /* --- by chhung */
++/* For E2 only */
++static u8 clk_src_bit[4] = {
++ 0, 3, 5, 7
++};
++
++static void msdc_select_clksrc(struct msdc_host* host, unsigned char clksrc)
++{
++ u32 val;
++ u32 base = host->base;
++
++ BUG_ON(clksrc > 3);
++ INIT_MSG("set clock source to <%d>", clksrc);
++
++ val = sdr_read32(MSDC_CLKSRC_REG);
++ if (sdr_read32(MSDC_ECO_VER) >= 4) {
++ val &= ~(0x3 << clk_src_bit[host->id]);
++ val |= clksrc << clk_src_bit[host->id];
++ } else {
++ val &= ~0x3; val |= clksrc;
++ }
++ sdr_write32(MSDC_CLKSRC_REG, val);
++
++ host->hclk = hclks[clksrc];
++ host->hw->clk_src = clksrc;
++}
++#endif /* end of --- */
++
++static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz)
++{
++ //struct msdc_hw *hw = host->hw;
++ u32 base = host->base;
++ u32 mode;
++ u32 flags;
++ u32 div;
++ u32 sclk;
++ u32 hclk = host->hclk;
++ //u8 clksrc = hw->clk_src;
++
++ if (!hz) { // set mmc system clock to 0 ?
++ //ERR_MSG("set mclk to 0!!!");
++ msdc_reset();
++ return;
++ }
++
++ msdc_irq_save(flags);
++
++#if defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7628_FPGA)
++ mode = 0x0; /* use divisor */
++ if (hz >= (hclk >> 1)) {
++ div = 0; /* mean div = 1/2 */
++ sclk = hclk >> 1; /* sclk = clk / 2 */
++ } else {
++ div = (hclk + ((hz << 2) - 1)) / (hz << 2);
++ sclk = (hclk >> 2) / div;
++ }
++#else
++ if (ddr) {
++ mode = 0x2; /* ddr mode and use divisor */
++ if (hz >= (hclk >> 2)) {
++ div = 1; /* mean div = 1/4 */
++ sclk = hclk >> 2; /* sclk = clk / 4 */
++ } else {
++ div = (hclk + ((hz << 2) - 1)) / (hz << 2);
++ sclk = (hclk >> 2) / div;
++ }
++ } else if (hz >= hclk) { /* bug fix */
++ mode = 0x1; /* no divisor and divisor is ignored */
++ div = 0;
++ sclk = hclk;
++ } else {
++ mode = 0x0; /* use divisor */
++ if (hz >= (hclk >> 1)) {
++ div = 0; /* mean div = 1/2 */
++ sclk = hclk >> 1; /* sclk = clk / 2 */
++ } else {
++ div = (hclk + ((hz << 2) - 1)) / (hz << 2);
++ sclk = (hclk >> 2) / div;
++ }
++ }
++#endif
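++
++	/* For example, with hclk = 50 MHz and a requested hz = 400 kHz on the
++	 * non-DDR divisor path: div = (50000000 + 1599999) / 1600000 = 32 and
++	 * sclk = (50000000 >> 2) / 32 = 390625 Hz, the largest rate not above
++	 * the request.
++	 */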
++ /* set clock mode and divisor */
++ sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode);
++ sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div);
++
++ /* wait clock stable */
++ while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB));
++
++ host->sclk = sclk;
++ host->mclk = hz;
++ msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); // need?
++
++ INIT_MSG("================");
++ INIT_MSG("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>", hz/1000, hclk/1000, sclk/1000);
++ INIT_MSG("================");
++
++ msdc_irq_restore(flags);
++}
++
++/* FIXME: when do we need to abort? */
++static void msdc_abort_data(struct msdc_host *host)
++{
++ u32 base = host->base;
++ struct mmc_command *stop = host->mrq->stop;
++
++ ERR_MSG("Need to Abort. dma<%d>", host->dma_xfer);
++
++ msdc_reset();
++ msdc_clr_fifo();
++ msdc_clr_int();
++
++ // need to check FIFO count 0 ?
++
++ if (stop) { /* try to stop, but may not success */
++ ERR_MSG("stop when abort CMD<%d>", stop->opcode);
++ (void)msdc_do_command(host, stop, 0, CMD_TIMEOUT);
++ }
++
++ //if (host->mclk >= 25000000) {
++ // msdc_set_mclk(host, 0, host->mclk >> 1);
++ //}
++}
++
++#if 0 /* --- by chhung */
++static void msdc_pin_config(struct msdc_host *host, int mode)
++{
++ struct msdc_hw *hw = host->hw;
++ u32 base = host->base;
++ int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
++
++ /* Config WP pin */
++ if (hw->flags & MSDC_WP_PIN_EN) {
++ if (hw->config_gpio_pin) /* NULL */
++ hw->config_gpio_pin(MSDC_WP_PIN, pull);
++ }
++
++ switch (mode) {
++ case MSDC_PIN_PULL_UP:
++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 1); /* Check & FIXME */
++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
++ break;
++ case MSDC_PIN_PULL_DOWN:
++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 1); /* Check & FIXME */
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1);
++ break;
++ case MSDC_PIN_PULL_NONE:
++ default:
++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */
++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0);
++ break;
++ }
++
++ N_MSG(CFG, "Pins mode(%d), down(%d), up(%d)",
++ mode, MSDC_PIN_PULL_DOWN, MSDC_PIN_PULL_UP);
++}
++
++void msdc_pin_reset(struct msdc_host *host, int mode)
++{
++ struct msdc_hw *hw = (struct msdc_hw *)host->hw;
++ u32 base = host->base;
++ int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN;
++
++ /* Config reset pin */
++ if (hw->flags & MSDC_RST_PIN_EN) {
++ if (hw->config_gpio_pin) /* NULL */
++ hw->config_gpio_pin(MSDC_RST_PIN, pull);
++
++ if (mode == MSDC_PIN_PULL_UP) {
++ sdr_clr_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
++ } else {
++ sdr_set_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST);
++ }
++ }
++}
++
++static void msdc_core_power(struct msdc_host *host, int on)
++{
++ N_MSG(CFG, "Turn %s %s power (copower: %d -> %d)",
++ on ? "on" : "off", "core", host->core_power, on);
++
++ if (on && host->core_power == 0) {
++ msdc_vcore_on(host);
++ host->core_power = 1;
++ msleep(1);
++ } else if (!on && host->core_power == 1) {
++ msdc_vcore_off(host);
++ host->core_power = 0;
++ msleep(1);
++ }
++}
++
++static void msdc_host_power(struct msdc_host *host, int on)
++{
++ N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "host");
++
++ if (on) {
++ //msdc_core_power(host, 1); // need do card detection.
++ msdc_pin_reset(host, MSDC_PIN_PULL_UP);
++ } else {
++ msdc_pin_reset(host, MSDC_PIN_PULL_DOWN);
++ //msdc_core_power(host, 0);
++ }
++}
++
++static void msdc_card_power(struct msdc_host *host, int on)
++{
++ N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "card");
++
++ if (on) {
++ msdc_pin_config(host, MSDC_PIN_PULL_UP);
++ if (host->hw->ext_power_on) {
++ host->hw->ext_power_on();
++ } else {
++ //msdc_vdd_on(host); // need todo card detection.
++ }
++ msleep(1);
++ } else {
++ if (host->hw->ext_power_off) {
++ host->hw->ext_power_off();
++ } else {
++ //msdc_vdd_off(host);
++ }
++ msdc_pin_config(host, MSDC_PIN_PULL_DOWN);
++ msleep(1);
++ }
++}
++
++static void msdc_set_power_mode(struct msdc_host *host, u8 mode)
++{
++ N_MSG(CFG, "Set power mode(%d)", mode);
++
++ if (host->power_mode == MMC_POWER_OFF && mode != MMC_POWER_OFF) {
++ msdc_host_power(host, 1);
++ msdc_card_power(host, 1);
++ } else if (host->power_mode != MMC_POWER_OFF && mode == MMC_POWER_OFF) {
++ msdc_card_power(host, 0);
++ msdc_host_power(host, 0);
++ }
++ host->power_mode = mode;
++}
++#endif /* end of --- */
++
++#ifdef CONFIG_PM
++/*
++	Registered as a callback function of WIFI (combo_sdio_register_pm).
++	Can also be called by msdc_drv_suspend/resume.
++*/
++static void msdc_pm(pm_message_t state, void *data)
++{
++ struct msdc_host *host = (struct msdc_host *)data;
++ int evt = state.event;
++
++ if (evt == PM_EVENT_USER_RESUME || evt == PM_EVENT_USER_SUSPEND) {
++ INIT_MSG("USR_%s: suspend<%d> power<%d>",
++ evt == PM_EVENT_USER_RESUME ? "EVENT_USER_RESUME" : "EVENT_USER_SUSPEND",
++ host->suspend, host->power_mode);
++ }
++
++ if (evt == PM_EVENT_SUSPEND || evt == PM_EVENT_USER_SUSPEND) {
++ if (host->suspend) /* already suspend */ /* default 0*/
++ return;
++
++		/* for memory card: already powered off by mmc */
++ if (evt == PM_EVENT_SUSPEND && host->power_mode == MMC_POWER_OFF)
++ return;
++
++ host->suspend = 1;
++ host->pm_state = state; /* default PMSG_RESUME */
++
++ INIT_MSG("%s Suspend", evt == PM_EVENT_SUSPEND ? "PM" : "USR");
++ if(host->hw->flags & MSDC_SYS_SUSPEND) /* set for card */
++ (void)mmc_suspend_host(host->mmc);
++ else {
++ // host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* just for double confirm */ /* --- by chhung */
++ mmc_remove_host(host->mmc);
++ }
++ } else if (evt == PM_EVENT_RESUME || evt == PM_EVENT_USER_RESUME) {
++ if (!host->suspend){
++ //ERR_MSG("warning: already resume");
++ return;
++ }
++
++ /* No PM resume when USR suspend */
++ if (evt == PM_EVENT_RESUME && host->pm_state.event == PM_EVENT_USER_SUSPEND) {
++ ERR_MSG("PM Resume when in USR Suspend"); /* won't happen. */
++ return;
++ }
++
++ host->suspend = 0;
++ host->pm_state = state;
++
++ INIT_MSG("%s Resume", evt == PM_EVENT_RESUME ? "PM" : "USR");
++ if(host->hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */
++ (void)mmc_resume_host(host->mmc);
++ }
++ else {
++ // host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* --- by chhung */
++ mmc_add_host(host->mmc);
++ }
++ }
++}
++#endif
++
++/*--------------------------------------------------------------------------*/
++/* mmc_host_ops members */
++/*--------------------------------------------------------------------------*/
++static unsigned int msdc_command_start(struct msdc_host *host,
++ struct mmc_command *cmd,
++ int tune, /* not used */
++ unsigned long timeout)
++{
++ u32 base = host->base;
++ u32 opcode = cmd->opcode;
++ u32 rawcmd;
++ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
++ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
++ MSDC_INT_ACMD19_DONE;
++
++ u32 resp;
++ unsigned long tmo;
++
++	/* The protocol layer does not provide the response type, but our hardware
++	 * needs to know the exact type, not just the size!
++ */
++ if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND)
++ resp = RESP_R3;
++ else if (opcode == MMC_SET_RELATIVE_ADDR || opcode == SD_SEND_RELATIVE_ADDR)
++ resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1;
++ else if (opcode == MMC_FAST_IO)
++ resp = RESP_R4;
++ else if (opcode == MMC_GO_IRQ_STATE)
++ resp = RESP_R5;
++ else if (opcode == MMC_SELECT_CARD)
++ resp = (cmd->arg != 0) ? RESP_R1B : RESP_NONE;
++ else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED)
++ resp = RESP_R1; /* SDIO workaround. */
++ else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR))
++ resp = RESP_R1;
++ else {
++ switch (mmc_resp_type(cmd)) {
++ case MMC_RSP_R1:
++ resp = RESP_R1;
++ break;
++ case MMC_RSP_R1B:
++ resp = RESP_R1B;
++ break;
++ case MMC_RSP_R2:
++ resp = RESP_R2;
++ break;
++ case MMC_RSP_R3:
++ resp = RESP_R3;
++ break;
++ case MMC_RSP_NONE:
++ default:
++ resp = RESP_NONE;
++ break;
++ }
++ }
++
++ cmd->error = 0;
++ /* rawcmd :
++ * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 |
++ * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
++ */
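++	/* For example, a 512-byte single-block read (CMD17, R1 response) with
++	 * host->blksz = 512 ends up as
++	 * rawcmd = 17 | (1 << 7) | (512 << 16) | (1 << 11) = 0x02000891.
++	 */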
++ rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16;
++
++ if (opcode == MMC_READ_MULTIPLE_BLOCK) {
++ rawcmd |= (2 << 11);
++ } else if (opcode == MMC_READ_SINGLE_BLOCK) {
++ rawcmd |= (1 << 11);
++ } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) {
++ rawcmd |= ((2 << 11) | (1 << 13));
++ } else if (opcode == MMC_WRITE_BLOCK) {
++ rawcmd |= ((1 << 11) | (1 << 13));
++ } else if (opcode == SD_IO_RW_EXTENDED) {
++ if (cmd->data->flags & MMC_DATA_WRITE)
++ rawcmd |= (1 << 13);
++ if (cmd->data->blocks > 1)
++ rawcmd |= (2 << 11);
++ else
++ rawcmd |= (1 << 11);
++ } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) {
++ rawcmd |= (1 << 14);
++ } else if ((opcode == SD_APP_SEND_SCR) ||
++ (opcode == SD_APP_SEND_NUM_WR_BLKS) ||
++ (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
++ (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) ||
++ (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) {
++ rawcmd |= (1 << 11);
++ } else if (opcode == MMC_STOP_TRANSMISSION) {
++ rawcmd |= (1 << 14);
++ rawcmd &= ~(0x0FFF << 16);
++ }
++
++ N_MSG(CMD, "CMD<%d><0x%.8x> Arg<0x%.8x>", opcode , rawcmd, cmd->arg);
++
++ tmo = jiffies + timeout;
++
++ if (opcode == MMC_SEND_STATUS) {
++ for (;;) {
++ if (!sdc_is_cmd_busy())
++ break;
++
++ if (time_after(jiffies, tmo)) {
++ ERR_MSG("XXX cmd_busy timeout: before CMD<%d>", opcode);
++ cmd->error = (unsigned int)-ETIMEDOUT;
++ msdc_reset();
++ goto end;
++ }
++ }
++	} else {
++ for (;;) {
++ if (!sdc_is_busy())
++ break;
++ if (time_after(jiffies, tmo)) {
++ ERR_MSG("XXX sdc_busy timeout: before CMD<%d>", opcode);
++ cmd->error = (unsigned int)-ETIMEDOUT;
++ msdc_reset();
++ goto end;
++ }
++ }
++ }
++
++ //BUG_ON(in_interrupt());
++ host->cmd = cmd;
++ host->cmd_rsp = resp;
++
++ init_completion(&host->cmd_done);
++
++ sdr_set_bits(MSDC_INTEN, wints);
++ sdc_send_cmd(rawcmd, cmd->arg);
++
++end:
++ return cmd->error;
++}
++
++static unsigned int msdc_command_resp(struct msdc_host *host,
++ struct mmc_command *cmd,
++ int tune,
++ unsigned long timeout)
++{
++ u32 base = host->base;
++ u32 opcode = cmd->opcode;
++ //u32 rawcmd;
++ u32 resp;
++ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO |
++ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO |
++ MSDC_INT_ACMD19_DONE;
++
++ resp = host->cmd_rsp;
++
++ BUG_ON(in_interrupt());
++ //init_completion(&host->cmd_done);
++ //sdr_set_bits(MSDC_INTEN, wints);
++
++ spin_unlock(&host->lock);
++ if(!wait_for_completion_timeout(&host->cmd_done, 10*timeout)){
++ ERR_MSG("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>", opcode, cmd->arg);
++ cmd->error = (unsigned int)-ETIMEDOUT;
++ msdc_reset();
++ }
++ spin_lock(&host->lock);
++
++ sdr_clr_bits(MSDC_INTEN, wints);
++ host->cmd = NULL;
++
++//end:
++#ifdef MT6575_SD_DEBUG
++ switch (resp) {
++ case RESP_NONE:
++ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)", opcode, cmd->error, resp);
++ break;
++ case RESP_R2:
++ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= %.8x %.8x %.8x %.8x",
++ opcode, cmd->error, resp, cmd->resp[0], cmd->resp[1],
++ cmd->resp[2], cmd->resp[3]);
++ break;
++ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
++ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= 0x%.8x",
++ opcode, cmd->error, resp, cmd->resp[0]);
++ if (cmd->error == 0) {
++ switch (resp) {
++ case RESP_R1:
++ case RESP_R1B:
++ msdc_dump_card_status(host, cmd->resp[0]);
++ break;
++ case RESP_R3:
++ msdc_dump_ocr_reg(host, cmd->resp[0]);
++ break;
++ case RESP_R5:
++ msdc_dump_io_resp(host, cmd->resp[0]);
++ break;
++ case RESP_R6:
++ msdc_dump_rca_resp(host, cmd->resp[0]);
++ break;
++ }
++ }
++ break;
++ }
++#endif
++
++ /* do we need to save card's RCA when SD_SEND_RELATIVE_ADDR */
++
++ if (!tune) {
++ return cmd->error;
++ }
++
++ /* memory card CRC */
++ if(host->hw->flags & MSDC_REMOVABLE && cmd->error == (unsigned int)(-EIO) ) {
++ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
++ msdc_abort_data(host);
++ } else {
++ /* do basic: reset*/
++ msdc_reset();
++ msdc_clr_fifo();
++ msdc_clr_int();
++ }
++ cmd->error = msdc_tune_cmdrsp(host,cmd);
++ }
++
++ // check DAT0
++ /* if (resp == RESP_R1B) {
++ while ((sdr_read32(MSDC_PS) & 0x10000) != 0x10000);
++ } */
++ /* CMD12 Error Handle */
++
++ return cmd->error;
++}
++
++static unsigned int msdc_do_command(struct msdc_host *host,
++ struct mmc_command *cmd,
++ int tune,
++ unsigned long timeout)
++{
++ if (msdc_command_start(host, cmd, tune, timeout))
++ goto end;
++
++ if (msdc_command_resp(host, cmd, tune, timeout))
++ goto end;
++
++end:
++
++ N_MSG(CMD, " return<%d> resp<0x%.8x>", cmd->error, cmd->resp[0]);
++ return cmd->error;
++}
++
++/* Check the abort conditions during PIO read/write: an explicit abort
++ request or expiry of the timeout 'tmo'.
++*/
++static int msdc_pio_abort(struct msdc_host *host, struct mmc_data *data, unsigned long tmo)
++{
++ int ret = 0;
++ u32 base = host->base;
++
++ if (atomic_read(&host->abort)) {
++ ret = 1;
++ }
++
++ if (time_after(jiffies, tmo)) {
++ data->error = (unsigned int)-ETIMEDOUT;
++ ERR_MSG("XXX PIO Data Timeout: CMD<%d>", host->mrq->cmd->opcode);
++ ret = 1;
++ }
++
++ if(ret) {
++ msdc_reset();
++ msdc_clr_fifo();
++ msdc_clr_int();
++ ERR_MSG("msdc pio find abort");
++ }
++ return ret;
++}
++
++/*
++  Need to add a timeout here, otherwise a stalled transfer ends in a WDT
++  timeout and a system reboot.
++*/
++// pio mode data read/write
++static int msdc_pio_read(struct msdc_host *host, struct mmc_data *data)
++{
++ struct scatterlist *sg = data->sg;
++ u32 base = host->base;
++ u32 num = data->sg_len;
++ u32 *ptr;
++ u8 *u8ptr;
++ u32 left = 0;
++ u32 count, size = 0;
++ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
++ unsigned long tmo = jiffies + DAT_TIMEOUT;
++
++ sdr_set_bits(MSDC_INTEN, wints);
++ while (num) {
++ left = sg_dma_len(sg);
++ ptr = sg_virt(sg);
++ while (left) {
++ if ((left >= MSDC_FIFO_THD) && (msdc_rxfifocnt() >= MSDC_FIFO_THD)) {
++ count = MSDC_FIFO_THD >> 2;
++ do {
++ *ptr++ = msdc_fifo_read32();
++ } while (--count);
++ left -= MSDC_FIFO_THD;
++ } else if ((left < MSDC_FIFO_THD) && msdc_rxfifocnt() >= left) {
++ while (left > 3) {
++ *ptr++ = msdc_fifo_read32();
++ left -= 4;
++ }
++
++ u8ptr = (u8 *)ptr;
++ while(left) {
++ * u8ptr++ = msdc_fifo_read8();
++ left--;
++ }
++ }
++
++ if (msdc_pio_abort(host, data, tmo)) {
++ goto end;
++ }
++ }
++ size += sg_dma_len(sg);
++ sg = sg_next(sg); num--;
++ }
++end:
++ data->bytes_xfered += size;
++ N_MSG(FIO, " PIO Read<%d>bytes", size);
++
++ sdr_clr_bits(MSDC_INTEN, wints);
++ if(data->error) ERR_MSG("read pio data->error<%d> left<%d> size<%d>", data->error, left, size);
++ return data->error;
++}
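++
++/*
++ * Note: the PIO read loop above drains the RX FIFO in 32-bit words and
++ * falls back to byte reads for the unaligned tail of a segment; every
++ * iteration is bounded by msdc_pio_abort() with a jiffies deadline of
++ * DAT_TIMEOUT. msdc_pio_write() below mirrors the same structure for the
++ * TX FIFO.
++ */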
++
++/* Please make sure PIO is not used when size >= 512, i.e. memory card
++   block reads/writes never go through PIO, so CMD12 does not need to be
++   handled here on a data error.
++*/
++static int msdc_pio_write(struct msdc_host* host, struct mmc_data *data)
++{
++ u32 base = host->base;
++ struct scatterlist *sg = data->sg;
++ u32 num = data->sg_len;
++ u32 *ptr;
++ u8 *u8ptr;
++ u32 left;
++ u32 count, size = 0;
++ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
++ unsigned long tmo = jiffies + DAT_TIMEOUT;
++
++ sdr_set_bits(MSDC_INTEN, wints);
++ while (num) {
++ left = sg_dma_len(sg);
++ ptr = sg_virt(sg);
++
++ while (left) {
++ if (left >= MSDC_FIFO_SZ && msdc_txfifocnt() == 0) {
++ count = MSDC_FIFO_SZ >> 2;
++ do {
++ msdc_fifo_write32(*ptr); ptr++;
++ } while (--count);
++ left -= MSDC_FIFO_SZ;
++ } else if (left < MSDC_FIFO_SZ && msdc_txfifocnt() == 0) {
++ while (left > 3) {
++ msdc_fifo_write32(*ptr); ptr++;
++ left -= 4;
++ }
++
++ u8ptr = (u8*)ptr;
++ while(left){
++ msdc_fifo_write8(*u8ptr); u8ptr++;
++ left--;
++ }
++ }
++
++ if (msdc_pio_abort(host, data, tmo)) {
++ goto end;
++ }
++ }
++ size += sg_dma_len(sg);
++ sg = sg_next(sg); num--;
++ }
++end:
++ data->bytes_xfered += size;
++ N_MSG(FIO, " PIO Write<%d>bytes", size);
++ if(data->error) ERR_MSG("write pio data->error<%d>", data->error);
++
++ sdr_clr_bits(MSDC_INTEN, wints);
++ return data->error;
++}
++
++#if 0 /* --- by chhung */
++// DMA resume / start / stop
++static void msdc_dma_resume(struct msdc_host *host)
++{
++ u32 base = host->base;
++
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1);
++
++ N_MSG(DMA, "DMA resume");
++}
++#endif /* end of --- */
++
++static void msdc_dma_start(struct msdc_host *host)
++{
++ u32 base = host->base;
++ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
++
++ sdr_set_bits(MSDC_INTEN, wints);
++ //dsb(); /* --- by chhung */
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1);
++
++ N_MSG(DMA, "DMA start");
++}
++
++static void msdc_dma_stop(struct msdc_host *host)
++{
++ u32 base = host->base;
++ //u32 retries=500;
++ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ;
++
++ N_MSG(DMA, "DMA status: 0x%.8x",sdr_read32(MSDC_DMA_CFG));
++ //while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
++
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1);
++ while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS);
++
++ //dsb(); /* --- by chhung */
++ sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */
++
++ N_MSG(DMA, "DMA stop");
++}
++
++#if 0 /* --- by chhung */
++/* dump a gpd list */
++static void msdc_dma_dump(struct msdc_host *host, struct msdc_dma *dma)
++{
++ gpd_t *gpd = dma->gpd;
++ bd_t *bd = dma->bd;
++ bd_t *ptr;
++ int i = 0;
++ int p_to_v;
++
++ if (dma->mode != MSDC_MODE_DMA_DESC) {
++ return;
++ }
++
++ ERR_MSG("try to dump gpd and bd");
++
++ /* dump gpd */
++ ERR_MSG(".gpd<0x%.8x> gpd_phy<0x%.8x>", (int)gpd, (int)dma->gpd_addr);
++ ERR_MSG("...hwo <%d>", gpd->hwo );
++ ERR_MSG("...bdp <%d>", gpd->bdp );
++ ERR_MSG("...chksum<0x%.8x>", gpd->chksum );
++ //ERR_MSG("...intr <0x%.8x>", gpd->intr );
++ ERR_MSG("...next <0x%.8x>", (int)gpd->next );
++ ERR_MSG("...ptr <0x%.8x>", (int)gpd->ptr );
++ ERR_MSG("...buflen<0x%.8x>", gpd->buflen );
++ //ERR_MSG("...extlen<0x%.8x>", gpd->extlen );
++ //ERR_MSG("...arg <0x%.8x>", gpd->arg );
++ //ERR_MSG("...blknum<0x%.8x>", gpd->blknum );
++ //ERR_MSG("...cmd <0x%.8x>", gpd->cmd );
++
++ /* dump bd */
++ ERR_MSG(".bd<0x%.8x> bd_phy<0x%.8x> gpd_ptr<0x%.8x>", (int)bd, (int)dma->bd_addr, (int)gpd->ptr);
++ ptr = bd;
++ p_to_v = ((u32)bd - (u32)dma->bd_addr);
++ while (1) {
++ ERR_MSG(".bd[%d]", i); i++;
++ ERR_MSG("...eol <%d>", ptr->eol );
++ ERR_MSG("...chksum<0x%.8x>", ptr->chksum );
++ //ERR_MSG("...blkpad<0x%.8x>", ptr->blkpad );
++ //ERR_MSG("...dwpad <0x%.8x>", ptr->dwpad );
++ ERR_MSG("...next <0x%.8x>", (int)ptr->next );
++ ERR_MSG("...ptr <0x%.8x>", (int)ptr->ptr );
++ ERR_MSG("...buflen<0x%.8x>", (int)ptr->buflen );
++
++ if (ptr->eol == 1) {
++ break;
++ }
++
++ /* find the next bd, virtual address of ptr->next */
++ /* don't need to enable when use malloc */
++ //BUG_ON( (ptr->next + p_to_v)!=(ptr+1) );
++ //ERR_MSG(".next bd<0x%.8x><0x%.8x>", (ptr->next + p_to_v), (ptr+1));
++ ptr++;
++ }
++
++ ERR_MSG("dump gpd and bd finished");
++}
++#endif /* end of --- */
++
++/* calc checksum */
++static u8 msdc_dma_calcs(u8 *buf, u32 len)
++{
++ u32 i, sum = 0;
++ for (i = 0; i < len; i++) {
++ sum += buf[i];
++ }
++ return 0xFF - (u8)sum;
++}
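++
++/*
++ * Note: msdc_dma_calcs() returns the byte that makes the 8-bit sum of the
++ * checksummed area equal 0xFF, so the chksum field must be cleared before
++ * the sum is taken and then filled with the result, as msdc_dma_config()
++ * below does for both the gpd and each bd (16 bytes each).
++ */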
++
++/* gpd bd setup + dma registers */
++static int msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma)
++{
++ u32 base = host->base;
++ u32 sglen = dma->sglen;
++ //u32 i, j, num, bdlen, arg, xfersz;
++ u32 j, num, bdlen;
++ u8 blkpad, dwpad, chksum;
++ struct scatterlist *sg = dma->sg;
++ gpd_t *gpd;
++ bd_t *bd;
++
++ switch (dma->mode) {
++ case MSDC_MODE_DMA_BASIC:
++ BUG_ON(dma->xfersz > 65535);
++ BUG_ON(dma->sglen != 1);
++ sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg)));
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1);
++//#if defined (CONFIG_RALINK_MT7620)
++ if (ralink_soc == MT762X_SOC_MT7620A)
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg));
++//#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628)
++ else
++ sdr_write32((volatile u32*)(RALINK_MSDC_BASE+0xa8), sg_dma_len(sg));
++//#endif
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz);
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0);
++ break;
++ case MSDC_MODE_DMA_DESC:
++ blkpad = (dma->flags & DMA_FLAG_PAD_BLOCK) ? 1 : 0;
++ dwpad = (dma->flags & DMA_FLAG_PAD_DWORD) ? 1 : 0;
++ chksum = (dma->flags & DMA_FLAG_EN_CHKSUM) ? 1 : 0;
++
++ /* calculate the required number of gpd */
++ num = (sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD;
++ BUG_ON(num !=1 );
++
++ gpd = dma->gpd;
++ bd = dma->bd;
++ bdlen = sglen;
++
++		/* modify gpd */
++ //gpd->intr = 0;
++ gpd->hwo = 1; /* hw will clear it */
++ gpd->bdp = 1;
++ gpd->chksum = 0; /* need to clear first. */
++ gpd->chksum = (chksum ? msdc_dma_calcs((u8 *)gpd, 16) : 0);
++
++		/* modify bd */
++ for (j = 0; j < bdlen; j++) {
++ msdc_init_bd(&bd[j], blkpad, dwpad, sg_dma_address(sg), sg_dma_len(sg));
++ if(j == bdlen - 1) {
++ bd[j].eol = 1; /* the last bd */
++ } else {
++ bd[j].eol = 0;
++ }
++			bd[j].chksum = 0; /* checksum needs to be cleared first */
++ bd[j].chksum = (chksum ? msdc_dma_calcs((u8 *)(&bd[j]), 16) : 0);
++ sg++;
++ }
++
++ dma->used_gpd += 2;
++ dma->used_bd += bdlen;
++
++ sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, chksum);
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz);
++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1);
++
++ sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr));
++ break;
++
++ default:
++ break;
++ }
++
++ N_MSG(DMA, "DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
++ N_MSG(DMA, "DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
++ N_MSG(DMA, "DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
++
++ return 0;
++}
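++
++/*
++ * Note: basic DMA mode handles a single contiguous segment (its length goes
++ * to MSDC_DMA_CTRL_XFERSZ on MT7620, or to the register at offset 0xa8
++ * otherwise), while descriptor mode links one gpd to one bd per scatterlist
++ * entry and optionally protects both with the checksum above.
++ */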
++
++static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma,
++ struct scatterlist *sg, unsigned int sglen)
++{
++ BUG_ON(sglen > MAX_BD_NUM); /* not support currently */
++
++ dma->sg = sg;
++ dma->flags = DMA_FLAG_EN_CHKSUM;
++ //dma->flags = DMA_FLAG_NONE; /* CHECKME */
++ dma->sglen = sglen;
++ dma->xfersz = host->xfer_size;
++ dma->burstsz = MSDC_BRUST_64B;
++
++ if (sglen == 1 && sg_dma_len(sg) <= MAX_DMA_CNT)
++ dma->mode = MSDC_MODE_DMA_BASIC;
++ else
++ dma->mode = MSDC_MODE_DMA_DESC;
++
++ N_MSG(DMA, "DMA mode<%d> sglen<%d> xfersz<%d>", dma->mode, dma->sglen, dma->xfersz);
++
++ msdc_dma_config(host, dma);
++
++ /*if (dma->mode == MSDC_MODE_DMA_DESC) {
++ //msdc_dma_dump(host, dma);
++ } */
++}
++
++/* set block number before send command */
++static void msdc_set_blknum(struct msdc_host *host, u32 blknum)
++{
++ u32 base = host->base;
++
++ sdr_write32(SDC_BLK_NUM, blknum);
++}
++
++static int msdc_do_request(struct mmc_host*mmc, struct mmc_request*mrq)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ struct mmc_command *cmd;
++ struct mmc_data *data;
++ u32 base = host->base;
++ //u32 intsts = 0;
++ unsigned int left=0;
++ int dma = 0, read = 1, dir = DMA_FROM_DEVICE, send_type=0;
++
++ #define SND_DAT 0
++ #define SND_CMD 1
++
++ BUG_ON(mmc == NULL);
++ BUG_ON(mrq == NULL);
++
++ host->error = 0;
++ atomic_set(&host->abort, 0);
++
++ cmd = mrq->cmd;
++ data = mrq->cmd->data;
++
++#if 0 /* --- by chhung */
++ //if(host->id ==1){
++ N_MSG(OPS, "enable clock!");
++ msdc_ungate_clock(host->id);
++ //}
++#endif /* end of --- */
++
++ if (!data) {
++ send_type=SND_CMD;
++ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) {
++ goto done;
++ }
++ } else {
++ BUG_ON(data->blksz > HOST_MAX_BLKSZ);
++ send_type=SND_DAT;
++
++ data->error = 0;
++ read = data->flags & MMC_DATA_READ ? 1 : 0;
++ host->data = data;
++ host->xfer_size = data->blocks * data->blksz;
++ host->blksz = data->blksz;
++
++		/* decide the transfer mode */
++ if (drv_mode[host->id] == MODE_PIO) {
++ host->dma_xfer = dma = 0;
++ } else if (drv_mode[host->id] == MODE_DMA) {
++ host->dma_xfer = dma = 1;
++ } else if (drv_mode[host->id] == MODE_SIZE_DEP) {
++ host->dma_xfer = dma = ((host->xfer_size >= dma_size[host->id]) ? 1 : 0);
++ }
++
++ if (read) {
++ if ((host->timeout_ns != data->timeout_ns) ||
++ (host->timeout_clks != data->timeout_clks)) {
++ msdc_set_timeout(host, data->timeout_ns, data->timeout_clks);
++ }
++ }
++
++ msdc_set_blknum(host, data->blocks);
++ //msdc_clr_fifo(); /* no need */
++
++ if (dma) {
++ msdc_dma_on(); /* enable DMA mode first!! */
++ init_completion(&host->xfer_done);
++
++ /* start the command first*/
++ if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0)
++ goto done;
++
++ dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
++ (void)dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);
++ msdc_dma_setup(host, &host->dma, data->sg, data->sg_len);
++
++ /* then wait command done */
++ if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0)
++ goto done;
++
++			/* For read, data can arrive too fast and cause a CRC error;
++			   starting the DMA has no bearing on the CRC. */
++ //init_completion(&host->xfer_done);
++ msdc_dma_start(host);
++
++ spin_unlock(&host->lock);
++ if(!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)){
++ ERR_MSG("XXX CMD<%d> wait xfer_done<%d> timeout!!", cmd->opcode, data->blocks * data->blksz);
++ ERR_MSG(" DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA));
++ ERR_MSG(" DMA_CA = 0x%x", sdr_read32(MSDC_DMA_CA));
++ ERR_MSG(" DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL));
++ ERR_MSG(" DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG));
++ data->error = (unsigned int)-ETIMEDOUT;
++
++ msdc_reset();
++ msdc_clr_fifo();
++ msdc_clr_int();
++ }
++ spin_lock(&host->lock);
++ msdc_dma_stop(host);
++ } else {
++ /* Firstly: send command */
++ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) {
++ goto done;
++ }
++
++ /* Secondly: pio data phase */
++ if (read) {
++ if (msdc_pio_read(host, data)){
++ goto done;
++ }
++ } else {
++ if (msdc_pio_write(host, data)) {
++ goto done;
++ }
++ }
++
++ /* For write case: make sure contents in fifo flushed to device */
++ if (!read) {
++ while (1) {
++ left=msdc_txfifocnt();
++ if (left == 0) {
++ break;
++ }
++ if (msdc_pio_abort(host, data, jiffies + DAT_TIMEOUT)) {
++ break;
++ /* Fix me: what about if data error, when stop ? how to? */
++ }
++ }
++ } else {
++ /* Fix me: read case: need to check CRC error */
++ }
++
++ /* For write case: SDCBUSY and Xfer_Comp will assert when DAT0 not busy.
++ For read case : SDCBUSY and Xfer_Comp will assert when last byte read out from FIFO.
++ */
++
++ /* try not to wait xfer_comp interrupt.
++ the next command will check SDC_BUSY.
++ SDC_BUSY means xfer_comp assert
++ */
++
++ } // PIO mode
++
++ /* Last: stop transfer */
++ if (data->stop){
++ if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0) {
++ goto done;
++ }
++ }
++ }
++
++done:
++ if (data != NULL) {
++ host->data = NULL;
++ host->dma_xfer = 0;
++ if (dma != 0) {
++ msdc_dma_off();
++ host->dma.used_bd = 0;
++ host->dma.used_gpd = 0;
++ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir);
++ }
++ host->blksz = 0;
++
++#if 0 // don't stop twice!
++ if(host->hw->flags & MSDC_REMOVABLE && data->error) {
++ msdc_abort_data(host);
++ /* reset in IRQ, stop command has issued. -> No need */
++ }
++#endif
++
++ N_MSG(OPS, "CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>",cmd->opcode, (dma? "dma":"pio"),
++ (read ? "read ":"write") ,data->blksz, data->blocks, data->error);
++ }
++
++#if 0 /* --- by chhung */
++#if 1
++ //if(host->id==1) {
++ if(send_type==SND_CMD) {
++ if(cmd->opcode == MMC_SEND_STATUS) {
++ if((cmd->resp[0] & CARD_READY_FOR_DATA) ||(CARD_CURRENT_STATE(cmd->resp[0]) != 7)){
++ N_MSG(OPS,"disable clock, CMD13 IDLE");
++ msdc_gate_clock(host->id);
++ }
++ } else {
++ N_MSG(OPS,"disable clock, CMD<%d>", cmd->opcode);
++ msdc_gate_clock(host->id);
++ }
++ } else {
++ if(read) {
++ N_MSG(OPS,"disable clock!!! Read CMD<%d>",cmd->opcode);
++ msdc_gate_clock(host->id);
++ }
++ }
++ //}
++#else
++ msdc_gate_clock(host->id);
++#endif
++#endif /* end of --- */
++
++ if (mrq->cmd->error) host->error = 0x001;
++ if (mrq->data && mrq->data->error) host->error |= 0x010;
++ if (mrq->stop && mrq->stop->error) host->error |= 0x100;
++
++ //if (host->error) ERR_MSG("host->error<%d>", host->error);
++
++ return host->error;
++}
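++
++/*
++ * Request flow summary: command-only requests go through msdc_do_command();
++ * data requests pick PIO or DMA from drv_mode[]/dma_size[]. The DMA path
++ * interleaves msdc_command_start(), descriptor setup and msdc_command_resp()
++ * before starting the engine, the PIO path issues the full command first and
++ * then moves the FIFO by hand, and a stop command (CMD12) is sent at the end
++ * when the request carries one.
++ */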
++
++static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host)
++{
++ struct mmc_command cmd;
++ struct mmc_request mrq;
++ u32 err;
++
++ memset(&cmd, 0, sizeof(struct mmc_command));
++ cmd.opcode = MMC_APP_CMD;
++#if 0 /* bug: we meet mmc->card is null when ACMD6 */
++ cmd.arg = mmc->card->rca << 16;
++#else
++ cmd.arg = host->app_cmd_arg;
++#endif
++ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
++
++ memset(&mrq, 0, sizeof(struct mmc_request));
++ mrq.cmd = &cmd; cmd.mrq = &mrq;
++ cmd.data = NULL;
++
++ err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT);
++ return err;
++}
++
++static int msdc_tune_cmdrsp(struct msdc_host*host, struct mmc_command *cmd)
++{
++ int result = -1;
++ u32 base = host->base;
++ u32 rsmpl, cur_rsmpl, orig_rsmpl;
++ u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly;
++ u32 skip = 1;
++
++ /* ==== don't support 3.0 now ====
++ 1: R_SMPL[1]
++ 2: PAD_CMD_RESP_RXDLY[26:22]
++ ==========================*/
++
++ // save the previous tune result
++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl);
++ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, orig_rrdly);
++
++ rrdly = 0;
++ do {
++ for (rsmpl = 0; rsmpl < 2; rsmpl++) {
++ /* Lv1: R_SMPL[1] */
++ cur_rsmpl = (orig_rsmpl + rsmpl) % 2;
++ if (skip == 1) {
++ skip = 0;
++ continue;
++ }
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl);
++
++ if (host->app_cmd) {
++ result = msdc_app_cmd(host->mmc, host);
++ if (result) {
++ ERR_MSG("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>",
++ host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl);
++ continue;
++ }
++ }
++ result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune.
++ ERR_MSG("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>", cmd->opcode,
++ (result == 0) ? "PASS" : "FAIL", cur_rrdly, cur_rsmpl);
++
++ if (result == 0) {
++ return 0;
++ }
++ if (result != (unsigned int)(-EIO)) {
++ ERR_MSG("TUNE_CMD<%d> Error<%d> not -EIO", cmd->opcode, result);
++ return result;
++ }
++
++ /* should be EIO */
++ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */
++ msdc_abort_data(host);
++ }
++ }
++
++ /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */
++ cur_rrdly = (orig_rrdly + rrdly + 1) % 32;
++ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly);
++ }while (++rrdly < 32);
++
++ return result;
++}
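++
++/*
++ * Note: command-response tuning walks a 2 x 32 search space: the inner loop
++ * toggles the response sample edge (MSDC_IOCON_RSPL) and the outer loop steps
++ * the PAD_CMD_RESP_RXDLY delay line, retrying the failing command at each
++ * point until it succeeds or the space is exhausted.
++ */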
++
++/* Support SD2.0 Only */
++static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ u32 base = host->base;
++ u32 ddr=0;
++ u32 dcrc=0;
++ u32 rxdly, cur_rxdly0, cur_rxdly1;
++ u32 dsmpl, cur_dsmpl, orig_dsmpl;
++ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
++ u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7;
++ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
++ u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7;
++ int result = -1;
++ u32 skip = 1;
++
++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl);
++
++ /* Tune Method 2. */
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
++
++ rxdly = 0;
++ do {
++ for (dsmpl = 0; dsmpl < 2; dsmpl++) {
++ cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
++ if (skip == 1) {
++ skip = 0;
++ continue;
++ }
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
++
++ if (host->app_cmd) {
++ result = msdc_app_cmd(host->mmc, host);
++ if (result) {
++ ERR_MSG("TUNE_BREAD app_cmd<%d> failed", host->mrq->cmd->opcode);
++ continue;
++ }
++ }
++ result = msdc_do_request(mmc,mrq);
++
++ sdr_get_field(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc); /* RO */
++ if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG;
++ ERR_MSG("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>",
++ (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc,
++ sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl);
++
++			/* Fix me: result is 0, but dcrc still exists */
++ if (result == 0 && dcrc == 0) {
++ goto done;
++ } else {
++ /* there is a case: command timeout, and data phase not processed */
++ if (mrq->data->error != 0 && mrq->data->error != (unsigned int)(-EIO)) {
++ ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
++ result, mrq->cmd->error, mrq->data->error);
++ goto done;
++ }
++ }
++ }
++
++ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
++ cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1);
++
++ /* E1 ECO. YD: Reverse */
++ if (sdr_read32(MSDC_ECO_VER) >= 4) {
++ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
++ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
++ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
++ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
++ orig_dat4 = (cur_rxdly1 >> 24) & 0x1F;
++ orig_dat5 = (cur_rxdly1 >> 16) & 0x1F;
++ orig_dat6 = (cur_rxdly1 >> 8) & 0x1F;
++ orig_dat7 = (cur_rxdly1 >> 0) & 0x1F;
++ } else {
++ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
++ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
++ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
++ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
++ orig_dat4 = (cur_rxdly1 >> 0) & 0x1F;
++ orig_dat5 = (cur_rxdly1 >> 8) & 0x1F;
++ orig_dat6 = (cur_rxdly1 >> 16) & 0x1F;
++ orig_dat7 = (cur_rxdly1 >> 24) & 0x1F;
++ }
++
++ if (ddr) {
++ cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
++ cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
++ cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
++ cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
++ } else {
++ cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0;
++ cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1;
++ cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2;
++ cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3;
++ }
++ cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4;
++ cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5;
++ cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6;
++ cur_dat7 = (dcrc & (1 << 7)) ? ((orig_dat7 + 1) % 32) : orig_dat7;
++
++ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
++ cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0);
++
++ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
++ sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1);
++
++ } while (++rxdly < 32);
++
++done:
++ return result;
++}
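++
++/*
++ * Note: read tuning inspects SDC_DCRC_STS after each attempt and bumps the
++ * per-DAT-line delay (modulo 32) only for the lanes whose CRC bit is set,
++ * honouring the reversed DAT0..DAT7 byte order on ECO version >= 4 parts.
++ */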
++
++static int msdc_tune_bwrite(struct mmc_host *mmc,struct mmc_request *mrq)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ u32 base = host->base;
++
++ u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly;
++ u32 dsmpl, cur_dsmpl, orig_dsmpl;
++ u32 rxdly, cur_rxdly0;
++ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3;
++ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3;
++ int result = -1;
++ u32 skip = 1;
++
++	// MSDC_IOCON_DDR50CKD needs to be checked. [Fix me]
++
++ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, orig_wrrdly);
++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl );
++
++ /* Tune Method 2. just DAT0 */
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
++ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0);
++
++ /* E1 ECO. YD: Reverse */
++ if (sdr_read32(MSDC_ECO_VER) >= 4) {
++ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F;
++ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F;
++ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F;
++ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F;
++ } else {
++ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F;
++ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F;
++ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F;
++ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F;
++ }
++
++ rxdly = 0;
++ do {
++ wrrdly = 0;
++ do {
++ for (dsmpl = 0; dsmpl < 2; dsmpl++) {
++ cur_dsmpl = (orig_dsmpl + dsmpl) % 2;
++ if (skip == 1) {
++ skip = 0;
++ continue;
++ }
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl);
++
++ if (host->app_cmd) {
++ result = msdc_app_cmd(host->mmc, host);
++ if (result) {
++ ERR_MSG("TUNE_BWRITE app_cmd<%d> failed", host->mrq->cmd->opcode);
++ continue;
++ }
++ }
++ result = msdc_do_request(mmc,mrq);
++
++ ERR_MSG("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>",
++ result == 0 ? "PASS" : "FAIL",
++ cur_dsmpl, cur_wrrdly, cur_rxdly0);
++
++ if (result == 0) {
++ goto done;
++ }
++ else {
++ /* there is a case: command timeout, and data phase not processed */
++ if (mrq->data->error != (unsigned int)(-EIO)) {
++ ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>",
++ result, mrq->cmd->error, mrq->data->error);
++ goto done;
++ }
++ }
++ }
++ cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32;
++ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly);
++ } while (++wrrdly < 32);
++
++ cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */
++ cur_dat1 = orig_dat1;
++ cur_dat2 = orig_dat2;
++ cur_dat3 = orig_dat3;
++
++ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0);
++ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0);
++ } while (++rxdly < 32);
++
++done:
++ return result;
++}
++
++static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status)
++{
++ struct mmc_command cmd;
++ struct mmc_request mrq;
++ u32 err;
++
++ memset(&cmd, 0, sizeof(struct mmc_command));
++ cmd.opcode = MMC_SEND_STATUS;
++ if (mmc->card) {
++ cmd.arg = mmc->card->rca << 16;
++ } else {
++ ERR_MSG("cmd13 mmc card is null");
++ cmd.arg = host->app_cmd_arg;
++ }
++ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
++
++ memset(&mrq, 0, sizeof(struct mmc_request));
++ mrq.cmd = &cmd; cmd.mrq = &mrq;
++ cmd.data = NULL;
++
++ err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT);
++
++ if (status) {
++ *status = cmd.resp[0];
++ }
++
++ return err;
++}
++
++static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host)
++{
++ u32 err = 0;
++ u32 status = 0;
++
++ do {
++ err = msdc_get_card_status(mmc, host, &status);
++ if (err) return err;
++ /* need cmd12? */
++ ERR_MSG("cmd<13> resp<0x%x>", status);
++ } while (R1_CURRENT_STATE(status) == 7);
++
++ return err;
++}
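++
++/*
++ * Note: msdc_check_busy() polls CMD13 until the card leaves state 7
++ * (programming), i.e. until a pending write has actually finished, before
++ * write tuning is attempted in msdc_tune_request() below.
++ */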
++
++/* called when msdc_do_request() has failed */
++static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ struct mmc_command *cmd;
++ struct mmc_data *data;
++ //u32 base = host->base;
++ int ret=0, read;
++
++ cmd = mrq->cmd;
++ data = mrq->cmd->data;
++
++ read = data->flags & MMC_DATA_READ ? 1 : 0;
++
++ if (read) {
++ if (data->error == (unsigned int)(-EIO)) {
++ ret = msdc_tune_bread(mmc,mrq);
++ }
++ } else {
++ ret = msdc_check_busy(mmc, host);
++ if (ret){
++ ERR_MSG("XXX cmd13 wait program done failed");
++ return ret;
++ }
++ /* CRC and TO */
++ /* Fix me: don't care card status? */
++ ret = msdc_tune_bwrite(mmc,mrq);
++ }
++
++ return ret;
++}
++
++/* ops.request */
++static void msdc_ops_request(struct mmc_host *mmc,struct mmc_request *mrq)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++
++ //=== for sdio profile ===
++#if 0 /* --- by chhung */
++ u32 old_H32, old_L32, new_H32, new_L32;
++ u32 ticks = 0, opcode = 0, sizes = 0, bRx = 0;
++#endif /* end of --- */
++
++ if(host->mrq){
++ ERR_MSG("XXX host->mrq<0x%.8x>", (int)host->mrq);
++ BUG();
++ }
++
++ if (!is_card_present(host) || host->power_mode == MMC_POWER_OFF) {
++ ERR_MSG("cmd<%d> card<%d> power<%d>", mrq->cmd->opcode, is_card_present(host), host->power_mode);
++ mrq->cmd->error = (unsigned int)-ENOMEDIUM;
++
++#if 1
++ mrq->done(mrq); // call done directly.
++#else
++ mrq->cmd->retries = 0; // please don't retry.
++ mmc_request_done(mmc, mrq);
++#endif
++
++ return;
++ }
++
++ /* start to process */
++ spin_lock(&host->lock);
++#if 0 /* --- by chhung */
++ if (sdio_pro_enable) { //=== for sdio profile ===
++ if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) {
++ GPT_GetCounter64(&old_L32, &old_H32);
++ }
++ }
++#endif /* end of --- */
++
++ host->mrq = mrq;
++
++ if (msdc_do_request(mmc,mrq)) {
++ if(host->hw->flags & MSDC_REMOVABLE && ralink_soc == MT762X_SOC_MT7621AT && mrq->data && mrq->data->error) {
++ msdc_tune_request(mmc,mrq);
++ }
++ }
++
++ /* ==== when request done, check if app_cmd ==== */
++ if (mrq->cmd->opcode == MMC_APP_CMD) {
++ host->app_cmd = 1;
++ host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */
++ } else {
++ host->app_cmd = 0;
++ //host->app_cmd_arg = 0;
++ }
++
++ host->mrq = NULL;
++
++#if 0 /* --- by chhung */
++ //=== for sdio profile ===
++ if (sdio_pro_enable) {
++ if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) {
++ GPT_GetCounter64(&new_L32, &new_H32);
++ ticks = msdc_time_calc(old_L32, old_H32, new_L32, new_H32);
++
++ opcode = mrq->cmd->opcode;
++ if (mrq->cmd->data) {
++ sizes = mrq->cmd->data->blocks * mrq->cmd->data->blksz;
++ bRx = mrq->cmd->data->flags & MMC_DATA_READ ? 1 : 0 ;
++ } else {
++ bRx = mrq->cmd->arg & 0x80000000 ? 1 : 0;
++ }
++
++ if (!mrq->cmd->error) {
++ msdc_performance(opcode, sizes, bRx, ticks);
++ }
++ }
++ }
++#endif /* end of --- */
++ spin_unlock(&host->lock);
++
++ mmc_request_done(mmc, mrq);
++
++ return;
++}
++
++/* called by ops.set_ios */
++static void msdc_set_buswidth(struct msdc_host *host, u32 width)
++{
++ u32 base = host->base;
++ u32 val = sdr_read32(SDC_CFG);
++
++ val &= ~SDC_CFG_BUSWIDTH;
++
++ switch (width) {
++ default:
++ case MMC_BUS_WIDTH_1:
++ width = 1;
++ val |= (MSDC_BUS_1BITS << 16);
++ break;
++ case MMC_BUS_WIDTH_4:
++ val |= (MSDC_BUS_4BITS << 16);
++ break;
++ case MMC_BUS_WIDTH_8:
++ val |= (MSDC_BUS_8BITS << 16);
++ break;
++ }
++
++ sdr_write32(SDC_CFG, val);
++
++ N_MSG(CFG, "Bus Width = %d", width);
++}
++
++/* ops.set_ios */
++static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ struct msdc_hw *hw=host->hw;
++ u32 base = host->base;
++ u32 ddr = 0;
++
++#ifdef MT6575_SD_DEBUG
++ static char *vdd[] = {
++ "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v",
++ "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v",
++ "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v",
++ "3.40v", "3.50v", "3.60v"
++ };
++ static char *power_mode[] = {
++ "OFF", "UP", "ON"
++ };
++ static char *bus_mode[] = {
++ "UNKNOWN", "OPENDRAIN", "PUSHPULL"
++ };
++ static char *timing[] = {
++ "LEGACY", "MMC_HS", "SD_HS"
++ };
++
++ printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)",
++ ios->clock / 1000, bus_mode[ios->bus_mode],
++ (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1,
++ power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);
++#endif
++
++ msdc_set_buswidth(host, ios->bus_width);
++
++ /* Power control ??? */
++ switch (ios->power_mode) {
++ case MMC_POWER_OFF:
++ case MMC_POWER_UP:
++ // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */
++ break;
++ case MMC_POWER_ON:
++ host->power_mode = MMC_POWER_ON;
++ break;
++ default:
++ break;
++ }
++
++ /* Clock control */
++ if (host->mclk != ios->clock) {
++ if(ios->clock > 25000000) {
++ //if (!(host->hw->flags & MSDC_REMOVABLE)) {
++ INIT_MSG("SD data latch edge<%d>", hw->data_edge);
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, hw->cmd_edge);
++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, hw->data_edge);
++ //} /* for tuning debug */
++ } else { /* default value */
++ sdr_write32(MSDC_IOCON, 0x00000000);
++ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
++ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
++ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
++ // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
++ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
++ }
++ msdc_set_mclk(host, ddr, ios->clock);
++ }
++}
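++
++/*
++ * Note: on a clock change above 25 MHz the sample edges come from the board
++ * hw struct, otherwise the IOCON/PAD_TUNE/DAT_RDDLY registers are reloaded
++ * with the MT7620 E2 defaults before msdc_set_mclk() reprograms the divider.
++ */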
++
++/* ops.get_ro */
++static int msdc_ops_get_ro(struct mmc_host *mmc)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ u32 base = host->base;
++ unsigned long flags;
++ int ro = 0;
++
++ if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */
++ spin_lock_irqsave(&host->lock, flags);
++ ro = (sdr_read32(MSDC_PS) >> 31);
++ spin_unlock_irqrestore(&host->lock, flags);
++ }
++ return ro;
++}
++
++/* ops.get_cd */
++static int msdc_ops_get_cd(struct mmc_host *mmc)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ u32 base = host->base;
++ unsigned long flags;
++ int present = 1;
++
++ /* for sdio, MSDC_REMOVABLE not set, always return 1 */
++ if (!(host->hw->flags & MSDC_REMOVABLE)) {
++		/* For sdio, reading the H/W always gives <1>, but it may time out sometimes */
++#if 1
++ host->card_inserted = 1;
++ return 1;
++#else
++ host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0;
++ INIT_MSG("sdio ops_get_cd<%d>", host->card_inserted);
++ return host->card_inserted;
++#endif
++ }
++
++ /* MSDC_CD_PIN_EN set for card */
++ if (host->hw->flags & MSDC_CD_PIN_EN) {
++ spin_lock_irqsave(&host->lock, flags);
++#if 0
++ present = host->card_inserted; /* why not read from H/W: Fix me*/
++#else
++ // CD
++ if (cd_active_low)
++ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1;
++ else
++ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 1 : 0;
++ if (host->mmc->caps & MMC_CAP_NEEDS_POLL)
++ present = 1;
++ host->card_inserted = present;
++#endif
++ spin_unlock_irqrestore(&host->lock, flags);
++ } else {
++ present = 0; /* TODO? Check DAT3 pins for card detection */
++ }
++
++ INIT_MSG("ops_get_cd return<%d>", present);
++ return present;
++}
++
++/* ops.enable_sdio_irq */
++static void msdc_ops_enable_sdio_irq(struct mmc_host *mmc, int enable)
++{
++ struct msdc_host *host = mmc_priv(mmc);
++ struct msdc_hw *hw = host->hw;
++ u32 base = host->base;
++ u32 tmp;
++
++ if (hw->flags & MSDC_EXT_SDIO_IRQ) { /* yes for sdio */
++ if (enable) {
++ hw->enable_sdio_eirq(); /* combo_sdio_enable_eirq */
++ } else {
++ hw->disable_sdio_eirq(); /* combo_sdio_disable_eirq */
++ }
++ } else {
++ ERR_MSG("XXX "); /* so never enter here */
++ tmp = sdr_read32(SDC_CFG);
++ /* FIXME. Need to interrupt gap detection */
++ if (enable) {
++ tmp |= (SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP);
++ } else {
++ tmp &= ~(SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP);
++ }
++ sdr_write32(SDC_CFG, tmp);
++ }
++}
++
++static struct mmc_host_ops mt_msdc_ops = {
++ .request = msdc_ops_request,
++ .set_ios = msdc_ops_set_ios,
++ .get_ro = msdc_ops_get_ro,
++ .get_cd = msdc_ops_get_cd,
++ .enable_sdio_irq = msdc_ops_enable_sdio_irq,
++};
++
++/*--------------------------------------------------------------------------*/
++/* interrupt handler */
++/*--------------------------------------------------------------------------*/
++static irqreturn_t msdc_irq(int irq, void *dev_id)
++{
++ struct msdc_host *host = (struct msdc_host *)dev_id;
++ struct mmc_data *data = host->data;
++ struct mmc_command *cmd = host->cmd;
++ u32 base = host->base;
++
++ u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY |
++ MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY |
++ MSDC_INT_ACMD19_DONE;
++ u32 datsts = MSDC_INT_DATCRCERR |MSDC_INT_DATTMO;
++
++ u32 intsts = sdr_read32(MSDC_INT);
++ u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts;
++
++ sdr_write32(MSDC_INT, intsts); /* clear interrupts */
++ /* MSG will cause fatal error */
++
++ /* card change interrupt */
++ if (intsts & MSDC_INT_CDSC){
++ if (mtk_sw_poll)
++ return IRQ_HANDLED;
++ IRQ_MSG("MSDC_INT_CDSC irq<0x%.8x>", intsts);
++#if 0 /* ---/+++ by chhung: fix slot mechanical bounce issue */
++ tasklet_hi_schedule(&host->card_tasklet);
++#else
++ schedule_delayed_work(&host->card_delaywork, HZ);
++#endif
++ /* tuning when plug card ? */
++ }
++
++ /* sdio interrupt */
++ if (intsts & MSDC_INT_SDIOIRQ){
++ IRQ_MSG("XXX MSDC_INT_SDIOIRQ"); /* seems not sdio irq */
++ //mmc_signal_sdio_irq(host->mmc);
++ }
++
++ /* transfer complete interrupt */
++ if (data != NULL) {
++ if (inten & MSDC_INT_XFER_COMPL) {
++ data->bytes_xfered = host->dma.xfersz;
++ complete(&host->xfer_done);
++ }
++
++ if (intsts & datsts) {
++			/* do a basic reset, or the stop command will hit sdc_busy */
++ msdc_reset();
++ msdc_clr_fifo();
++ msdc_clr_int();
++ atomic_set(&host->abort, 1); /* For PIO mode exit */
++
++ if (intsts & MSDC_INT_DATTMO){
++ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATTMO", host->mrq->cmd->opcode);
++ data->error = (unsigned int)-ETIMEDOUT;
++ }
++ else if (intsts & MSDC_INT_DATCRCERR){
++ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS));
++ data->error = (unsigned int)-EIO;
++ }
++
++ //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) {
++ if (host->dma_xfer) {
++				complete(&host->xfer_done); /* read CRC errors can arrive before XFER_COMPL is enabled */
++ } /* PIO mode can't do complete, because not init */
++ }
++ }
++
++ /* command interrupts */
++ if ((cmd != NULL) && (intsts & cmdsts)) {
++ if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) ||
++ (intsts & MSDC_INT_ACMD19_DONE)) {
++ u32 *rsp = &cmd->resp[0];
++
++ switch (host->cmd_rsp) {
++ case RESP_NONE:
++ break;
++ case RESP_R2:
++ *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2);
++ *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0);
++ break;
++ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */
++ if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE)) {
++ *rsp = sdr_read32(SDC_ACMD_RESP);
++ } else {
++ *rsp = sdr_read32(SDC_RESP0);
++ }
++ break;
++ }
++ } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) {
++ if(intsts & MSDC_INT_ACMDCRCERR){
++ IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDCRCERR",cmd->opcode);
++ }
++ else {
++ IRQ_MSG("XXX CMD<%d> MSDC_INT_RSPCRCERR",cmd->opcode);
++ }
++ cmd->error = (unsigned int)-EIO;
++ } else if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) {
++ if(intsts & MSDC_INT_ACMDTMO){
++ IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDTMO",cmd->opcode);
++ }
++ else {
++ IRQ_MSG("XXX CMD<%d> MSDC_INT_CMDTMO",cmd->opcode);
++ }
++ cmd->error = (unsigned int)-ETIMEDOUT;
++ msdc_reset();
++ msdc_clr_fifo();
++ msdc_clr_int();
++ }
++ complete(&host->cmd_done);
++ }
++
++ /* mmc irq interrupts */
++ if (intsts & MSDC_INT_MMCIRQ) {
++ printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS));
++ }
++
++#ifdef MT6575_SD_DEBUG
++ {
++ msdc_int_reg *int_reg = (msdc_int_reg*)&intsts;
++ N_MSG(INT, "IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)",
++ intsts,
++ int_reg->mmcirq,
++ int_reg->cdsc,
++ int_reg->atocmdrdy,
++ int_reg->atocmdtmo,
++ int_reg->atocmdcrc,
++ int_reg->atocmd19done);
++ N_MSG(INT, "IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)",
++ intsts,
++ int_reg->sdioirq,
++ int_reg->cmdrdy,
++ int_reg->cmdtmo,
++ int_reg->rspcrc,
++ int_reg->csta);
++ N_MSG(INT, "IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)",
++ intsts,
++ int_reg->xfercomp,
++ int_reg->dxferdone,
++ int_reg->dattmo,
++ int_reg->datcrc,
++ int_reg->dmaqempty);
++
++ }
++#endif
++
++ return IRQ_HANDLED;
++}
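++
++/*
++ * Note: the interrupt handler is the producer side of the completions used
++ * above: it fills cmd->resp[] and completes cmd_done for command events,
++ * completes xfer_done for DMA data events (PIO relies on polling instead),
++ * and defers card-detect handling to the delayed work to debounce the slot.
++ */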
++
++/*--------------------------------------------------------------------------*/
++/* platform_driver members */
++/*--------------------------------------------------------------------------*/
++/* called by msdc_drv_probe/remove */
++static void msdc_enable_cd_irq(struct msdc_host *host, int enable)
++{
++ struct msdc_hw *hw = host->hw;
++ u32 base = host->base;
++
++ /* for sdio, not set */
++ if ((hw->flags & MSDC_CD_PIN_EN) == 0) {
++		/* Pull down the card detection pin since it is not available */
++ /*
++ if (hw->config_gpio_pin)
++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
++ */
++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
++ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
++ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
++ return;
++ }
++
++	N_MSG(CFG, "CD IRQ Enable(%d)", enable);
++
++ if (enable) {
++ if (hw->enable_cd_eirq) { /* not set, never enter */
++ hw->enable_cd_eirq();
++ } else {
++			/* The card detection circuit relies on the core power, so the core
++			 * power must not be turned off. Take a reference here to keep the
++			 * core power alive.
++			 */
++ //msdc_vcore_on(host); //did in msdc_init_hw()
++
++ if (hw->config_gpio_pin) /* NULL */
++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP);
++
++ sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE);
++ sdr_set_bits(MSDC_PS, MSDC_PS_CDEN);
++ sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
++ sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */
++ }
++ } else {
++ if (hw->disable_cd_eirq) {
++ hw->disable_cd_eirq();
++ } else {
++ if (hw->config_gpio_pin) /* NULL */
++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN);
++
++ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP);
++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
++ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC);
++
++			/* Drop the reference on the core power since the card detection
++			 * circuit is shut down.
++			 */
++ //msdc_vcore_off(host);
++ }
++ }
++}
++
++/* called by msdc_drv_probe */
++static void msdc_init_hw(struct msdc_host *host)
++{
++ u32 base = host->base;
++ struct msdc_hw *hw = host->hw;
++
++#ifdef MT6575_SD_DEBUG
++ msdc_reg[host->id] = (struct msdc_regs *)host->base;
++#endif
++
++ /* Power on */
++#if 0 /* --- by chhung */
++ msdc_vcore_on(host);
++ msdc_pin_reset(host, MSDC_PIN_PULL_UP);
++ msdc_select_clksrc(host, hw->clk_src);
++ enable_clock(PERI_MSDC0_PDN + host->id, "SD");
++ msdc_vdd_on(host);
++#endif /* end of --- */
++ /* Configure to MMC/SD mode */
++ sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC);
++
++ /* Reset */
++ msdc_reset();
++ msdc_clr_fifo();
++
++ /* Disable card detection */
++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN);
++
++ /* Disable and clear all interrupts */
++ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
++ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
++
++#if 1
++ /* reset tuning parameter */
++ sdr_write32(MSDC_PAD_CTL0, 0x00090000);
++ sdr_write32(MSDC_PAD_CTL1, 0x000A0000);
++ sdr_write32(MSDC_PAD_CTL2, 0x000A0000);
++ // sdr_write32(MSDC_PAD_TUNE, 0x00000000);
++ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward
++ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000);
++ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward
++ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000);
++ sdr_write32(MSDC_IOCON, 0x00000000);
++#if 0 // use MT7620 default value: 0x403c004f
++ sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/
++#endif
++
++ if (sdr_read32(MSDC_ECO_VER) >= 4) {
++ if (host->id == 1) {
++ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1);
++ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1);
++
++ /* internal clock: latch read data */
++ sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK);
++ }
++ }
++#endif
++
++	/* For safety, SDC_CFG.SDIO_INT_DET_EN should be cleared and SDC_CFG.SDIO set
++	   in the pre-loader, U-Boot and kernel drivers; SDC_CFG.SDIO_INT_DET_EN is only
++	   set when the kernel driver wants to use the SDIO bus interrupt */
++	/* Configure to enable SDIO mode; this is mandatory, otherwise SDIO CMD5 fails */
++ sdr_set_bits(SDC_CFG, SDC_CFG_SDIO);
++
++	/* disable SDIO device interrupt detection */
++ sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE);
++
++	/* enable SMT for glitch filtering */
++ sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
++ sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
++ sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
++
++#if 1
++ /* set clk, cmd, dat pad driving */
++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, hw->clk_drv);
++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, hw->clk_drv);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, hw->cmd_drv);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, hw->cmd_drv);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, hw->dat_drv);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, hw->dat_drv);
++#else
++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
++#endif
++
++ /* set sampling edge */
++
++ /* write crc timeout detection */
++ sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1);
++
++ /* Configure to default data timeout */
++ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
++
++ msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
++
++ N_MSG(FUC, "init hardware done!");
++}
++
++/* called by msdc_drv_remove */
++static void msdc_deinit_hw(struct msdc_host *host)
++{
++ u32 base = host->base;
++
++ /* Disable and clear all interrupts */
++ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
++ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
++
++ /* Disable card detection */
++ msdc_enable_cd_irq(host, 0);
++ // msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */
++}
++
++/* init gpd and bd list in msdc_drv_probe */
++static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
++{
++ gpd_t *gpd = dma->gpd;
++ bd_t *bd = dma->bd;
++ bd_t *ptr, *prev;
++
++ /* we just support one gpd */
++ int bdlen = MAX_BD_PER_GPD;
++
++ /* init the 2 gpd */
++ memset(gpd, 0, sizeof(gpd_t) * 2);
++ //gpd->next = (void *)virt_to_phys(gpd + 1); /* pointer to a null gpd, bug! kmalloc <-> virt_to_phys */
++ //gpd->next = (dma->gpd_addr + 1); /* bug */
++ gpd->next = (void *)((u32)dma->gpd_addr + sizeof(gpd_t));
++
++ //gpd->intr = 0;
++ gpd->bdp = 1; /* hwo, cs, bd pointer */
++ //gpd->ptr = (void*)virt_to_phys(bd);
++ gpd->ptr = (void *)dma->bd_addr; /* physical address */
++
++ memset(bd, 0, sizeof(bd_t) * bdlen);
++ ptr = bd + bdlen - 1;
++ //ptr->eol = 1; /* 0 or 1 [Fix me]*/
++ //ptr->next = 0;
++
++ while (ptr != bd) {
++ prev = ptr - 1;
++ prev->next = (void *)(dma->bd_addr + sizeof(bd_t) *(ptr - bd));
++ ptr = prev;
++ }
++}
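++
++/*
++ * Note: the gpd/bd rings are allocated with dma_alloc_coherent() in
++ * msdc_drv_probe(), so the next/ptr links written here must be the DMA
++ * (physical) addresses from gpd_addr/bd_addr rather than kernel virtual
++ * pointers - hence the commented-out virt_to_phys() variants above.
++ */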
++
++static int msdc_drv_probe(struct platform_device *pdev)
++{
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ __iomem void *base;
++ struct mmc_host *mmc;
++ struct resource *mem;
++ struct msdc_host *host;
++ struct msdc_hw *hw;
++ int ret, irq;
++
++ pdev->dev.platform_data = &msdc0_hw;
++
++ if (of_property_read_bool(pdev->dev.of_node, "mtk,wp-en"))
++ msdc0_hw.flags |= MSDC_WP_PIN_EN;
++
++ /* Allocate MMC host for this device */
++ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev);
++ if (!mmc) return -ENOMEM;
++
++ hw = (struct msdc_hw*)pdev->dev.platform_data;
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ irq = platform_get_irq(pdev, 0);
++
++ //BUG_ON((!hw) || (!mem) || (irq < 0)); /* --- by chhung */
++
++ base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ /* Set host parameters to mmc */
++ mmc->ops = &mt_msdc_ops;
++ mmc->f_min = HOST_MIN_MCLK;
++ mmc->f_max = HOST_MAX_MCLK;
++ mmc->ocr_avail = MSDC_OCR_AVAIL;
++
++ /* For sd card: MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED,
++ For sdio : MSDC_EXT_SDIO_IRQ | MSDC_HIGHSPEED */
++ if (hw->flags & MSDC_HIGHSPEED) {
++ mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
++ }
++ if (hw->data_pins == 4) { /* current data_pins are all 4*/
++ mmc->caps |= MMC_CAP_4_BIT_DATA;
++ } else if (hw->data_pins == 8) {
++ mmc->caps |= MMC_CAP_8_BIT_DATA;
++ }
++ if ((hw->flags & MSDC_SDIO_IRQ) || (hw->flags & MSDC_EXT_SDIO_IRQ))
++ mmc->caps |= MMC_CAP_SDIO_IRQ; /* yes for sdio */
++
++ cd_active_low = !of_property_read_bool(pdev->dev.of_node, "mediatek,cd-high");
++ mtk_sw_poll = of_property_read_bool(pdev->dev.of_node, "mediatek,cd-poll");
++
++ if (mtk_sw_poll)
++ mmc->caps |= MMC_CAP_NEEDS_POLL;
++
++ /* MMC core transfer sizes tunable parameters */
++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0)
++ mmc->max_segs = MAX_HW_SGMTS;
++#else
++ mmc->max_hw_segs = MAX_HW_SGMTS;
++ mmc->max_phys_segs = MAX_PHY_SGMTS;
++#endif
++ mmc->max_seg_size = MAX_SGMT_SZ;
++ mmc->max_blk_size = HOST_MAX_BLKSZ;
++ mmc->max_req_size = MAX_REQ_SZ;
++ mmc->max_blk_count = mmc->max_req_size;
++
++ host = mmc_priv(mmc);
++ host->hw = hw;
++ host->mmc = mmc;
++ host->id = pdev->id;
++ host->error = 0;
++ host->irq = irq;
++ host->base = (unsigned long) base;
++	host->mclk = 0;                   /* mclk: the clock requested by the mmc sub-system */
++	host->hclk = hclks[hw->clk_src];  /* hclk: clock rate of the clock source feeding the msdc controller */
++	host->sclk = 0;                   /* sclk: the actual clock after division */
++ host->pm_state = PMSG_RESUME;
++ host->suspend = 0;
++ host->core_clkon = 0;
++ host->card_clkon = 0;
++ host->core_power = 0;
++ host->power_mode = MMC_POWER_OFF;
++// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 0 : 1;
++ host->timeout_ns = 0;
++ host->timeout_clks = DEFAULT_DTOC * 65536;
++
++ host->mrq = NULL;
++ //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */
++
++ host->dma.used_gpd = 0;
++ host->dma.used_bd = 0;
++
++ /* using dma_alloc_coherent*/ /* todo: using 1, for all 4 slots */
++ host->dma.gpd = dma_alloc_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), &host->dma.gpd_addr, GFP_KERNEL);
++ host->dma.bd = dma_alloc_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), &host->dma.bd_addr, GFP_KERNEL);
++ BUG_ON((!host->dma.gpd) || (!host->dma.bd));
++ msdc_init_gpd_bd(host, &host->dma);
++ /*for emmc*/
++ msdc_6575_host[pdev->id] = host;
++
++#if 0
++ tasklet_init(&host->card_tasklet, msdc_tasklet_card, (ulong)host);
++#else
++ INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card);
++#endif
++ spin_lock_init(&host->lock);
++ msdc_init_hw(host);
++
++ if (ralink_soc == MT762X_SOC_MT7621AT)
++ ret = request_irq((unsigned int)irq, msdc_irq, 0, dev_name(&pdev->dev), host);
++ else
++ ret = request_irq((unsigned int)irq, msdc_irq, IRQF_TRIGGER_LOW, dev_name(&pdev->dev), host);
++
++ if (ret) goto release;
++ // mt65xx_irq_unmask(irq); /* --- by chhung */
++
++ if (hw->flags & MSDC_CD_PIN_EN) { /* not set for sdio */
++ if (hw->request_cd_eirq) { /* not set for MT6575 */
++ hw->request_cd_eirq(msdc_eirq_cd, (void*)host); /* msdc_eirq_cd will not be used! */
++ }
++ }
++
++ if (hw->request_sdio_eirq) /* set to combo_sdio_request_eirq() for WIFI */
++ hw->request_sdio_eirq(msdc_eirq_sdio, (void*)host); /* msdc_eirq_sdio() will be called when EIRQ */
++
++ if (hw->register_pm) {/* yes for sdio */
++#ifdef CONFIG_PM
++ hw->register_pm(msdc_pm, (void*)host); /* combo_sdio_register_pm() */
++#endif
++ if(hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */
++ ERR_MSG("MSDC_SYS_SUSPEND and register_pm both set");
++ }
++ //mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* pm not controlled by system but by client. */ /* --- by chhung */
++ }
++
++ platform_set_drvdata(pdev, mmc);
++
++ ret = mmc_add_host(mmc);
++ if (ret) goto free_irq;
++
++ /* Config card detection pin and enable interrupts */
++ if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */
++ msdc_enable_cd_irq(host, 1);
++ } else {
++ msdc_enable_cd_irq(host, 0);
++ }
++
++ return 0;
++
++free_irq:
++ free_irq(irq, host);
++release:
++ platform_set_drvdata(pdev, NULL);
++ msdc_deinit_hw(host);
++
++#if 0
++ tasklet_kill(&host->card_tasklet);
++#else
++ cancel_delayed_work_sync(&host->card_delaywork);
++#endif
++
++ if (mem)
++ release_mem_region(mem->start, mem->end - mem->start + 1);
++
++ mmc_free_host(mmc);
++
++ return ret;
++}
++
++/* 4 devices share one driver; "drvdata" tells them apart */
++static int msdc_drv_remove(struct platform_device *pdev)
++{
++ struct mmc_host *mmc;
++ struct msdc_host *host;
++ struct resource *mem;
++
++ mmc = platform_get_drvdata(pdev);
++ BUG_ON(!mmc);
++
++ host = mmc_priv(mmc);
++ BUG_ON(!host);
++
++ ERR_MSG("removed !!!");
++
++ platform_set_drvdata(pdev, NULL);
++ mmc_remove_host(host->mmc);
++ msdc_deinit_hw(host);
++
++#if 0
++ tasklet_kill(&host->card_tasklet);
++#else
++ cancel_delayed_work_sync(&host->card_delaywork);
++#endif
++ free_irq(host->irq, host);
++
++ dma_free_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), host->dma.gpd, host->dma.gpd_addr);
++ dma_free_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), host->dma.bd, host->dma.bd_addr);
++
++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (mem)
++ release_mem_region(mem->start, mem->end - mem->start + 1);
++
++ mmc_free_host(host->mmc);
++
++ return 0;
++}
++
++/* Fix me: Power Flow */
++#ifdef CONFIG_PM
++static int msdc_drv_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ int ret = 0;
++ struct mmc_host *mmc = platform_get_drvdata(pdev);
++ struct msdc_host *host = mmc_priv(mmc);
++
++ if (mmc && state.event == PM_EVENT_SUSPEND && (host->hw->flags & MSDC_SYS_SUSPEND)) { /* will set for card */
++ msdc_pm(state, (void*)host);
++ }
++
++ return ret;
++}
++
++static int msdc_drv_resume(struct platform_device *pdev)
++{
++ int ret = 0;
++ struct mmc_host *mmc = platform_get_drvdata(pdev);
++ struct msdc_host *host = mmc_priv(mmc);
++ struct pm_message state;
++
++ state.event = PM_EVENT_RESUME;
++ if (mmc && (host->hw->flags & MSDC_SYS_SUSPEND)) {/* will set for card */
++ msdc_pm(state, (void*)host);
++ }
++
++	/* This means WIFI is not controlled by PM */
++
++ return ret;
++}
++#endif
++
++static const struct of_device_id mt7620_sdhci_match[] = {
++ { .compatible = "ralink,mt7620-sdhci" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, mt7620_sdhci_match);
++
++static struct platform_driver mt_msdc_driver = {
++ .probe = msdc_drv_probe,
++ .remove = msdc_drv_remove,
++#ifdef CONFIG_PM
++ .suspend = msdc_drv_suspend,
++ .resume = msdc_drv_resume,
++#endif
++ .driver = {
++ .name = DRV_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = mt7620_sdhci_match,
++ },
++};
++
++/*--------------------------------------------------------------------------*/
++/* module init/exit */
++/*--------------------------------------------------------------------------*/
++static int __init mt_msdc_init(void)
++{
++ int ret;
++/* +++ by chhung */
++ u32 reg;
++
++#if defined (CONFIG_MTD_ANY_RALINK)
++ extern int ra_check_flash_type(void);
++ if(ra_check_flash_type() == 2) { /* NAND */
++ printk("%s: !!!!! SDXC Module Initialize Fail !!!!!", __func__);
++ return 0;
++ }
++#endif
++ printk("MTK MSDC device init.\n");
++ mtk_sd_device.dev.platform_data = &msdc0_hw;
++if (ralink_soc == MT762X_SOC_MT7620A || ralink_soc == MT762X_SOC_MT7621AT) {
++//#if defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621)
++ reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3<<18);
++//#if defined (CONFIG_RALINK_MT7620)
++ if (ralink_soc == MT762X_SOC_MT7620A)
++ reg |= 0x1<<18;
++//#endif
++} else {
++//#elif defined (CONFIG_RALINK_MT7628)
++	/* TODO: can maybe be omitted when RAether has already toggled AGPIO_CFG */
++ reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x3c));
++ reg |= 0x1e << 16;
++ sdr_write32((volatile u32*)(RALINK_SYSCTL_BASE + 0x3c), reg);
++
++ reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3<<10);
++#if defined (CONFIG_MTK_MMC_EMMC_8BIT)
++ reg |= 0x3<<26 | 0x3<<28 | 0x3<<30;
++	msdc0_hw.data_pins = 8;
++#endif
++//#endif
++}
++ sdr_write32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60), reg);
++ //platform_device_register(&mtk_sd_device);
++/* end of +++ */
++
++ ret = platform_driver_register(&mt_msdc_driver);
++ if (ret) {
++ printk(KERN_ERR DRV_NAME ": Can't register driver");
++ return ret;
++ }
++ printk(KERN_INFO DRV_NAME ": MediaTek MT6575 MSDC Driver\n");
++
++#if defined (MT6575_SD_DEBUG)
++ msdc_debug_proc_init();
++#endif
++ return 0;
++}
++
++static void __exit mt_msdc_exit(void)
++{
++// platform_device_unregister(&mtk_sd_device);
++ platform_driver_unregister(&mt_msdc_driver);
++}
++
++module_init(mt_msdc_init);
++module_exit(mt_msdc_exit);
++MODULE_LICENSE("GPL");
diff --git a/target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch b/target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch
new file mode 100644
index 0000000000..b74a48a220
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch
@@ -0,0 +1,1757 @@
+From f1c4d9e622c800e1f38b3818f933ec7597d1ccfb Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:29:51 +0100
+Subject: [PATCH 47/53] DMA: ralink: add rt2880 dma engine
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/dma/Kconfig | 6 +
+ drivers/dma/Makefile | 1 +
+ drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/dmaengine.h | 1 +
+ 4 files changed, 585 insertions(+)
+ create mode 100644 drivers/dma/ralink-gdma.c
+
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -40,6 +40,18 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
+
++config DMA_RALINK
++ tristate "RALINK DMA support"
++ depends on RALINK && !SOC_RT288X
++ select DMA_ENGINE
++ select DMA_VIRTUAL_CHANNELS
++
++config MTK_HSDMA
++ tristate "MTK HSDMA support"
++ depends on RALINK && SOC_MT7621
++ select DMA_ENGINE
++ select DMA_VIRTUAL_CHANNELS
++
+ config DMA_ENGINE
+ bool
+
+--- a/drivers/dma/Makefile
++++ b/drivers/dma/Makefile
+@@ -71,6 +71,8 @@ obj-$(CONFIG_TI_EDMA) += edma.o
+ obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
+ obj-$(CONFIG_ZX_DMA) += zx_dma.o
+ obj-$(CONFIG_ST_FDMA) += st_fdma.o
++obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
++obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
+
+ obj-y += qcom/
+ obj-y += xilinx/
+--- /dev/null
++++ b/drivers/dma/ralink-gdma.c
+@@ -0,0 +1,928 @@
++/*
++ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
++ * GDMA4740 DMAC support
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/of_dma.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++#include "virt-dma.h"
++
++#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10)
++#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10)
++
++#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10)
++#define GDMA_REG_CTRL0_TX_MASK 0xffff
++#define GDMA_REG_CTRL0_TX_SHIFT 16
++#define GDMA_REG_CTRL0_CURR_MASK 0xff
++#define GDMA_REG_CTRL0_CURR_SHIFT 8
++#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7)
++#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6)
++#define GDMA_REG_CTRL0_BURST_MASK 0x7
++#define GDMA_REG_CTRL0_BURST_SHIFT 3
++#define GDMA_REG_CTRL0_DONE_INT BIT(2)
++#define GDMA_REG_CTRL0_ENABLE BIT(1)
++#define GDMA_REG_CTRL0_SW_MODE BIT(0)
++
++#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10)
++#define GDMA_REG_CTRL1_SEG_MASK 0xf
++#define GDMA_REG_CTRL1_SEG_SHIFT 22
++#define GDMA_REG_CTRL1_REQ_MASK 0x3f
++#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16
++#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8
++#define GDMA_REG_CTRL1_CONTINOUS BIT(14)
++#define GDMA_REG_CTRL1_NEXT_MASK 0x1f
++#define GDMA_REG_CTRL1_NEXT_SHIFT 3
++#define GDMA_REG_CTRL1_COHERENT BIT(2)
++#define GDMA_REG_CTRL1_FAIL BIT(1)
++#define GDMA_REG_CTRL1_MASK BIT(0)
++
++#define GDMA_REG_UNMASK_INT 0x200
++#define GDMA_REG_DONE_INT 0x204
++
++#define GDMA_REG_GCT 0x220
++#define GDMA_REG_GCT_CHAN_MASK 0x3
++#define GDMA_REG_GCT_CHAN_SHIFT 3
++#define GDMA_REG_GCT_VER_MASK 0x3
++#define GDMA_REG_GCT_VER_SHIFT 1
++#define GDMA_REG_GCT_ARBIT_RR BIT(0)
++
++#define GDMA_REG_REQSTS 0x2a0
++#define GDMA_REG_ACKSTS 0x2a4
++#define GDMA_REG_FINSTS 0x2a8
++
++/* for RT305X gdma registers */
++#define GDMA_RT305X_CTRL0_REQ_MASK 0xf
++#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT 12
++#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT 8
++
++#define GDMA_RT305X_CTRL1_FAIL BIT(4)
++#define GDMA_RT305X_CTRL1_NEXT_MASK 0x7
++#define GDMA_RT305X_CTRL1_NEXT_SHIFT 1
++
++#define GDMA_RT305X_STATUS_INT 0x80
++#define GDMA_RT305X_STATUS_SIGNAL 0x84
++#define GDMA_RT305X_GCT 0x88
++
++/* for MT7621 gdma registers */
++#define GDMA_REG_PERF_START(x) (0x230 + (x) * 0x8)
++#define GDMA_REG_PERF_END(x) (0x234 + (x) * 0x8)
++
++enum gdma_dma_transfer_size {
++ GDMA_TRANSFER_SIZE_4BYTE = 0,
++ GDMA_TRANSFER_SIZE_8BYTE = 1,
++ GDMA_TRANSFER_SIZE_16BYTE = 2,
++ GDMA_TRANSFER_SIZE_32BYTE = 3,
++ GDMA_TRANSFER_SIZE_64BYTE = 4,
++};
++
++struct gdma_dma_sg {
++ dma_addr_t src_addr;
++ dma_addr_t dst_addr;
++ u32 len;
++};
++
++struct gdma_dma_desc {
++ struct virt_dma_desc vdesc;
++
++ enum dma_transfer_direction direction;
++ bool cyclic;
++
++ u32 residue;
++ unsigned int num_sgs;
++ struct gdma_dma_sg sg[];
++};
++
++struct gdma_dmaengine_chan {
++ struct virt_dma_chan vchan;
++ unsigned int id;
++ unsigned int slave_id;
++
++ dma_addr_t fifo_addr;
++ enum gdma_dma_transfer_size burst_size;
++
++ struct gdma_dma_desc *desc;
++ unsigned int next_sg;
++};
++
++struct gdma_dma_dev {
++ struct dma_device ddev;
++ struct device_dma_parameters dma_parms;
++ struct gdma_data *data;
++ void __iomem *base;
++ struct tasklet_struct task;
++ volatile unsigned long chan_issued;
++ atomic_t cnt;
++
++ struct gdma_dmaengine_chan chan[];
++};
++
++struct gdma_data {
++ int chancnt;
++ u32 done_int_reg;
++ void (*init)(struct gdma_dma_dev *dma_dev);
++ int (*start_transfer)(struct gdma_dmaengine_chan *chan);
++};
++
++static struct gdma_dma_dev *gdma_dma_chan_get_dev(
++ struct gdma_dmaengine_chan *chan)
++{
++ return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
++ ddev);
++}
++
++static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
++{
++ return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
++}
++
++static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
++{
++ return container_of(vdesc, struct gdma_dma_desc, vdesc);
++}
++
++static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
++ unsigned int reg)
++{
++ return readl(dma_dev->base + reg);
++}
++
++static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
++ unsigned reg, uint32_t val)
++{
++ writel(val, dma_dev->base + reg);
++}
++
++static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
++{
++ return kzalloc(sizeof(struct gdma_dma_desc) +
++ sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
++}
++
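++/*
++ * Maps the dmaengine maxburst value (in 4-byte bus words here) onto the
++ * hardware burst encoding below, e.g. a maxburst of 8 words selects
++ * GDMA_TRANSFER_SIZE_32BYTE.
++ */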
++static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
++{
++ if (maxburst < 2)
++ return GDMA_TRANSFER_SIZE_4BYTE;
++ else if (maxburst < 4)
++ return GDMA_TRANSFER_SIZE_8BYTE;
++ else if (maxburst < 8)
++ return GDMA_TRANSFER_SIZE_16BYTE;
++ else if (maxburst < 16)
++ return GDMA_TRANSFER_SIZE_32BYTE;
++ else
++ return GDMA_TRANSFER_SIZE_64BYTE;
++}
++
++static int gdma_dma_config(struct dma_chan *c,
++ struct dma_slave_config *config)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
++
++ if (config->device_fc) {
++ dev_err(dma_dev->ddev.dev, "not support flow controller\n");
++ return -EINVAL;
++ }
++
++ switch (config->direction) {
++ case DMA_MEM_TO_DEV:
++ if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
++ dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
++ return -EINVAL;
++ }
++ chan->slave_id = config->slave_id;
++ chan->fifo_addr = config->dst_addr;
++ chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
++ break;
++ case DMA_DEV_TO_MEM:
++ if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
++ dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
++ return -EINVAL;
++ }
++ chan->slave_id = config->slave_id;
++ chan->fifo_addr = config->src_addr;
++ chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
++ break;
++ default:
++ dev_err(dma_dev->ddev.dev, "direction type %d error\n",
++ config->direction);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int gdma_dma_terminate_all(struct dma_chan *c)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
++ unsigned long flags, timeout;
++ LIST_HEAD(head);
++ int i = 0;
++
++ spin_lock_irqsave(&chan->vchan.lock, flags);
++ chan->desc = NULL;
++ clear_bit(chan->id, &dma_dev->chan_issued);
++ vchan_get_all_descriptors(&chan->vchan, &head);
++ spin_unlock_irqrestore(&chan->vchan.lock, flags);
++
++ vchan_dma_desc_free_list(&chan->vchan, &head);
++
++ /* wait for the dma transfer to complete */
++ timeout = jiffies + msecs_to_jiffies(5000);
++ while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
++ GDMA_REG_CTRL0_ENABLE) {
++ if (time_after_eq(jiffies, timeout)) {
++ dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
++ chan->id);
++ /* restore to init value */
++ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
++ break;
++ }
++ cpu_relax();
++ i++;
++ }
++
++ if (i)
++ dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
++ chan->id, i);
++
++ return 0;
++}
++
++static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
++{
++ dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
++ "ctr1 %08x, intr %08x, signal %08x\n", id,
++ gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
++ gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
++ gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
++}
++
++static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
++{
++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
++ dma_addr_t src_addr, dst_addr;
++ struct gdma_dma_sg *sg;
++ uint32_t ctrl0, ctrl1;
++
++ /* verify chan is already stopped */
++ ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
++ if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
++ dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n",
++ chan->id, ctrl0);
++ rt305x_dump_reg(dma_dev, chan->id);
++ return -EINVAL;
++ }
++
++ sg = &chan->desc->sg[chan->next_sg];
++ if (chan->desc->direction == DMA_MEM_TO_DEV) {
++ src_addr = sg->src_addr;
++ dst_addr = chan->fifo_addr;
++ ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED | \
++ (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
++ (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
++ } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
++ src_addr = chan->fifo_addr;
++ dst_addr = sg->dst_addr;
++ ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED | \
++ (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \
++ (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
++ } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
++ /*
++ * TODO: the memcpy function has bugs: it sometimes copies 8 extra
++ * bytes of data when verifying with dmatest.
++ */
++ src_addr = sg->src_addr;
++ dst_addr = sg->dst_addr;
++ ctrl0 = GDMA_REG_CTRL0_SW_MODE | \
++ (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
++ (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
++ } else {
++ dev_err(dma_dev->ddev.dev, "direction type %d error\n",
++ chan->desc->direction);
++ return -EINVAL;
++ }
++
++ ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
++ (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
++ GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
++ ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
++
++ chan->next_sg++;
++ gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
++ gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
++ gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
++
++ /* make sure next_sg is updated */
++ wmb();
++ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
++
++ return 0;
++}
++
++static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
++{
++ dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \
++ "ctr1 %08x, unmask %08x, done %08x, " \
++ "req %08x, ack %08x, fin %08x\n", id,
++ gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
++ gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
++ gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
++ gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
++ gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
++ gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
++}
++
++static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
++{
++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
++ dma_addr_t src_addr, dst_addr;
++ struct gdma_dma_sg *sg;
++ uint32_t ctrl0, ctrl1;
++
++ /* verify chan is already stopped */
++ ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
++ if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
++ dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n",
++ chan->id, ctrl0);
++ rt3883_dump_reg(dma_dev, chan->id);
++ return -EINVAL;
++ }
++
++ sg = &chan->desc->sg[chan->next_sg];
++ if (chan->desc->direction == DMA_MEM_TO_DEV) {
++ src_addr = sg->src_addr;
++ dst_addr = chan->fifo_addr;
++ ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
++ ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
++ (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
++ } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
++ src_addr = chan->fifo_addr;
++ dst_addr = sg->dst_addr;
++ ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
++ ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
++ (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
++ GDMA_REG_CTRL1_COHERENT;
++ } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
++ src_addr = sg->src_addr;
++ dst_addr = sg->dst_addr;
++ ctrl0 = GDMA_REG_CTRL0_SW_MODE;
++ ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \
++ (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \
++ GDMA_REG_CTRL1_COHERENT;
++ } else {
++ dev_err(dma_dev->ddev.dev, "direction type %d error\n",
++ chan->desc->direction);
++ return -EINVAL;
++ }
++
++ ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \
++ (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \
++ GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
++ ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
++
++ chan->next_sg++;
++ gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
++ gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
++ gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
++
++ /* make sure next_sg is updated */
++ wmb();
++ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
++
++ return 0;
++}
++
++static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
++ struct gdma_dmaengine_chan *chan)
++{
++ return dma_dev->data->start_transfer(chan);
++}
++
++static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
++{
++ struct virt_dma_desc *vdesc;
++
++ vdesc = vchan_next_desc(&chan->vchan);
++ if (!vdesc) {
++ chan->desc = NULL;
++ return 0;
++ }
++ chan->desc = to_gdma_dma_desc(vdesc);
++ chan->next_sg = 0;
++
++ return 1;
++}
++
++static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
++ struct gdma_dmaengine_chan *chan)
++{
++ struct gdma_dma_desc *desc;
++ unsigned long flags;
++ int chan_issued;
++
++ chan_issued = 0;
++ spin_lock_irqsave(&chan->vchan.lock, flags);
++ desc = chan->desc;
++ if (desc) {
++ if (desc->cyclic) {
++ vchan_cyclic_callback(&desc->vdesc);
++ if (chan->next_sg == desc->num_sgs)
++ chan->next_sg = 0;
++ chan_issued = 1;
++ } else {
++ desc->residue -= desc->sg[chan->next_sg - 1].len;
++ if (chan->next_sg == desc->num_sgs) {
++ list_del(&desc->vdesc.node);
++ vchan_cookie_complete(&desc->vdesc);
++ chan_issued = gdma_next_desc(chan);
++ } else
++ chan_issued = 1;
++ }
++ } else
++ dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
++ chan->id);
++ if (chan_issued)
++ set_bit(chan->id, &dma_dev->chan_issued);
++ spin_unlock_irqrestore(&chan->vchan.lock, flags);
++}
++
++static irqreturn_t gdma_dma_irq(int irq, void *devid)
++{
++ struct gdma_dma_dev *dma_dev = devid;
++ u32 done, done_reg;
++ unsigned int i;
++
++ done_reg = dma_dev->data->done_int_reg;
++ done = gdma_dma_read(dma_dev, done_reg);
++ if (unlikely(!done))
++ return IRQ_NONE;
++
++ /* clean done bits */
++ gdma_dma_write(dma_dev, done_reg, done);
++
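++ /* bit i of the done mask corresponds to channel i */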
++ i = 0;
++ while (done) {
++ if (done & 0x1) {
++ gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
++ atomic_dec(&dma_dev->cnt);
++ }
++ done >>= 1;
++ i++;
++ }
++
++ /* start only if there is work to do */
++ if (dma_dev->chan_issued)
++ tasklet_schedule(&dma_dev->task);
++
++ return IRQ_HANDLED;
++}
++
++static void gdma_dma_issue_pending(struct dma_chan *c)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
++ unsigned long flags;
++
++ spin_lock_irqsave(&chan->vchan.lock, flags);
++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
++ if (gdma_next_desc(chan)) {
++ set_bit(chan->id, &dma_dev->chan_issued);
++ tasklet_schedule(&dma_dev->task);
++ } else
++ dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
++ chan->id);
++ }
++ spin_unlock_irqrestore(&chan->vchan.lock, flags);
++}
++
++static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
++ struct dma_chan *c, struct scatterlist *sgl,
++ unsigned int sg_len, enum dma_transfer_direction direction,
++ unsigned long flags, void *context)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct gdma_dma_desc *desc;
++ struct scatterlist *sg;
++ unsigned int i;
++
++ desc = gdma_dma_alloc_desc(sg_len);
++ if (!desc) {
++ dev_err(c->device->dev, "alloc sg decs error\n");
++ return NULL;
++ }
++ desc->residue = 0;
++
++ for_each_sg(sgl, sg, sg_len, i) {
++ if (direction == DMA_MEM_TO_DEV)
++ desc->sg[i].src_addr = sg_dma_address(sg);
++ else if (direction == DMA_DEV_TO_MEM)
++ desc->sg[i].dst_addr = sg_dma_address(sg);
++ else {
++ dev_err(c->device->dev, "direction type %d error\n",
++ direction);
++ goto free_desc;
++ }
++
++ if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
++ dev_err(c->device->dev, "sg len too large %d\n",
++ sg_dma_len(sg));
++ goto free_desc;
++ }
++ desc->sg[i].len = sg_dma_len(sg);
++ desc->residue += sg_dma_len(sg);
++ }
++
++ desc->num_sgs = sg_len;
++ desc->direction = direction;
++ desc->cyclic = false;
++
++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++
++free_desc:
++ kfree(desc);
++ return NULL;
++}
++
++static struct dma_async_tx_descriptor * gdma_dma_prep_dma_memcpy(
++ struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
++ size_t len, unsigned long flags)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct gdma_dma_desc *desc;
++ unsigned int num_periods, i;
++ size_t xfer_count;
++
++ if (len <= 0)
++ return NULL;
++
++ chan->burst_size = gdma_dma_maxburst(len >> 2);
++
++ xfer_count = GDMA_REG_CTRL0_TX_MASK;
++ num_periods = DIV_ROUND_UP(len, xfer_count);
++
++ desc = gdma_dma_alloc_desc(num_periods);
++ if (!desc) {
++ dev_err(c->device->dev, "alloc memcpy decs error\n");
++ return NULL;
++ }
++ desc->residue = len;
++
++ for (i = 0; i < num_periods; i++) {
++ desc->sg[i].src_addr = src;
++ desc->sg[i].dst_addr = dest;
++ if (len > xfer_count) {
++ desc->sg[i].len = xfer_count;
++ } else {
++ desc->sg[i].len = len;
++ }
++ src += desc->sg[i].len;
++ dest += desc->sg[i].len;
++ len -= desc->sg[i].len;
++ }
++
++ desc->num_sgs = num_periods;
++ desc->direction = DMA_MEM_TO_MEM;
++ desc->cyclic = false;
++
++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++}
++
++static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
++ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
++ size_t period_len, enum dma_transfer_direction direction,
++ unsigned long flags)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct gdma_dma_desc *desc;
++ unsigned int num_periods, i;
++
++ if (buf_len % period_len)
++ return NULL;
++
++ if (period_len > GDMA_REG_CTRL0_TX_MASK) {
++ dev_err(c->device->dev, "cyclic len too large %d\n",
++ period_len);
++ return NULL;
++ }
++
++ num_periods = buf_len / period_len;
++ desc = gdma_dma_alloc_desc(num_periods);
++ if (!desc) {
++ dev_err(c->device->dev, "alloc cyclic decs error\n");
++ return NULL;
++ }
++ desc->residue = buf_len;
++
++ for (i = 0; i < num_periods; i++) {
++ if (direction == DMA_MEM_TO_DEV)
++ desc->sg[i].src_addr = buf_addr;
++ else if (direction == DMA_DEV_TO_MEM)
++ desc->sg[i].dst_addr = buf_addr;
++ else {
++ dev_err(c->device->dev, "direction type %d error\n",
++ direction);
++ goto free_desc;
++ }
++ desc->sg[i].len = period_len;
++ buf_addr += period_len;
++ }
++
++ desc->num_sgs = num_periods;
++ desc->direction = direction;
++ desc->cyclic = true;
++
++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++
++free_desc:
++ kfree(desc);
++ return NULL;
++}
++
++static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
++ dma_cookie_t cookie, struct dma_tx_state *state)
++{
++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
++ struct virt_dma_desc *vdesc;
++ enum dma_status status;
++ unsigned long flags;
++ struct gdma_dma_desc *desc;
++
++ status = dma_cookie_status(c, cookie, state);
++ if (status == DMA_COMPLETE || !state)
++ return status;
++
++ spin_lock_irqsave(&chan->vchan.lock, flags);
++ desc = chan->desc;
++ if (desc && (cookie == desc->vdesc.tx.cookie)) {
++ /*
++ * desc->residue is never updated in the cyclic case, so report the
++ * remaining room to the end of the circular buffer instead.
++ */
++ if (desc->cyclic)
++ state->residue = desc->residue -
++ ((chan->next_sg - 1) * desc->sg[0].len);
++ else
++ state->residue = desc->residue;
++ } else if ((vdesc = vchan_find_desc(&chan->vchan, cookie)))
++ state->residue = to_gdma_dma_desc(vdesc)->residue;
++ spin_unlock_irqrestore(&chan->vchan.lock, flags);
++
++ dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);
++
++ return status;
++}
++
++static void gdma_dma_free_chan_resources(struct dma_chan *c)
++{
++ vchan_free_chan_resources(to_virt_chan(c));
++}
++
++static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
++{
++ kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
++}
++
++static void gdma_dma_tasklet(unsigned long arg)
++{
++ struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg;
++ struct gdma_dmaengine_chan *chan;
++ static unsigned int last_chan;
++ unsigned int i, chan_mask;
++
++ /* resume from the last channel so all channels are round-robined */
++ i = last_chan;
++ chan_mask = dma_dev->data->chancnt - 1;
++ do {
++ /*
++ * On mt7621, when verifying with dmatest with all channels enabled,
++ * we need to limit the number of channels working at the same time
++ * to two; otherwise the data gets corrupted.
++ */
++ if (atomic_read(&dma_dev->cnt) >= 2) {
++ last_chan = i;
++ break;
++ }
++
++ if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
++ chan = &dma_dev->chan[i];
++ if (chan->desc) {
++ atomic_inc(&dma_dev->cnt);
++ gdma_start_transfer(dma_dev, chan);
++ } else
++ dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n", chan->id);
++
++ if (!dma_dev->chan_issued)
++ break;
++ }
++
++ i = (i + 1) & chan_mask;
++ } while (i != last_chan);
++}
++
++static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
++{
++ uint32_t gct;
++
++ /* all chans round robin */
++ gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
++
++ gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
++ dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
++ (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
++ 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
++ GDMA_REG_GCT_CHAN_MASK));
++}
++
++static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
++{
++ uint32_t gct;
++
++ /* all chans round robin */
++ gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
++
++ gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
++ dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
++ (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
++ 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
++ GDMA_REG_GCT_CHAN_MASK));
++}
++
++static struct gdma_data rt305x_gdma_data = {
++ .chancnt = 8,
++ .done_int_reg = GDMA_RT305X_STATUS_INT,
++ .init = rt305x_gdma_init,
++ .start_transfer = rt305x_gdma_start_transfer,
++};
++
++static struct gdma_data rt3883_gdma_data = {
++ .chancnt = 16,
++ .done_int_reg = GDMA_REG_DONE_INT,
++ .init = rt3883_gdma_init,
++ .start_transfer = rt3883_gdma_start_transfer,
++};
++
++static const struct of_device_id gdma_of_match_table[] = {
++ { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
++ { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
++ { },
++};
++
++static int gdma_dma_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *match;
++ struct gdma_dmaengine_chan *chan;
++ struct gdma_dma_dev *dma_dev;
++ struct dma_device *dd;
++ unsigned int i;
++ struct resource *res;
++ int ret;
++ int irq;
++ void __iomem *base;
++ struct gdma_data *data;
++
++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ match = of_match_device(gdma_of_match_table, &pdev->dev);
++ if (!match)
++ return -EINVAL;
++ data = (struct gdma_data *) match->data;
++
++ dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) +
++ (sizeof(struct gdma_dmaengine_chan) * data->chancnt),
++ GFP_KERNEL);
++ if (!dma_dev) {
++ dev_err(&pdev->dev, "alloc dma device failed\n");
++ return -EINVAL;
++ }
++ dma_dev->data = data;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++ dma_dev->base = base;
++ tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev);
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(&pdev->dev, "failed to get irq\n");
++ return -EINVAL;
++ }
++ ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
++ 0, dev_name(&pdev->dev), dma_dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ device_reset(&pdev->dev);
++
++ dd = &dma_dev->ddev;
++ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
++ dma_cap_set(DMA_SLAVE, dd->cap_mask);
++ dma_cap_set(DMA_CYCLIC, dd->cap_mask);
++ dd->device_free_chan_resources = gdma_dma_free_chan_resources;
++ dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
++ dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
++ dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
++ dd->device_config = gdma_dma_config;
++ dd->device_terminate_all = gdma_dma_terminate_all;
++ dd->device_tx_status = gdma_dma_tx_status;
++ dd->device_issue_pending = gdma_dma_issue_pending;
++
++ dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
++ dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
++ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
++ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
++
++ dd->dev = &pdev->dev;
++ dd->dev->dma_parms = &dma_dev->dma_parms;
++ dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
++ INIT_LIST_HEAD(&dd->channels);
++
++ for (i = 0; i < data->chancnt; i++) {
++ chan = &dma_dev->chan[i];
++ chan->id = i;
++ chan->vchan.desc_free = gdma_dma_desc_free;
++ vchan_init(&chan->vchan, dd);
++ }
++
++ /* init hardware */
++ data->init(dma_dev);
++
++ ret = dma_async_device_register(dd);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register dma device\n");
++ return ret;
++ }
++
++ ret = of_dma_controller_register(pdev->dev.of_node,
++ of_dma_xlate_by_chan_id, dma_dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register of dma controller\n");
++ goto err_unregister;
++ }
++
++ platform_set_drvdata(pdev, dma_dev);
++
++ return 0;
++
++err_unregister:
++ dma_async_device_unregister(dd);
++ return ret;
++}
++
++static int gdma_dma_remove(struct platform_device *pdev)
++{
++ struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
++
++ tasklet_kill(&dma_dev->task);
++ of_dma_controller_free(pdev->dev.of_node);
++ dma_async_device_unregister(&dma_dev->ddev);
++
++ return 0;
++}
++
++static struct platform_driver gdma_dma_driver = {
++ .probe = gdma_dma_probe,
++ .remove = gdma_dma_remove,
++ .driver = {
++ .name = "gdma-rt2880",
++ .of_match_table = gdma_of_match_table,
++ },
++};
++module_platform_driver(gdma_dma_driver);
++
++MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
++MODULE_DESCRIPTION("Ralink/MTK DMA driver");
++MODULE_LICENSE("GPL v2");
+--- a/include/linux/dmaengine.h
++++ b/include/linux/dmaengine.h
+@@ -525,6 +525,7 @@ static inline void dma_set_unmap(struct
+ struct dmaengine_unmap_data *
+ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags);
+ void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap);
++struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+ #else
+ static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+ struct dmaengine_unmap_data *unmap)
+--- /dev/null
++++ b/drivers/dma/mtk-hsdma.c
+@@ -0,0 +1,767 @@
++/*
++ * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
++ * MTK HSDMA support
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/of_dma.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++#include "virt-dma.h"
++
++#define HSDMA_BASE_OFFSET 0x800
++
++#define HSDMA_REG_TX_BASE 0x00
++#define HSDMA_REG_TX_CNT 0x04
++#define HSDMA_REG_TX_CTX 0x08
++#define HSDMA_REG_TX_DTX 0x0c
++#define HSDMA_REG_RX_BASE 0x100
++#define HSDMA_REG_RX_CNT 0x104
++#define HSDMA_REG_RX_CRX 0x108
++#define HSDMA_REG_RX_DRX 0x10c
++#define HSDMA_REG_INFO 0x200
++#define HSDMA_REG_GLO_CFG 0x204
++#define HSDMA_REG_RST_CFG 0x208
++#define HSDMA_REG_DELAY_INT 0x20c
++#define HSDMA_REG_FREEQ_THRES 0x210
++#define HSDMA_REG_INT_STATUS 0x220
++#define HSDMA_REG_INT_MASK 0x228
++#define HSDMA_REG_SCH_Q01 0x280
++#define HSDMA_REG_SCH_Q23 0x284
++
++#define HSDMA_DESCS_MAX 0xfff
++#define HSDMA_DESCS_NUM 8
++#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
++#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
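++/* ring indices wrap modulo HSDMA_DESCS_NUM, e.g. HSDMA_NEXT_DESC(7) == 0 */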
++
++/* HSDMA_REG_INFO */
++#define HSDMA_INFO_INDEX_MASK 0xf
++#define HSDMA_INFO_INDEX_SHIFT 24
++#define HSDMA_INFO_BASE_MASK 0xff
++#define HSDMA_INFO_BASE_SHIFT 16
++#define HSDMA_INFO_RX_MASK 0xff
++#define HSDMA_INFO_RX_SHIFT 8
++#define HSDMA_INFO_TX_MASK 0xff
++#define HSDMA_INFO_TX_SHIFT 0
++
++/* HSDMA_REG_GLO_CFG */
++#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
++#define HSDMA_GLO_CLK_GATE BIT(30)
++#define HSDMA_GLO_BYTE_SWAP BIT(29)
++#define HSDMA_GLO_MULTI_DMA BIT(10)
++#define HSDMA_GLO_TWO_BUF BIT(9)
++#define HSDMA_GLO_32B_DESC BIT(8)
++#define HSDMA_GLO_BIG_ENDIAN BIT(7)
++#define HSDMA_GLO_TX_DONE BIT(6)
++#define HSDMA_GLO_BT_MASK 0x3
++#define HSDMA_GLO_BT_SHIFT 4
++#define HSDMA_GLO_RX_BUSY BIT(3)
++#define HSDMA_GLO_RX_DMA BIT(2)
++#define HSDMA_GLO_TX_BUSY BIT(1)
++#define HSDMA_GLO_TX_DMA BIT(0)
++
++#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
++
++#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
++ HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
++
++/* HSDMA_REG_RST_CFG */
++#define HSDMA_RST_RX_SHIFT 16
++#define HSDMA_RST_TX_SHIFT 0
++
++/* HSDMA_REG_DELAY_INT */
++#define HSDMA_DELAY_INT_EN BIT(15)
++#define HSDMA_DELAY_PEND_OFFSET 8
++#define HSDMA_DELAY_TIME_OFFSET 0
++#define HSDMA_DELAY_TX_OFFSET 16
++#define HSDMA_DELAY_RX_OFFSET 0
++
++#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
++ ((x) << HSDMA_DELAY_PEND_OFFSET))
++#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
++ HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
++
++/* HSDMA_REG_INT_STATUS */
++#define HSDMA_INT_DELAY_RX_COH BIT(31)
++#define HSDMA_INT_DELAY_RX_INT BIT(30)
++#define HSDMA_INT_DELAY_TX_COH BIT(29)
++#define HSDMA_INT_DELAY_TX_INT BIT(28)
++#define HSDMA_INT_RX_MASK 0x3
++#define HSDMA_INT_RX_SHIFT 16
++#define HSDMA_INT_RX_Q0 BIT(16)
++#define HSDMA_INT_TX_MASK 0xf
++#define HSDMA_INT_TX_SHIFT 0
++#define HSDMA_INT_TX_Q0 BIT(0)
++
++/* tx/rx dma desc flags */
++#define HSDMA_PLEN_MASK 0x3fff
++#define HSDMA_DESC_DONE BIT(31)
++#define HSDMA_DESC_LS0 BIT(30)
++#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
++#define HSDMA_DESC_TAG BIT(15)
++#define HSDMA_DESC_LS1 BIT(14)
++#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
++
++/* align 4 bytes */
++#define HSDMA_ALIGN_SIZE 3
++/* align size 128bytes */
++#define HSDMA_MAX_PLEN 0x3f80
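++/* 0x3f80 is HSDMA_PLEN_MASK (0x3fff) rounded down to a 128-byte multiple */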
++
++struct hsdma_desc {
++ u32 addr0;
++ u32 flags;
++ u32 addr1;
++ u32 unused;
++};
++
++struct mtk_hsdma_sg {
++ dma_addr_t src_addr;
++ dma_addr_t dst_addr;
++ u32 len;
++};
++
++struct mtk_hsdma_desc {
++ struct virt_dma_desc vdesc;
++ unsigned int num_sgs;
++ struct mtk_hsdma_sg sg[1];
++};
++
++struct mtk_hsdma_chan {
++ struct virt_dma_chan vchan;
++ unsigned int id;
++ dma_addr_t desc_addr;
++ int tx_idx;
++ int rx_idx;
++ struct hsdma_desc *tx_ring;
++ struct hsdma_desc *rx_ring;
++ struct mtk_hsdma_desc *desc;
++ unsigned int next_sg;
++};
++
++struct mtk_hsdam_engine {
++ struct dma_device ddev;
++ struct device_dma_parameters dma_parms;
++ void __iomem *base;
++ struct tasklet_struct task;
++ volatile unsigned long chan_issued;
++
++ struct mtk_hsdma_chan chan[1];
++};
++
++static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
++ struct mtk_hsdma_chan *chan)
++{
++ return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
++ ddev);
++}
++
++static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
++{
++ return container_of(c, struct mtk_hsdma_chan, vchan.chan);
++}
++
++static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
++ struct virt_dma_desc *vdesc)
++{
++ return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
++}
++
++static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
++{
++ return readl(hsdma->base + reg);
++}
++
++static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
++ unsigned reg, u32 val)
++{
++ writel(val, hsdma->base + reg);
++}
++
++static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ chan->tx_idx = 0;
++ chan->rx_idx = HSDMA_DESCS_NUM - 1;
++
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++ 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++ 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
++}
++
++static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
++{
++ dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
++ "tctx %08x, tdtx: %08x, rbase %08x, " \
++ "rcnt %08x, rctx %08x, rdtx %08x\n",
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
++
++ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
++ "intr_stat %08x, intr_mask %08x\n",
++ mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
++ mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
++ mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
++}
++
++static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ struct hsdma_desc *tx_desc;
++ struct hsdma_desc *rx_desc;
++ int i;
++
++ dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
++ chan->tx_idx, chan->rx_idx);
++
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ tx_desc = &chan->tx_ring[i];
++ rx_desc = &chan->rx_ring[i];
++
++ dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
++ "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
++ i, tx_desc->addr0, tx_desc->flags, \
++ tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
++ }
++}
++
++static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ int i;
++
++ /* disable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++ /* disable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++ /* init desc value */
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ chan->tx_ring[i].addr0 = 0;
++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 |
++ HSDMA_DESC_DONE;
++ }
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ chan->rx_ring[i].addr0 = 0;
++ chan->rx_ring[i].flags = 0;
++ }
++
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++
++ /* enable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++ /* enable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++}
++
++static int mtk_hsdma_terminate_all(struct dma_chan *c)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++ unsigned long timeout;
++ LIST_HEAD(head);
++
++ spin_lock_bh(&chan->vchan.lock);
++ chan->desc = NULL;
++ clear_bit(chan->id, &hsdma->chan_issued);
++ vchan_get_all_descriptors(&chan->vchan, &head);
++ spin_unlock_bh(&chan->vchan.lock);
++
++ vchan_dma_desc_free_list(&chan->vchan, &head);
++
++ /* wait for the dma transfer to complete */
++ timeout = jiffies + msecs_to_jiffies(2000);
++ while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
++ (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
++ if (time_after_eq(jiffies, timeout)) {
++ hsdma_dump_desc(hsdma, chan);
++ mtk_hsdma_reset(hsdma, chan);
++ dev_err(hsdma->ddev.dev, "timeout, reset it\n");
++ break;
++ }
++ cpu_relax();
++ }
++
++ return 0;
++}
++
++static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ dma_addr_t src, dst;
++ size_t len, tlen;
++ struct hsdma_desc *tx_desc, *rx_desc;
++ struct mtk_hsdma_sg *sg;
++ unsigned int i;
++ int rx_idx;
++
++ sg = &chan->desc->sg[0];
++ len = sg->len;
++ chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
++
++ /* tx desc */
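++ /*
++ * Each hardware descriptor carries up to two segments: an even-numbered
++ * segment claims a new tx descriptor and fills addr0/PLEN0, and the
++ * following odd-numbered segment fills addr1/PLEN1 of that descriptor.
++ */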
++ src = sg->src_addr;
++ for (i = 0; i < chan->desc->num_sgs; i++) {
++ if (len > HSDMA_MAX_PLEN)
++ tlen = HSDMA_MAX_PLEN;
++ else
++ tlen = len;
++
++ if (i & 0x1) {
++ tx_desc->addr1 = src;
++ tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
++ } else {
++ tx_desc = &chan->tx_ring[chan->tx_idx];
++ tx_desc->addr0 = src;
++ tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++ /* update index */
++ chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
++ }
++
++ src += tlen;
++ len -= tlen;
++ }
++ if (i & 0x1)
++ tx_desc->flags |= HSDMA_DESC_LS0;
++ else
++ tx_desc->flags |= HSDMA_DESC_LS1;
++
++ /* rx desc */
++ rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++ len = sg->len;
++ dst = sg->dst_addr;
++ for (i = 0; i < chan->desc->num_sgs; i++) {
++ rx_desc = &chan->rx_ring[rx_idx];
++ if (len > HSDMA_MAX_PLEN)
++ tlen = HSDMA_MAX_PLEN;
++ else
++ tlen = len;
++
++ rx_desc->addr0 = dst;
++ rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++ dst += tlen;
++ len -= tlen;
++
++ /* update index */
++ rx_idx = HSDMA_NEXT_DESC(rx_idx);
++ }
++
++ /* make sure the descriptors and index are all up to date */
++ wmb();
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++
++ return 0;
++}
++
++static int gdma_next_desc(struct mtk_hsdma_chan *chan)
++{
++ struct virt_dma_desc *vdesc;
++
++ vdesc = vchan_next_desc(&chan->vchan);
++ if (!vdesc) {
++ chan->desc = NULL;
++ return 0;
++ }
++ chan->desc = to_mtk_hsdma_desc(vdesc);
++ chan->next_sg = 0;
++
++ return 1;
++}
++
++static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ struct mtk_hsdma_desc *desc;
++ int chan_issued;
++
++ chan_issued = 0;
++ spin_lock_bh(&chan->vchan.lock);
++ desc = chan->desc;
++ if (likely(desc)) {
++ if (chan->next_sg == desc->num_sgs) {
++ list_del(&desc->vdesc.node);
++ vchan_cookie_complete(&desc->vdesc);
++ chan_issued = gdma_next_desc(chan);
++ }
++ } else
++ dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
++
++ if (chan_issued)
++ set_bit(chan->id, &hsdma->chan_issued);
++ spin_unlock_bh(&chan->vchan.lock);
++}
++
++static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
++{
++ struct mtk_hsdam_engine *hsdma = devid;
++ u32 status;
++
++ status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
++ if (unlikely(!status))
++ return IRQ_NONE;
++
++ if (likely(status & HSDMA_INT_RX_Q0))
++ tasklet_schedule(&hsdma->task);
++ else
++ dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n",
++ status);
++ /* clean intr bits */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
++
++ return IRQ_HANDLED;
++}
++
++static void mtk_hsdma_issue_pending(struct dma_chan *c)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++
++ spin_lock_bh(&chan->vchan.lock);
++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
++ if (gdma_next_desc(chan)) {
++ set_bit(chan->id, &hsdma->chan_issued);
++ tasklet_schedule(&hsdma->task);
++ } else
++ dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
++ }
++ spin_unlock_bh(&chan->vchan.lock);
++}
++
++static struct dma_async_tx_descriptor * mtk_hsdma_prep_dma_memcpy(
++ struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
++ size_t len, unsigned long flags)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdma_desc *desc;
++
++ if (len <= 0)
++ return NULL;
++
++ desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC);
++ if (!desc) {
++ dev_err(c->device->dev, "alloc memcpy decs error\n");
++ return NULL;
++ }
++
++ desc->sg[0].src_addr = src;
++ desc->sg[0].dst_addr = dest;
++ desc->sg[0].len = len;
++
++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++}
++
++static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
++ dma_cookie_t cookie, struct dma_tx_state *state)
++{
++ return dma_cookie_status(c, cookie, state);
++}
++
++static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
++{
++ vchan_free_chan_resources(to_virt_chan(c));
++}
++
++static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
++{
++ kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
++}
++
++static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++
++ if (test_and_clear_bit(0, &hsdma->chan_issued)) {
++ chan = &hsdma->chan[0];
++ if (chan->desc) {
++ mtk_hsdma_start_transfer(hsdma, chan);
++ } else
++ dev_dbg(hsdma->ddev.dev,"chan 0 no desc to issue\n");
++ }
++}
++
++static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++ int next_idx, drx_idx, cnt;
++
++ chan = &hsdma->chan[0];
++ next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++ drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
++
++ cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
++ if (!cnt)
++ return;
++
++ chan->next_sg += cnt;
++ chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
++
++ /* update rx crx */
++ wmb();
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++ mtk_hsdma_chan_done(hsdma, chan);
++}
++
++static void mtk_hsdma_tasklet(unsigned long arg)
++{
++ struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
++
++ mtk_hsdma_rx(hsdma);
++ mtk_hsdma_tx(hsdma);
++}
++
++static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ int i;
++
++ chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
++ &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
++ if (!chan->tx_ring)
++ goto no_mem;
++
++ chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
++
++ /* init tx ring value */
++ for (i = 0; i < HSDMA_DESCS_NUM; i++)
++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++
++ return 0;
++no_mem:
++ return -ENOMEM;
++}
++
++static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ if (chan->tx_ring) {
++ dma_free_coherent(hsdma->ddev.dev,
++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
++ chan->tx_ring, chan->desc_addr);
++ chan->tx_ring = NULL;
++ chan->rx_ring = NULL;
++ }
++}
++
++static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++ int ret;
++ u32 reg;
++
++ /* init desc */
++ chan = &hsdma->chan[0];
++ ret = mtk_hsdam_alloc_desc(hsdma, chan);
++ if (ret)
++ return ret;
++
++ /* tx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
++ /* rx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
++ (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++
++ /* enable rx intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++ /* enable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++
++ /* hardware info */
++ reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
++ dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
++ (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
++ (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
++
++ hsdma_dump_reg(hsdma);
++
++ return ret;
++}
++
++static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++
++ /* disable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++ /* disable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++ /* free desc */
++ chan = &hsdma->chan[0];
++ mtk_hsdam_free_desc(hsdma, chan);
++
++ /* tx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
++ /* rx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++}
++
++static const struct of_device_id mtk_hsdma_of_match[] = {
++ { .compatible = "mediatek,mt7621-hsdma" },
++ { },
++};
++
++static int mtk_hsdma_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *match;
++ struct mtk_hsdma_chan *chan;
++ struct mtk_hsdam_engine *hsdma;
++ struct dma_device *dd;
++ struct resource *res;
++ int ret;
++ int irq;
++ void __iomem *base;
++
++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
++ if (!match)
++ return -EINVAL;
++
++ hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
++ if (!hsdma) {
++ dev_err(&pdev->dev, "alloc dma device failed\n");
++ return -EINVAL;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++ hsdma->base = base + HSDMA_BASE_OFFSET;
++ tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(&pdev->dev, "failed to get irq\n");
++ return -EINVAL;
++ }
++ ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
++ 0, dev_name(&pdev->dev), hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ device_reset(&pdev->dev);
++
++ dd = &hsdma->ddev;
++ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
++ dd->copy_align = HSDMA_ALIGN_SIZE;
++ dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
++ dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
++ dd->device_terminate_all = mtk_hsdma_terminate_all;
++ dd->device_tx_status = mtk_hsdma_tx_status;
++ dd->device_issue_pending = mtk_hsdma_issue_pending;
++ dd->dev = &pdev->dev;
++ dd->dev->dma_parms = &hsdma->dma_parms;
++ dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
++ INIT_LIST_HEAD(&dd->channels);
++
++ chan = &hsdma->chan[0];
++ chan->id = 0;
++ chan->vchan.desc_free = mtk_hsdma_desc_free;
++ vchan_init(&chan->vchan, dd);
++
++ /* init hardware */
++ ret = mtk_hsdma_init(hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to alloc ring descs\n");
++ return ret;
++ }
++
++ ret = dma_async_device_register(dd);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register dma device\n");
++ return ret;
++ }
++
++ ret = of_dma_controller_register(pdev->dev.of_node,
++ of_dma_xlate_by_chan_id, hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register of dma controller\n");
++ goto err_unregister;
++ }
++
++ platform_set_drvdata(pdev, hsdma);
++
++ return 0;
++
++err_unregister:
++ dma_async_device_unregister(dd);
++ return ret;
++}
++
++static int mtk_hsdma_remove(struct platform_device *pdev)
++{
++ struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
++
++ mtk_hsdma_uninit(hsdma);
++
++ of_dma_controller_free(pdev->dev.of_node);
++ dma_async_device_unregister(&hsdma->ddev);
++
++ return 0;
++}
++
++static struct platform_driver mtk_hsdma_driver = {
++ .probe = mtk_hsdma_probe,
++ .remove = mtk_hsdma_remove,
++ .driver = {
++ .name = "hsdma-mt7621",
++ .of_match_table = mtk_hsdma_of_match,
++ },
++};
++module_platform_driver(mtk_hsdma_driver);
++
++MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
++MODULE_DESCRIPTION("MTK HSDMA driver");
++MODULE_LICENSE("GPL v2");
diff --git a/target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch b/target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch
new file mode 100644
index 0000000000..5f105c69e2
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch
@@ -0,0 +1,1046 @@
+From 7f29222b1731e8182ba94a331531dec18865a1e4 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Sun, 27 Jul 2014 09:31:47 +0100
+Subject: [PATCH 48/53] asoc: add mt7620 support
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/ralink/of.c | 2 +
+ sound/soc/Kconfig | 1 +
+ sound/soc/Makefile | 1 +
+ sound/soc/ralink/Kconfig | 15 ++
+ sound/soc/ralink/Makefile | 11 +
+ sound/soc/ralink/mt7620-i2s.c | 436 ++++++++++++++++++++++++++++++++++++++
+ sound/soc/ralink/mt7620-wm8960.c | 233 ++++++++++++++++++++
+ 7 files changed, 699 insertions(+)
+ create mode 100644 sound/soc/ralink/Kconfig
+ create mode 100644 sound/soc/ralink/Makefile
+ create mode 100644 sound/soc/ralink/mt7620-i2s.c
+ create mode 100644 sound/soc/ralink/mt7620-wm8960.c
+
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -15,6 +15,7 @@
+ #include <linux/of_fdt.h>
+ #include <linux/kernel.h>
+ #include <linux/bootmem.h>
++#include <linux/module.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_address.h>
+
+@@ -26,6 +27,7 @@
+ #include "common.h"
+
+ __iomem void *rt_sysc_membase;
++EXPORT_SYMBOL(rt_sysc_membase);
+ __iomem void *rt_memc_membase;
+
+ __iomem void *plat_of_remap_node(const char *node)
+--- a/sound/soc/Kconfig
++++ b/sound/soc/Kconfig
+@@ -59,6 +59,7 @@ source "sound/soc/mxs/Kconfig"
+ source "sound/soc/pxa/Kconfig"
+ source "sound/soc/qcom/Kconfig"
+ source "sound/soc/rockchip/Kconfig"
++source "sound/soc/ralink/Kconfig"
+ source "sound/soc/samsung/Kconfig"
+ source "sound/soc/sh/Kconfig"
+ source "sound/soc/sirf/Kconfig"
+--- a/sound/soc/Makefile
++++ b/sound/soc/Makefile
+@@ -40,6 +40,7 @@ obj-$(CONFIG_SND_SOC) += kirkwood/
+ obj-$(CONFIG_SND_SOC) += pxa/
+ obj-$(CONFIG_SND_SOC) += qcom/
+ obj-$(CONFIG_SND_SOC) += rockchip/
++obj-$(CONFIG_SND_SOC) += ralink/
+ obj-$(CONFIG_SND_SOC) += samsung/
+ obj-$(CONFIG_SND_SOC) += sh/
+ obj-$(CONFIG_SND_SOC) += sirf/
+--- /dev/null
++++ b/sound/soc/ralink/Kconfig
+@@ -0,0 +1,8 @@
++config SND_RALINK_SOC_I2S
++ depends on RALINK && SND_SOC && !SOC_RT288X
++ select SND_SOC_GENERIC_DMAENGINE_PCM
++ select REGMAP_MMIO
++ tristate "SoC Audio (I2S protocol) for Ralink SoC"
++ help
++ Say Y if you want to use I2S protocol and I2S codec on Ralink/MediaTek
++ based boards.
+--- /dev/null
++++ b/sound/soc/ralink/Makefile
+@@ -0,0 +1,6 @@
++#
++# Ralink/MediaTek Platform Support
++#
++snd-soc-ralink-i2s-objs := ralink-i2s.o
++
++obj-$(CONFIG_SND_RALINK_SOC_I2S) += snd-soc-ralink-i2s.o
+--- /dev/null
++++ b/sound/soc/ralink/ralink-i2s.c
+@@ -0,0 +1,965 @@
++/*
++ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
++ * Copyright (C) 2016 Michael Lee <igvtee@gmail.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 675 Mass Ave, Cambridge, MA 02139, USA.
++ *
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++#include <linux/debugfs.h>
++#include <linux/of_device.h>
++#include <sound/pcm_params.h>
++#include <sound/dmaengine_pcm.h>
++
++#include <asm/mach-ralink/ralink_regs.h>
++
++#define DRV_NAME "ralink-i2s"
++
++#define I2S_REG_CFG0 0x00
++#define I2S_REG_INT_STATUS 0x04
++#define I2S_REG_INT_EN 0x08
++#define I2S_REG_FF_STATUS 0x0c
++#define I2S_REG_WREG 0x10
++#define I2S_REG_RREG 0x14
++#define I2S_REG_CFG1 0x18
++#define I2S_REG_DIVCMP 0x20
++#define I2S_REG_DIVINT 0x24
++
++/* I2S_REG_CFG0 */
++#define I2S_REG_CFG0_EN BIT(31)
++#define I2S_REG_CFG0_DMA_EN BIT(30)
++#define I2S_REG_CFG0_BYTE_SWAP BIT(28)
++#define I2S_REG_CFG0_TX_EN BIT(24)
++#define I2S_REG_CFG0_RX_EN BIT(20)
++#define I2S_REG_CFG0_SLAVE BIT(16)
++#define I2S_REG_CFG0_RX_THRES 12
++#define I2S_REG_CFG0_TX_THRES 4
++#define I2S_REG_CFG0_THRES_MASK (0xf << I2S_REG_CFG0_RX_THRES) | \
++ (0xf << I2S_REG_CFG0_TX_THRES)
++#define I2S_REG_CFG0_DFT_THRES (4 << I2S_REG_CFG0_RX_THRES) | \
++ (4 << I2S_REG_CFG0_TX_THRES)
++/* RT305x */
++#define I2S_REG_CFG0_CLK_DIS BIT(8)
++#define I2S_REG_CFG0_TXCH_SWAP BIT(3)
++#define I2S_REG_CFG0_TXCH1_OFF BIT(2)
++#define I2S_REG_CFG0_TXCH0_OFF BIT(1)
++#define I2S_REG_CFG0_SLAVE_EN BIT(0)
++/* RT3883 */
++#define I2S_REG_CFG0_RXCH_SWAP BIT(11)
++#define I2S_REG_CFG0_RXCH1_OFF BIT(10)
++#define I2S_REG_CFG0_RXCH0_OFF BIT(9)
++#define I2S_REG_CFG0_WS_INV BIT(0)
++/* MT7628 */
++#define I2S_REG_CFG0_FMT_LE BIT(29)
++#define I2S_REG_CFG0_SYS_BE BIT(28)
++#define I2S_REG_CFG0_NORM_24 BIT(18)
++#define I2S_REG_CFG0_DATA_24 BIT(17)
++
++/* I2S_REG_INT_STATUS */
++#define I2S_REG_INT_RX_FAULT BIT(7)
++#define I2S_REG_INT_RX_OVRUN BIT(6)
++#define I2S_REG_INT_RX_UNRUN BIT(5)
++#define I2S_REG_INT_RX_THRES BIT(4)
++#define I2S_REG_INT_TX_FAULT BIT(3)
++#define I2S_REG_INT_TX_OVRUN BIT(2)
++#define I2S_REG_INT_TX_UNRUN BIT(1)
++#define I2S_REG_INT_TX_THRES BIT(0)
++#define I2S_REG_INT_TX_MASK 0xf
++#define I2S_REG_INT_RX_MASK 0xf0
++
++/* I2S_REG_FF_STATUS */
++#define I2S_RX_AVCNT(x) ((x >> 4) & 0xf)
++#define I2S_TX_AVCNT(x) (x & 0xf)
++/* MT7628 */
++#define MT7628_I2S_RX_AVCNT(x) ((x >> 8) & 0x1f)
++#define MT7628_I2S_TX_AVCNT(x) (x & 0x1f)
++
++/* I2S_REG_CFG1 */
++#define I2S_REG_CFG1_LBK BIT(31)
++#define I2S_REG_CFG1_EXTLBK BIT(30)
++/* RT3883 */
++#define I2S_REG_CFG1_LEFT_J BIT(0)
++#define I2S_REG_CFG1_RIGHT_J BIT(1)
++#define I2S_REG_CFG1_FMT_MASK 0x3
++
++/* I2S_REG_DIVCMP */
++#define I2S_REG_DIVCMP_CLKEN BIT(31)
++#define I2S_REG_DIVCMP_DIVCOMP_MASK 0x1ff
++
++/* I2S_REG_DIVINT */
++#define I2S_REG_DIVINT_MASK 0x3ff
++
++/* BCLK dividers */
++#define RALINK_I2S_DIVCMP 0
++#define RALINK_I2S_DIVINT 1
++
++/* FIFO */
++#define RALINK_I2S_FIFO_SIZE 32
++
++/* feature flags */
++#define RALINK_FLAGS_TXONLY BIT(0)
++#define RALINK_FLAGS_LEFT_J BIT(1)
++#define RALINK_FLAGS_RIGHT_J BIT(2)
++#define RALINK_FLAGS_ENDIAN BIT(3)
++#define RALINK_FLAGS_24BIT BIT(4)
++
++#define RALINK_I2S_INT_EN 0
++
++struct ralink_i2s_stats {
++ u32 dmafault;
++ u32 overrun;
++ u32 underrun;
++ u32 belowthres;
++};
++
++struct ralink_i2s {
++ struct device *dev;
++ void __iomem *regs;
++ struct clk *clk;
++ struct regmap *regmap;
++ u32 flags;
++ unsigned int fmt;
++ u16 txdma_req;
++ u16 rxdma_req;
++
++ struct snd_dmaengine_dai_dma_data playback_dma_data;
++ struct snd_dmaengine_dai_dma_data capture_dma_data;
++
++ struct dentry *dbg_dir;
++ struct dentry *dbg_stats;
++ struct ralink_i2s_stats txstats;
++ struct ralink_i2s_stats rxstats;
++};
++
++static void ralink_i2s_dump_regs(struct ralink_i2s *i2s)
++{
++ u32 buf[10];
++ int ret;
++
++ ret = regmap_bulk_read(i2s->regmap, I2S_REG_CFG0,
++ buf, ARRAY_SIZE(buf));
++
++ dev_dbg(i2s->dev, "CFG0: %08x, INTSTAT: %08x, INTEN: %08x, " \
++ "FFSTAT: %08x, WREG: %08x, RREG: %08x, " \
++ "CFG1: %08x, DIVCMP: %08x, DIVINT: %08x\n",
++ buf[0], buf[1], buf[2], buf[3], buf[4],
++ buf[5], buf[6], buf[8], buf[9]);
++}
++
++static int ralink_i2s_set_sysclk(struct snd_soc_dai *dai,
++ int clk_id, unsigned int freq, int dir)
++{
++ return 0;
++}
++
++static int ralink_i2s_set_sys_bclk(struct snd_soc_dai *dai, int width, int rate)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++ unsigned long clk = clk_get_rate(i2s->clk);
++ int div;
++ uint32_t data;
++
++ /* disable clock at slave mode */
++ if ((i2s->fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
++ SND_SOC_DAIFMT_CBM_CFM) {
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_CLK_DIS,
++ I2S_REG_CFG0_CLK_DIS);
++ return 0;
++ }
++
++ /* FREQOUT = FREQIN / (I2S_CLK_DIV + 1) */
++ div = (clk / rate ) - 1;
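++ /*
++ * e.g. with a (hypothetical) 12.288 MHz refclk and a 1.536 MHz bit
++ * clock: div = 12288000 / 1536000 - 1 = 7
++ */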
++
++ data = rt_sysc_r32(0x30);
++ data &= (0xff << 8);
++ data |= (0x1 << 15) | (div << 8);
++ rt_sysc_w32(data, 0x30);
++
++ /* enable clock */
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, I2S_REG_CFG0_CLK_DIS, 0);
++
++ dev_dbg(i2s->dev, "clk: %lu, rate: %u, div: %d\n",
++ clk, rate, div);
++
++ return 0;
++}
++
++static int ralink_i2s_set_bclk(struct snd_soc_dai *dai, int width, int rate)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++ unsigned long clk = clk_get_rate(i2s->clk);
++ int divint, divcomp;
++
++ /* disable clock at slave mode */
++ if ((i2s->fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
++ SND_SOC_DAIFMT_CBM_CFM) {
++ regmap_update_bits(i2s->regmap, I2S_REG_DIVCMP,
++ I2S_REG_DIVCMP_CLKEN, 0);
++ return 0;
++ }
++
++ /* FREQOUT = FREQIN * (1/2) * (1/(DIVINT + DIVCOMP/512)) */
++ clk = clk / (2 * 2 * width);
++ divint = clk / rate;
++ divcomp = ((clk % rate) * 512) / rate;
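++ /*
++ * e.g. with a (hypothetical) 250 MHz source clock, 16-bit width and a
++ * 48 kHz rate: clk = 250000000 / (2 * 2 * 16) = 3906250,
++ * divint = 3906250 / 48000 = 81,
++ * divcomp = ((3906250 % 48000) * 512) / 48000 = 194
++ */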
++
++ if ((divint > I2S_REG_DIVINT_MASK) ||
++ (divcomp > I2S_REG_DIVCMP_DIVCOMP_MASK))
++ return -EINVAL;
++
++ regmap_update_bits(i2s->regmap, I2S_REG_DIVINT,
++ I2S_REG_DIVINT_MASK, divint);
++ regmap_update_bits(i2s->regmap, I2S_REG_DIVCMP,
++ I2S_REG_DIVCMP_DIVCOMP_MASK, divcomp);
++
++ /* enable clock */
++ regmap_update_bits(i2s->regmap, I2S_REG_DIVCMP, I2S_REG_DIVCMP_CLKEN,
++ I2S_REG_DIVCMP_CLKEN);
++
++ dev_dbg(i2s->dev, "clk: %lu, rate: %u, int: %d, comp: %d\n",
++ clk_get_rate(i2s->clk), rate, divint, divcomp);
++
++ return 0;
++}
++
++static int ralink_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++ unsigned int cfg0 = 0, cfg1 = 0;
++
++ /* set master/slave audio interface */
++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
++ case SND_SOC_DAIFMT_CBM_CFM:
++ if (i2s->flags & RALINK_FLAGS_TXONLY)
++ cfg0 |= I2S_REG_CFG0_SLAVE_EN;
++ else
++ cfg0 |= I2S_REG_CFG0_SLAVE;
++ break;
++ case SND_SOC_DAIFMT_CBS_CFS:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* interface format */
++ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
++ case SND_SOC_DAIFMT_I2S:
++ break;
++ case SND_SOC_DAIFMT_RIGHT_J:
++ if (i2s->flags & RALINK_FLAGS_RIGHT_J) {
++ cfg1 |= I2S_REG_CFG1_RIGHT_J;
++ break;
++ }
++ return -EINVAL;
++ case SND_SOC_DAIFMT_LEFT_J:
++ if (i2s->flags & RALINK_FLAGS_LEFT_J) {
++ cfg1 |= I2S_REG_CFG1_LEFT_J;
++ break;
++ }
++ return -EINVAL;
++ default:
++ return -EINVAL;
++ }
++
++ /* clock inversion */
++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
++ case SND_SOC_DAIFMT_NB_NF:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (i2s->flags & RALINK_FLAGS_TXONLY) {
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_SLAVE_EN, cfg0);
++ } else {
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_SLAVE, cfg0);
++ }
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG1,
++ I2S_REG_CFG1_FMT_MASK, cfg1);
++ i2s->fmt = fmt;
++
++ return 0;
++}
++
++static int ralink_i2s_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++
++ if (dai->active)
++ return 0;
++
++ /* setup status interrupt */
++#if (RALINK_I2S_INT_EN)
++ regmap_write(i2s->regmap, I2S_REG_INT_EN, 0xff);
++#else
++ regmap_write(i2s->regmap, I2S_REG_INT_EN, 0x0);
++#endif
++
++ /* enable */
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_EN | I2S_REG_CFG0_DMA_EN |
++ I2S_REG_CFG0_THRES_MASK,
++ I2S_REG_CFG0_EN | I2S_REG_CFG0_DMA_EN |
++ I2S_REG_CFG0_DFT_THRES);
++
++ return 0;
++}
++
++static void ralink_i2s_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++
++ /* If both streams are stopped, disable module and clock */
++ if (dai->active)
++ return;
++
++ /*
++ * The datasheet mentions that disabling the block resets all control
++ * registers to their initial values, so startup must reinitialize them.
++ */
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, I2S_REG_CFG0_EN, 0);
++}
++
++static int ralink_i2s_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++ int width;
++ int ret;
++
++ width = params_width(params);
++ switch (width) {
++ case 16:
++ if (i2s->flags & RALINK_FLAGS_24BIT)
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_DATA_24, 0);
++ break;
++ case 24:
++ if (i2s->flags & RALINK_FLAGS_24BIT) {
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_DATA_24,
++ I2S_REG_CFG0_DATA_24);
++ break;
++ }
++ return -EINVAL;
++ default:
++ return -EINVAL;
++ }
++
++ switch (params_channels(params)) {
++ case 2:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (i2s->flags & RALINK_FLAGS_ENDIAN) {
++ /* system endian */
++#ifdef SNDRV_LITTLE_ENDIAN
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_SYS_BE, 0);
++#else
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_SYS_BE,
++ I2S_REG_CFG0_SYS_BE);
++#endif
++
++ /* data endian */
++ switch (params_format(params)) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_S24_LE:
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_FMT_LE,
++ I2S_REG_CFG0_FMT_LE);
++ break;
++ case SNDRV_PCM_FORMAT_S16_BE:
++ case SNDRV_PCM_FORMAT_S24_BE:
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0,
++ I2S_REG_CFG0_FMT_LE, 0);
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++
++ /* setup bclk rate */
++ if (i2s->flags & RALINK_FLAGS_TXONLY)
++ ret = ralink_i2s_set_sys_bclk(dai, width, params_rate(params));
++ else
++ ret = ralink_i2s_set_bclk(dai, width, params_rate(params));
++
++ return ret;
++}
++
++static int ralink_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
++ struct snd_soc_dai *dai)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++ unsigned int mask, val;
++
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++ mask = I2S_REG_CFG0_TX_EN;
++ else
++ mask = I2S_REG_CFG0_RX_EN;
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ val = mask;
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ val = 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, mask, val);
++
++ return 0;
++}
++
++static void ralink_i2s_init_dma_data(struct ralink_i2s *i2s,
++ struct resource *res)
++{
++ struct snd_dmaengine_dai_dma_data *dma_data;
++
++ /* Playback */
++ dma_data = &i2s->playback_dma_data;
++ dma_data->addr = res->start + I2S_REG_WREG;
++ dma_data->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ dma_data->maxburst = 1;
++ dma_data->slave_id = i2s->txdma_req;
++
++ if (i2s->flags & RALINK_FLAGS_TXONLY)
++ return;
++
++ /* Capture */
++ dma_data = &i2s->capture_dma_data;
++ dma_data->addr = res->start + I2S_REG_RREG;
++ dma_data->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ dma_data->maxburst = 1;
++ dma_data->slave_id = i2s->rxdma_req;
++}
++
++static int ralink_i2s_dai_probe(struct snd_soc_dai *dai)
++{
++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai);
++
++ snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data,
++ &i2s->capture_dma_data);
++
++ return 0;
++}
++
++static int ralink_i2s_dai_remove(struct snd_soc_dai *dai)
++{
++ return 0;
++}
++
++static const struct snd_soc_dai_ops ralink_i2s_dai_ops = {
++ .set_sysclk = ralink_i2s_set_sysclk,
++ .set_fmt = ralink_i2s_set_fmt,
++ .startup = ralink_i2s_startup,
++ .shutdown = ralink_i2s_shutdown,
++ .hw_params = ralink_i2s_hw_params,
++ .trigger = ralink_i2s_trigger,
++};
++
++static struct snd_soc_dai_driver ralink_i2s_dai = {
++ .name = DRV_NAME,
++ .probe = ralink_i2s_dai_probe,
++ .remove = ralink_i2s_dai_remove,
++ .ops = &ralink_i2s_dai_ops,
++ .capture = {
++ .stream_name = "I2S Capture",
++ .channels_min = 2,
++ .channels_max = 2,
++ .rate_min = 5512,
++ .rate_max = 192000,
++ .rates = SNDRV_PCM_RATE_CONTINUOUS,
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ },
++ .playback = {
++ .stream_name = "I2S Playback",
++ .channels_min = 2,
++ .channels_max = 2,
++ .rate_min = 5512,
++ .rate_max = 192000,
++ .rates = SNDRV_PCM_RATE_CONTINUOUS,
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ },
++ .symmetric_rates = 1,
++};
++
++static struct snd_pcm_hardware ralink_pcm_hardware = {
++ .info = SNDRV_PCM_INFO_MMAP |
++ SNDRV_PCM_INFO_MMAP_VALID |
++ SNDRV_PCM_INFO_INTERLEAVED |
++ SNDRV_PCM_INFO_BLOCK_TRANSFER,
++ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .channels_min = 2,
++ .channels_max = 2,
++ .period_bytes_min = PAGE_SIZE,
++ .period_bytes_max = PAGE_SIZE * 2,
++ .periods_min = 2,
++ .periods_max = 128,
++ .buffer_bytes_max = 128 * 1024,
++ .fifo_size = RALINK_I2S_FIFO_SIZE,
++};
++
++static const struct snd_dmaengine_pcm_config ralink_dmaengine_pcm_config = {
++ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
++ .pcm_hardware = &ralink_pcm_hardware,
++ .prealloc_buffer_size = 256 * PAGE_SIZE,
++};
++
++static const struct snd_soc_component_driver ralink_i2s_component = {
++ .name = DRV_NAME,
++};
++
++static bool ralink_i2s_readable_reg(struct device *dev, unsigned int reg)
++{
++ return true;
++}
++
++static bool ralink_i2s_volatile_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case I2S_REG_INT_STATUS:
++ case I2S_REG_FF_STATUS:
++ return true;
++ }
++ return false;
++}
++
++static bool ralink_i2s_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case I2S_REG_FF_STATUS:
++ case I2S_REG_RREG:
++ return false;
++ }
++ return true;
++}
++
++static const struct regmap_config ralink_i2s_regmap_config = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++ .writeable_reg = ralink_i2s_writeable_reg,
++ .readable_reg = ralink_i2s_readable_reg,
++ .volatile_reg = ralink_i2s_volatile_reg,
++ .max_register = I2S_REG_DIVINT,
++};
++
++#if (RALINK_I2S_INT_EN)
++static irqreturn_t ralink_i2s_irq(int irq, void *devid)
++{
++ struct ralink_i2s *i2s = devid;
++ u32 status;
++
++ regmap_read(i2s->regmap, I2S_REG_INT_STATUS, &status);
++ if (unlikely(!status))
++ return IRQ_NONE;
++
++ /* tx stats */
++ if (status & I2S_REG_INT_TX_MASK) {
++ if (status & I2S_REG_INT_TX_THRES)
++ i2s->txstats.belowthres++;
++ if (status & I2S_REG_INT_TX_UNRUN)
++ i2s->txstats.underrun++;
++ if (status & I2S_REG_INT_TX_OVRUN)
++ i2s->txstats.overrun++;
++ if (status & I2S_REG_INT_TX_FAULT)
++ i2s->txstats.dmafault++;
++ }
++
++ /* rx stats */
++ if (status & I2S_REG_INT_RX_MASK) {
++ if (status & I2S_REG_INT_RX_THRES)
++ i2s->rxstats.belowthres++;
++ if (status & I2S_REG_INT_RX_UNRUN)
++ i2s->rxstats.underrun++;
++ if (status & I2S_REG_INT_RX_OVRUN)
++ i2s->rxstats.overrun++;
++ if (status & I2S_REG_INT_RX_FAULT)
++ i2s->rxstats.dmafault++;
++ }
++
++ /* clean status bits */
++ regmap_write(i2s->regmap, I2S_REG_INT_STATUS, status);
++
++ return IRQ_HANDLED;
++}
++#endif
++
++#if IS_ENABLED(CONFIG_DEBUG_FS)
++static int ralink_i2s_stats_show(struct seq_file *s, void *unused)
++{
++ struct ralink_i2s *i2s = s->private;
++
++ seq_printf(s, "tx stats\n");
++ seq_printf(s, "\tbelow threshold\t%u\n", i2s->txstats.belowthres);
++ seq_printf(s, "\tunder run\t%u\n", i2s->txstats.underrun);
++ seq_printf(s, "\tover run\t%u\n", i2s->txstats.overrun);
++ seq_printf(s, "\tdma fault\t%u\n", i2s->txstats.dmafault);
++
++ seq_printf(s, "rx stats\n");
++ seq_printf(s, "\tbelow threshold\t%u\n", i2s->rxstats.belowthres);
++ seq_printf(s, "\tunder run\t%u\n", i2s->rxstats.underrun);
++ seq_printf(s, "\tover run\t%u\n", i2s->rxstats.overrun);
++ seq_printf(s, "\tdma fault\t%u\n", i2s->rxstats.dmafault);
++
++ ralink_i2s_dump_regs(i2s);
++
++ return 0;
++}
++
++static int ralink_i2s_stats_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, ralink_i2s_stats_show, inode->i_private);
++}
++
++static const struct file_operations ralink_i2s_stats_ops = {
++ .open = ralink_i2s_stats_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static inline int ralink_i2s_debugfs_create(struct ralink_i2s *i2s)
++{
++ i2s->dbg_dir = debugfs_create_dir(dev_name(i2s->dev), NULL);
++ if (!i2s->dbg_dir)
++ return -ENOMEM;
++
++ i2s->dbg_stats = debugfs_create_file("stats", S_IRUGO,
++ i2s->dbg_dir, i2s, &ralink_i2s_stats_ops);
++ if (!i2s->dbg_stats) {
++ debugfs_remove(i2s->dbg_dir);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static inline void ralink_i2s_debugfs_remove(struct ralink_i2s *i2s)
++{
++ debugfs_remove(i2s->dbg_stats);
++ debugfs_remove(i2s->dbg_dir);
++}
++#else
++static inline int ralink_i2s_debugfs_create(struct ralink_i2s *i2s)
++{
++ return 0;
++}
++
++static inline void ralink_i2s_debugfs_remove(struct ralink_i2s *i2s)
++{
++}
++#endif
++
++/*
++ * TODO: these refclk setup functions should use
++ * clock framework instead. hardcode it now.
++ */
++static void rt3350_refclk_setup(void)
++{
++ uint32_t data;
++
++ /* set refclk output 12Mhz clock */
++ data = rt_sysc_r32(0x2c);
++ data |= (0x1 << 8);
++ rt_sysc_w32(data, 0x2c);
++}
++
++static void rt3883_refclk_setup(void)
++{
++ uint32_t data;
++
++ /* set refclk output 12Mhz clock */
++ data = rt_sysc_r32(0x2c);
++ data &= ~(0x3 << 13);
++ data |= (0x1 << 13);
++ rt_sysc_w32(data, 0x2c);
++}
++
++static void rt3552_refclk_setup(void)
++{
++ uint32_t data;
++
++ /* set refclk output 12Mhz clock */
++ data = rt_sysc_r32(0x2c);
++ data &= ~(0xf << 8);
++ data |= (0x3 << 8);
++ rt_sysc_w32(data, 0x2c);
++}
++
++static void mt7620_refclk_setup(void)
++{
++ uint32_t data;
++
++ /* set refclk output 12Mhz clock */
++ data = rt_sysc_r32(0x2c);
++ data &= ~(0x7 << 9);
++ data |= 0x1 << 9;
++ rt_sysc_w32(data, 0x2c);
++}
++
++static void mt7621_refclk_setup(void)
++{
++ uint32_t data;
++
++ /* set refclk output 12Mhz clock */
++ data = rt_sysc_r32(0x2c);
++ data &= ~(0x1f << 18);
++ data |= (0x19 << 18);
++ data &= ~(0x1f << 12);
++ data |= (0x1 << 12);
++ data &= ~(0x7 << 9);
++ data |= (0x5 << 9);
++ rt_sysc_w32(data, 0x2c);
++}
++
++static void mt7628_refclk_setup(void)
++{
++ uint32_t data;
++
++ /* set i2s and refclk digital pad */
++ data = rt_sysc_r32(0x3c);
++ data |= 0x1f;
++ rt_sysc_w32(data, 0x3c);
++
++ /* Adjust REFCLK0's driving strength */
++ data = rt_sysc_r32(0x1354);
++ data &= ~(0x1 << 5);
++ rt_sysc_w32(data, 0x1354);
++ data = rt_sysc_r32(0x1364);
++ data |= (0x1 << 5);
++ rt_sysc_w32(data, 0x1364);
++
++ /* set refclk output 12Mhz clock */
++ data = rt_sysc_r32(0x2c);
++ data &= ~(0x7 << 9);
++ data |= 0x1 << 9;
++ rt_sysc_w32(data, 0x2c);
++}
++
++struct rt_i2s_data {
++ u32 flags;
++ void (*refclk_setup)(void);
++};
++
++struct rt_i2s_data rt3050_i2s_data = { .flags = RALINK_FLAGS_TXONLY };
++struct rt_i2s_data rt3350_i2s_data = { .flags = RALINK_FLAGS_TXONLY,
++ .refclk_setup = rt3350_refclk_setup };
++struct rt_i2s_data rt3883_i2s_data = {
++ .flags = (RALINK_FLAGS_LEFT_J | RALINK_FLAGS_RIGHT_J),
++ .refclk_setup = rt3883_refclk_setup };
++struct rt_i2s_data rt3352_i2s_data = { .refclk_setup = rt3552_refclk_setup};
++struct rt_i2s_data mt7620_i2s_data = { .refclk_setup = mt7620_refclk_setup};
++struct rt_i2s_data mt7621_i2s_data = { .refclk_setup = mt7621_refclk_setup};
++struct rt_i2s_data mt7628_i2s_data = {
++ .flags = (RALINK_FLAGS_ENDIAN | RALINK_FLAGS_24BIT |
++ RALINK_FLAGS_LEFT_J),
++ .refclk_setup = mt7628_refclk_setup};
++
++static const struct of_device_id ralink_i2s_match_table[] = {
++ { .compatible = "ralink,rt3050-i2s",
++ .data = (void *)&rt3050_i2s_data },
++ { .compatible = "ralink,rt3350-i2s",
++ .data = (void *)&rt3350_i2s_data },
++ { .compatible = "ralink,rt3883-i2s",
++ .data = (void *)&rt3883_i2s_data },
++ { .compatible = "ralink,rt3352-i2s",
++ .data = (void *)&rt3352_i2s_data },
++ { .compatible = "mediatek,mt7620-i2s",
++ .data = (void *)&mt7620_i2s_data },
++ { .compatible = "mediatek,mt7621-i2s",
++ .data = (void *)&mt7621_i2s_data },
++ { .compatible = "mediatek,mt7628-i2s",
++ .data = (void *)&mt7628_i2s_data },
++ { /* sentinel */ },
++};
++MODULE_DEVICE_TABLE(of, ralink_i2s_match_table);
++
++static int ralink_i2s_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *match;
++ struct device_node *np = pdev->dev.of_node;
++ struct ralink_i2s *i2s;
++ struct resource *res;
++ int irq, ret;
++ u32 dma_req;
++ struct rt_i2s_data *data;
++
++ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL);
++ if (!i2s)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, i2s);
++ i2s->dev = &pdev->dev;
++
++ match = of_match_device(ralink_i2s_match_table, &pdev->dev);
++ if (!match)
++ return -EINVAL;
++ data = (struct rt_i2s_data *)match->data;
++ i2s->flags = data->flags;
++ /* setup out 12Mhz refclk to codec as mclk */
++ if (data->refclk_setup)
++ data->refclk_setup();
++
++ if (of_property_read_u32(np, "txdma-req", &dma_req)) {
++ dev_err(&pdev->dev, "no txdma-req define\n");
++ return -EINVAL;
++ }
++ i2s->txdma_req = (u16)dma_req;
++ if (!(i2s->flags & RALINK_FLAGS_TXONLY)) {
++ if (of_property_read_u32(np, "rxdma-req", &dma_req)) {
++ dev_err(&pdev->dev, "no rxdma-req define\n");
++ return -EINVAL;
++ }
++ i2s->rxdma_req = (u16)dma_req;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ i2s->regs = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(i2s->regs))
++ return PTR_ERR(i2s->regs);
++
++ i2s->regmap = devm_regmap_init_mmio(&pdev->dev, i2s->regs,
++ &ralink_i2s_regmap_config);
++ if (IS_ERR(i2s->regmap)) {
++ dev_err(&pdev->dev, "regmap init failed\n");
++ return PTR_ERR(i2s->regmap);
++ }
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(&pdev->dev, "failed to get irq\n");
++ return -EINVAL;
++ }
++
++#if (RALINK_I2S_INT_EN)
++ ret = devm_request_irq(&pdev->dev, irq, ralink_i2s_irq,
++ 0, dev_name(&pdev->dev), i2s);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++#endif
++
++ i2s->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(i2s->clk)) {
++ dev_err(&pdev->dev, "no clock defined\n");
++ return PTR_ERR(i2s->clk);
++ }
++
++ ret = clk_prepare_enable(i2s->clk);
++ if (ret)
++ return ret;
++
++ ralink_i2s_init_dma_data(i2s, res);
++
++ device_reset(&pdev->dev);
++
++ ret = ralink_i2s_debugfs_create(i2s);
++ if (ret) {
++ dev_err(&pdev->dev, "create debugfs failed\n");
++ goto err_clk_disable;
++ }
++
++ /* enable 24bits support */
++ if (i2s->flags & RALINK_FLAGS_24BIT) {
++ ralink_i2s_dai.capture.formats |= SNDRV_PCM_FMTBIT_S24_LE;
++ ralink_i2s_dai.playback.formats |= SNDRV_PCM_FMTBIT_S24_LE;
++ }
++
++ /* enable big endian support */
++ if (i2s->flags & RALINK_FLAGS_ENDIAN) {
++ ralink_i2s_dai.capture.formats |= SNDRV_PCM_FMTBIT_S16_BE;
++ ralink_i2s_dai.playback.formats |= SNDRV_PCM_FMTBIT_S16_BE;
++ ralink_pcm_hardware.formats |= SNDRV_PCM_FMTBIT_S16_BE;
++ if (i2s->flags & RALINK_FLAGS_24BIT) {
++ ralink_i2s_dai.capture.formats |=
++ SNDRV_PCM_FMTBIT_S24_BE;
++ ralink_i2s_dai.playback.formats |=
++ SNDRV_PCM_FMTBIT_S24_BE;
++ ralink_pcm_hardware.formats |=
++ SNDRV_PCM_FMTBIT_S24_BE;
++ }
++ }
++
++ /* disable capture support */
++ if (i2s->flags & RALINK_FLAGS_TXONLY)
++ memset(&ralink_i2s_dai.capture, 0,
++ sizeof(ralink_i2s_dai.capture));
++
++ ret = devm_snd_soc_register_component(&pdev->dev, &ralink_i2s_component,
++ &ralink_i2s_dai, 1);
++ if (ret)
++ goto err_debugfs;
++
++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev,
++ &ralink_dmaengine_pcm_config,
++ SND_DMAENGINE_PCM_FLAG_COMPAT);
++ if (ret)
++ goto err_debugfs;
++
++ dev_info(i2s->dev, "mclk %luKHz\n", clk_get_rate(i2s->clk) / 1000000);
++
++ return 0;
++
++err_debugfs:
++ ralink_i2s_debugfs_remove(i2s);
++
++err_clk_disable:
++ clk_disable_unprepare(i2s->clk);
++
++ return ret;
++}
++
++static int ralink_i2s_remove(struct platform_device *pdev)
++{
++ struct ralink_i2s *i2s = platform_get_drvdata(pdev);
++
++ ralink_i2s_debugfs_remove(i2s);
++ clk_disable_unprepare(i2s->clk);
++
++ return 0;
++}
++
++static struct platform_driver ralink_i2s_driver = {
++ .probe = ralink_i2s_probe,
++ .remove = ralink_i2s_remove,
++ .driver = {
++ .name = DRV_NAME,
++ .of_match_table = ralink_i2s_match_table,
++ },
++};
++module_platform_driver(ralink_i2s_driver);
++
++MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>");
++MODULE_DESCRIPTION("Ralink/MediaTek I2S driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch b/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch
new file mode 100644
index 0000000000..b71d8f38e0
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch
@@ -0,0 +1,22 @@
+From a7eb46e0ea4a11e4dfb56ab129bf816d1059a6c5 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 7 Dec 2015 17:31:08 +0100
+Subject: [PATCH 51/53] serial: add ugly custom baud rate hack
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/tty/serial/serial_core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -411,6 +411,9 @@ uart_get_baud_rate(struct uart_port *por
+ break;
+ }
+
++ if (tty_termios_baud_rate(termios) == 2500000)
++ return 250000;
++
+ for (try = 0; try < 2; try++) {
+ baud = tty_termios_baud_rate(termios);
+
diff --git a/target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch b/target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch
new file mode 100644
index 0000000000..6eb7f16b9d
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch
@@ -0,0 +1,217 @@
+From fc8f96309c21c1bc3276427309cd7d361347d66e Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 7 Dec 2015 17:16:50 +0100
+Subject: [PATCH 52/53] pwm: add mediatek support
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ drivers/pwm/Kconfig | 9 +++
+ drivers/pwm/Makefile | 1 +
+ drivers/pwm/pwm-mediatek.c | 173 ++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 183 insertions(+)
+ create mode 100644 drivers/pwm/pwm-mediatek.c
+
+--- a/drivers/pwm/Kconfig
++++ b/drivers/pwm/Kconfig
+@@ -302,6 +302,15 @@ config PWM_MEDIATEK
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-mediatek.
+
++config PWM_MEDIATEK_RAMIPS
++ tristate "Mediatek PWM support"
++ depends on RALINK && OF
++ help
++ Generic PWM framework driver for Ralink/MediaTek MIPS SoCs.
++
++ To compile this driver as a module, choose M here: the module
++ will be called pwm-mediatek-ramips.
++
+ config PWM_MXS
+ tristate "Freescale MXS PWM support"
+ depends on ARCH_MXS && OF
+--- a/drivers/pwm/Makefile
++++ b/drivers/pwm/Makefile
+@@ -28,6 +28,7 @@ obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-p
+ obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
+ obj-$(CONFIG_PWM_MESON) += pwm-meson.o
+ obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o
++obj-$(CONFIG_PWM_MEDIATEK_RAMIPS) += pwm-mediatek-ramips.o
+ obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o
+ obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
+ obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o
+--- /dev/null
++++ b/drivers/pwm/pwm-mediatek-ramips.c
+@@ -0,0 +1,173 @@
++/*
++ * Mediatek Pulse Width Modulator driver
++ *
++ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/pwm.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++
++#define NUM_PWM 4
++
++/* PWM registers and bits definitions */
++#define PWMCON 0x00
++#define PWMHDUR 0x04
++#define PWMLDUR 0x08
++#define PWMGDUR 0x0c
++#define PWMWAVENUM 0x28
++#define PWMDWIDTH 0x2c
++#define PWMTHRES 0x30
++
++/**
++ * struct mtk_pwm_chip - struct representing pwm chip
++ *
++ * @mmio_base: base address of pwm chip
++ * @chip: linux pwm chip representation
++ */
++struct mtk_pwm_chip {
++ void __iomem *mmio_base;
++ struct pwm_chip chip;
++};
++
++static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip)
++{
++ return container_of(chip, struct mtk_pwm_chip, chip);
++}
++
++static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num,
++ unsigned long offset)
++{
++ return ioread32(chip->mmio_base + 0x10 + (num * 0x40) + offset);
++}
++
++static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip,
++ unsigned int num, unsigned long offset,
++ unsigned long val)
++{
++ iowrite32(val, chip->mmio_base + 0x10 + (num * 0x40) + offset);
++}
++
++static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
++ int duty_ns, int period_ns)
++{
++ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
++ u32 resolution = 100 / 4;
++ u32 clkdiv = 0;
++
++ while (period_ns / resolution > 8191) {
++ clkdiv++;
++ resolution *= 2;
++ }
++
++ if (clkdiv > 7)
++ return -EINVAL;
++
++ mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | BIT(3) | clkdiv);
++ mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution);
++ mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution);
++ return 0;
++}
++
++static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
++ u32 val;
++
++ val = ioread32(pc->mmio_base);
++ val |= BIT(pwm->hwpwm);
++ iowrite32(val, pc->mmio_base);
++
++ return 0;
++}
++
++static void mtk_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
++{
++ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
++ u32 val;
++
++ val = ioread32(pc->mmio_base);
++ val &= ~BIT(pwm->hwpwm);
++ iowrite32(val, pc->mmio_base);
++}
++
++static const struct pwm_ops mtk_pwm_ops = {
++ .config = mtk_pwm_config,
++ .enable = mtk_pwm_enable,
++ .disable = mtk_pwm_disable,
++ .owner = THIS_MODULE,
++};
++
++static int mtk_pwm_probe(struct platform_device *pdev)
++{
++ struct mtk_pwm_chip *pc;
++ struct resource *r;
++ int ret;
++
++ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
++ if (!pc)
++ return -ENOMEM;
++
++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
++ if (IS_ERR(pc->mmio_base))
++ return PTR_ERR(pc->mmio_base);
++
++ platform_set_drvdata(pdev, pc);
++
++ pc->chip.dev = &pdev->dev;
++ pc->chip.ops = &mtk_pwm_ops;
++ pc->chip.base = -1;
++ pc->chip.npwm = NUM_PWM;
++
++ ret = pwmchip_add(&pc->chip);
++ if (ret < 0)
++ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
++
++ return ret;
++}
++
++static int mtk_pwm_remove(struct platform_device *pdev)
++{
++ struct mtk_pwm_chip *pc = platform_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < NUM_PWM; i++)
++ pwm_disable(&pc->chip.pwms[i]);
++
++ return pwmchip_remove(&pc->chip);
++}
++
++static const struct of_device_id mtk_pwm_of_match[] = {
++ { .compatible = "mediatek,mt7628-pwm" },
++ { }
++};
++
++MODULE_DEVICE_TABLE(of, mtk_pwm_of_match);
++
++static struct platform_driver mtk_pwm_driver = {
++ .driver = {
++ .name = "mtk-pwm",
++ .owner = THIS_MODULE,
++ .of_match_table = mtk_pwm_of_match,
++ },
++ .probe = mtk_pwm_probe,
++ .remove = mtk_pwm_remove,
++};
++
++module_platform_driver(mtk_pwm_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
++MODULE_ALIAS("platform:mtk-pwm");
diff --git a/target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch b/target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch
new file mode 100644
index 0000000000..6ee0845ffe
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch
@@ -0,0 +1,214 @@
+mtd: spi-nor: add support for switching between 3-byte and 4-byte addressing on w25q256 flash
+
+On some devices the flash chip needs to be in 3-byte addressing mode during
+reboot; otherwise the boot loader will fail to start.
+This mode, however, does not allow regular reads/writes to the upper 16M
+half. The W25Q256 has separate read commands for addresses above 16M, but
+it does not have any separate write commands.
+This patch changes the code to leave the chip in 3-byte mode most of the
+time and only switch during erase/write cycles that go to >16M
+addresses.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -89,6 +89,10 @@ struct flash_info {
+ #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */
+ #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
+ #define USE_CLSR BIT(14) /* use CLSR command */
++#define SPI_NOR_4B_READ_OP BIT(15) /*
++ * Like SPI_NOR_4B_OPCODES, but for read
++ * op code only.
++ */
+ };
+
+ #define JEDEC_MFR(info) ((info)->id[0])
+@@ -240,6 +244,15 @@ static inline u8 spi_nor_convert_3to4_er
+ ARRAY_SIZE(spi_nor_3to4_erase));
+ }
+
++static void spi_nor_set_4byte_read(struct spi_nor *nor,
++ const struct flash_info *info)
++{
++ nor->addr_width = 3;
++ nor->ext_addr = 0;
++ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
++ nor->flags |= SNOR_F_4B_EXT_ADDR;
++}
++
+ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+ const struct flash_info *info)
+ {
+@@ -467,6 +480,36 @@ static int spi_nor_erase_sector(struct s
+ return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
+ }
+
++static int spi_nor_check_ext_addr(struct spi_nor *nor, u32 addr)
++{
++ bool ext_addr;
++ int ret;
++ u8 cmd;
++
++ if (!(nor->flags & SNOR_F_4B_EXT_ADDR))
++ return 0;
++
++ ext_addr = !!(addr & 0xff000000);
++ if (nor->ext_addr == ext_addr)
++ return 0;
++
++ cmd = ext_addr ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
++ write_enable(nor);
++ ret = nor->write_reg(nor, cmd, NULL, 0);
++ if (ret)
++ return ret;
++
++ cmd = 0;
++ ret = nor->write_reg(nor, SPINOR_OP_WREAR, &cmd, 1);
++ if (ret)
++ return ret;
++
++ nor->addr_width = 3 + ext_addr;
++ nor->ext_addr = ext_addr;
++ write_disable(nor);
++ return 0;
++}
++
+ /*
+ * Erase an address range on the nor chip. The address range may extend
+ * one or more erase sectors. Return an error is there is a problem erasing.
+@@ -492,6 +535,10 @@ static int spi_nor_erase(struct mtd_info
+ if (ret)
+ return ret;
+
++ ret = spi_nor_check_ext_addr(nor, addr + len);
++ if (ret)
++ return ret;
++
+ /* whole-chip erase? */
+ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+ unsigned long timeout;
+@@ -542,6 +589,7 @@ static int spi_nor_erase(struct mtd_info
+ write_disable(nor);
+
+ erase_err:
++ spi_nor_check_ext_addr(nor, 0);
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+
+ instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
+@@ -834,7 +882,9 @@ static int spi_nor_lock(struct mtd_info
+ if (ret)
+ return ret;
+
++ spi_nor_check_ext_addr(nor, ofs + len);
+ ret = nor->flash_lock(nor, ofs, len);
++ spi_nor_check_ext_addr(nor, 0);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+ return ret;
+@@ -849,7 +899,9 @@ static int spi_nor_unlock(struct mtd_inf
+ if (ret)
+ return ret;
+
++ spi_nor_check_ext_addr(nor, ofs + len);
+ ret = nor->flash_unlock(nor, ofs, len);
++ spi_nor_check_ext_addr(nor, 0);
+
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+ return ret;
+@@ -1164,7 +1216,7 @@ static const struct flash_info spi_nor_i
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_READ_OP) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+
+@@ -1220,6 +1272,9 @@ static int spi_nor_read(struct mtd_info
+ if (ret)
+ return ret;
+
++ if (nor->flags & SNOR_F_4B_EXT_ADDR)
++ nor->addr_width = 4;
++
+ while (len) {
+ loff_t addr = from;
+
+@@ -1244,6 +1299,18 @@ static int spi_nor_read(struct mtd_info
+ ret = 0;
+
+ read_err:
++ if (nor->flags & SNOR_F_4B_EXT_ADDR) {
++ u8 val = 0;
++
++ if ((from + len) & 0xff000000) {
++ write_enable(nor);
++ nor->write_reg(nor, SPINOR_OP_WREAR, &val, 1);
++ write_disable(nor);
++ }
++
++ nor->addr_width = 3;
++ }
++
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+ return ret;
+ }
+@@ -1345,6 +1412,10 @@ static int spi_nor_write(struct mtd_info
+ if (ret)
+ return ret;
+
++ ret = spi_nor_check_ext_addr(nor, to + len);
++ if (ret < 0)
++ return ret;
++
+ for (i = 0; i < len; ) {
+ ssize_t written;
+ loff_t addr = to + i;
+@@ -1392,6 +1463,7 @@ static int spi_nor_write(struct mtd_info
+ }
+
+ write_err:
++ spi_nor_check_ext_addr(nor, 0);
+ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+ return ret;
+ }
+@@ -2808,8 +2880,10 @@ int spi_nor_scan(struct spi_nor *nor, co
+ } else if (mtd->size > 0x1000000) {
+ /* enable 4-byte addressing if the device exceeds 16MiB */
+ nor->addr_width = 4;
+- if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+- info->flags & SPI_NOR_4B_OPCODES)
++ if (info->flags & SPI_NOR_4B_READ_OP)
++ spi_nor_set_4byte_read(nor, info);
++ else if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
++ info->flags & SPI_NOR_4B_OPCODES)
+ spi_nor_set_4byte_opcodes(nor, info);
+ else
+ set_4byte(nor, info, 1);
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -102,6 +102,7 @@
+ /* Used for Macronix and Winbond flashes. */
+ #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
+ #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */
++#define SPINOR_OP_WREAR 0xc5 /* Write extended address register */
+
+ /* Used for Spansion flashes only. */
+ #define SPINOR_OP_BRWR 0x17 /* Bank register write */
+@@ -229,6 +230,7 @@ enum spi_nor_option_flags {
+ SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+ SNOR_F_READY_XSR_RDY = BIT(4),
+ SNOR_F_USE_CLSR = BIT(5),
++ SNOR_F_4B_EXT_ADDR = BIT(6),
+ };
+
+ /**
+@@ -280,6 +282,7 @@ struct spi_nor {
+ enum spi_nor_protocol reg_proto;
+ bool sst_write_second;
+ u32 flags;
++ u8 ext_addr;
+ u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+
+ int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
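
To make the switching sequence above easier to follow, here is a small standalone sketch (not part of the patch). flash_cmd()/flash_cmd_wr() are hypothetical stand-ins for the spi-nor write_reg() hook and only print the opcodes; the real spi_nor_check_ext_addr() additionally skips the sequence when the chip is already in the right mode.

/* Illustrative only: opcode values match the SPINOR_OP_* constants used by
 * the patch, the helpers are hypothetical and just trace the bus traffic.
 */
#include <stdio.h>

#define OP_WREN  0x06  /* write enable */
#define OP_WRDI  0x04  /* write disable */
#define OP_EN4B  0xb7  /* enter 4-byte address mode */
#define OP_EX4B  0xe9  /* exit 4-byte address mode */
#define OP_WREAR 0xc5  /* write extended address register */

static void flash_cmd(unsigned char op)
{
	printf("  cmd 0x%02x\n", op);
}

static void flash_cmd_wr(unsigned char op, unsigned char val)
{
	printf("  cmd 0x%02x, data 0x%02x\n", op, val);
}

/* Accesses that reach past 16M enter 4-byte mode; everything else drops
 * back to 3-byte mode with EAR cleared, so a reboot always finds the chip
 * in the state the boot loader expects.
 */
static void select_window(unsigned int end_addr)
{
	int upper = (end_addr & 0xff000000) != 0;

	printf("access ending at 0x%08x -> %s half\n",
	       end_addr, upper ? "upper" : "lower");
	flash_cmd(OP_WREN);
	flash_cmd(upper ? OP_EN4B : OP_EX4B);
	flash_cmd_wr(OP_WREAR, 0x00);   /* keep EAR at 0 for reboot safety */
	flash_cmd(OP_WRDI);
}

int main(void)
{
	select_window(0x00fff000);      /* stays in 3-byte mode */
	select_window(0x01234000);      /* needs the >16M window */
	return 0;
}
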
diff --git a/target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch b/target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch
new file mode 100644
index 0000000000..b20f081afe
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch
@@ -0,0 +1,124 @@
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1468,6 +1468,67 @@ write_err:
+ return ret;
+ }
+
++static int spi_nor_chunked_write(struct mtd_info *mtd, loff_t _to, size_t _len,
++ size_t *_retlen, const u_char *_buf)
++{
++ struct spi_nor *nor = mtd_to_spi_nor(mtd);
++ u32 addr_width = nor->addr_width + !!(nor->flags & SNOR_F_4B_EXT_ADDR);
++ int chunk_size;
++ int retlen = 0;
++ int ret;
++
++ chunk_size = nor->chunk_size;
++ if (!chunk_size)
++ chunk_size = _len;
++
++ if (addr_width > 3)
++ chunk_size -= addr_width - 3;
++
++ while (retlen < _len) {
++ size_t len = min_t(int, chunk_size, _len - retlen);
++ const u_char *buf = _buf + retlen;
++ loff_t to = _to + retlen;
++
++ if (nor->flags & SNOR_F_SST)
++ ret = sst_write(mtd, to, len, &retlen, buf);
++ else
++ ret = spi_nor_write(mtd, to, len, &retlen, buf);
++ if (ret)
++ return ret;
++ }
++
++ *_retlen += retlen;
++ return 0;
++}
++
++static int spi_nor_chunked_read(struct mtd_info *mtd, loff_t _from, size_t _len,
++ size_t *_retlen, u_char *_buf)
++{
++ struct spi_nor *nor = mtd_to_spi_nor(mtd);
++ int chunk_size;
++ int ret;
++
++ chunk_size = nor->chunk_size;
++ if (!chunk_size)
++ chunk_size = _len;
++
++ *_retlen = 0;
++ while (*_retlen < _len) {
++ size_t len = min_t(int, chunk_size, _len - *_retlen);
++ u_char *buf = _buf + *_retlen;
++ loff_t from = _from + *_retlen;
++ size_t retlen = 0;
++
++ ret = spi_nor_read(mtd, from, len, &retlen, buf);
++ if (ret)
++ return ret;
++
++ *_retlen += retlen;
++ }
++
++ return 0;
++}
++
+ /**
+ * macronix_quad_enable() - set QE bit in Status Register.
+ * @nor: pointer to a 'struct spi_nor'
+@@ -2826,10 +2887,12 @@ int spi_nor_scan(struct spi_nor *nor, co
+ }
+
+ /* sst nor chips use AAI word program */
+- if (info->flags & SST_WRITE)
++ if (info->flags & SST_WRITE) {
+ mtd->_write = sst_write;
+- else
++ nor->flags |= SNOR_F_SST;
++ } else {
+ mtd->_write = spi_nor_write;
++ }
+
+ if (info->flags & USE_FSR)
+ nor->flags |= SNOR_F_USE_FSR;
+@@ -2848,11 +2911,20 @@ int spi_nor_scan(struct spi_nor *nor, co
+ mtd->writebufsize = nor->page_size;
+
+ if (np) {
++ u32 val;
++
+ /* If we were instantiated by DT, use it */
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ else
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
++
++ if (!of_property_read_u32(np, "m25p,chunked-io", &val)) {
++ dev_info(dev, "using chunked io (size=%d)\n", val);
++ mtd->_read = spi_nor_chunked_read;
++ mtd->_write = spi_nor_chunked_write;
++ nor->chunk_size = val;
++ }
+ } else {
+ /* If we weren't instantiated by DT, default to fast-read */
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -231,6 +231,7 @@ enum spi_nor_option_flags {
+ SNOR_F_READY_XSR_RDY = BIT(4),
+ SNOR_F_USE_CLSR = BIT(5),
+ SNOR_F_4B_EXT_ADDR = BIT(6),
++ SNOR_F_SST = BIT(7),
+ };
+
+ /**
+@@ -272,6 +273,7 @@ struct spi_nor {
+ struct mutex lock;
+ struct device *dev;
+ u32 page_size;
++ size_t chunk_size;
+ u8 addr_width;
+ u8 erase_opcode;
+ u8 read_opcode;
diff --git a/target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch b/target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch
new file mode 100644
index 0000000000..0e09e1d4e0
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch
@@ -0,0 +1,15 @@
+--- a/drivers/usb/dwc2/platform.c
++++ b/drivers/usb/dwc2/platform.c
+@@ -406,6 +406,12 @@ static int dwc2_driver_probe(struct plat
+ if (retval)
+ return retval;
+
++ /* Enable USB port before any regs access */
++ if (dwc2_readl(hsotg->regs + PCGCTL) & 0x0f) {
++ dwc2_writel(0x00, hsotg->regs + PCGCTL);
++ /* TODO: mdelay(25) here? The vendor driver does not use it. */
++ }
++
+ retval = dwc2_get_dr_mode(hsotg);
+ if (retval)
+ goto error;
diff --git a/target/linux/ramips/patches-4.14/0070-weak_reordering.patch b/target/linux/ramips/patches-4.14/0070-weak_reordering.patch
new file mode 100644
index 0000000000..8f8c42c47f
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0070-weak_reordering.patch
@@ -0,0 +1,10 @@
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -57,6 +57,7 @@ choice
+ select COMMON_CLK
+ select CLKSRC_MIPS_GIC
+ select HW_HAS_PCI
++ select WEAK_REORDERING_BEYOND_LLSC
+ endchoice
+
+ choice
diff --git a/target/linux/ramips/patches-4.14/0090-ethernet.patch b/target/linux/ramips/patches-4.14/0090-ethernet.patch
new file mode 100644
index 0000000000..20f69fcc01
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0090-ethernet.patch
@@ -0,0 +1,41 @@
+From b6f779ea9c329451b89404583b45b9eb00155b32 Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Wed, 18 Nov 2015 03:58:26 +0100
+Subject: [PATCH 510/513] net-next: mediatek: add Kconfig and Makefile
+
+This patch adds the Makefile and Kconfig required to make the driver build.
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Michael Lee <igvtee@gmail.com>
+---
+ drivers/net/ethernet/Kconfig | 1 +
+ drivers/net/ethernet/Makefile | 1 +
+ drivers/net/ethernet/mtk/Kconfig | 62 ++++++++++++++++++++++++++++++++
+ drivers/net/ethernet/mtk/Makefile | 20 +++++++++++
+ 4 files changed, 84 insertions(+)
+ create mode 100644 drivers/net/ethernet/mtk/Kconfig
+ create mode 100644 drivers/net/ethernet/mtk/Makefile
+
+--- a/drivers/net/ethernet/Kconfig
++++ b/drivers/net/ethernet/Kconfig
+@@ -110,7 +110,7 @@ config LANTIQ_ETOP
+ Support for the MII0 inside the Lantiq SoC
+
+ source "drivers/net/ethernet/marvell/Kconfig"
+-source "drivers/net/ethernet/mediatek/Kconfig"
++source "drivers/net/ethernet/mtk/Kconfig"
+ source "drivers/net/ethernet/mellanox/Kconfig"
+ source "drivers/net/ethernet/micrel/Kconfig"
+ source "drivers/net/ethernet/microchip/Kconfig"
+--- a/drivers/net/ethernet/Makefile
++++ b/drivers/net/ethernet/Makefile
+@@ -51,7 +51,7 @@ obj-$(CONFIG_JME) += jme.o
+ obj-$(CONFIG_KORINA) += korina.o
+ obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
+ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
+-obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
++obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mtk/
+ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
+ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
diff --git a/target/linux/ramips/patches-4.14/0098-disable_cm.patch b/target/linux/ramips/patches-4.14/0098-disable_cm.patch
new file mode 100644
index 0000000000..bc00619dff
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0098-disable_cm.patch
@@ -0,0 +1,19 @@
+--- a/arch/mips/kernel/mips-cm.c
++++ b/arch/mips/kernel/mips-cm.c
+@@ -237,6 +237,7 @@ int mips_cm_probe(void)
+
+ /* disable CM regions */
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR);
++ /*
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK);
+@@ -244,7 +245,7 @@ int mips_cm_probe(void)
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK);
+-
++*/
+ /* probe for an L2-only sync region */
+ mips_cm_probe_l2sync();
+
diff --git a/target/linux/ramips/patches-4.14/0099-pci-mt7620.patch b/target/linux/ramips/patches-4.14/0099-pci-mt7620.patch
new file mode 100644
index 0000000000..997fb6a2b3
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0099-pci-mt7620.patch
@@ -0,0 +1,10 @@
+--- a/arch/mips/pci/pci-mt7620.c
++++ b/arch/mips/pci/pci-mt7620.c
+@@ -33,7 +33,6 @@
+ #define RALINK_GPIOMODE 0x60
+
+ #define PPLL_CFG1 0x9c
+-#define PDRV_SW_SET BIT(23)
+
+ #define PPLL_DRV 0xa0
+ #define PDRV_SW_SET (1<<31)
diff --git a/target/linux/ramips/patches-4.14/0100-prom_fixes.patch b/target/linux/ramips/patches-4.14/0100-prom_fixes.patch
new file mode 100644
index 0000000000..91ac3b22c4
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0100-prom_fixes.patch
@@ -0,0 +1,66 @@
+From 67b7bff0fd364c194e653f69baa623ba2141bd4c Mon Sep 17 00:00:00 2001
+From: John Crispin <blogic@openwrt.org>
+Date: Mon, 4 Aug 2014 18:46:02 +0200
+Subject: [PATCH 07/53] MIPS: ralink: copy the commandline from the devicetree
+
+Signed-off-by: John Crispin <blogic@openwrt.org>
+---
+ arch/mips/ralink/of.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/mips/ralink/of.c
++++ b/arch/mips/ralink/of.c
+@@ -3,7 +3,7 @@
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
++ * Copyright (C) 2008-2014 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
+ */
+@@ -66,6 +66,19 @@ static int __init early_init_dt_find_mem
+ return 0;
+ }
+
++static int chosen_dtb;
++
++static int __init early_init_dt_find_chosen(unsigned long node, const char *uname,
++ int depth, void *data)
++{
++ if (depth == 1 && !strcmp(uname, "chosen"))
++ chosen_dtb = 1;
++
++ return 0;
++}
++
++extern struct boot_param_header __image_dtb;
++
+ void __init plat_mem_setup(void)
+ {
+ void *dtb = NULL;
+@@ -82,7 +95,11 @@ void __init plat_mem_setup(void)
+ else if (__dtb_start != __dtb_end)
+ dtb = (void *)__dtb_start;
+
+- __dt_setup_arch(dtb);
++ __dt_setup_arch(&__image_dtb);
++
++ of_scan_flat_dt(early_init_dt_find_chosen, NULL);
++ if (chosen_dtb)
++ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+--- a/arch/mips/kernel/head.S
++++ b/arch/mips/kernel/head.S
+@@ -85,6 +85,9 @@ EXPORT(__image_cmdline)
+ .fill 0x400
+ #endif /* CONFIG_IMAGE_CMDLINE_HACK */
+
++ .ascii "OWRTDTB:"
++ EXPORT(__image_dtb)
++ .fill 0x4000
+ __REF
+
+ NESTED(kernel_entry, 16, sp) # kernel entry point
diff --git a/target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch b/target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch
new file mode 100644
index 0000000000..865ae79f43
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch
@@ -0,0 +1,97 @@
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -57,6 +57,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/
+ obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+ obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o
++obj-$(CONFIG_SOC_MT7620) += linkit.o
+
+ lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
+ lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
+--- /dev/null
++++ b/drivers/misc/linkit.c
+@@ -0,0 +1,84 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/mtd/mtd.h>
++#include <linux/gpio.h>
++
++#define LINKIT_LATCH_GPIO 11
++
++struct linkit_hw_data {
++ char board[16];
++ char rev[16];
++};
++
++static void sanify_string(char *s)
++{
++ int i;
++
++ for (i = 0; i < 15; i++)
++ if (s[i] <= 0x20)
++ s[i] = '\0';
++ s[15] = '\0';
++}
++
++static int linkit_probe(struct platform_device *pdev)
++{
++ struct linkit_hw_data hw;
++ struct mtd_info *mtd;
++ size_t retlen;
++ int ret;
++
++ mtd = get_mtd_device_nm("factory");
++ if (IS_ERR(mtd))
++ return PTR_ERR(mtd);
++
++ ret = mtd_read(mtd, 0x400, sizeof(hw), &retlen, (u_char *) &hw);
++ put_mtd_device(mtd);
++
++ sanify_string(hw.board);
++ sanify_string(hw.rev);
++
++ dev_info(&pdev->dev, "Version : %s\n", hw.board);
++ dev_info(&pdev->dev, "Revision : %s\n", hw.rev);
++
++ if (!strcmp(hw.board, "LINKITS7688")) {
++ dev_info(&pdev->dev, "setting up bootstrap latch\n");
++
++ if (devm_gpio_request(&pdev->dev, LINKIT_LATCH_GPIO, "bootstrap")) {
++ dev_err(&pdev->dev, "failed to setup bootstrap gpio\n");
++ return -1;
++ }
++ gpio_direction_output(LINKIT_LATCH_GPIO, 0);
++ }
++
++ return 0;
++}
++
++static const struct of_device_id linkit_match[] = {
++ { .compatible = "mediatek,linkit" },
++ {},
++};
++MODULE_DEVICE_TABLE(of, linkit_match);
++
++static struct platform_driver linkit_driver = {
++ .probe = linkit_probe,
++ .driver = {
++ .name = "mtk-linkit",
++ .owner = THIS_MODULE,
++ .of_match_table = linkit_match,
++ },
++};
++
++int __init linkit_init(void)
++{
++ return platform_driver_register(&linkit_driver);
++}
++late_initcall_sync(linkit_init);
diff --git a/target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch b/target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch
new file mode 100644
index 0000000000..991e19b6df
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch
@@ -0,0 +1,61 @@
+There is a variant of MT7621 which contains only one CPU core instead of 2.
+This is not reflected in the config register, so the kernel detects more
+physical cores, which leads to a hang on SMP bringup.
+Add a hack to detect missing cores.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+
+--- a/arch/mips/kernel/smp-cps.c
++++ b/arch/mips/kernel/smp-cps.c
+@@ -47,6 +47,11 @@ static unsigned core_vpe_count(unsigned
+ return mips_cps_numvps(cluster, core);
+ }
+
++bool __weak plat_cpu_core_present(int core)
++{
++ return true;
++}
++
+ static void __init cps_smp_setup(void)
+ {
+ unsigned int nclusters, ncores, nvpes, core_vpes;
+@@ -64,6 +69,8 @@ static void __init cps_smp_setup(void)
+
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
++ if (!plat_cpu_core_present(c))
++ continue;
+ core_vpes = core_vpe_count(cl, c);
+
+ if (c > 0)
+--- a/arch/mips/ralink/mt7621.c
++++ b/arch/mips/ralink/mt7621.c
+@@ -15,6 +15,7 @@
+ #include <asm/mips-cps.h>
+ #include <asm/mach-ralink/ralink_regs.h>
+ #include <asm/mach-ralink/mt7621.h>
++#include <asm/mips-boards/launch.h>
+
+ #include <pinmux.h>
+
+@@ -162,6 +163,20 @@ void __init ralink_of_remap(void)
+ panic("Failed to remap core resources");
+ }
+
++bool plat_cpu_core_present(int core)
++{
++ struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
++
++ if (!core)
++ return true;
++ launch += core * 2; /* 2 VPEs per core */
++ if (!(launch->flags & LAUNCH_FREADY))
++ return false;
++ if (launch->flags & (LAUNCH_FGO | LAUNCH_FGONE))
++ return false;
++ return true;
++}
++
+ void prom_soc_init(struct ralink_soc_info *soc_info)
+ {
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
diff --git a/target/linux/ramips/patches-4.14/101-mt7621-timer.patch b/target/linux/ramips/patches-4.14/101-mt7621-timer.patch
new file mode 100644
index 0000000000..ea8be56942
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/101-mt7621-timer.patch
@@ -0,0 +1,98 @@
+--- a/arch/mips/ralink/mt7621.c
++++ b/arch/mips/ralink/mt7621.c
+@@ -16,6 +16,7 @@
+ #include <asm/mach-ralink/ralink_regs.h>
+ #include <asm/mach-ralink/mt7621.h>
+ #include <asm/mips-boards/launch.h>
++#include <asm/delay.h>
+
+ #include <pinmux.h>
+
+@@ -177,6 +178,58 @@ bool plat_cpu_core_present(int core)
+ return true;
+ }
+
++#define LPS_PREC 8
++/*
++* Re-calibration lpj(loop-per-jiffy).
++* (derived from kernel/calibrate.c)
++*/
++static int udelay_recal(void)
++{
++ unsigned int i, lpj = 0;
++ unsigned long ticks, loopbit;
++ int lps_precision = LPS_PREC;
++
++ lpj = (1<<12);
++
++ while ((lpj <<= 1) != 0) {
++ /* wait for "start of" clock tick */
++ ticks = jiffies;
++ while (ticks == jiffies)
++ /* nothing */;
++
++ /* Go .. */
++ ticks = jiffies;
++ __delay(lpj);
++ ticks = jiffies - ticks;
++ if (ticks)
++ break;
++ }
++
++ /*
++ * Do a binary approximation to get lpj set to
++ * equal one clock (up to lps_precision bits)
++ */
++ lpj >>= 1;
++ loopbit = lpj;
++ while (lps_precision-- && (loopbit >>= 1)) {
++ lpj |= loopbit;
++ ticks = jiffies;
++ while (ticks == jiffies)
++ /* nothing */;
++ ticks = jiffies;
++ __delay(lpj);
++ if (jiffies != ticks) /* longer than 1 tick */
++ lpj &= ~loopbit;
++ }
++ printk(KERN_INFO "%d CPUs re-calibrate udelay(lpj = %d)\n", NR_CPUS, lpj);
++
++ for(i=0; i< NR_CPUS; i++)
++ cpu_data[i].udelay_val = lpj;
++
++ return 0;
++}
++device_initcall(udelay_recal);
++
+ void prom_soc_init(struct ralink_soc_info *soc_info)
+ {
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
+--- a/arch/mips/ralink/Kconfig
++++ b/arch/mips/ralink/Kconfig
+@@ -58,6 +58,7 @@ choice
+ select CLKSRC_MIPS_GIC
+ select HW_HAS_PCI
+ select WEAK_REORDERING_BEYOND_LLSC
++ select GENERIC_CLOCKEVENTS_BROADCAST
+ endchoice
+
+ choice
+--- a/arch/mips/ralink/timer-gic.c
++++ b/arch/mips/ralink/timer-gic.c
+@@ -12,6 +12,7 @@
+ #include <linux/of.h>
+ #include <linux/clk-provider.h>
+ #include <linux/clocksource.h>
++#include <asm/time.h>
+
+ #include "common.h"
+
+@@ -19,6 +20,8 @@ void __init plat_time_init(void)
+ {
+ ralink_of_remap();
+
++ mips_hpt_frequency = 880000000 / 2;
++
+ of_clk_init(NULL);
+ timer_probe();
+ }
diff --git a/target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch b/target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch
new file mode 100644
index 0000000000..408fc13e7a
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch
@@ -0,0 +1,14 @@
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1050,6 +1050,11 @@ static const struct flash_info spi_nor_i
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
++ {
++ "gd25q512", INFO(0xc84020, 0, 64 * 1024, 1024,
++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4B_OPCODES)
++ },
+
+ /* Intel/Numonyx -- xxxs33b */
+ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
diff --git a/target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch b/target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch
new file mode 100644
index 0000000000..7b13e68d8f
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch
@@ -0,0 +1,64 @@
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1083,7 +1083,7 @@ static const struct flash_info spi_nor_i
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+ { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
+@@ -1947,7 +1947,11 @@ static int spi_nor_read_sfdp(struct spi_
+ nor->read_dummy = 8;
+
+ while (len) {
+- ret = nor->read(nor, addr, len, (u8 *)buf);
++ size_t _len = len;
++
++ if (nor->chunk_size)
++ _len = min(len, nor->chunk_size);
++ ret = nor->read(nor, addr, _len, (u8 *)buf);
+ if (!ret || ret > len) {
+ ret = -EIO;
+ goto read_err;
+@@ -2791,6 +2795,7 @@ int spi_nor_scan(struct spi_nor *nor, co
+ struct device *dev = nor->dev;
+ struct mtd_info *mtd = &nor->mtd;
+ struct device_node *np = spi_nor_get_flash_node(nor);
++ u32 val;
+ int ret;
+ int i;
+
+@@ -2803,6 +2808,9 @@ int spi_nor_scan(struct spi_nor *nor, co
+ nor->read_proto = SNOR_PROTO_1_1_1;
+ nor->write_proto = SNOR_PROTO_1_1_1;
+
++ if (np && !of_property_read_u32(np, "m25p,chunked-io", &val))
++ nor->chunk_size = val;
++
+ if (name)
+ info = spi_nor_match_id(name);
+ /* Try to auto-detect if chip name wasn't specified or not found */
+@@ -2916,19 +2924,16 @@ int spi_nor_scan(struct spi_nor *nor, co
+ mtd->writebufsize = nor->page_size;
+
+ if (np) {
+- u32 val;
+-
+ /* If we were instantiated by DT, use it */
+ if (of_property_read_bool(np, "m25p,fast-read"))
+ params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+ else
+ params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+
+- if (!of_property_read_u32(np, "m25p,chunked-io", &val)) {
+- dev_info(dev, "using chunked io (size=%d)\n", val);
++ if (nor->chunk_size) {
++ dev_info(dev, "using chunked io (size=%d)\n", nor->chunk_size);
+ mtd->_read = spi_nor_chunked_read;
+ mtd->_write = spi_nor_chunked_write;
+- nor->chunk_size = val;
+ }
+ } else {
+ /* If we weren't instantiated by DT, default to fast-read */
diff --git a/target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch b/target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch
new file mode 100644
index 0000000000..906f68adb5
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch
@@ -0,0 +1,10 @@
+--- a/arch/mips/ralink/mt7621.c
++++ b/arch/mips/ralink/mt7621.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/kernel.h>
+ #include <linux/init.h>
++#include <linux/jiffies.h>
+
+ #include <asm/mipsregs.h>
+ #include <asm/smp-ops.h>
diff --git a/target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch b/target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch
new file mode 100644
index 0000000000..3310a6bdba
--- /dev/null
+++ b/target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch
@@ -0,0 +1,21 @@
+--- a/arch/mips/pci/pci-mt7620.c
++++ b/arch/mips/pci/pci-mt7620.c
+@@ -35,6 +35,7 @@
+ #define PPLL_CFG1 0x9c
+
+ #define PPLL_DRV 0xa0
++#define PPLL_LD (1<<23)
+ #define PDRV_SW_SET (1<<31)
+ #define LC_CKDRVPD (1<<19)
+ #define LC_CKDRVOHZ (1<<18)
+@@ -242,8 +243,8 @@ static int mt7620_pci_hw_init(struct pla
+ rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+ mdelay(100);
+
+- if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
+- dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
++ if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
++ dev_err(&pdev->dev, "MT7620 PPLL is unlocked, aborting init\n");
+ reset_control_assert(rstpcie0);
+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+ return -1;