Diffstat (limited to 'target')
77 files changed, 27451 insertions, 24 deletions
diff --git a/target/linux/generic/backport-4.14/080-v4.15-0001-arch-define-weak-abort.patch b/target/linux/generic/backport-4.14/080-v4.15-0001-arch-define-weak-abort.patch index 2d247a39e4..1867234540 100644 --- a/target/linux/generic/backport-4.14/080-v4.15-0001-arch-define-weak-abort.patch +++ b/target/linux/generic/backport-4.14/080-v4.15-0001-arch-define-weak-abort.patch @@ -30,11 +30,9 @@ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> kernel/exit.c | 8 ++++++++ 1 file changed, 8 insertions(+) -diff --git a/kernel/exit.c b/kernel/exit.c -index 6b4298a41167..df0c91d5606c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c -@@ -1755,3 +1755,11 @@ COMPAT_SYSCALL_DEFINE5(waitid, +@@ -1755,3 +1755,11 @@ Efault: return -EFAULT; } #endif @@ -46,6 +44,3 @@ index 6b4298a41167..df0c91d5606c 100644 + /* if that doesn't kill us, halt */ + panic("Oops failed to kill thread"); +} --- -2.11.0 - diff --git a/target/linux/generic/backport-4.14/081-v4.15-0002-kernel-exit.c-export-abort-to-modules.patch b/target/linux/generic/backport-4.14/081-v4.15-0002-kernel-exit.c-export-abort-to-modules.patch index 5b95b6798d..4c1402f153 100644 --- a/target/linux/generic/backport-4.14/081-v4.15-0002-kernel-exit.c-export-abort-to-modules.patch +++ b/target/linux/generic/backport-4.14/081-v4.15-0002-kernel-exit.c-export-abort-to-modules.patch @@ -24,11 +24,9 @@ Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org> kernel/exit.c | 1 + 4 files changed, 1 insertion(+), 3 deletions(-) -diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c -index 5cf04888c581..3e26c6f7a191 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c -@@ -793,7 +793,6 @@ void abort(void) +@@ -790,7 +790,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } @@ -36,8 +34,6 @@ index 5cf04888c581..3e26c6f7a191 100644 void __init trap_init(void) { -diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c -index cb79fba79d43..b88a8dd14933 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -122,7 +122,6 @@ void abort(void) @@ -48,8 +44,6 @@ index cb79fba79d43..b88a8dd14933 100644 void __init trap_init(void) { -diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c -index 5f25b39f04d4..c4ac6043ebb0 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -298,7 +298,6 @@ void abort(void) @@ -60,8 +54,6 @@ index 5f25b39f04d4..c4ac6043ebb0 100644 void __init trap_init(void) { -diff --git a/kernel/exit.c b/kernel/exit.c -index df0c91d5606c..995453d9fb55 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1763,3 +1763,4 @@ __weak void abort(void) @@ -69,6 +61,3 @@ index df0c91d5606c..995453d9fb55 100644 panic("Oops failed to kill thread"); } +EXPORT_SYMBOL(abort); --- -2.11.0 - diff --git a/target/linux/generic/hack-4.14/204-module_strip.patch b/target/linux/generic/hack-4.14/204-module_strip.patch index 2cbcf0f72f..f450d6c070 100644 --- a/target/linux/generic/hack-4.14/204-module_strip.patch +++ b/target/linux/generic/hack-4.14/204-module_strip.patch @@ -133,8 +133,8 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name> } +#endif - check_modinfo_retpoline(mod, info); - + check_modinfo_retpoline(mod, info); + --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1982,7 +1982,9 @@ static void read_symbols(char *modname) @@ -172,12 +172,12 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name> static void add_retpoline(struct buffer *b) { +#ifndef CONFIG_MODULE_STRIPPED - buf_printf(b, "\n#ifdef RETPOLINE\n"); - 
buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); - buf_printf(b, "#endif\n"); + buf_printf(b, "\n#ifdef RETPOLINE\n"); + buf_printf(b, "MODULE_INFO(retpoline, \"Y\");\n"); + buf_printf(b, "#endif\n"); +#endif } - + static void add_staging_flag(struct buffer *b, const char *name) { +#ifndef CONFIG_MODULE_STRIPPED diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/Kconfig b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/Kconfig new file mode 100644 index 0000000000..cf883c4160 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/Kconfig @@ -0,0 +1,62 @@ +config NET_VENDOR_MEDIATEK + tristate "Mediatek/Ralink ethernet driver" + depends on RALINK + help + This driver supports the ethernet mac inside the Mediatek and Ralink WiSoCs + +config NET_MEDIATEK_SOC + def_tristate NET_VENDOR_MEDIATEK + +if NET_MEDIATEK_SOC +choice + prompt "MAC type" + +config NET_MEDIATEK_RT2880 + bool "RT2882" + depends on MIPS && SOC_RT288X + +config NET_MEDIATEK_RT3050 + bool "RT3050/MT7628" + depends on MIPS && (SOC_RT305X || SOC_MT7620) + +config NET_MEDIATEK_RT3883 + bool "RT3883" + depends on MIPS && SOC_RT3883 + +config NET_MEDIATEK_MT7620 + bool "MT7620" + depends on MIPS && SOC_MT7620 + +config NET_MEDIATEK_MT7621 + bool "MT7621" + depends on MIPS && SOC_MT7621 + +endchoice + +config NET_MEDIATEK_MDIO + def_bool NET_MEDIATEK_SOC + depends on (NET_MEDIATEK_RT2880 || NET_MEDIATEK_RT3883 || NET_MEDIATEK_MT7620 || NET_MEDIATEK_MT7621) + select PHYLIB + +config NET_MEDIATEK_MDIO_RT2880 + def_bool NET_MEDIATEK_SOC + depends on (NET_MEDIATEK_RT2880 || NET_MEDIATEK_RT3883) + select NET_MEDIATEK_MDIO + +config NET_MEDIATEK_MDIO_MT7620 + def_bool NET_MEDIATEK_SOC + depends on (NET_MEDIATEK_MT7620 || NET_MEDIATEK_MT7621) + select NET_MEDIATEK_MDIO + +config NET_MEDIATEK_ESW_RT3050 + def_tristate NET_MEDIATEK_SOC + depends on NET_MEDIATEK_RT3050 + +config NET_MEDIATEK_GSW_MT7620 + def_tristate NET_MEDIATEK_SOC + depends on NET_MEDIATEK_MT7620 + +config NET_MEDIATEK_GSW_MT7621 + def_tristate NET_MEDIATEK_SOC + depends on NET_MEDIATEK_MT7621 +endif diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/Makefile b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/Makefile new file mode 100644 index 0000000000..07ba4c2ecf --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/Makefile @@ -0,0 +1,20 @@ +# +# Makefile for the Ralink SoCs built-in ethernet macs +# + +mtk-eth-soc-y += mtk_eth_soc.o ethtool.o + +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_RT2880) += mdio_rt2880.o +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o + +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_RT2880) += soc_rt2880.o +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_RT3050) += soc_rt3050.o +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_RT3883) += soc_rt3883.o +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7620) += soc_mt7620.o +mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o + +obj-$(CONFIG_NET_MEDIATEK_ESW_RT3050) += esw_rt3050.o +obj-$(CONFIG_NET_MEDIATEK_GSW_MT7620) += gsw_mt7620.o mt7530.o +obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o mt7530.o +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk-eth-soc.o diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/esw_rt3050.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/esw_rt3050.c new file mode 100644 index 0000000000..6cad5856cd --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/esw_rt3050.c @@ -0,0 +1,1461 @@ +/* This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + * Copyright (C) 2016 Vittorio Gambaletta <openwrt@vittgam.net> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <asm/mach-ralink/ralink_regs.h> +#include <linux/of_irq.h> + +#include <linux/switch.h> + +#include "mtk_eth_soc.h" + +/* HW limitations for this switch: + * - No large frame support (PKT_MAX_LEN at most 1536) + * - Can't have untagged vlan and tagged vlan on one port at the same time, + * though this might be possible using the undocumented PPE. + */ + +#define RT305X_ESW_REG_ISR 0x00 +#define RT305X_ESW_REG_IMR 0x04 +#define RT305X_ESW_REG_FCT0 0x08 +#define RT305X_ESW_REG_PFC1 0x14 +#define RT305X_ESW_REG_ATS 0x24 +#define RT305X_ESW_REG_ATS0 0x28 +#define RT305X_ESW_REG_ATS1 0x2c +#define RT305X_ESW_REG_ATS2 0x30 +#define RT305X_ESW_REG_PVIDC(_n) (0x40 + 4 * (_n)) +#define RT305X_ESW_REG_VLANI(_n) (0x50 + 4 * (_n)) +#define RT305X_ESW_REG_VMSC(_n) (0x70 + 4 * (_n)) +#define RT305X_ESW_REG_POA 0x80 +#define RT305X_ESW_REG_FPA 0x84 +#define RT305X_ESW_REG_SOCPC 0x8c +#define RT305X_ESW_REG_POC0 0x90 +#define RT305X_ESW_REG_POC1 0x94 +#define RT305X_ESW_REG_POC2 0x98 +#define RT305X_ESW_REG_SGC 0x9c +#define RT305X_ESW_REG_STRT 0xa0 +#define RT305X_ESW_REG_PCR0 0xc0 +#define RT305X_ESW_REG_PCR1 0xc4 +#define RT305X_ESW_REG_FPA2 0xc8 +#define RT305X_ESW_REG_FCT2 0xcc +#define RT305X_ESW_REG_SGC2 0xe4 +#define RT305X_ESW_REG_P0LED 0xa4 +#define RT305X_ESW_REG_P1LED 0xa8 +#define RT305X_ESW_REG_P2LED 0xac +#define RT305X_ESW_REG_P3LED 0xb0 +#define RT305X_ESW_REG_P4LED 0xb4 +#define RT305X_ESW_REG_PXPC(_x) (0xe8 + (4 * _x)) +#define RT305X_ESW_REG_P1PC 0xec +#define RT305X_ESW_REG_P2PC 0xf0 +#define RT305X_ESW_REG_P3PC 0xf4 +#define RT305X_ESW_REG_P4PC 0xf8 +#define RT305X_ESW_REG_P5PC 0xfc + +#define RT305X_ESW_LED_LINK 0 +#define RT305X_ESW_LED_100M 1 +#define RT305X_ESW_LED_DUPLEX 2 +#define RT305X_ESW_LED_ACTIVITY 3 +#define RT305X_ESW_LED_COLLISION 4 +#define RT305X_ESW_LED_LINKACT 5 +#define RT305X_ESW_LED_DUPLCOLL 6 +#define RT305X_ESW_LED_10MACT 7 +#define RT305X_ESW_LED_100MACT 8 +/* Additional led states not in datasheet: */ +#define RT305X_ESW_LED_BLINK 10 +#define RT305X_ESW_LED_ON 12 + +#define RT305X_ESW_LINK_S 25 +#define RT305X_ESW_DUPLEX_S 9 +#define RT305X_ESW_SPD_S 0 + +#define RT305X_ESW_PCR0_WT_NWAY_DATA_S 16 +#define RT305X_ESW_PCR0_WT_PHY_CMD BIT(13) +#define RT305X_ESW_PCR0_CPU_PHY_REG_S 8 + +#define RT305X_ESW_PCR1_WT_DONE BIT(0) + +#define RT305X_ESW_ATS_TIMEOUT (5 * HZ) +#define RT305X_ESW_PHY_TIMEOUT (5 * HZ) + +#define RT305X_ESW_PVIDC_PVID_M 0xfff +#define RT305X_ESW_PVIDC_PVID_S 12 + +#define RT305X_ESW_VLANI_VID_M 0xfff +#define RT305X_ESW_VLANI_VID_S 12 + +#define RT305X_ESW_VMSC_MSC_M 0xff +#define RT305X_ESW_VMSC_MSC_S 8 + +#define RT305X_ESW_SOCPC_DISUN2CPU_S 0 +#define RT305X_ESW_SOCPC_DISMC2CPU_S 8 +#define RT305X_ESW_SOCPC_DISBC2CPU_S 16 +#define RT305X_ESW_SOCPC_CRC_PADDING 
BIT(25) + +#define RT305X_ESW_POC0_EN_BP_S 0 +#define RT305X_ESW_POC0_EN_FC_S 8 +#define RT305X_ESW_POC0_DIS_RMC2CPU_S 16 +#define RT305X_ESW_POC0_DIS_PORT_M 0x7f +#define RT305X_ESW_POC0_DIS_PORT_S 23 + +#define RT305X_ESW_POC2_UNTAG_EN_M 0xff +#define RT305X_ESW_POC2_UNTAG_EN_S 0 +#define RT305X_ESW_POC2_ENAGING_S 8 +#define RT305X_ESW_POC2_DIS_UC_PAUSE_S 16 + +#define RT305X_ESW_SGC2_DOUBLE_TAG_M 0x7f +#define RT305X_ESW_SGC2_DOUBLE_TAG_S 0 +#define RT305X_ESW_SGC2_LAN_PMAP_M 0x3f +#define RT305X_ESW_SGC2_LAN_PMAP_S 24 + +#define RT305X_ESW_PFC1_EN_VLAN_M 0xff +#define RT305X_ESW_PFC1_EN_VLAN_S 16 +#define RT305X_ESW_PFC1_EN_TOS_S 24 + +#define RT305X_ESW_VLAN_NONE 0xfff + +#define RT305X_ESW_GSC_BC_STROM_MASK 0x3 +#define RT305X_ESW_GSC_BC_STROM_SHIFT 4 + +#define RT305X_ESW_GSC_LED_FREQ_MASK 0x3 +#define RT305X_ESW_GSC_LED_FREQ_SHIFT 23 + +#define RT305X_ESW_POA_LINK_MASK 0x1f +#define RT305X_ESW_POA_LINK_SHIFT 25 + +#define RT305X_ESW_PORT_ST_CHG BIT(26) +#define RT305X_ESW_PORT0 0 +#define RT305X_ESW_PORT1 1 +#define RT305X_ESW_PORT2 2 +#define RT305X_ESW_PORT3 3 +#define RT305X_ESW_PORT4 4 +#define RT305X_ESW_PORT5 5 +#define RT305X_ESW_PORT6 6 + +#define RT305X_ESW_PORTS_NONE 0 + +#define RT305X_ESW_PMAP_LLLLLL 0x3f +#define RT305X_ESW_PMAP_LLLLWL 0x2f +#define RT305X_ESW_PMAP_WLLLLL 0x3e + +#define RT305X_ESW_PORTS_INTERNAL \ + (BIT(RT305X_ESW_PORT0) | BIT(RT305X_ESW_PORT1) | \ + BIT(RT305X_ESW_PORT2) | BIT(RT305X_ESW_PORT3) | \ + BIT(RT305X_ESW_PORT4)) + +#define RT305X_ESW_PORTS_NOCPU \ + (RT305X_ESW_PORTS_INTERNAL | BIT(RT305X_ESW_PORT5)) + +#define RT305X_ESW_PORTS_CPU BIT(RT305X_ESW_PORT6) + +#define RT305X_ESW_PORTS_ALL \ + (RT305X_ESW_PORTS_NOCPU | RT305X_ESW_PORTS_CPU) + +#define RT305X_ESW_NUM_VLANS 16 +#define RT305X_ESW_NUM_VIDS 4096 +#define RT305X_ESW_NUM_PORTS 7 +#define RT305X_ESW_NUM_LANWAN 6 +#define RT305X_ESW_NUM_LEDS 5 + +#define RT5350_ESW_REG_PXTPC(_x) (0x150 + (4 * _x)) +#define RT5350_EWS_REG_LED_POLARITY 0x168 +#define RT5350_RESET_EPHY BIT(24) + +enum { + /* Global attributes. */ + RT305X_ESW_ATTR_ENABLE_VLAN, + RT305X_ESW_ATTR_ALT_VLAN_DISABLE, + RT305X_ESW_ATTR_BC_STATUS, + RT305X_ESW_ATTR_LED_FREQ, + /* Port attributes. */ + RT305X_ESW_ATTR_PORT_DISABLE, + RT305X_ESW_ATTR_PORT_DOUBLETAG, + RT305X_ESW_ATTR_PORT_UNTAG, + RT305X_ESW_ATTR_PORT_LED, + RT305X_ESW_ATTR_PORT_LAN, + RT305X_ESW_ATTR_PORT_RECV_BAD, + RT305X_ESW_ATTR_PORT_RECV_GOOD, + RT5350_ESW_ATTR_PORT_TR_BAD, + RT5350_ESW_ATTR_PORT_TR_GOOD, +}; + +struct esw_port { + bool disable; + bool doubletag; + bool untag; + u8 led; + u16 pvid; +}; + +struct esw_vlan { + u8 ports; + u16 vid; +}; + +enum { + RT305X_ESW_VLAN_CONFIG_NONE = 0, + RT305X_ESW_VLAN_CONFIG_LLLLW, + RT305X_ESW_VLAN_CONFIG_WLLLL, +}; + +struct rt305x_esw { + struct device *dev; + void __iomem *base; + int irq; + + /* Protects against concurrent register r/w operations. 
*/ + spinlock_t reg_rw_lock; + + unsigned char port_map; + unsigned char port_disable; + unsigned int reg_initval_fct2; + unsigned int reg_initval_fpa2; + unsigned int reg_led_polarity; + + struct switch_dev swdev; + bool global_vlan_enable; + bool alt_vlan_disable; + int bc_storm_protect; + int led_frequency; + struct esw_vlan vlans[RT305X_ESW_NUM_VLANS]; + struct esw_port ports[RT305X_ESW_NUM_PORTS]; + +}; + +static inline void esw_w32(struct rt305x_esw *esw, u32 val, unsigned reg) +{ + __raw_writel(val, esw->base + reg); +} + +static inline u32 esw_r32(struct rt305x_esw *esw, unsigned reg) +{ + return __raw_readl(esw->base + reg); +} + +static inline void esw_rmw_raw(struct rt305x_esw *esw, unsigned reg, + unsigned long mask, unsigned long val) +{ + unsigned long t; + + t = __raw_readl(esw->base + reg) & ~mask; + __raw_writel(t | val, esw->base + reg); +} + +static void esw_rmw(struct rt305x_esw *esw, unsigned reg, + unsigned long mask, unsigned long val) +{ + unsigned long flags; + + spin_lock_irqsave(&esw->reg_rw_lock, flags); + esw_rmw_raw(esw, reg, mask, val); + spin_unlock_irqrestore(&esw->reg_rw_lock, flags); +} + +static u32 rt305x_mii_write(struct rt305x_esw *esw, u32 phy_addr, + u32 phy_register, u32 write_data) +{ + unsigned long t_start = jiffies; + int ret = 0; + + while (1) { + if (!(esw_r32(esw, RT305X_ESW_REG_PCR1) & + RT305X_ESW_PCR1_WT_DONE)) + break; + if (time_after(jiffies, t_start + RT305X_ESW_PHY_TIMEOUT)) { + ret = 1; + goto out; + } + } + + write_data &= 0xffff; + esw_w32(esw, (write_data << RT305X_ESW_PCR0_WT_NWAY_DATA_S) | + (phy_register << RT305X_ESW_PCR0_CPU_PHY_REG_S) | + (phy_addr) | RT305X_ESW_PCR0_WT_PHY_CMD, + RT305X_ESW_REG_PCR0); + + t_start = jiffies; + while (1) { + if (esw_r32(esw, RT305X_ESW_REG_PCR1) & + RT305X_ESW_PCR1_WT_DONE) + break; + + if (time_after(jiffies, t_start + RT305X_ESW_PHY_TIMEOUT)) { + ret = 1; + break; + } + } +out: + if (ret) + dev_err(esw->dev, "ramips_eth: MDIO timeout\n"); + return ret; +} + +static unsigned esw_get_vlan_id(struct rt305x_esw *esw, unsigned vlan) +{ + unsigned s; + unsigned val; + + s = RT305X_ESW_VLANI_VID_S * (vlan % 2); + val = esw_r32(esw, RT305X_ESW_REG_VLANI(vlan / 2)); + val = (val >> s) & RT305X_ESW_VLANI_VID_M; + + return val; +} + +static void esw_set_vlan_id(struct rt305x_esw *esw, unsigned vlan, unsigned vid) +{ + unsigned s; + + s = RT305X_ESW_VLANI_VID_S * (vlan % 2); + esw_rmw(esw, + RT305X_ESW_REG_VLANI(vlan / 2), + RT305X_ESW_VLANI_VID_M << s, + (vid & RT305X_ESW_VLANI_VID_M) << s); +} + +static unsigned esw_get_pvid(struct rt305x_esw *esw, unsigned port) +{ + unsigned s, val; + + s = RT305X_ESW_PVIDC_PVID_S * (port % 2); + val = esw_r32(esw, RT305X_ESW_REG_PVIDC(port / 2)); + return (val >> s) & RT305X_ESW_PVIDC_PVID_M; +} + +static void esw_set_pvid(struct rt305x_esw *esw, unsigned port, unsigned pvid) +{ + unsigned s; + + s = RT305X_ESW_PVIDC_PVID_S * (port % 2); + esw_rmw(esw, + RT305X_ESW_REG_PVIDC(port / 2), + RT305X_ESW_PVIDC_PVID_M << s, + (pvid & RT305X_ESW_PVIDC_PVID_M) << s); +} + +static unsigned esw_get_vmsc(struct rt305x_esw *esw, unsigned vlan) +{ + unsigned s, val; + + s = RT305X_ESW_VMSC_MSC_S * (vlan % 4); + val = esw_r32(esw, RT305X_ESW_REG_VMSC(vlan / 4)); + val = (val >> s) & RT305X_ESW_VMSC_MSC_M; + + return val; +} + +static void esw_set_vmsc(struct rt305x_esw *esw, unsigned vlan, unsigned msc) +{ + unsigned s; + + s = RT305X_ESW_VMSC_MSC_S * (vlan % 4); + esw_rmw(esw, + RT305X_ESW_REG_VMSC(vlan / 4), + RT305X_ESW_VMSC_MSC_M << s, + (msc & RT305X_ESW_VMSC_MSC_M) << s); 
+} + +static unsigned esw_get_port_disable(struct rt305x_esw *esw) +{ + unsigned reg; + + reg = esw_r32(esw, RT305X_ESW_REG_POC0); + return (reg >> RT305X_ESW_POC0_DIS_PORT_S) & + RT305X_ESW_POC0_DIS_PORT_M; +} + +static void esw_set_port_disable(struct rt305x_esw *esw, unsigned disable_mask) +{ + unsigned old_mask; + unsigned enable_mask; + unsigned changed; + int i; + + old_mask = esw_get_port_disable(esw); + changed = old_mask ^ disable_mask; + enable_mask = old_mask & disable_mask; + + /* enable before writing to MII */ + esw_rmw(esw, RT305X_ESW_REG_POC0, + (RT305X_ESW_POC0_DIS_PORT_M << + RT305X_ESW_POC0_DIS_PORT_S), + enable_mask << RT305X_ESW_POC0_DIS_PORT_S); + + for (i = 0; i < RT305X_ESW_NUM_LEDS; i++) { + if (!(changed & (1 << i))) + continue; + if (disable_mask & (1 << i)) { + /* disable */ + rt305x_mii_write(esw, i, MII_BMCR, + BMCR_PDOWN); + } else { + /* enable */ + rt305x_mii_write(esw, i, MII_BMCR, + BMCR_FULLDPLX | + BMCR_ANENABLE | + BMCR_ANRESTART | + BMCR_SPEED100); + } + } + + /* disable after writing to MII */ + esw_rmw(esw, RT305X_ESW_REG_POC0, + (RT305X_ESW_POC0_DIS_PORT_M << + RT305X_ESW_POC0_DIS_PORT_S), + disable_mask << RT305X_ESW_POC0_DIS_PORT_S); +} + +static void esw_set_gsc(struct rt305x_esw *esw) +{ + esw_rmw(esw, RT305X_ESW_REG_SGC, + RT305X_ESW_GSC_BC_STROM_MASK << RT305X_ESW_GSC_BC_STROM_SHIFT, + esw->bc_storm_protect << RT305X_ESW_GSC_BC_STROM_SHIFT); + esw_rmw(esw, RT305X_ESW_REG_SGC, + RT305X_ESW_GSC_LED_FREQ_MASK << RT305X_ESW_GSC_LED_FREQ_SHIFT, + esw->led_frequency << RT305X_ESW_GSC_LED_FREQ_SHIFT); +} + +static int esw_apply_config(struct switch_dev *dev); + +static void esw_hw_init(struct rt305x_esw *esw) +{ + int i; + u8 port_disable = 0; + u8 port_map = RT305X_ESW_PMAP_LLLLLL; + + /* vodoo from original driver */ + esw_w32(esw, 0xC8A07850, RT305X_ESW_REG_FCT0); + esw_w32(esw, 0x00000000, RT305X_ESW_REG_SGC2); + /* Port priority 1 for all ports, vlan enabled. */ + esw_w32(esw, 0x00005555 | + (RT305X_ESW_PORTS_ALL << RT305X_ESW_PFC1_EN_VLAN_S), + RT305X_ESW_REG_PFC1); + + /* Enable all ports, Back Pressure and Flow Control */ + esw_w32(esw, ((RT305X_ESW_PORTS_ALL << RT305X_ESW_POC0_EN_BP_S) | + (RT305X_ESW_PORTS_ALL << RT305X_ESW_POC0_EN_FC_S)), + RT305X_ESW_REG_POC0); + + /* Enable Aging, and VLAN TAG removal */ + esw_w32(esw, ((RT305X_ESW_PORTS_ALL << RT305X_ESW_POC2_ENAGING_S) | + (RT305X_ESW_PORTS_NOCPU << RT305X_ESW_POC2_UNTAG_EN_S)), + RT305X_ESW_REG_POC2); + + if (esw->reg_initval_fct2) + esw_w32(esw, esw->reg_initval_fct2, RT305X_ESW_REG_FCT2); + else + esw_w32(esw, 0x0002500c, RT305X_ESW_REG_FCT2); + + /* 300s aging timer, max packet len 1536, broadcast storm prevention + * disabled, disable collision abort, mac xor48 hash, 10 packet back + * pressure jam, GMII disable was_transmit, back pressure disabled, + * 30ms led flash, unmatched IGMP as broadcast, rmc tb fault to all + * ports. 
+ */ + esw_w32(esw, 0x0008a301, RT305X_ESW_REG_SGC); + + /* Setup SoC Port control register */ + esw_w32(esw, + (RT305X_ESW_SOCPC_CRC_PADDING | + (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISUN2CPU_S) | + (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISMC2CPU_S) | + (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISBC2CPU_S)), + RT305X_ESW_REG_SOCPC); + + /* ext phy base addr 31, enable port 5 polling, rx/tx clock skew 1, + * turbo mii off, rgmi 3.3v off + * port5: disabled + * port6: enabled, gige, full-duplex, rx/tx-flow-control + */ + if (esw->reg_initval_fpa2) + esw_w32(esw, esw->reg_initval_fpa2, RT305X_ESW_REG_FPA2); + else + esw_w32(esw, 0x3f502b28, RT305X_ESW_REG_FPA2); + esw_w32(esw, 0x00000000, RT305X_ESW_REG_FPA); + + /* Force Link/Activity on ports */ + esw_w32(esw, 0x00000005, RT305X_ESW_REG_P0LED); + esw_w32(esw, 0x00000005, RT305X_ESW_REG_P1LED); + esw_w32(esw, 0x00000005, RT305X_ESW_REG_P2LED); + esw_w32(esw, 0x00000005, RT305X_ESW_REG_P3LED); + esw_w32(esw, 0x00000005, RT305X_ESW_REG_P4LED); + + /* Copy disabled port configuration from device tree setup */ + port_disable = esw->port_disable; + + /* Disable nonexistent ports by reading the switch config + * after having enabled all possible ports above + */ + port_disable |= esw_get_port_disable(esw); + + for (i = 0; i < 6; i++) + esw->ports[i].disable = (port_disable & (1 << i)) != 0; + + if (ralink_soc == RT305X_SOC_RT3352) { + /* reset EPHY */ + fe_reset(RT5350_RESET_EPHY); + + rt305x_mii_write(esw, 0, 31, 0x8000); + for (i = 0; i < 5; i++) { + if (esw->ports[i].disable) { + rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); + } else { + rt305x_mii_write(esw, i, MII_BMCR, + BMCR_FULLDPLX | + BMCR_ANENABLE | + BMCR_SPEED100); + } + /* TX10 waveform coefficient LSB=0 disable PHY */ + rt305x_mii_write(esw, i, 26, 0x1601); + /* TX100/TX10 AD/DA current bias */ + rt305x_mii_write(esw, i, 29, 0x7016); + /* TX100 slew rate control */ + rt305x_mii_write(esw, i, 30, 0x0038); + } + + /* select global register */ + rt305x_mii_write(esw, 0, 31, 0x0); + /* enlarge agcsel threshold 3 and threshold 2 */ + rt305x_mii_write(esw, 0, 1, 0x4a40); + /* enlarge agcsel threshold 5 and threshold 4 */ + rt305x_mii_write(esw, 0, 2, 0x6254); + /* enlarge agcsel threshold */ + rt305x_mii_write(esw, 0, 3, 0xa17f); + rt305x_mii_write(esw, 0, 12, 0x7eaa); + /* longer TP_IDL tail length */ + rt305x_mii_write(esw, 0, 14, 0x65); + /* increased squelch pulse count threshold. 
*/ + rt305x_mii_write(esw, 0, 16, 0x0684); + /* set TX10 signal amplitude threshold to minimum */ + rt305x_mii_write(esw, 0, 17, 0x0fe0); + /* set squelch amplitude to higher threshold */ + rt305x_mii_write(esw, 0, 18, 0x40ba); + /* tune TP_IDL tail and head waveform, enable power + * down slew rate control + */ + rt305x_mii_write(esw, 0, 22, 0x253f); + /* set PLL/Receive bias current are calibrated */ + rt305x_mii_write(esw, 0, 27, 0x2fda); + /* change PLL/Receive bias current to internal(RT3350) */ + rt305x_mii_write(esw, 0, 28, 0xc410); + /* change PLL bias current to internal(RT3052_MP3) */ + rt305x_mii_write(esw, 0, 29, 0x598b); + /* select local register */ + rt305x_mii_write(esw, 0, 31, 0x8000); + } else if (ralink_soc == RT305X_SOC_RT5350) { + /* reset EPHY */ + fe_reset(RT5350_RESET_EPHY); + + /* set the led polarity */ + esw_w32(esw, esw->reg_led_polarity & 0x1F, + RT5350_EWS_REG_LED_POLARITY); + + /* local registers */ + rt305x_mii_write(esw, 0, 31, 0x8000); + for (i = 0; i < 5; i++) { + if (esw->ports[i].disable) { + rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); + } else { + rt305x_mii_write(esw, i, MII_BMCR, + BMCR_FULLDPLX | + BMCR_ANENABLE | + BMCR_SPEED100); + } + /* TX10 waveform coefficient LSB=0 disable PHY */ + rt305x_mii_write(esw, i, 26, 0x1601); + /* TX100/TX10 AD/DA current bias */ + rt305x_mii_write(esw, i, 29, 0x7015); + /* TX100 slew rate control */ + rt305x_mii_write(esw, i, 30, 0x0038); + } + + /* global registers */ + rt305x_mii_write(esw, 0, 31, 0x0); + /* enlarge agcsel threshold 3 and threshold 2 */ + rt305x_mii_write(esw, 0, 1, 0x4a40); + /* enlarge agcsel threshold 5 and threshold 4 */ + rt305x_mii_write(esw, 0, 2, 0x6254); + /* enlarge agcsel threshold 6 */ + rt305x_mii_write(esw, 0, 3, 0xa17f); + rt305x_mii_write(esw, 0, 12, 0x7eaa); + /* longer TP_IDL tail length */ + rt305x_mii_write(esw, 0, 14, 0x65); + /* increased squelch pulse count threshold. 
*/ + rt305x_mii_write(esw, 0, 16, 0x0684); + /* set TX10 signal amplitude threshold to minimum */ + rt305x_mii_write(esw, 0, 17, 0x0fe0); + /* set squelch amplitude to higher threshold */ + rt305x_mii_write(esw, 0, 18, 0x40ba); + /* tune TP_IDL tail and head waveform, enable power + * down slew rate control + */ + rt305x_mii_write(esw, 0, 22, 0x253f); + /* set PLL/Receive bias current are calibrated */ + rt305x_mii_write(esw, 0, 27, 0x2fda); + /* change PLL/Receive bias current to internal(RT3350) */ + rt305x_mii_write(esw, 0, 28, 0xc410); + /* change PLL bias current to internal(RT3052_MP3) */ + rt305x_mii_write(esw, 0, 29, 0x598b); + /* select local register */ + rt305x_mii_write(esw, 0, 31, 0x8000); + } else if (ralink_soc == MT762X_SOC_MT7628AN || ralink_soc == MT762X_SOC_MT7688) { + int i; + + /* reset EPHY */ + fe_reset(RT5350_RESET_EPHY); + + rt305x_mii_write(esw, 0, 31, 0x2000); /* change G2 page */ + rt305x_mii_write(esw, 0, 26, 0x0020); + + for (i = 0; i < 5; i++) { + rt305x_mii_write(esw, i, 31, 0x8000); + rt305x_mii_write(esw, i, 0, 0x3100); + rt305x_mii_write(esw, i, 30, 0xa000); + rt305x_mii_write(esw, i, 31, 0xa000); + rt305x_mii_write(esw, i, 16, 0x0606); + rt305x_mii_write(esw, i, 23, 0x0f0e); + rt305x_mii_write(esw, i, 24, 0x1610); + rt305x_mii_write(esw, i, 30, 0x1f15); + rt305x_mii_write(esw, i, 28, 0x6111); + rt305x_mii_write(esw, i, 31, 0x2000); + rt305x_mii_write(esw, i, 26, 0x0000); + } + + /* 100Base AOI setting */ + rt305x_mii_write(esw, 0, 31, 0x5000); + rt305x_mii_write(esw, 0, 19, 0x004a); + rt305x_mii_write(esw, 0, 20, 0x015a); + rt305x_mii_write(esw, 0, 21, 0x00ee); + rt305x_mii_write(esw, 0, 22, 0x0033); + rt305x_mii_write(esw, 0, 23, 0x020a); + rt305x_mii_write(esw, 0, 24, 0x0000); + rt305x_mii_write(esw, 0, 25, 0x024a); + rt305x_mii_write(esw, 0, 26, 0x035a); + rt305x_mii_write(esw, 0, 27, 0x02ee); + rt305x_mii_write(esw, 0, 28, 0x0233); + rt305x_mii_write(esw, 0, 29, 0x000a); + rt305x_mii_write(esw, 0, 30, 0x0000); + } else { + rt305x_mii_write(esw, 0, 31, 0x8000); + for (i = 0; i < 5; i++) { + if (esw->ports[i].disable) { + rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); + } else { + rt305x_mii_write(esw, i, MII_BMCR, + BMCR_FULLDPLX | + BMCR_ANENABLE | + BMCR_SPEED100); + } + /* TX10 waveform coefficient */ + rt305x_mii_write(esw, i, 26, 0x1601); + /* TX100/TX10 AD/DA current bias */ + rt305x_mii_write(esw, i, 29, 0x7058); + /* TX100 slew rate control */ + rt305x_mii_write(esw, i, 30, 0x0018); + } + + /* PHY IOT */ + /* select global register */ + rt305x_mii_write(esw, 0, 31, 0x0); + /* tune TP_IDL tail and head waveform */ + rt305x_mii_write(esw, 0, 22, 0x052f); + /* set TX10 signal amplitude threshold to minimum */ + rt305x_mii_write(esw, 0, 17, 0x0fe0); + /* set squelch amplitude to higher threshold */ + rt305x_mii_write(esw, 0, 18, 0x40ba); + /* longer TP_IDL tail length */ + rt305x_mii_write(esw, 0, 14, 0x65); + /* select local register */ + rt305x_mii_write(esw, 0, 31, 0x8000); + } + + if (esw->port_map) + port_map = esw->port_map; + else + port_map = RT305X_ESW_PMAP_LLLLLL; + + /* Unused HW feature, but still nice to be consistent here... + * This is also exported to userspace ('lan' attribute) so it's + * conveniently usable to decide which ports go into the wan vlan by + * default. 
+ */ + esw_rmw(esw, RT305X_ESW_REG_SGC2, + RT305X_ESW_SGC2_LAN_PMAP_M << RT305X_ESW_SGC2_LAN_PMAP_S, + port_map << RT305X_ESW_SGC2_LAN_PMAP_S); + + /* make the switch leds blink */ + for (i = 0; i < RT305X_ESW_NUM_LEDS; i++) + esw->ports[i].led = 0x05; + + /* Apply the empty config. */ + esw_apply_config(&esw->swdev); + + /* Only unmask the port change interrupt */ + esw_w32(esw, ~RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_IMR); +} + +static irqreturn_t esw_interrupt(int irq, void *_esw) +{ + struct rt305x_esw *esw = (struct rt305x_esw *)_esw; + u32 status; + + status = esw_r32(esw, RT305X_ESW_REG_ISR); + if (status & RT305X_ESW_PORT_ST_CHG) { + u32 link = esw_r32(esw, RT305X_ESW_REG_POA); + + link >>= RT305X_ESW_POA_LINK_SHIFT; + link &= RT305X_ESW_POA_LINK_MASK; + dev_info(esw->dev, "link changed 0x%02X\n", link); + } + esw_w32(esw, status, RT305X_ESW_REG_ISR); + + return IRQ_HANDLED; +} + +static int esw_apply_config(struct switch_dev *dev) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int i; + u8 disable = 0; + u8 doubletag = 0; + u8 en_vlan = 0; + u8 untag = 0; + + for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { + u32 vid, vmsc; + if (esw->global_vlan_enable) { + vid = esw->vlans[i].vid; + vmsc = esw->vlans[i].ports; + } else { + vid = RT305X_ESW_VLAN_NONE; + vmsc = RT305X_ESW_PORTS_NONE; + } + esw_set_vlan_id(esw, i, vid); + esw_set_vmsc(esw, i, vmsc); + } + + for (i = 0; i < RT305X_ESW_NUM_PORTS; i++) { + u32 pvid; + disable |= esw->ports[i].disable << i; + if (esw->global_vlan_enable) { + doubletag |= esw->ports[i].doubletag << i; + en_vlan |= 1 << i; + untag |= esw->ports[i].untag << i; + pvid = esw->ports[i].pvid; + } else { + int x = esw->alt_vlan_disable ? 0 : 1; + doubletag |= x << i; + en_vlan |= x << i; + untag |= x << i; + pvid = 0; + } + esw_set_pvid(esw, i, pvid); + if (i < RT305X_ESW_NUM_LEDS) + esw_w32(esw, esw->ports[i].led, + RT305X_ESW_REG_P0LED + 4*i); + } + + esw_set_gsc(esw); + esw_set_port_disable(esw, disable); + esw_rmw(esw, RT305X_ESW_REG_SGC2, + (RT305X_ESW_SGC2_DOUBLE_TAG_M << + RT305X_ESW_SGC2_DOUBLE_TAG_S), + doubletag << RT305X_ESW_SGC2_DOUBLE_TAG_S); + esw_rmw(esw, RT305X_ESW_REG_PFC1, + RT305X_ESW_PFC1_EN_VLAN_M << RT305X_ESW_PFC1_EN_VLAN_S, + en_vlan << RT305X_ESW_PFC1_EN_VLAN_S); + esw_rmw(esw, RT305X_ESW_REG_POC2, + RT305X_ESW_POC2_UNTAG_EN_M << RT305X_ESW_POC2_UNTAG_EN_S, + untag << RT305X_ESW_POC2_UNTAG_EN_S); + + if (!esw->global_vlan_enable) { + /* + * Still need to put all ports into vlan 0 or they'll be + * isolated. 
+ * NOTE: vlan 0 is special, no vlan tag is prepended + */ + esw_set_vlan_id(esw, 0, 0); + esw_set_vmsc(esw, 0, RT305X_ESW_PORTS_ALL); + } + + return 0; +} + +static int esw_reset_switch(struct switch_dev *dev) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + esw->global_vlan_enable = 0; + memset(esw->ports, 0, sizeof(esw->ports)); + memset(esw->vlans, 0, sizeof(esw->vlans)); + esw_hw_init(esw); + + return 0; +} + +static int esw_get_vlan_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + val->value.i = esw->global_vlan_enable; + + return 0; +} + +static int esw_set_vlan_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + esw->global_vlan_enable = val->value.i != 0; + + return 0; +} + +static int esw_get_alt_vlan_disable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + val->value.i = esw->alt_vlan_disable; + + return 0; +} + +static int esw_set_alt_vlan_disable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + esw->alt_vlan_disable = val->value.i != 0; + + return 0; +} + +static int +rt305x_esw_set_bc_status(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + esw->bc_storm_protect = val->value.i & RT305X_ESW_GSC_BC_STROM_MASK; + + return 0; +} + +static int +rt305x_esw_get_bc_status(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + val->value.i = esw->bc_storm_protect; + + return 0; +} + +static int +rt305x_esw_set_led_freq(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + esw->led_frequency = val->value.i & RT305X_ESW_GSC_LED_FREQ_MASK; + + return 0; +} + +static int +rt305x_esw_get_led_freq(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + val->value.i = esw->led_frequency; + + return 0; +} + +static int esw_get_port_link(struct switch_dev *dev, + int port, + struct switch_port_link *link) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + u32 speed, poa; + + if (port < 0 || port >= RT305X_ESW_NUM_PORTS) + return -EINVAL; + + poa = esw_r32(esw, RT305X_ESW_REG_POA) >> port; + + link->link = (poa >> RT305X_ESW_LINK_S) & 1; + link->duplex = (poa >> RT305X_ESW_DUPLEX_S) & 1; + if (port < RT305X_ESW_NUM_LEDS) { + speed = (poa >> RT305X_ESW_SPD_S) & 1; + } else { + if (port == RT305X_ESW_NUM_PORTS - 1) + poa >>= 1; + speed = (poa >> RT305X_ESW_SPD_S) & 3; + } + switch (speed) { + case 0: + link->speed = SWITCH_PORT_SPEED_10; + break; + case 1: + link->speed = SWITCH_PORT_SPEED_100; + break; + case 2: + case 3: /* forced gige speed can be 2 or 3 */ + link->speed = SWITCH_PORT_SPEED_1000; + break; + default: + link->speed = SWITCH_PORT_SPEED_UNKNOWN; + break; + } + + return 0; +} + +static int esw_get_port_bool(struct switch_dev *dev, + const 
struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int idx = val->port_vlan; + u32 x, reg, shift; + + if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS) + return -EINVAL; + + switch (attr->id) { + case RT305X_ESW_ATTR_PORT_DISABLE: + reg = RT305X_ESW_REG_POC0; + shift = RT305X_ESW_POC0_DIS_PORT_S; + break; + case RT305X_ESW_ATTR_PORT_DOUBLETAG: + reg = RT305X_ESW_REG_SGC2; + shift = RT305X_ESW_SGC2_DOUBLE_TAG_S; + break; + case RT305X_ESW_ATTR_PORT_UNTAG: + reg = RT305X_ESW_REG_POC2; + shift = RT305X_ESW_POC2_UNTAG_EN_S; + break; + case RT305X_ESW_ATTR_PORT_LAN: + reg = RT305X_ESW_REG_SGC2; + shift = RT305X_ESW_SGC2_LAN_PMAP_S; + if (idx >= RT305X_ESW_NUM_LANWAN) + return -EINVAL; + break; + default: + return -EINVAL; + } + + x = esw_r32(esw, reg); + val->value.i = (x >> (idx + shift)) & 1; + + return 0; +} + +static int esw_set_port_bool(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int idx = val->port_vlan; + + if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS || + val->value.i < 0 || val->value.i > 1) + return -EINVAL; + + switch (attr->id) { + case RT305X_ESW_ATTR_PORT_DISABLE: + esw->ports[idx].disable = val->value.i; + break; + case RT305X_ESW_ATTR_PORT_DOUBLETAG: + esw->ports[idx].doubletag = val->value.i; + break; + case RT305X_ESW_ATTR_PORT_UNTAG: + esw->ports[idx].untag = val->value.i; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_get_port_recv_badgood(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int idx = val->port_vlan; + int shift = attr->id == RT305X_ESW_ATTR_PORT_RECV_GOOD ? 0 : 16; + u32 reg; + + if (idx < 0 || idx >= RT305X_ESW_NUM_LANWAN) + return -EINVAL; + reg = esw_r32(esw, RT305X_ESW_REG_PXPC(idx)); + val->value.i = (reg >> shift) & 0xffff; + + return 0; +} + +static int +esw_get_port_tr_badgood(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + int idx = val->port_vlan; + int shift = attr->id == RT5350_ESW_ATTR_PORT_TR_GOOD ? 
0 : 16; + u32 reg; + + if ((ralink_soc != RT305X_SOC_RT5350) && (ralink_soc != MT762X_SOC_MT7628AN) && (ralink_soc != MT762X_SOC_MT7688)) + return -EINVAL; + + if (idx < 0 || idx >= RT305X_ESW_NUM_LANWAN) + return -EINVAL; + + reg = esw_r32(esw, RT5350_ESW_REG_PXTPC(idx)); + val->value.i = (reg >> shift) & 0xffff; + + return 0; +} + +static int esw_get_port_led(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int idx = val->port_vlan; + + if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS || + idx >= RT305X_ESW_NUM_LEDS) + return -EINVAL; + + val->value.i = esw_r32(esw, RT305X_ESW_REG_P0LED + 4*idx); + + return 0; +} + +static int esw_set_port_led(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int idx = val->port_vlan; + + if (idx < 0 || idx >= RT305X_ESW_NUM_LEDS) + return -EINVAL; + + esw->ports[idx].led = val->value.i; + + return 0; +} + +static int esw_get_port_pvid(struct switch_dev *dev, int port, int *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + if (port >= RT305X_ESW_NUM_PORTS) + return -EINVAL; + + *val = esw_get_pvid(esw, port); + + return 0; +} + +static int esw_set_port_pvid(struct switch_dev *dev, int port, int val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + + if (port >= RT305X_ESW_NUM_PORTS) + return -EINVAL; + + esw->ports[port].pvid = val; + + return 0; +} + +static int esw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + u32 vmsc, poc2; + int vlan_idx = -1; + int i; + + val->len = 0; + + if (val->port_vlan < 0 || val->port_vlan >= RT305X_ESW_NUM_VIDS) + return -EINVAL; + + /* valid vlan? */ + for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { + if (esw_get_vlan_id(esw, i) == val->port_vlan && + esw_get_vmsc(esw, i) != RT305X_ESW_PORTS_NONE) { + vlan_idx = i; + break; + } + } + + if (vlan_idx == -1) + return -EINVAL; + + vmsc = esw_get_vmsc(esw, vlan_idx); + poc2 = esw_r32(esw, RT305X_ESW_REG_POC2); + + for (i = 0; i < RT305X_ESW_NUM_PORTS; i++) { + struct switch_port *p; + int port_mask = 1 << i; + + if (!(vmsc & port_mask)) + continue; + + p = &val->value.ports[val->len++]; + p->id = i; + if (poc2 & (port_mask << RT305X_ESW_POC2_UNTAG_EN_S)) + p->flags = 0; + else + p->flags = 1 << SWITCH_PORT_FLAG_TAGGED; + } + + return 0; +} + +static int esw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); + int ports; + int vlan_idx = -1; + int i; + + if (val->port_vlan < 0 || val->port_vlan >= RT305X_ESW_NUM_VIDS || + val->len > RT305X_ESW_NUM_PORTS) + return -EINVAL; + + /* one of the already defined vlans? 
*/ + for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { + if (esw->vlans[i].vid == val->port_vlan && + esw->vlans[i].ports != RT305X_ESW_PORTS_NONE) { + vlan_idx = i; + break; + } + } + + /* select a free slot */ + for (i = 0; vlan_idx == -1 && i < RT305X_ESW_NUM_VLANS; i++) { + if (esw->vlans[i].ports == RT305X_ESW_PORTS_NONE) + vlan_idx = i; + } + + /* bail if all slots are in use */ + if (vlan_idx == -1) + return -EINVAL; + + ports = RT305X_ESW_PORTS_NONE; + for (i = 0; i < val->len; i++) { + struct switch_port *p = &val->value.ports[i]; + int port_mask = 1 << p->id; + bool untagged = !(p->flags & (1 << SWITCH_PORT_FLAG_TAGGED)); + + if (p->id >= RT305X_ESW_NUM_PORTS) + return -EINVAL; + + ports |= port_mask; + esw->ports[p->id].untag = untagged; + } + esw->vlans[vlan_idx].ports = ports; + if (ports == RT305X_ESW_PORTS_NONE) + esw->vlans[vlan_idx].vid = RT305X_ESW_VLAN_NONE; + else + esw->vlans[vlan_idx].vid = val->port_vlan; + + return 0; +} + +static const struct switch_attr esw_global[] = { + { + .type = SWITCH_TYPE_INT, + .name = "enable_vlan", + .description = "VLAN mode (1:enabled)", + .max = 1, + .id = RT305X_ESW_ATTR_ENABLE_VLAN, + .get = esw_get_vlan_enable, + .set = esw_set_vlan_enable, + }, + { + .type = SWITCH_TYPE_INT, + .name = "alternate_vlan_disable", + .description = "Use en_vlan instead of doubletag to disable" + " VLAN mode", + .max = 1, + .id = RT305X_ESW_ATTR_ALT_VLAN_DISABLE, + .get = esw_get_alt_vlan_disable, + .set = esw_set_alt_vlan_disable, + }, + { + .type = SWITCH_TYPE_INT, + .name = "bc_storm_protect", + .description = "Global broadcast storm protection (0:Disable, 1:64 blocks, 2:96 blocks, 3:128 blocks)", + .max = 3, + .id = RT305X_ESW_ATTR_BC_STATUS, + .get = rt305x_esw_get_bc_status, + .set = rt305x_esw_set_bc_status, + }, + { + .type = SWITCH_TYPE_INT, + .name = "led_frequency", + .description = "LED Flash frequency (0:30mS, 1:60mS, 2:240mS, 3:480mS)", + .max = 3, + .id = RT305X_ESW_ATTR_LED_FREQ, + .get = rt305x_esw_get_led_freq, + .set = rt305x_esw_set_led_freq, + } +}; + +static const struct switch_attr esw_port[] = { + { + .type = SWITCH_TYPE_INT, + .name = "disable", + .description = "Port state (1:disabled)", + .max = 1, + .id = RT305X_ESW_ATTR_PORT_DISABLE, + .get = esw_get_port_bool, + .set = esw_set_port_bool, + }, + { + .type = SWITCH_TYPE_INT, + .name = "doubletag", + .description = "Double tagging for incoming vlan packets " + "(1:enabled)", + .max = 1, + .id = RT305X_ESW_ATTR_PORT_DOUBLETAG, + .get = esw_get_port_bool, + .set = esw_set_port_bool, + }, + { + .type = SWITCH_TYPE_INT, + .name = "untag", + .description = "Untag (1:strip outgoing vlan tag)", + .max = 1, + .id = RT305X_ESW_ATTR_PORT_UNTAG, + .get = esw_get_port_bool, + .set = esw_set_port_bool, + }, + { + .type = SWITCH_TYPE_INT, + .name = "led", + .description = "LED mode (0:link, 1:100m, 2:duplex, 3:activity," + " 4:collision, 5:linkact, 6:duplcoll, 7:10mact," + " 8:100mact, 10:blink, 11:off, 12:on)", + .max = 15, + .id = RT305X_ESW_ATTR_PORT_LED, + .get = esw_get_port_led, + .set = esw_set_port_led, + }, + { + .type = SWITCH_TYPE_INT, + .name = "lan", + .description = "HW port group (0:wan, 1:lan)", + .max = 1, + .id = RT305X_ESW_ATTR_PORT_LAN, + .get = esw_get_port_bool, + }, + { + .type = SWITCH_TYPE_INT, + .name = "recv_bad", + .description = "Receive bad packet counter", + .id = RT305X_ESW_ATTR_PORT_RECV_BAD, + .get = esw_get_port_recv_badgood, + }, + { + .type = SWITCH_TYPE_INT, + .name = "recv_good", + .description = "Receive good packet counter", + .id = 
RT305X_ESW_ATTR_PORT_RECV_GOOD, + .get = esw_get_port_recv_badgood, + }, + { + .type = SWITCH_TYPE_INT, + .name = "tr_bad", + + .description = "Transmit bad packet counter. rt5350 only", + .id = RT5350_ESW_ATTR_PORT_TR_BAD, + .get = esw_get_port_tr_badgood, + }, + { + .type = SWITCH_TYPE_INT, + .name = "tr_good", + + .description = "Transmit good packet counter. rt5350 only", + .id = RT5350_ESW_ATTR_PORT_TR_GOOD, + .get = esw_get_port_tr_badgood, + }, +}; + +static const struct switch_attr esw_vlan[] = { +}; + +static const struct switch_dev_ops esw_ops = { + .attr_global = { + .attr = esw_global, + .n_attr = ARRAY_SIZE(esw_global), + }, + .attr_port = { + .attr = esw_port, + .n_attr = ARRAY_SIZE(esw_port), + }, + .attr_vlan = { + .attr = esw_vlan, + .n_attr = ARRAY_SIZE(esw_vlan), + }, + .get_vlan_ports = esw_get_vlan_ports, + .set_vlan_ports = esw_set_vlan_ports, + .get_port_pvid = esw_get_port_pvid, + .set_port_pvid = esw_set_port_pvid, + .get_port_link = esw_get_port_link, + .apply_config = esw_apply_config, + .reset_switch = esw_reset_switch, +}; + +static int esw_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct device_node *np = pdev->dev.of_node; + const __be32 *port_map, *port_disable, *reg_init; + struct switch_dev *swdev; + struct rt305x_esw *esw; + int ret; + + esw = devm_kzalloc(&pdev->dev, sizeof(*esw), GFP_KERNEL); + if (!esw) + return -ENOMEM; + + esw->dev = &pdev->dev; + esw->irq = irq_of_parse_and_map(np, 0); + esw->base = devm_ioremap_resource(&pdev->dev, res); + if (!esw->base) + return -EADDRNOTAVAIL; + + port_map = of_get_property(np, "mediatek,portmap", NULL); + if (port_map) + esw->port_map = be32_to_cpu(*port_map); + + port_disable = of_get_property(np, "mediatek,portdisable", NULL); + if (port_disable) + esw->port_disable = be32_to_cpu(*port_disable); + + reg_init = of_get_property(np, "ralink,fct2", NULL); + if (reg_init) + esw->reg_initval_fct2 = be32_to_cpu(*reg_init); + + reg_init = of_get_property(np, "ralink,fpa2", NULL); + if (reg_init) + esw->reg_initval_fpa2 = be32_to_cpu(*reg_init); + + reg_init = of_get_property(np, "mediatek,led_polarity", NULL); + if (reg_init) + esw->reg_led_polarity = be32_to_cpu(*reg_init); + + swdev = &esw->swdev; + swdev->of_node = pdev->dev.of_node; + swdev->name = "rt305x-esw"; + swdev->alias = "rt305x"; + swdev->cpu_port = RT305X_ESW_PORT6; + swdev->ports = RT305X_ESW_NUM_PORTS; + swdev->vlans = RT305X_ESW_NUM_VIDS; + swdev->ops = &esw_ops; + + ret = register_switch(swdev, NULL); + if (ret < 0) { + dev_err(&pdev->dev, "register_switch failed\n"); + return ret; + } + + platform_set_drvdata(pdev, esw); + + spin_lock_init(&esw->reg_rw_lock); + + esw_hw_init(esw); + + reg_init = of_get_property(np, "ralink,rgmii", NULL); + if (reg_init && be32_to_cpu(*reg_init) == 1) { + /* + * External switch connected to RGMII interface. + * Unregister the switch device after initialization. 
+ */ + dev_err(&pdev->dev, "RGMII mode, not exporting switch device.\n"); + unregister_switch(&esw->swdev); + platform_set_drvdata(pdev, NULL); + return -ENODEV; + } + + ret = devm_request_irq(&pdev->dev, esw->irq, esw_interrupt, 0, "esw", + esw); + + if (!ret) { + esw_w32(esw, RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_ISR); + esw_w32(esw, ~RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_IMR); + } + + return ret; +} + +static int esw_remove(struct platform_device *pdev) +{ + struct rt305x_esw *esw = platform_get_drvdata(pdev); + + if (esw) { + esw_w32(esw, ~0, RT305X_ESW_REG_IMR); + platform_set_drvdata(pdev, NULL); + } + + return 0; +} + +static const struct of_device_id ralink_esw_match[] = { + { .compatible = "ralink,rt3050-esw" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ralink_esw_match); + +static struct platform_driver esw_driver = { + .probe = esw_probe, + .remove = esw_remove, + .driver = { + .name = "rt3050-esw", + .owner = THIS_MODULE, + .of_match_table = ralink_esw_match, + }, +}; + +module_platform_driver(esw_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Switch driver for RT305X SoC"); +MODULE_VERSION(MTK_FE_DRV_VERSION); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/esw_rt3050.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/esw_rt3050.h new file mode 100644 index 0000000000..b757e5d639 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/esw_rt3050.h @@ -0,0 +1,29 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#ifndef _RALINK_ESW_RT3052_H__ +#define _RALINK_ESW_RT3052_H__ + +#ifdef CONFIG_NET_MEDIATEK_ESW_RT3052 + +int __init mtk_switch_init(void); +void mtk_switch_exit(void); + +#else + +static inline int __init mtk_switch_init(void) { return 0; } +static inline void mtk_switch_exit(void) { } + +#endif +#endif diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/ethtool.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/ethtool.c new file mode 100644 index 0000000000..803346ac12 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/ethtool.c @@ -0,0 +1,230 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include "mtk_eth_soc.h" + +static const char fe_gdma_str[][ETH_GSTRING_LEN] = { +#define _FE(x...) 
# x, +FE_STAT_REG_DECLARE +#undef _FE +}; + +static int fe_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct fe_priv *priv = netdev_priv(ndev); + + if (!priv->phy_dev) + return -ENODEV; + + if (priv->phy_flags == FE_PHY_FLAG_ATTACH) { + if (phy_read_status(priv->phy_dev)) + return -ENODEV; + } + + phy_ethtool_ksettings_get(ndev->phydev, cmd); + + return 0; +} + +static int fe_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct fe_priv *priv = netdev_priv(ndev); + + if (!priv->phy_dev) + goto out_sset; + + if (cmd->base.phy_address != priv->phy_dev->mdio.addr) { + if (priv->phy->phy_node[cmd->base.phy_address]) { + priv->phy_dev = priv->phy->phy[cmd->base.phy_address]; + priv->phy_flags = FE_PHY_FLAG_PORT; + } else if (priv->mii_bus && mdiobus_get_phy(priv->mii_bus, cmd->base.phy_address)) { + priv->phy_dev = mdiobus_get_phy(priv->mii_bus, cmd->base.phy_address); + priv->phy_flags = FE_PHY_FLAG_ATTACH; + } else { + goto out_sset; + } + } + + return phy_ethtool_ksettings_set(ndev->phydev, cmd); + +out_sset: + return -ENODEV; +} + +static void fe_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct fe_priv *priv = netdev_priv(dev); + struct fe_soc_data *soc = priv->soc; + + strlcpy(info->driver, priv->device->driver->name, sizeof(info->driver)); + strlcpy(info->version, MTK_FE_DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, dev_name(priv->device), sizeof(info->bus_info)); + + if (soc->reg_table[FE_REG_FE_COUNTER_BASE]) + info->n_stats = ARRAY_SIZE(fe_gdma_str); +} + +static u32 fe_get_msglevel(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void fe_set_msglevel(struct net_device *dev, u32 value) +{ + struct fe_priv *priv = netdev_priv(dev); + + priv->msg_enable = value; +} + +static int fe_nway_reset(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + + if (!priv->phy_dev) + goto out_nway_reset; + + return genphy_restart_aneg(priv->phy_dev); + +out_nway_reset: + return -EOPNOTSUPP; +} + +static u32 fe_get_link(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + int err; + + if (!priv->phy_dev) + goto out_get_link; + + if (priv->phy_flags == FE_PHY_FLAG_ATTACH) { + err = genphy_update_link(priv->phy_dev); + if (err) + goto out_get_link; + } + + return priv->phy_dev->link; + +out_get_link: + return ethtool_op_get_link(dev); +} + +static int fe_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct fe_priv *priv = netdev_priv(dev); + + if ((ring->tx_pending < 2) || + (ring->rx_pending < 2) || + (ring->rx_pending > MAX_DMA_DESC) || + (ring->tx_pending > MAX_DMA_DESC)) + return -EINVAL; + + dev->netdev_ops->ndo_stop(dev); + + priv->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1); + priv->rx_ring.rx_ring_size = BIT(fls(ring->rx_pending) - 1); + + dev->netdev_ops->ndo_open(dev); + + return 0; +} + +static void fe_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct fe_priv *priv = netdev_priv(dev); + + ring->rx_max_pending = MAX_DMA_DESC; + ring->tx_max_pending = MAX_DMA_DESC; + ring->rx_pending = priv->rx_ring.rx_ring_size; + ring->tx_pending = priv->tx_ring.tx_ring_size; +} + +static void fe_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *fe_gdma_str, sizeof(fe_gdma_str)); + break; + } +} + +static int 
fe_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(fe_gdma_str); + default: + return -EOPNOTSUPP; + } +} + +static void fe_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fe_priv *priv = netdev_priv(dev); + struct fe_hw_stats *hwstats = priv->hw_stats; + u64 *data_src, *data_dst; + unsigned int start; + int i; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock(&hwstats->stats_lock)) { + fe_stats_update(priv); + spin_unlock(&hwstats->stats_lock); + } + } + + do { + data_src = &hwstats->tx_bytes; + data_dst = data; + start = u64_stats_fetch_begin_irq(&hwstats->syncp); + + for (i = 0; i < ARRAY_SIZE(fe_gdma_str); i++) + *data_dst++ = *data_src++; + + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); +} + +static struct ethtool_ops fe_ethtool_ops = { + .get_link_ksettings = fe_get_link_ksettings, + .set_link_ksettings = fe_set_link_ksettings, + .get_drvinfo = fe_get_drvinfo, + .get_msglevel = fe_get_msglevel, + .set_msglevel = fe_set_msglevel, + .nway_reset = fe_nway_reset, + .get_link = fe_get_link, + .set_ringparam = fe_set_ringparam, + .get_ringparam = fe_get_ringparam, +}; + +void fe_set_ethtool_ops(struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + struct fe_soc_data *soc = priv->soc; + + if (soc->reg_table[FE_REG_FE_COUNTER_BASE]) { + fe_ethtool_ops.get_strings = fe_get_strings; + fe_ethtool_ops.get_sset_count = fe_get_sset_count; + fe_ethtool_ops.get_ethtool_stats = fe_get_ethtool_stats; + } + + netdev->ethtool_ops = &fe_ethtool_ops; +} diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/ethtool.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/ethtool.h new file mode 100644 index 0000000000..6fd16f0b66 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/ethtool.h @@ -0,0 +1,22 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#ifndef FE_ETHTOOL_H +#define FE_ETHTOOL_H + +#include <linux/ethtool.h> + +void fe_set_ethtool_ops(struct net_device *netdev); + +#endif /* FE_ETHTOOL_H */ diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7620.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7620.c new file mode 100644 index 0000000000..b675ad0c4a --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7620.c @@ -0,0 +1,259 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> + +#include <ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "gsw_mt7620.h" + +void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg) +{ + iowrite32(val, gsw->base + reg); +} + +u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned reg) +{ + return ioread32(gsw->base + reg); +} + +static irqreturn_t gsw_interrupt_mt7620(int irq, void *_priv) +{ + struct fe_priv *priv = (struct fe_priv *)_priv; + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + u32 status; + int i, max = (gsw->port4 == PORT4_EPHY) ? (4) : (3); + + status = mtk_switch_r32(gsw, GSW_REG_ISR); + if (status & PORT_IRQ_ST_CHG) + for (i = 0; i <= max; i++) { + u32 status = mtk_switch_r32(gsw, GSW_REG_PORT_STATUS(i)); + int link = status & 0x1; + + if (link != priv->link[i]) + mt7620_print_link_state(priv, i, link, + (status >> 2) & 3, + (status & 0x2)); + + priv->link[i] = link; + } + mt7620_handle_carrier(priv); + mtk_switch_w32(gsw, status, GSW_REG_ISR); + + return IRQ_HANDLED; +} + +static void mt7620_hw_init(struct mt7620_gsw *gsw, struct device_node *np) +{ + u32 is_BGA = (rt_sysc_r32(0x0c) >> 16) & 1; + + rt_sysc_w32(rt_sysc_r32(SYSC_REG_CFG1) | BIT(8), SYSC_REG_CFG1); + mtk_switch_w32(gsw, mtk_switch_r32(gsw, GSW_REG_CKGCR) & ~(0x3 << 4), GSW_REG_CKGCR); + + /* Enable MIB stats */ + mtk_switch_w32(gsw, mtk_switch_r32(gsw, GSW_REG_MIB_CNT_EN) | (1 << 1), GSW_REG_MIB_CNT_EN); + + if (of_property_read_bool(np, "mediatek,mt7530")) { + u32 val; + + /* turn off ephy and set phy base addr to 12 */ + mtk_switch_w32(gsw, mtk_switch_r32(gsw, GSW_REG_GPC1) | + (0x1f << 24) | (0xc << 16), + GSW_REG_GPC1); + + /* set MT7530 central align */ + val = mt7530_mdio_r32(gsw, 0x7830); + val &= ~BIT(0); + val |= BIT(1); + mt7530_mdio_w32(gsw, 0x7830, val); + + val = mt7530_mdio_r32(gsw, 0x7a40); + val &= ~BIT(30); + mt7530_mdio_w32(gsw, 0x7a40, val); + + mt7530_mdio_w32(gsw, 0x7a78, 0x855); + } else { + /* global page 4 */ + _mt7620_mii_write(gsw, 1, 31, 0x4000); + + _mt7620_mii_write(gsw, 1, 17, 0x7444); + if (is_BGA) + _mt7620_mii_write(gsw, 1, 19, 0x0114); + else + _mt7620_mii_write(gsw, 1, 19, 0x0117); + + _mt7620_mii_write(gsw, 1, 22, 0x10cf); + _mt7620_mii_write(gsw, 1, 25, 0x6212); + _mt7620_mii_write(gsw, 1, 26, 0x0777); + _mt7620_mii_write(gsw, 1, 29, 0x4000); + _mt7620_mii_write(gsw, 1, 28, 0xc077); + _mt7620_mii_write(gsw, 1, 24, 0x0000); + + /* global page 3 */ + _mt7620_mii_write(gsw, 1, 31, 0x3000); + _mt7620_mii_write(gsw, 1, 17, 0x4838); + + /* global page 2 */ + _mt7620_mii_write(gsw, 1, 31, 0x2000); + if (is_BGA) { + _mt7620_mii_write(gsw, 1, 21, 0x0515); + _mt7620_mii_write(gsw, 1, 22, 0x0053); + _mt7620_mii_write(gsw, 1, 23, 0x00bf); + _mt7620_mii_write(gsw, 1, 24, 0x0aaf); + _mt7620_mii_write(gsw, 1, 25, 0x0fad); + _mt7620_mii_write(gsw, 1, 26, 0x0fc1); + } else { + _mt7620_mii_write(gsw, 1, 21, 0x0517); + _mt7620_mii_write(gsw, 1, 22, 0x0fd2); + _mt7620_mii_write(gsw, 1, 23, 0x00bf); + _mt7620_mii_write(gsw, 1, 24, 0x0aab); + _mt7620_mii_write(gsw, 1, 25, 0x00ae); + _mt7620_mii_write(gsw, 1, 26, 0x0fff); + } + /* global page 1 */ + _mt7620_mii_write(gsw, 1, 31, 0x1000); + _mt7620_mii_write(gsw, 1, 17, 0xe7f8); + } + + /* global page 0 
*/ + _mt7620_mii_write(gsw, 1, 31, 0x8000); + _mt7620_mii_write(gsw, 0, 30, 0xa000); + _mt7620_mii_write(gsw, 1, 30, 0xa000); + _mt7620_mii_write(gsw, 2, 30, 0xa000); + _mt7620_mii_write(gsw, 3, 30, 0xa000); + + _mt7620_mii_write(gsw, 0, 4, 0x05e1); + _mt7620_mii_write(gsw, 1, 4, 0x05e1); + _mt7620_mii_write(gsw, 2, 4, 0x05e1); + _mt7620_mii_write(gsw, 3, 4, 0x05e1); + + /* global page 2 */ + _mt7620_mii_write(gsw, 1, 31, 0xa000); + _mt7620_mii_write(gsw, 0, 16, 0x1111); + _mt7620_mii_write(gsw, 1, 16, 0x1010); + _mt7620_mii_write(gsw, 2, 16, 0x1515); + _mt7620_mii_write(gsw, 3, 16, 0x0f0f); + + /* CPU Port6 Force Link 1G, FC ON */ + mtk_switch_w32(gsw, 0x5e33b, GSW_REG_PORT_PMCR(6)); + + /* Set Port 6 as CPU Port */ + mtk_switch_w32(gsw, 0x7f7f7fe0, 0x0010); + + /* setup port 4 */ + if (gsw->port4 == PORT4_EPHY) { + u32 val = rt_sysc_r32(SYSC_REG_CFG1); + + val |= 3 << 14; + rt_sysc_w32(val, SYSC_REG_CFG1); + _mt7620_mii_write(gsw, 4, 30, 0xa000); + _mt7620_mii_write(gsw, 4, 4, 0x05e1); + _mt7620_mii_write(gsw, 4, 16, 0x1313); + pr_info("gsw: setting port4 to ephy mode\n"); + } +} + +static const struct of_device_id mediatek_gsw_match[] = { + { .compatible = "mediatek,mt7620-gsw" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mediatek_gsw_match); + +int mtk_gsw_init(struct fe_priv *priv) +{ + struct device_node *np = priv->switch_np; + struct platform_device *pdev = of_find_device_by_node(np); + struct mt7620_gsw *gsw; + + if (!pdev) + return -ENODEV; + + if (!of_device_is_compatible(np, mediatek_gsw_match->compatible)) + return -EINVAL; + + gsw = platform_get_drvdata(pdev); + priv->soc->swpriv = gsw; + + mt7620_hw_init(gsw, np); + + if (gsw->irq) { + request_irq(gsw->irq, gsw_interrupt_mt7620, 0, + "gsw", priv); + mtk_switch_w32(gsw, ~PORT_IRQ_ST_CHG, GSW_REG_IMR); + } + + return 0; +} + +static int mt7620_gsw_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + const char *port4 = NULL; + struct mt7620_gsw *gsw; + struct device_node *np = pdev->dev.of_node; + + gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL); + if (!gsw) + return -ENOMEM; + + gsw->base = devm_ioremap_resource(&pdev->dev, res); + if (!gsw->base) + return -EADDRNOTAVAIL; + + gsw->dev = &pdev->dev; + + of_property_read_string(np, "mediatek,port4", &port4); + if (port4 && !strcmp(port4, "ephy")) + gsw->port4 = PORT4_EPHY; + else if (port4 && !strcmp(port4, "gmac")) + gsw->port4 = PORT4_EXT; + else + gsw->port4 = PORT4_EPHY; + + gsw->irq = platform_get_irq(pdev, 0); + + platform_set_drvdata(pdev, gsw); + + return 0; +} + +static int mt7620_gsw_remove(struct platform_device *pdev) +{ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver gsw_driver = { + .probe = mt7620_gsw_probe, + .remove = mt7620_gsw_remove, + .driver = { + .name = "mt7620-gsw", + .owner = THIS_MODULE, + .of_match_table = mediatek_gsw_match, + }, +}; + +module_platform_driver(gsw_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7620 SoC"); +MODULE_VERSION(MTK_FE_DRV_VERSION); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7620.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7620.h new file mode 100644 index 0000000000..ae0b6de024 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7620.h @@ -0,0 +1,127 @@ +/* This program is free software; you can redistribute it and/or modify + * it 
under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#ifndef _RALINK_GSW_MT7620_H__ +#define _RALINK_GSW_MT7620_H__ + +#define GSW_REG_PHY_TIMEOUT (5 * HZ) + +#ifdef CONFIG_SOC_MT7621 +#define MT7620A_GSW_REG_PIAC 0x0004 +#else +#define MT7620A_GSW_REG_PIAC 0x7004 +#endif + +#define GSW_NUM_VLANS 16 +#define GSW_NUM_VIDS 4096 +#define GSW_NUM_PORTS 7 +#define GSW_PORT6 6 + +#define GSW_MDIO_ACCESS BIT(31) +#define GSW_MDIO_READ BIT(19) +#define GSW_MDIO_WRITE BIT(18) +#define GSW_MDIO_START BIT(16) +#define GSW_MDIO_ADDR_SHIFT 20 +#define GSW_MDIO_REG_SHIFT 25 + +#define GSW_REG_MIB_CNT_EN 0x4000 + +#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100)) +#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100)) +#define GSW_REG_SMACCR0 0x3fE4 +#define GSW_REG_SMACCR1 0x3fE8 +#define GSW_REG_CKGCR 0x3ff0 + +#define GSW_REG_IMR 0x7008 +#define GSW_REG_ISR 0x700c +#define GSW_REG_GPC1 0x7014 + +#define GSW_REG_MAC_P0_MCR 0x100 +#define GSW_REG_MAC_P1_MCR 0x200 + +// Global MAC control register +#define GSW_REG_GMACCR 0x30E0 + +#define SYSC_REG_CHIP_REV_ID 0x0c +#define SYSC_REG_CFG1 0x14 +#define RST_CTRL_MCM BIT(2) +#define SYSC_PAD_RGMII2_MDIO 0x58 +#define SYSC_GPIO_MODE 0x60 + +#define PORT_IRQ_ST_CHG 0x7f + +#ifdef CONFIG_SOC_MT7621 +#define ESW_PHY_POLLING 0x0000 +#else +#define ESW_PHY_POLLING 0x7000 +#endif + +#define PMCR_IPG BIT(18) +#define PMCR_MAC_MODE BIT(16) +#define PMCR_FORCE BIT(15) +#define PMCR_TX_EN BIT(14) +#define PMCR_RX_EN BIT(13) +#define PMCR_BACKOFF BIT(9) +#define PMCR_BACKPRES BIT(8) +#define PMCR_RX_FC BIT(5) +#define PMCR_TX_FC BIT(4) +#define PMCR_SPEED(_x) (_x << 2) +#define PMCR_DUPLEX BIT(1) +#define PMCR_LINK BIT(0) + +#define PHY_AN_EN BIT(31) +#define PHY_PRE_EN BIT(30) +#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24) + + +enum { + /* Global attributes. */ + GSW_ATTR_ENABLE_VLAN, + /* Port attributes. 
*/ + GSW_ATTR_PORT_UNTAG, +}; + +enum { + PORT4_EPHY = 0, + PORT4_EXT, +}; + +struct mt7620_gsw { + struct device *dev; + void __iomem *base; + int irq; + int port4; + unsigned long int autopoll; +}; + +void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg); +u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned reg); +int mtk_gsw_init(struct fe_priv *priv); + +int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); +int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); +void mt7620_mdio_link_adjust(struct fe_priv *priv, int port); +int mt7620_has_carrier(struct fe_priv *priv); +void mt7620_print_link_state(struct fe_priv *priv, int port, int link, + int speed, int duplex); + +void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val); +u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg); + +u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, + u32 phy_register, u32 write_data); +u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg); +void mt7620_handle_carrier(struct fe_priv *priv); + +#endif diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7621.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7621.c new file mode 100644 index 0000000000..ef0b79e09d --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/gsw_mt7621.c @@ -0,0 +1,289 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
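Reviewer aside: gsw_mt7620.h above defines the PMCR (port MAC control) bit layout, and mt7620_hw_init() programs the CPU port with the bare literal 0x5e33b ("CPU Port6 Force Link 1G, FC ON"). The stand-alone sketch below rebuilds that value from nothing but the PMCR_* macros shown in the patch, so the magic number can be checked at a glance; it is an illustration for review, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)            (1u << (n))
/* PMCR bit layout copied from gsw_mt7620.h above */
#define PMCR_IPG          BIT(18)
#define PMCR_MAC_MODE     BIT(16)
#define PMCR_FORCE        BIT(15)
#define PMCR_TX_EN        BIT(14)
#define PMCR_RX_EN        BIT(13)
#define PMCR_BACKOFF      BIT(9)
#define PMCR_BACKPRES     BIT(8)
#define PMCR_RX_FC        BIT(5)
#define PMCR_TX_FC        BIT(4)
#define PMCR_SPEED(_x)    ((_x) << 2)   /* 0 = 10M, 1 = 100M, 2 = 1000M */
#define PMCR_DUPLEX       BIT(1)
#define PMCR_LINK         BIT(0)

int main(void)
{
	/* force link up, 1000M full duplex, TX/RX flow control on */
	uint32_t pmcr = PMCR_IPG | PMCR_MAC_MODE | PMCR_FORCE |
			PMCR_TX_EN | PMCR_RX_EN |
			PMCR_BACKOFF | PMCR_BACKPRES |
			PMCR_RX_FC | PMCR_TX_FC |
			PMCR_SPEED(2) | PMCR_DUPLEX | PMCR_LINK;

	/* matches the value written to GSW_REG_PORT_PMCR(6) in mt7620_hw_init() */
	assert(pmcr == 0x5e33b);
	printf("PMCR for forced 1G/FD with flow control: 0x%05x\n", pmcr);
	return 0;
}

The MT7621 path writes 0x2305e33b to GSW_REG_MAC_P0_MCR; its low 20 bits are this same PMCR value, while the upper bits presumably carry the max RX length setting mentioned in the comment (not decoded here).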
+ * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> + +#include <ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "gsw_mt7620.h" + +void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg) +{ + iowrite32(val, gsw->base + reg); +} + +u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned reg) +{ + return ioread32(gsw->base + reg); +} + +static irqreturn_t gsw_interrupt_mt7621(int irq, void *_priv) +{ + struct fe_priv *priv = (struct fe_priv *)_priv; + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + u32 reg, i; + + reg = mt7530_mdio_r32(gsw, 0x700c); + + for (i = 0; i < 5; i++) + if (reg & BIT(i)) { + unsigned int link; + + link = mt7530_mdio_r32(gsw, + 0x3008 + (i * 0x100)) & 0x1; + + if (link != priv->link[i]) { + priv->link[i] = link; + if (link) + netdev_info(priv->netdev, + "port %d link up\n", i); + else + netdev_info(priv->netdev, + "port %d link down\n", i); + } + } + + mt7620_handle_carrier(priv); + mt7530_mdio_w32(gsw, 0x700c, 0x1f); + + return IRQ_HANDLED; +} + +static void mt7621_hw_init(struct mt7620_gsw *gsw, struct device_node *np) +{ + u32 i; + u32 val; + + /* wardware reset the switch */ + fe_reset(RST_CTRL_MCM); + mdelay(10); + + /* reduce RGMII2 PAD driving strength */ + rt_sysc_m32(3 << 4, 0, SYSC_PAD_RGMII2_MDIO); + + /* gpio mux - RGMII1=Normal mode */ + rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE); + + /* set GMAC1 RGMII mode */ + rt_sysc_m32(3 << 12, 0, SYSC_REG_CFG1); + + /* enable MDIO to control MT7530 */ + rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE); + + /* turn off all PHYs */ + for (i = 0; i <= 4; i++) { + val = _mt7620_mii_read(gsw, i, 0x0); + val |= BIT(11); + _mt7620_mii_write(gsw, i, 0x0, val); + } + + /* reset the switch */ + mt7530_mdio_w32(gsw, 0x7000, 0x3); + usleep_range(10, 20); + + if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) { + /* (GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536) */ + mtk_switch_w32(gsw, 0x2305e30b, GSW_REG_MAC_P0_MCR); + mt7530_mdio_w32(gsw, 0x3600, 0x5e30b); + } else { + /* (GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536) */ + mtk_switch_w32(gsw, 0x2305e33b, GSW_REG_MAC_P0_MCR); + mt7530_mdio_w32(gsw, 0x3600, 0x5e33b); + } + + /* (GE2, Link down) */ + mtk_switch_w32(gsw, 0x8000, GSW_REG_MAC_P1_MCR); + + /* Set switch max RX frame length to 2k */ + mt7530_mdio_w32(gsw, GSW_REG_GMACCR, 0x3F0B); + + /* Enable Port 6, P5 as GMAC5, P5 disable */ + val = mt7530_mdio_r32(gsw, 0x7804); + val &= ~BIT(8); + val |= BIT(6) | BIT(13) | BIT(16); + mt7530_mdio_w32(gsw, 0x7804, val); + + val = rt_sysc_r32(0x10); + val = (val >> 6) & 0x7; + if (val >= 6) { + /* 25Mhz Xtal - do nothing */ + } else if (val >= 3) { + /* 40Mhz */ + + /* disable MT7530 core clock */ + _mt7620_mii_write(gsw, 0, 13, 0x1f); + _mt7620_mii_write(gsw, 0, 14, 0x410); + _mt7620_mii_write(gsw, 0, 13, 0x401f); + _mt7620_mii_write(gsw, 0, 14, 0x0); + + /* disable MT7530 PLL */ + _mt7620_mii_write(gsw, 0, 13, 0x1f); + _mt7620_mii_write(gsw, 0, 14, 0x40d); + _mt7620_mii_write(gsw, 0, 13, 0x401f); + _mt7620_mii_write(gsw, 0, 14, 0x2020); + + /* for MT7530 core clock = 500Mhz */ + _mt7620_mii_write(gsw, 0, 13, 0x1f); + _mt7620_mii_write(gsw, 0, 14, 0x40e); + _mt7620_mii_write(gsw, 0, 13, 0x401f); + _mt7620_mii_write(gsw, 0, 14, 0x119); + + /* 
enable MT7530 PLL */ + _mt7620_mii_write(gsw, 0, 13, 0x1f); + _mt7620_mii_write(gsw, 0, 14, 0x40d); + _mt7620_mii_write(gsw, 0, 13, 0x401f); + _mt7620_mii_write(gsw, 0, 14, 0x2820); + + usleep_range(20, 40); + + /* enable MT7530 core clock */ + _mt7620_mii_write(gsw, 0, 13, 0x1f); + _mt7620_mii_write(gsw, 0, 14, 0x410); + _mt7620_mii_write(gsw, 0, 13, 0x401f); + } else { + /* 20Mhz Xtal - TODO */ + } + + /* RGMII */ + _mt7620_mii_write(gsw, 0, 14, 0x1); + + /* set MT7530 central align */ + val = mt7530_mdio_r32(gsw, 0x7830); + val &= ~BIT(0); + val |= BIT(1); + mt7530_mdio_w32(gsw, 0x7830, val); + val = mt7530_mdio_r32(gsw, 0x7a40); + val &= ~BIT(30); + mt7530_mdio_w32(gsw, 0x7a40, val); + mt7530_mdio_w32(gsw, 0x7a78, 0x855); + + /* delay setting for 10/1000M */ + mt7530_mdio_w32(gsw, 0x7b00, 0x102); + mt7530_mdio_w32(gsw, 0x7b04, 0x14); + + /* lower Tx Driving*/ + mt7530_mdio_w32(gsw, 0x7a54, 0x44); + mt7530_mdio_w32(gsw, 0x7a5c, 0x44); + mt7530_mdio_w32(gsw, 0x7a64, 0x44); + mt7530_mdio_w32(gsw, 0x7a6c, 0x44); + mt7530_mdio_w32(gsw, 0x7a74, 0x44); + mt7530_mdio_w32(gsw, 0x7a7c, 0x44); + + /* turn on all PHYs */ + for (i = 0; i <= 4; i++) { + val = _mt7620_mii_read(gsw, i, 0); + val &= ~BIT(11); + _mt7620_mii_write(gsw, i, 0, val); + } + + /* mask irq */ + mt7530_mdio_w32(gsw, 0x7008, 0x1f); + /* enable irq */ + val = mt7530_mdio_r32(gsw, 0x7808); + val |= 3 << 16; + mt7530_mdio_w32(gsw, 0x7808, val); +} + +static const struct of_device_id mediatek_gsw_match[] = { + { .compatible = "mediatek,mt7621-gsw" }, + {}, +}; +MODULE_DEVICE_TABLE(of, mediatek_gsw_match); + +int mtk_gsw_init(struct fe_priv *priv) +{ + struct device_node *np = priv->switch_np; + struct platform_device *pdev = of_find_device_by_node(np); + struct mt7620_gsw *gsw; + + if (!pdev) + return -ENODEV; + + if (!of_device_is_compatible(np, mediatek_gsw_match->compatible)) + return -EINVAL; + + gsw = platform_get_drvdata(pdev); + priv->soc->swpriv = gsw; + + if (gsw->irq) { + request_irq(gsw->irq, gsw_interrupt_mt7621, 0, + "gsw", priv); + mt7530_mdio_w32(gsw, 0x7008, ~0x1f); + } + mt7621_hw_init(gsw, np); + + return 0; +} + +static int mt7621_gsw_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + const char *port4 = NULL; + struct mt7620_gsw *gsw; + struct device_node *np; + + gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL); + if (!gsw) + return -ENOMEM; + + gsw->base = devm_ioremap_resource(&pdev->dev, res); + if (!gsw->base) + return -EADDRNOTAVAIL; + + gsw->dev = &pdev->dev; + + of_property_read_string(np, "mediatek,port4", &port4); + if (port4 && !strcmp(port4, "ephy")) + gsw->port4 = PORT4_EPHY; + else if (port4 && !strcmp(port4, "gmac")) + gsw->port4 = PORT4_EXT; + else + gsw->port4 = PORT4_EPHY; + + gsw->irq = platform_get_irq(pdev, 0); + + platform_set_drvdata(pdev, gsw); + + return 0; +} + +static int mt7621_gsw_remove(struct platform_device *pdev) +{ + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver gsw_driver = { + .probe = mt7621_gsw_probe, + .remove = mt7621_gsw_remove, + .driver = { + .name = "mt7621-gsw", + .owner = THIS_MODULE, + .of_match_table = mediatek_gsw_match, + }, +}; + +module_platform_driver(gsw_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC"); +MODULE_VERSION(MTK_FE_DRV_VERSION); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio.c 
b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio.c new file mode 100644 index 0000000000..d3798bbd65 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio.c @@ -0,0 +1,259 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> + +#include "mtk_eth_soc.h" +#include "mdio.h" + +static int fe_mdio_reset(struct mii_bus *bus) +{ + /* TODO */ + return 0; +} + +static void fe_phy_link_adjust(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + unsigned long flags; + int i; + + spin_lock_irqsave(&priv->phy->lock, flags); + for (i = 0; i < 8; i++) { + if (priv->phy->phy_node[i]) { + struct phy_device *phydev = priv->phy->phy[i]; + int status_change = 0; + + if (phydev->link) + if (priv->phy->duplex[i] != phydev->duplex || + priv->phy->speed[i] != phydev->speed) + status_change = 1; + + if (phydev->link != priv->link[i]) + status_change = 1; + + switch (phydev->speed) { + case SPEED_1000: + case SPEED_100: + case SPEED_10: + priv->link[i] = phydev->link; + priv->phy->duplex[i] = phydev->duplex; + priv->phy->speed[i] = phydev->speed; + + if (status_change && + priv->soc->mdio_adjust_link) + priv->soc->mdio_adjust_link(priv, i); + break; + } + } + } +} + +int fe_connect_phy_node(struct fe_priv *priv, struct device_node *phy_node) +{ + const __be32 *_port = NULL; + struct phy_device *phydev; + int phy_mode, port; + + _port = of_get_property(phy_node, "reg", NULL); + + if (!_port || (be32_to_cpu(*_port) >= 0x20)) { + pr_err("%s: invalid port id\n", phy_node->name); + return -EINVAL; + } + port = be32_to_cpu(*_port); + phy_mode = of_get_phy_mode(phy_node); + if (phy_mode < 0) { + dev_err(priv->device, "incorrect phy-mode %d\n", phy_mode); + priv->phy->phy_node[port] = NULL; + return -EINVAL; + } + + phydev = of_phy_connect(priv->netdev, phy_node, fe_phy_link_adjust, + 0, phy_mode); + if (IS_ERR(phydev)) { + dev_err(priv->device, "could not connect to PHY\n"); + priv->phy->phy_node[port] = NULL; + return PTR_ERR(phydev); + } + + phydev->supported &= PHY_GBIT_FEATURES; + phydev->advertising = phydev->supported; + phydev->no_auto_carrier_off = 1; + + dev_info(priv->device, + "connected port %d to PHY at %s [uid=%08x, driver=%s]\n", + port, dev_name(&phydev->mdio.dev), phydev->phy_id, + phydev->drv->name); + + priv->phy->phy[port] = phydev; + priv->link[port] = 0; + + return 0; +} + +static void phy_init(struct fe_priv *priv, struct phy_device *phy) +{ + phy_attach(priv->netdev, dev_name(&phy->mdio.dev), PHY_INTERFACE_MODE_MII); + + phy->autoneg = AUTONEG_ENABLE; + phy->speed = 0; + phy->duplex = 0; + phy->supported &= PHY_BASIC_FEATURES; + phy->advertising = phy->supported | ADVERTISED_Autoneg; + + phy_start_aneg(phy); +} + +static int fe_phy_connect(struct fe_priv *priv) +{ + int i; + + for (i = 0; i < 8; i++) { + if (priv->phy->phy_node[i]) { + if (!priv->phy_dev) { + priv->phy_dev = priv->phy->phy[i]; + priv->phy_flags = FE_PHY_FLAG_PORT; + } + } else if (priv->mii_bus && mdiobus_get_phy(priv->mii_bus, i)) { + phy_init(priv, mdiobus_get_phy(priv->mii_bus, i)); + if 
(!priv->phy_dev) { + priv->phy_dev = mdiobus_get_phy(priv->mii_bus, i); + priv->phy_flags = FE_PHY_FLAG_ATTACH; + } + } + } + + return 0; +} + +static void fe_phy_disconnect(struct fe_priv *priv) +{ + unsigned long flags; + int i; + + for (i = 0; i < 8; i++) + if (priv->phy->phy_fixed[i]) { + spin_lock_irqsave(&priv->phy->lock, flags); + priv->link[i] = 0; + if (priv->soc->mdio_adjust_link) + priv->soc->mdio_adjust_link(priv, i); + spin_unlock_irqrestore(&priv->phy->lock, flags); + } else if (priv->phy->phy[i]) { + phy_disconnect(priv->phy->phy[i]); + } else if (priv->mii_bus && mdiobus_get_phy(priv->mii_bus, i)) { + phy_detach(mdiobus_get_phy(priv->mii_bus, i)); + } +} + +static void fe_phy_start(struct fe_priv *priv) +{ + unsigned long flags; + int i; + + for (i = 0; i < 8; i++) { + if (priv->phy->phy_fixed[i]) { + spin_lock_irqsave(&priv->phy->lock, flags); + priv->link[i] = 1; + if (priv->soc->mdio_adjust_link) + priv->soc->mdio_adjust_link(priv, i); + spin_unlock_irqrestore(&priv->phy->lock, flags); + } else if (priv->phy->phy[i]) { + phy_start(priv->phy->phy[i]); + } + } +} + +static void fe_phy_stop(struct fe_priv *priv) +{ + unsigned long flags; + int i; + + for (i = 0; i < 8; i++) + if (priv->phy->phy_fixed[i]) { + spin_lock_irqsave(&priv->phy->lock, flags); + priv->link[i] = 0; + if (priv->soc->mdio_adjust_link) + priv->soc->mdio_adjust_link(priv, i); + spin_unlock_irqrestore(&priv->phy->lock, flags); + } else if (priv->phy->phy[i]) { + phy_stop(priv->phy->phy[i]); + } +} + +static struct fe_phy phy_ralink = { + .connect = fe_phy_connect, + .disconnect = fe_phy_disconnect, + .start = fe_phy_start, + .stop = fe_phy_stop, +}; + +int fe_mdio_init(struct fe_priv *priv) +{ + struct device_node *mii_np; + int err; + + if (!priv->soc->mdio_read || !priv->soc->mdio_write) + return 0; + + spin_lock_init(&phy_ralink.lock); + priv->phy = &phy_ralink; + + mii_np = of_get_child_by_name(priv->device->of_node, "mdio-bus"); + if (!mii_np) { + dev_err(priv->device, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + err = 0; + goto err_put_node; + } + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + err = -ENOMEM; + goto err_put_node; + } + + priv->mii_bus->name = "mdio"; + priv->mii_bus->read = priv->soc->mdio_read; + priv->mii_bus->write = priv->soc->mdio_write; + priv->mii_bus->reset = fe_mdio_reset; + priv->mii_bus->priv = priv; + priv->mii_bus->parent = priv->device; + + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); + err = of_mdiobus_register(priv->mii_bus, mii_np); + if (err) + goto err_free_bus; + + return 0; + +err_free_bus: + kfree(priv->mii_bus); +err_put_node: + of_node_put(mii_np); + priv->mii_bus = NULL; + return err; +} + +void fe_mdio_cleanup(struct fe_priv *priv) +{ + if (!priv->mii_bus) + return; + + mdiobus_unregister(priv->mii_bus); + of_node_put(priv->mii_bus->dev.of_node); + kfree(priv->mii_bus); +} diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio.h new file mode 100644 index 0000000000..498cf144e6 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio.h @@ -0,0 +1,27 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; 
without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#ifndef _RALINK_MDIO_H__ +#define _RALINK_MDIO_H__ + +#ifdef CONFIG_NET_MEDIATEK_MDIO +int fe_mdio_init(struct fe_priv *priv); +void fe_mdio_cleanup(struct fe_priv *priv); +int fe_connect_phy_node(struct fe_priv *priv, + struct device_node *phy_node); +#else +static inline int fe_mdio_init(struct fe_priv *priv) { return 0; } +static inline void fe_mdio_cleanup(struct fe_priv *priv) {} +#endif +#endif diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_mt7620.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_mt7620.c new file mode 100644 index 0000000000..9efe7896a5 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_mt7620.c @@ -0,0 +1,168 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> + +#include "mtk_eth_soc.h" +#include "gsw_mt7620.h" +#include "mdio.h" + +static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_switch_r32(gsw, MT7620A_GSW_REG_PIAC) & GSW_MDIO_ACCESS)) + return 0; + if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT)) + break; + } + + dev_err(gsw->dev, "mdio: MDIO timeout\n"); + return -1; +} + +u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, + u32 phy_register, u32 write_data) +{ + if (mt7620_mii_busy_wait(gsw)) + return -1; + + write_data &= 0xffff; + + mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE | + (phy_register << GSW_MDIO_REG_SHIFT) | + (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data, + MT7620A_GSW_REG_PIAC); + + if (mt7620_mii_busy_wait(gsw)) + return -1; + + return 0; +} + +u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg) +{ + u32 d; + + if (mt7620_mii_busy_wait(gsw)) + return 0xffff; + + mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ | + (phy_reg << GSW_MDIO_REG_SHIFT) | + (phy_addr << GSW_MDIO_ADDR_SHIFT), + MT7620A_GSW_REG_PIAC); + + if (mt7620_mii_busy_wait(gsw)) + return 0xffff; + + d = mtk_switch_r32(gsw, MT7620A_GSW_REG_PIAC) & 0xffff; + + return d; +} + +int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) +{ + struct fe_priv *priv = bus->priv; + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + + return _mt7620_mii_write(gsw, phy_addr, phy_reg, val); +} + +int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + struct fe_priv *priv = bus->priv; + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + + return _mt7620_mii_read(gsw, phy_addr, phy_reg); +} + +void 
mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val) +{ + _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff); + _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff); + _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16); +} + +u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg) +{ + u16 high, low; + + _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff); + low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf); + high = _mt7620_mii_read(gsw, 0x1f, 0x10); + + return (high << 16) | (low & 0xffff); +} + +static unsigned char *fe_speed_str(int speed) +{ + switch (speed) { + case 2: + case SPEED_1000: + return "1000"; + case 1: + case SPEED_100: + return "100"; + case 0: + case SPEED_10: + return "10"; + } + + return "? "; +} + +int mt7620_has_carrier(struct fe_priv *priv) +{ + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + int i; + + for (i = 0; i < GSW_PORT6; i++) + if (mtk_switch_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1) + return 1; + return 0; +} + + +void mt7620_handle_carrier(struct fe_priv *priv) +{ + if (!priv->phy) + return; + + if (mt7620_has_carrier(priv)) + netif_carrier_on(priv->netdev); + else + netif_carrier_off(priv->netdev); +} + +void mt7620_print_link_state(struct fe_priv *priv, int port, int link, + int speed, int duplex) +{ + if (link) + netdev_info(priv->netdev, "port %d link up (%sMbps/%s duplex)\n", + port, fe_speed_str(speed), + (duplex) ? "Full" : "Half"); + else + netdev_info(priv->netdev, "port %d link down\n", port); +} + +void mt7620_mdio_link_adjust(struct fe_priv *priv, int port) +{ + mt7620_print_link_state(priv, port, priv->link[port], + priv->phy->speed[port], + (priv->phy->duplex[port] == DUPLEX_FULL)); + mt7620_handle_carrier(priv); +} diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_rt2880.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_rt2880.c new file mode 100644 index 0000000000..e72f8c72b1 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_rt2880.c @@ -0,0 +1,222 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
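Reviewer aside: mt7530_mdio_w32()/mt7530_mdio_r32() above tunnel 32-bit MT7530 register accesses through plain 16-bit MDIO transactions (via _mt7620_mii_write() here, and via mdiobus_write() in mt7530.c's mt7530_w32()): a page-select write to PHY 0x1f, register 0x1f carrying bits 15:6 of the register address, then the low half at register (reg >> 2) & 0xf and the high half at register 0x10. The user-space sketch below only traces that sequence against dummy bus helpers so the protocol is easy to see; the trace functions are illustrative stand-ins, not driver code.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the MDIO bus accessors */
static void mdio_write(int phy, int reg, uint16_t val)
{
	printf("MDIO write phy=0x%02x reg=0x%02x val=0x%04x\n", phy, reg, val);
}

static uint16_t mdio_read(int phy, int reg)
{
	printf("MDIO read  phy=0x%02x reg=0x%02x\n", phy, reg);
	return 0;	/* a real bus would return one 16-bit half here */
}

/* same splitting as mt7530_mdio_w32() in mdio_mt7620.c above */
static void mt7530_w32(uint32_t reg, uint32_t val)
{
	mdio_write(0x1f, 0x1f, (reg >> 6) & 0x3ff);        /* page select   */
	mdio_write(0x1f, (reg >> 2) & 0xf, val & 0xffff);  /* low 16 bits   */
	mdio_write(0x1f, 0x10, val >> 16);                 /* high 16 bits  */
}

/* same splitting as mt7530_mdio_r32() */
static uint32_t mt7530_r32(uint32_t reg)
{
	uint16_t low, high;

	mdio_write(0x1f, 0x1f, (reg >> 6) & 0x3ff);
	low  = mdio_read(0x1f, (reg >> 2) & 0xf);
	high = mdio_read(0x1f, 0x10);
	return ((uint32_t)high << 16) | low;
}

int main(void)
{
	mt7530_w32(0x7804, 0x12345678);	/* 0x7804 is REG_HWTRAP in mt7530.c */
	(void)mt7530_r32(0x7804);
	return 0;
}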
+ * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> + +#include "mtk_eth_soc.h" +#include "mdio_rt2880.h" +#include "mdio.h" + +#define FE_MDIO_RETRY 1000 + +static unsigned char *rt2880_speed_str(struct fe_priv *priv) +{ + switch (priv->phy->speed[0]) { + case SPEED_1000: + return "1000"; + case SPEED_100: + return "100"; + case SPEED_10: + return "10"; + } + + return "?"; +} + +void rt2880_mdio_link_adjust(struct fe_priv *priv, int port) +{ + u32 mdio_cfg; + + if (!priv->link[0]) { + netif_carrier_off(priv->netdev); + netdev_info(priv->netdev, "link down\n"); + return; + } + + mdio_cfg = FE_MDIO_CFG_TX_CLK_SKEW_200 | + FE_MDIO_CFG_RX_CLK_SKEW_200 | + FE_MDIO_CFG_GP1_FRC_EN; + + if (priv->phy->duplex[0] == DUPLEX_FULL) + mdio_cfg |= FE_MDIO_CFG_GP1_DUPLEX; + + if (priv->phy->tx_fc[0]) + mdio_cfg |= FE_MDIO_CFG_GP1_FC_TX; + + if (priv->phy->rx_fc[0]) + mdio_cfg |= FE_MDIO_CFG_GP1_FC_RX; + + switch (priv->phy->speed[0]) { + case SPEED_10: + mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_10; + break; + case SPEED_100: + mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_100; + break; + case SPEED_1000: + mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_1000; + break; + default: + BUG(); + } + + fe_w32(mdio_cfg, FE_MDIO_CFG); + + netif_carrier_on(priv->netdev); + netdev_info(priv->netdev, "link up (%sMbps/%s duplex)\n", + rt2880_speed_str(priv), + (priv->phy->duplex[0] == DUPLEX_FULL) ? "Full" : "Half"); +} + +static int rt2880_mdio_wait_ready(struct fe_priv *priv) +{ + int retries; + + retries = FE_MDIO_RETRY; + while (1) { + u32 t; + + t = fe_r32(FE_MDIO_ACCESS); + if ((t & BIT(31)) == 0) + return 0; + + if (retries-- == 0) + break; + + udelay(1); + } + + dev_err(priv->device, "MDIO operation timed out\n"); + return -ETIMEDOUT; +} + +int rt2880_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + struct fe_priv *priv = bus->priv; + int err; + u32 t; + + err = rt2880_mdio_wait_ready(priv); + if (err) + return 0xffff; + + t = (phy_addr << 24) | (phy_reg << 16); + fe_w32(t, FE_MDIO_ACCESS); + t |= BIT(31); + fe_w32(t, FE_MDIO_ACCESS); + + err = rt2880_mdio_wait_ready(priv); + if (err) + return 0xffff; + + pr_debug("%s: addr=%04x, reg=%04x, value=%04x\n", __func__, + phy_addr, phy_reg, fe_r32(FE_MDIO_ACCESS) & 0xffff); + + return fe_r32(FE_MDIO_ACCESS) & 0xffff; +} + +int rt2880_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) +{ + struct fe_priv *priv = bus->priv; + int err; + u32 t; + + pr_debug("%s: addr=%04x, reg=%04x, value=%04x\n", __func__, + phy_addr, phy_reg, fe_r32(FE_MDIO_ACCESS) & 0xffff); + + err = rt2880_mdio_wait_ready(priv); + if (err) + return err; + + t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val; + fe_w32(t, FE_MDIO_ACCESS); + t |= BIT(31); + fe_w32(t, FE_MDIO_ACCESS); + + return rt2880_mdio_wait_ready(priv); +} + +void rt2880_port_init(struct fe_priv *priv, struct device_node *np) +{ + const __be32 *id = of_get_property(np, "reg", NULL); + const __be32 *link; + int size; + int phy_mode; + + if (!id || (be32_to_cpu(*id) != 0)) { + pr_err("%s: invalid port id\n", np->name); + return; + } + + priv->phy->phy_fixed[0] = of_get_property(np, + "mediatek,fixed-link", &size); + if (priv->phy->phy_fixed[0] && + (size != (4 * sizeof(*priv->phy->phy_fixed[0])))) { + pr_err("%s: invalid fixed link property\n", np->name); + 
priv->phy->phy_fixed[0] = NULL; + return; + } + + phy_mode = of_get_phy_mode(np); + switch (phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + break; + case PHY_INTERFACE_MODE_MII: + break; + case PHY_INTERFACE_MODE_RMII: + break; + default: + if (!priv->phy->phy_fixed[0]) + dev_err(priv->device, "port %d - invalid phy mode\n", + priv->phy->speed[0]); + break; + } + + priv->phy->phy_node[0] = of_parse_phandle(np, "phy-handle", 0); + if (!priv->phy->phy_node[0] && !priv->phy->phy_fixed[0]) + return; + + if (priv->phy->phy_fixed[0]) { + link = priv->phy->phy_fixed[0]; + priv->phy->speed[0] = be32_to_cpup(link++); + priv->phy->duplex[0] = be32_to_cpup(link++); + priv->phy->tx_fc[0] = be32_to_cpup(link++); + priv->phy->rx_fc[0] = be32_to_cpup(link++); + + priv->link[0] = 1; + switch (priv->phy->speed[0]) { + case SPEED_10: + break; + case SPEED_100: + break; + case SPEED_1000: + break; + default: + dev_err(priv->device, "invalid link speed: %d\n", + priv->phy->speed[0]); + priv->phy->phy_fixed[0] = 0; + return; + } + dev_info(priv->device, "using fixed link parameters\n"); + rt2880_mdio_link_adjust(priv, 0); + return; + } + + if (priv->phy->phy_node[0] && mdiobus_get_phy(priv->mii_bus, 0)) + fe_connect_phy_node(priv, priv->phy->phy_node[0]); +} diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_rt2880.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_rt2880.h new file mode 100644 index 0000000000..70e344283e --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mdio_rt2880.h @@ -0,0 +1,23 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#ifndef _RALINK_MDIO_RT2880_H__ +#define _RALINK_MDIO_RT2880_H__ + +void rt2880_mdio_link_adjust(struct fe_priv *priv, int port); +int rt2880_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); +int rt2880_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); +void rt2880_port_init(struct fe_priv *priv, struct device_node *np); + +#endif diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mt7530.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mt7530.c new file mode 100644 index 0000000000..5216cb5c66 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mt7530.c @@ -0,0 +1,979 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
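Reviewer aside: rt2880_port_init() above only accepts a "mediatek,fixed-link" property of exactly four 32-bit cells, interpreted in order as speed, duplex, TX flow control and RX flow control, and stored big-endian as all device-tree properties are. A minimal user-space sketch of that parse follows, with a hard-coded byte buffer standing in for of_get_property(); the example values are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* device-tree cells are big-endian 32-bit values */
static uint32_t read_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

struct fixed_link {
	uint32_t speed;		/* 10, 100 or 1000, validated like SPEED_* above */
	uint32_t duplex;	/* non-zero = full duplex */
	uint32_t tx_fc;		/* TX flow control */
	uint32_t rx_fc;		/* RX flow control */
};

int main(void)
{
	/* hypothetical property: <1000 1 1 1> as it would appear in a .dts */
	static const uint8_t prop[16] = {
		0x00, 0x00, 0x03, 0xe8,	/* 1000 */
		0x00, 0x00, 0x00, 0x01,	/* full duplex */
		0x00, 0x00, 0x00, 0x01,	/* tx_fc */
		0x00, 0x00, 0x00, 0x01,	/* rx_fc */
	};
	struct fixed_link link;

	/* rt2880_port_init() rejects the property unless it is 4 cells long */
	if (sizeof(prop) != 4 * sizeof(uint32_t)) {
		fprintf(stderr, "invalid fixed link property\n");
		return 1;
	}

	link.speed  = read_be32(&prop[0]);
	link.duplex = read_be32(&prop[4]);
	link.tx_fc  = read_be32(&prop[8]);
	link.rx_fc  = read_be32(&prop[12]);

	printf("fixed link: %uMbps, %s duplex, tx_fc=%u, rx_fc=%u\n",
	       link.speed, link.duplex ? "full" : "half", link.tx_fc, link.rx_fc);
	return 0;
}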
+ * + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + * Copyright (C) 2016 Vitaly Chekryzhev <13hakta@gmail.com> + */ + +#include <linux/if.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/if_ether.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/netlink.h> +#include <linux/bitops.h> +#include <net/genetlink.h> +#include <linux/switch.h> +#include <linux/delay.h> +#include <linux/phy.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/lockdep.h> +#include <linux/workqueue.h> +#include <linux/of_device.h> + +#include "mt7530.h" + +#define MT7530_CPU_PORT 6 +#define MT7530_NUM_PORTS 8 +#ifdef CONFIG_SOC_MT7621 +#define MT7530_NUM_VLANS 4095 +#else +#define MT7530_NUM_VLANS 16 +#endif +#define MT7530_MAX_VID 4095 +#define MT7530_MIN_VID 0 + +#define MT7530_PORT_MIB_TXB_ID 2 /* TxGOC */ +#define MT7530_PORT_MIB_RXB_ID 6 /* RxGOC */ + +#define MT7621_PORT_MIB_TXB_ID 18 /* TxByte */ +#define MT7621_PORT_MIB_RXB_ID 37 /* RxByte */ + +/* registers */ +#define REG_ESW_VLAN_VTCR 0x90 +#define REG_ESW_VLAN_VAWD1 0x94 +#define REG_ESW_VLAN_VAWD2 0x98 +#define REG_ESW_VLAN_VTIM(x) (0x100 + 4 * ((x) / 2)) + +#define REG_ESW_VLAN_VAWD1_IVL_MAC BIT(30) +#define REG_ESW_VLAN_VAWD1_VTAG_EN BIT(28) +#define REG_ESW_VLAN_VAWD1_VALID BIT(0) + +/* vlan egress mode */ +enum { + ETAG_CTRL_UNTAG = 0, + ETAG_CTRL_TAG = 2, + ETAG_CTRL_SWAP = 1, + ETAG_CTRL_STACK = 3, +}; + +#define REG_ESW_PORT_PCR(x) (0x2004 | ((x) << 8)) +#define REG_ESW_PORT_PVC(x) (0x2010 | ((x) << 8)) +#define REG_ESW_PORT_PPBV1(x) (0x2014 | ((x) << 8)) + +#define REG_HWTRAP 0x7804 + +#define MIB_DESC(_s , _o, _n) \ + { \ + .size = (_s), \ + .offset = (_o), \ + .name = (_n), \ + } + +struct mt7xxx_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +static const struct mt7xxx_mib_desc mt7620_mibs[] = { + MIB_DESC(1, MT7620_MIB_STATS_PPE_AC_BCNT0, "PPE_AC_BCNT0"), + MIB_DESC(1, MT7620_MIB_STATS_PPE_AC_PCNT0, "PPE_AC_PCNT0"), + MIB_DESC(1, MT7620_MIB_STATS_PPE_AC_BCNT63, "PPE_AC_BCNT63"), + MIB_DESC(1, MT7620_MIB_STATS_PPE_AC_PCNT63, "PPE_AC_PCNT63"), + MIB_DESC(1, MT7620_MIB_STATS_PPE_MTR_CNT0, "PPE_MTR_CNT0"), + MIB_DESC(1, MT7620_MIB_STATS_PPE_MTR_CNT63, "PPE_MTR_CNT63"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_TX_GBCNT, "GDM1_TX_GBCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_TX_GPCNT, "GDM1_TX_GPCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_TX_SKIPCNT, "GDM1_TX_SKIPCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_TX_COLCNT, "GDM1_TX_COLCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_GBCNT1, "GDM1_RX_GBCNT1"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_GPCNT1, "GDM1_RX_GPCNT1"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_OERCNT, "GDM1_RX_OERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_FERCNT, "GDM1_RX_FERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_SERCNT, "GDM1_RX_SERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_LERCNT, "GDM1_RX_LERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_CERCNT, "GDM1_RX_CERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM1_RX_FCCNT, "GDM1_RX_FCCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_TX_GBCNT, "GDM2_TX_GBCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_TX_GPCNT, "GDM2_TX_GPCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_TX_SKIPCNT, "GDM2_TX_SKIPCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_TX_COLCNT, "GDM2_TX_COLCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_GBCNT, "GDM2_RX_GBCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_GPCNT, "GDM2_RX_GPCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_OERCNT, "GDM2_RX_OERCNT"), + 
MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_FERCNT, "GDM2_RX_FERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_SERCNT, "GDM2_RX_SERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_LERCNT, "GDM2_RX_LERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_CERCNT, "GDM2_RX_CERCNT"), + MIB_DESC(1, MT7620_MIB_STATS_GDM2_RX_FCCNT, "GDM2_RX_FCCNT") +}; + +static const struct mt7xxx_mib_desc mt7620_port_mibs[] = { + MIB_DESC(1, MT7620_MIB_STATS_PORT_TGPCN, "TxGPC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_TBOCN, "TxBOC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_TGOCN, "TxGOC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_TEPCN, "TxEPC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_RGPCN, "RxGPC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_RBOCN, "RxBOC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_RGOCN, "RxGOC"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_REPC1N, "RxEPC1"), + MIB_DESC(1, MT7620_MIB_STATS_PORT_REPC2N, "RxEPC2") +}; + +static const struct mt7xxx_mib_desc mt7621_mibs[] = { + MIB_DESC(1, MT7621_STATS_TDPC, "TxDrop"), + MIB_DESC(1, MT7621_STATS_TCRC, "TxCRC"), + MIB_DESC(1, MT7621_STATS_TUPC, "TxUni"), + MIB_DESC(1, MT7621_STATS_TMPC, "TxMulti"), + MIB_DESC(1, MT7621_STATS_TBPC, "TxBroad"), + MIB_DESC(1, MT7621_STATS_TCEC, "TxCollision"), + MIB_DESC(1, MT7621_STATS_TSCEC, "TxSingleCol"), + MIB_DESC(1, MT7621_STATS_TMCEC, "TxMultiCol"), + MIB_DESC(1, MT7621_STATS_TDEC, "TxDefer"), + MIB_DESC(1, MT7621_STATS_TLCEC, "TxLateCol"), + MIB_DESC(1, MT7621_STATS_TXCEC, "TxExcCol"), + MIB_DESC(1, MT7621_STATS_TPPC, "TxPause"), + MIB_DESC(1, MT7621_STATS_TL64PC, "Tx64Byte"), + MIB_DESC(1, MT7621_STATS_TL65PC, "Tx65Byte"), + MIB_DESC(1, MT7621_STATS_TL128PC, "Tx128Byte"), + MIB_DESC(1, MT7621_STATS_TL256PC, "Tx256Byte"), + MIB_DESC(1, MT7621_STATS_TL512PC, "Tx512Byte"), + MIB_DESC(1, MT7621_STATS_TL1024PC, "Tx1024Byte"), + MIB_DESC(2, MT7621_STATS_TOC, "TxByte"), + MIB_DESC(1, MT7621_STATS_RDPC, "RxDrop"), + MIB_DESC(1, MT7621_STATS_RFPC, "RxFiltered"), + MIB_DESC(1, MT7621_STATS_RUPC, "RxUni"), + MIB_DESC(1, MT7621_STATS_RMPC, "RxMulti"), + MIB_DESC(1, MT7621_STATS_RBPC, "RxBroad"), + MIB_DESC(1, MT7621_STATS_RAEPC, "RxAlignErr"), + MIB_DESC(1, MT7621_STATS_RCEPC, "RxCRC"), + MIB_DESC(1, MT7621_STATS_RUSPC, "RxUnderSize"), + MIB_DESC(1, MT7621_STATS_RFEPC, "RxFragment"), + MIB_DESC(1, MT7621_STATS_ROSPC, "RxOverSize"), + MIB_DESC(1, MT7621_STATS_RJEPC, "RxJabber"), + MIB_DESC(1, MT7621_STATS_RPPC, "RxPause"), + MIB_DESC(1, MT7621_STATS_RL64PC, "Rx64Byte"), + MIB_DESC(1, MT7621_STATS_RL65PC, "Rx65Byte"), + MIB_DESC(1, MT7621_STATS_RL128PC, "Rx128Byte"), + MIB_DESC(1, MT7621_STATS_RL256PC, "Rx256Byte"), + MIB_DESC(1, MT7621_STATS_RL512PC, "Rx512Byte"), + MIB_DESC(1, MT7621_STATS_RL1024PC, "Rx1024Byte"), + MIB_DESC(2, MT7621_STATS_ROC, "RxByte"), + MIB_DESC(1, MT7621_STATS_RDPC_CTRL, "RxCtrlDrop"), + MIB_DESC(1, MT7621_STATS_RDPC_ING, "RxIngDrop"), + MIB_DESC(1, MT7621_STATS_RDPC_ARL, "RxARLDrop") +}; + +enum { + /* Global attributes. 
*/ + MT7530_ATTR_ENABLE_VLAN, +}; + +struct mt7530_port_entry { + u16 pvid; +}; + +struct mt7530_vlan_entry { + u16 vid; + u8 member; + u8 etags; +}; + +struct mt7530_priv { + void __iomem *base; + struct mii_bus *bus; + struct switch_dev swdev; + + bool global_vlan_enable; + struct mt7530_vlan_entry vlan_entries[MT7530_NUM_VLANS]; + struct mt7530_port_entry port_entries[MT7530_NUM_PORTS]; +}; + +struct mt7530_mapping { + char *name; + u16 pvids[MT7530_NUM_PORTS]; + u8 members[MT7530_NUM_VLANS]; + u8 etags[MT7530_NUM_VLANS]; + u16 vids[MT7530_NUM_VLANS]; +} mt7530_defaults[] = { + { + .name = "llllw", + .pvids = { 1, 1, 1, 1, 2, 1, 1 }, + .members = { 0, 0x6f, 0x50 }, + .etags = { 0, 0x40, 0x40 }, + .vids = { 0, 1, 2 }, + }, { + .name = "wllll", + .pvids = { 2, 1, 1, 1, 1, 1, 1 }, + .members = { 0, 0x7e, 0x41 }, + .etags = { 0, 0x40, 0x40 }, + .vids = { 0, 1, 2 }, + }, { + .name = "lwlll", + .pvids = { 1, 2, 1, 1, 1, 1, 1 }, + .members = { 0, 0x7d, 0x42 }, + .etags = { 0, 0x40, 0x40 }, + .vids = { 0, 1, 2 }, + }, +}; + +struct mt7530_mapping* +mt7530_find_mapping(struct device_node *np) +{ + const char *map; + int i; + + if (of_property_read_string(np, "mediatek,portmap", &map)) + return NULL; + + for (i = 0; i < ARRAY_SIZE(mt7530_defaults); i++) + if (!strcmp(map, mt7530_defaults[i].name)) + return &mt7530_defaults[i]; + + return NULL; +} + +static void +mt7530_apply_mapping(struct mt7530_priv *mt7530, struct mt7530_mapping *map) +{ + int i = 0; + + for (i = 0; i < MT7530_NUM_PORTS; i++) + mt7530->port_entries[i].pvid = map->pvids[i]; + + for (i = 0; i < MT7530_NUM_VLANS; i++) { + mt7530->vlan_entries[i].member = map->members[i]; + mt7530->vlan_entries[i].etags = map->etags[i]; + mt7530->vlan_entries[i].vid = map->vids[i]; + } +} + +static int +mt7530_reset_switch(struct switch_dev *dev) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + int i; + + memset(priv->port_entries, 0, sizeof(priv->port_entries)); + memset(priv->vlan_entries, 0, sizeof(priv->vlan_entries)); + + /* set default vid of each vlan to the same number of vlan, so the vid + * won't need be set explicitly. 
+ */ + for (i = 0; i < MT7530_NUM_VLANS; i++) { + priv->vlan_entries[i].vid = i; + } + + return 0; +} + +static int +mt7530_get_vlan_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + + val->value.i = priv->global_vlan_enable; + + return 0; +} + +static int +mt7530_set_vlan_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + + priv->global_vlan_enable = val->value.i != 0; + + return 0; +} + +static u32 +mt7530_r32(struct mt7530_priv *priv, u32 reg) +{ + u32 val; + if (priv->bus) { + u16 high, low; + + mdiobus_write(priv->bus, 0x1f, 0x1f, (reg >> 6) & 0x3ff); + low = mdiobus_read(priv->bus, 0x1f, (reg >> 2) & 0xf); + high = mdiobus_read(priv->bus, 0x1f, 0x10); + + return (high << 16) | (low & 0xffff); + } + + val = ioread32(priv->base + reg); + pr_debug("MT7530 MDIO Read [%04x]=%08x\n", reg, val); + + return val; +} + +static void +mt7530_w32(struct mt7530_priv *priv, u32 reg, u32 val) +{ + if (priv->bus) { + mdiobus_write(priv->bus, 0x1f, 0x1f, (reg >> 6) & 0x3ff); + mdiobus_write(priv->bus, 0x1f, (reg >> 2) & 0xf, val & 0xffff); + mdiobus_write(priv->bus, 0x1f, 0x10, val >> 16); + return; + } + + pr_debug("MT7530 MDIO Write[%04x]=%08x\n", reg, val); + iowrite32(val, priv->base + reg); +} + +static void +mt7530_vtcr(struct mt7530_priv *priv, u32 cmd, u32 val) +{ + int i; + + mt7530_w32(priv, REG_ESW_VLAN_VTCR, BIT(31) | (cmd << 12) | val); + + for (i = 0; i < 20; i++) { + u32 val = mt7530_r32(priv, REG_ESW_VLAN_VTCR); + + if ((val & BIT(31)) == 0) + break; + + udelay(1000); + } + if (i == 20) + printk("mt7530: vtcr timeout\n"); +} + +static int +mt7530_get_port_pvid(struct switch_dev *dev, int port, int *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + + if (port >= MT7530_NUM_PORTS) + return -EINVAL; + + *val = mt7530_r32(priv, REG_ESW_PORT_PPBV1(port)); + *val &= 0xfff; + + return 0; +} + +static int +mt7530_set_port_pvid(struct switch_dev *dev, int port, int pvid) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + + if (port >= MT7530_NUM_PORTS) + return -EINVAL; + + if (pvid < MT7530_MIN_VID || pvid > MT7530_MAX_VID) + return -EINVAL; + + priv->port_entries[port].pvid = pvid; + + return 0; +} + +static int +mt7530_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + u32 member; + u32 etags; + int i; + + val->len = 0; + + if (val->port_vlan < 0 || val->port_vlan >= MT7530_NUM_VLANS) + return -EINVAL; + + mt7530_vtcr(priv, 0, val->port_vlan); + + member = mt7530_r32(priv, REG_ESW_VLAN_VAWD1); + member >>= 16; + member &= 0xff; + + etags = mt7530_r32(priv, REG_ESW_VLAN_VAWD2); + + for (i = 0; i < MT7530_NUM_PORTS; i++) { + struct switch_port *p; + int etag; + + if (!(member & BIT(i))) + continue; + + p = &val->value.ports[val->len++]; + p->id = i; + + etag = (etags >> (i * 2)) & 0x3; + + if (etag == ETAG_CTRL_TAG) + p->flags |= BIT(SWITCH_PORT_FLAG_TAGGED); + else if (etag != ETAG_CTRL_UNTAG) + printk("vlan egress tag control neither untag nor tag.\n"); + } + + return 0; +} + +static int +mt7530_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + u8 member = 0; + u8 etags = 0; + int i; + + if (val->port_vlan < 0 
|| val->port_vlan >= MT7530_NUM_VLANS || + val->len > MT7530_NUM_PORTS) + return -EINVAL; + + for (i = 0; i < val->len; i++) { + struct switch_port *p = &val->value.ports[i]; + + if (p->id >= MT7530_NUM_PORTS) + return -EINVAL; + + member |= BIT(p->id); + + if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) + etags |= BIT(p->id); + } + priv->vlan_entries[val->port_vlan].member = member; + priv->vlan_entries[val->port_vlan].etags = etags; + + return 0; +} + +static int +mt7530_set_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + int vlan; + u16 vid; + + vlan = val->port_vlan; + vid = (u16)val->value.i; + + if (vlan < 0 || vlan >= MT7530_NUM_VLANS) + return -EINVAL; + + if (vid < MT7530_MIN_VID || vid > MT7530_MAX_VID) + return -EINVAL; + + priv->vlan_entries[vlan].vid = vid; + return 0; +} + +static int +mt7621_get_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + val->value.i = val->port_vlan; + return 0; +} + +static int +mt7530_get_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + u32 vid; + int vlan; + + vlan = val->port_vlan; + + vid = mt7530_r32(priv, REG_ESW_VLAN_VTIM(vlan)); + if (vlan & 1) + vid = vid >> 12; + vid &= 0xfff; + + val->value.i = vid; + return 0; +} + +static void +mt7530_write_vlan_entry(struct mt7530_priv *priv, int vlan, u16 vid, + u8 ports, u8 etags) +{ + int port; + u32 val; + +#ifndef CONFIG_SOC_MT7621 + /* vid of vlan */ + val = mt7530_r32(priv, REG_ESW_VLAN_VTIM(vlan)); + if (vlan % 2 == 0) { + val &= 0xfff000; + val |= vid; + } else { + val &= 0xfff; + val |= (vid << 12); + } + mt7530_w32(priv, REG_ESW_VLAN_VTIM(vlan), val); +#endif + + /* vlan port membership */ + if (ports) + mt7530_w32(priv, REG_ESW_VLAN_VAWD1, REG_ESW_VLAN_VAWD1_IVL_MAC | + REG_ESW_VLAN_VAWD1_VTAG_EN | (ports << 16) | + REG_ESW_VLAN_VAWD1_VALID); + else + mt7530_w32(priv, REG_ESW_VLAN_VAWD1, 0); + + /* egress mode */ + val = 0; + for (port = 0; port < MT7530_NUM_PORTS; port++) { + if (etags & BIT(port)) + val |= ETAG_CTRL_TAG << (port * 2); + else + val |= ETAG_CTRL_UNTAG << (port * 2); + } + mt7530_w32(priv, REG_ESW_VLAN_VAWD2, val); + + /* write to vlan table */ +#ifdef CONFIG_SOC_MT7621 + mt7530_vtcr(priv, 1, vid); +#else + mt7530_vtcr(priv, 1, vlan); +#endif +} + +static int +mt7530_apply_config(struct switch_dev *dev) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + int i, j; + u8 tag_ports; + u8 untag_ports; + + if (!priv->global_vlan_enable) { + for (i = 0; i < MT7530_NUM_PORTS; i++) + mt7530_w32(priv, REG_ESW_PORT_PCR(i), 0x00400000); + + mt7530_w32(priv, REG_ESW_PORT_PCR(MT7530_CPU_PORT), 0x00ff0000); + + for (i = 0; i < MT7530_NUM_PORTS; i++) + mt7530_w32(priv, REG_ESW_PORT_PVC(i), 0x810000c0); + + return 0; + } + + /* set all ports as security mode */ + for (i = 0; i < MT7530_NUM_PORTS; i++) + mt7530_w32(priv, REG_ESW_PORT_PCR(i), 0x00ff0003); + + /* check if a port is used in tag/untag vlan egress mode */ + tag_ports = 0; + untag_ports = 0; + + for (i = 0; i < MT7530_NUM_VLANS; i++) { + u8 member = priv->vlan_entries[i].member; + u8 etags = priv->vlan_entries[i].etags; + + if (!member) + continue; + + for (j = 0; j < MT7530_NUM_PORTS; j++) { + if (!(member & BIT(j))) + continue; + + if (etags & BIT(j)) + tag_ports |= 1u << j; + else + untag_ports |= 1u << j; + } + } + + /* set all 
untag-only ports as transparent and the rest as user port */ + for (i = 0; i < MT7530_NUM_PORTS; i++) { + u32 pvc_mode = 0x81000000; + + if (untag_ports & BIT(i) && !(tag_ports & BIT(i))) + pvc_mode = 0x810000c0; + + mt7530_w32(priv, REG_ESW_PORT_PVC(i), pvc_mode); + } + + /* first clear the swtich vlan table */ + for (i = 0; i < MT7530_NUM_VLANS; i++) + mt7530_write_vlan_entry(priv, i, i, 0, 0); + + /* now program only vlans with members to avoid + clobbering remapped entries in later iterations */ + for (i = 0; i < MT7530_NUM_VLANS; i++) { + u16 vid = priv->vlan_entries[i].vid; + u8 member = priv->vlan_entries[i].member; + u8 etags = priv->vlan_entries[i].etags; + + if (member) + mt7530_write_vlan_entry(priv, i, vid, member, etags); + } + + /* Port Default PVID */ + for (i = 0; i < MT7530_NUM_PORTS; i++) { + int vlan = priv->port_entries[i].pvid; + u16 pvid = 0; + u32 val; + + if (vlan < MT7530_NUM_VLANS && priv->vlan_entries[vlan].member) + pvid = priv->vlan_entries[vlan].vid; + + val = mt7530_r32(priv, REG_ESW_PORT_PPBV1(i)); + val &= ~0xfff; + val |= pvid; + mt7530_w32(priv, REG_ESW_PORT_PPBV1(i), val); + } + + return 0; +} + +static int +mt7530_get_port_link(struct switch_dev *dev, int port, + struct switch_port_link *link) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + u32 speed, pmsr; + + if (port < 0 || port >= MT7530_NUM_PORTS) + return -EINVAL; + + pmsr = mt7530_r32(priv, 0x3008 + (0x100 * port)); + + link->link = pmsr & 1; + link->duplex = (pmsr >> 1) & 1; + speed = (pmsr >> 2) & 3; + + switch (speed) { + case 0: + link->speed = SWITCH_PORT_SPEED_10; + break; + case 1: + link->speed = SWITCH_PORT_SPEED_100; + break; + case 2: + case 3: /* forced gige speed can be 2 or 3 */ + link->speed = SWITCH_PORT_SPEED_1000; + break; + default: + link->speed = SWITCH_PORT_SPEED_UNKNOWN; + break; + } + + return 0; +} + +static u64 get_mib_counter(struct mt7530_priv *priv, int i, int port) +{ + unsigned int port_base; + u64 lo; + + port_base = MT7621_MIB_COUNTER_BASE + + MT7621_MIB_COUNTER_PORT_OFFSET * port; + + lo = mt7530_r32(priv, port_base + mt7621_mibs[i].offset); + if (mt7621_mibs[i].size == 2) { + u64 hi; + + hi = mt7530_r32(priv, port_base + mt7621_mibs[i].offset + 4); + lo |= hi << 32; + } + + return lo; +} + +static int mt7621_sw_get_port_mib(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + static char buf[4096]; + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + int i, len = 0; + + if (val->port_vlan >= MT7530_NUM_PORTS) + return -EINVAL; + + len += snprintf(buf + len, sizeof(buf) - len, + "Port %d MIB counters\n", val->port_vlan); + + for (i = 0; i < ARRAY_SIZE(mt7621_mibs); ++i) { + u64 counter; + len += snprintf(buf + len, sizeof(buf) - len, + "%-11s: ", mt7621_mibs[i].name); + counter = get_mib_counter(priv, i, val->port_vlan); + len += snprintf(buf + len, sizeof(buf) - len, "%llu\n", + counter); + } + + val->value.s = buf; + val->len = len; + return 0; +} + +static u64 get_mib_counter_7620(struct mt7530_priv *priv, int i) +{ + return mt7530_r32(priv, MT7620_MIB_COUNTER_BASE + mt7620_mibs[i].offset); +} + +static u64 get_mib_counter_port_7620(struct mt7530_priv *priv, int i, int port) +{ + return mt7530_r32(priv, + MT7620_MIB_COUNTER_BASE_PORT + + (MT7620_MIB_COUNTER_PORT_OFFSET * port) + + mt7620_port_mibs[i].offset); +} + +static int mt7530_sw_get_mib(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + static char buf[4096]; + struct 
mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + int i, len = 0; + + len += snprintf(buf + len, sizeof(buf) - len, "Switch MIB counters\n"); + + for (i = 0; i < ARRAY_SIZE(mt7620_mibs); ++i) { + u64 counter; + len += snprintf(buf + len, sizeof(buf) - len, + "%-11s: ", mt7620_mibs[i].name); + counter = get_mib_counter_7620(priv, i); + len += snprintf(buf + len, sizeof(buf) - len, "%llu\n", + counter); + } + + val->value.s = buf; + val->len = len; + return 0; +} + +static int mt7530_sw_get_port_mib(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + static char buf[4096]; + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + int i, len = 0; + + if (val->port_vlan >= MT7530_NUM_PORTS) + return -EINVAL; + + len += snprintf(buf + len, sizeof(buf) - len, + "Port %d MIB counters\n", val->port_vlan); + + for (i = 0; i < ARRAY_SIZE(mt7620_port_mibs); ++i) { + u64 counter; + len += snprintf(buf + len, sizeof(buf) - len, + "%-11s: ", mt7620_port_mibs[i].name); + counter = get_mib_counter_port_7620(priv, i, val->port_vlan); + len += snprintf(buf + len, sizeof(buf) - len, "%llu\n", + counter); + } + + val->value.s = buf; + val->len = len; + return 0; +} + +static int mt7530_get_port_stats(struct switch_dev *dev, int port, + struct switch_port_stats *stats) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + + if (port < 0 || port >= MT7530_NUM_PORTS) + return -EINVAL; + + stats->tx_bytes = get_mib_counter_port_7620(priv, MT7530_PORT_MIB_TXB_ID, port); + stats->rx_bytes = get_mib_counter_port_7620(priv, MT7530_PORT_MIB_RXB_ID, port); + + return 0; +} + +static int mt7621_get_port_stats(struct switch_dev *dev, int port, + struct switch_port_stats *stats) +{ + struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); + + if (port < 0 || port >= MT7530_NUM_PORTS) + return -EINVAL; + + stats->tx_bytes = get_mib_counter(priv, MT7621_PORT_MIB_TXB_ID, port); + stats->rx_bytes = get_mib_counter(priv, MT7621_PORT_MIB_RXB_ID, port); + + return 0; +} + +static const struct switch_attr mt7530_global[] = { + { + .type = SWITCH_TYPE_INT, + .name = "enable_vlan", + .description = "VLAN mode (1:enabled)", + .max = 1, + .id = MT7530_ATTR_ENABLE_VLAN, + .get = mt7530_get_vlan_enable, + .set = mt7530_set_vlan_enable, + }, { + .type = SWITCH_TYPE_STRING, + .name = "mib", + .description = "Get MIB counters for switch", + .get = mt7530_sw_get_mib, + .set = NULL, + }, +}; + +static const struct switch_attr mt7621_port[] = { + { + .type = SWITCH_TYPE_STRING, + .name = "mib", + .description = "Get MIB counters for port", + .get = mt7621_sw_get_port_mib, + .set = NULL, + }, +}; + +static const struct switch_attr mt7621_vlan[] = { + { + .type = SWITCH_TYPE_INT, + .name = "vid", + .description = "VLAN ID (0-4094)", + .set = mt7530_set_vid, + .get = mt7621_get_vid, + .max = 4094, + }, +}; + +static const struct switch_attr mt7530_port[] = { + { + .type = SWITCH_TYPE_STRING, + .name = "mib", + .description = "Get MIB counters for port", + .get = mt7530_sw_get_port_mib, + .set = NULL, + }, +}; + +static const struct switch_attr mt7530_vlan[] = { + { + .type = SWITCH_TYPE_INT, + .name = "vid", + .description = "VLAN ID (0-4094)", + .set = mt7530_set_vid, + .get = mt7530_get_vid, + .max = 4094, + }, +}; + +static const struct switch_dev_ops mt7621_ops = { + .attr_global = { + .attr = mt7530_global, + .n_attr = ARRAY_SIZE(mt7530_global), + }, + .attr_port = { + .attr = mt7621_port, + .n_attr = 
ARRAY_SIZE(mt7621_port), + }, + .attr_vlan = { + .attr = mt7621_vlan, + .n_attr = ARRAY_SIZE(mt7621_vlan), + }, + .get_vlan_ports = mt7530_get_vlan_ports, + .set_vlan_ports = mt7530_set_vlan_ports, + .get_port_pvid = mt7530_get_port_pvid, + .set_port_pvid = mt7530_set_port_pvid, + .get_port_link = mt7530_get_port_link, + .get_port_stats = mt7621_get_port_stats, + .apply_config = mt7530_apply_config, + .reset_switch = mt7530_reset_switch, +}; + +static const struct switch_dev_ops mt7530_ops = { + .attr_global = { + .attr = mt7530_global, + .n_attr = ARRAY_SIZE(mt7530_global), + }, + .attr_port = { + .attr = mt7530_port, + .n_attr = ARRAY_SIZE(mt7530_port), + }, + .attr_vlan = { + .attr = mt7530_vlan, + .n_attr = ARRAY_SIZE(mt7530_vlan), + }, + .get_vlan_ports = mt7530_get_vlan_ports, + .set_vlan_ports = mt7530_set_vlan_ports, + .get_port_pvid = mt7530_get_port_pvid, + .set_port_pvid = mt7530_set_port_pvid, + .get_port_link = mt7530_get_port_link, + .get_port_stats = mt7530_get_port_stats, + .apply_config = mt7530_apply_config, + .reset_switch = mt7530_reset_switch, +}; + +int +mt7530_probe(struct device *dev, void __iomem *base, struct mii_bus *bus, int vlan) +{ + struct switch_dev *swdev; + struct mt7530_priv *mt7530; + struct mt7530_mapping *map; + int ret; + + mt7530 = devm_kzalloc(dev, sizeof(struct mt7530_priv), GFP_KERNEL); + if (!mt7530) + return -ENOMEM; + + mt7530->base = base; + mt7530->bus = bus; + mt7530->global_vlan_enable = vlan; + + swdev = &mt7530->swdev; + if (bus) { + swdev->alias = "mt7530"; + swdev->name = "mt7530"; + } else if (IS_ENABLED(CONFIG_SOC_MT7621)) { + swdev->alias = "mt7621"; + swdev->name = "mt7621"; + } else { + swdev->alias = "mt7620"; + swdev->name = "mt7620"; + } + swdev->cpu_port = MT7530_CPU_PORT; + swdev->ports = MT7530_NUM_PORTS; + swdev->vlans = MT7530_NUM_VLANS; + if (IS_ENABLED(CONFIG_SOC_MT7621)) + swdev->ops = &mt7621_ops; + else + swdev->ops = &mt7530_ops; + + ret = register_switch(swdev, NULL); + if (ret) { + dev_err(dev, "failed to register mt7530\n"); + return ret; + } + + + map = mt7530_find_mapping(dev->of_node); + if (map) + mt7530_apply_mapping(mt7530, map); + mt7530_apply_config(swdev); + + /* magic vodoo */ + if (!IS_ENABLED(CONFIG_SOC_MT7621) && bus && mt7530_r32(mt7530, REG_HWTRAP) != 0x1117edf) { + dev_info(dev, "fixing up MHWTRAP register - bootloader probably played with it\n"); + mt7530_w32(mt7530, REG_HWTRAP, 0x1117edf); + } + dev_info(dev, "loaded %s driver\n", swdev->name); + + return 0; +} diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mt7530.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mt7530.h new file mode 100644 index 0000000000..cf725c2f2b --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mt7530.h @@ -0,0 +1,186 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2013 John Crispin <blogic@openwrt.org> + * Copyright (C) 2016 Vitaly Chekryzhev <13hakta@gmail.com> + */ + +#ifndef _MT7530_H__ +#define _MT7530_H__ + +#define MT7620_MIB_COUNTER_BASE_PORT 0x4000 +#define MT7620_MIB_COUNTER_PORT_OFFSET 0x100 +#define MT7620_MIB_COUNTER_BASE 0x1010 + +/* PPE Accounting Group #0 Byte Counter */ +#define MT7620_MIB_STATS_PPE_AC_BCNT0 0x000 + +/* PPE Accounting Group #0 Packet Counter */ +#define MT7620_MIB_STATS_PPE_AC_PCNT0 0x004 + +/* PPE Accounting Group #63 Byte Counter */ +#define MT7620_MIB_STATS_PPE_AC_BCNT63 0x1F8 + +/* PPE Accounting Group #63 Packet Counter */ +#define MT7620_MIB_STATS_PPE_AC_PCNT63 0x1FC + +/* PPE Meter Group #0 */ +#define MT7620_MIB_STATS_PPE_MTR_CNT0 0x200 + +/* PPE Meter Group #63 */ +#define MT7620_MIB_STATS_PPE_MTR_CNT63 0x2FC + +/* Transmit good byte count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_TX_GBCNT 0x300 + +/* Transmit good packet count for CPU GDM (exclude flow control frames) */ +#define MT7620_MIB_STATS_GDM1_TX_GPCNT 0x304 + +/* Transmit abort count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_TX_SKIPCNT 0x308 + +/* Transmit collision count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_TX_COLCNT 0x30C + +/* Received good byte count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_GBCNT1 0x320 + +/* Received good packet count for CPU GDM (exclude flow control frame) */ +#define MT7620_MIB_STATS_GDM1_RX_GPCNT1 0x324 + +/* Received overflow error packet count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_OERCNT 0x328 + +/* Received FCS error packet count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_FERCNT 0x32C + +/* Received too short error packet count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_SERCNT 0x330 + +/* Received too long error packet count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_LERCNT 0x334 + +/* Received IP/TCP/UDP checksum error packet count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_CERCNT 0x338 + +/* Received flow control pkt count for CPU GDM */ +#define MT7620_MIB_STATS_GDM1_RX_FCCNT 0x33C + +/* Transmit good byte count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_TX_GBCNT 0x340 + +/* Transmit good packet count for PPE GDM (exclude flow control frames) */ +#define MT7620_MIB_STATS_GDM2_TX_GPCNT 0x344 + +/* Transmit abort count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_TX_SKIPCNT 0x348 + +/* Transmit collision count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_TX_COLCNT 0x34C + +/* Received good byte count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_GBCNT 0x360 + +/* Received good packet count for PPE GDM (exclude flow control frame) */ +#define MT7620_MIB_STATS_GDM2_RX_GPCNT 0x364 + +/* Received overflow error packet count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_OERCNT 0x368 + +/* Received FCS error packet count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_FERCNT 0x36C + +/* Received too short error packet count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_SERCNT 0x370 + +/* Received too long error packet count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_LERCNT 0x374 + +/* Received IP/TCP/UDP checksum error packet count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_CERCNT 0x378 + +/* Received flow control pkt count for PPE GDM */ +#define MT7620_MIB_STATS_GDM2_RX_FCCNT 0x37C + +/* Tx Packet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_TGPCN 0x10 + +/* Tx Bad Octet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_TBOCN 0x14 + +/* Tx Good Octet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_TGOCN 
0x18 + +/* Tx Event Packet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_TEPCN 0x1C + +/* Rx Packet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_RGPCN 0x20 + +/* Rx Bad Octet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_RBOCN 0x24 + +/* Rx Good Octet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_RGOCN 0x28 + +/* Rx Event Packet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_REPC1N 0x2C + +/* Rx Event Packet Counter of Port n */ +#define MT7620_MIB_STATS_PORT_REPC2N 0x30 + +#define MT7621_MIB_COUNTER_BASE 0x4000 +#define MT7621_MIB_COUNTER_PORT_OFFSET 0x100 +#define MT7621_STATS_TDPC 0x00 +#define MT7621_STATS_TCRC 0x04 +#define MT7621_STATS_TUPC 0x08 +#define MT7621_STATS_TMPC 0x0C +#define MT7621_STATS_TBPC 0x10 +#define MT7621_STATS_TCEC 0x14 +#define MT7621_STATS_TSCEC 0x18 +#define MT7621_STATS_TMCEC 0x1C +#define MT7621_STATS_TDEC 0x20 +#define MT7621_STATS_TLCEC 0x24 +#define MT7621_STATS_TXCEC 0x28 +#define MT7621_STATS_TPPC 0x2C +#define MT7621_STATS_TL64PC 0x30 +#define MT7621_STATS_TL65PC 0x34 +#define MT7621_STATS_TL128PC 0x38 +#define MT7621_STATS_TL256PC 0x3C +#define MT7621_STATS_TL512PC 0x40 +#define MT7621_STATS_TL1024PC 0x44 +#define MT7621_STATS_TOC 0x48 +#define MT7621_STATS_RDPC 0x60 +#define MT7621_STATS_RFPC 0x64 +#define MT7621_STATS_RUPC 0x68 +#define MT7621_STATS_RMPC 0x6C +#define MT7621_STATS_RBPC 0x70 +#define MT7621_STATS_RAEPC 0x74 +#define MT7621_STATS_RCEPC 0x78 +#define MT7621_STATS_RUSPC 0x7C +#define MT7621_STATS_RFEPC 0x80 +#define MT7621_STATS_ROSPC 0x84 +#define MT7621_STATS_RJEPC 0x88 +#define MT7621_STATS_RPPC 0x8C +#define MT7621_STATS_RL64PC 0x90 +#define MT7621_STATS_RL65PC 0x94 +#define MT7621_STATS_RL128PC 0x98 +#define MT7621_STATS_RL256PC 0x9C +#define MT7621_STATS_RL512PC 0xA0 +#define MT7621_STATS_RL1024PC 0xA4 +#define MT7621_STATS_ROC 0xA8 +#define MT7621_STATS_RDPC_CTRL 0xB0 +#define MT7621_STATS_RDPC_ING 0xB4 +#define MT7621_STATS_RDPC_ARL 0xB8 + +int mt7530_probe(struct device *dev, void __iomem *base, struct mii_bus *bus, int vlan); + +#endif diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mtk_eth_soc.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mtk_eth_soc.c new file mode 100644 index 0000000000..db7b5b2053 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mtk_eth_soc.c @@ -0,0 +1,1599 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/platform_device.h> +#include <linux/of_device.h> +#include <linux/clk.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/if_vlan.h> +#include <linux/reset.h> +#include <linux/tcp.h> +#include <linux/io.h> +#include <linux/bug.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "mdio.h" +#include "ethtool.h" + +#define MAX_RX_LENGTH 1536 +#define FE_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) +#define FE_RX_HLEN (NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN) +#define DMA_DUMMY_DESC 0xffffffff +#define FE_DEFAULT_MSG_ENABLE \ + (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) +#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1)) +#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1)) +#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1)) + +#define SYSC_REG_RSTCTRL 0x34 + +static int fe_msg_level = -1; +module_param_named(msg_level, fe_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); + +static const u16 fe_reg_table_default[FE_REG_COUNT] = { + [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG, + [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG, + [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG, + [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0, + [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0, + [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0, + [FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0, + [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0, + [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0, + [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0, + [FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0, + [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE, + [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS, + [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0, + [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT, + [FE_REG_FE_RST_GL] = FE_FE_RST_GL, +}; + +static const u16 *fe_reg_table = fe_reg_table_default; + +struct fe_work_t { + int bitnr; + void (*action)(struct fe_priv *); +}; + +static void __iomem *fe_base; + +void fe_w32(u32 val, unsigned reg) +{ + __raw_writel(val, fe_base + reg); +} + +u32 fe_r32(unsigned reg) +{ + return __raw_readl(fe_base + reg); +} + +void fe_reg_w32(u32 val, enum fe_reg reg) +{ + fe_w32(val, fe_reg_table[reg]); +} + +u32 fe_reg_r32(enum fe_reg reg) +{ + return fe_r32(fe_reg_table[reg]); +} + +void fe_reset(u32 reset_bits) +{ + u32 t; + + t = rt_sysc_r32(SYSC_REG_RSTCTRL); + t |= reset_bits; + rt_sysc_w32(t, SYSC_REG_RSTCTRL); + usleep_range(10, 20); + + t &= ~reset_bits; + rt_sysc_w32(t, SYSC_REG_RSTCTRL); + usleep_range(10, 20); +} + +static inline void fe_int_disable(u32 mask) +{ + fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask, + FE_REG_FE_INT_ENABLE); + /* flush write */ + fe_reg_r32(FE_REG_FE_INT_ENABLE); +} + +static inline void fe_int_enable(u32 mask) +{ + fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask, + FE_REG_FE_INT_ENABLE); + /* flush write */ + fe_reg_r32(FE_REG_FE_INT_ENABLE); +} + +static inline void fe_hw_set_macaddr(struct fe_priv *priv, 
unsigned char *mac) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->page_lock, flags); + fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH); + fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + FE_GDMA1_MAC_ADRL); + spin_unlock_irqrestore(&priv->page_lock, flags); +} + +static int fe_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + + if (!ret) { + struct fe_priv *priv = netdev_priv(dev); + + if (priv->soc->set_mac) + priv->soc->set_mac(priv, dev->dev_addr); + else + fe_hw_set_macaddr(priv, p); + } + + return ret; +} + +static inline int fe_max_frag_size(int mtu) +{ + /* make sure buf_size will be at least MAX_RX_LENGTH */ + if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH) + mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN; + + return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static inline int fe_max_buf_size(int frag_size) +{ + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + BUG_ON(buf_size < MAX_RX_LENGTH); + return buf_size; +} + +static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd) +{ + rxd->rxd1 = dma_rxd->rxd1; + rxd->rxd2 = dma_rxd->rxd2; + rxd->rxd3 = dma_rxd->rxd3; + rxd->rxd4 = dma_rxd->rxd4; +} + +static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd) +{ + dma_txd->txd1 = txd->txd1; + dma_txd->txd3 = txd->txd3; + dma_txd->txd4 = txd->txd4; + /* clean dma done flag last */ + dma_txd->txd2 = txd->txd2; +} + +static void fe_clean_rx(struct fe_priv *priv) +{ + int i; + struct fe_rx_ring *ring = &priv->rx_ring; + + if (ring->rx_data) { + for (i = 0; i < ring->rx_ring_size; i++) + if (ring->rx_data[i]) { + if (ring->rx_dma && ring->rx_dma[i].rxd1) + dma_unmap_single(&priv->netdev->dev, + ring->rx_dma[i].rxd1, + ring->rx_buf_size, + DMA_FROM_DEVICE); + put_page(virt_to_head_page(ring->rx_data[i])); + } + + kfree(ring->rx_data); + ring->rx_data = NULL; + } + + if (ring->rx_dma) { + dma_free_coherent(&priv->netdev->dev, + ring->rx_ring_size * sizeof(*ring->rx_dma), + ring->rx_dma, + ring->rx_phys); + ring->rx_dma = NULL; + } +} + +static int fe_alloc_rx(struct fe_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct fe_rx_ring *ring = &priv->rx_ring; + int i, pad; + + ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data), + GFP_KERNEL); + if (!ring->rx_data) + goto no_rx_mem; + + for (i = 0; i < ring->rx_ring_size; i++) { + ring->rx_data[i] = netdev_alloc_frag(ring->frag_size); + if (!ring->rx_data[i]) + goto no_rx_mem; + } + + ring->rx_dma = dma_alloc_coherent(&netdev->dev, + ring->rx_ring_size * sizeof(*ring->rx_dma), + &ring->rx_phys, + GFP_ATOMIC | __GFP_ZERO); + if (!ring->rx_dma) + goto no_rx_mem; + + if (priv->flags & FE_FLAG_RX_2B_OFFSET) + pad = 0; + else + pad = NET_IP_ALIGN; + for (i = 0; i < ring->rx_ring_size; i++) { + dma_addr_t dma_addr = dma_map_single(&netdev->dev, + ring->rx_data[i] + NET_SKB_PAD + pad, + ring->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) + goto no_rx_mem; + ring->rx_dma[i].rxd1 = (unsigned int)dma_addr; + + if (priv->flags & FE_FLAG_RX_SG_DMA) + ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); + else + ring->rx_dma[i].rxd2 = RX_DMA_LSO; + } + ring->rx_calc_idx = ring->rx_ring_size - 1; + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0); + fe_reg_w32(ring->rx_ring_size, 
FE_REG_RX_MAX_CNT0); + fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0); + fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG); + + return 0; + +no_rx_mem: + return -ENOMEM; +} + +static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf) +{ + if (tx_buf->flags & FE_TX_FLAGS_SINGLE0) { + dma_unmap_single(dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & FE_TX_FLAGS_PAGE0) { + dma_unmap_page(dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + if (tx_buf->flags & FE_TX_FLAGS_PAGE1) + dma_unmap_page(dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); + + tx_buf->flags = 0; + if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC)) + dev_kfree_skb_any(tx_buf->skb); + tx_buf->skb = NULL; +} + +static void fe_clean_tx(struct fe_priv *priv) +{ + int i; + struct device *dev = &priv->netdev->dev; + struct fe_tx_ring *ring = &priv->tx_ring; + + if (ring->tx_buf) { + for (i = 0; i < ring->tx_ring_size; i++) + fe_txd_unmap(dev, &ring->tx_buf[i]); + kfree(ring->tx_buf); + ring->tx_buf = NULL; + } + + if (ring->tx_dma) { + dma_free_coherent(dev, + ring->tx_ring_size * sizeof(*ring->tx_dma), + ring->tx_dma, + ring->tx_phys); + ring->tx_dma = NULL; + } + + netdev_reset_queue(priv->netdev); +} + +static int fe_alloc_tx(struct fe_priv *priv) +{ + int i; + struct fe_tx_ring *ring = &priv->tx_ring; + + ring->tx_free_idx = 0; + ring->tx_next_idx = 0; + ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, + MAX_SKB_FRAGS); + + ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), + GFP_KERNEL); + if (!ring->tx_buf) + goto no_tx_mem; + + ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev, + ring->tx_ring_size * sizeof(*ring->tx_dma), + &ring->tx_phys, + GFP_ATOMIC | __GFP_ZERO); + if (!ring->tx_dma) + goto no_tx_mem; + + for (i = 0; i < ring->tx_ring_size; i++) { + if (priv->soc->tx_dma) + priv->soc->tx_dma(&ring->tx_dma[i]); + ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF; + } + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0); + fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0); + fe_reg_w32(0, FE_REG_TX_CTX_IDX0); + fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG); + + return 0; + +no_tx_mem: + return -ENOMEM; +} + +static int fe_init_dma(struct fe_priv *priv) +{ + int err; + + err = fe_alloc_tx(priv); + if (err) + return err; + + err = fe_alloc_rx(priv); + if (err) + return err; + + return 0; +} + +static void fe_free_dma(struct fe_priv *priv) +{ + fe_clean_tx(priv); + fe_clean_rx(priv); +} + +void fe_stats_update(struct fe_priv *priv) +{ + struct fe_hw_stats *hwstats = priv->hw_stats; + unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE]; + u64 stats; + + u64_stats_update_begin(&hwstats->syncp); + + if (IS_ENABLED(CONFIG_SOC_MT7621)) { + hwstats->rx_bytes += fe_r32(base); + stats = fe_r32(base + 0x04); + if (stats) + hwstats->rx_bytes += (stats << 32); + hwstats->rx_packets += fe_r32(base + 0x08); + hwstats->rx_overflow += fe_r32(base + 0x10); + hwstats->rx_fcs_errors += fe_r32(base + 0x14); + hwstats->rx_short_errors += fe_r32(base + 0x18); + hwstats->rx_long_errors += fe_r32(base + 0x1c); + hwstats->rx_checksum_errors += fe_r32(base + 0x20); + hwstats->rx_flow_control_packets += fe_r32(base + 0x24); + hwstats->tx_skip += fe_r32(base + 0x28); + hwstats->tx_collisions += fe_r32(base + 0x2c); + 
hwstats->tx_bytes += fe_r32(base + 0x30); + stats = fe_r32(base + 0x34); + if (stats) + hwstats->tx_bytes += (stats << 32); + hwstats->tx_packets += fe_r32(base + 0x38); + } else { + hwstats->tx_bytes += fe_r32(base); + hwstats->tx_packets += fe_r32(base + 0x04); + hwstats->tx_skip += fe_r32(base + 0x08); + hwstats->tx_collisions += fe_r32(base + 0x0c); + hwstats->rx_bytes += fe_r32(base + 0x20); + hwstats->rx_packets += fe_r32(base + 0x24); + hwstats->rx_overflow += fe_r32(base + 0x28); + hwstats->rx_fcs_errors += fe_r32(base + 0x2c); + hwstats->rx_short_errors += fe_r32(base + 0x30); + hwstats->rx_long_errors += fe_r32(base + 0x34); + hwstats->rx_checksum_errors += fe_r32(base + 0x38); + hwstats->rx_flow_control_packets += fe_r32(base + 0x3c); + } + + u64_stats_update_end(&hwstats->syncp); +} + +static void fe_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct fe_priv *priv = netdev_priv(dev); + struct fe_hw_stats *hwstats = priv->hw_stats; + unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE]; + unsigned int start; + + if (!base) { + netdev_stats_to_stats64(storage, &dev->stats); + return; + } + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock(&hwstats->stats_lock)) { + fe_stats_update(priv); + spin_unlock(&hwstats->stats_lock); + } + } + + do { + start = u64_stats_fetch_begin_irq(&hwstats->syncp); + storage->rx_packets = hwstats->rx_packets; + storage->tx_packets = hwstats->tx_packets; + storage->rx_bytes = hwstats->rx_bytes; + storage->tx_bytes = hwstats->tx_bytes; + storage->collisions = hwstats->tx_collisions; + storage->rx_length_errors = hwstats->rx_short_errors + + hwstats->rx_long_errors; + storage->rx_over_errors = hwstats->rx_overflow; + storage->rx_crc_errors = hwstats->rx_fcs_errors; + storage->rx_errors = hwstats->rx_checksum_errors; + storage->tx_aborted_errors = hwstats->tx_skip; + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); + + storage->tx_errors = priv->netdev->stats.tx_errors; + storage->rx_dropped = priv->netdev->stats.rx_dropped; + storage->tx_dropped = priv->netdev->stats.tx_dropped; +} + +static int fe_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct fe_priv *priv = netdev_priv(dev); + u32 idx = (vid & 0xf); + u32 vlan_cfg; + + if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) && + (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) + return 0; + + if (test_bit(idx, &priv->vlan_map)) { + netdev_warn(dev, "disable tx vlan offload\n"); + dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX; + netdev_update_features(dev); + } else { + vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] + + ((idx >> 1) << 2)); + if (idx & 0x1) { + vlan_cfg &= 0xffff; + vlan_cfg |= (vid << 16); + } else { + vlan_cfg &= 0xffff0000; + vlan_cfg |= vid; + } + fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] + + ((idx >> 1) << 2)); + set_bit(idx, &priv->vlan_map); + } + + return 0; +} + +static int fe_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct fe_priv *priv = netdev_priv(dev); + u32 idx = (vid & 0xf); + + if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) && + (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) + return 0; + + clear_bit(idx, &priv->vlan_map); + + return 0; +} + +static inline u32 fe_empty_txd(struct fe_tx_ring *ring) +{ + barrier(); + return (u32)(ring->tx_ring_size - + ((ring->tx_next_idx - ring->tx_free_idx) & + (ring->tx_ring_size - 1))); +} + +static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev, + int tx_num, struct fe_tx_ring *ring) 
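/* Descriptor packing summary (as implemented below): each fe_tx_dma
 * descriptor carries two buffers, txd1 and txd3, with their lengths
 * packed into txd2 via TX_DMA_PLEN0()/TX_DMA_PLEN1(). The linear head
 * of the skb always takes slot 0 of the first descriptor; scatter-gather
 * fragments then alternate between slot 1 and slot 0 of the next
 * descriptor (k tracks the slot, j the descriptor index). Fragments
 * longer than TX_DMA_BUF_LEN are split across multiple mappings. The
 * last-segment flag (TX_DMA_LS0 or TX_DMA_LS1) is set on whichever slot
 * received the final buffer, and the err_dma path unwinds every mapping
 * queued so far if a later DMA mapping fails. */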
+{ + struct fe_priv *priv = netdev_priv(dev); + struct skb_frag_struct *frag; + struct fe_tx_dma txd, *ptxd; + struct fe_tx_buf *tx_buf; + dma_addr_t mapped_addr; + unsigned int nr_frags; + u32 def_txd4; + int i, j, k, frag_size, frag_map_size, offset; + + tx_buf = &ring->tx_buf[ring->tx_next_idx]; + memset(tx_buf, 0, sizeof(*tx_buf)); + memset(&txd, 0, sizeof(txd)); + nr_frags = skb_shinfo(skb)->nr_frags; + + /* init tx descriptor */ + if (priv->soc->tx_dma) + priv->soc->tx_dma(&txd); + else + txd.txd4 = TX_DMA_DESP4_DEF; + def_txd4 = txd.txd4; + + /* TX Checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + txd.txd4 |= TX_DMA_CHKSUM; + + /* VLAN header offload */ + if (skb_vlan_tag_present(skb)) { + u16 tag = skb_vlan_tag_get(skb); + + if (IS_ENABLED(CONFIG_SOC_MT7621)) + txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag; + else + txd.txd4 |= TX_DMA_INS_VLAN | + ((tag >> VLAN_PRIO_SHIFT) << 4) | + (tag & 0xF); + } + + /* TSO: fill MSS info in tcp checksum field */ + if (skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) { + netif_warn(priv, tx_err, dev, + "GSO expand head fail.\n"); + goto err_out; + } + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + txd.txd4 |= TX_DMA_TSO; + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); + } + } + + mapped_addr = dma_map_single(&dev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + goto err_out; + txd.txd1 = mapped_addr; + txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb)); + + tx_buf->flags |= FE_TX_FLAGS_SINGLE0; + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); + + /* TX SG offload */ + j = ring->tx_next_idx; + k = 0; + for (i = 0; i < nr_frags; i++) { + offset = 0; + frag = &skb_shinfo(skb)->frags[i]; + frag_size = skb_frag_size(frag); + + while (frag_size > 0) { + frag_map_size = min(frag_size, TX_DMA_BUF_LEN); + mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, + frag_map_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + goto err_dma; + + if (k & 0x1) { + j = NEXT_TX_DESP_IDX(j); + txd.txd1 = mapped_addr; + txd.txd2 = TX_DMA_PLEN0(frag_map_size); + txd.txd4 = def_txd4; + + tx_buf = &ring->tx_buf[j]; + memset(tx_buf, 0, sizeof(*tx_buf)); + + tx_buf->flags |= FE_TX_FLAGS_PAGE0; + dma_unmap_addr_set(tx_buf, dma_addr0, + mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, + frag_map_size); + } else { + txd.txd3 = mapped_addr; + txd.txd2 |= TX_DMA_PLEN1(frag_map_size); + + tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; + tx_buf->flags |= FE_TX_FLAGS_PAGE1; + dma_unmap_addr_set(tx_buf, dma_addr1, + mapped_addr); + dma_unmap_len_set(tx_buf, dma_len1, + frag_map_size); + + if (!((i == (nr_frags - 1)) && + (frag_map_size == frag_size))) { + fe_set_txd(&txd, &ring->tx_dma[j]); + memset(&txd, 0, sizeof(txd)); + } + } + frag_size -= frag_map_size; + offset += frag_map_size; + k++; + } + } + + /* set last segment */ + if (k & 0x1) + txd.txd2 |= TX_DMA_LS1; + else + txd.txd2 |= TX_DMA_LS0; + fe_set_txd(&txd, &ring->tx_dma[j]); + + /* store skb to cleanup */ + tx_buf->skb = skb; + + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); + + ring->tx_next_idx = NEXT_TX_DESP_IDX(j); + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) { + netif_stop_queue(dev); + smp_mb(); + if (unlikely(fe_empty_txd(ring) > ring->tx_thresh)) + netif_wake_queue(dev); + } + + if 
(netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) + fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0); + + return 0; + +err_dma: + j = ring->tx_next_idx; + for (i = 0; i < tx_num; i++) { + ptxd = &ring->tx_dma[j]; + tx_buf = &ring->tx_buf[j]; + + /* unmap dma */ + fe_txd_unmap(&dev->dev, tx_buf); + + ptxd->txd2 = TX_DMA_DESP2_DEF; + j = NEXT_TX_DESP_IDX(j); + } + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + +err_out: + return -1; +} + +static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv) +{ + unsigned int len; + int ret; + + ret = 0; + if (unlikely(skb->len < VLAN_ETH_ZLEN)) { + if ((priv->flags & FE_FLAG_PADDING_64B) && + !(priv->flags & FE_FLAG_PADDING_BUG)) + return ret; + + if (skb_vlan_tag_present(skb)) + len = ETH_ZLEN; + else if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) + len = VLAN_ETH_ZLEN; + else if (!(priv->flags & FE_FLAG_PADDING_64B)) + len = ETH_ZLEN; + else + return ret; + + if (skb->len < len) { + ret = skb_pad(skb, len - skb->len); + if (ret < 0) + return ret; + skb->len = len; + skb_set_tail_pointer(skb, len); + } + } + + return ret; +} + +static inline int fe_cal_txd_req(struct sk_buff *skb) +{ + int i, nfrags; + struct skb_frag_struct *frag; + + nfrags = 1; + if (skb_is_gso(skb)) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN); + } + } else { + nfrags += skb_shinfo(skb)->nr_frags; + } + + return DIV_ROUND_UP(nfrags, 2); +} + +static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + struct fe_tx_ring *ring = &priv->tx_ring; + struct net_device_stats *stats = &dev->stats; + int tx_num; + int len = skb->len; + + if (fe_skb_padto(skb, priv)) { + netif_warn(priv, tx_err, dev, "tx padding failed!\n"); + return NETDEV_TX_OK; + } + + tx_num = fe_cal_txd_req(skb); + if (unlikely(fe_empty_txd(ring) <= tx_num)) { + netif_stop_queue(dev); + netif_err(priv, tx_queued, dev, + "Tx Ring full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) { + stats->tx_dropped++; + } else { + stats->tx_packets++; + stats->tx_bytes += len; + } + + return NETDEV_TX_OK; +} + +static int fe_poll_rx(struct napi_struct *napi, int budget, + struct fe_priv *priv, u32 rx_intr) +{ + struct net_device *netdev = priv->netdev; + struct net_device_stats *stats = &netdev->stats; + struct fe_soc_data *soc = priv->soc; + struct fe_rx_ring *ring = &priv->rx_ring; + int idx = ring->rx_calc_idx; + u32 checksum_bit; + struct sk_buff *skb; + u8 *data, *new_data; + struct fe_rx_dma *rxd, trxd; + int done = 0, pad; + + if (netdev->features & NETIF_F_RXCSUM) + checksum_bit = soc->checksum_bit; + else + checksum_bit = 0; + + if (priv->flags & FE_FLAG_RX_2B_OFFSET) + pad = 0; + else + pad = NET_IP_ALIGN; + + while (done < budget) { + unsigned int pktlen; + dma_addr_t dma_addr; + + idx = NEXT_RX_DESP_IDX(idx); + rxd = &ring->rx_dma[idx]; + data = ring->rx_data[idx]; + + fe_get_rxd(&trxd, rxd); + if (!(trxd.rxd2 & RX_DMA_DONE)) + break; + + /* alloc new buffer */ + new_data = netdev_alloc_frag(ring->frag_size); + if (unlikely(!new_data)) { + stats->rx_dropped++; + goto release_desc; + } + dma_addr = dma_map_single(&netdev->dev, + new_data + NET_SKB_PAD + pad, + ring->rx_buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { + put_page(virt_to_head_page(new_data)); + goto release_desc; + } + + /* 
receive data */ + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { + put_page(virt_to_head_page(new_data)); + goto release_desc; + } + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + dma_unmap_single(&netdev->dev, trxd.rxd1, + ring->rx_buf_size, DMA_FROM_DEVICE); + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); + skb->dev = netdev; + skb_put(skb, pktlen); + if (trxd.rxd4 & checksum_bit) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, netdev); + + stats->rx_packets++; + stats->rx_bytes += pktlen; + + napi_gro_receive(napi, skb); + + ring->rx_data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; + +release_desc: + if (priv->flags & FE_FLAG_RX_SG_DMA) + rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); + else + rxd->rxd2 = RX_DMA_LSO; + + ring->rx_calc_idx = idx; + /* make sure that all changes to the dma ring are flushed before + * we continue + */ + wmb(); + fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0); + done++; + } + + if (done < budget) + fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS); + + return done; +} + +static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr, + int *tx_again) +{ + struct net_device *netdev = priv->netdev; + struct device *dev = &netdev->dev; + unsigned int bytes_compl = 0; + struct sk_buff *skb; + struct fe_tx_buf *tx_buf; + int done = 0; + u32 idx, hwidx; + struct fe_tx_ring *ring = &priv->tx_ring; + + idx = ring->tx_free_idx; + hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0); + + while ((idx != hwidx) && budget) { + tx_buf = &ring->tx_buf[idx]; + skb = tx_buf->skb; + + if (!skb) + break; + + if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { + bytes_compl += skb->len; + done++; + budget--; + } + fe_txd_unmap(dev, tx_buf); + idx = NEXT_TX_DESP_IDX(idx); + } + ring->tx_free_idx = idx; + + if (idx == hwidx) { + /* read hw index again make sure no new tx packet */ + hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0); + if (idx == hwidx) + fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS); + else + *tx_again = 1; + } else { + *tx_again = 1; + } + + if (done) { + netdev_completed_queue(netdev, done, bytes_compl); + smp_mb(); + if (unlikely(netif_queue_stopped(netdev) && + (fe_empty_txd(ring) > ring->tx_thresh))) + netif_wake_queue(netdev); + } + + return done; +} + +static int fe_poll(struct napi_struct *napi, int budget) +{ + struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi); + struct fe_hw_stats *hwstat = priv->hw_stats; + int tx_done, rx_done, tx_again; + u32 status, fe_status, status_reg, mask; + u32 tx_intr, rx_intr, status_intr; + + status = fe_reg_r32(FE_REG_FE_INT_STATUS); + fe_status = status; + tx_intr = priv->soc->tx_int; + rx_intr = priv->soc->rx_int; + status_intr = priv->soc->status_int; + tx_done = 0; + rx_done = 0; + tx_again = 0; + + if (fe_reg_table[FE_REG_FE_INT_STATUS2]) { + fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2); + status_reg = FE_REG_FE_INT_STATUS2; + } else { + status_reg = FE_REG_FE_INT_STATUS; + } + + if (status & tx_intr) + tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again); + + if (status & rx_intr) + rx_done = fe_poll_rx(napi, budget, priv, rx_intr); + + if (unlikely(fe_status & status_intr)) { + if (hwstat && spin_trylock(&hwstat->stats_lock)) { + fe_stats_update(priv); + spin_unlock(&hwstat->stats_lock); + } + fe_reg_w32(status_intr, status_reg); + } + + if (unlikely(netif_msg_intr(priv))) { + mask = fe_reg_r32(FE_REG_FE_INT_ENABLE); + netdev_info(priv->netdev, + "done tx %d, rx %d, intr 0x%08x/0x%x\n", + tx_done, rx_done, status, mask); + } + + if 
(!tx_again && (rx_done < budget)) { + status = fe_reg_r32(FE_REG_FE_INT_STATUS); + if (status & (tx_intr | rx_intr)) { + /* let napi poll again */ + rx_done = budget; + goto poll_again; + } + + napi_complete_done(napi, rx_done); + fe_int_enable(tx_intr | rx_intr); + } else { + rx_done = budget; + } + +poll_again: + return rx_done; +} + +static void fe_tx_timeout(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + struct fe_tx_ring *ring = &priv->tx_ring; + + priv->netdev->stats.tx_errors++; + netif_err(priv, tx_err, dev, + "transmit timed out\n"); + netif_info(priv, drv, dev, "dma_cfg:%08x\n", + fe_reg_r32(FE_REG_PDMA_GLO_CFG)); + netif_info(priv, drv, dev, "tx_ring=%d, " + "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n", + 0, fe_reg_r32(FE_REG_TX_BASE_PTR0), + fe_reg_r32(FE_REG_TX_MAX_CNT0), + fe_reg_r32(FE_REG_TX_CTX_IDX0), + fe_reg_r32(FE_REG_TX_DTX_IDX0), + ring->tx_free_idx, + ring->tx_next_idx); + netif_info(priv, drv, dev, + "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n", + 0, fe_reg_r32(FE_REG_RX_BASE_PTR0), + fe_reg_r32(FE_REG_RX_MAX_CNT0), + fe_reg_r32(FE_REG_RX_CALC_IDX0), + fe_reg_r32(FE_REG_RX_DRX_IDX0)); + + if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags)) + schedule_work(&priv->pending_work); +} + +static irqreturn_t fe_handle_irq(int irq, void *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + u32 status, int_mask; + + status = fe_reg_r32(FE_REG_FE_INT_STATUS); + + if (unlikely(!status)) + return IRQ_NONE; + + int_mask = (priv->soc->rx_int | priv->soc->tx_int); + if (likely(status & int_mask)) { + if (likely(napi_schedule_prep(&priv->rx_napi))) { + fe_int_disable(int_mask); + __napi_schedule(&priv->rx_napi); + } + } else { + fe_reg_w32(status, FE_REG_FE_INT_STATUS); + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void fe_poll_controller(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + u32 int_mask = priv->soc->tx_int | priv->soc->rx_int; + + fe_int_disable(int_mask); + fe_handle_irq(dev->irq, dev); + fe_int_enable(int_mask); +} +#endif + +int fe_set_clock_cycle(struct fe_priv *priv) +{ + unsigned long sysclk = priv->sysclk; + + sysclk /= FE_US_CYC_CNT_DIVISOR; + sysclk <<= FE_US_CYC_CNT_SHIFT; + + fe_w32((fe_r32(FE_FE_GLO_CFG) & + ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | + sysclk, + FE_FE_GLO_CFG); + return 0; +} + +void fe_fwd_config(struct fe_priv *priv) +{ + u32 fwd_cfg; + + fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG); + + /* disable jumbo frame */ + if (priv->flags & FE_FLAG_JUMBO_FRAME) + fwd_cfg &= ~FE_GDM1_JMB_EN; + + /* set unicast/multicast/broadcast frame to cpu */ + fwd_cfg &= ~0xffff; + + fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG); +} + +static void fe_rxcsum_config(bool enable) +{ + if (enable) + fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN | + FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), + FE_GDMA1_FWD_CFG); + else + fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN | + FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), + FE_GDMA1_FWD_CFG); +} + +static void fe_txcsum_config(bool enable) +{ + if (enable) + fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN | + FE_TCS_GEN_EN | FE_UCS_GEN_EN), + FE_CDMA_CSG_CFG); + else + fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN | + FE_TCS_GEN_EN | FE_UCS_GEN_EN), + FE_CDMA_CSG_CFG); +} + +void fe_csum_config(struct fe_priv *priv) +{ + struct net_device *dev = priv_netdev(priv); + + fe_txcsum_config((dev->features & NETIF_F_IP_CSUM)); + fe_rxcsum_config((dev->features & NETIF_F_RXCSUM)); +} + +static int fe_hw_init(struct net_device *dev) +{ + struct 
fe_priv *priv = netdev_priv(dev); + int i, err; + + err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0, + dev_name(priv->device), dev); + if (err) + return err; + + if (priv->soc->set_mac) + priv->soc->set_mac(priv, dev->dev_addr); + else + fe_hw_set_macaddr(priv, dev->dev_addr); + + /* disable delay interrupt */ + fe_reg_w32(0, FE_REG_DLY_INT_CFG); + + fe_int_disable(priv->soc->tx_int | priv->soc->rx_int); + + /* frame engine will push VLAN tag regarding to VIDX feild in Tx desc */ + if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) + for (i = 0; i < 16; i += 2) + fe_w32(((i + 1) << 16) + i, + fe_reg_table[FE_REG_FE_DMA_VID_BASE] + + (i * 2)); + + if (priv->soc->fwd_config(priv)) + netdev_err(dev, "unable to get clock\n"); + + if (fe_reg_table[FE_REG_FE_RST_GL]) { + fe_reg_w32(1, FE_REG_FE_RST_GL); + fe_reg_w32(0, FE_REG_FE_RST_GL); + } + + return 0; +} + +static int fe_open(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + unsigned long flags; + u32 val; + int err; + + err = fe_init_dma(priv); + if (err) { + fe_free_dma(priv); + return err; + } + + spin_lock_irqsave(&priv->page_lock, flags); + + val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; + if (priv->flags & FE_FLAG_RX_2B_OFFSET) + val |= FE_RX_2B_OFFSET; + val |= priv->soc->pdma_glo_cfg; + fe_reg_w32(val, FE_REG_PDMA_GLO_CFG); + + spin_unlock_irqrestore(&priv->page_lock, flags); + + if (priv->phy) + priv->phy->start(priv); + + if (priv->soc->has_carrier && priv->soc->has_carrier(priv)) + netif_carrier_on(dev); + + napi_enable(&priv->rx_napi); + fe_int_enable(priv->soc->tx_int | priv->soc->rx_int); + netif_start_queue(dev); + + return 0; +} + +static int fe_stop(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + unsigned long flags; + int i; + + netif_tx_disable(dev); + fe_int_disable(priv->soc->tx_int | priv->soc->rx_int); + napi_disable(&priv->rx_napi); + + if (priv->phy) + priv->phy->stop(priv); + + spin_lock_irqsave(&priv->page_lock, flags); + + fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) & + ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN), + FE_REG_PDMA_GLO_CFG); + spin_unlock_irqrestore(&priv->page_lock, flags); + + /* wait dma stop */ + for (i = 0; i < 10; i++) { + if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) & + (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) { + msleep(20); + continue; + } + break; + } + + fe_free_dma(priv); + + return 0; +} + +static int __init fe_init(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + struct device_node *port; + const char *mac_addr; + int err; + + priv->soc->reset_fe(); + + if (priv->soc->switch_init) + if (priv->soc->switch_init(priv)) { + netdev_err(dev, "failed to initialize switch core\n"); + return -ENODEV; + } + + mac_addr = of_get_mac_address(priv->device->of_node); + if (mac_addr) + ether_addr_copy(dev->dev_addr, mac_addr); + + /* If the mac address is invalid, use random mac address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + dev_err(priv->device, "generated random MAC address %pM\n", + dev->dev_addr); + } + + err = fe_mdio_init(priv); + if (err) + return err; + + if (priv->soc->port_init) + for_each_child_of_node(priv->device->of_node, port) + if (of_device_is_compatible(port, "mediatek,eth-port") && + of_device_is_available(port)) + priv->soc->port_init(priv, port); + + if (priv->phy) { + err = priv->phy->connect(priv); + if (err) + goto err_phy_disconnect; + } + + err = fe_hw_init(dev); + if (err) + goto err_phy_disconnect; + + if ((priv->flags & FE_FLAG_HAS_SWITCH) && 
priv->soc->switch_config) + priv->soc->switch_config(priv); + + return 0; + +err_phy_disconnect: + if (priv->phy) + priv->phy->disconnect(priv); + fe_mdio_cleanup(priv); + + return err; +} + +static void fe_uninit(struct net_device *dev) +{ + struct fe_priv *priv = netdev_priv(dev); + + if (priv->phy) + priv->phy->disconnect(priv); + fe_mdio_cleanup(priv); + + fe_reg_w32(0, FE_REG_FE_INT_ENABLE); + free_irq(dev->irq, dev); +} + +static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct fe_priv *priv = netdev_priv(dev); + + if (!priv->phy_dev) + return -ENODEV; + + switch (cmd) { + case SIOCETHTOOL: + return phy_ethtool_ioctl(priv->phy_dev, + (void *) ifr->ifr_data); + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phy_mii_ioctl(priv->phy_dev, ifr, cmd); + default: + break; + } + + return -EOPNOTSUPP; +} + +static int fe_change_mtu(struct net_device *dev, int new_mtu) +{ + struct fe_priv *priv = netdev_priv(dev); + int frag_size, old_mtu; + u32 fwd_cfg; + + if (!(priv->flags & FE_FLAG_JUMBO_FRAME)) + return eth_change_mtu(dev, new_mtu); + + if (IS_ENABLED(CONFIG_SOC_MT7621)) + if (new_mtu > 2048) + return -EINVAL; + + frag_size = fe_max_frag_size(new_mtu); + if (new_mtu < 68 || frag_size > PAGE_SIZE) + return -EINVAL; + + old_mtu = dev->mtu; + dev->mtu = new_mtu; + + /* return early if the buffer sizes will not change */ + if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) + return 0; + if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN) + return 0; + + if (new_mtu <= ETH_DATA_LEN) + priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN); + else + priv->rx_ring.frag_size = PAGE_SIZE; + priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size); + + if (!netif_running(dev)) + return 0; + + fe_stop(dev); + if (!IS_ENABLED(CONFIG_SOC_MT7621)) { + fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG); + if (new_mtu <= ETH_DATA_LEN) { + fwd_cfg &= ~FE_GDM1_JMB_EN; + } else { + fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT); + fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) << + FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN; + } + fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG); + } + + return fe_open(dev); +} + +static const struct net_device_ops fe_netdev_ops = { + .ndo_init = fe_init, + .ndo_uninit = fe_uninit, + .ndo_open = fe_open, + .ndo_stop = fe_stop, + .ndo_start_xmit = fe_start_xmit, + .ndo_set_mac_address = fe_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = fe_do_ioctl, + .ndo_change_mtu = fe_change_mtu, + .ndo_tx_timeout = fe_tx_timeout, + .ndo_get_stats64 = fe_get_stats64, + .ndo_vlan_rx_add_vid = fe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = fe_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fe_poll_controller, +#endif +}; + +static void fe_reset_pending(struct fe_priv *priv) +{ + struct net_device *dev = priv->netdev; + int err; + + rtnl_lock(); + fe_stop(dev); + + err = fe_open(dev); + if (err) { + netif_alert(priv, ifup, dev, + "Driver up/down cycle failed, closing device.\n"); + dev_close(dev); + } + rtnl_unlock(); +} + +static const struct fe_work_t fe_work[] = { + {FE_FLAG_RESET_PENDING, fe_reset_pending}, +}; + +static void fe_pending_work(struct work_struct *work) +{ + struct fe_priv *priv = container_of(work, struct fe_priv, pending_work); + int i; + bool pending; + + for (i = 0; i < ARRAY_SIZE(fe_work); i++) { + pending = test_and_clear_bit(fe_work[i].bitnr, + priv->pending_flags); + if (pending) + fe_work[i].action(priv); + } +} + +static int fe_probe(struct platform_device 
*pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + const struct of_device_id *match; + struct fe_soc_data *soc; + struct net_device *netdev; + struct fe_priv *priv; + struct clk *sysclk; + int err, napi_weight; + + device_reset(&pdev->dev); + + match = of_match_device(of_fe_match, &pdev->dev); + soc = (struct fe_soc_data *)match->data; + + if (soc->reg_table) + fe_reg_table = soc->reg_table; + else + soc->reg_table = fe_reg_table; + + fe_base = devm_ioremap_resource(&pdev->dev, res); + if (!fe_base) { + err = -EADDRNOTAVAIL; + goto err_out; + } + + netdev = alloc_etherdev(sizeof(*priv)); + if (!netdev) { + dev_err(&pdev->dev, "alloc_etherdev failed\n"); + err = -ENOMEM; + goto err_iounmap; + } + + SET_NETDEV_DEV(netdev, &pdev->dev); + netdev->netdev_ops = &fe_netdev_ops; + netdev->base_addr = (unsigned long)fe_base; + + netdev->irq = platform_get_irq(pdev, 0); + if (netdev->irq < 0) { + dev_err(&pdev->dev, "no IRQ resource found\n"); + err = -ENXIO; + goto err_free_dev; + } + + if (soc->init_data) + soc->init_data(soc, netdev); + netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_CTAG_TX; + netdev->features |= netdev->hw_features; + + /* fake rx vlan filter func. to support tx vlan offload func */ + if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + priv = netdev_priv(netdev); + spin_lock_init(&priv->page_lock); + if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) { + priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL); + if (!priv->hw_stats) { + err = -ENOMEM; + goto err_free_dev; + } + spin_lock_init(&priv->hw_stats->stats_lock); + } + + sysclk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(sysclk)) { + priv->sysclk = clk_get_rate(sysclk); + } else if ((priv->flags & FE_FLAG_CALIBRATE_CLK)) { + dev_err(&pdev->dev, "this soc needs a clk for calibration\n"); + err = -ENXIO; + goto err_free_dev; + } + + priv->switch_np = of_parse_phandle(pdev->dev.of_node, "mediatek,switch", 0); + if ((priv->flags & FE_FLAG_HAS_SWITCH) && !priv->switch_np) { + dev_err(&pdev->dev, "failed to read switch phandle\n"); + err = -ENODEV; + goto err_free_dev; + } + + priv->netdev = netdev; + priv->device = &pdev->dev; + priv->soc = soc; + priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE); + priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN); + priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size); + priv->tx_ring.tx_ring_size = NUM_DMA_DESC; + priv->rx_ring.rx_ring_size = NUM_DMA_DESC; + INIT_WORK(&priv->pending_work, fe_pending_work); + + napi_weight = 16; + if (priv->flags & FE_FLAG_NAPI_WEIGHT) { + napi_weight *= 4; + priv->tx_ring.tx_ring_size *= 4; + priv->rx_ring.rx_ring_size *= 4; + } + netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight); + fe_set_ethtool_ops(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "error bringing up device\n"); + goto err_free_dev; + } + + platform_set_drvdata(pdev, netdev); + + netif_info(priv, probe, netdev, "mediatek frame engine at 0x%08lx, irq %d\n", + netdev->base_addr, netdev->irq); + + return 0; + +err_free_dev: + free_netdev(netdev); +err_iounmap: + devm_iounmap(&pdev->dev, fe_base); +err_out: + return err; +} + +static int fe_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct fe_priv *priv = netdev_priv(dev); + + netif_napi_del(&priv->rx_napi); + kfree(priv->hw_stats); + + cancel_work_sync(&priv->pending_work); + + unregister_netdev(dev); + 
free_netdev(dev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver fe_driver = { + .probe = fe_probe, + .remove = fe_remove, + .driver = { + .name = "mtk_soc_eth", + .owner = THIS_MODULE, + .of_match_table = of_fe_match, + }, +}; + +module_platform_driver(fe_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Ethernet driver for Ralink SoC"); +MODULE_VERSION(MTK_FE_DRV_VERSION); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mtk_eth_soc.h b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mtk_eth_soc.h new file mode 100644 index 0000000000..6a946c7668 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/mtk_eth_soc.h @@ -0,0 +1,523 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#ifndef FE_ETH_H +#define FE_ETH_H + +#include <linux/mii.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/dma-mapping.h> +#include <linux/phy.h> +#include <linux/ethtool.h> +#include <linux/version.h> + +enum fe_reg { + FE_REG_PDMA_GLO_CFG = 0, + FE_REG_PDMA_RST_CFG, + FE_REG_DLY_INT_CFG, + FE_REG_TX_BASE_PTR0, + FE_REG_TX_MAX_CNT0, + FE_REG_TX_CTX_IDX0, + FE_REG_TX_DTX_IDX0, + FE_REG_RX_BASE_PTR0, + FE_REG_RX_MAX_CNT0, + FE_REG_RX_CALC_IDX0, + FE_REG_RX_DRX_IDX0, + FE_REG_FE_INT_ENABLE, + FE_REG_FE_INT_STATUS, + FE_REG_FE_DMA_VID_BASE, + FE_REG_FE_COUNTER_BASE, + FE_REG_FE_RST_GL, + FE_REG_FE_INT_STATUS2, + FE_REG_COUNT +}; + +enum fe_work_flag { + FE_FLAG_RESET_PENDING, + FE_FLAG_MAX +}; + +#define MTK_FE_DRV_VERSION "0.1.2" + +/* power of 2 to let NEXT_TX_DESP_IDX work */ +#define NUM_DMA_DESC BIT(10) +#define MAX_DMA_DESC 0xfff + +#define FE_DELAY_EN_INT 0x80 +#define FE_DELAY_MAX_INT 0x04 +#define FE_DELAY_MAX_TOUT 0x04 +#define FE_DELAY_TIME 20 +#define FE_DELAY_CHAN (((FE_DELAY_EN_INT | FE_DELAY_MAX_INT) << 8) | \ + FE_DELAY_MAX_TOUT) +#define FE_DELAY_INIT ((FE_DELAY_CHAN << 16) | FE_DELAY_CHAN) +#define FE_PSE_FQFC_CFG_INIT 0x80504000 +#define FE_PSE_FQFC_CFG_256Q 0xff908000 + +/* interrupt bits */ +#define FE_CNT_PPE_AF BIT(31) +#define FE_CNT_GDM_AF BIT(29) +#define FE_PSE_P2_FC BIT(26) +#define FE_PSE_BUF_DROP BIT(24) +#define FE_GDM_OTHER_DROP BIT(23) +#define FE_PSE_P1_FC BIT(22) +#define FE_PSE_P0_FC BIT(21) +#define FE_PSE_FQ_EMPTY BIT(20) +#define FE_GE1_STA_CHG BIT(18) +#define FE_TX_COHERENT BIT(17) +#define FE_RX_COHERENT BIT(16) +#define FE_TX_DONE_INT3 BIT(11) +#define FE_TX_DONE_INT2 BIT(10) +#define FE_TX_DONE_INT1 BIT(9) +#define FE_TX_DONE_INT0 BIT(8) +#define FE_RX_DONE_INT0 BIT(2) +#define FE_TX_DLY_INT BIT(1) +#define FE_RX_DLY_INT BIT(0) + +#define FE_RX_DONE_INT FE_RX_DONE_INT0 +#define FE_TX_DONE_INT (FE_TX_DONE_INT0 | FE_TX_DONE_INT1 | \ + FE_TX_DONE_INT2 | FE_TX_DONE_INT3) + +#define RT5350_RX_DLY_INT BIT(30) +#define RT5350_TX_DLY_INT BIT(28) +#define RT5350_RX_DONE_INT1 BIT(17) +#define RT5350_RX_DONE_INT0 BIT(16) 
+#define RT5350_TX_DONE_INT3 BIT(3) +#define RT5350_TX_DONE_INT2 BIT(2) +#define RT5350_TX_DONE_INT1 BIT(1) +#define RT5350_TX_DONE_INT0 BIT(0) + +#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1) +#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \ + RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3) + +/* registers */ +#define FE_FE_OFFSET 0x0000 +#define FE_GDMA_OFFSET 0x0020 +#define FE_PSE_OFFSET 0x0040 +#define FE_GDMA2_OFFSET 0x0060 +#define FE_CDMA_OFFSET 0x0080 +#define FE_DMA_VID0 0x00a8 +#define FE_PDMA_OFFSET 0x0100 +#define FE_PPE_OFFSET 0x0200 +#define FE_CMTABLE_OFFSET 0x0400 +#define FE_POLICYTABLE_OFFSET 0x1000 + +#define RT5350_PDMA_OFFSET 0x0800 +#define RT5350_SDM_OFFSET 0x0c00 + +#define FE_MDIO_ACCESS (FE_FE_OFFSET + 0x00) +#define FE_MDIO_CFG (FE_FE_OFFSET + 0x04) +#define FE_FE_GLO_CFG (FE_FE_OFFSET + 0x08) +#define FE_FE_RST_GL (FE_FE_OFFSET + 0x0C) +#define FE_FE_INT_STATUS (FE_FE_OFFSET + 0x10) +#define FE_FE_INT_ENABLE (FE_FE_OFFSET + 0x14) +#define FE_MDIO_CFG2 (FE_FE_OFFSET + 0x18) +#define FE_FOC_TS_T (FE_FE_OFFSET + 0x1C) + +#define FE_GDMA1_FWD_CFG (FE_GDMA_OFFSET + 0x00) +#define FE_GDMA1_SCH_CFG (FE_GDMA_OFFSET + 0x04) +#define FE_GDMA1_SHPR_CFG (FE_GDMA_OFFSET + 0x08) +#define FE_GDMA1_MAC_ADRL (FE_GDMA_OFFSET + 0x0C) +#define FE_GDMA1_MAC_ADRH (FE_GDMA_OFFSET + 0x10) + +#define FE_GDMA2_FWD_CFG (FE_GDMA2_OFFSET + 0x00) +#define FE_GDMA2_SCH_CFG (FE_GDMA2_OFFSET + 0x04) +#define FE_GDMA2_SHPR_CFG (FE_GDMA2_OFFSET + 0x08) +#define FE_GDMA2_MAC_ADRL (FE_GDMA2_OFFSET + 0x0C) +#define FE_GDMA2_MAC_ADRH (FE_GDMA2_OFFSET + 0x10) + +#define FE_PSE_FQ_CFG (FE_PSE_OFFSET + 0x00) +#define FE_CDMA_FC_CFG (FE_PSE_OFFSET + 0x04) +#define FE_GDMA1_FC_CFG (FE_PSE_OFFSET + 0x08) +#define FE_GDMA2_FC_CFG (FE_PSE_OFFSET + 0x0C) + +#define FE_CDMA_CSG_CFG (FE_CDMA_OFFSET + 0x00) +#define FE_CDMA_SCH_CFG (FE_CDMA_OFFSET + 0x04) + +#ifdef CONFIG_SOC_MT7621 +#define MT7620A_GDMA_OFFSET 0x0500 +#else +#define MT7620A_GDMA_OFFSET 0x0600 +#endif +#define MT7620A_GDMA1_FWD_CFG (MT7620A_GDMA_OFFSET + 0x00) +#define MT7620A_FE_GDMA1_SCH_CFG (MT7620A_GDMA_OFFSET + 0x04) +#define MT7620A_FE_GDMA1_SHPR_CFG (MT7620A_GDMA_OFFSET + 0x08) +#define MT7620A_FE_GDMA1_MAC_ADRL (MT7620A_GDMA_OFFSET + 0x0C) +#define MT7620A_FE_GDMA1_MAC_ADRH (MT7620A_GDMA_OFFSET + 0x10) + +#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00) +#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04) +#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08) +#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C) +#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10) +#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14) +#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18) +#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C) +#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20) +#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24) +#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28) +#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C) +#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30) +#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34) +#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38) +#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C) +#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100) +#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104) +#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108) +#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C) +#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110) +#define 
RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114) +#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118) +#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C) +#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204) +#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208) +#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c) +#define RT5350_FE_INT_STATUS (RT5350_PDMA_OFFSET + 0x220) +#define RT5350_FE_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228) +#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280) + +#define FE_PDMA_GLO_CFG (FE_PDMA_OFFSET + 0x00) +#define FE_PDMA_RST_CFG (FE_PDMA_OFFSET + 0x04) +#define FE_PDMA_SCH_CFG (FE_PDMA_OFFSET + 0x08) +#define FE_DLY_INT_CFG (FE_PDMA_OFFSET + 0x0C) +#define FE_TX_BASE_PTR0 (FE_PDMA_OFFSET + 0x10) +#define FE_TX_MAX_CNT0 (FE_PDMA_OFFSET + 0x14) +#define FE_TX_CTX_IDX0 (FE_PDMA_OFFSET + 0x18) +#define FE_TX_DTX_IDX0 (FE_PDMA_OFFSET + 0x1C) +#define FE_TX_BASE_PTR1 (FE_PDMA_OFFSET + 0x20) +#define FE_TX_MAX_CNT1 (FE_PDMA_OFFSET + 0x24) +#define FE_TX_CTX_IDX1 (FE_PDMA_OFFSET + 0x28) +#define FE_TX_DTX_IDX1 (FE_PDMA_OFFSET + 0x2C) +#define FE_RX_BASE_PTR0 (FE_PDMA_OFFSET + 0x30) +#define FE_RX_MAX_CNT0 (FE_PDMA_OFFSET + 0x34) +#define FE_RX_CALC_IDX0 (FE_PDMA_OFFSET + 0x38) +#define FE_RX_DRX_IDX0 (FE_PDMA_OFFSET + 0x3C) +#define FE_TX_BASE_PTR2 (FE_PDMA_OFFSET + 0x40) +#define FE_TX_MAX_CNT2 (FE_PDMA_OFFSET + 0x44) +#define FE_TX_CTX_IDX2 (FE_PDMA_OFFSET + 0x48) +#define FE_TX_DTX_IDX2 (FE_PDMA_OFFSET + 0x4C) +#define FE_TX_BASE_PTR3 (FE_PDMA_OFFSET + 0x50) +#define FE_TX_MAX_CNT3 (FE_PDMA_OFFSET + 0x54) +#define FE_TX_CTX_IDX3 (FE_PDMA_OFFSET + 0x58) +#define FE_TX_DTX_IDX3 (FE_PDMA_OFFSET + 0x5C) +#define FE_RX_BASE_PTR1 (FE_PDMA_OFFSET + 0x60) +#define FE_RX_MAX_CNT1 (FE_PDMA_OFFSET + 0x64) +#define FE_RX_CALC_IDX1 (FE_PDMA_OFFSET + 0x68) +#define FE_RX_DRX_IDX1 (FE_PDMA_OFFSET + 0x6C) + +/* Switch DMA configuration */ +#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00) +#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04) +#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08) +#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C) +#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10) +#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100) +#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104) +#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108) +#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C) +#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110) + +#define RT5350_SDM_ICS_EN BIT(16) +#define RT5350_SDM_TCS_EN BIT(17) +#define RT5350_SDM_UCS_EN BIT(18) + +/* MDIO_CFG register bits */ +#define FE_MDIO_CFG_AUTO_POLL_EN BIT(29) +#define FE_MDIO_CFG_GP1_BP_EN BIT(16) +#define FE_MDIO_CFG_GP1_FRC_EN BIT(15) +#define FE_MDIO_CFG_GP1_SPEED_10 (0 << 13) +#define FE_MDIO_CFG_GP1_SPEED_100 (1 << 13) +#define FE_MDIO_CFG_GP1_SPEED_1000 (2 << 13) +#define FE_MDIO_CFG_GP1_DUPLEX BIT(12) +#define FE_MDIO_CFG_GP1_FC_TX BIT(11) +#define FE_MDIO_CFG_GP1_FC_RX BIT(10) +#define FE_MDIO_CFG_GP1_LNK_DWN BIT(9) +#define FE_MDIO_CFG_GP1_AN_FAIL BIT(8) +#define FE_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6) +#define FE_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6) +#define FE_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6) +#define FE_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6) +#define FE_MDIO_CFG_TURBO_MII_FREQ BIT(5) +#define FE_MDIO_CFG_TURBO_MII_MODE BIT(4) +#define FE_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2) +#define FE_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2) +#define FE_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2) +#define FE_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2) +#define FE_MDIO_CFG_TX_CLK_SKEW_0 0 +#define FE_MDIO_CFG_TX_CLK_SKEW_200 
1 +#define FE_MDIO_CFG_TX_CLK_SKEW_400 2 +#define FE_MDIO_CFG_TX_CLK_SKEW_INV 3 + +/* uni-cast port */ +#define FE_GDM1_JMB_LEN_MASK 0xf +#define FE_GDM1_JMB_LEN_SHIFT 28 +#define FE_GDM1_ICS_EN BIT(22) +#define FE_GDM1_TCS_EN BIT(21) +#define FE_GDM1_UCS_EN BIT(20) +#define FE_GDM1_JMB_EN BIT(19) +#define FE_GDM1_STRPCRC BIT(16) +#define FE_GDM1_UFRC_P_CPU (0 << 12) +#define FE_GDM1_UFRC_P_GDMA1 (1 << 12) +#define FE_GDM1_UFRC_P_PPE (6 << 12) + +/* checksums */ +#define FE_ICS_GEN_EN BIT(2) +#define FE_UCS_GEN_EN BIT(1) +#define FE_TCS_GEN_EN BIT(0) + +/* dma ring */ +#define FE_PST_DRX_IDX0 BIT(16) +#define FE_PST_DTX_IDX3 BIT(3) +#define FE_PST_DTX_IDX2 BIT(2) +#define FE_PST_DTX_IDX1 BIT(1) +#define FE_PST_DTX_IDX0 BIT(0) + +#define FE_RX_2B_OFFSET BIT(31) +#define FE_TX_WB_DDONE BIT(6) +#define FE_RX_DMA_BUSY BIT(3) +#define FE_TX_DMA_BUSY BIT(1) +#define FE_RX_DMA_EN BIT(2) +#define FE_TX_DMA_EN BIT(0) + +#define FE_PDMA_SIZE_4DWORDS (0 << 4) +#define FE_PDMA_SIZE_8DWORDS (1 << 4) +#define FE_PDMA_SIZE_16DWORDS (2 << 4) + +#define FE_US_CYC_CNT_MASK 0xff +#define FE_US_CYC_CNT_SHIFT 0x8 +#define FE_US_CYC_CNT_DIVISOR 1000000 + +/* rxd2 */ +#define RX_DMA_DONE BIT(31) +#define RX_DMA_LSO BIT(30) +#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) +#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) +#define RX_DMA_TAG BIT(15) +/* rxd3 */ +#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff) +#define RX_DMA_VID(_x) ((_x) & 0xffff) +/* rxd4 */ +#define RX_DMA_L4VALID BIT(30) + +struct fe_rx_dma { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; +} __packed __aligned(4); + +#define TX_DMA_BUF_LEN 0x3fff +#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16) +#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16) +#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN) +#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN) +#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN) +#define TX_DMA_LS1 BIT(14) +#define TX_DMA_LS0 BIT(30) +#define TX_DMA_DONE BIT(31) + +#define TX_DMA_INS_VLAN_MT7621 BIT(16) +#define TX_DMA_INS_VLAN BIT(7) +#define TX_DMA_INS_PPPOE BIT(12) +#define TX_DMA_QN(_x) ((_x) << 16) +#define TX_DMA_PN(_x) ((_x) << 24) +#define TX_DMA_QN_MASK TX_DMA_QN(0x7) +#define TX_DMA_PN_MASK TX_DMA_PN(0x7) +#define TX_DMA_UDF BIT(20) +#define TX_DMA_CHKSUM (0x7 << 29) +#define TX_DMA_TSO BIT(28) + +/* frame engine counters */ +#define FE_PPE_AC_BCNT0 (FE_CMTABLE_OFFSET + 0x00) +#define FE_GDMA1_TX_GBCNT (FE_CMTABLE_OFFSET + 0x300) +#define FE_GDMA2_TX_GBCNT (FE_GDMA1_TX_GBCNT + 0x40) + +/* phy device flags */ +#define FE_PHY_FLAG_PORT BIT(0) +#define FE_PHY_FLAG_ATTACH BIT(1) + +struct fe_tx_dma { + unsigned int txd1; + unsigned int txd2; + unsigned int txd3; + unsigned int txd4; +} __packed __aligned(4); + +struct fe_priv; + +struct fe_phy { + /* make sure that phy operations are atomic */ + spinlock_t lock; + + struct phy_device *phy[8]; + struct device_node *phy_node[8]; + const __be32 *phy_fixed[8]; + int duplex[8]; + int speed[8]; + int tx_fc[8]; + int rx_fc[8]; + int (*connect)(struct fe_priv *priv); + void (*disconnect)(struct fe_priv *priv); + void (*start)(struct fe_priv *priv); + void (*stop)(struct fe_priv *priv); +}; + +struct fe_soc_data { + const u16 *reg_table; + + void (*init_data)(struct fe_soc_data *data, struct net_device *netdev); + void (*reset_fe)(void); + void (*set_mac)(struct fe_priv *priv, unsigned char *mac); + int (*fwd_config)(struct fe_priv *priv); + void (*tx_dma)(struct fe_tx_dma *txd); + int (*switch_init)(struct fe_priv 
*priv); + int (*switch_config)(struct fe_priv *priv); + void (*port_init)(struct fe_priv *priv, struct device_node *port); + int (*has_carrier)(struct fe_priv *priv); + int (*mdio_init)(struct fe_priv *priv); + void (*mdio_cleanup)(struct fe_priv *priv); + int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg, + u16 val); + int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg); + void (*mdio_adjust_link)(struct fe_priv *priv, int port); + + void *swpriv; + u32 pdma_glo_cfg; + u32 rx_int; + u32 tx_int; + u32 status_int; + u32 checksum_bit; +}; + +#define FE_FLAG_PADDING_64B BIT(0) +#define FE_FLAG_PADDING_BUG BIT(1) +#define FE_FLAG_JUMBO_FRAME BIT(2) +#define FE_FLAG_RX_2B_OFFSET BIT(3) +#define FE_FLAG_RX_SG_DMA BIT(4) +#define FE_FLAG_RX_VLAN_CTAG BIT(5) +#define FE_FLAG_NAPI_WEIGHT BIT(6) +#define FE_FLAG_CALIBRATE_CLK BIT(7) +#define FE_FLAG_HAS_SWITCH BIT(8) + +#define FE_STAT_REG_DECLARE \ + _FE(tx_bytes) \ + _FE(tx_packets) \ + _FE(tx_skip) \ + _FE(tx_collisions) \ + _FE(rx_bytes) \ + _FE(rx_packets) \ + _FE(rx_overflow) \ + _FE(rx_fcs_errors) \ + _FE(rx_short_errors) \ + _FE(rx_long_errors) \ + _FE(rx_checksum_errors) \ + _FE(rx_flow_control_packets) + +struct fe_hw_stats { + /* make sure that stats operations are atomic */ + spinlock_t stats_lock; + + struct u64_stats_sync syncp; +#define _FE(x) u64 x; + FE_STAT_REG_DECLARE +#undef _FE +}; + +enum fe_tx_flags { + FE_TX_FLAGS_SINGLE0 = 0x01, + FE_TX_FLAGS_PAGE0 = 0x02, + FE_TX_FLAGS_PAGE1 = 0x04, +}; + +struct fe_tx_buf { + struct sk_buff *skb; + u32 flags; + DEFINE_DMA_UNMAP_ADDR(dma_addr0); + DEFINE_DMA_UNMAP_LEN(dma_len0); + DEFINE_DMA_UNMAP_ADDR(dma_addr1); + DEFINE_DMA_UNMAP_LEN(dma_len1); +}; + +struct fe_tx_ring { + struct fe_tx_dma *tx_dma; + struct fe_tx_buf *tx_buf; + dma_addr_t tx_phys; + u16 tx_ring_size; + u16 tx_free_idx; + u16 tx_next_idx; + u16 tx_thresh; +}; + +struct fe_rx_ring { + struct fe_rx_dma *rx_dma; + u8 **rx_data; + dma_addr_t rx_phys; + u16 rx_ring_size; + u16 frag_size; + u16 rx_buf_size; + u16 rx_calc_idx; +}; + +struct fe_priv { + /* make sure that register operations are atomic */ + spinlock_t page_lock; + + struct fe_soc_data *soc; + struct net_device *netdev; + struct device_node *switch_np; + u32 msg_enable; + u32 flags; + + struct device *device; + unsigned long sysclk; + + struct fe_rx_ring rx_ring; + struct napi_struct rx_napi; + + struct fe_tx_ring tx_ring; + + struct fe_phy *phy; + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + u32 phy_flags; + + int link[8]; + + struct fe_hw_stats *hw_stats; + unsigned long vlan_map; + struct work_struct pending_work; + DECLARE_BITMAP(pending_flags, FE_FLAG_MAX); +}; + +extern const struct of_device_id of_fe_match[]; + +void fe_w32(u32 val, unsigned reg); +u32 fe_r32(unsigned reg); + +int fe_set_clock_cycle(struct fe_priv *priv); +void fe_csum_config(struct fe_priv *priv); +void fe_stats_update(struct fe_priv *priv); +void fe_fwd_config(struct fe_priv *priv); +void fe_reg_w32(u32 val, enum fe_reg reg); +u32 fe_reg_r32(enum fe_reg reg); + +void fe_reset(u32 reset_bits); + +static inline void *priv_netdev(struct fe_priv *priv) +{ + return (char *)priv - ALIGN(sizeof(struct net_device), NETDEV_ALIGN); +} + +#endif /* FE_ETH_H */ diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_mt7620.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_mt7620.c new file mode 100644 index 0000000000..40bbec45ae --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_mt7620.c @@ -0,0 +1,335 
@@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/if_vlan.h> +#include <linux/of_net.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#include <mt7620.h> +#include "mtk_eth_soc.h" +#include "gsw_mt7620.h" +#include "mt7530.h" +#include "mdio.h" + +#define MT7620A_CDMA_CSG_CFG 0x400 +#define MT7620_DMA_VID (MT7620A_CDMA_CSG_CFG | 0x30) +#define MT7621_CDMP_IG_CTRL (MT7620A_CDMA_CSG_CFG + 0x00) +#define MT7621_CDMP_EG_CTRL (MT7620A_CDMA_CSG_CFG + 0x04) +#define MT7620A_RESET_FE BIT(21) +#define MT7621_RESET_FE BIT(6) +#define MT7620A_RESET_ESW BIT(23) +#define MT7620_L4_VALID BIT(23) +#define MT7621_L4_VALID BIT(24) + +#define MT7620_TX_DMA_UDF BIT(15) +#define MT7621_TX_DMA_UDF BIT(19) +#define TX_DMA_FP_BMAP ((0xff) << 19) + +#define CDMA_ICS_EN BIT(2) +#define CDMA_UCS_EN BIT(1) +#define CDMA_TCS_EN BIT(0) + +#define GDMA_ICS_EN BIT(22) +#define GDMA_TCS_EN BIT(21) +#define GDMA_UCS_EN BIT(20) + +/* frame engine counters */ +#define MT7620_REG_MIB_OFFSET 0x1000 +#define MT7620_PPE_AC_BCNT0 (MT7620_REG_MIB_OFFSET + 0x00) +#define MT7620_GDM1_TX_GBCNT (MT7620_REG_MIB_OFFSET + 0x300) +#define MT7620_GDM2_TX_GBCNT (MT7620_GDM1_TX_GBCNT + 0x40) + +#define MT7621_REG_MIB_OFFSET 0x2000 +#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00) +#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400) +#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40) + +#define GSW_REG_GDMA1_MAC_ADRL 0x508 +#define GSW_REG_GDMA1_MAC_ADRH 0x50C + +#define MT7621_FE_RST_GL (FE_FE_OFFSET + 0x04) +#define MT7620_FE_INT_STATUS2 (FE_FE_OFFSET + 0x08) + +/* FE_INT_STATUS reg on mt7620 define CNT_GDM1_AF at BIT(29) + * but after test it should be BIT(13). 
+ */ +#define MT7620_FE_GDM1_AF BIT(13) +#define MT7621_FE_GDM1_AF BIT(28) +#define MT7621_FE_GDM2_AF BIT(29) + +static const u16 mt7620_reg_table[FE_REG_COUNT] = { + [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, + [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, + [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, + [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, + [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, + [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, + [FE_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0, + [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, + [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, + [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, + [FE_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0, + [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, + [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, + [FE_REG_FE_DMA_VID_BASE] = MT7620_DMA_VID, + [FE_REG_FE_COUNTER_BASE] = MT7620_GDM1_TX_GBCNT, + [FE_REG_FE_RST_GL] = MT7621_FE_RST_GL, + [FE_REG_FE_INT_STATUS2] = MT7620_FE_INT_STATUS2, +}; + +static int mt7620_gsw_config(struct fe_priv *priv) +{ + struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; + + /* is the mt7530 internal or external */ + if (priv->mii_bus && mdiobus_get_phy(priv->mii_bus, 0x1f)) { + mt7530_probe(priv->device, gsw->base, NULL, 0); + mt7530_probe(priv->device, NULL, priv->mii_bus, 1); + } else { + mt7530_probe(priv->device, gsw->base, NULL, 1); + } + + return 0; +} + +static void mt7620_set_mac(struct fe_priv *priv, unsigned char *mac) +{ + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + unsigned long flags; + + spin_lock_irqsave(&priv->page_lock, flags); + mtk_switch_w32(gsw, (mac[0] << 8) | mac[1], GSW_REG_SMACCR1); + mtk_switch_w32(gsw, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + GSW_REG_SMACCR0); + spin_unlock_irqrestore(&priv->page_lock, flags); +} + +static void mt7620_auto_poll(struct mt7620_gsw *gsw) +{ + int phy; + int lsb = -1, msb = 0; + + for_each_set_bit(phy, &gsw->autopoll, 32) { + if (lsb < 0) + lsb = phy; + msb = phy; + } + + if (lsb == msb) + lsb--; + + mtk_switch_w32(gsw, PHY_AN_EN | PHY_PRE_EN | PMY_MDC_CONF(5) | + (msb << 8) | lsb, ESW_PHY_POLLING); +} + +static void mt7620_port_init(struct fe_priv *priv, struct device_node *np) +{ + struct mt7620_gsw *gsw = (struct mt7620_gsw *)priv->soc->swpriv; + const __be32 *_id = of_get_property(np, "reg", NULL); + int phy_mode, size, id; + int shift = 12; + u32 val, mask = 0; + int min = (gsw->port4 == PORT4_EPHY) ? 
(5) : (4); + + if (!_id || (be32_to_cpu(*_id) < min) || (be32_to_cpu(*_id) > 5)) { + if (_id) + pr_err("%s: invalid port id %d\n", np->name, + be32_to_cpu(*_id)); + else + pr_err("%s: invalid port id\n", np->name); + return; + } + + id = be32_to_cpu(*_id); + + if (id == 4) + shift = 14; + + priv->phy->phy_fixed[id] = of_get_property(np, "mediatek,fixed-link", + &size); + if (priv->phy->phy_fixed[id] && + (size != (4 * sizeof(*priv->phy->phy_fixed[id])))) { + pr_err("%s: invalid fixed link property\n", np->name); + priv->phy->phy_fixed[id] = NULL; + return; + } + + phy_mode = of_get_phy_mode(np); + switch (phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + mask = 0; + break; + case PHY_INTERFACE_MODE_MII: + mask = 1; + break; + case PHY_INTERFACE_MODE_RMII: + mask = 2; + break; + default: + dev_err(priv->device, "port %d - invalid phy mode\n", id); + return; + } + + priv->phy->phy_node[id] = of_parse_phandle(np, "phy-handle", 0); + if (!priv->phy->phy_node[id] && !priv->phy->phy_fixed[id]) + return; + + val = rt_sysc_r32(SYSC_REG_CFG1); + val &= ~(3 << shift); + val |= mask << shift; + rt_sysc_w32(val, SYSC_REG_CFG1); + + if (priv->phy->phy_fixed[id]) { + const __be32 *link = priv->phy->phy_fixed[id]; + int tx_fc, rx_fc; + u32 val = 0; + + priv->phy->speed[id] = be32_to_cpup(link++); + tx_fc = be32_to_cpup(link++); + rx_fc = be32_to_cpup(link++); + priv->phy->duplex[id] = be32_to_cpup(link++); + priv->link[id] = 1; + + switch (priv->phy->speed[id]) { + case SPEED_10: + val = 0; + break; + case SPEED_100: + val = 1; + break; + case SPEED_1000: + val = 2; + break; + default: + dev_err(priv->device, "invalid link speed: %d\n", + priv->phy->speed[id]); + priv->phy->phy_fixed[id] = 0; + return; + } + val = PMCR_SPEED(val); + val |= PMCR_LINK | PMCR_BACKPRES | PMCR_BACKOFF | PMCR_RX_EN | + PMCR_TX_EN | PMCR_FORCE | PMCR_MAC_MODE | PMCR_IPG; + if (tx_fc) + val |= PMCR_TX_FC; + if (rx_fc) + val |= PMCR_RX_FC; + if (priv->phy->duplex[id]) + val |= PMCR_DUPLEX; + mtk_switch_w32(gsw, val, GSW_REG_PORT_PMCR(id)); + dev_info(priv->device, "using fixed link parameters\n"); + return; + } + + if (priv->phy->phy_node[id] && mdiobus_get_phy(priv->mii_bus, id)) { + u32 val = PMCR_BACKPRES | PMCR_BACKOFF | PMCR_RX_EN | + PMCR_TX_EN | PMCR_MAC_MODE | PMCR_IPG; + + mtk_switch_w32(gsw, val, GSW_REG_PORT_PMCR(id)); + fe_connect_phy_node(priv, priv->phy->phy_node[id]); + gsw->autopoll |= BIT(id); + mt7620_auto_poll(gsw); + return; + } +} + +static void mt7620_fe_reset(void) +{ + fe_reset(MT7620A_RESET_FE | MT7620A_RESET_ESW); +} + +static void mt7620_rxcsum_config(bool enable) +{ + if (enable) + fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) | (GDMA_ICS_EN | + GDMA_TCS_EN | GDMA_UCS_EN), + MT7620A_GDMA1_FWD_CFG); + else + fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~(GDMA_ICS_EN | + GDMA_TCS_EN | GDMA_UCS_EN), + MT7620A_GDMA1_FWD_CFG); +} + +static void mt7620_txcsum_config(bool enable) +{ + if (enable) + fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) | (CDMA_ICS_EN | + CDMA_UCS_EN | CDMA_TCS_EN), + MT7620A_CDMA_CSG_CFG); + else + fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) & ~(CDMA_ICS_EN | + CDMA_UCS_EN | CDMA_TCS_EN), + MT7620A_CDMA_CSG_CFG); +} + +static int mt7620_fwd_config(struct fe_priv *priv) +{ + struct net_device *dev = priv_netdev(priv); + + fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~7, MT7620A_GDMA1_FWD_CFG); + + mt7620_txcsum_config((dev->features & NETIF_F_IP_CSUM)); + mt7620_rxcsum_config((dev->features & NETIF_F_RXCSUM)); + + return 0; +} + +static void mt7620_tx_dma(struct fe_tx_dma *txd) +{ +} + +static void mt7620_init_data(struct 
fe_soc_data *data, + struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + + priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_RX_2B_OFFSET | + FE_FLAG_RX_SG_DMA | FE_FLAG_HAS_SWITCH; + + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX; + if (mt7620_get_eco() >= 5) + netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_IPV6_CSUM; +} + +static struct fe_soc_data mt7620_data = { + .init_data = mt7620_init_data, + .reset_fe = mt7620_fe_reset, + .set_mac = mt7620_set_mac, + .fwd_config = mt7620_fwd_config, + .tx_dma = mt7620_tx_dma, + .switch_init = mtk_gsw_init, + .switch_config = mt7620_gsw_config, + .port_init = mt7620_port_init, + .reg_table = mt7620_reg_table, + .pdma_glo_cfg = FE_PDMA_SIZE_16DWORDS, + .rx_int = RT5350_RX_DONE_INT, + .tx_int = RT5350_TX_DONE_INT, + .status_int = MT7620_FE_GDM1_AF, + .checksum_bit = MT7620_L4_VALID, + .has_carrier = mt7620_has_carrier, + .mdio_read = mt7620_mdio_read, + .mdio_write = mt7620_mdio_write, + .mdio_adjust_link = mt7620_mdio_link_adjust, +}; + +const struct of_device_id of_fe_match[] = { + { .compatible = "mediatek,mt7620-eth", .data = &mt7620_data }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_fe_match); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_mt7621.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_mt7621.c new file mode 100644 index 0000000000..ce41b342e7 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_mt7621.c @@ -0,0 +1,185 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/if_vlan.h> +#include <linux/of_net.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "gsw_mt7620.h" +#include "mt7530.h" +#include "mdio.h" + +#define MT7620A_CDMA_CSG_CFG 0x400 +#define MT7621_CDMP_IG_CTRL (MT7620A_CDMA_CSG_CFG + 0x00) +#define MT7621_CDMP_EG_CTRL (MT7620A_CDMA_CSG_CFG + 0x04) +#define MT7621_RESET_FE BIT(6) +#define MT7621_L4_VALID BIT(24) + +#define MT7621_TX_DMA_UDF BIT(19) +#define MT7621_TX_DMA_FPORT BIT(25) + +#define CDMA_ICS_EN BIT(2) +#define CDMA_UCS_EN BIT(1) +#define CDMA_TCS_EN BIT(0) + +#define GDMA_ICS_EN BIT(22) +#define GDMA_TCS_EN BIT(21) +#define GDMA_UCS_EN BIT(20) + +/* frame engine counters */ +#define MT7621_REG_MIB_OFFSET 0x2000 +#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00) +#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400) +#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40) + +#define GSW_REG_GDMA1_MAC_ADRL 0x508 +#define GSW_REG_GDMA1_MAC_ADRH 0x50C + +#define MT7621_FE_RST_GL (FE_FE_OFFSET + 0x04) +#define MT7620_FE_INT_STATUS2 (FE_FE_OFFSET + 0x08) + +/* FE_INT_STATUS reg on mt7620 define CNT_GDM1_AF at BIT(29) + * but after test it should be BIT(13). 
+ */ +#define MT7620_FE_GDM1_AF BIT(13) +#define MT7621_FE_GDM1_AF BIT(28) +#define MT7621_FE_GDM2_AF BIT(29) + +static const u16 mt7621_reg_table[FE_REG_COUNT] = { + [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, + [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, + [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, + [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, + [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, + [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, + [FE_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0, + [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, + [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, + [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, + [FE_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0, + [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, + [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, + [FE_REG_FE_DMA_VID_BASE] = 0, + [FE_REG_FE_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT, + [FE_REG_FE_RST_GL] = MT7621_FE_RST_GL, + [FE_REG_FE_INT_STATUS2] = MT7620_FE_INT_STATUS2, +}; + +static int mt7621_gsw_config(struct fe_priv *priv) +{ + if (priv->mii_bus && mdiobus_get_phy(priv->mii_bus, 0x1f)) + mt7530_probe(priv->device, NULL, priv->mii_bus, 1); + + return 0; +} + +static void mt7621_fe_reset(void) +{ + fe_reset(MT7621_RESET_FE); +} + +static void mt7621_rxcsum_config(bool enable) +{ + if (enable) + fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) | (GDMA_ICS_EN | + GDMA_TCS_EN | GDMA_UCS_EN), + MT7620A_GDMA1_FWD_CFG); + else + fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~(GDMA_ICS_EN | + GDMA_TCS_EN | GDMA_UCS_EN), + MT7620A_GDMA1_FWD_CFG); +} + +static void mt7621_rxvlan_config(bool enable) +{ + if (enable) + fe_w32(1, MT7621_CDMP_EG_CTRL); + else + fe_w32(0, MT7621_CDMP_EG_CTRL); +} + +static int mt7621_fwd_config(struct fe_priv *priv) +{ + struct net_device *dev = priv_netdev(priv); + + fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~0xffff, + MT7620A_GDMA1_FWD_CFG); + + /* mt7621 doesn't have txcsum config */ + mt7621_rxcsum_config((dev->features & NETIF_F_RXCSUM)); + mt7621_rxvlan_config(priv->flags & FE_FLAG_RX_VLAN_CTAG); + + return 0; +} + +static void mt7621_tx_dma(struct fe_tx_dma *txd) +{ + txd->txd4 = MT7621_TX_DMA_FPORT; +} + +static void mt7621_init_data(struct fe_soc_data *data, + struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + + priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_RX_2B_OFFSET | + FE_FLAG_RX_SG_DMA | FE_FLAG_NAPI_WEIGHT | + FE_FLAG_HAS_SWITCH | FE_FLAG_JUMBO_FRAME; + + netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_TSO | + NETIF_F_TSO6 | NETIF_F_IPV6_CSUM; +} + +static void mt7621_set_mac(struct fe_priv *priv, unsigned char *mac) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->page_lock, flags); + fe_w32((mac[0] << 8) | mac[1], GSW_REG_GDMA1_MAC_ADRH); + fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + GSW_REG_GDMA1_MAC_ADRL); + spin_unlock_irqrestore(&priv->page_lock, flags); +} + +static struct fe_soc_data mt7621_data = { + .init_data = mt7621_init_data, + .reset_fe = mt7621_fe_reset, + .set_mac = mt7621_set_mac, + .fwd_config = mt7621_fwd_config, + .tx_dma = mt7621_tx_dma, + .switch_init = mtk_gsw_init, + .switch_config = mt7621_gsw_config, + .reg_table = mt7621_reg_table, + .pdma_glo_cfg = FE_PDMA_SIZE_16DWORDS, + .rx_int = RT5350_RX_DONE_INT, + .tx_int = RT5350_TX_DONE_INT, + .status_int = (MT7621_FE_GDM1_AF | MT7621_FE_GDM2_AF), + .checksum_bit = MT7621_L4_VALID, + .has_carrier = mt7620_has_carrier, + .mdio_read = mt7620_mdio_read, + .mdio_write = mt7620_mdio_write, + .mdio_adjust_link = mt7620_mdio_link_adjust, +}; 
+ +const struct of_device_id of_fe_match[] = { + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_fe_match); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt2880.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt2880.c new file mode 100644 index 0000000000..6c89c997d9 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt2880.c @@ -0,0 +1,76 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "mdio_rt2880.h" + +#define RT2880_RESET_FE BIT(18) + +static void rt2880_init_data(struct fe_soc_data *data, + struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + + priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_PADDING_BUG | + FE_FLAG_JUMBO_FRAME | FE_FLAG_CALIBRATE_CLK; + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX; + /* this should work according to the datasheet but actually does not*/ + /* netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; */ +} + +void rt2880_fe_reset(void) +{ + fe_reset(RT2880_RESET_FE); +} + +static int rt2880_fwd_config(struct fe_priv *priv) +{ + int ret; + + ret = fe_set_clock_cycle(priv); + if (ret) + return ret; + + fe_fwd_config(priv); + fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG); + fe_csum_config(priv); + + return ret; +} + +struct fe_soc_data rt2880_data = { + .init_data = rt2880_init_data, + .reset_fe = rt2880_fe_reset, + .fwd_config = rt2880_fwd_config, + .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS, + .checksum_bit = RX_DMA_L4VALID, + .rx_int = FE_RX_DONE_INT, + .tx_int = FE_TX_DONE_INT, + .status_int = FE_CNT_GDM_AF, + .mdio_read = rt2880_mdio_read, + .mdio_write = rt2880_mdio_write, + .mdio_adjust_link = rt2880_mdio_link_adjust, + .port_init = rt2880_port_init, +}; + +const struct of_device_id of_fe_match[] = { + { .compatible = "ralink,rt2880-eth", .data = &rt2880_data }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_fe_match); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt3050.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt3050.c new file mode 100644 index 0000000000..914b81410e --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt3050.c @@ -0,0 +1,158 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "mdio_rt2880.h" + +#define RT305X_RESET_FE BIT(21) +#define RT305X_RESET_ESW BIT(23) + +static const u16 rt5350_reg_table[FE_REG_COUNT] = { + [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, + [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, + [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, + [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, + [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, + [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, + [FE_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0, + [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, + [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, + [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, + [FE_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0, + [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, + [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, + [FE_REG_FE_RST_GL] = 0, + [FE_REG_FE_DMA_VID_BASE] = 0, +}; + +static void rt305x_init_data(struct fe_soc_data *data, + struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + + priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_PADDING_BUG | + FE_FLAG_CALIBRATE_CLK | FE_FLAG_HAS_SWITCH; + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; +} + +static int rt3050_fwd_config(struct fe_priv *priv) +{ + int ret; + + if (ralink_soc != RT305X_SOC_RT3052) { + ret = fe_set_clock_cycle(priv); + if (ret) + return ret; + } + + fe_fwd_config(priv); + if (ralink_soc != RT305X_SOC_RT3352) + fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG); + fe_csum_config(priv); + + return 0; +} + +static void rt305x_fe_reset(void) +{ + fe_reset(RT305X_RESET_FE); +} + +static void rt5350_init_data(struct fe_soc_data *data, + struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + + priv->flags = FE_FLAG_HAS_SWITCH; + netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM; +} + +static void rt5350_set_mac(struct fe_priv *priv, unsigned char *mac) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->page_lock, flags); + fe_w32((mac[0] << 8) | mac[1], RT5350_SDM_MAC_ADRH); + fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], + RT5350_SDM_MAC_ADRL); + spin_unlock_irqrestore(&priv->page_lock, flags); +} + +static void rt5350_rxcsum_config(bool enable) +{ + if (enable) + fe_w32(fe_r32(RT5350_SDM_CFG) | (RT5350_SDM_ICS_EN | + RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN), + RT5350_SDM_CFG); + else + fe_w32(fe_r32(RT5350_SDM_CFG) & ~(RT5350_SDM_ICS_EN | + RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN), + RT5350_SDM_CFG); +} + +static int rt5350_fwd_config(struct fe_priv *priv) +{ + struct net_device *dev = priv_netdev(priv); + + rt5350_rxcsum_config((dev->features & NETIF_F_RXCSUM)); + + return 0; +} + +static void rt5350_tx_dma(struct fe_tx_dma *txd) +{ + txd->txd4 = 0; +} + +static void rt5350_fe_reset(void) +{ + fe_reset(RT305X_RESET_FE | RT305X_RESET_ESW); +} + +static struct fe_soc_data rt3050_data = { + .init_data = rt305x_init_data, + .reset_fe = rt305x_fe_reset, + .fwd_config = rt3050_fwd_config, + .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS, + .checksum_bit = RX_DMA_L4VALID, + .rx_int = FE_RX_DONE_INT, + .tx_int = FE_TX_DONE_INT, + .status_int = FE_CNT_GDM_AF, +}; + +static struct fe_soc_data rt5350_data = { + .init_data = rt5350_init_data, + .reg_table = rt5350_reg_table, + .reset_fe = rt5350_fe_reset, + .set_mac = 
rt5350_set_mac, + .fwd_config = rt5350_fwd_config, + .tx_dma = rt5350_tx_dma, + .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS, + .checksum_bit = RX_DMA_L4VALID, + .rx_int = RT5350_RX_DONE_INT, + .tx_int = RT5350_TX_DONE_INT, +}; + +const struct of_device_id of_fe_match[] = { + { .compatible = "ralink,rt3050-eth", .data = &rt3050_data }, + { .compatible = "ralink,rt5350-eth", .data = &rt5350_data }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_fe_match); diff --git a/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt3883.c b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt3883.c new file mode 100644 index 0000000000..4935b7fbd4 --- /dev/null +++ b/target/linux/ramips/files-4.14/drivers/net/ethernet/mtk/soc_rt3883.c @@ -0,0 +1,75 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> + * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/module.h> + +#include <asm/mach-ralink/ralink_regs.h> + +#include "mtk_eth_soc.h" +#include "mdio_rt2880.h" + +#define RT3883_RSTCTRL_FE BIT(21) + +static void rt3883_fe_reset(void) +{ + fe_reset(RT3883_RSTCTRL_FE); +} + +static int rt3883_fwd_config(struct fe_priv *priv) +{ + int ret; + + ret = fe_set_clock_cycle(priv); + if (ret) + return ret; + + fe_fwd_config(priv); + fe_w32(FE_PSE_FQFC_CFG_256Q, FE_PSE_FQ_CFG); + fe_csum_config(priv); + + return ret; +} + +static void rt3883_init_data(struct fe_soc_data *data, + struct net_device *netdev) +{ + struct fe_priv *priv = netdev_priv(netdev); + + priv->flags = FE_FLAG_PADDING_64B | FE_FLAG_PADDING_BUG | + FE_FLAG_JUMBO_FRAME | FE_FLAG_CALIBRATE_CLK; + netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; +} + +static struct fe_soc_data rt3883_data = { + .init_data = rt3883_init_data, + .reset_fe = rt3883_fe_reset, + .fwd_config = rt3883_fwd_config, + .pdma_glo_cfg = FE_PDMA_SIZE_8DWORDS, + .rx_int = FE_RX_DONE_INT, + .tx_int = FE_TX_DONE_INT, + .status_int = FE_CNT_GDM_AF, + .checksum_bit = RX_DMA_L4VALID, + .mdio_read = rt2880_mdio_read, + .mdio_write = rt2880_mdio_write, + .mdio_adjust_link = rt2880_mdio_link_adjust, + .port_init = rt2880_port_init, +}; + +const struct of_device_id of_fe_match[] = { + { .compatible = "ralink,rt3883-eth", .data = &rt3883_data }, + {}, +}; + +MODULE_DEVICE_TABLE(of, of_fe_match); diff --git a/target/linux/ramips/mt7620/config-4.14 b/target/linux/ramips/mt7620/config-4.14 new file mode 100644 index 0000000000..49263248be --- /dev/null +++ b/target/linux/ramips/mt7620/config-4.14 @@ -0,0 +1,240 @@ +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +# CONFIG_ARCH_HAS_SG_CHAIN is not set +# CONFIG_ARCH_HAS_STRICT_KERNEL_RWX is not set +# CONFIG_ARCH_HAS_STRICT_MODULE_RWX is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_ARCH_MMAP_RND_BITS_MAX=15 
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15 +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_AT803X_PHY=y +CONFIG_BLK_MQ_PCI=y +CONFIG_CEVT_R4K=y +CONFIG_CEVT_SYSTICK_QUIRK=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKEVT_RT3352=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMDLINE="rootfstype=squashfs,jffs2" +CONFIG_CMDLINE_BOOL=y +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_CPU_GENERIC_DUMP_TLB=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_RIXI=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32=y +# CONFIG_CPU_MIPS32_R1 is not set +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y +CONFIG_CPU_R4K_CACHE_TLB=y +CONFIG_CPU_R4K_FPU=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CPU_SUPPORTS_MSA=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_DEBUG_PINCTRL=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +# CONFIG_DRM_LIB_RANDOM is not set +# CONFIG_DTB_MT7620A_EVAL is not set +# CONFIG_DTB_OMEGA2P is not set +CONFIG_DTB_RT_NONE=y +# CONFIG_DTB_VOCORE2 is not set +CONFIG_DTC=y +CONFIG_EARLY_PRINTK=y +CONFIG_EXPORTFS=y +CONFIG_FIXED_PHY=y +CONFIG_FUTEX_PI=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +# CONFIG_GPIO_MT7621 is not set +CONFIG_GPIO_RALINK=y +CONFIG_GPIO_SYSFS=y +# CONFIG_GRO_CELLS is not set +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HW_HAS_PCI=y +CONFIG_HZ_PERIODIC=y +CONFIG_ICPLUS_PHY=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_INTC=y +CONFIG_IRQ_MIPS_CPU=y +CONFIG_IRQ_WORK=y +CONFIG_LIBFDT=y 
+CONFIG_MDIO_BUS=y +CONFIG_MDIO_DEVICE=y +CONFIG_MFD_SYSCON=y +CONFIG_MIPS=y +CONFIG_MIPS_ASID_BITS=8 +CONFIG_MIPS_ASID_SHIFT=0 +CONFIG_MIPS_CLOCK_VSYSCALL=y +# CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND is not set +# CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_MIPS_CMDLINE_FROM_DTB=y +# CONFIG_MIPS_ELF_APPENDED_DTB is not set +# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_MIPS_MACHINE is not set +CONFIG_MIPS_NO_APPENDED_DTB=y +# CONFIG_MIPS_RAW_APPENDED_DTB is not set +CONFIG_MIPS_SPRAM=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MT7621_WDT is not set +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=16384 +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_SEAMA_FW=y +CONFIG_MTD_SPLIT_TPLINK_FW=y +CONFIG_MTD_SPLIT_UIMAGE_FW=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_KM=y +CONFIG_NET_MEDIATEK_GSW_MT7620=y +CONFIG_NET_MEDIATEK_MDIO=y +CONFIG_NET_MEDIATEK_MDIO_MT7620=y +CONFIG_NET_MEDIATEK_MT7620=y +# CONFIG_NET_MEDIATEK_RT3050 is not set +CONFIG_NET_MEDIATEK_SOC=y +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y +# CONFIG_NO_IOPORT_MAP is not set +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DRIVERS_LEGACY=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +CONFIG_PHY_RALINK_USB=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_RT2880=y +# CONFIG_PINCTRL_SINGLE is not set +CONFIG_RALINK=y +CONFIG_RALINK_WDT=y +# CONFIG_RCU_NEED_SEGCBLIST is not set +# CONFIG_RCU_STALL_COMMON is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_MMIO=y +CONFIG_RESET_CONTROLLER=y +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250_FSL is not set +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SOC_MT7620=y +# CONFIG_SOC_MT7621 is not set +# CONFIG_SOC_RT288X is not set +# CONFIG_SOC_RT305X is not set +# CONFIG_SOC_RT3883 is not set +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MT7621 is not set +CONFIG_SPI_RT2880=y +CONFIG_SRCU=y +CONFIG_SWCONFIG=y +CONFIG_SWCONFIG_LEDS=y +CONFIG_SWPHY=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y +CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y +CONFIG_SYS_SUPPORTS_MIPS16=y +CONFIG_THIN_ARCHIVES=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TIMER_OF=y +CONFIG_TIMER_PROBE=y +CONFIG_TINY_SRCU=y +CONFIG_USB_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_WATCHDOG_CORE=y diff --git a/target/linux/ramips/mt7621/config-4.14 b/target/linux/ramips/mt7621/config-4.14 new file mode 100644 index 0000000000..3a5cd4296a --- /dev/null +++ b/target/linux/ramips/mt7621/config-4.14 @@ -0,0 +1,286 @@ +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +# CONFIG_ARCH_HAS_SG_CHAIN is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y 
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_BLK_MQ_PCI=y +CONFIG_BOARD_SCACHE=y +CONFIG_BOUNCE=y +CONFIG_CEVT_R4K=y +# CONFIG_CEVT_SYSTICK_QUIRK is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKSRC_MIPS_GIC=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMDLINE="rootfstype=squashfs,jffs2" +CONFIG_CMDLINE_BOOL=y +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_COMMON_CLK=y +# CONFIG_COMMON_CLK_BOSTON is not set +CONFIG_CPU_GENERIC_DUMP_TLB=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_RIXI=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32=y +# CONFIG_CPU_MIPS32_R1 is not set +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_MIPSR2_IRQ_EI=y +CONFIG_CPU_MIPSR2_IRQ_VI=y +CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y +CONFIG_CPU_R4K_CACHE_TLB=y +CONFIG_CPU_R4K_FPU=y +CONFIG_CPU_RMAP=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CPU_SUPPORTS_MSA=y +CONFIG_CRC16=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_DEBUG_PINCTRL=y +CONFIG_DMA_NONCOHERENT=y +CONFIG_DTB_RT_NONE=y +CONFIG_DTC=y +CONFIG_EARLY_PRINTK=y +CONFIG_FIXED_PHY=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_MT7621=y +# CONFIG_GPIO_RALINK is not set +CONFIG_GPIO_SYSFS=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HIGHMEM=y +CONFIG_HW_HAS_PCI=y +CONFIG_HZ_PERIODIC=y +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_MT7621=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_MIPS_CPU=y +CONFIG_IRQ_WORK=y +CONFIG_LIBFDT=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_MDIO_BOARDINFO=y +CONFIG_MIPS=y +CONFIG_MIPS_ASID_BITS=8 +CONFIG_MIPS_ASID_SHIFT=0 +CONFIG_MIPS_CLOCK_VSYSCALL=y +CONFIG_MIPS_CM=y +# CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND is not set +# 
CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_MIPS_CMDLINE_FROM_DTB=y +CONFIG_MIPS_CPC=y +CONFIG_MIPS_CPS=y +# CONFIG_MIPS_CPS_NS16550 is not set +CONFIG_MIPS_CPU_SCACHE=y +# CONFIG_MIPS_ELF_APPENDED_DTB is not set +CONFIG_MIPS_GIC=y +# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_MIPS_MACHINE is not set +CONFIG_MIPS_MT=y +CONFIG_MIPS_MT_FPAFF=y +CONFIG_MIPS_MT_SMP=y +CONFIG_MIPS_NO_APPENDED_DTB=y +CONFIG_MIPS_PERF_SHARED_TC_COUNTERS=y +# CONFIG_MIPS_RAW_APPENDED_DTB is not set +CONFIG_MIPS_SPRAM=y +# CONFIG_MIPS_VPE_LOADER is not set +CONFIG_MODULES_USE_ELF_REL=y +CONFIG_MT7621_WDT=y +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_SEAMA_FW=y +CONFIG_MTD_SPLIT_TPLINK_FW=y +CONFIG_MTD_SPLIT_TRX_FW=y +CONFIG_MTD_SPLIT_UIMAGE_FW=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTK_MTD_NAND=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_MEDIATEK_GSW_MT7621=y +CONFIG_NET_MEDIATEK_MDIO=y +CONFIG_NET_MEDIATEK_MDIO_MT7620=y +CONFIG_NET_MEDIATEK_MT7621=y +CONFIG_NET_MEDIATEK_SOC=y +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y +# CONFIG_NO_IOPORT_MAP is not set +CONFIG_NR_CPUS=4 +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_PADATA=y +CONFIG_PCI=y +CONFIG_PCI_DISABLE_COMMON_QUIRKS=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DRIVERS_LEGACY=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +# CONFIG_PHY_RALINK_USB is not set +CONFIG_PINCTRL=y +CONFIG_PINCTRL_RT2880=y +# CONFIG_PINCTRL_SINGLE is not set +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_SUPPLY=y +CONFIG_RALINK=y +# CONFIG_RALINK_WDT is not set +CONFIG_RATIONAL=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPI=y +CONFIG_RESET_CONTROLLER=y +CONFIG_RFS_ACCEL=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PCF8563=y +CONFIG_RTC_I2C_AND_SPI=y +CONFIG_RTC_MC146818_LIB=y +# CONFIG_SCHED_INFO is not set +CONFIG_SCHED_SMT=y +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250_FSL is not set +CONFIG_SERIAL_8250_NR_UARTS=3 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SMP=y +CONFIG_SMP_UP=y +# CONFIG_SOC_MT7620 is not set +CONFIG_SOC_MT7621=y +# CONFIG_SOC_RT288X is not set +# CONFIG_SOC_RT305X is not set +# CONFIG_SOC_RT3883 is not set +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +CONFIG_SPI_MT7621=y +# CONFIG_SPI_RT2880 is not set +CONFIG_SRCU=y +CONFIG_SWCONFIG_LEDS=y +CONFIG_SWCONFIG=y +CONFIG_SWPHY=y +CONFIG_SYNC_R4K=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y +CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_HIGHMEM=y +CONFIG_SYS_SUPPORTS_HOTPLUG_CPU=y +CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y +CONFIG_SYS_SUPPORTS_MIPS16=y +CONFIG_SYS_SUPPORTS_MIPS_CPS=y +CONFIG_SYS_SUPPORTS_MULTITHREADING=y +CONFIG_SYS_SUPPORTS_SCHED_SMT=y +CONFIG_SYS_SUPPORTS_SMP=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TREE_RCU=y +CONFIG_UBIFS_FS=y 
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_USB_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WEAK_ORDERING=y +CONFIG_XPS=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_ZLIB_INFLATE=y diff --git a/target/linux/ramips/mt76x8/config-4.14 b/target/linux/ramips/mt76x8/config-4.14 new file mode 100644 index 0000000000..56e734d0d3 --- /dev/null +++ b/target/linux/ramips/mt76x8/config-4.14 @@ -0,0 +1,237 @@ +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +# CONFIG_ARCH_HAS_SG_CHAIN is not set +# CONFIG_ARCH_HAS_STRICT_KERNEL_RWX is not set +# CONFIG_ARCH_HAS_STRICT_MODULE_RWX is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_ARCH_MMAP_RND_BITS_MAX=15 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15 +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_AT803X_PHY=y +CONFIG_BLK_MQ_PCI=y +CONFIG_CEVT_R4K=y +CONFIG_CEVT_SYSTICK_QUIRK=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKEVT_RT3352=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMDLINE="rootfstype=squashfs,jffs2" +CONFIG_CMDLINE_BOOL=y +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_CPU_GENERIC_DUMP_TLB=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_RIXI=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32=y +# CONFIG_CPU_MIPS32_R1 is not set +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y +CONFIG_CPU_R4K_CACHE_TLB=y +CONFIG_CPU_R4K_FPU=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CPU_SUPPORTS_MSA=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_DEBUG_PINCTRL=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +# CONFIG_DRM_LIB_RANDOM is not set +# CONFIG_DTB_MT7620A_EVAL is not set +# CONFIG_DTB_OMEGA2P is not set +CONFIG_DTB_RT_NONE=y +# CONFIG_DTB_VOCORE2 is not set +CONFIG_DTC=y +CONFIG_EARLY_PRINTK=y +CONFIG_EXPORTFS=y +CONFIG_FIXED_PHY=y +CONFIG_FUTEX_PI=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_MT7621=y +# CONFIG_GPIO_RALINK is not set +CONFIG_GPIO_SYSFS=y +# CONFIG_GRO_CELLS is not set +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_C_RECORDMCOUNT=y 
+CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HW_HAS_PCI=y +CONFIG_HZ_PERIODIC=y +CONFIG_ICPLUS_PHY=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_INTC=y +CONFIG_IRQ_MIPS_CPU=y +CONFIG_IRQ_WORK=y +CONFIG_LIBFDT=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_DEVICE=y +CONFIG_MFD_SYSCON=y +CONFIG_MIPS=y +CONFIG_MIPS_ASID_BITS=8 +CONFIG_MIPS_ASID_SHIFT=0 +CONFIG_MIPS_CLOCK_VSYSCALL=y +# CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND is not set +# CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_MIPS_CMDLINE_FROM_DTB=y +# CONFIG_MIPS_ELF_APPENDED_DTB is not set +# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_MIPS_MACHINE is not set +CONFIG_MIPS_NO_APPENDED_DTB=y +# CONFIG_MIPS_RAW_APPENDED_DTB is not set +CONFIG_MIPS_SPRAM=y +CONFIG_MODULES_USE_ELF_REL=y +CONFIG_MT7621_WDT=y +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_TPLINK_FW=y +CONFIG_MTD_SPLIT_TRX_FW=y +CONFIG_MTD_SPLIT_UIMAGE_FW=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_KM=y +CONFIG_NET_MEDIATEK_ESW_RT3050=y +# CONFIG_NET_MEDIATEK_MT7620 is not set +CONFIG_NET_MEDIATEK_RT3050=y +CONFIG_NET_MEDIATEK_SOC=y +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y +# CONFIG_NO_IOPORT_MAP is not set +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DRIVERS_LEGACY=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +CONFIG_PHY_RALINK_USB=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_RT2880=y +# CONFIG_PINCTRL_SINGLE is not set +CONFIG_RALINK=y +# CONFIG_RALINK_WDT is not set +# CONFIG_RCU_NEED_SEGCBLIST is not set +# CONFIG_RCU_STALL_COMMON is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_MMIO=y +CONFIG_RESET_CONTROLLER=y +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250_FSL is not set +CONFIG_SERIAL_8250_NR_UARTS=3 +CONFIG_SERIAL_8250_RUNTIME_UARTS=3 +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SOC_MT7620=y +# CONFIG_SOC_MT7621 is not set +# CONFIG_SOC_RT288X is not set +# CONFIG_SOC_RT305X is not set +# CONFIG_SOC_RT3883 is not set +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +CONFIG_SPI_MT7621=y +# CONFIG_SPI_RT2880 is not set +CONFIG_SRCU=y +CONFIG_SWCONFIG=y +CONFIG_SWCONFIG_LEDS=y +CONFIG_SWPHY=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y +CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y +CONFIG_SYS_SUPPORTS_MIPS16=y 
+CONFIG_THIN_ARCHIVES=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TIMER_OF=y +CONFIG_TIMER_PROBE=y +CONFIG_TINY_SRCU=y +CONFIG_USB_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_WATCHDOG_CORE=y diff --git a/target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch b/target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch new file mode 100644 index 0000000000..0c96d6b984 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0004-MIPS-ralink-add-MT7621-pcie-driver.patch @@ -0,0 +1,861 @@ +From fec11d4e8dc5cc79bcd7c8fd55038ac21ac39965 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 16 Mar 2014 05:22:39 +0000 +Subject: [PATCH 04/53] MIPS: ralink: add MT7621 pcie driver + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/pci/Makefile | 1 + + arch/mips/pci/pci-mt7621.c | 813 ++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 814 insertions(+) + create mode 100644 arch/mips/pci/pci-mt7621.c + +--- a/arch/mips/pci/Makefile ++++ b/arch/mips/pci/Makefile +@@ -47,6 +47,7 @@ obj-$(CONFIG_SNI_RM) += fixup-sni.o ops + obj-$(CONFIG_LANTIQ) += fixup-lantiq.o + obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o + obj-$(CONFIG_SOC_MT7620) += pci-mt7620.o ++obj-$(CONFIG_SOC_MT7621) += pci-mt7621.o + obj-$(CONFIG_SOC_RT288X) += pci-rt2880.o + obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o + obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o +--- /dev/null ++++ b/arch/mips/pci/pci-mt7621.c +@@ -0,0 +1,836 @@ ++/************************************************************************** ++ * ++ * BRIEF MODULE DESCRIPTION ++ * PCI init for Ralink RT2880 solution ++ * ++ * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw) ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * ++ * ++ ************************************************************************** ++ * May 2007 Bruce Chang ++ * Initial Release ++ * ++ * May 2009 Bruce Chang ++ * support RT2880/RT3883 PCIe ++ * ++ * May 2011 Bruce Chang ++ * support RT6855/MT7620 PCIe ++ * ++ ************************************************************************** ++ */ ++ ++#include <linux/types.h> ++#include <linux/pci.h> ++#include <linux/kernel.h> ++#include <linux/slab.h> ++#include <linux/version.h> ++#include <asm/pci.h> ++#include <asm/io.h> ++#include <asm/mips-cm.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/delay.h> ++#include <linux/of.h> ++#include <linux/of_pci.h> ++#include <linux/of_irq.h> ++#include <linux/platform_device.h> ++ ++#include <ralink_regs.h> ++ ++extern void pcie_phy_init(void); ++extern void chk_phy_pll(void); ++ ++/* ++ * These functions and structures provide the BIOS scan and mapping of the PCI ++ * devices. ++ */ ++ ++#define CONFIG_PCIE_PORT0 ++#define CONFIG_PCIE_PORT1 ++#define CONFIG_PCIE_PORT2 ++#define RALINK_PCIE0_CLK_EN (1<<24) ++#define RALINK_PCIE1_CLK_EN (1<<25) ++#define RALINK_PCIE2_CLK_EN (1<<26) ++ ++#define RALINK_PCI_CONFIG_ADDR 0x20 ++#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24 ++#define RALINK_INT_PCIE0 pcie_irq[0] ++#define RALINK_INT_PCIE1 pcie_irq[1] ++#define RALINK_INT_PCIE2 pcie_irq[2] ++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) ++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) ++#define RALINK_PCIE0_RST (1<<24) ++#define RALINK_PCIE1_RST (1<<25) ++#define RALINK_PCIE2_RST (1<<26) ++#define RALINK_SYSCTL_BASE 0xBE000000 ++ ++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) ++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) ++#define RALINK_PCI_BASE 0xBE140000 ++ ++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) ++#define RT6855_PCIE0_OFFSET 0x2000 ++#define RT6855_PCIE1_OFFSET 0x3000 ++#define RT6855_PCIE2_OFFSET 0x4000 ++ ++#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010) ++#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018) ++#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030) ++#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034) ++#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038) ++#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050) ++#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060) ++#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064) ++ ++#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010) ++#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018) ++#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030) ++#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034) ++#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038) ++#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050) ++#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060) ++#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + 
RT6855_PCIE1_OFFSET + 0x0064) ++ ++#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010) ++#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018) ++#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030) ++#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034) ++#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038) ++#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050) ++#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060) ++#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064) ++ ++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) ++#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000) ++ ++ ++#define MV_WRITE(ofs, data) \ ++ *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data) ++#define MV_READ(ofs, data) \ ++ *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) ++#define MV_READ_DATA(ofs) \ ++ le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) ++ ++#define MV_WRITE_16(ofs, data) \ ++ *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data) ++#define MV_READ_16(ofs, data) \ ++ *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs))) ++ ++#define MV_WRITE_8(ofs, data) \ ++ *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data ++#define MV_READ_8(ofs, data) \ ++ *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) ++ ++ ++ ++#define RALINK_PCI_MM_MAP_BASE 0x60000000 ++#define RALINK_PCI_IO_MAP_BASE 0x1e160000 ++ ++#define RALINK_SYSTEM_CONTROL_BASE 0xbe000000 ++#define GPIO_PERST ++#define ASSERT_SYSRST_PCIE(val) do { \ ++ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \ ++ RALINK_RSTCTRL |= val; \ ++ else \ ++ RALINK_RSTCTRL &= ~val; \ ++ } while(0) ++#define DEASSERT_SYSRST_PCIE(val) do { \ ++ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \ ++ RALINK_RSTCTRL &= ~val; \ ++ else \ ++ RALINK_RSTCTRL |= val; \ ++ } while(0) ++#define RALINK_SYSCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x14) ++#define RALINK_CLKCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x30) ++#define RALINK_RSTCTRL *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x34) ++#define RALINK_GPIOMODE *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x60) ++#define RALINK_PCIE_CLK_GEN *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x7c) ++#define RALINK_PCIE_CLK_GEN1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x80) ++#define PPLL_CFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x9c) ++#define PPLL_DRV *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0xa0) ++//RALINK_SYSCFG1 bit ++#define RALINK_PCI_HOST_MODE_EN (1<<7) ++#define RALINK_PCIE_RC_MODE_EN (1<<8) ++//RALINK_RSTCTRL bit ++#define RALINK_PCIE_RST (1<<23) ++#define RALINK_PCI_RST (1<<24) ++//RALINK_CLKCFG1 bit ++#define RALINK_PCI_CLK_EN (1<<19) ++#define RALINK_PCIE_CLK_EN (1<<21) ++//RALINK_GPIOMODE bit ++#define PCI_SLOTx2 (1<<11) ++#define PCI_SLOTx1 (2<<11) ++//MTK PCIE PLL bit ++#define PDRV_SW_SET (1<<31) ++#define LC_CKDRVPD_ (1<<19) ++ ++#define MEMORY_BASE 0x0 ++static int pcie_link_status = 0; ++ ++#define PCI_ACCESS_READ_1 0 ++#define PCI_ACCESS_READ_2 1 ++#define PCI_ACCESS_READ_4 2 ++#define PCI_ACCESS_WRITE_1 3 ++#define PCI_ACCESS_WRITE_2 4 ++#define PCI_ACCESS_WRITE_4 5 ++ ++static int pcie_irq[3]; ++ ++static int config_access(unsigned char access_type, struct pci_bus 
*bus, ++ unsigned int devfn, unsigned int where, u32 * data) ++{ ++ unsigned int slot = PCI_SLOT(devfn); ++ u8 func = PCI_FUNC(devfn); ++ uint32_t address_reg, data_reg; ++ unsigned int address; ++ ++ address_reg = RALINK_PCI_CONFIG_ADDR; ++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; ++ ++ address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; ++ MV_WRITE(address_reg, address); ++ ++ switch(access_type) { ++ case PCI_ACCESS_WRITE_1: ++ MV_WRITE_8(data_reg+(where&0x3), *data); ++ break; ++ case PCI_ACCESS_WRITE_2: ++ MV_WRITE_16(data_reg+(where&0x3), *data); ++ break; ++ case PCI_ACCESS_WRITE_4: ++ MV_WRITE(data_reg, *data); ++ break; ++ case PCI_ACCESS_READ_1: ++ MV_READ_8( data_reg+(where&0x3), data); ++ break; ++ case PCI_ACCESS_READ_2: ++ MV_READ_16(data_reg+(where&0x3), data); ++ break; ++ case PCI_ACCESS_READ_4: ++ MV_READ(data_reg, data); ++ break; ++ default: ++ printk("no specify access type\n"); ++ break; ++ } ++ return 0; ++} ++ ++static int ++read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val) ++{ ++ return config_access(PCI_ACCESS_READ_1, bus, devfn, (unsigned int)where, (u32 *)val); ++} ++ ++static int ++read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val) ++{ ++ return config_access(PCI_ACCESS_READ_2, bus, devfn, (unsigned int)where, (u32 *)val); ++} ++ ++static int ++read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val) ++{ ++ return config_access(PCI_ACCESS_READ_4, bus, devfn, (unsigned int)where, (u32 *)val); ++} ++ ++static int ++write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val) ++{ ++ if (config_access(PCI_ACCESS_WRITE_1, bus, devfn, (unsigned int)where, (u32 *)&val)) ++ return -1; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ++write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val) ++{ ++ if (config_access(PCI_ACCESS_WRITE_2, bus, devfn, where, (u32 *)&val)) ++ return -1; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ++write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val) ++{ ++ if (config_access(PCI_ACCESS_WRITE_4, bus, devfn, where, &val)) ++ return -1; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++ ++static int ++pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) ++{ ++ switch (size) { ++ case 1: ++ return read_config_byte(bus, devfn, where, (u8 *) val); ++ case 2: ++ return read_config_word(bus, devfn, where, (u16 *) val); ++ default: ++ return read_config_dword(bus, devfn, where, val); ++ } ++} ++ ++static int ++pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) ++{ ++ switch (size) { ++ case 1: ++ return write_config_byte(bus, devfn, where, (u8) val); ++ case 2: ++ return write_config_word(bus, devfn, where, (u16) val); ++ default: ++ return write_config_dword(bus, devfn, where, val); ++ } ++} ++ ++struct pci_ops mt7621_pci_ops= { ++ .read = pci_config_read, ++ .write = pci_config_write, ++}; ++ ++static struct resource mt7621_res_pci_mem1 = { ++ .name = "PCI MEM1", ++ .start = RALINK_PCI_MM_MAP_BASE, ++ .end = (u32)((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)), ++ .flags = IORESOURCE_MEM, ++}; ++static struct resource mt7621_res_pci_io1 = { ++ .name = "PCI I/O1", ++ .start = RALINK_PCI_IO_MAP_BASE, ++ .end = (u32)((RALINK_PCI_IO_MAP_BASE + (unsigned char *)0x0ffff)), ++ .flags = IORESOURCE_IO, ++}; ++ ++static struct pci_controller mt7621_controller 
= { ++ .pci_ops = &mt7621_pci_ops, ++ .mem_resource = &mt7621_res_pci_mem1, ++ .io_resource = &mt7621_res_pci_io1, ++ .mem_offset = 0x00000000UL, ++ .io_offset = 0x00000000UL, ++ .io_map_base = 0xa0000000, ++}; ++ ++static void ++read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val) ++{ ++ unsigned int address_reg, data_reg, address; ++ ++ address_reg = RALINK_PCI_CONFIG_ADDR; ++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; ++ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ; ++ MV_WRITE(address_reg, address); ++ MV_READ(data_reg, val); ++ return; ++} ++ ++static void ++write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val) ++{ ++ unsigned int address_reg, data_reg, address; ++ ++ address_reg = RALINK_PCI_CONFIG_ADDR; ++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; ++ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ; ++ MV_WRITE(address_reg, address); ++ MV_WRITE(data_reg, val); ++ return; ++} ++ ++ ++int __init ++pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ u16 cmd; ++ u32 val; ++ int irq = 0; ++ ++ if ((dev->bus->number == 0) && (slot == 0)) { ++ write_config(0, 0, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); ++ read_config(0, 0, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); ++ printk("BAR0 at slot 0 = %x\n", val); ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ } else if((dev->bus->number == 0) && (slot == 0x1)) { ++ write_config(0, 1, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); ++ read_config(0, 1, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); ++ printk("BAR0 at slot 1 = %x\n", val); ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ } else if((dev->bus->number == 0) && (slot == 0x2)) { ++ write_config(0, 2, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); ++ read_config(0, 2, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); ++ printk("BAR0 at slot 2 = %x\n", val); ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ } else if ((dev->bus->number == 1) && (slot == 0x0)) { ++ switch (pcie_link_status) { ++ case 2: ++ case 6: ++ irq = RALINK_INT_PCIE1; ++ break; ++ case 4: ++ irq = RALINK_INT_PCIE2; ++ break; ++ default: ++ irq = RALINK_INT_PCIE0; ++ } ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number == 2) && (slot == 0x0)) { ++ switch (pcie_link_status) { ++ case 5: ++ case 6: ++ irq = RALINK_INT_PCIE2; ++ break; ++ default: ++ irq = RALINK_INT_PCIE1; ++ } ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number == 2) && (slot == 0x1)) { ++ switch (pcie_link_status) { ++ case 5: ++ case 6: ++ irq = RALINK_INT_PCIE2; ++ break; ++ default: ++ irq = RALINK_INT_PCIE1; ++ } ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number ==3) && (slot == 0x0)) { ++ irq = RALINK_INT_PCIE2; ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number ==3) && (slot == 0x1)) { ++ irq = RALINK_INT_PCIE2; ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number ==3) && (slot == 0x2)) { ++ irq = RALINK_INT_PCIE2; ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else { ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ 
return 0; ++ } ++ ++ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14 ++ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10 ++ pci_read_config_word(dev, PCI_COMMAND, &cmd); ++ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; ++ pci_write_config_word(dev, PCI_COMMAND, cmd); ++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); ++ return irq; ++} ++ ++void ++set_pcie_phy(u32 *addr, int start_b, int bits, int val) ++{ ++// printk("0x%p:", addr); ++// printk(" %x", *addr); ++ *(unsigned int *)(addr) &= ~(((1<<bits) - 1)<<start_b); ++ *(unsigned int *)(addr) |= val << start_b; ++// printk(" -> %x\n", *addr); ++} ++ ++void ++bypass_pipe_rst(void) ++{ ++#if defined (CONFIG_PCIE_PORT0) ++ /* PCIe Port 0 */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ /* PCIe Port 1 */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ /* PCIe Port 2 */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] ++#endif ++} ++ ++void ++set_phy_for_ssc(void) ++{ ++ unsigned long reg = (*(volatile u32 *)(RALINK_SYSCTL_BASE + 0x10)); ++ ++ reg = (reg >> 6) & 0x7; ++#if defined (CONFIG_PCIE_PORT0) || defined (CONFIG_PCIE_PORT1) ++ /* Set PCIe Port0 & Port1 PHY to disable SSC */ ++ /* Debug Xtal Type */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ printk("***** Xtal 40MHz *****\n"); ++ } else { // 25MHz | 20MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ if (reg >= 6) { ++ printk("***** Xtal 25MHz *****\n"); ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode) ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control ++ set_pcie_phy((u32 
*)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial ++ } else { ++ printk("***** Xtal 20MHz *****\n"); ++ } ++ } ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv ++ } ++ /* Enable PHY and disable force mode */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ /* Set PCIe Port2 PHY to disable SSC */ ++ /* Debug Xtal Type */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ } else { // 25MHz | 20MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ if (reg >= 6) { // 25MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode) ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial ++ } ++ } ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion ++ set_pcie_phy((u32 
*)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv ++ } ++ /* Enable PHY and disable force mode */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control ++#endif ++} ++ ++void setup_cm_memory_region(struct resource *mem_resource) ++{ ++ resource_size_t mask; ++ if (mips_cps_numiocu(0)) { ++ /* FIXME: hardware doesn't accept mask values with 1s after ++ 0s (e.g. 0xffef), so it would be great to warn if that's ++ about to happen */ ++ mask = ~(mem_resource->end - mem_resource->start); ++ ++ write_gcr_reg1_base(mem_resource->start); ++ write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0); ++ printk("PCI coherence region base: 0x%08lx, mask/settings: 0x%08lx\n", ++ read_gcr_reg1_base(), ++ read_gcr_reg1_mask()); ++ } ++} ++ ++static int mt7621_pci_probe(struct platform_device *pdev) ++{ ++ unsigned long val = 0; ++ int i; ++ ++ for (i = 0; i < 3; i++) ++ pcie_irq[i] = irq_of_parse_and_map(pdev->dev.of_node, i); ++ ++ iomem_resource.start = 0; ++ iomem_resource.end= ~0; ++ ioport_resource.start= 0; ++ ioport_resource.end = ~0; ++ ++#if defined (CONFIG_PCIE_PORT0) ++ val = RALINK_PCIE0_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ val |= RALINK_PCIE1_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ val |= RALINK_PCIE2_RST; ++#endif ++ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST | RALINK_PCIE1_RST | RALINK_PCIE2_RST); ++ printk("pull PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); ++#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/ ++ *(unsigned int *)(0xbe000060) &= ~(0x3<<10 | 0x3<<3); ++ *(unsigned int *)(0xbe000060) |= 0x1<<10 | 0x1<<3; ++ mdelay(100); ++ *(unsigned int *)(0xbe000600) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // use GPIO19/GPIO8/GPIO7 (PERST_N/UART_RXD3/UART_TXD3) ++ mdelay(100); ++ *(unsigned int *)(0xbe000620) &= ~(0x1<<19 | 0x1<<8 | 0x1<<7); // clear DATA ++ ++ mdelay(100); ++#else ++ *(unsigned int *)(0xbe000060) &= ~0x00000c00; ++#endif ++#if defined (CONFIG_PCIE_PORT0) ++ val = RALINK_PCIE0_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ val |= RALINK_PCIE1_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ val |= RALINK_PCIE2_RST; ++#endif ++ DEASSERT_SYSRST_PCIE(val); ++ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); ++ ++ if ((*(unsigned int *)(0xbe00000c)&0xFFFF) == 0x0101) // MT7621 E2 ++ bypass_pipe_rst(); ++ set_phy_for_ssc(); ++ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); ++ ++#if defined (CONFIG_PCIE_PORT0) ++ read_config(0, 0, 0, 0x70c, &val); ++ printk("Port 0 N_FTS = %x\n", (unsigned int)val); 
++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ read_config(0, 1, 0, 0x70c, &val); ++ printk("Port 1 N_FTS = %x\n", (unsigned int)val); ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ read_config(0, 2, 0, 0x70c, &val); ++ printk("Port 2 N_FTS = %x\n", (unsigned int)val); ++#endif ++ ++ RALINK_RSTCTRL = (RALINK_RSTCTRL | RALINK_PCIE_RST); ++ RALINK_SYSCFG1 &= ~(0x30); ++ RALINK_SYSCFG1 |= (2<<4); ++ RALINK_PCIE_CLK_GEN &= 0x7fffffff; ++ RALINK_PCIE_CLK_GEN1 &= 0x80ffffff; ++ RALINK_PCIE_CLK_GEN1 |= 0xa << 24; ++ RALINK_PCIE_CLK_GEN |= 0x80000000; ++ mdelay(50); ++ RALINK_RSTCTRL = (RALINK_RSTCTRL & ~RALINK_PCIE_RST); ++ ++ ++#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/ ++ *(unsigned int *)(0xbe000620) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // set DATA ++ mdelay(100); ++#else ++ RALINK_PCI_PCICFG_ADDR &= ~(1<<1); //de-assert PERST ++#endif ++ mdelay(500); ++ ++ ++ mdelay(500); ++#if defined (CONFIG_PCIE_PORT0) ++ if(( RALINK_PCI0_STATUS & 0x1) == 0) ++ { ++ printk("PCIE0 no card, disable it(RST&CLK)\n"); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST); ++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE0_CLK_EN); ++ pcie_link_status &= ~(1<<0); ++ } else { ++ pcie_link_status |= 1<<0; ++ RALINK_PCI_PCIMSK_ADDR |= (1<<20); // enable pcie1 interrupt ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ if(( RALINK_PCI1_STATUS & 0x1) == 0) ++ { ++ printk("PCIE1 no card, disable it(RST&CLK)\n"); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE1_RST); ++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE1_CLK_EN); ++ pcie_link_status &= ~(1<<1); ++ } else { ++ pcie_link_status |= 1<<1; ++ RALINK_PCI_PCIMSK_ADDR |= (1<<21); // enable pcie1 interrupt ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ if (( RALINK_PCI2_STATUS & 0x1) == 0) { ++ printk("PCIE2 no card, disable it(RST&CLK)\n"); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE2_RST); ++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE2_CLK_EN); ++ pcie_link_status &= ~(1<<2); ++ } else { ++ pcie_link_status |= 1<<2; ++ RALINK_PCI_PCIMSK_ADDR |= (1<<22); // enable pcie2 interrupt ++ } ++#endif ++ if (pcie_link_status == 0) ++ return 0; ++ ++/* ++pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num ++3'b000 x x x ++3'b001 x x 0 ++3'b010 x 0 x ++3'b011 x 1 0 ++3'b100 0 x x ++3'b101 1 x 0 ++3'b110 1 0 x ++3'b111 2 1 0 ++*/ ++ switch(pcie_link_status) { ++ case 2: ++ RALINK_PCI_PCICFG_ADDR &= ~0x00ff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1 ++ break; ++ case 4: ++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1 ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 24; //port2 ++ break; ++ case 5: ++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1 ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2 ++ break; ++ case 6: ++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1 ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2 ++ break; ++ } ++ printk(" -> %x\n", RALINK_PCI_PCICFG_ADDR); ++ //printk(" RALINK_PCI_ARBCTL = %x\n", RALINK_PCI_ARBCTL); ++ ++/* ++ ioport_resource.start = mt7621_res_pci_io1.start; ++ ioport_resource.end = mt7621_res_pci_io1.end; ++*/ ++ ++ RALINK_PCI_MEMBASE = 0xffffffff; //RALINK_PCI_MM_MAP_BASE; ++ RALINK_PCI_IOBASE = RALINK_PCI_IO_MAP_BASE; ++ ++#if defined (CONFIG_PCIE_PORT0) ++ //PCIe0 ++ if((pcie_link_status & 0x1) != 0) { ++ RALINK_PCI0_BAR0SETUP_ADDR = 
0x7FFF0001; //open 7FFF:2G; ENABLE ++ RALINK_PCI0_IMBASEBAR0_ADDR = MEMORY_BASE; ++ RALINK_PCI0_CLASS = 0x06040001; ++ printk("PCIE0 enabled\n"); ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ //PCIe1 ++ if ((pcie_link_status & 0x2) != 0) { ++ RALINK_PCI1_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE ++ RALINK_PCI1_IMBASEBAR0_ADDR = MEMORY_BASE; ++ RALINK_PCI1_CLASS = 0x06040001; ++ printk("PCIE1 enabled\n"); ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ //PCIe2 ++ if ((pcie_link_status & 0x4) != 0) { ++ RALINK_PCI2_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE ++ RALINK_PCI2_IMBASEBAR0_ADDR = MEMORY_BASE; ++ RALINK_PCI2_CLASS = 0x06040001; ++ printk("PCIE2 enabled\n"); ++ } ++#endif ++ ++ ++ switch(pcie_link_status) { ++ case 7: ++ read_config(0, 2, 0, 0x4, &val); ++ write_config(0, 2, 0, 0x4, val|0x4); ++ // write_config(0, 1, 0, 0x4, val|0x7); ++ read_config(0, 2, 0, 0x70c, &val); ++ val &= ~(0xff)<<8; ++ val |= 0x50<<8; ++ write_config(0, 2, 0, 0x70c, val); ++ case 3: ++ case 5: ++ case 6: ++ read_config(0, 1, 0, 0x4, &val); ++ write_config(0, 1, 0, 0x4, val|0x4); ++ // write_config(0, 1, 0, 0x4, val|0x7); ++ read_config(0, 1, 0, 0x70c, &val); ++ val &= ~(0xff)<<8; ++ val |= 0x50<<8; ++ write_config(0, 1, 0, 0x70c, val); ++ default: ++ read_config(0, 0, 0, 0x4, &val); ++ write_config(0, 0, 0, 0x4, val|0x4); //bus master enable ++ // write_config(0, 0, 0, 0x4, val|0x7); //bus master enable ++ read_config(0, 0, 0, 0x70c, &val); ++ val &= ~(0xff)<<8; ++ val |= 0x50<<8; ++ write_config(0, 0, 0, 0x70c, val); ++ } ++ ++ pci_load_of_ranges(&mt7621_controller, pdev->dev.of_node); ++ setup_cm_memory_region(mt7621_controller.mem_resource); ++ register_pci_controller(&mt7621_controller); ++ return 0; ++ ++} ++ ++int pcibios_plat_dev_init(struct pci_dev *dev) ++{ ++ return 0; ++} ++ ++static const struct of_device_id mt7621_pci_ids[] = { ++ { .compatible = "mediatek,mt7621-pci" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7621_pci_ids); ++ ++static struct platform_driver mt7621_pci_driver = { ++ .probe = mt7621_pci_probe, ++ .driver = { ++ .name = "mt7621-pci", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(mt7621_pci_ids), ++ }, ++}; ++ ++static int __init mt7621_pci_init(void) ++{ ++ return platform_driver_register(&mt7621_pci_driver); ++} ++ ++arch_initcall(mt7621_pci_init); diff --git a/target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch b/target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch new file mode 100644 index 0000000000..773c74f4e9 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0005-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch @@ -0,0 +1,82 @@ +From ce3d4a4111a5f7e6b4e74bceae5faa6ce388e8ec Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 14 Jul 2013 23:08:11 +0200 +Subject: [PATCH 05/53] MIPS: use set_mode() to enable/disable the cevt-r4k + irq + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/ralink/Kconfig | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -1,12 +1,17 @@ + # SPDX-License-Identifier: GPL-2.0 + if RALINK + ++config CEVT_SYSTICK_QUIRK ++ bool ++ default n ++ + config CLKEVT_RT3352 + bool + depends on SOC_RT305X || SOC_MT7620 + default y + select TIMER_OF + select CLKSRC_MMIO ++ select CEVT_SYSTICK_QUIRK + + config RALINK_ILL_ACC + bool +--- a/arch/mips/kernel/cevt-r4k.c ++++ b/arch/mips/kernel/cevt-r4k.c +@@ -15,6 +15,26 
@@ + #include <asm/time.h> + #include <asm/cevt-r4k.h> + ++static int mips_state_oneshot(struct clock_event_device *evt) ++{ ++ if (!cp0_timer_irq_installed) { ++ cp0_timer_irq_installed = 1; ++ setup_irq(evt->irq, &c0_compare_irqaction); ++ } ++ ++ return 0; ++} ++ ++static int mips_state_shutdown(struct clock_event_device *evt) ++{ ++ if (cp0_timer_irq_installed) { ++ cp0_timer_irq_installed = 0; ++ remove_irq(evt->irq, &c0_compare_irqaction); ++ } ++ ++ return 0; ++} ++ + static int mips_next_event(unsigned long delta, + struct clock_event_device *evt) + { +@@ -281,17 +301,21 @@ int r4k_clockevent_init(void) + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); ++ cd->set_state_shutdown = mips_state_shutdown; ++ cd->set_state_oneshot = mips_state_oneshot; + cd->set_next_event = mips_next_event; + cd->event_handler = mips_event_handler; + + clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff); + ++#ifndef CONFIG_CEVT_SYSTICK_QUIRK + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); ++#endif + + return 0; + } diff --git a/target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch b/target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch new file mode 100644 index 0000000000..90215fbf86 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0006-MIPS-ralink-add-cpu-frequency-scaling.patch @@ -0,0 +1,200 @@ +From bd30f19a006fb52bac80c6463c49dd2f4159f4ac Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 28 Jul 2013 16:26:41 +0200 +Subject: [PATCH 06/53] MIPS: ralink: add cpu frequency scaling + +This feature will break udelay() and cause the delay loop to have longer delays +when the frequency is scaled causing a performance hit. + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/ralink/cevt-rt3352.c | 38 ++++++++++++++++++++++++++++++++++++++ + 1 file changed, 38 insertions(+) + +--- a/arch/mips/ralink/cevt-rt3352.c ++++ b/arch/mips/ralink/cevt-rt3352.c +@@ -29,6 +29,10 @@ + /* enable the counter */ + #define CFG_CNT_EN 0x1 + ++/* mt7620 frequency scaling defines */ ++#define CLK_LUT_CFG 0x40 ++#define SLEEP_EN BIT(31) ++ + struct systick_device { + void __iomem *membase; + struct clock_event_device dev; +@@ -36,21 +40,53 @@ struct systick_device { + int freq_scale; + }; + ++static void (*systick_freq_scaling)(struct systick_device *sdev, int status); ++ + static int systick_set_oneshot(struct clock_event_device *evt); + static int systick_shutdown(struct clock_event_device *evt); + ++static inline void mt7620_freq_scaling(struct systick_device *sdev, int status) ++{ ++ if (sdev->freq_scale == status) ++ return; ++ ++ sdev->freq_scale = status; ++ ++ pr_info("%s: %s autosleep mode\n", sdev->dev.name, ++ (status) ? 
("enable") : ("disable")); ++ if (status) ++ rt_sysc_w32(rt_sysc_r32(CLK_LUT_CFG) | SLEEP_EN, CLK_LUT_CFG); ++ else ++ rt_sysc_w32(rt_sysc_r32(CLK_LUT_CFG) & ~SLEEP_EN, CLK_LUT_CFG); ++} ++ ++static inline unsigned int read_count(struct systick_device *sdev) ++{ ++ return ioread32(sdev->membase + SYSTICK_COUNT); ++} ++ ++static inline unsigned int read_compare(struct systick_device *sdev) ++{ ++ return ioread32(sdev->membase + SYSTICK_COMPARE); ++} ++ ++static inline void write_compare(struct systick_device *sdev, unsigned int val) ++{ ++ iowrite32(val, sdev->membase + SYSTICK_COMPARE); ++} ++ + static int systick_next_event(unsigned long delta, + struct clock_event_device *evt) + { + struct systick_device *sdev; +- u32 count; ++ int res; + + sdev = container_of(evt, struct systick_device, dev); +- count = ioread32(sdev->membase + SYSTICK_COUNT); +- count = (count + delta) % SYSTICK_FREQ; +- iowrite32(count, sdev->membase + SYSTICK_COMPARE); ++ delta += read_count(sdev); ++ write_compare(sdev, delta); ++ res = ((int)(read_count(sdev) - delta) >= 0) ? -ETIME : 0; + +- return 0; ++ return res; + } + + static void systick_event_handler(struct clock_event_device *dev) +@@ -60,20 +96,25 @@ static void systick_event_handler(struct + + static irqreturn_t systick_interrupt(int irq, void *dev_id) + { +- struct clock_event_device *dev = (struct clock_event_device *) dev_id; ++ int ret = 0; ++ struct clock_event_device *cdev; ++ struct systick_device *sdev; + +- dev->event_handler(dev); ++ if (read_c0_cause() & STATUSF_IP7) { ++ cdev = (struct clock_event_device *) dev_id; ++ sdev = container_of(cdev, struct systick_device, dev); ++ ++ /* Clear Count/Compare Interrupt */ ++ write_compare(sdev, read_compare(sdev)); ++ cdev->event_handler(cdev); ++ ret = 1; ++ } + +- return IRQ_HANDLED; ++ return IRQ_RETVAL(ret); + } + + static struct systick_device systick = { + .dev = { +- /* +- * cevt-r4k uses 300, make sure systick +- * gets used if available +- */ +- .rating = 310, + .features = CLOCK_EVT_FEAT_ONESHOT, + .set_next_event = systick_next_event, + .set_state_shutdown = systick_shutdown, +@@ -95,9 +136,15 @@ static int systick_shutdown(struct clock + sdev = container_of(evt, struct systick_device, dev); + + if (sdev->irq_requested) +- free_irq(systick.dev.irq, &systick_irqaction); ++ remove_irq(systick.dev.irq, &systick_irqaction); + sdev->irq_requested = 0; +- iowrite32(0, systick.membase + SYSTICK_CONFIG); ++ iowrite32(CFG_CNT_EN, systick.membase + SYSTICK_CONFIG); ++ ++ if (systick_freq_scaling) ++ systick_freq_scaling(sdev, 0); ++ ++ if (systick_freq_scaling) ++ systick_freq_scaling(sdev, 1); + + return 0; + } +@@ -117,34 +164,48 @@ static int systick_set_oneshot(struct cl + return 0; + } + ++static const struct of_device_id systick_match[] = { ++ { .compatible = "ralink,mt7620a-systick", .data = mt7620_freq_scaling}, ++ {}, ++}; ++ + static int __init ralink_systick_init(struct device_node *np) + { ++ const struct of_device_id *match; ++ int rating = 200; + int ret; + + systick.membase = of_iomap(np, 0); + if (!systick.membase) + return -ENXIO; + +- systick_irqaction.name = np->name; +- systick.dev.name = np->name; +- clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60); +- systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev); +- systick.dev.max_delta_ticks = 0x7fff; +- systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev); +- systick.dev.min_delta_ticks = 0x3; ++ match = of_match_node(systick_match, np); ++ if (match) { ++ systick_freq_scaling = match->data; ++ 
/* ++ * cevt-r4k uses 300, make sure systick ++ * gets used if available ++ */ ++ rating = 310; ++ } ++ ++ /* enable counter than register clock source */ ++ iowrite32(CFG_CNT_EN, systick.membase + SYSTICK_CONFIG); ++ clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name, ++ SYSTICK_FREQ, rating, 16, clocksource_mmio_readl_up); ++ ++ /* register clock event */ + systick.dev.irq = irq_of_parse_and_map(np, 0); + if (!systick.dev.irq) { + pr_err("%s: request_irq failed", np->name); + return -EINVAL; + } + +- ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name, +- SYSTICK_FREQ, 301, 16, +- clocksource_mmio_readl_up); +- if (ret) +- return ret; +- +- clockevents_register_device(&systick.dev); ++ systick_irqaction.name = np->name; ++ systick.dev.name = np->name; ++ systick.dev.rating = rating; ++ systick.dev.cpumask = cpumask_of(0); ++ clockevents_config_and_register(&systick.dev, SYSTICK_FREQ, 0x3, 0x7fff); + + pr_info("%s: running - mult: %d, shift: %d\n", + np->name, systick.dev.mult, systick.dev.shift); diff --git a/target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch b/target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch new file mode 100644 index 0000000000..c05ee8018d --- /dev/null +++ b/target/linux/ramips/patches-4.14/0007-MIPS-ralink-copy-the-commandline-from-the-devicetree.patch @@ -0,0 +1,21 @@ +From 67b7bff0fd364c194e653f69baa623ba2141bd4c Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 4 Aug 2014 18:46:02 +0200 +Subject: [PATCH 07/53] MIPS: ralink: copy the commandline from the devicetree + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/ralink/of.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -82,6 +82,8 @@ void __init plat_mem_setup(void) + + __dt_setup_arch(dtb); + ++ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); ++ + of_scan_flat_dt(early_init_dt_find_memory, NULL); + if (memory_dtb) + of_scan_flat_dt(early_init_dt_scan_memory, NULL); diff --git a/target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch b/target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch new file mode 100644 index 0000000000..6ca15fe32e --- /dev/null +++ b/target/linux/ramips/patches-4.14/0009-PCI-MIPS-enable-PCIe-on-MT7688.patch @@ -0,0 +1,28 @@ +From 7768798964eb0e4f95eaecffb93b5d0ca28a38af Mon Sep 17 00:00:00 2001 +From: Daniel Golle <daniel@makrotopia.org> +Date: Sat, 3 Jun 2017 20:00:03 +0200 +Subject: [PATCH] MIPS: pci-mt7620: enabled PCIe on MT7688 +To: linux-mips@linux-mips.org, + John Crispin <john@phrozen.org> +Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>, + Ralf Baechle <ralf@linux-mips.org>, + linux-mediatek@lists.infradead.org + +Use PCIe support for MT7628AN also on MT7688. +Tested on WRTNODE2R. 
+ +Signed-off-by: Daniel Golle <daniel@makrotopia.org> +--- + arch/mips/pci/pci-mt7620.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/arch/mips/pci/pci-mt7620.c ++++ b/arch/mips/pci/pci-mt7620.c +@@ -316,6 +316,7 @@ static int mt7620_pci_probe(struct platf + break; + + case MT762X_SOC_MT7628AN: ++ case MT762X_SOC_MT7688: + if (mt7628_pci_hw_init(pdev)) + return -1; + break; diff --git a/target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch b/target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch new file mode 100644 index 0000000000..528c629649 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0013-owrt-hack-fix-mt7688-cache-issue.patch @@ -0,0 +1,28 @@ +From 5ede027f6c4a57ed25da872420508b7f1168b36b Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 7 Dec 2015 17:15:32 +0100 +Subject: [PATCH 13/53] owrt: hack: fix mt7688 cache issue + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/kernel/setup.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/kernel/setup.c ++++ b/arch/mips/kernel/setup.c +@@ -902,7 +902,6 @@ static void __init arch_mem_init(char ** + crashk_res.end - crashk_res.start + 1, + BOOTMEM_DEFAULT); + #endif +- device_tree_init(); + sparse_init(); + plat_swiotlb_setup(); + +@@ -1018,6 +1017,7 @@ void __init setup_arch(char **cmdline_p) + + cpu_cache_init(); + paging_init(); ++ device_tree_init(); + } + + unsigned long kernelsp[NR_CPUS]; diff --git a/target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch b/target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch new file mode 100644 index 0000000000..1dc54ccf23 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0015-arch-mips-do-not-select-illegal-access-driver-by-def.patch @@ -0,0 +1,25 @@ +From 9e6ce539092a1dd605a20bf73c655a9de58d8641 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 7 Dec 2015 17:18:05 +0100 +Subject: [PATCH 15/53] arch: mips: do not select illegal access driver by + default + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/ralink/Kconfig | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -14,9 +14,9 @@ config CLKEVT_RT3352 + select CEVT_SYSTICK_QUIRK + + config RALINK_ILL_ACC +- bool ++ bool "illegal access irq" + depends on SOC_RT305X +- default y ++ default n + + config IRQ_INTC + bool diff --git a/target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch b/target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch new file mode 100644 index 0000000000..0c1bc73926 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0024-GPIO-add-named-gpio-exports.patch @@ -0,0 +1,166 @@ +From 4267880319bc1a2270d352e0ded6d6386242a7ef Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Tue, 12 Aug 2014 20:49:27 +0200 +Subject: [PATCH 24/53] GPIO: add named gpio exports + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++++++++++++++++++++ + drivers/gpio/gpiolib-sysfs.c | 10 +++++- + include/asm-generic/gpio.h | 6 ++++ + include/linux/gpio/consumer.h | 8 +++++ + 4 files changed, 91 insertions(+), 1 deletion(-) + +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -23,6 +23,8 @@ + #include <linux/pinctrl/pinctrl.h> + #include <linux/slab.h> + #include <linux/gpio/machine.h> 
++#include <linux/init.h> ++#include <linux/platform_device.h> + + #include "gpiolib.h" + +@@ -506,3 +508,69 @@ void of_gpiochip_remove(struct gpio_chip + gpiochip_remove_pin_ranges(chip); + of_node_put(chip->of_node); + } ++ ++static struct of_device_id gpio_export_ids[] = { ++ { .compatible = "gpio-export" }, ++ { /* sentinel */ } ++}; ++ ++static int __init of_gpio_export_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct device_node *cnp; ++ u32 val; ++ int nb = 0; ++ ++ for_each_child_of_node(np, cnp) { ++ const char *name = NULL; ++ int gpio; ++ bool dmc; ++ int max_gpio = 1; ++ int i; ++ ++ of_property_read_string(cnp, "gpio-export,name", &name); ++ ++ if (!name) ++ max_gpio = of_gpio_count(cnp); ++ ++ for (i = 0; i < max_gpio; i++) { ++ unsigned flags = 0; ++ enum of_gpio_flags of_flags; ++ ++ gpio = of_get_gpio_flags(cnp, i, &of_flags); ++ ++ if (of_flags == OF_GPIO_ACTIVE_LOW) ++ flags |= GPIOF_ACTIVE_LOW; ++ ++ if (!of_property_read_u32(cnp, "gpio-export,output", &val)) ++ flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; ++ else ++ flags |= GPIOF_IN; ++ ++ if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np))) ++ continue; ++ ++ dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change"); ++ gpio_export_with_name(gpio, dmc, name); ++ nb++; ++ } ++ } ++ ++ dev_info(&pdev->dev, "%d gpio(s) exported\n", nb); ++ ++ return 0; ++} ++ ++static struct platform_driver gpio_export_driver = { ++ .driver = { ++ .name = "gpio-export", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(gpio_export_ids), ++ }, ++}; ++ ++static int __init of_gpio_export_init(void) ++{ ++ return platform_driver_probe(&gpio_export_driver, of_gpio_export_probe); ++} ++device_initcall(of_gpio_export_init); +--- a/drivers/gpio/gpiolib-sysfs.c ++++ b/drivers/gpio/gpiolib-sysfs.c +@@ -553,7 +553,7 @@ static struct class gpio_class = { + * + * Returns zero on success, else an error. 
+ */ +-int gpiod_export(struct gpio_desc *desc, bool direction_may_change) ++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name) + { + struct gpio_chip *chip; + struct gpio_device *gdev; +@@ -615,6 +615,8 @@ int gpiod_export(struct gpio_desc *desc, + offset = gpio_chip_hwgpio(desc); + if (chip->names && chip->names[offset]) + ioname = chip->names[offset]; ++ if (name) ++ ioname = name; + + dev = device_create_with_groups(&gpio_class, &gdev->dev, + MKDEV(0, 0), data, gpio_groups, +@@ -636,6 +638,12 @@ err_unlock: + gpiod_dbg(desc, "%s: status %d\n", __func__, status); + return status; + } ++EXPORT_SYMBOL_GPL(__gpiod_export); ++ ++int gpiod_export(struct gpio_desc *desc, bool direction_may_change) ++{ ++ return __gpiod_export(desc, direction_may_change, NULL); ++} + EXPORT_SYMBOL_GPL(gpiod_export); + + static int match_export(struct device *dev, const void *desc) +--- a/include/asm-generic/gpio.h ++++ b/include/asm-generic/gpio.h +@@ -127,6 +127,12 @@ static inline int gpio_export(unsigned g + return gpiod_export(gpio_to_desc(gpio), direction_may_change); + } + ++int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name); ++static inline int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name) ++{ ++ return __gpiod_export(gpio_to_desc(gpio), direction_may_change, name); ++} ++ + static inline int gpio_export_link(struct device *dev, const char *name, + unsigned gpio) + { +--- a/include/linux/gpio/consumer.h ++++ b/include/linux/gpio/consumer.h +@@ -451,6 +451,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_ + + #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) + ++int _gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name); + int gpiod_export(struct gpio_desc *desc, bool direction_may_change); + int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc); +@@ -458,6 +459,13 @@ void gpiod_unexport(struct gpio_desc *de + + #else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */ + ++static inline int _gpiod_export(struct gpio_desc *desc, ++ bool direction_may_change, ++ const char *name) ++{ ++ return -ENOSYS; ++} ++ + static inline int gpiod_export(struct gpio_desc *desc, + bool direction_may_change) + { diff --git a/target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch b/target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch new file mode 100644 index 0000000000..3e5dac5376 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0025-pinctrl-ralink-add-pinctrl-driver.patch @@ -0,0 +1,524 @@ +From 7adbe9a88c33c6e362a10b109d963b5500a21f00 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:34:05 +0100 +Subject: [PATCH 25/53] pinctrl: ralink: add pinctrl driver + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/Kconfig | 2 + + drivers/pinctrl/Kconfig | 5 + + drivers/pinctrl/Makefile | 1 + + drivers/pinctrl/pinctrl-rt2880.c | 474 ++++++++++++++++++++++++++++++++++++++ + 4 files changed, 482 insertions(+) + create mode 100644 drivers/pinctrl/pinctrl-rt2880.c + +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -628,6 +628,8 @@ config RALINK + select CLKDEV_LOOKUP + select ARCH_HAS_RESET_CONTROLLER + select RESET_CONTROLLER ++ select PINCTRL ++ select PINCTRL_RT2880 + + config SGI_IP22 + bool "SGI IP22 (Indy/Indigo2)" +--- a/drivers/pinctrl/Kconfig ++++ b/drivers/pinctrl/Kconfig +@@ -143,6 +143,11 @@ config PINCTRL_LPC18XX + help + Pinctrl driver 
for NXP LPC18xx/43xx System Control Unit (SCU). + ++config PINCTRL_RT2880 ++ bool ++ depends on RALINK ++ select PINMUX ++ + config PINCTRL_FALCON + bool + depends on SOC_FALCON +--- a/drivers/pinctrl/Makefile ++++ b/drivers/pinctrl/Makefile +@@ -28,6 +28,7 @@ obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl- + obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o + obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o + obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o ++obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o + obj-$(CONFIG_PINCTRL_RZA1) += pinctrl-rza1.o + obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o + obj-$(CONFIG_PINCTRL_SIRF) += sirf/ +--- /dev/null ++++ b/drivers/pinctrl/pinctrl-rt2880.c +@@ -0,0 +1,472 @@ ++/* ++ * linux/drivers/pinctrl/pinctrl-rt2880.c ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * publishhed by the Free Software Foundation. ++ * ++ * Copyright (C) 2013 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/device.h> ++#include <linux/io.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/of.h> ++#include <linux/pinctrl/pinctrl.h> ++#include <linux/pinctrl/pinconf.h> ++#include <linux/pinctrl/pinmux.h> ++#include <linux/pinctrl/consumer.h> ++#include <linux/pinctrl/machine.h> ++ ++#include <asm/mach-ralink/ralink_regs.h> ++#include <asm/mach-ralink/pinmux.h> ++#include <asm/mach-ralink/mt7620.h> ++ ++#include "core.h" ++ ++#define SYSC_REG_GPIO_MODE 0x60 ++#define SYSC_REG_GPIO_MODE2 0x64 ++ ++struct rt2880_priv { ++ struct device *dev; ++ ++ struct pinctrl_pin_desc *pads; ++ struct pinctrl_desc *desc; ++ ++ struct rt2880_pmx_func **func; ++ int func_count; ++ ++ struct rt2880_pmx_group *groups; ++ const char **group_names; ++ int group_count; ++ ++ uint8_t *gpio; ++ int max_pins; ++}; ++ ++static int rt2880_get_group_count(struct pinctrl_dev *pctrldev) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ return p->group_count; ++} ++ ++static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev, ++ unsigned group) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (group >= p->group_count) ++ return NULL; ++ ++ return p->group_names[group]; ++} ++ ++static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev, ++ unsigned group, ++ const unsigned **pins, ++ unsigned *num_pins) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (group >= p->group_count) ++ return -EINVAL; ++ ++ *pins = p->groups[group].func[0].pins; ++ *num_pins = p->groups[group].func[0].pin_count; ++ ++ return 0; ++} ++ ++static void rt2880_pinctrl_dt_free_map(struct pinctrl_dev *pctrldev, ++ struct pinctrl_map *map, unsigned num_maps) ++{ ++ int i; ++ ++ for (i = 0; i < num_maps; i++) ++ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || ++ map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) ++ kfree(map[i].data.configs.configs); ++ kfree(map); ++} ++ ++static void rt2880_pinctrl_pin_dbg_show(struct pinctrl_dev *pctrldev, ++ struct seq_file *s, ++ unsigned offset) ++{ ++ seq_printf(s, "ralink pio"); ++} ++ ++static void rt2880_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctrldev, ++ struct device_node *np, ++ struct pinctrl_map **map) ++{ ++ const char *function; ++ int func = of_property_read_string(np, "ralink,function", &function); ++ int grps = of_property_count_strings(np, "ralink,group"); ++ int i; ++ ++ if (func || !grps) ++ return; ++ ++ for (i = 0; 
i < grps; i++) { ++ const char *group; ++ ++ of_property_read_string_index(np, "ralink,group", i, &group); ++ ++ (*map)->type = PIN_MAP_TYPE_MUX_GROUP; ++ (*map)->name = function; ++ (*map)->data.mux.group = group; ++ (*map)->data.mux.function = function; ++ (*map)++; ++ } ++} ++ ++static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev, ++ struct device_node *np_config, ++ struct pinctrl_map **map, ++ unsigned *num_maps) ++{ ++ int max_maps = 0; ++ struct pinctrl_map *tmp; ++ struct device_node *np; ++ ++ for_each_child_of_node(np_config, np) { ++ int ret = of_property_count_strings(np, "ralink,group"); ++ ++ if (ret >= 0) ++ max_maps += ret; ++ } ++ ++ if (!max_maps) ++ return -EINVAL; ++ ++ *map = kzalloc(max_maps * sizeof(struct pinctrl_map), GFP_KERNEL); ++ if (!*map) ++ return -ENOMEM; ++ ++ tmp = *map; ++ ++ for_each_child_of_node(np_config, np) ++ rt2880_pinctrl_dt_subnode_to_map(pctrldev, np, &tmp); ++ *num_maps = max_maps; ++ ++ return 0; ++} ++ ++static const struct pinctrl_ops rt2880_pctrl_ops = { ++ .get_groups_count = rt2880_get_group_count, ++ .get_group_name = rt2880_get_group_name, ++ .get_group_pins = rt2880_get_group_pins, ++ .pin_dbg_show = rt2880_pinctrl_pin_dbg_show, ++ .dt_node_to_map = rt2880_pinctrl_dt_node_to_map, ++ .dt_free_map = rt2880_pinctrl_dt_free_map, ++}; ++ ++static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ return p->func_count; ++} ++ ++static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev, ++ unsigned func) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ return p->func[func]->name; ++} ++ ++static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev, ++ unsigned func, ++ const char * const **groups, ++ unsigned * const num_groups) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (p->func[func]->group_count == 1) ++ *groups = &p->group_names[p->func[func]->groups[0]]; ++ else ++ *groups = p->group_names; ++ ++ *num_groups = p->func[func]->group_count; ++ ++ return 0; ++} ++ ++static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev, ++ unsigned func, ++ unsigned group) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ u32 mode = 0; ++ u32 reg = SYSC_REG_GPIO_MODE; ++ int i; ++ int shift; ++ ++ /* dont allow double use */ ++ if (p->groups[group].enabled) { ++ dev_err(p->dev, "%s is already enabled\n", p->groups[group].name); ++ return -EBUSY; ++ } ++ ++ p->groups[group].enabled = 1; ++ p->func[func]->enabled = 1; ++ ++ shift = p->groups[group].shift; ++ if (shift >= 32) { ++ shift -= 32; ++ reg = SYSC_REG_GPIO_MODE2; ++ } ++ mode = rt_sysc_r32(reg); ++ mode &= ~(p->groups[group].mask << shift); ++ ++ /* mark the pins as gpio */ ++ for (i = 0; i < p->groups[group].func[0].pin_count; i++) ++ p->gpio[p->groups[group].func[0].pins[i]] = 1; ++ ++ /* function 0 is gpio and needs special handling */ ++ if (func == 0) { ++ mode |= p->groups[group].gpio << shift; ++ } else { ++ for (i = 0; i < p->func[func]->pin_count; i++) ++ p->gpio[p->func[func]->pins[i]] = 0; ++ mode |= p->func[func]->value << shift; ++ } ++ rt_sysc_w32(mode, reg); ++ ++ return 0; ++} ++ ++static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, ++ struct pinctrl_gpio_range *range, ++ unsigned pin) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (!p->gpio[pin]) { ++ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); ++ return -EINVAL; ++ } ++ 
++ return 0; ++} ++ ++static const struct pinmux_ops rt2880_pmx_group_ops = { ++ .get_functions_count = rt2880_pmx_func_count, ++ .get_function_name = rt2880_pmx_func_name, ++ .get_function_groups = rt2880_pmx_group_get_groups, ++ .set_mux = rt2880_pmx_group_enable, ++ .gpio_request_enable = rt2880_pmx_group_gpio_request_enable, ++}; ++ ++static struct pinctrl_desc rt2880_pctrl_desc = { ++ .owner = THIS_MODULE, ++ .name = "rt2880-pinmux", ++ .pctlops = &rt2880_pctrl_ops, ++ .pmxops = &rt2880_pmx_group_ops, ++}; ++ ++static struct rt2880_pmx_func gpio_func = { ++ .name = "gpio", ++}; ++ ++static int rt2880_pinmux_index(struct rt2880_priv *p) ++{ ++ struct rt2880_pmx_func **f; ++ struct rt2880_pmx_group *mux = p->groups; ++ int i, j, c = 0; ++ ++ /* count the mux functions */ ++ while (mux->name) { ++ p->group_count++; ++ mux++; ++ } ++ ++ /* allocate the group names array needed by the gpio function */ ++ p->group_names = devm_kzalloc(p->dev, sizeof(char *) * p->group_count, GFP_KERNEL); ++ if (!p->group_names) ++ return -1; ++ ++ for (i = 0; i < p->group_count; i++) { ++ p->group_names[i] = p->groups[i].name; ++ p->func_count += p->groups[i].func_count; ++ } ++ ++ /* we have a dummy function[0] for gpio */ ++ p->func_count++; ++ ++ /* allocate our function and group mapping index buffers */ ++ f = p->func = devm_kzalloc(p->dev, sizeof(struct rt2880_pmx_func) * p->func_count, GFP_KERNEL); ++ gpio_func.groups = devm_kzalloc(p->dev, sizeof(int) * p->group_count, GFP_KERNEL); ++ if (!f || !gpio_func.groups) ++ return -1; ++ ++ /* add a backpointer to the function so it knows its group */ ++ gpio_func.group_count = p->group_count; ++ for (i = 0; i < gpio_func.group_count; i++) ++ gpio_func.groups[i] = i; ++ ++ f[c] = &gpio_func; ++ c++; ++ ++ /* add remaining functions */ ++ for (i = 0; i < p->group_count; i++) { ++ for (j = 0; j < p->groups[i].func_count; j++) { ++ f[c] = &p->groups[i].func[j]; ++ f[c]->groups = devm_kzalloc(p->dev, sizeof(int), GFP_KERNEL); ++ f[c]->groups[0] = i; ++ f[c]->group_count = 1; ++ c++; ++ } ++ } ++ return 0; ++} ++ ++static int rt2880_pinmux_pins(struct rt2880_priv *p) ++{ ++ int i, j; ++ ++ /* loop over the functions and initialize the pins array. 
also work out the highest pin used */ ++ for (i = 0; i < p->func_count; i++) { ++ int pin; ++ ++ if (!p->func[i]->pin_count) ++ continue; ++ ++ p->func[i]->pins = devm_kzalloc(p->dev, sizeof(int) * p->func[i]->pin_count, GFP_KERNEL); ++ for (j = 0; j < p->func[i]->pin_count; j++) ++ p->func[i]->pins[j] = p->func[i]->pin_first + j; ++ ++ pin = p->func[i]->pin_first + p->func[i]->pin_count; ++ if (pin > p->max_pins) ++ p->max_pins = pin; ++ } ++ ++ /* the buffer that tells us which pins are gpio */ ++ p->gpio = devm_kzalloc(p->dev,sizeof(uint8_t) * p->max_pins, ++ GFP_KERNEL); ++ /* the pads needed to tell pinctrl about our pins */ ++ p->pads = devm_kzalloc(p->dev, ++ sizeof(struct pinctrl_pin_desc) * p->max_pins, ++ GFP_KERNEL); ++ if (!p->pads || !p->gpio ) { ++ dev_err(p->dev, "Failed to allocate gpio data\n"); ++ return -ENOMEM; ++ } ++ ++ memset(p->gpio, 1, sizeof(uint8_t) * p->max_pins); ++ for (i = 0; i < p->func_count; i++) { ++ if (!p->func[i]->pin_count) ++ continue; ++ ++ for (j = 0; j < p->func[i]->pin_count; j++) ++ p->gpio[p->func[i]->pins[j]] = 0; ++ } ++ ++ /* pin 0 is always a gpio */ ++ p->gpio[0] = 1; ++ ++ /* set the pads */ ++ for (i = 0; i < p->max_pins; i++) { ++ /* strlen("ioXY") + 1 = 5 */ ++ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); ++ ++ if (!name) { ++ dev_err(p->dev, "Failed to allocate pad name\n"); ++ return -ENOMEM; ++ } ++ snprintf(name, 5, "io%d", i); ++ p->pads[i].number = i; ++ p->pads[i].name = name; ++ } ++ p->desc->pins = p->pads; ++ p->desc->npins = p->max_pins; ++ ++ return 0; ++} ++ ++static int rt2880_pinmux_probe(struct platform_device *pdev) ++{ ++ struct rt2880_priv *p; ++ struct pinctrl_dev *dev; ++ struct device_node *np; ++ ++ if (!rt2880_pinmux_data) ++ return -ENOSYS; ++ ++ /* setup the private data */ ++ p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ ++ p->dev = &pdev->dev; ++ p->desc = &rt2880_pctrl_desc; ++ p->groups = rt2880_pinmux_data; ++ platform_set_drvdata(pdev, p); ++ ++ /* init the device */ ++ if (rt2880_pinmux_index(p)) { ++ dev_err(&pdev->dev, "failed to load index\n"); ++ return -EINVAL; ++ } ++ if (rt2880_pinmux_pins(p)) { ++ dev_err(&pdev->dev, "failed to load pins\n"); ++ return -EINVAL; ++ } ++ dev = pinctrl_register(p->desc, &pdev->dev, p); ++ if (IS_ERR(dev)) ++ return PTR_ERR(dev); ++ ++ /* finalize by adding gpio ranges for enables gpio controllers */ ++ for_each_compatible_node(np, NULL, "ralink,rt2880-gpio") { ++ const __be32 *ngpio, *gpiobase; ++ struct pinctrl_gpio_range *range; ++ char *name; ++ ++ if (!of_device_is_available(np)) ++ continue; ++ ++ ngpio = of_get_property(np, "ralink,num-gpios", NULL); ++ gpiobase = of_get_property(np, "ralink,gpio-base", NULL); ++ if (!ngpio || !gpiobase) { ++ dev_err(&pdev->dev, "failed to load chip info\n"); ++ return -EINVAL; ++ } ++ ++ range = devm_kzalloc(p->dev, sizeof(struct pinctrl_gpio_range) + 4, GFP_KERNEL); ++ range->name = name = (char *) &range[1]; ++ sprintf(name, "pio"); ++ range->npins = __be32_to_cpu(*ngpio); ++ range->base = __be32_to_cpu(*gpiobase); ++ range->pin_base = range->base; ++ pinctrl_add_gpio_range(dev, range); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id rt2880_pinmux_match[] = { ++ { .compatible = "ralink,rt2880-pinmux" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, rt2880_pinmux_match); ++ ++static struct platform_driver rt2880_pinmux_driver = { ++ .probe = rt2880_pinmux_probe, ++ .driver = { ++ .name = "rt2880-pinmux", ++ .owner = THIS_MODULE, ++ .of_match_table = 
rt2880_pinmux_match, ++ }, ++}; ++ ++int __init rt2880_pinmux_init(void) ++{ ++ return platform_driver_register(&rt2880_pinmux_driver); ++} ++ ++core_initcall_sync(rt2880_pinmux_init); diff --git a/target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch b/target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch new file mode 100644 index 0000000000..7d5f98f647 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0026-DT-Add-documentation-for-gpio-ralink.patch @@ -0,0 +1,59 @@ +From d410e5478c622c01fcf31427533df5f433df9146 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 28 Jul 2013 19:45:30 +0200 +Subject: [PATCH 26/53] DT: Add documentation for gpio-ralink + +Describe gpio-ralink binding. + +Signed-off-by: John Crispin <blogic@openwrt.org> +Cc: linux-mips@linux-mips.org +Cc: devicetree@vger.kernel.org +Cc: linux-gpio@vger.kernel.org +--- + .../devicetree/bindings/gpio/gpio-ralink.txt | 40 ++++++++++++++++++++ + 1 file changed, 40 insertions(+) + create mode 100644 Documentation/devicetree/bindings/gpio/gpio-ralink.txt + +--- /dev/null ++++ b/Documentation/devicetree/bindings/gpio/gpio-ralink.txt +@@ -0,0 +1,40 @@ ++Ralink SoC GPIO controller bindings ++ ++Required properties: ++- compatible: ++ - "ralink,rt2880-gpio" for Ralink controllers ++- #gpio-cells : Should be two. ++ - first cell is the pin number ++ - second cell is used to specify optional parameters (unused) ++- gpio-controller : Marks the device node as a GPIO controller ++- reg : Physical base address and length of the controller's registers ++- interrupt-parent: phandle to the INTC device node ++- interrupts : Specify the INTC interrupt number ++- ralink,num-gpios : Specify the number of GPIOs ++- ralink,register-map : The register layout depends on the GPIO bank and actual ++ SoC type. Register offsets need to be in this order. ++ [ INT, EDGE, RENA, FENA, DATA, DIR, POL, SET, RESET, TOGGLE ] ++ ++Optional properties: ++- ralink,gpio-base : Specify the GPIO chips base number ++ ++Example: ++ ++ gpio0: gpio@600 { ++ compatible = "ralink,rt5350-gpio", "ralink,rt2880-gpio"; ++ ++ #gpio-cells = <2>; ++ gpio-controller; ++ ++ reg = <0x600 0x34>; ++ ++ interrupt-parent = <&intc>; ++ interrupts = <6>; ++ ++ ralink,gpio-base = <0>; ++ ralink,num-gpios = <24>; ++ ralink,register-map = [ 00 04 08 0c ++ 20 24 28 2c ++ 30 34 ]; ++ ++ }; diff --git a/target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch b/target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch new file mode 100644 index 0000000000..35cedf61fd --- /dev/null +++ b/target/linux/ramips/patches-4.14/0027-GPIO-MIPS-ralink-add-gpio-driver-for-ralink-SoC.patch @@ -0,0 +1,430 @@ +From 69fdd2c4f937796b934e89c33acde9d082e27bfd Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 4 Aug 2014 20:36:29 +0200 +Subject: [PATCH 27/53] GPIO: MIPS: ralink: add gpio driver for ralink SoC + +Add gpio driver for Ralink SoC. This driver makes the gpio core on +RT2880, RT305x, rt3352, rt3662, rt3883, rt5350 and mt7620 work. 
+ +Signed-off-by: John Crispin <blogic@openwrt.org> +Cc: linux-mips@linux-mips.org +Cc: linux-gpio@vger.kernel.org +--- + arch/mips/include/asm/mach-ralink/gpio.h | 24 ++ + drivers/gpio/Kconfig | 6 + + drivers/gpio/Makefile | 1 + + drivers/gpio/gpio-ralink.c | 355 ++++++++++++++++++++++++++++++ + 4 files changed, 386 insertions(+) + create mode 100644 arch/mips/include/asm/mach-ralink/gpio.h + create mode 100644 drivers/gpio/gpio-ralink.c + +--- /dev/null ++++ b/arch/mips/include/asm/mach-ralink/gpio.h +@@ -0,0 +1,24 @@ ++/* ++ * Ralink SoC GPIO API support ++ * ++ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org> ++ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ */ ++ ++#ifndef __ASM_MACH_RALINK_GPIO_H ++#define __ASM_MACH_RALINK_GPIO_H ++ ++#define ARCH_NR_GPIOS 128 ++#include <asm-generic/gpio.h> ++ ++#define gpio_get_value __gpio_get_value ++#define gpio_set_value __gpio_set_value ++#define gpio_cansleep __gpio_cansleep ++#define gpio_to_irq __gpio_to_irq ++ ++#endif /* __ASM_MACH_RALINK_GPIO_H */ +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -398,6 +398,12 @@ config GPIO_REG + A 32-bit single register GPIO fixed in/out implementation. This + can be used to represent any register as a set of GPIO signals. + ++config GPIO_RALINK ++ bool "Ralink GPIO Support" ++ depends on RALINK ++ help ++ Say yes here to support the Ralink SoC GPIO device ++ + config GPIO_SPEAR_SPICS + bool "ST SPEAr13xx SPI Chip Select as GPIO support" + depends on PLAT_SPEAR +--- a/drivers/gpio/Makefile ++++ b/drivers/gpio/Makefile +@@ -98,6 +98,7 @@ obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-p + obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o + obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o + obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o ++obj-$(CONFIG_GPIO_RALINK) += gpio-ralink.o + obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o + obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o + obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o +--- /dev/null ++++ b/drivers/gpio/gpio-ralink.c +@@ -0,0 +1,355 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> ++ * Copyright (C) 2013 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/io.h> ++#include <linux/gpio.h> ++#include <linux/spinlock.h> ++#include <linux/platform_device.h> ++#include <linux/of_irq.h> ++#include <linux/irqdomain.h> ++#include <linux/interrupt.h> ++ ++enum ralink_gpio_reg { ++ GPIO_REG_INT = 0, ++ GPIO_REG_EDGE, ++ GPIO_REG_RENA, ++ GPIO_REG_FENA, ++ GPIO_REG_DATA, ++ GPIO_REG_DIR, ++ GPIO_REG_POL, ++ GPIO_REG_SET, ++ GPIO_REG_RESET, ++ GPIO_REG_TOGGLE, ++ GPIO_REG_MAX ++}; ++ ++struct ralink_gpio_chip { ++ struct gpio_chip chip; ++ u8 regs[GPIO_REG_MAX]; ++ ++ spinlock_t lock; ++ void __iomem *membase; ++ struct irq_domain *domain; ++ int irq; ++ ++ u32 rising; ++ u32 falling; ++}; ++ ++#define MAP_MAX 4 ++static struct irq_domain *irq_map[MAP_MAX]; ++static int irq_map_count; ++static atomic_t irq_refcount = ATOMIC_INIT(0); ++ ++static inline struct ralink_gpio_chip *to_ralink_gpio(struct gpio_chip *chip) ++{ ++ struct ralink_gpio_chip *rg; ++ ++ rg = container_of(chip, struct ralink_gpio_chip, chip); ++ ++ return rg; ++} ++ ++static inline void rt_gpio_w32(struct ralink_gpio_chip *rg, u8 reg, u32 val) ++{ ++ iowrite32(val, rg->membase + rg->regs[reg]); ++} ++ ++static inline u32 rt_gpio_r32(struct ralink_gpio_chip *rg, u8 reg) ++{ ++ return ioread32(rg->membase + rg->regs[reg]); ++} ++ ++static void ralink_gpio_set(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip); ++ ++ rt_gpio_w32(rg, (value) ? GPIO_REG_SET : GPIO_REG_RESET, BIT(offset)); ++} ++ ++static int ralink_gpio_get(struct gpio_chip *chip, unsigned offset) ++{ ++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip); ++ ++ return !!(rt_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset)); ++} ++ ++static int ralink_gpio_direction_input(struct gpio_chip *chip, unsigned offset) ++{ ++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ t = rt_gpio_r32(rg, GPIO_REG_DIR); ++ t &= ~BIT(offset); ++ rt_gpio_w32(rg, GPIO_REG_DIR, t); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ return 0; ++} ++ ++static int ralink_gpio_direction_output(struct gpio_chip *chip, ++ unsigned offset, int value) ++{ ++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ ralink_gpio_set(chip, offset, value); ++ t = rt_gpio_r32(rg, GPIO_REG_DIR); ++ t |= BIT(offset); ++ rt_gpio_w32(rg, GPIO_REG_DIR, t); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ return 0; ++} ++ ++static int ralink_gpio_to_irq(struct gpio_chip *chip, unsigned pin) ++{ ++ struct ralink_gpio_chip *rg = to_ralink_gpio(chip); ++ ++ if (rg->irq < 1) ++ return -1; ++ ++ return irq_create_mapping(rg->domain, pin); ++} ++ ++static void ralink_gpio_irq_handler(struct irq_desc *desc) ++{ ++ int i; ++ ++ for (i = 0; i < irq_map_count; i++) { ++ struct irq_domain *domain = irq_map[i]; ++ struct ralink_gpio_chip *rg; ++ unsigned long pending; ++ int bit; ++ ++ rg = (struct ralink_gpio_chip *) domain->host_data; ++ pending = rt_gpio_r32(rg, GPIO_REG_INT); ++ ++ for_each_set_bit(bit, &pending, rg->chip.ngpio) { ++ u32 map = irq_find_mapping(domain, bit); ++ generic_handle_irq(map); ++ rt_gpio_w32(rg, GPIO_REG_INT, BIT(bit)); ++ } ++ } ++} ++ ++static void ralink_gpio_irq_unmask(struct irq_data *d) ++{ ++ struct ralink_gpio_chip *rg; ++ unsigned long flags; ++ u32 rise, 
fall; ++ ++ rg = (struct ralink_gpio_chip *) d->domain->host_data; ++ rise = rt_gpio_r32(rg, GPIO_REG_RENA); ++ fall = rt_gpio_r32(rg, GPIO_REG_FENA); ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ rt_gpio_w32(rg, GPIO_REG_RENA, rise | (BIT(d->hwirq) & rg->rising)); ++ rt_gpio_w32(rg, GPIO_REG_FENA, fall | (BIT(d->hwirq) & rg->falling)); ++ spin_unlock_irqrestore(&rg->lock, flags); ++} ++ ++static void ralink_gpio_irq_mask(struct irq_data *d) ++{ ++ struct ralink_gpio_chip *rg; ++ unsigned long flags; ++ u32 rise, fall; ++ ++ rg = (struct ralink_gpio_chip *) d->domain->host_data; ++ rise = rt_gpio_r32(rg, GPIO_REG_RENA); ++ fall = rt_gpio_r32(rg, GPIO_REG_FENA); ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ rt_gpio_w32(rg, GPIO_REG_FENA, fall & ~BIT(d->hwirq)); ++ rt_gpio_w32(rg, GPIO_REG_RENA, rise & ~BIT(d->hwirq)); ++ spin_unlock_irqrestore(&rg->lock, flags); ++} ++ ++static int ralink_gpio_irq_type(struct irq_data *d, unsigned int type) ++{ ++ struct ralink_gpio_chip *rg; ++ u32 mask = BIT(d->hwirq); ++ ++ rg = (struct ralink_gpio_chip *) d->domain->host_data; ++ ++ if (type == IRQ_TYPE_PROBE) { ++ if ((rg->rising | rg->falling) & mask) ++ return 0; ++ ++ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ++ } ++ ++ if (type & IRQ_TYPE_EDGE_RISING) ++ rg->rising |= mask; ++ else ++ rg->rising &= ~mask; ++ ++ if (type & IRQ_TYPE_EDGE_FALLING) ++ rg->falling |= mask; ++ else ++ rg->falling &= ~mask; ++ ++ return 0; ++} ++ ++static struct irq_chip ralink_gpio_irq_chip = { ++ .name = "GPIO", ++ .irq_unmask = ralink_gpio_irq_unmask, ++ .irq_mask = ralink_gpio_irq_mask, ++ .irq_mask_ack = ralink_gpio_irq_mask, ++ .irq_set_type = ralink_gpio_irq_type, ++}; ++ ++static int gpio_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ++{ ++ irq_set_chip_and_handler(irq, &ralink_gpio_irq_chip, handle_level_irq); ++ irq_set_handler_data(irq, d); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops irq_domain_ops = { ++ .xlate = irq_domain_xlate_onecell, ++ .map = gpio_map, ++}; ++ ++static void ralink_gpio_irq_init(struct device_node *np, ++ struct ralink_gpio_chip *rg) ++{ ++ if (irq_map_count >= MAP_MAX) ++ return; ++ ++ rg->irq = irq_of_parse_and_map(np, 0); ++ if (!rg->irq) ++ return; ++ ++ rg->domain = irq_domain_add_linear(np, rg->chip.ngpio, ++ &irq_domain_ops, rg); ++ if (!rg->domain) { ++ dev_err(rg->chip.parent, "irq_domain_add_linear failed\n"); ++ return; ++ } ++ ++ irq_map[irq_map_count++] = rg->domain; ++ ++ rt_gpio_w32(rg, GPIO_REG_RENA, 0x0); ++ rt_gpio_w32(rg, GPIO_REG_FENA, 0x0); ++ ++ if (!atomic_read(&irq_refcount)) ++ irq_set_chained_handler(rg->irq, ralink_gpio_irq_handler); ++ atomic_inc(&irq_refcount); ++ ++ dev_info(rg->chip.parent, "registering %d irq handlers\n", rg->chip.ngpio); ++} ++ ++static int ralink_gpio_request(struct gpio_chip *chip, unsigned offset) ++{ ++ int gpio = chip->base + offset; ++ ++ return pinctrl_request_gpio(gpio); ++} ++ ++static void ralink_gpio_free(struct gpio_chip *chip, unsigned offset) ++{ ++ int gpio = chip->base + offset; ++ ++ pinctrl_free_gpio(gpio); ++} ++ ++static int ralink_gpio_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ struct ralink_gpio_chip *rg; ++ const __be32 *ngpio, *gpiobase; ++ ++ if (!res) { ++ dev_err(&pdev->dev, "failed to find resource\n"); ++ return -ENOMEM; ++ } ++ ++ rg = devm_kzalloc(&pdev->dev, ++ sizeof(struct ralink_gpio_chip), GFP_KERNEL); ++ if (!rg) ++ return -ENOMEM; ++ ++ 
rg->membase = devm_ioremap_resource(&pdev->dev, res); ++ if (!rg->membase) { ++ dev_err(&pdev->dev, "cannot remap I/O memory region\n"); ++ return -ENOMEM; ++ } ++ ++ if (of_property_read_u8_array(np, "ralink,register-map", ++ rg->regs, GPIO_REG_MAX)) { ++ dev_err(&pdev->dev, "failed to read register definition\n"); ++ return -EINVAL; ++ } ++ ++ ngpio = of_get_property(np, "ralink,num-gpios", NULL); ++ if (!ngpio) { ++ dev_err(&pdev->dev, "failed to read number of pins\n"); ++ return -EINVAL; ++ } ++ ++ gpiobase = of_get_property(np, "ralink,gpio-base", NULL); ++ if (gpiobase) ++ rg->chip.base = be32_to_cpu(*gpiobase); ++ else ++ rg->chip.base = -1; ++ ++ spin_lock_init(&rg->lock); ++ ++ rg->chip.parent = &pdev->dev; ++ rg->chip.label = dev_name(&pdev->dev); ++ rg->chip.of_node = np; ++ rg->chip.ngpio = be32_to_cpu(*ngpio); ++ rg->chip.direction_input = ralink_gpio_direction_input; ++ rg->chip.direction_output = ralink_gpio_direction_output; ++ rg->chip.get = ralink_gpio_get; ++ rg->chip.set = ralink_gpio_set; ++ rg->chip.request = ralink_gpio_request; ++ rg->chip.to_irq = ralink_gpio_to_irq; ++ rg->chip.free = ralink_gpio_free; ++ ++ /* set polarity to low for all lines */ ++ rt_gpio_w32(rg, GPIO_REG_POL, 0); ++ ++ dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio); ++ ++ ralink_gpio_irq_init(np, rg); ++ ++ return gpiochip_add(&rg->chip); ++} ++ ++static const struct of_device_id ralink_gpio_match[] = { ++ { .compatible = "ralink,rt2880-gpio" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, ralink_gpio_match); ++ ++static struct platform_driver ralink_gpio_driver = { ++ .probe = ralink_gpio_probe, ++ .driver = { ++ .name = "rt2880_gpio", ++ .owner = THIS_MODULE, ++ .of_match_table = ralink_gpio_match, ++ }, ++}; ++ ++static int __init ralink_gpio_init(void) ++{ ++ return platform_driver_register(&ralink_gpio_driver); ++} ++ ++subsys_initcall(ralink_gpio_init); diff --git a/target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch b/target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch new file mode 100644 index 0000000000..d657274efe --- /dev/null +++ b/target/linux/ramips/patches-4.14/0028-GPIO-ralink-add-mt7621-gpio-controller.patch @@ -0,0 +1,405 @@ +From 61ac7d9b4228de8c332900902c2b93189b042eab Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 11:00:32 +0100 +Subject: [PATCH 28/53] GPIO: ralink: add mt7621 gpio controller + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/Kconfig | 3 + + drivers/gpio/Kconfig | 6 + + drivers/gpio/Makefile | 1 + + drivers/gpio/gpio-mt7621.c | 354 ++++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 364 insertions(+) + create mode 100644 drivers/gpio/gpio-mt7621.c + +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -630,6 +630,9 @@ config RALINK + select RESET_CONTROLLER + select PINCTRL + select PINCTRL_RT2880 ++ select ARCH_HAS_RESET_CONTROLLER ++ select RESET_CONTROLLER ++ select ARCH_REQUIRE_GPIOLIB + + config SGI_IP22 + bool "SGI IP22 (Indy/Indigo2)" +--- a/drivers/gpio/Kconfig ++++ b/drivers/gpio/Kconfig +@@ -298,6 +298,12 @@ config GPIO_MENZ127 + help + Say yes here to support the MEN 16Z127 GPIO Controller + ++config GPIO_MT7621 ++ bool "Mediatek GPIO Support" ++ depends on SOC_MT7620 || SOC_MT7621 ++ help ++ Say yes here to support the Mediatek SoC GPIO device ++ + config GPIO_MM_LANTIQ + bool "Lantiq Memory mapped GPIOs" + depends on LANTIQ && SOC_XWAY +--- a/drivers/gpio/Makefile ++++ b/drivers/gpio/Makefile +@@ 
-152,3 +152,4 @@ obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o + obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o + obj-$(CONFIG_GPIO_ZX) += gpio-zx.o + obj-$(CONFIG_GPIO_LOONGSON1) += gpio-loongson1.o ++obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o +--- /dev/null ++++ b/drivers/gpio/gpio-mt7621.c +@@ -0,0 +1,354 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> ++ * Copyright (C) 2013 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/io.h> ++#include <linux/err.h> ++#include <linux/gpio.h> ++#include <linux/module.h> ++#include <linux/of_irq.h> ++#include <linux/spinlock.h> ++#include <linux/irqdomain.h> ++#include <linux/interrupt.h> ++#include <linux/platform_device.h> ++ ++#define MTK_MAX_BANK 3 ++#define MTK_BANK_WIDTH 32 ++ ++enum mediatek_gpio_reg { ++ GPIO_REG_CTRL = 0, ++ GPIO_REG_POL, ++ GPIO_REG_DATA, ++ GPIO_REG_DSET, ++ GPIO_REG_DCLR, ++ GPIO_REG_REDGE, ++ GPIO_REG_FEDGE, ++ GPIO_REG_HLVL, ++ GPIO_REG_LLVL, ++ GPIO_REG_STAT, ++ GPIO_REG_EDGE, ++}; ++ ++static void __iomem *mediatek_gpio_membase; ++static int mediatek_gpio_irq; ++static struct irq_domain *mediatek_gpio_irq_domain; ++static atomic_t irq_refcount = ATOMIC_INIT(0); ++ ++struct mtk_gc { ++ struct gpio_chip chip; ++ spinlock_t lock; ++ int bank; ++ u32 rising; ++ u32 falling; ++} *gc_map[MTK_MAX_BANK]; ++ ++static inline struct mtk_gc ++*to_mediatek_gpio(struct gpio_chip *chip) ++{ ++ struct mtk_gc *mgc; ++ ++ mgc = container_of(chip, struct mtk_gc, chip); ++ ++ return mgc; ++} ++ ++static inline void ++mtk_gpio_w32(struct mtk_gc *rg, u8 reg, u32 val) ++{ ++ iowrite32(val, mediatek_gpio_membase + (reg * 0x10) + (rg->bank * 0x4)); ++} ++ ++static inline u32 ++mtk_gpio_r32(struct mtk_gc *rg, u8 reg) ++{ ++ return ioread32(mediatek_gpio_membase + (reg * 0x10) + (rg->bank * 0x4)); ++} ++ ++static void ++mediatek_gpio_set(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ ++ mtk_gpio_w32(rg, (value) ? 
GPIO_REG_DSET : GPIO_REG_DCLR, BIT(offset)); ++} ++ ++static int ++mediatek_gpio_get(struct gpio_chip *chip, unsigned offset) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ ++ return !!(mtk_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset)); ++} ++ ++static int ++mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned offset) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); ++ t &= ~BIT(offset); ++ mtk_gpio_w32(rg, GPIO_REG_CTRL, t); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ return 0; ++} ++ ++static int ++mediatek_gpio_direction_output(struct gpio_chip *chip, ++ unsigned offset, int value) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); ++ t |= BIT(offset); ++ mtk_gpio_w32(rg, GPIO_REG_CTRL, t); ++ mediatek_gpio_set(chip, offset, value); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ return 0; ++} ++ ++static int ++mediatek_gpio_get_direction(struct gpio_chip *chip, unsigned offset) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ if (t & BIT(offset)) ++ return 0; ++ ++ return 1; ++} ++ ++static int ++mediatek_gpio_to_irq(struct gpio_chip *chip, unsigned pin) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ ++ return irq_create_mapping(mediatek_gpio_irq_domain, pin + (rg->bank * MTK_BANK_WIDTH)); ++} ++ ++static int ++mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank) ++{ ++ const __be32 *id = of_get_property(bank, "reg", NULL); ++ struct mtk_gc *rg = devm_kzalloc(&pdev->dev, ++ sizeof(struct mtk_gc), GFP_KERNEL); ++ ++ if (!rg || !id || be32_to_cpu(*id) > MTK_MAX_BANK) ++ return -ENOMEM; ++ ++ gc_map[be32_to_cpu(*id)] = rg; ++ ++ memset(rg, 0, sizeof(struct mtk_gc)); ++ ++ spin_lock_init(&rg->lock); ++ ++ rg->chip.parent = &pdev->dev; ++ rg->chip.label = dev_name(&pdev->dev); ++ rg->chip.of_node = bank; ++ rg->chip.base = MTK_BANK_WIDTH * be32_to_cpu(*id); ++ rg->chip.ngpio = MTK_BANK_WIDTH; ++ rg->chip.direction_input = mediatek_gpio_direction_input; ++ rg->chip.direction_output = mediatek_gpio_direction_output; ++ rg->chip.get_direction = mediatek_gpio_get_direction; ++ rg->chip.get = mediatek_gpio_get; ++ rg->chip.set = mediatek_gpio_set; ++ if (mediatek_gpio_irq_domain) ++ rg->chip.to_irq = mediatek_gpio_to_irq; ++ rg->bank = be32_to_cpu(*id); ++ ++ /* set polarity to low for all gpios */ ++ mtk_gpio_w32(rg, GPIO_REG_POL, 0); ++ ++ dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio); ++ ++ return gpiochip_add(&rg->chip); ++} ++ ++static void ++mediatek_gpio_irq_handler(struct irq_desc *desc) ++{ ++ int i; ++ ++ for (i = 0; i < MTK_MAX_BANK; i++) { ++ struct mtk_gc *rg = gc_map[i]; ++ unsigned long pending; ++ int bit; ++ ++ if (!rg) ++ continue; ++ ++ pending = mtk_gpio_r32(rg, GPIO_REG_STAT); ++ ++ for_each_set_bit(bit, &pending, MTK_BANK_WIDTH) { ++ u32 map = irq_find_mapping(mediatek_gpio_irq_domain, (MTK_BANK_WIDTH * i) + bit); ++ ++ generic_handle_irq(map); ++ mtk_gpio_w32(rg, GPIO_REG_STAT, BIT(bit)); ++ } ++ } ++} ++ ++static void ++mediatek_gpio_irq_unmask(struct irq_data *d) ++{ ++ int pin = d->hwirq; ++ int bank = pin / 32; ++ struct mtk_gc *rg = gc_map[bank]; ++ unsigned long flags; ++ u32 rise, fall; ++ ++ 
if (!rg) ++ return; ++ ++ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE); ++ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE); ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise | (BIT(d->hwirq) & rg->rising)); ++ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(d->hwirq) & rg->falling)); ++ spin_unlock_irqrestore(&rg->lock, flags); ++} ++ ++static void ++mediatek_gpio_irq_mask(struct irq_data *d) ++{ ++ int pin = d->hwirq; ++ int bank = pin / 32; ++ struct mtk_gc *rg = gc_map[bank]; ++ unsigned long flags; ++ u32 rise, fall; ++ ++ if (!rg) ++ return; ++ ++ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE); ++ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE); ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(d->hwirq)); ++ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(d->hwirq)); ++ spin_unlock_irqrestore(&rg->lock, flags); ++} ++ ++static int ++mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) ++{ ++ int pin = d->hwirq; ++ int bank = pin / 32; ++ struct mtk_gc *rg = gc_map[bank]; ++ u32 mask = BIT(d->hwirq); ++ ++ if (!rg) ++ return -1; ++ ++ if (type == IRQ_TYPE_PROBE) { ++ if ((rg->rising | rg->falling) & mask) ++ return 0; ++ ++ type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; ++ } ++ ++ if (type & IRQ_TYPE_EDGE_RISING) ++ rg->rising |= mask; ++ else ++ rg->rising &= ~mask; ++ ++ if (type & IRQ_TYPE_EDGE_FALLING) ++ rg->falling |= mask; ++ else ++ rg->falling &= ~mask; ++ ++ return 0; ++} ++ ++static struct irq_chip mediatek_gpio_irq_chip = { ++ .name = "GPIO", ++ .irq_unmask = mediatek_gpio_irq_unmask, ++ .irq_mask = mediatek_gpio_irq_mask, ++ .irq_mask_ack = mediatek_gpio_irq_mask, ++ .irq_set_type = mediatek_gpio_irq_type, ++}; ++ ++static int ++mediatek_gpio_gpio_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ++{ ++ irq_set_chip_and_handler(irq, &mediatek_gpio_irq_chip, handle_level_irq); ++ irq_set_handler_data(irq, d); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops irq_domain_ops = { ++ .xlate = irq_domain_xlate_onecell, ++ .map = mediatek_gpio_gpio_map, ++}; ++ ++static int ++mediatek_gpio_probe(struct platform_device *pdev) ++{ ++ struct device_node *bank, *np = pdev->dev.of_node; ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ mediatek_gpio_membase = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(mediatek_gpio_membase)) ++ return PTR_ERR(mediatek_gpio_membase); ++ ++ mediatek_gpio_irq = irq_of_parse_and_map(np, 0); ++ if (mediatek_gpio_irq) { ++ mediatek_gpio_irq_domain = irq_domain_add_linear(np, ++ MTK_MAX_BANK * MTK_BANK_WIDTH, ++ &irq_domain_ops, NULL); ++ if (!mediatek_gpio_irq_domain) ++ dev_err(&pdev->dev, "irq_domain_add_linear failed\n"); ++ } ++ ++ for_each_child_of_node(np, bank) ++ if (of_device_is_compatible(bank, "mtk,mt7621-gpio-bank")) ++ mediatek_gpio_bank_probe(pdev, bank); ++ ++ if (mediatek_gpio_irq_domain) ++ irq_set_chained_handler(mediatek_gpio_irq, mediatek_gpio_irq_handler); ++ ++ return 0; ++} ++ ++static const struct of_device_id mediatek_gpio_match[] = { ++ { .compatible = "mtk,mt7621-gpio" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mediatek_gpio_match); ++ ++static struct platform_driver mediatek_gpio_driver = { ++ .probe = mediatek_gpio_probe, ++ .driver = { ++ .name = "mt7621_gpio", ++ .owner = THIS_MODULE, ++ .of_match_table = mediatek_gpio_match, ++ }, ++}; ++ ++static int __init ++mediatek_gpio_init(void) ++{ ++ return platform_driver_register(&mediatek_gpio_driver); ++} ++ ++subsys_initcall(mediatek_gpio_init); diff --git 
a/target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch b/target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch new file mode 100644 index 0000000000..5829b2fdd0 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0031-uvc-add-iPassion-iP2970-support.patch @@ -0,0 +1,246 @@ +From 975e76214cd2516eb6cfff4c3eec581872645e88 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Thu, 19 Sep 2013 01:50:59 +0200 +Subject: [PATCH 31/53] uvc: add iPassion iP2970 support + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/media/usb/uvc/uvc_driver.c | 12 +++ + drivers/media/usb/uvc/uvc_status.c | 2 + + drivers/media/usb/uvc/uvc_video.c | 147 ++++++++++++++++++++++++++++++++++++ + drivers/media/usb/uvc/uvcvideo.h | 5 +- + 4 files changed, 165 insertions(+), 1 deletion(-) + +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -2734,6 +2734,18 @@ static const struct usb_device_id uvc_id + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_FORCE_Y8 }, ++ /* iPassion iP2970 */ ++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE ++ | USB_DEVICE_ID_MATCH_INT_INFO, ++ .idVendor = 0x1B3B, ++ .idProduct = 0x2970, ++ .bInterfaceClass = USB_CLASS_VIDEO, ++ .bInterfaceSubClass = 1, ++ .bInterfaceProtocol = 0, ++ .driver_info = UVC_QUIRK_PROBE_MINMAX ++ | UVC_QUIRK_STREAM_NO_FID ++ | UVC_QUIRK_MOTION ++ | UVC_QUIRK_SINGLE_ISO }, + /* Generic USB Video Class */ + { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_UNDEFINED) }, + { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, UVC_PC_PROTOCOL_15) }, +--- a/drivers/media/usb/uvc/uvc_status.c ++++ b/drivers/media/usb/uvc/uvc_status.c +@@ -139,6 +139,7 @@ static void uvc_status_complete(struct u + switch (dev->status[0] & 0x0f) { + case UVC_STATUS_TYPE_CONTROL: + uvc_event_control(dev, dev->status, len); ++ dev->motion = 1; + break; + + case UVC_STATUS_TYPE_STREAMING: +@@ -182,6 +183,7 @@ int uvc_status_init(struct uvc_device *d + } + + pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress); ++ dev->motion = 0; + + /* For high-speed interrupt endpoints, the bInterval value is used as + * an exponent of two. Some developers forgot about it. +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -21,6 +21,11 @@ + #include <linux/wait.h> + #include <linux/atomic.h> + #include <asm/unaligned.h> ++#include <linux/skbuff.h> ++#include <linux/kobject.h> ++#include <linux/netlink.h> ++#include <linux/kobject.h> ++#include <linux/workqueue.h> + + #include <media/v4l2-common.h> + +@@ -1081,9 +1086,149 @@ static void uvc_video_decode_data(struct + } + } + ++struct bh_priv { ++ unsigned long seen; ++}; ++ ++struct bh_event { ++ const char *name; ++ struct sk_buff *skb; ++ struct work_struct work; ++}; ++ ++#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, "webcam", ##args ) ++#define BH_DBG(fmt, args...) do {} while (0) ++#define BH_SKB_SIZE 2048 ++ ++extern u64 uevent_next_seqnum(void); ++static int seen = 0; ++ ++static int bh_event_add_var(struct bh_event *event, int argv, ++ const char *format, ...) 
++{ ++ static char buf[128]; ++ char *s; ++ va_list args; ++ int len; ++ ++ if (argv) ++ return 0; ++ ++ va_start(args, format); ++ len = vsnprintf(buf, sizeof(buf), format, args); ++ va_end(args); ++ ++ if (len >= sizeof(buf)) { ++ BH_ERR("buffer size too small\n"); ++ WARN_ON(1); ++ return -ENOMEM; ++ } ++ ++ s = skb_put(event->skb, len + 1); ++ strcpy(s, buf); ++ ++ BH_DBG("added variable '%s'\n", s); ++ ++ return 0; ++} ++ ++static int motion_hotplug_fill_event(struct bh_event *event) ++{ ++ int s = jiffies; ++ int ret; ++ ++ if (!seen) ++ seen = jiffies; ++ ++ ret = bh_event_add_var(event, 0, "HOME=%s", "/"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "PATH=%s", ++ "/sbin:/bin:/usr/sbin:/usr/bin"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "SUBSYSTEM=usb"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "ACTION=motion"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "SEEN=%d", s - seen); ++ if (ret) ++ return ret; ++ seen = s; ++ ++ ret = bh_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum()); ++ ++ return ret; ++} ++ ++static void motion_hotplug_work(struct work_struct *work) ++{ ++ struct bh_event *event = container_of(work, struct bh_event, work); ++ int ret = 0; ++ ++ event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL); ++ if (!event->skb) ++ goto out_free_event; ++ ++ ret = bh_event_add_var(event, 0, "%s@", "add"); ++ if (ret) ++ goto out_free_skb; ++ ++ ret = motion_hotplug_fill_event(event); ++ if (ret) ++ goto out_free_skb; ++ ++ NETLINK_CB(event->skb).dst_group = 1; ++ broadcast_uevent(event->skb, 0, 1, GFP_KERNEL); ++ ++out_free_skb: ++ if (ret) { ++ BH_ERR("work error %d\n", ret); ++ kfree_skb(event->skb); ++ } ++out_free_event: ++ kfree(event); ++} ++ ++static int motion_hotplug_create_event(void) ++{ ++ struct bh_event *event; ++ ++ event = kzalloc(sizeof(*event), GFP_KERNEL); ++ if (!event) ++ return -ENOMEM; ++ ++ event->name = "motion"; ++ ++ INIT_WORK(&event->work, (void *)(void *)motion_hotplug_work); ++ schedule_work(&event->work); ++ ++ return 0; ++} ++ ++#define MOTION_FLAG_OFFSET 4 + static void uvc_video_decode_end(struct uvc_streaming *stream, + struct uvc_buffer *buf, const __u8 *data, int len) + { ++ if ((stream->dev->quirks & UVC_QUIRK_MOTION) && ++ (data[len - 2] == 0xff) && (data[len - 1] == 0xd9)) { ++ u8 *mem; ++ buf->state = UVC_BUF_STATE_READY; ++ mem = (u8 *) (buf->mem + MOTION_FLAG_OFFSET); ++ if ( stream->dev->motion ) { ++ stream->dev->motion = 0; ++ motion_hotplug_create_event(); ++ } else { ++ *mem &= 0x7f; ++ } ++ } ++ + /* Mark the buffer as done if the EOF marker is set. 
*/ + if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) { + uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n"); +@@ -1498,6 +1643,8 @@ static int uvc_init_video_isoc(struct uv + if (npackets == 0) + return -ENOMEM; + ++ if (stream->dev->quirks & UVC_QUIRK_SINGLE_ISO) ++ npackets = 1; + size = npackets * psize; + + for (i = 0; i < UVC_URBS; ++i) { +--- a/drivers/media/usb/uvc/uvcvideo.h ++++ b/drivers/media/usb/uvc/uvcvideo.h +@@ -186,7 +186,9 @@ + #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 + #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400 + #define UVC_QUIRK_FORCE_Y8 0x00000800 +- ++#define UVC_QUIRK_MOTION 0x00001000 ++#define UVC_QUIRK_SINGLE_ISO 0x00002000 ++ + /* Format flags */ + #define UVC_FMT_FLAG_COMPRESSED 0x00000001 + #define UVC_FMT_FLAG_STREAM 0x00000002 +@@ -584,6 +586,7 @@ struct uvc_device { + __u8 *status; + struct input_dev *input; + char input_phys[64]; ++ int motion; + }; + + enum uvc_handle_state { diff --git a/target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch b/target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch new file mode 100644 index 0000000000..623eaf7291 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0032-USB-dwc2-add-device_reset.patch @@ -0,0 +1,29 @@ +From a758e0870c6d1e4b0272f6e7f9efa9face5534bb Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:49:07 +0100 +Subject: [PATCH 32/53] USB: dwc2: add device_reset() + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/usb/dwc2/hcd.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/usb/dwc2/hcd.c ++++ b/drivers/usb/dwc2/hcd.c +@@ -48,6 +48,7 @@ + #include <linux/io.h> + #include <linux/slab.h> + #include <linux/usb.h> ++#include <linux/reset.h> + + #include <linux/usb/hcd.h> + #include <linux/usb/ch11.h> +@@ -5072,6 +5073,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hso + + retval = -ENOMEM; + ++ device_reset(hsotg->dev); ++ + hcfg = dwc2_readl(hsotg->regs + HCFG); + dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg); + diff --git a/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch b/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch new file mode 100644 index 0000000000..0bcb617ba1 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch @@ -0,0 +1,59 @@ +From 0b6eb1e68290243d439ee330ea8d0b239a5aec69 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:38:50 +0100 +Subject: [PATCH 34/53] NET: multi phy support + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/net/phy/phy.c | 9 ++++++--- + include/linux/phy.h | 1 + + 2 files changed, 7 insertions(+), 3 deletions(-) + +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -984,7 +984,10 @@ void phy_state_machine(struct work_struc + /* If the link is down, give up on negotiation for now */ + if (!phydev->link) { + phydev->state = PHY_NOLINK; +- phy_link_down(phydev, true); ++ if (!phydev->no_auto_carrier_off) ++ phy_link_down(phydev, true); ++ else ++ phy_link_down(phydev, false); + break; + } + +@@ -1071,7 +1074,10 @@ void phy_state_machine(struct work_struc + phy_link_up(phydev); + } else { + phydev->state = PHY_NOLINK; +- phy_link_down(phydev, true); ++ if (!phydev->no_auto_carrier_off) ++ phy_link_down(phydev, true); ++ else ++ phy_link_down(phydev, false); + } + + if (phy_interrupt_is_valid(phydev)) +@@ -1081,7 +1087,10 @@ void phy_state_machine(struct work_struc + case PHY_HALTED: + if (phydev->link) { + phydev->link = 0; 
+- phy_link_down(phydev, true); ++ if (!phydev->no_auto_carrier_off) ++ phy_link_down(phydev, true); ++ else ++ phy_link_down(phydev, false); + do_suspend = true; + } + break; +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -412,6 +412,7 @@ struct phy_device { + bool suspended; + bool sysfs_links; + bool loopback_enabled; ++ bool no_auto_carrier_off; + + enum phy_state state; + diff --git a/target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch b/target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch new file mode 100644 index 0000000000..0b3a6c40e4 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0036-mtd-fix-cfi-cmdset-0002-erase-status-check.patch @@ -0,0 +1,29 @@ +From 8e72a3a1be8f6328bd7ef491332ba541547b6086 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 15 Jul 2013 00:38:51 +0200 +Subject: [PATCH 36/53] mtd: fix cfi cmdset 0002 erase status check + +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -2293,7 +2293,7 @@ static int __xipram do_erase_chip(struct + chip->erase_suspended = 0; + } + +- if (chip_ready(map, adr)) ++ if (chip_good(map, adr, map_word_ff(map))) + break; + + if (time_after(jiffies, timeo)) { +@@ -2382,7 +2382,7 @@ static int __xipram do_erase_oneblock(st + chip->erase_suspended = 0; + } + +- if (chip_ready(map, adr)) { ++ if (chip_good(map, adr, map_word_ff(map))) { + xip_enable(map, chip, adr); + break; + } diff --git a/target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch b/target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch new file mode 100644 index 0000000000..8ac6964cd1 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0037-mtd-cfi-cmdset-0002-force-word-write.patch @@ -0,0 +1,70 @@ +From ee9081b2726a5ca8cde5497afdc5425e21ff8f8b Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 15 Jul 2013 00:39:21 +0200 +Subject: [PATCH 37/53] mtd: cfi cmdset 0002 force word write + +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -40,7 +40,7 @@ + #include <linux/mtd/xip.h> + + #define AMD_BOOTLOC_BUG +-#define FORCE_WORD_WRITE 0 ++#define FORCE_WORD_WRITE 1 + + #define MAX_WORD_RETRIES 3 + +@@ -51,7 +51,9 @@ + + static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); + static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); ++#if !FORCE_WORD_WRITE + static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); ++#endif + static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); + static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); + static void cfi_amdstd_sync (struct mtd_info *); +@@ -202,6 +204,7 @@ static void fixup_amd_bootblock(struct m + } + #endif + ++#if !FORCE_WORD_WRITE + static void fixup_use_write_buffers(struct mtd_info *mtd) + { + struct map_info *map = mtd->priv; +@@ -211,6 +214,7 @@ static void fixup_use_write_buffers(stru + mtd->_write = cfi_amdstd_write_buffers; + } + } ++#endif /* !FORCE_WORD_WRITE */ + + /* Atmel chips don't use the same PRI format as AMD chips */ + static void fixup_convert_atmel_pri(struct mtd_info *mtd) +@@ 
-1791,6 +1795,7 @@ static int cfi_amdstd_write_words(struct + /* + * FIXME: interleaved mode not tested, and probably not supported! + */ ++#if !FORCE_WORD_WRITE + static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, + unsigned long adr, const u_char *buf, + int len) +@@ -1919,7 +1924,6 @@ static int __xipram do_write_buffer(stru + return ret; + } + +- + static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) + { +@@ -1994,6 +1998,7 @@ static int cfi_amdstd_write_buffers(stru + + return 0; + } ++#endif /* !FORCE_WORD_WRITE */ + + /* + * Wait for the flash chip to become ready to write data diff --git a/target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch b/target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch new file mode 100644 index 0000000000..dae115dd13 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0039-mtd-add-mt7621-nand-support.patch @@ -0,0 +1,4480 @@ +From 0e1c4e3c97b83b4e7da65b1c56f0a7d40736ac53 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 11:05:17 +0100 +Subject: [PATCH 39/53] mtd: add mt7621 nand support + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/mtd/nand/Kconfig | 6 + + drivers/mtd/nand/Makefile | 1 + + drivers/mtd/nand/bmt.c | 750 ++++++++++++ + drivers/mtd/nand/bmt.h | 80 ++ + drivers/mtd/nand/dev-nand.c | 63 + + drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++ + drivers/mtd/nand/mtk_nand2.c | 2304 +++++++++++++++++++++++++++++++++++ + drivers/mtd/nand/mtk_nand2.h | 452 +++++++ + drivers/mtd/nand/nand_base.c | 6 +- + drivers/mtd/nand/nand_bbt.c | 19 + + drivers/mtd/nand/nand_def.h | 123 ++ + drivers/mtd/nand/nand_device_list.h | 55 + + drivers/mtd/nand/partition.h | 115 ++ + 13 files changed, 4311 insertions(+), 3 deletions(-) + create mode 100644 drivers/mtd/nand/bmt.c + create mode 100644 drivers/mtd/nand/bmt.h + create mode 100644 drivers/mtd/nand/dev-nand.c + create mode 100644 drivers/mtd/nand/mt6575_typedefs.h + create mode 100644 drivers/mtd/nand/mtk_nand2.c + create mode 100644 drivers/mtd/nand/mtk_nand2.h + create mode 100644 drivers/mtd/nand/nand_def.h + create mode 100644 drivers/mtd/nand/nand_device_list.h + create mode 100644 drivers/mtd/nand/partition.h + +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -563,4 +563,10 @@ config MTD_NAND_MTK + Enables support for NAND controller on MTK SoCs. + This controller is found on mt27xx, mt81xx, mt65xx SoCs. 
+ ++config MTK_MTD_NAND ++ tristate "Support for MTK SoC NAND controller" ++ depends on SOC_MT7621 ++ select MTD_NAND_IDS ++ select MTD_NAND_ECC ++ + endif # MTD_NAND +--- a/drivers/mtd/nand/Makefile ++++ b/drivers/mtd/nand/Makefile +@@ -60,6 +60,7 @@ obj-$(CONFIG_MTD_NAND_HISI504) + + obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ + obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o + obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o ++obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand2.o bmt.o + + nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o + nand-objs += nand_amd.o +--- /dev/null ++++ b/drivers/mtd/nand/bmt.c +@@ -0,0 +1,750 @@ ++#include "bmt.h" ++ ++typedef struct ++{ ++ char signature[3]; ++ u8 version; ++ u8 bad_count; // bad block count in pool ++ u8 mapped_count; // mapped block count in pool ++ u8 checksum; ++ u8 reseverd[13]; ++} phys_bmt_header; ++ ++typedef struct ++{ ++ phys_bmt_header header; ++ bmt_entry table[MAX_BMT_SIZE]; ++} phys_bmt_struct; ++ ++typedef struct ++{ ++ char signature[3]; ++} bmt_oob_data; ++ ++static char MAIN_SIGNATURE[] = "BMT"; ++static char OOB_SIGNATURE[] = "bmt"; ++#define SIGNATURE_SIZE (3) ++ ++#define MAX_DAT_SIZE 0x1000 ++#define MAX_OOB_SIZE 0x80 ++ ++static struct mtd_info *mtd_bmt; ++static struct nand_chip *nand_chip_bmt; ++#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift) ++#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift) ++ ++#define OFFSET(block) ((block) * BLOCK_SIZE_BMT) ++#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT) ++ ++/********************************************************************* ++* Flash is splited into 2 parts, system part is for normal system * ++* system usage, size is system_block_count, another is replace pool * ++* +-------------------------------------------------+ * ++* | system_block_count | bmt_block_count | * ++* +-------------------------------------------------+ * ++*********************************************************************/ ++static u32 total_block_count; // block number in flash ++static u32 system_block_count; ++static int bmt_block_count; // bmt table size ++// static int bmt_count; // block used in bmt ++static int page_per_block; // page per count ++ ++static u32 bmt_block_index; // bmt block index ++static bmt_struct bmt; // dynamic created global bmt table ++ ++static u8 dat_buf[MAX_DAT_SIZE]; ++static u8 oob_buf[MAX_OOB_SIZE]; ++static bool pool_erased; ++ ++/*************************************************************** ++* ++* Interface adaptor for preloader/uboot/kernel ++* These interfaces operate on physical address, read/write ++* physical data. 
++* ++***************************************************************/ ++int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob) ++{ ++ return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob); ++} ++ ++bool nand_block_bad_bmt(u32 offset) ++{ ++ return mtk_nand_block_bad_hw(mtd_bmt, offset); ++} ++ ++bool nand_erase_bmt(u32 offset) ++{ ++ int status; ++ if (offset < 0x20000) ++ { ++ MSG(INIT, "erase offset: 0x%x\n", offset); ++ } ++ ++ status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have a erase function defined ++ if (status & NAND_STATUS_FAIL) ++ return false; ++ else ++ return true; ++} ++ ++int mark_block_bad_bmt(u32 offset) ++{ ++ return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset); ++} ++ ++bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob) ++{ ++ if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob)) ++ return false; ++ else ++ return true; ++} ++ ++/*************************************************************** ++* * ++* static internal function * ++* * ++***************************************************************/ ++static void dump_bmt_info(bmt_struct * bmt) ++{ ++ int i; ++ ++ MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count); ++ for (i = 0; i < bmt->mapped_count; i++) ++ { ++ MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index); ++ } ++} ++ ++static bool match_bmt_signature(u8 * dat, u8 * oob) ++{ ++ ++ if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE)) ++ { ++ return false; ++ } ++ ++ if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE)) ++ { ++ MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n"); ++ } ++ return true; ++} ++ ++static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size) ++{ ++ int i; ++ u8 checksum = 0; ++ u8 *dat = (u8 *) phys_table; ++ ++ checksum += phys_table->header.version; ++ checksum += phys_table->header.mapped_count; ++ ++ dat += sizeof(phys_bmt_header); ++ for (i = 0; i < bmt_size * sizeof(bmt_entry); i++) ++ { ++ checksum += dat[i]; ++ } ++ ++ return checksum; ++} ++ ++ ++static int is_block_mapped(int index) ++{ ++ int i; ++ for (i = 0; i < bmt.mapped_count; i++) ++ { ++ if (index == bmt.table[i].mapped_index) ++ return i; ++ } ++ return -1; ++} ++ ++static bool is_page_used(u8 * dat, u8 * oob) ++{ ++ return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF)); ++} ++ ++static bool valid_bmt_data(phys_bmt_struct * phys_table) ++{ ++ int i; ++ u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count); ++ ++ // checksum correct? ++ if (phys_table->header.checksum != checksum) ++ { ++ MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum); ++ return false; ++ } ++ ++ MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum); ++ ++ // block index correct? ++ for (i = 0; i < phys_table->header.mapped_count; i++) ++ { ++ if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count) ++ { ++ MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index); ++ return false; ++ } ++ } ++ ++ // pass check, valid bmt. 
++ MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version); ++ return true; ++} ++ ++static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob) ++{ ++ phys_bmt_struct phys_bmt; ++ ++ dump_bmt_info(bmt); ++ ++ // fill phys_bmt_struct structure with bmt_struct ++ memset(&phys_bmt, 0xFF, sizeof(phys_bmt)); ++ ++ memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE); ++ phys_bmt.header.version = BMT_VERSION; ++ // phys_bmt.header.bad_count = bmt->bad_count; ++ phys_bmt.header.mapped_count = bmt->mapped_count; ++ memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count); ++ ++ phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count); ++ ++ memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt)); ++ memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE); ++} ++ ++// return valid index if found BMT, else return 0 ++static int load_bmt_data(int start, int pool_size) ++{ ++ int bmt_index = start + pool_size - 1; // find from the end ++ phys_bmt_struct phys_table; ++ int i; ++ ++ MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index); ++ ++ for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) ++ { ++ if (nand_block_bad_bmt(OFFSET(bmt_index))) ++ { ++ MSG(INIT, "Skip bad block: %d\n", bmt_index); ++ continue; ++ } ++ ++ if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) ++ { ++ MSG(INIT, "Error found when read block %d\n", bmt_index); ++ continue; ++ } ++ ++ if (!match_bmt_signature(dat_buf, oob_buf)) ++ { ++ continue; ++ } ++ ++ MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index); ++ ++ memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table)); ++ ++ if (!valid_bmt_data(&phys_table)) ++ { ++ MSG(INIT, "BMT data is not correct %d\n", bmt_index); ++ continue; ++ } else ++ { ++ bmt.mapped_count = phys_table.header.mapped_count; ++ bmt.version = phys_table.header.version; ++ // bmt.bad_count = phys_table.header.bad_count; ++ memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); ++ ++ MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); ++ ++ for (i = 0; i < bmt.mapped_count; i++) ++ { ++ if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) ++ { ++ MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); ++ mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index)); ++ } ++ } ++ ++ return bmt_index; ++ } ++ } ++ ++ MSG(INIT, "bmt block not found!\n"); ++ return 0; ++} ++ ++/************************************************************************* ++* Find an available block and erase. * ++* start_from_end: if true, find available block from end of flash. 
* ++* else, find from the beginning of the pool * ++* need_erase: if true, all unmapped blocks in the pool will be erased * ++*************************************************************************/ ++static int find_available_block(bool start_from_end) ++{ ++ int i; // , j; ++ int block = system_block_count; ++ int direction; ++ // int avail_index = 0; ++ MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased); ++ ++ // erase all un-mapped blocks in pool when finding avaliable block ++ if (!pool_erased) ++ { ++ MSG(INIT, "Erase all un-mapped blocks in pool\n"); ++ for (i = 0; i < bmt_block_count; i++) ++ { ++ if (block == bmt_block_index) ++ { ++ MSG(INIT, "Skip bmt block 0x%x\n", block); ++ continue; ++ } ++ ++ if (nand_block_bad_bmt(OFFSET(block + i))) ++ { ++ MSG(INIT, "Skip bad block 0x%x\n", block + i); ++ continue; ++ } ++//if(block==4095) ++//{ ++// continue; ++//} ++ ++ if (is_block_mapped(block + i) >= 0) ++ { ++ MSG(INIT, "Skip mapped block 0x%x\n", block + i); ++ continue; ++ } ++ ++ if (!nand_erase_bmt(OFFSET(block + i))) ++ { ++ MSG(INIT, "Erase block 0x%x failed\n", block + i); ++ mark_block_bad_bmt(OFFSET(block + i)); ++ } ++ } ++ ++ pool_erased = 1; ++ } ++ ++ if (start_from_end) ++ { ++ block = total_block_count - 1; ++ direction = -1; ++ } else ++ { ++ block = system_block_count; ++ direction = 1; ++ } ++ ++ for (i = 0; i < bmt_block_count; i++, block += direction) ++ { ++ if (block == bmt_block_index) ++ { ++ MSG(INIT, "Skip bmt block 0x%x\n", block); ++ continue; ++ } ++ ++ if (nand_block_bad_bmt(OFFSET(block))) ++ { ++ MSG(INIT, "Skip bad block 0x%x\n", block); ++ continue; ++ } ++ ++ if (is_block_mapped(block) >= 0) ++ { ++ MSG(INIT, "Skip mapped block 0x%x\n", block); ++ continue; ++ } ++ ++ MSG(INIT, "Find block 0x%x available\n", block); ++ return block; ++ } ++ ++ return 0; ++} ++ ++static unsigned short get_bad_index_from_oob(u8 * oob_buf) ++{ ++ unsigned short index; ++ memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE); ++ ++ return index; ++} ++ ++void set_bad_index_to_oob(u8 * oob, u16 index) ++{ ++ memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index)); ++} ++ ++static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob) ++{ ++ int page; ++ int error_block = offset / BLOCK_SIZE_BMT; ++ int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; ++ int to_index; ++ ++ memcpy(oob_buf, write_oob, MAX_OOB_SIZE); ++ ++ to_index = find_available_block(false); ++ ++ if (!to_index) ++ { ++ MSG(INIT, "Cannot find an available block for BMT\n"); ++ return 0; ++ } ++ ++ { // migrate error page first ++ MSG(INIT, "Write error page: 0x%x\n", error_page); ++ if (!write_dat) ++ { ++ nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL); ++ write_dat = dat_buf; ++ } ++ // memcpy(oob_buf, write_oob, MAX_OOB_SIZE); ++ ++ if (error_block < system_block_count) ++ set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB. 
++ ++ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) ++ { ++ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page); ++ mark_block_bad_bmt(to_index); ++ return migrate_from_bad(offset, write_dat, write_oob); ++ } ++ } ++ ++ for (page = 0; page < page_per_block; page++) ++ { ++ if (page != error_page) ++ { ++ nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf); ++ if (is_page_used(dat_buf, oob_buf)) ++ { ++ if (error_block < system_block_count) ++ { ++ set_bad_index_to_oob(oob_buf, error_block); ++ } ++ MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page); ++ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) ++ { ++ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page); ++ mark_block_bad_bmt(to_index); ++ return migrate_from_bad(offset, write_dat, write_oob); ++ } ++ } ++ } ++ } ++ ++ MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index); ++ ++ return to_index; ++} ++ ++static bool write_bmt_to_flash(u8 * dat, u8 * oob) ++{ ++ bool need_erase = true; ++ MSG(INIT, "Try to write BMT\n"); ++ ++ if (bmt_block_index == 0) ++ { ++ // if we don't have index, we don't need to erase found block as it has been erased in find_available_block() ++ need_erase = false; ++ if (!(bmt_block_index = find_available_block(true))) ++ { ++ MSG(INIT, "Cannot find an available block for BMT\n"); ++ return false; ++ } ++ } ++ ++ MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index); ++ ++ // write bmt to flash ++ if (need_erase) ++ { ++ if (!nand_erase_bmt(OFFSET(bmt_block_index))) ++ { ++ MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index); ++ mark_block_bad_bmt(OFFSET(bmt_block_index)); ++ // bmt.bad_count++; ++ ++ bmt_block_index = 0; ++ return write_bmt_to_flash(dat, oob); // recursive call ++ } ++ } ++ ++ if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob)) ++ { ++ MSG(INIT, "Write BMT data fail, need to write again\n"); ++ mark_block_bad_bmt(OFFSET(bmt_block_index)); ++ // bmt.bad_count++; ++ ++ bmt_block_index = 0; ++ return write_bmt_to_flash(dat, oob); // recursive call ++ } ++ ++ MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index); ++ return true; ++} ++ ++/******************************************************************* ++* Reconstruct bmt, called when found bmt info doesn't match bad ++* block info in flash. 
++* ++* Return NULL for failure ++*******************************************************************/ ++bmt_struct *reconstruct_bmt(bmt_struct * bmt) ++{ ++ int i; ++ int index = system_block_count; ++ unsigned short bad_index; ++ int mapped; ++ ++ // init everything in BMT struct ++ bmt->version = BMT_VERSION; ++ bmt->bad_count = 0; ++ bmt->mapped_count = 0; ++ ++ memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry)); ++ ++ for (i = 0; i < bmt_block_count; i++, index++) ++ { ++ if (nand_block_bad_bmt(OFFSET(index))) ++ { ++ MSG(INIT, "Skip bad block: 0x%x\n", index); ++ // bmt->bad_count++; ++ continue; ++ } ++ ++ MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index)); ++ nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf); ++ /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf)) ++ { ++ MSG(INIT, "Error when read block %d\n", bmt_block_index); ++ continue; ++ } */ ++ ++ if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) ++ { ++ MSG(INIT, "get bad index: 0x%x\n", bad_index); ++ if (bad_index != 0xFFFF) ++ MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index); ++ continue; ++ } ++ ++ MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index); ++ ++ if (!nand_block_bad_bmt(OFFSET(bad_index))) ++ { ++ MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index); ++ continue; // no need to erase here, it will be erased later when trying to write BMT ++ } ++ ++ if ((mapped = is_block_mapped(bad_index)) >= 0) ++ { ++ MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index); ++ bmt->table[mapped].mapped_index = index; // use new one instead. ++ } else ++ { ++ // add mapping to BMT ++ bmt->table[bmt->mapped_count].bad_index = bad_index; ++ bmt->table[bmt->mapped_count].mapped_index = index; ++ bmt->mapped_count++; ++ } ++ ++ MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index); ++ ++ } ++ ++ MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count); ++ // dump_bmt_info(bmt); ++ ++ // fill NAND BMT buffer ++ memset(oob_buf, 0xFF, sizeof(oob_buf)); ++ fill_nand_bmt_buffer(bmt, dat_buf, oob_buf); ++ ++ // write BMT back ++ if (!write_bmt_to_flash(dat_buf, oob_buf)) ++ { ++ MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n"); ++ } ++ ++ return bmt; ++} ++ ++/******************************************************************* ++* [BMT Interface] ++* ++* Description: ++* Init bmt from nand. 
Reconstruct if not found or data error ++* ++* Parameter: ++* size: size of bmt and replace pool ++* ++* Return: ++* NULL for failure, and a bmt struct for success ++*******************************************************************/ ++bmt_struct *init_bmt(struct nand_chip * chip, int size) ++{ ++ struct mtk_nand_host *host; ++ ++ if (size > 0 && size < MAX_BMT_SIZE) ++ { ++ MSG(INIT, "Init bmt table, size: %d\n", size); ++ bmt_block_count = size; ++ } else ++ { ++ MSG(INIT, "Invalid bmt table size: %d\n", size); ++ return NULL; ++ } ++ nand_chip_bmt = chip; ++ system_block_count = chip->chipsize >> chip->phys_erase_shift; ++ total_block_count = bmt_block_count + system_block_count; ++ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT; ++ host = (struct mtk_nand_host *)chip->priv; ++ mtd_bmt = host->mtd; ++ ++ MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt); ++ MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count); ++ ++ // set this flag, and unmapped block in pool will be erased. ++ pool_erased = 0; ++ memset(bmt.table, 0, size * sizeof(bmt_entry)); ++ if ((bmt_block_index = load_bmt_data(system_block_count, size))) ++ { ++ MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index); ++ dump_bmt_info(&bmt); ++ return &bmt; ++ } else ++ { ++ MSG(INIT, "Load bmt data fail, need re-construct!\n"); ++#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT. ++ if (reconstruct_bmt(&bmt)) ++ return &bmt; ++ else ++#endif ++ return NULL; ++ } ++} ++ ++/******************************************************************* ++* [BMT Interface] ++* ++* Description: ++* Update BMT. ++* ++* Parameter: ++* offset: update block/page offset. ++* reason: update reason, see update_reason_t for reason. ++* dat/oob: data and oob buffer for write fail. ++* ++* Return: ++* Return true for success, and false for failure. 
++*******************************************************************/ ++bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob) ++{ ++ int map_index; ++ int orig_bad_block = -1; ++ // int bmt_update_index; ++ int i; ++ int bad_index = offset / BLOCK_SIZE_BMT; ++ ++#ifndef MTK_NAND_BMT ++ return false; ++#endif ++ if (reason == UPDATE_WRITE_FAIL) ++ { ++ MSG(INIT, "Write fail, need to migrate\n"); ++ if (!(map_index = migrate_from_bad(offset, dat, oob))) ++ { ++ MSG(INIT, "migrate fail\n"); ++ return false; ++ } ++ } else ++ { ++ if (!(map_index = find_available_block(false))) ++ { ++ MSG(INIT, "Cannot find block in pool\n"); ++ return false; ++ } ++ } ++ ++ // now let's update BMT ++ if (bad_index >= system_block_count) // mapped block become bad, find original bad block ++ { ++ for (i = 0; i < bmt_block_count; i++) ++ { ++ if (bmt.table[i].mapped_index == bad_index) ++ { ++ orig_bad_block = bmt.table[i].bad_index; ++ break; ++ } ++ } ++ // bmt.bad_count++; ++ MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block); ++ ++ bmt.table[i].mapped_index = map_index; ++ } else ++ { ++ bmt.table[bmt.mapped_count].mapped_index = map_index; ++ bmt.table[bmt.mapped_count].bad_index = bad_index; ++ bmt.mapped_count++; ++ } ++ ++ memset(oob_buf, 0xFF, sizeof(oob_buf)); ++ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf); ++ if (!write_bmt_to_flash(dat_buf, oob_buf)) ++ return false; ++ ++ mark_block_bad_bmt(offset); ++ ++ return true; ++} ++ ++/******************************************************************* ++* [BMT Interface] ++* ++* Description: ++* Given an block index, return mapped index if it's mapped, else ++* return given index. ++* ++* Parameter: ++* index: given an block index. This value cannot exceed ++* system_block_count. 
++* ++* Return NULL for failure ++*******************************************************************/ ++u16 get_mapping_block_index(int index) ++{ ++ int i; ++#ifndef MTK_NAND_BMT ++ return index; ++#endif ++ if (index > system_block_count) ++ { ++ return index; ++ } ++ ++ for (i = 0; i < bmt.mapped_count; i++) ++ { ++ if (bmt.table[i].bad_index == index) ++ { ++ return bmt.table[i].mapped_index; ++ } ++ } ++ ++ return index; ++} ++#ifdef __KERNEL_NAND__ ++EXPORT_SYMBOL_GPL(init_bmt); ++EXPORT_SYMBOL_GPL(update_bmt); ++EXPORT_SYMBOL_GPL(get_mapping_block_index); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("MediaTek"); ++MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver"); ++#endif +--- /dev/null ++++ b/drivers/mtd/nand/bmt.h +@@ -0,0 +1,80 @@ ++#ifndef __BMT_H__ ++#define __BMT_H__ ++ ++#include "nand_def.h" ++ ++#if defined(__PRELOADER_NAND__) ++ ++#include "nand.h" ++ ++#elif defined(__UBOOT_NAND__) ++ ++#include <linux/mtd/nand.h> ++#include "mtk_nand2.h" ++ ++#elif defined(__KERNEL_NAND__) ++ ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/rawnand.h> ++#include <linux/module.h> ++#include "mtk_nand2.h" ++ ++#endif ++ ++ ++#define MAX_BMT_SIZE (0x80) ++#define BMT_VERSION (1) // initial version ++ ++#define MAIN_SIGNATURE_OFFSET (0) ++#define OOB_SIGNATURE_OFFSET (1) ++#define OOB_INDEX_OFFSET (29) ++#define OOB_INDEX_SIZE (2) ++#define FAKE_INDEX (0xAAAA) ++ ++typedef struct _bmt_entry_ ++{ ++ u16 bad_index; // bad block index ++ u16 mapped_index; // mapping block index in the replace pool ++} bmt_entry; ++ ++typedef enum ++{ ++ UPDATE_ERASE_FAIL, ++ UPDATE_WRITE_FAIL, ++ UPDATE_UNMAPPED_BLOCK, ++ UPDATE_REASON_COUNT, ++} update_reason_t; ++ ++typedef struct ++{ ++ bmt_entry table[MAX_BMT_SIZE]; ++ u8 version; ++ u8 mapped_count; // mapped block count in pool ++ u8 bad_count; // bad block count in pool. 
Not used in V1 ++} bmt_struct; ++ ++/*************************************************************** ++* * ++* Interface BMT need to use * ++* * ++***************************************************************/ ++extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob); ++extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs); ++extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page); ++extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs); ++extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob); ++ ++ ++/*************************************************************** ++* * ++* Different function interface for preloader/uboot/kernel * ++* * ++***************************************************************/ ++void set_bad_index_to_oob(u8 * oob, u16 index); ++ ++ ++bmt_struct *init_bmt(struct nand_chip *nand, int size); ++bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob); ++unsigned short get_mapping_block_index(int index); ++ ++#endif // #ifndef __BMT_H__ +--- /dev/null ++++ b/drivers/mtd/nand/dev-nand.c +@@ -0,0 +1,63 @@ ++#include <linux/init.h> ++#include <linux/kernel.h> ++#include <linux/platform_device.h> ++ ++#include "mt6575_typedefs.h" ++ ++#define RALINK_NAND_CTRL_BASE 0xBE003000 ++#define NFI_base RALINK_NAND_CTRL_BASE ++#define RALINK_NANDECC_CTRL_BASE 0xBE003800 ++#define NFIECC_base RALINK_NANDECC_CTRL_BASE ++#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND ++#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC ++ ++#define SURFBOARDINT_NAND 22 ++#define SURFBOARDINT_NAND_ECC 23 ++ ++static struct resource MT7621_resource_nand[] = { ++ { ++ .start = NFI_base, ++ .end = NFI_base + 0x1A0, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = NFIECC_base, ++ .end = NFIECC_base + 0x150, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = MT7621_NFI_IRQ_ID, ++ .flags = IORESOURCE_IRQ, ++ }, ++ { ++ .start = MT7621_NFIECC_IRQ_ID, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device MT7621_nand_dev = { ++ .name = "MT7621-NAND", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(MT7621_resource_nand), ++ .resource = MT7621_resource_nand, ++ .dev = { ++ .platform_data = &mt7621_nand_hw, ++ }, ++}; ++ ++ ++int __init mtk_nand_register(void) ++{ ++ ++ int retval = 0; ++ ++ retval = platform_device_register(&MT7621_nand_dev); ++ if (retval != 0) { ++ printk(KERN_ERR "register nand device fail\n"); ++ return retval; ++ } ++ ++ ++ return retval; ++} ++arch_initcall(mtk_nand_register); +--- /dev/null ++++ b/drivers/mtd/nand/mt6575_typedefs.h +@@ -0,0 +1,340 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. 
MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++/***************************************************************************** ++* Copyright Statement: ++* -------------------- ++* This software is protected by Copyright and the information contained ++* herein is confidential. The software may not be copied and the information ++* contained herein may not be used or disclosed except with the written ++* permission of MediaTek Inc. (C) 2008 ++* ++* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON ++* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH ++* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO ++* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S ++* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM. ++* ++* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE ++* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO ++* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++* ++* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE ++* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF ++* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND ++* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER ++* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC). 
++* ++*****************************************************************************/ ++ ++#ifndef _MT6575_TYPEDEFS_H ++#define _MT6575_TYPEDEFS_H ++ ++#if defined (__KERNEL_NAND__) ++#include <linux/bug.h> ++#else ++#define true 1 ++#define false 0 ++#define bool u8 ++#endif ++ ++// --------------------------------------------------------------------------- ++// Basic Type Definitions ++// --------------------------------------------------------------------------- ++ ++typedef volatile unsigned char *P_kal_uint8; ++typedef volatile unsigned short *P_kal_uint16; ++typedef volatile unsigned int *P_kal_uint32; ++ ++typedef long LONG; ++typedef unsigned char UBYTE; ++typedef short SHORT; ++ ++typedef signed char kal_int8; ++typedef signed short kal_int16; ++typedef signed int kal_int32; ++typedef long long kal_int64; ++typedef unsigned char kal_uint8; ++typedef unsigned short kal_uint16; ++typedef unsigned int kal_uint32; ++typedef unsigned long long kal_uint64; ++typedef char kal_char; ++ ++typedef unsigned int *UINT32P; ++typedef volatile unsigned short *UINT16P; ++typedef volatile unsigned char *UINT8P; ++typedef unsigned char *U8P; ++ ++typedef volatile unsigned char *P_U8; ++typedef volatile signed char *P_S8; ++typedef volatile unsigned short *P_U16; ++typedef volatile signed short *P_S16; ++typedef volatile unsigned int *P_U32; ++typedef volatile signed int *P_S32; ++typedef unsigned long long *P_U64; ++typedef signed long long *P_S64; ++ ++typedef unsigned char U8; ++typedef signed char S8; ++typedef unsigned short U16; ++typedef signed short S16; ++typedef unsigned int U32; ++typedef signed int S32; ++typedef unsigned long long U64; ++typedef signed long long S64; ++//typedef unsigned char bool; ++ ++typedef unsigned char UINT8; ++typedef unsigned short UINT16; ++typedef unsigned int UINT32; ++typedef unsigned short USHORT; ++typedef signed char INT8; ++typedef signed short INT16; ++typedef signed int INT32; ++typedef unsigned int DWORD; ++typedef void VOID; ++typedef unsigned char BYTE; ++typedef float FLOAT; ++ ++typedef char *LPCSTR; ++typedef short *LPWSTR; ++ ++ ++// --------------------------------------------------------------------------- ++// Constants ++// --------------------------------------------------------------------------- ++ ++#define IMPORT EXTERN ++#ifndef __cplusplus ++ #define EXTERN extern ++#else ++ #define EXTERN extern "C" ++#endif ++#define LOCAL static ++#define GLOBAL ++#define EXPORT GLOBAL ++ ++#define EQ == ++#define NEQ != ++#define AND && ++#define OR || ++#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B))) ++ ++#ifndef FALSE ++ #define FALSE (0) ++#endif ++ ++#ifndef TRUE ++ #define TRUE (1) ++#endif ++ ++#ifndef NULL ++ #define NULL (0) ++#endif ++ ++//enum boolean {false, true}; ++enum {RX, TX, NONE}; ++ ++#ifndef BOOL ++typedef unsigned char BOOL; ++#endif ++ ++typedef enum { ++ KAL_FALSE = 0, ++ KAL_TRUE = 1, ++} kal_bool; ++ ++ ++// --------------------------------------------------------------------------- ++// Type Casting ++// --------------------------------------------------------------------------- ++ ++#define AS_INT32(x) (*(INT32 *)((void*)x)) ++#define AS_INT16(x) (*(INT16 *)((void*)x)) ++#define AS_INT8(x) (*(INT8 *)((void*)x)) ++ ++#define AS_UINT32(x) (*(UINT32 *)((void*)x)) ++#define AS_UINT16(x) (*(UINT16 *)((void*)x)) ++#define AS_UINT8(x) (*(UINT8 *)((void*)x)) ++ ++ ++// --------------------------------------------------------------------------- ++// Register Manipulations ++// 
--------------------------------------------------------------------------- ++ ++#define READ_REGISTER_UINT32(reg) \ ++ (*(volatile UINT32 * const)(reg)) ++ ++#define WRITE_REGISTER_UINT32(reg, val) \ ++ (*(volatile UINT32 * const)(reg)) = (val) ++ ++#define READ_REGISTER_UINT16(reg) \ ++ (*(volatile UINT16 * const)(reg)) ++ ++#define WRITE_REGISTER_UINT16(reg, val) \ ++ (*(volatile UINT16 * const)(reg)) = (val) ++ ++#define READ_REGISTER_UINT8(reg) \ ++ (*(volatile UINT8 * const)(reg)) ++ ++#define WRITE_REGISTER_UINT8(reg, val) \ ++ (*(volatile UINT8 * const)(reg)) = (val) ++ ++#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x))) ++#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y)) ++#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y)) ++#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y)) ++#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z)) ++ ++#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x))) ++#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y)) ++#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y)) ++#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y)) ++#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z)) ++ ++#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x))) ++#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y)) ++#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y)) ++#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y)) ++#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z)) ++ ++ ++#define DRV_Reg8(addr) INREG8(addr) ++#define DRV_WriteReg8(addr, data) OUTREG8(addr, data) ++#define DRV_SetReg8(addr, data) SETREG8(addr, data) ++#define DRV_ClrReg8(addr, data) CLRREG8(addr, data) ++ ++#define DRV_Reg16(addr) INREG16(addr) ++#define DRV_WriteReg16(addr, data) OUTREG16(addr, data) ++#define DRV_SetReg16(addr, data) SETREG16(addr, data) ++#define DRV_ClrReg16(addr, data) CLRREG16(addr, data) ++ ++#define DRV_Reg32(addr) INREG32(addr) ++#define DRV_WriteReg32(addr, data) OUTREG32(addr, data) ++#define DRV_SetReg32(addr, data) SETREG32(addr, data) ++#define DRV_ClrReg32(addr, data) CLRREG32(addr, data) ++ ++// !!! DEPRECATED, WILL BE REMOVED LATER !!! ++#define DRV_Reg(addr) DRV_Reg16(addr) ++#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data) ++#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data) ++#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data) ++ ++ ++// --------------------------------------------------------------------------- ++// Compiler Time Deduction Macros ++// --------------------------------------------------------------------------- ++ ++#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? 
(n) : ++#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1) ++#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2) ++#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4) ++#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8) ++#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16) ++ ++#define MASK_OFFSET_ERROR (0xFFFFFFFF) ++ ++#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR) ++ ++ ++// --------------------------------------------------------------------------- ++// Assertions ++// --------------------------------------------------------------------------- ++ ++#ifndef ASSERT ++ #define ASSERT(expr) BUG_ON(!(expr)) ++#endif ++ ++#ifndef NOT_IMPLEMENTED ++ #define NOT_IMPLEMENTED() BUG_ON(1) ++#endif ++ ++#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__) ++#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line) ++#define STATIC_ASSERT_XX(pred, line) \ ++ extern char assertion_failed_at_##line[(pred) ? 1 : -1] ++ ++// --------------------------------------------------------------------------- ++// Resolve Compiler Warnings ++// --------------------------------------------------------------------------- ++ ++#define NOT_REFERENCED(x) { (x) = (x); } ++ ++ ++// --------------------------------------------------------------------------- ++// Utilities ++// --------------------------------------------------------------------------- ++ ++#define MAXIMUM(A,B) (((A)>(B))?(A):(B)) ++#define MINIMUM(A,B) (((A)<(B))?(A):(B)) ++ ++#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0]))) ++#define DVT_DELAYMACRO(u4Num) \ ++{ \ ++ UINT32 u4Count = 0 ; \ ++ for (u4Count = 0; u4Count < u4Num; u4Count++ ); \ ++} \ ++ ++#define A68351B 0 ++#define B68351B 1 ++#define B68351D 2 ++#define B68351E 3 ++#define UNKNOWN_IC_VERSION 0xFF ++ ++/* NAND driver */ ++struct mtk_nand_host_hw { ++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ ++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ ++ unsigned int nfi_cs_num; /* NFI_CS_NUM */ ++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ ++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ ++ unsigned int nand_ecc_size; ++ unsigned int nand_ecc_bytes; ++ unsigned int nand_ecc_mode; ++}; ++extern struct mtk_nand_host_hw mt7621_nand_hw; ++extern unsigned int CFG_BLOCKSIZE; ++ ++#endif // _MT6575_TYPEDEFS_H ++ +--- /dev/null ++++ b/drivers/mtd/nand/mtk_nand2.c +@@ -0,0 +1,2363 @@ ++/****************************************************************************** ++* mtk_nand2.c - MTK NAND Flash Device Driver ++ * ++* Copyright 2009-2012 MediaTek Co.,Ltd. 
++ * ++* DESCRIPTION: ++* This file provides NAND-related functions for other drivers ++ * ++* modification history ++* ---------------------------------------- ++* v3.0, 11 Feb 2010, mtk ++* ---------------------------------------- ++******************************************************************************/ ++#include "nand_def.h" ++#include <linux/slab.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/delay.h> ++#include <linux/errno.h> ++#include <linux/sched.h> ++#include <linux/types.h> ++#include <linux/wait.h> ++#include <linux/spinlock.h> ++#include <linux/interrupt.h> ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/rawnand.h> ++#include <linux/mtd/partitions.h> ++#include <linux/mtd/nand_ecc.h> ++#include <linux/dma-mapping.h> ++#include <linux/jiffies.h> ++#include <linux/platform_device.h> ++#include <linux/proc_fs.h> ++#include <linux/time.h> ++#include <linux/mm.h> ++#include <asm/io.h> ++#include <asm/cacheflush.h> ++#include <asm/uaccess.h> ++#include <linux/miscdevice.h> ++#include "mtk_nand2.h" ++#include "nand_device_list.h" ++ ++#include "bmt.h" ++#include "partition.h" ++ ++unsigned int CFG_BLOCKSIZE; ++ ++static int shift_on_bbt = 0; ++extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag); ++extern int nand_bbt_get(struct mtd_info *mtd, int page); ++int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page); ++ ++static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL }; ++ ++#define NAND_CMD_STATUS_MULTI 0x71 ++ ++void show_stack(struct task_struct *tsk, unsigned long *sp); ++extern void mt_irq_set_sens(unsigned int irq, unsigned int sens); ++extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity); ++ ++struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */ ++struct mtk_nand_host_hw mt7621_nand_hw = { ++ .nfi_bus_width = 8, ++ .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING, ++ .nfi_cs_num = NFI_CS_NUM, ++ .nand_sec_size = 512, ++ .nand_sec_shift = 9, ++ .nand_ecc_size = 2048, ++ .nand_ecc_bytes = 32, ++ .nand_ecc_mode = NAND_ECC_HW, ++}; ++ ++ ++/******************************************************************************* ++ * Global Variable Definition ++ *******************************************************************************/ ++ ++#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \ ++ do { \ ++ DRV_WriteReg(NFI_CMD_REG16,cmd);\ ++ while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\ ++ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\ ++ DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\ ++ DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<<ADDR_ROW_NOB_SHIFT));\ ++ while (DRV_Reg32(NFI_STA_REG32) & STA_ADDR_STATE);\ ++ }while(0); ++ ++//------------------------------------------------------------------------------- ++static struct NAND_CMD g_kCMD; ++static u32 g_u4ChipVer; ++bool g_bInitDone; ++static bool g_bcmdstatus; ++static u32 g_value = 0; ++static int g_page_size; ++ ++BOOL g_bHwEcc = true; ++ ++ ++static u8 *local_buffer_16_align; // 16 byte aligned buffer, for HW issue ++static u8 local_buffer[4096 + 512]; ++ ++extern void nand_release_device(struct mtd_info *mtd); ++extern int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state); ++ ++#if defined(MTK_NAND_BMT) ++static bmt_struct *g_bmt; ++#endif ++struct mtk_nand_host *host; ++extern struct mtd_partition g_pasStatic_Partition[]; ++int part_num = NUM_PARTITIONS; ++int manu_id; ++int dev_id; ++ ++/* this constant was taken from 
linux/nand/nand.h v 3.14 ++ * in later versions it seems it was removed in order to save a bit of space ++ */ ++#define NAND_MAX_OOBSIZE 774 ++static u8 local_oob_buf[NAND_MAX_OOBSIZE]; ++ ++static u8 nand_badblock_offset = 0; ++ ++void nand_enable_clock(void) ++{ ++ //enable_clock(MT65XX_PDN_PERI_NFI, "NAND"); ++} ++ ++void nand_disable_clock(void) ++{ ++ //disable_clock(MT65XX_PDN_PERI_NFI, "NAND"); ++} ++ ++struct nand_ecclayout { ++ __u32 eccbytes; ++ __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; ++ __u32 oobavail; ++ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; ++}; ++ ++static struct nand_ecclayout *layout; ++ ++static struct nand_ecclayout nand_oob_16 = { ++ .eccbytes = 8, ++ .eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, ++ .oobfree = {{1, 6}, {0, 0}} ++}; ++ ++struct nand_ecclayout nand_oob_64 = { ++ .eccbytes = 32, ++ .eccpos = {32, 33, 34, 35, 36, 37, 38, 39, ++ 40, 41, 42, 43, 44, 45, 46, 47, ++ 48, 49, 50, 51, 52, 53, 54, 55, ++ 56, 57, 58, 59, 60, 61, 62, 63}, ++ .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 6}, {0, 0}} ++}; ++ ++struct nand_ecclayout nand_oob_128 = { ++ .eccbytes = 64, ++ .eccpos = { ++ 64, 65, 66, 67, 68, 69, 70, 71, ++ 72, 73, 74, 75, 76, 77, 78, 79, ++ 80, 81, 82, 83, 84, 85, 86, 87, ++ 88, 89, 90, 91, 92, 93, 94, 95, ++ 96, 97, 98, 99, 100, 101, 102, 103, ++ 104, 105, 106, 107, 108, 109, 110, 111, ++ 112, 113, 114, 115, 116, 117, 118, 119, ++ 120, 121, 122, 123, 124, 125, 126, 127}, ++ .oobfree = {{1, 7}, {9, 7}, {17, 7}, {25, 7}, {33, 7}, {41, 7}, {49, 7}, {57, 6}} ++}; ++ ++flashdev_info devinfo; ++ ++void dump_nfi(void) ++{ ++} ++ ++void dump_ecc(void) ++{ ++} ++ ++u32 ++nand_virt_to_phys_add(u32 va) ++{ ++ u32 pageOffset = (va & (PAGE_SIZE - 1)); ++ pgd_t *pgd; ++ pmd_t *pmd; ++ pte_t *pte; ++ u32 pa; ++ ++ if (virt_addr_valid(va)) ++ return __virt_to_phys(va); ++ ++ if (NULL == current) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, current is NULL! \n"); ++ return 0; ++ } ++ ++ if (NULL == current->mm) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm); ++ return 0; ++ } ++ ++ pgd = pgd_offset(current->mm, va); /* what is tsk->mm */ ++ if (pgd_none(*pgd) || pgd_bad(*pgd)) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va); ++ return 0; ++ } ++ ++ pmd = pmd_offset((pud_t *)pgd, va); ++ if (pmd_none(*pmd) || pmd_bad(*pmd)) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va); ++ return 0; ++ } ++ ++ pte = pte_offset_map(pmd, va); ++ if (pte_present(*pte)) { ++ pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset; ++ return pa; ++ } ++ ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! 
\n", va); ++ return 0; ++} ++EXPORT_SYMBOL(nand_virt_to_phys_add); ++ ++bool ++get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo) ++{ ++ u32 index; ++ for (index = 0; gen_FlashTable[index].id != 0; index++) { ++ if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) { ++ pdevinfo->id = gen_FlashTable[index].id; ++ pdevinfo->ext_id = gen_FlashTable[index].ext_id; ++ pdevinfo->blocksize = gen_FlashTable[index].blocksize; ++ pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle; ++ pdevinfo->iowidth = gen_FlashTable[index].iowidth; ++ pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting; ++ pdevinfo->advancedmode = gen_FlashTable[index].advancedmode; ++ pdevinfo->pagesize = gen_FlashTable[index].pagesize; ++ pdevinfo->sparesize = gen_FlashTable[index].sparesize; ++ pdevinfo->totalsize = gen_FlashTable[index].totalsize; ++ memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename)); ++ printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id); ++ ++ goto find; ++ } ++ } ++ ++find: ++ if (0 == pdevinfo->id) { ++ printk(KERN_INFO "Device not found, ID: %x\n", id); ++ return false; ++ } else { ++ return true; ++ } ++} ++ ++static void ++ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit) ++{ ++ u32 u4ENCODESize; ++ u32 u4DECODESize; ++ u32 ecc_bit_cfg = ECC_CNFG_ECC4; ++ ++ switch(ecc_bit){ ++ case 4: ++ ecc_bit_cfg = ECC_CNFG_ECC4; ++ break; ++ case 8: ++ ecc_bit_cfg = ECC_CNFG_ECC8; ++ break; ++ case 10: ++ ecc_bit_cfg = ECC_CNFG_ECC10; ++ break; ++ case 12: ++ ecc_bit_cfg = ECC_CNFG_ECC12; ++ break; ++ default: ++ break; ++ } ++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); ++ do { ++ } while (!DRV_Reg16(ECC_DECIDLE_REG16)); ++ ++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); ++ do { ++ } while (!DRV_Reg32(ECC_ENCIDLE_REG32)); ++ ++ /* setup FDM register base */ ++ DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32); ++ ++ /* Sector + FDM */ ++ u4ENCODESize = (hw->nand_sec_size + 8) << 3; ++ /* Sector + FDM + YAFFS2 meta data bits */ ++ u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13; ++ ++ /* configure ECC decoder && encoder */ ++ DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT)); ++ ++ DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT)); ++ NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL); ++} ++ ++static void ++ECC_Decode_Start(void) ++{ ++ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) ++ ; ++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN); ++} ++ ++static void ++ECC_Decode_End(void) ++{ ++ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) ++ ; ++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); ++} ++ ++static void ++ECC_Encode_Start(void) ++{ ++ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ++ ; ++ mb(); ++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN); ++} ++ ++static void ++ECC_Encode_End(void) ++{ ++ /* wait for device returning idle */ ++ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ; ++ mb(); ++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); ++} ++ ++static bool ++mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr) ++{ ++ bool bRet = true; ++ u16 u2SectorDoneMask = 1 << u4SecIndex; ++ u32 u4ErrorNumDebug, i, u4ErrNum; ++ u32 timeout = 0xFFFF; ++ // int el; ++ u32 au4ErrBitLoc[6]; ++ u32 u4ErrByteLoc, u4BitOffset; ++ u32 u4ErrBitLoc1th, u4ErrBitLoc2nd; ++ ++ //4 // Wait for Decode Done ++ while (0 == (u2SectorDoneMask & 
DRV_Reg16(ECC_DECDONE_REG16))) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ /* We will manually correct the error bits in the last sector, not all the sectors of the page! */ ++ memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc)); ++ u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32); ++ u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2); ++ u4ErrNum &= 0xF; ++ ++ if (u4ErrNum) { ++ if (0xF == u4ErrNum) { ++ mtd->ecc_stats.failed++; ++ bRet = false; ++ printk(KERN_ERR"mtk_nand: UnCorrectable at PageAddr=%d\n", u4PageAddr); ++ } else { ++ for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) { ++ au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i); ++ u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF; ++ if (u4ErrBitLoc1th < 0x1000) { ++ u4ErrByteLoc = u4ErrBitLoc1th / 8; ++ u4BitOffset = u4ErrBitLoc1th % 8; ++ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset); ++ mtd->ecc_stats.corrected++; ++ } else { ++ mtd->ecc_stats.failed++; ++ } ++ u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF; ++ if (0 != u4ErrBitLoc2nd) { ++ if (u4ErrBitLoc2nd < 0x1000) { ++ u4ErrByteLoc = u4ErrBitLoc2nd / 8; ++ u4BitOffset = u4ErrBitLoc2nd % 8; ++ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset); ++ mtd->ecc_stats.corrected++; ++ } else { ++ mtd->ecc_stats.failed++; ++ //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]); ++ } ++ } ++ } ++ } ++ if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex))) ++ bRet = false; ++ } ++ return bRet; ++} ++ ++static bool ++mtk_nand_RFIFOValidSize(u16 u2Size) ++{ ++ u32 timeout = 0xFFFF; ++ while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_WFIFOValidSize(u16 u2Size) ++{ ++ u32 timeout = 0xFFFF; ++ ++ while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_status_ready(u32 u4Status) ++{ ++ u32 timeout = 0xFFFF; ++ ++ while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_reset(void) ++{ ++ int timeout = 0xFFFF; ++ if (DRV_Reg16(NFI_MASTERSTA_REG16)) { ++ mb(); ++ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST); ++ while (DRV_Reg16(NFI_MASTERSTA_REG16)) { ++ timeout--; ++ if (!timeout) ++ MSG(INIT, "Wait for NFI_MASTERSTA timeout\n"); ++ } ++ } ++ /* issue reset operation */ ++ mb(); ++ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST); ++ ++ return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0); ++} ++ ++static void ++mtk_nand_set_mode(u16 u2OpMode) ++{ ++ u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16); ++ u2Mode &= ~CNFG_OP_MODE_MASK; ++ u2Mode |= u2OpMode; ++ DRV_WriteReg16(NFI_CNFG_REG16, u2Mode); ++} ++ ++static void ++mtk_nand_set_autoformat(bool bEnable) ++{ ++ if (bEnable) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN); ++} ++ ++static void ++mtk_nand_configure_fdm(u16 u2FDMSize) ++{ ++ NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK); ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT); ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT); ++} ++ ++static void ++mtk_nand_configure_lock(void) ++{ ++ u32 u4WriteColNOB = 2; ++ u32 u4WriteRowNOB = 3; ++ u32 u4EraseColNOB = 0; ++ u32 
u4EraseRowNOB = 3; ++ DRV_WriteReg16(NFI_LOCKANOB_REG16, ++ (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT)); ++ ++ if (CHIPVER_ECO_1 == g_u4ChipVer) { ++ int i; ++ for (i = 0; i < 16; ++i) { ++ DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF); ++ DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF); ++ } ++ //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0); ++ DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF); ++ DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON); ++ } ++} ++ ++static bool ++mtk_nand_pio_ready(void) ++{ ++ int count = 0; ++ while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) { ++ count++; ++ if (count > 0xffff) { ++ printk("PIO_DIRDY timeout\n"); ++ return false; ++ } ++ } ++ ++ return true; ++} ++ ++static bool ++mtk_nand_set_command(u16 command) ++{ ++ mb(); ++ DRV_WriteReg16(NFI_CMD_REG16, command); ++ return mtk_nand_status_ready(STA_CMD_STATE); ++} ++ ++static bool ++mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB) ++{ ++ mb(); ++ DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr); ++ DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr); ++ DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT)); ++ return mtk_nand_status_ready(STA_ADDR_STATE); ++} ++ ++static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) ++{ ++ if (ctrl & NAND_ALE) { ++ mtk_nand_set_address(dat, 0, 1, 0); ++ } else if (ctrl & NAND_CLE) { ++ mtk_nand_reset(); ++ mtk_nand_set_mode(0x6000); ++ mtk_nand_set_command(dat); ++ } ++} ++ ++static bool ++mtk_nand_check_RW_count(u16 u2WriteSize) ++{ ++ u32 timeout = 0xFFFF; ++ u16 u2SecNum = u2WriteSize >> 9; ++ ++ while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) { ++ timeout--; ++ if (0 == timeout) { ++ printk(KERN_INFO "[%s] timeout\n", __FUNCTION__); ++ return false; ++ } ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf) ++{ ++ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */ ++ bool bRet = false; ++ u16 sec_num = 1 << (nand->page_shift - 9); ++ u32 col_addr = u4ColAddr; ++ u32 colnob = 2, rownob = devinfo.addr_cycle - 2; ++ if (nand->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ ++ if (!mtk_nand_reset()) ++ goto cleanup; ++ if (g_bHwEcc) { ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } ++ ++ mtk_nand_set_mode(CNFG_OP_READ); ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN); ++ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT); ++ ++ if (full) { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ ++ if (g_bHwEcc) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ } ++ ++ mtk_nand_set_autoformat(full); ++ if (full) ++ if (g_bHwEcc) ++ ECC_Decode_Start(); ++ if (!mtk_nand_set_command(NAND_CMD_READ0)) ++ goto cleanup; ++ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob)) ++ goto cleanup; ++ if (!mtk_nand_set_command(NAND_CMD_READSTART)) ++ goto cleanup; ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto cleanup; ++ ++ bRet = true; ++ ++cleanup: ++ return bRet; ++} ++ ++static bool ++mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf) ++{ ++ bool bRet = false; ++ u32 
sec_num = 1 << (nand->page_shift - 9); ++ u32 colnob = 2, rownob = devinfo.addr_cycle - 2; ++ if (nand->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ ++ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */ ++ if (!mtk_nand_reset()) ++ return false; ++ ++ mtk_nand_set_mode(CNFG_OP_PRGM); ++ ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN); ++ ++ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT); ++ ++ if (full) { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ if (g_bHwEcc) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ } ++ ++ mtk_nand_set_autoformat(full); ++ ++ if (full) ++ if (g_bHwEcc) ++ ECC_Encode_Start(); ++ ++ if (!mtk_nand_set_command(NAND_CMD_SEQIN)) ++ goto cleanup; ++ //1 FIXED ME: For Any Kind of AddrCycle ++ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob)) ++ goto cleanup; ++ ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto cleanup; ++ ++ bRet = true; ++ ++cleanup: ++ return bRet; ++} ++ ++static bool ++mtk_nand_check_dececc_done(u32 u4SecNum) ++{ ++ u32 timeout, dec_mask; ++ ++ timeout = 0xffff; ++ dec_mask = (1 << u4SecNum) - 1; ++ while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0) ++ timeout--; ++ if (timeout == 0) { ++ MSG(VERIFY, "ECC_DECDONE: timeout\n"); ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_mcu_read_data(u8 * buf, u32 length) ++{ ++ int timeout = 0xffff; ++ u32 i; ++ u32 *buf32 = (u32 *) buf; ++ if ((u32) buf % 4 || length % 4) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ ++ //DRV_WriteReg32(NFI_STRADDR_REG32, 0); ++ mb(); ++ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD); ++ ++ if ((u32) buf % 4 || length % 4) { ++ for (i = 0; (i < (length)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } else { ++ for (i = 0; (i < (length >> 2)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ *buf32++ = DRV_Reg32(NFI_DATAR_REG32); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size) ++{ ++ return mtk_nand_mcu_read_data(pDataBuf, u4Size); ++} ++ ++static bool ++mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length) ++{ ++ u32 timeout = 0xFFFF; ++ u32 i; ++ u32 *pBuf32; ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ mb(); ++ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR); ++ pBuf32 = (u32 *) buf; ++ ++ if ((u32) buf % 4 || length % 4) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ ++ if ((u32) buf % 4 || length % 4) { ++ for (i = 0; (i < (length)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ DRV_WriteReg32(NFI_DATAW_REG32, *buf++); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } else { ++ for (i = 0; (i < (length >> 2)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ 
DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } ++ ++ return true; ++} ++ ++static bool ++mtk_nand_write_page_data(struct mtd_info *mtd, u8 * buf, u32 size) ++{ ++ return mtk_nand_mcu_write_data(mtd, buf, size); ++} ++ ++static void ++mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum) ++{ ++ u32 i; ++ u32 *pBuf32 = (u32 *) pDataBuf; ++ ++ if (pBuf32) { ++ for (i = 0; i < u4SecNum; ++i) { ++ *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1)); ++ *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1)); ++ } ++ } ++} ++ ++static u8 fdm_buf[64]; ++static void ++mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum) ++{ ++ u32 i, j; ++ u8 checksum = 0; ++ bool empty = true; ++ struct nand_oobfree *free_entry; ++ u32 *pBuf32; ++ ++ memcpy(fdm_buf, pDataBuf, u4SecNum * 8); ++ ++ free_entry = layout->oobfree; ++ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) { ++ for (j = 0; j < free_entry[i].length; j++) { ++ if (pDataBuf[free_entry[i].offset + j] != 0xFF) ++ empty = false; ++ checksum ^= pDataBuf[free_entry[i].offset + j]; ++ } ++ } ++ ++ if (!empty) { ++ fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum; ++ } ++ ++ pBuf32 = (u32 *) fdm_buf; ++ for (i = 0; i < u4SecNum; ++i) { ++ DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++); ++ DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++); ++ } ++} ++ ++static void ++mtk_nand_stop_read(void) ++{ ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD); ++ mtk_nand_reset(); ++ if (g_bHwEcc) ++ ECC_Decode_End(); ++ DRV_WriteReg16(NFI_INTR_EN_REG16, 0); ++} ++ ++static void ++mtk_nand_stop_write(void) ++{ ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR); ++ if (g_bHwEcc) ++ ECC_Encode_End(); ++ DRV_WriteReg16(NFI_INTR_EN_REG16, 0); ++} ++ ++bool ++mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf) ++{ ++ u8 *buf; ++ bool bRet = true; ++ struct nand_chip *nand = mtd->priv; ++ u32 u4SecNum = u4PageSize >> 9; ++ ++ if (((u32) pPageBuf % 16) && local_buffer_16_align) ++ buf = local_buffer_16_align; ++ else ++ buf = pPageBuf; ++ if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) { ++ int j; ++ for (j = 0 ; j < u4SecNum; j++) { ++ if (!mtk_nand_read_page_data(mtd, buf+j*512, 512)) ++ bRet = false; ++ if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1)) ++ bRet = false; ++ if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr)) ++ bRet = false; ++ } ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ bRet = false; ++ ++ mtk_nand_read_fdm_data(pFDMBuf, u4SecNum); ++ mtk_nand_stop_read(); ++ } ++ ++ if (buf == local_buffer_16_align) ++ memcpy(pPageBuf, buf, u4PageSize); ++ ++ return bRet; ++} ++ ++int ++mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf) ++{ ++ struct nand_chip *chip = mtd->priv; ++ u32 u4SecNum = u4PageSize >> 9; ++ u8 *buf; ++ u8 status; ++ ++ MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr); ++ ++ if (((u32) pPageBuf % 16) && local_buffer_16_align) { ++ printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf); ++ memcpy(local_buffer_16_align, pPageBuf, mtd->writesize); ++ buf = local_buffer_16_align; ++ } else ++ buf = pPageBuf; ++ ++ if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) { ++ mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum); ++ (void)mtk_nand_write_page_data(mtd, 
buf, u4PageSize); ++ (void)mtk_nand_check_RW_count(u4PageSize); ++ mtk_nand_stop_write(); ++ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ; ++ } ++ ++ status = chip->waitfunc(mtd, chip); ++ if (status & NAND_STATUS_FAIL) ++ return -EIO; ++ return 0; ++} ++ ++static int ++get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int i; ++ ++ *start_blk = 0; ++ for (i = 0; i <= part_num; i++) ++ { ++ if (i == part_num) ++ { ++ // try the last reset partition ++ *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1; ++ if (*start_blk <= *end_blk) ++ { ++ if ((block >= *start_blk) && (block <= *end_blk)) ++ break; ++ } ++ } ++ // skip All partition entry ++ else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL) ++ { ++ continue; ++ } ++ *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1; ++ if ((block >= *start_blk) && (block <= *end_blk)) ++ break; ++ *start_blk = *end_blk + 1; ++ } ++ if (*start_blk > *end_blk) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++static int ++block_remap(struct mtd_info *mtd, int block) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int start_blk, end_blk; ++ int j, block_offset; ++ int bad_block = 0; ++ ++ if (chip->bbt == NULL) { ++ printk("ERROR!! no bbt table for block_remap\n"); ++ return -1; ++ } ++ ++ if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) { ++ printk("ERROR!! can not find start_blk and end_blk\n"); ++ return -1; ++ } ++ ++ block_offset = block - start_blk; ++ for (j = start_blk; j <= end_blk;j++) { ++ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) { ++ if (!block_offset) ++ break; ++ block_offset--; ++ } else { ++ bad_block++; ++ } ++ } ++ if (j <= end_blk) { ++ return j; ++ } else { ++ // remap to the bad block ++ for (j = end_blk; bad_block > 0; j--) ++ { ++ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0) ++ { ++ bad_block--; ++ if (bad_block <= block_offset) ++ return j; ++ } ++ } ++ } ++ ++ printk("Error!! 
block_remap error\n"); ++ return -1; ++} ++ ++int ++check_block_remap(struct mtd_info *mtd, int block) ++{ ++ if (shift_on_bbt) ++ return block_remap(mtd, block); ++ else ++ return block; ++} ++EXPORT_SYMBOL(check_block_remap); ++ ++ ++static int ++write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int i, j, to_page = 0, first_page; ++ char *buf, *oob; ++ int start_blk = 0, end_blk; ++ int mapped_block; ++ int page_per_block_bit = chip->phys_erase_shift - chip->page_shift; ++ int block = page >> page_per_block_bit; ++ ++ // find next available block in the same MTD partition ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ ++ get_start_end_block(mtd, block, &start_blk, &end_blk); ++ ++ buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA); ++ if (buf == NULL) ++ return -1; ++ ++ oob = buf + mtd->writesize; ++ for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) { ++ if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) { ++ int status; ++ status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit); ++ if (status & NAND_STATUS_FAIL) { ++ mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift); ++ nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3); ++ } else { ++ /* good block */ ++ to_page = (*to_blk) << page_per_block_bit; ++ break; ++ } ++ } ++ } ++ ++ if (!to_page) { ++ kfree(buf); ++ return -1; ++ } ++ ++ first_page = (page >> page_per_block_bit) << page_per_block_bit; ++ for (i = 0; i < (1 << page_per_block_bit); i++) { ++ if ((first_page + i) != page) { ++ mtk_nand_read_oob_hw(mtd, chip, (first_page+i)); ++ for (j = 0; j < mtd->oobsize; j++) ++ if (chip->oob_poi[j] != (unsigned char)0xff) ++ break; ++ if (j < mtd->oobsize) { ++ mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob); ++ memset(oob, 0xff, mtd->oobsize); ++ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) { ++ int ret, new_blk = 0; ++ nand_bbt_set(mtd, to_page, 0x3); ++ ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk); ++ if (ret) { ++ kfree(buf); ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ return ret; ++ } ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ *to_blk = new_blk; ++ to_page = ((*to_blk) << page_per_block_bit); ++ } ++ } ++ } else { ++ memset(chip->oob_poi, 0xff, mtd->oobsize); ++ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) { ++ int ret, new_blk = 0; ++ nand_bbt_set(mtd, to_page, 0x3); ++ ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk); ++ if (ret) { ++ kfree(buf); ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ return ret; ++ } ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ *to_blk = new_blk; ++ to_page = ((*to_blk) << page_per_block_bit); ++ } ++ } ++ } ++ ++ kfree(buf); ++ ++ return 0; ++} ++ ++static int ++mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset, ++ int data_len, const u8 * buf, int oob_required, int page, int raw) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ // write bad index into oob ++ if (mapped_block != block) ++ 
set_bad_index_to_oob(chip->oob_poi, block); ++ else ++ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ do { ++ if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) { ++ MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block); ++#if defined(MTK_NAND_BMT) ++ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) { ++ MSG(INIT, "Update BMT success\n"); ++ return 0; ++ } else { ++ MSG(INIT, "Update BMT fail\n"); ++ return -EIO; ++ } ++#else ++ { ++ int new_blk; ++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); ++ if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0) ++ { ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ return NAND_STATUS_FAIL; ++ } ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ break; ++ } ++#endif ++ } else ++ break; ++ } while(1); ++ ++ return 0; ++} ++ ++static void ++mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr) ++{ ++ struct nand_chip *nand = mtd->priv; ++ ++ switch (command) { ++ case NAND_CMD_SEQIN: ++ memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB)); ++ g_kCMD.pDataBuf = NULL; ++ g_kCMD.u4RowAddr = page_addr; ++ g_kCMD.u4ColAddr = column; ++ break; ++ ++ case NAND_CMD_PAGEPROG: ++ if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) { ++ u8 *pDataBuf = g_kCMD.pDataBuf ? 
g_kCMD.pDataBuf : nand->buffers->databuf; ++ mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB); ++ g_kCMD.u4RowAddr = (u32) - 1; ++ g_kCMD.u4OOBRowAddr = (u32) - 1; ++ } ++ break; ++ ++ case NAND_CMD_READOOB: ++ g_kCMD.u4RowAddr = page_addr; ++ g_kCMD.u4ColAddr = column + mtd->writesize; ++ break; ++ ++ case NAND_CMD_READ0: ++ g_kCMD.u4RowAddr = page_addr; ++ g_kCMD.u4ColAddr = column; ++ break; ++ ++ case NAND_CMD_ERASE1: ++ nand->state=FL_ERASING; ++ (void)mtk_nand_reset(); ++ mtk_nand_set_mode(CNFG_OP_ERASE); ++ (void)mtk_nand_set_command(NAND_CMD_ERASE1); ++ (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2); ++ break; ++ ++ case NAND_CMD_ERASE2: ++ (void)mtk_nand_set_command(NAND_CMD_ERASE2); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ++ ; ++ break; ++ ++ case NAND_CMD_STATUS: ++ (void)mtk_nand_reset(); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ mtk_nand_set_mode(CNFG_OP_SRD); ++ mtk_nand_set_mode(CNFG_READ_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ (void)mtk_nand_set_command(NAND_CMD_STATUS); ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK); ++ mb(); ++ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT)); ++ g_bcmdstatus = true; ++ break; ++ ++ case NAND_CMD_RESET: ++ (void)mtk_nand_reset(); ++ DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN); ++ (void)mtk_nand_set_command(NAND_CMD_RESET); ++ DRV_WriteReg16(NFI_BASE+0x44, 0xF1); ++ while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN)) ++ ; ++ break; ++ ++ case NAND_CMD_READID: ++ mtk_nand_reset(); ++ /* Disable HW ECC */ ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW); ++ (void)mtk_nand_reset(); ++ mb(); ++ mtk_nand_set_mode(CNFG_OP_SRD); ++ (void)mtk_nand_set_command(NAND_CMD_READID); ++ (void)mtk_nand_set_address(0, 0, 1, 0); ++ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE) ++ ; ++ break; ++ ++ default: ++ BUG(); ++ break; ++ } ++} ++ ++static void ++mtk_nand_select_chip(struct mtd_info *mtd, int chip) ++{ ++ if ((chip == -1) && (false == g_bInitDone)) { ++ struct nand_chip *nand = mtd->priv; ++ struct mtk_nand_host *host = nand->priv; ++ struct mtk_nand_host_hw *hw = host->hw; ++ u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512); ++ u32 ecc_bit = 4; ++ u32 spare_bit = PAGEFMT_SPARE_16; ++ ++ if (spare_per_sector >= 28) { ++ spare_bit = PAGEFMT_SPARE_28; ++ ecc_bit = 12; ++ spare_per_sector = 28; ++ } else if (spare_per_sector >= 27) { ++ spare_bit = PAGEFMT_SPARE_27; ++ ecc_bit = 8; ++ spare_per_sector = 27; ++ } else if (spare_per_sector >= 26) { ++ spare_bit = PAGEFMT_SPARE_26; ++ ecc_bit = 8; ++ spare_per_sector = 26; ++ } else if (spare_per_sector >= 16) { ++ spare_bit = PAGEFMT_SPARE_16; ++ ecc_bit = 4; ++ spare_per_sector = 16; ++ } else { ++ MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector); ++ ASSERT(0); ++ } ++ mtd->oobsize = spare_per_sector*(mtd->writesize/512); ++ MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector); ++ /* Setup PageFormat */ ++ if (4096 == mtd->writesize) { ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K); ++ nand->cmdfunc = mtk_nand_command_bp; ++ } else if (2048 == mtd->writesize) { ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K); ++ 
nand->cmdfunc = mtk_nand_command_bp; ++ } ++ ECC_Config(hw,ecc_bit); ++ g_bInitDone = true; ++ } ++ switch (chip) { ++ case -1: ++ break; ++ case 0: ++ case 1: ++ /* Jun Shen, 2011.04.13 */ ++ /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */ ++ DRV_WriteReg16(NFI_CSEL_REG16, chip); ++ /* Jun Shen, 2011.04.13 */ ++ break; ++ } ++} ++ ++static uint8_t ++mtk_nand_read_byte(struct mtd_info *mtd) ++{ ++ uint8_t retval = 0; ++ ++ if (!mtk_nand_pio_ready()) { ++ printk("pio ready timeout\n"); ++ retval = false; ++ } ++ ++ if (g_bcmdstatus) { ++ retval = DRV_Reg8(NFI_DATAR_REG32); ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK); ++ mtk_nand_reset(); ++ if (g_bHwEcc) { ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } ++ g_bcmdstatus = false; ++ } else ++ retval = DRV_Reg8(NFI_DATAR_REG32); ++ ++ return retval; ++} ++ ++static void ++mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len) ++{ ++ struct nand_chip *nand = (struct nand_chip *)mtd->priv; ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4ColAddr = pkCMD->u4ColAddr; ++ u32 u4PageSize = mtd->writesize; ++ ++ if (u4ColAddr < u4PageSize) { ++ if ((u4ColAddr == 0) && (len >= u4PageSize)) { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB); ++ if (len > u4PageSize) { ++ u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB)); ++ memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size); ++ } ++ } else { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB); ++ memcpy(buf, nand->buffers->databuf + u4ColAddr, len); ++ } ++ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr; ++ } else { ++ u32 u4Offset = u4ColAddr - u4PageSize; ++ u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB)); ++ if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB); ++ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr; ++ } ++ memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size); ++ } ++ pkCMD->u4ColAddr += len; ++} ++ ++static void ++mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len) ++{ ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4ColAddr = pkCMD->u4ColAddr; ++ u32 u4PageSize = mtd->writesize; ++ int i4Size, i; ++ ++ if (u4ColAddr >= u4PageSize) { ++ u32 u4Offset = u4ColAddr - u4PageSize; ++ u8 *pOOB = pkCMD->au1OOB + u4Offset; ++ i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset)); ++ for (i = 0; i < i4Size; i++) { ++ pOOB[i] &= buf[i]; ++ } ++ } else { ++ pkCMD->pDataBuf = (u8 *) buf; ++ } ++ ++ pkCMD->u4ColAddr += len; ++} ++ ++static int ++mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required, int page) ++{ ++ mtk_nand_write_buf(mtd, buf, mtd->writesize); ++ mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); ++ return 0; ++} ++ ++static int ++mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page) ++{ ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4ColAddr = pkCMD->u4ColAddr; ++ u32 u4PageSize = mtd->writesize; ++ ++ if (u4ColAddr == 0) { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi); ++ pkCMD->u4ColAddr += u4PageSize + mtd->oobsize; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / 
page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined (MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, ++ mtd->writesize, buf, chip->oob_poi)) ++ return 0; ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++ ++ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi)) ++ return 0; ++ else ++ return -EIO; ++#endif ++} ++ ++int ++mtk_nand_erase_hw(struct mtd_info *mtd, int page) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ ++ chip->erase(mtd, page); ++ ++ return chip->waitfunc(mtd, chip); ++} ++ ++static int ++mtk_nand_erase(struct mtd_info *mtd, int page) ++{ ++ // get mapping ++ struct nand_chip *chip = mtd->priv; ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int page_in_block = page % page_per_block; ++ int block = page / page_per_block; ++ int mapped_block = block; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ ++ do { ++ int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block); ++ ++ if (status & NAND_STATUS_FAIL) { ++#if defined (MTK_NAND_BMT) ++ if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift, ++ UPDATE_ERASE_FAIL, NULL, NULL)) ++ { ++ MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block); ++ return 0; ++ } else { ++ MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block); ++ return NAND_STATUS_FAIL; ++ } ++#else ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } else ++ return NAND_STATUS_FAIL; ++#endif ++ } else ++ break; ++ } while(1); ++ ++ return 0; ++} ++ ++static int ++mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ u32 col_addr = 0; ++ u32 sector = 0; ++ int res = 0; ++ u32 colnob = 2, rawnob = devinfo.addr_cycle - 2; ++ int randomread = 0; ++ int read_len = 0; ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) { ++ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf); ++ return -EINVAL; ++ } ++ if (len > spare_per_sector) ++ randomread = 1; ++ if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) { ++ while (len > 0) { ++ read_len = min(len, spare_per_sector); ++ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // TODO: Fix this hard-code 16 ++ if 
(!mtk_nand_ready_for_read(chip, page_addr, col_addr, false, NULL)) { ++ printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n"); ++ res = -EIO; ++ goto error; ++ } ++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { ++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n"); ++ res = -EIO; ++ goto error; ++ } ++ mtk_nand_check_RW_count(read_len); ++ mtk_nand_stop_read(); ++ sector++; ++ len -= read_len; ++ } ++ } else { ++ col_addr = NAND_SECTOR_SIZE; ++ if (chip->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ if (!mtk_nand_reset()) ++ goto error; ++ mtk_nand_set_mode(0x6000); ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN); ++ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT); ++ ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ ++ mtk_nand_set_autoformat(false); ++ ++ if (!mtk_nand_set_command(NAND_CMD_READ0)) ++ goto error; ++ //1 FIXED ME: For Any Kind of AddrCycle ++ if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob)) ++ goto error; ++ if (!mtk_nand_set_command(NAND_CMD_READSTART)) ++ goto error; ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto error; ++ read_len = min(len, spare_per_sector); ++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { ++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n"); ++ res = -EIO; ++ goto error; ++ } ++ sector++; ++ len -= read_len; ++ mtk_nand_stop_read(); ++ while (len > 0) { ++ read_len = min(len, spare_per_sector); ++ if (!mtk_nand_set_command(0x05)) ++ goto error; ++ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); ++ if (chip->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr); ++ DRV_WriteReg16(NFI_ADDRNOB_REG16, 2); ++ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT); ++ if (!mtk_nand_status_ready(STA_ADDR_STATE)) ++ goto error; ++ if (!mtk_nand_set_command(0xE0)) ++ goto error; ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto error; ++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { ++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n"); ++ res = -EIO; ++ goto error; ++ } ++ mtk_nand_stop_read(); ++ sector++; ++ len -= read_len; ++ } ++ } ++error: ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD); ++ return res; ++} ++ ++static int ++mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len) ++{ ++ struct nand_chip *chip = mtd->priv; ++ u32 col_addr = 0; ++ u32 sector = 0; ++ int write_len = 0; ++ int status; ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) { ++ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf); ++ return -EINVAL; ++ } ++ ++ while (len > 0) { ++ write_len = min(len, spare_per_sector); ++ col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE; ++ if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL)) ++ return -EIO; ++ if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len)) ++ return -EIO; ++ (void)mtk_nand_check_RW_count(write_len); ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR); ++ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ++ ; ++ status = chip->waitfunc(mtd, chip); ++ if (status & NAND_STATUS_FAIL) { ++ printk(KERN_INFO "status: %d\n", status); ++ return -EIO; ++ } 
++ len -= write_len; ++ sector++; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int i, iter; ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize); ++ ++ // copy ecc data ++ for (i = 0; i < layout->eccbytes; i++) { ++ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR); ++ local_oob_buf[iter] = chip->oob_poi[layout->eccpos[i]]; ++ } ++ ++ // copy FDM data ++ for (i = 0; i < sec_num; i++) ++ memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR); ++ ++ return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize); ++} ++ ++static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ // write bad index into oob ++ if (mapped_block != block) ++ set_bad_index_to_oob(chip->oob_poi, block); ++ else ++ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX); ++#else ++ if (shift_on_bbt) ++ { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ do { ++ if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) { ++ MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block); ++#if defined(MTK_NAND_BMT) ++ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, ++ UPDATE_WRITE_FAIL, NULL, chip->oob_poi)) ++ { ++ MSG(INIT, "Update BMT success\n"); ++ return 0; ++ } else { ++ MSG(INIT, "Update BMT fail\n"); ++ return -EIO; ++ } ++#else ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, mapped_block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } else { ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ } else ++ break; ++ } while (1); ++ ++ return 0; ++} ++ ++int ++mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int block = (int)offset >> chip->phys_erase_shift; ++ int page = block * (1 << (chip->phys_erase_shift - chip->page_shift)); ++ u8 buf[8]; ++ ++ memset(buf, 0xFF, 8); ++ buf[0] = 0; ++ return mtk_nand_write_oob_raw(mtd, buf, page, 8); ++} ++ ++static int ++mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int block = (int)offset >> chip->phys_erase_shift; ++ int ret; ++ int mapped_block = block; ++ ++ nand_get_device(chip, mtd, FL_WRITING); ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) 
{ ++ printk("NAND mark bad failed\n"); ++ nand_release_device(mtd); ++ return NAND_STATUS_FAIL; ++ } ++ } ++ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift); ++#endif ++ nand_release_device(mtd); ++ ++ return ret; ++} ++ ++int ++mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int i; ++ u8 iter = 0; ++ ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) { ++ printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__); ++ return -EIO; ++ } ++ ++ // adjust to ecc physical layout to memory layout ++ /*********************************************************/ ++ /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */ ++ /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */ ++ /*********************************************************/ ++ ++ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize); ++ // copy ecc data ++ for (i = 0; i < layout->eccbytes; i++) { ++ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR); ++ chip->oob_poi[layout->eccpos[i]] = local_oob_buf[iter]; ++ } ++ ++ // copy FDM data ++ for (i = 0; i < sec_num; i++) { ++ memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR); ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined (MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ // allow to read oob even if the block is bad ++ } ++ if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0) ++ return -1; ++#endif ++ return 0; ++} ++ ++int ++mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ int page_addr = (int)(ofs >> chip->page_shift); ++ unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ unsigned char oob_buf[8]; ++ ++ page_addr &= ~(page_per_block - 1); ++ if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) { ++ printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n"); ++ return 1; ++ } ++ ++ if (oob_buf[0] != 0xff) { ++ printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]); ++ // dump_nfi(); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ int block = (int)ofs >> chip->phys_erase_shift; ++ int mapped_block = block; ++ int ret; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ } ++#endif ++ ++ ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift); ++#if defined (MTK_NAND_BMT) ++ if (ret) { ++ MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block); ++ if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, 
NULL)) { ++ MSG(INIT, "Update BMT success\n"); ++ ret = 0; ++ } else { ++ MSG(INIT, "Update BMT fail\n"); ++ ret = 1; ++ } ++ } ++#endif ++ ++ return ret; ++} ++ ++#ifdef CONFIG_MTD_NAND_VERIFY_WRITE ++char gacBuf[4096 + 288]; ++ ++static int ++mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4PageSize = mtd->writesize; ++ u32 *pSrc, *pDst; ++ int i; ++ ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize); ++ ++ pSrc = (u32 *) buf; ++ pDst = (u32 *) gacBuf; ++ len = len / sizeof(u32); ++ for (i = 0; i < len; ++i) { ++ if (*pSrc != *pDst) { ++ MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr); ++ return -1; ++ } ++ pSrc++; ++ pDst++; ++ } ++ ++ pSrc = (u32 *) chip->oob_poi; ++ pDst = (u32 *) (gacBuf + u4PageSize); ++ ++ if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) { ++ // TODO: Ask Designer Why? ++ //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7])) ++ MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr); ++ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]); ++ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]); ++ return -1; ++ } ++ return 0; ++} ++#endif ++ ++static void ++mtk_nand_init_hw(struct mtk_nand_host *host) { ++ struct mtk_nand_host_hw *hw = host->hw; ++ u32 data; ++ ++ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60); ++ data &= ~((0x3<<18)|(0x3<<16)); ++ data |= ((0x2<<18) |(0x2<<16)); ++ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data); ++ ++ MSG(INIT, "Enable NFI Clock\n"); ++ nand_enable_clock(); ++ ++ g_bInitDone = false; ++ g_kCMD.u4OOBRowAddr = (u32) - 1; ++ ++ /* Set default NFI access timing control */ ++ DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing); ++ DRV_WriteReg16(NFI_CNFG_REG16, 0); ++ DRV_WriteReg16(NFI_PAGEFMT_REG16, 0); ++ ++ /* Reset the state machine and data FIFO, because flushing FIFO */ ++ (void)mtk_nand_reset(); ++ ++ /* Set the ECC engine */ ++ if (hw->nand_ecc_mode == NAND_ECC_HW) { ++ MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME); ++ if (g_bHwEcc) ++ NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ ECC_Config(host->hw,4); ++ mtk_nand_configure_fdm(8); ++ mtk_nand_configure_lock(); ++ } ++ ++ NFI_SET_REG16(NFI_IOCON_REG16, 0x47); ++} ++ ++static int mtk_nand_dev_ready(struct mtd_info *mtd) ++{ ++ return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY); ++} ++ ++#define FACT_BBT_BLOCK_NUM 32 // use the latest 32 BLOCK for factory bbt table ++#define FACT_BBT_OOB_SIGNATURE 1 ++#define FACT_BBT_SIGNATURE_LEN 7 ++const u8 oob_signature[] = "mtknand"; ++static u8 *fact_bbt = 0; ++static u32 bbt_size = 0; ++ ++static int ++read_fact_bbt(struct mtd_info *mtd, unsigned int page) ++{ ++ struct nand_chip *chip = mtd->priv; ++ ++ // read oob ++ if (mtk_nand_read_oob_hw(mtd, chip, page)==0) ++ { ++ if (chip->oob_poi[nand_badblock_offset] != 0xFF) ++ { ++ printk("Bad Block on Page %x\n", page); ++ return -1; ++ } ++ if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0) ++ { ++ printk("compare signature failed %x\n", page); ++ return -1; ++ } ++ if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi)) ++ { ++ printk("Signature 
matched and data read!\n"); ++ memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize); ++ return 0; ++ } ++ ++ } ++ printk("failed at page %x\n", page); ++ return -1; ++} ++ ++static int ++load_fact_bbt(struct mtd_info *mtd) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int i; ++ u32 total_block; ++ ++ total_block = 1 << (chip->chip_shift - chip->phys_erase_shift); ++ bbt_size = total_block >> 2; ++ ++ if ((!fact_bbt) && (bbt_size)) ++ fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL); ++ if (!fact_bbt) ++ return -1; ++ ++ for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--) ++ { ++ if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0) ++ { ++ printk("load_fact_bbt success %d\n", i); ++ return 0; ++ } ++ ++ } ++ printk("load_fact_bbt failed\n"); ++ return -1; ++} ++ ++static int oob_mtk_ooblayout_ecc(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ oobregion->length = 8; ++ oobregion->offset = layout->eccpos[section * 8]; ++ ++ return 0; ++} ++ ++static int oob_mtk_ooblayout_free(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobregion) ++{ ++ if (section >= (layout->eccbytes / 8)) { ++ return -ERANGE; ++ } ++ oobregion->offset = layout->oobfree[section].offset; ++ oobregion->length = layout->oobfree[section].length; ++ ++ return 0; ++} ++ ++ ++static const struct mtd_ooblayout_ops oob_mtk_ops = { ++ .ecc = oob_mtk_ooblayout_ecc, ++ .free = oob_mtk_ooblayout_free, ++}; ++ ++static int ++mtk_nand_probe(struct platform_device *pdev) ++{ ++ struct mtd_part_parser_data ppdata; ++ struct mtk_nand_host_hw *hw; ++ struct nand_chip *nand_chip; ++ struct mtd_info *mtd; ++ u8 ext_id1, ext_id2, ext_id3; ++ int err = 0; ++ int id; ++ u32 ext_id; ++ int i; ++ u32 data; ++ ++ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60); ++ data &= ~((0x3<<18)|(0x3<<16)); ++ data |= ((0x2<<18) |(0x2<<16)); ++ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data); ++ ++ hw = &mt7621_nand_hw; ++ BUG_ON(!hw); ++ /* Allocate memory for the device structure (and zero it) */ ++ host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL); ++ if (!host) { ++ MSG(INIT, "mtk_nand: failed to allocate device structure.\n"); ++ return -ENOMEM; ++ } ++ ++ /* Allocate memory for 16 byte aligned buffer */ ++ local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16); ++ printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align); ++ host->hw = hw; ++ ++ /* init mtd data structure */ ++ nand_chip = &host->nand_chip; ++ nand_chip->priv = host; /* link the private data structures */ ++ ++ mtd = host->mtd = &nand_chip->mtd; ++ mtd->priv = nand_chip; ++ mtd->owner = THIS_MODULE; ++ mtd->name = "MT7621-NAND"; ++ ++ hw->nand_ecc_mode = NAND_ECC_HW; ++ ++ /* Set address of NAND IO lines */ ++ nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32; ++ nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32; ++ nand_chip->chip_delay = 20; /* 20us command delay time */ ++ nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */ ++ nand_chip->ecc.strength = 1; ++ nand_chip->read_byte = mtk_nand_read_byte; ++ nand_chip->read_buf = mtk_nand_read_buf; ++ nand_chip->write_buf = mtk_nand_write_buf; ++#ifdef CONFIG_MTD_NAND_VERIFY_WRITE ++ nand_chip->verify_buf = mtk_nand_verify_buf; ++#endif ++ nand_chip->select_chip = mtk_nand_select_chip; ++ nand_chip->dev_ready = mtk_nand_dev_ready; ++ nand_chip->cmdfunc = mtk_nand_command_bp; ++ nand_chip->ecc.read_page = mtk_nand_read_page_hwecc; ++ 
nand_chip->ecc.write_page = mtk_nand_write_page_hwecc; ++ ++ mtd_set_ooblayout(mtd, &oob_mtk_ops); ++ nand_chip->ecc.size = hw->nand_ecc_size; //2048 ++ nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32 ++ ++ // For BMT, we need to revise driver architecture ++ nand_chip->write_page = mtk_nand_write_page; ++ nand_chip->ecc.write_oob = mtk_nand_write_oob; ++ nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device(). ++ nand_chip->erase_mtk = mtk_nand_erase; ++ nand_chip->read_page = mtk_nand_read_page; ++ nand_chip->ecc.read_oob = mtk_nand_read_oob; ++ nand_chip->block_bad = mtk_nand_block_bad; ++ nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl; ++ ++ //Qwert:Add for Uboot ++ mtk_nand_init_hw(host); ++ /* Select the device */ ++ nand_chip->select_chip(mtd, NFI_DEFAULT_CS); ++ ++ /* ++ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) ++ * after power-up ++ */ ++ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); ++ ++ memset(&devinfo, 0 , sizeof(flashdev_info)); ++ ++ /* Send the command for reading device ID */ ++ ++ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); ++ ++ /* Read manufacturer and device IDs */ ++ manu_id = nand_chip->read_byte(mtd); ++ dev_id = nand_chip->read_byte(mtd); ++ id = dev_id | (manu_id << 8); ++ ext_id1 = nand_chip->read_byte(mtd); ++ ext_id2 = nand_chip->read_byte(mtd); ++ ext_id3 = nand_chip->read_byte(mtd); ++ ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3; ++ if (!get_device_info(id, ext_id, &devinfo)) { ++ u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F; ++ MSG(INIT, "Not Support this Device! \r\n"); ++ memset(&devinfo, 0 , sizeof(flashdev_info)); ++ MSG(INIT, "chip_mode=%08X\n",chip_mode); ++ ++ /* apply bootstrap first */ ++ devinfo.addr_cycle = 5; ++ devinfo.iowidth = 8; ++ ++ switch (chip_mode) { ++ case 10: ++ devinfo.pagesize = 2048; ++ devinfo.sparesize = 128; ++ devinfo.totalsize = 128; ++ devinfo.blocksize = 128; ++ break; ++ case 11: ++ devinfo.pagesize = 4096; ++ devinfo.sparesize = 128; ++ devinfo.totalsize = 1024; ++ devinfo.blocksize = 256; ++ break; ++ case 12: ++ devinfo.pagesize = 4096; ++ devinfo.sparesize = 224; ++ devinfo.totalsize = 2048; ++ devinfo.blocksize = 512; ++ break; ++ default: ++ case 1: ++ devinfo.pagesize = 2048; ++ devinfo.sparesize = 64; ++ devinfo.totalsize = 128; ++ devinfo.blocksize = 128; ++ break; ++ } ++ ++ devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING; ++ devinfo.devciename[0] = 'U'; ++ devinfo.advancedmode = 0; ++ } ++ mtd->writesize = devinfo.pagesize; ++ mtd->erasesize = (devinfo.blocksize<<10); ++ mtd->oobsize = devinfo.sparesize; ++ ++ nand_chip->chipsize = (devinfo.totalsize<<20); ++ nand_chip->page_shift = ffs(mtd->writesize) - 1; ++ nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1; ++ nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1; ++ nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;//0x1C;//ffs(nand_chip->chipsize) - 1; ++ nand_chip->cmd_ctrl = mtk_nfc_cmd_ctrl; ++ ++ /* allocate buffers or call select_chip here or a bit earlier*/ ++ { ++ struct nand_buffers *nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize + mtd->oobsize * 3, GFP_KERNEL); ++ if (!nbuf) { ++ return -ENOMEM; ++ } ++ nbuf->ecccalc = (uint8_t *)(nbuf + 1); ++ nbuf->ecccode = nbuf->ecccalc + mtd->oobsize; ++ nbuf->databuf = nbuf->ecccode + mtd->oobsize; ++ ++ nand_chip->buffers = nbuf; ++ nand_chip->options |= NAND_OWN_BUFFERS; ++ } ++ ++ nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize; ++ nand_chip->badblockpos = 0; ++ ++ 
if (devinfo.pagesize == 4096) ++ layout = &nand_oob_128; ++ else if (devinfo.pagesize == 2048) ++ layout = &nand_oob_64; ++ else if (devinfo.pagesize == 512) ++ layout = &nand_oob_16; ++ ++ layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE); ++ for (i = 0; i < layout->eccbytes; i++) ++ layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i; ++ ++ MSG(INIT, "Support this Device in MTK table! %x \r\n", id); ++ hw->nfi_bus_width = devinfo.iowidth; ++ DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting); ++ ++ /* 16-bit bus width */ ++ if (hw->nfi_bus_width == 16) { ++ MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME); ++ nand_chip->options |= NAND_BUSWIDTH_16; ++ } ++ mtd->oobsize = devinfo.sparesize; ++ hw->nfi_cs_num = 1; ++ ++ /* Scan to find existance of the device */ ++ if (nand_scan(mtd, hw->nfi_cs_num)) { ++ MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME); ++ err = -ENXIO; ++ goto out; ++ } ++ ++ g_page_size = mtd->writesize; ++ platform_set_drvdata(pdev, host); ++ if (hw->nfi_bus_width == 16) { ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN); ++ } ++ ++ nand_chip->select_chip(mtd, 0); ++#if defined(MTK_NAND_BMT) ++ nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift; ++#endif ++ mtd->size = nand_chip->chipsize; ++ ++ CFG_BLOCKSIZE = mtd->erasesize; ++ ++#if defined(MTK_NAND_BMT) ++ if (!g_bmt) { ++ if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) { ++ MSG(INIT, "Error: init bmt failed\n"); ++ return 0; ++ } ++ } ++#endif ++ ++ nand_set_flash_node(nand_chip, pdev->dev.of_node); ++ err = mtd_device_parse_register(mtd, probe_types, &ppdata, ++ NULL, 0); ++ if (!err) { ++ MSG(INIT, "[mtk_nand] probe successfully!\n"); ++ nand_disable_clock(); ++ shift_on_bbt = 1; ++ if (load_fact_bbt(mtd) == 0) { ++ int i; ++ for (i = 0; i < 0x100; i++) ++ nand_chip->bbt[i] |= fact_bbt[i]; ++ } ++ ++ return err; ++ } ++ ++out: ++ MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err); ++ nand_release(mtd); ++ platform_set_drvdata(pdev, NULL); ++ if ( NULL != nand_chip->buffers) { ++ kfree(nand_chip->buffers); ++ } ++ kfree(host); ++ nand_disable_clock(); ++ return err; ++} ++ ++static int ++mtk_nand_remove(struct platform_device *pdev) ++{ ++ struct mtk_nand_host *host = platform_get_drvdata(pdev); ++ struct mtd_info *mtd = host->mtd; ++ struct nand_chip *nand_chip = &host->nand_chip; ++ ++ nand_release(mtd); ++ if ( NULL != nand_chip->buffers) { ++ kfree(nand_chip->buffers); ++ } ++ kfree(host); ++ nand_disable_clock(); ++ ++ return 0; ++} ++ ++static const struct of_device_id mt7621_nand_match[] = { ++ { .compatible = "mtk,mt7621-nand" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7621_nand_match); ++ ++static struct platform_driver mtk_nand_driver = { ++ .probe = mtk_nand_probe, ++ .remove = mtk_nand_remove, ++ .driver = { ++ .name = "MT7621-NAND", ++ .owner = THIS_MODULE, ++ .of_match_table = mt7621_nand_match, ++ }, ++}; ++ ++static int __init ++mtk_nand_init(void) ++{ ++ printk("MediaTek Nand driver init, version %s\n", VERSION); ++ ++ return platform_driver_register(&mtk_nand_driver); ++} ++ ++static void __exit ++mtk_nand_exit(void) ++{ ++ platform_driver_unregister(&mtk_nand_driver); ++} ++ ++module_init(mtk_nand_init); ++module_exit(mtk_nand_exit); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/mtd/nand/mtk_nand2.h +@@ -0,0 +1,452 @@ ++#ifndef __MTK_NAND_H ++#define __MTK_NAND_H ++ ++#define RALINK_NAND_CTRL_BASE 0xBE003000 ++#define RALINK_SYSCTL_BASE 0xBE000000 ++#define 
RALINK_NANDECC_CTRL_BASE 0xBE003800 ++/******************************************************************************* ++ * NFI Register Definition ++ *******************************************************************************/ ++ ++#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000)) ++#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004)) ++#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008)) ++#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C)) ++#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010)) ++#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014)) ++ ++#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020)) ++ ++#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030)) ++#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034)) ++#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038)) ++ ++#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040)) ++ ++#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050)) ++#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054)) ++#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058)) ++ ++#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060)) ++#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064)) ++#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068)) ++ ++#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070)) ++ ++#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080)) ++#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084)) ++ ++#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090)) ++#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094)) ++ ++#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0)) ++#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4)) ++ ++#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100)) ++#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104)) ++#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108)) ++#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110)) ++#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114)) ++#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118)) ++#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C)) ++#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120)) ++#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124)) ++#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128)) ++#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C)) ++#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130)) ++#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134)) ++#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138)) ++#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C)) ++#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140)) ++#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144)) ++#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148)) ++#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C)) ++#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150)) ++#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154)) ++#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158)) ++#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C)) ++#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160)) ++#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164)) ++#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168)) ++#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C)) 
++#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170)) ++#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174)) ++#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178)) ++#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C)) ++#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180)) ++#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184)) ++#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188)) ++#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C)) ++ ++#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190)) ++#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194)) ++#define NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198)) ++#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C)) ++#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210)) ++ ++ ++/******************************************************************************* ++ * NFI Register Field Definition ++ *******************************************************************************/ ++ ++/* NFI_CNFG */ ++#define CNFG_AHB (0x0001) ++#define CNFG_READ_EN (0x0002) ++#define CNFG_DMA_BURST_EN (0x0004) ++#define CNFG_BYTE_RW (0x0040) ++#define CNFG_HW_ECC_EN (0x0100) ++#define CNFG_AUTO_FMT_EN (0x0200) ++#define CNFG_OP_IDLE (0x0000) ++#define CNFG_OP_READ (0x1000) ++#define CNFG_OP_SRD (0x2000) ++#define CNFG_OP_PRGM (0x3000) ++#define CNFG_OP_ERASE (0x4000) ++#define CNFG_OP_RESET (0x5000) ++#define CNFG_OP_CUST (0x6000) ++#define CNFG_OP_MODE_MASK (0x7000) ++#define CNFG_OP_MODE_SHIFT (12) ++ ++/* NFI_PAGEFMT */ ++#define PAGEFMT_512 (0x0000) ++#define PAGEFMT_2K (0x0001) ++#define PAGEFMT_4K (0x0002) ++ ++#define PAGEFMT_PAGE_MASK (0x0003) ++ ++#define PAGEFMT_DBYTE_EN (0x0008) ++ ++#define PAGEFMT_SPARE_16 (0x0000) ++#define PAGEFMT_SPARE_26 (0x0001) ++#define PAGEFMT_SPARE_27 (0x0002) ++#define PAGEFMT_SPARE_28 (0x0003) ++#define PAGEFMT_SPARE_MASK (0x0030) ++#define PAGEFMT_SPARE_SHIFT (4) ++ ++#define PAGEFMT_FDM_MASK (0x0F00) ++#define PAGEFMT_FDM_SHIFT (8) ++ ++#define PAGEFMT_FDM_ECC_MASK (0xF000) ++#define PAGEFMT_FDM_ECC_SHIFT (12) ++ ++/* NFI_CON */ ++#define CON_FIFO_FLUSH (0x0001) ++#define CON_NFI_RST (0x0002) ++#define CON_NFI_SRD (0x0010) ++ ++#define CON_NFI_NOB_MASK (0x0060) ++#define CON_NFI_NOB_SHIFT (5) ++ ++#define CON_NFI_BRD (0x0100) ++#define CON_NFI_BWR (0x0200) ++ ++#define CON_NFI_SEC_MASK (0xF000) ++#define CON_NFI_SEC_SHIFT (12) ++ ++/* NFI_ACCCON */ ++#define ACCCON_SETTING () ++ ++/* NFI_INTR_EN */ ++#define INTR_RD_DONE_EN (0x0001) ++#define INTR_WR_DONE_EN (0x0002) ++#define INTR_RST_DONE_EN (0x0004) ++#define INTR_ERASE_DONE_EN (0x0008) ++#define INTR_BSY_RTN_EN (0x0010) ++#define INTR_ACC_LOCK_EN (0x0020) ++#define INTR_AHB_DONE_EN (0x0040) ++#define INTR_ALL_INTR_DE (0x0000) ++#define INTR_ALL_INTR_EN (0x007F) ++ ++/* NFI_INTR */ ++#define INTR_RD_DONE (0x0001) ++#define INTR_WR_DONE (0x0002) ++#define INTR_RST_DONE (0x0004) ++#define INTR_ERASE_DONE (0x0008) ++#define INTR_BSY_RTN (0x0010) ++#define INTR_ACC_LOCK (0x0020) ++#define INTR_AHB_DONE (0x0040) ++ ++/* NFI_ADDRNOB */ ++#define ADDR_COL_NOB_MASK (0x0003) ++#define ADDR_COL_NOB_SHIFT (0) ++#define ADDR_ROW_NOB_MASK (0x0030) ++#define ADDR_ROW_NOB_SHIFT (4) ++ ++/* NFI_STA */ ++#define STA_READ_EMPTY (0x00001000) ++#define STA_ACC_LOCK (0x00000010) ++#define STA_CMD_STATE (0x00000001) ++#define STA_ADDR_STATE (0x00000002) ++#define STA_DATAR_STATE (0x00000004) ++#define STA_DATAW_STATE (0x00000008) ++ ++#define 
STA_NAND_FSM_MASK (0x1F000000) ++#define STA_NAND_BUSY (0x00000100) ++#define STA_NAND_BUSY_RETURN (0x00000200) ++#define STA_NFI_FSM_MASK (0x000F0000) ++#define STA_NFI_OP_MASK (0x0000000F) ++ ++/* NFI_FIFOSTA */ ++#define FIFO_RD_EMPTY (0x0040) ++#define FIFO_RD_FULL (0x0080) ++#define FIFO_WR_FULL (0x8000) ++#define FIFO_WR_EMPTY (0x4000) ++#define FIFO_RD_REMAIN(x) (0x1F&(x)) ++#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8) ++ ++/* NFI_ADDRCNTR */ ++#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12) ++#define ADDRCNTR_OFFSET(x) (0x03FF&(x)) ++ ++/* NFI_LOCK */ ++#define NFI_LOCK_ON (0x0001) ++ ++/* NFI_LOCKANOB */ ++#define PROG_RADD_NOB_MASK (0x7000) ++#define PROG_RADD_NOB_SHIFT (12) ++#define PROG_CADD_NOB_MASK (0x0300) ++#define PROG_CADD_NOB_SHIFT (8) ++#define ERASE_RADD_NOB_MASK (0x0070) ++#define ERASE_RADD_NOB_SHIFT (4) ++#define ERASE_CADD_NOB_MASK (0x0007) ++#define ERASE_CADD_NOB_SHIFT (0) ++ ++/******************************************************************************* ++ * ECC Register Definition ++ *******************************************************************************/ ++ ++#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000)) ++#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004)) ++#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008)) ++#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C)) ++#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010)) ++#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014)) ++#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018)) ++#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C)) ++#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020)) ++#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024)) ++#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028)) ++#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C)) ++ ++#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100)) ++#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104)) ++#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108)) ++#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C)) ++#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110)) ++#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114)) ++#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118)) ++#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C)) ++#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120)) ++#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124)) ++#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128)) ++#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C)) ++#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130)) ++#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134)) ++#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138)) ++#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C)) ++#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140)) ++#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144)) ++#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148)) ++#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C)) ++ ++/******************************************************************************* ++ * ECC register definition ++ *******************************************************************************/ ++/* ECC_ENCON */ ++#define ENC_EN (0x0001) ++#define ENC_DE (0x0000) ++ ++/* ECC_ENCCNFG */ 
++#define ECC_CNFG_ECC4 (0x0000) ++#define ECC_CNFG_ECC6 (0x0001) ++#define ECC_CNFG_ECC8 (0x0002) ++#define ECC_CNFG_ECC10 (0x0003) ++#define ECC_CNFG_ECC12 (0x0004) ++#define ECC_CNFG_ECC_MASK (0x00000007) ++ ++#define ENC_CNFG_NFI (0x0010) ++#define ENC_CNFG_MODE_MASK (0x0010) ++ ++#define ENC_CNFG_META6 (0x10300000) ++#define ENC_CNFG_META8 (0x10400000) ++ ++#define ENC_CNFG_MSG_MASK (0x1FFF0000) ++#define ENC_CNFG_MSG_SHIFT (0x10) ++ ++/* ECC_ENCIDLE */ ++#define ENC_IDLE (0x0001) ++ ++/* ECC_ENCSTA */ ++#define STA_FSM (0x001F) ++#define STA_COUNT_PS (0xFF10) ++#define STA_COUNT_MS (0x3FFF0000) ++ ++/* ECC_ENCIRQEN */ ++#define ENC_IRQEN (0x0001) ++ ++/* ECC_ENCIRQSTA */ ++#define ENC_IRQSTA (0x0001) ++ ++/* ECC_DECCON */ ++#define DEC_EN (0x0001) ++#define DEC_DE (0x0000) ++ ++/* ECC_ENCCNFG */ ++#define DEC_CNFG_ECC4 (0x0000) ++//#define DEC_CNFG_ECC6 (0x0001) ++//#define DEC_CNFG_ECC12 (0x0002) ++#define DEC_CNFG_NFI (0x0010) ++//#define DEC_CNFG_META6 (0x10300000) ++//#define DEC_CNFG_META8 (0x10400000) ++ ++#define DEC_CNFG_FER (0x01000) ++#define DEC_CNFG_EL (0x02000) ++#define DEC_CNFG_CORRECT (0x03000) ++#define DEC_CNFG_TYPE_MASK (0x03000) ++ ++#define DEC_CNFG_EMPTY_EN (0x80000000) ++ ++#define DEC_CNFG_CODE_MASK (0x1FFF0000) ++#define DEC_CNFG_CODE_SHIFT (0x10) ++ ++/* ECC_DECIDLE */ ++#define DEC_IDLE (0x0001) ++ ++/* ECC_DECFER */ ++#define DEC_FER0 (0x0001) ++#define DEC_FER1 (0x0002) ++#define DEC_FER2 (0x0004) ++#define DEC_FER3 (0x0008) ++#define DEC_FER4 (0x0010) ++#define DEC_FER5 (0x0020) ++#define DEC_FER6 (0x0040) ++#define DEC_FER7 (0x0080) ++ ++/* ECC_DECENUM */ ++#define ERR_NUM0 (0x0000000F) ++#define ERR_NUM1 (0x000000F0) ++#define ERR_NUM2 (0x00000F00) ++#define ERR_NUM3 (0x0000F000) ++#define ERR_NUM4 (0x000F0000) ++#define ERR_NUM5 (0x00F00000) ++#define ERR_NUM6 (0x0F000000) ++#define ERR_NUM7 (0xF0000000) ++ ++/* ECC_DECDONE */ ++#define DEC_DONE0 (0x0001) ++#define DEC_DONE1 (0x0002) ++#define DEC_DONE2 (0x0004) ++#define DEC_DONE3 (0x0008) ++#define DEC_DONE4 (0x0010) ++#define DEC_DONE5 (0x0020) ++#define DEC_DONE6 (0x0040) ++#define DEC_DONE7 (0x0080) ++ ++/* ECC_DECIRQEN */ ++#define DEC_IRQEN (0x0001) ++ ++/* ECC_DECIRQSTA */ ++#define DEC_IRQSTA (0x0001) ++ ++#define CHIPVER_ECO_1 (0x8a00) ++#define CHIPVER_ECO_2 (0x8a01) ++ ++//#define NAND_PFM ++ ++/******************************************************************************* ++ * Data Structure Definition ++ *******************************************************************************/ ++struct mtk_nand_host ++{ ++ struct nand_chip nand_chip; ++ struct mtd_info *mtd; ++ struct mtk_nand_host_hw *hw; ++}; ++ ++struct NAND_CMD ++{ ++ u32 u4ColAddr; ++ u32 u4RowAddr; ++ u32 u4OOBRowAddr; ++ u8 au1OOB[288]; ++ u8* pDataBuf; ++#ifdef NAND_PFM ++ u32 pureReadOOB; ++ u32 pureReadOOBNum; ++#endif ++}; ++ ++/* ++ * ECC layout control structure. 
Exported to userspace for ++ * diagnosis and to allow creation of raw images ++struct nand_ecclayout { ++ uint32_t eccbytes; ++ uint32_t eccpos[64]; ++ uint32_t oobavail; ++ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES]; ++}; ++*/ ++#define __DEBUG_NAND 1 /* Debug information on/off */ ++ ++/* Debug message event */ ++#define DBG_EVT_NONE 0x00000000 /* No event */ ++#define DBG_EVT_INIT 0x00000001 /* Initial related event */ ++#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */ ++#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */ ++#define DBG_EVT_READ 0x00000008 /* Read related event */ ++#define DBG_EVT_WRITE 0x00000010 /* Write related event */ ++#define DBG_EVT_ERASE 0x00000020 /* Erase related event */ ++#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */ ++#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */ ++ ++#define DBG_EVT_ALL 0xffffffff ++ ++#define DBG_EVT_MASK (DBG_EVT_INIT) ++ ++#if __DEBUG_NAND ++#define MSG(evt, fmt, args...) \ ++do { \ ++ if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \ ++ printk(fmt, ##args); \ ++ } \ ++} while(0) ++ ++#define MSG_FUNC_ENTRY(f) MSG(FUC, "<FUN_ENT>: %s\n", __FUNCTION__) ++#else ++#define MSG(evt, fmt, args...) do{}while(0) ++#define MSG_FUNC_ENTRY(f) do{}while(0) ++#endif ++ ++#define RAMDOM_READ 1<<0 ++#define CACHE_READ 1<<1 ++ ++typedef struct ++{ ++ u16 id; //deviceid+menuid ++ u32 ext_id; ++ u8 addr_cycle; ++ u8 iowidth; ++ u16 totalsize; ++ u16 blocksize; ++ u16 pagesize; ++ u16 sparesize; ++ u32 timmingsetting; ++ char devciename[14]; ++ u32 advancedmode; // ++}flashdev_info,*pflashdev_info; ++ ++/* NAND driver */ ++#if 0 ++struct mtk_nand_host_hw { ++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ ++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ ++ unsigned int nfi_cs_num; /* NFI_CS_NUM */ ++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ ++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ ++ unsigned int nand_ecc_size; ++ unsigned int nand_ecc_bytes; ++ unsigned int nand_ecc_mode; ++}; ++extern struct mtk_nand_host_hw mt7621_nand_hw; ++extern u32 CFG_BLOCKSIZE; ++#endif ++#endif +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -48,7 +48,7 @@ + #include <linux/mtd/partitions.h> + #include <linux/of.h> + +-static int nand_get_device(struct mtd_info *mtd, int new_state); ++int nand_get_device(struct mtd_info *mtd, int new_state); + + static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); +@@ -240,7 +240,7 @@ static int check_offs_len(struct mtd_inf + * + * Release chip lock and wake up anyone waiting on the device. 
+ */ +-static void nand_release_device(struct mtd_info *mtd) ++void nand_release_device(struct mtd_info *mtd) + { + struct nand_chip *chip = mtd_to_nand(mtd); + +@@ -963,7 +963,7 @@ static void panic_nand_get_device(struct + * + * Get the device and lock it for exclusive access + */ +-static int ++int + nand_get_device(struct mtd_info *mtd, int new_state) + { + struct nand_chip *chip = mtd_to_nand(mtd); +--- a/drivers/mtd/nand/nand_bbt.c ++++ b/drivers/mtd/nand/nand_bbt.c +@@ -1215,6 +1215,25 @@ err: + return res; + } + ++void nand_bbt_set(struct mtd_info *mtd, int page, int flag) ++{ ++ struct nand_chip *this = mtd->priv; ++ int block; ++ ++ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1)); ++ this->bbt[block >> 3] &= ~(0x03 << (block & 0x6)); ++ this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6); ++} ++ ++int nand_bbt_get(struct mtd_info *mtd, int page) ++{ ++ struct nand_chip *this = mtd->priv; ++ int block; ++ ++ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1)); ++ return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; ++} ++ + /** + * nand_update_bbt - update bad block table(s) + * @mtd: MTD device structure +--- /dev/null ++++ b/drivers/mtd/nand/nand_def.h +@@ -0,0 +1,123 @@ ++#ifndef __NAND_DEF_H__ ++#define __NAND_DEF_H__ ++ ++#define VERSION "v2.1 Fix AHB virt2phys error" ++#define MODULE_NAME "# MTK NAND #" ++#define PROCNAME "driver/nand" ++ ++#undef TESTTIME ++//#define __UBOOT_NAND__ 1 ++#define __KERNEL_NAND__ 1 ++//#define __PRELOADER_NAND__ 1 ++//#define PMT 1 ++//#define _MTK_NAND_DUMMY_DRIVER ++//#define CONFIG_BADBLOCK_CHECK 1 ++//#ifdef CONFIG_BADBLOCK_CHECK ++//#define MTK_NAND_BMT 1 ++//#endif ++#define ECC_ENABLE 1 ++#define MANUAL_CORRECT 1 ++//#define __INTERNAL_USE_AHB_MODE__ (0) ++#define SKIP_BAD_BLOCK ++#define FACT_BBT ++ ++#ifndef NAND_OTP_SUPPORT ++#define NAND_OTP_SUPPORT 0 ++#endif ++ ++/******************************************************************************* ++ * Macro definition ++ *******************************************************************************/ ++//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value))) ++//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value))) ++//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value)))) ++//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value)))) ++ ++#if defined (__KERNEL_NAND__) ++#define NFI_SET_REG32(reg, value) \ ++do { \ ++ g_value = (DRV_Reg32(reg) | (value));\ ++ DRV_WriteReg32(reg, g_value); \ ++} while(0) ++ ++#define NFI_SET_REG16(reg, value) \ ++do { \ ++ g_value = (DRV_Reg16(reg) | (value));\ ++ DRV_WriteReg16(reg, g_value); \ ++} while(0) ++ ++#define NFI_CLN_REG32(reg, value) \ ++do { \ ++ g_value = (DRV_Reg32(reg) & (~(value)));\ ++ DRV_WriteReg32(reg, g_value); \ ++} while(0) ++ ++#define NFI_CLN_REG16(reg, value) \ ++do { \ ++ g_value = (DRV_Reg16(reg) & (~(value)));\ ++ DRV_WriteReg16(reg, g_value); \ ++} while(0) ++#endif ++ ++#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state) ++#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY)) ++ ++ ++#define NAND_SECTOR_SIZE (512) ++#define OOB_PER_SECTOR (16) ++#define OOB_AVAI_PER_SECTOR (8) ++ ++#ifndef PART_SIZE_BMTPOOL ++#define BMT_POOL_SIZE (80) ++#else ++#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL) ++#endif ++ ++#define PMT_POOL_SIZE (2) ++ ++#define TIMEOUT_1 0x1fff ++#define TIMEOUT_2 0x8ff ++#define TIMEOUT_3 
0xffff ++#define TIMEOUT_4 0xffff//5000 //PIO ++ ++ ++/* temporarity definiation */ ++#if !defined (__KERNEL_NAND__) ++#define KERN_INFO ++#define KERN_WARNING ++#define KERN_ERR ++#define PAGE_SIZE (4096) ++#endif ++#define AddStorageTrace //AddStorageTrace ++#define STORAGE_LOGGER_MSG_NAND 0 ++#define NFI_BASE RALINK_NAND_CTRL_BASE ++#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE ++ ++#ifdef __INTERNAL_USE_AHB_MODE__ ++#define MT65xx_POLARITY_LOW 0 ++#define MT65XX_PDN_PERI_NFI 0 ++#define MT65xx_EDGE_SENSITIVE 0 ++#define MT6575_NFI_IRQ_ID (58) ++#endif ++ ++#if defined (__KERNEL_NAND__) ++#define RALINK_REG(x) (*((volatile u32 *)(x))) ++#define __virt_to_phys(x) virt_to_phys((volatile void*)x) ++#else ++#define CONFIG_MTD_NAND_VERIFY_WRITE (1) ++#define printk printf ++#define ra_dbg printf ++#define BUG() //BUG() ++#define BUG_ON(x) //BUG_ON() ++#define NUM_PARTITIONS 1 ++#endif ++ ++#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333) ++ ++//uboot only support 1 cs ++#define NFI_CS_NUM (1) ++#define NFI_DEFAULT_CS (0) ++ ++#include "mt6575_typedefs.h" ++ ++#endif /* __NAND_DEF_H__ */ +--- /dev/null ++++ b/drivers/mtd/nand/nand_device_list.h +@@ -0,0 +1,56 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. 
++ */ ++ ++#ifndef __NAND_DEVICE_LIST_H__ ++#define __NAND_DEVICE_LIST_H__ ++ ++static const flashdev_info gen_FlashTable[]={ ++ {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0}, ++ {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0}, ++ {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0}, ++ {0x2CDA, 0x909506, 5, 8, 256, 128, 2048, 64, 0x30C77fff, "MT29F2G08ABAE", 0}, ++ {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0}, ++ {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0}, ++ {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0}, ++ {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0}, ++ {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0}, ++ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0}, ++ {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0}, ++ {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0}, ++ {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0}, ++}; ++ ++ ++#endif +--- /dev/null ++++ b/drivers/mtd/nand/partition.h +@@ -0,0 +1,115 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. 
++ */ ++ ++#include <linux/mtd/mtd.h> ++#include <linux/mtd/rawnand.h> ++#include <linux/mtd/partitions.h> ++ ++#define RECONFIG_PARTITION_SIZE 1 ++ ++#define MTD_BOOT_PART_SIZE 0x80000 ++#define MTD_CONFIG_PART_SIZE 0x20000 ++#define MTD_FACTORY_PART_SIZE 0x20000 ++ ++extern unsigned int CFG_BLOCKSIZE; ++#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2) ++#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2) ++#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1) ++ ++/*=======================================================================*/ ++/* NAND PARTITION Mapping */ ++/*=======================================================================*/ ++//#ifdef CONFIG_MTD_PARTITIONS ++static struct mtd_partition g_pasStatic_Partition[] = { ++ { ++ name: "ALL", ++ size: MTDPART_SIZ_FULL, ++ offset: 0, ++ }, ++ /* Put your own partition definitions here */ ++ { ++ name: "Bootloader", ++ size: MTD_BOOT_PART_SIZE, ++ offset: 0, ++ }, { ++ name: "Config", ++ size: MTD_CONFIG_PART_SIZE, ++ offset: MTDPART_OFS_APPEND ++ }, { ++ name: "Factory", ++ size: MTD_FACTORY_PART_SIZE, ++ offset: MTDPART_OFS_APPEND ++#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH ++ }, { ++ name: "Kernel", ++ size: MTD_KERN_PART_SIZE, ++ offset: MTDPART_OFS_APPEND, ++ }, { ++ name: "RootFS", ++ size: MTD_ROOTFS_PART_SIZE, ++ offset: MTDPART_OFS_APPEND, ++#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING ++ }, { ++ name: "Kernel_RootFS", ++ size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE, ++ offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE, ++#endif ++#else //CONFIG_RT2880_ROOTFS_IN_RAM ++ }, { ++ name: "Kernel", ++ size: 0x10000, ++ offset: MTDPART_OFS_APPEND, ++#endif ++#ifdef CONFIG_DUAL_IMAGE ++ }, { ++ name: "Kernel2", ++ size: MTD_KERN2_PART_SIZE, ++ offset: MTD_KERN2_PART_OFFSET, ++#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH ++ }, { ++ name: "RootFS2", ++ size: MTD_ROOTFS2_PART_SIZE, ++ offset: MTD_ROOTFS2_PART_OFFSET, ++#endif ++#endif ++ } ++ ++}; ++ ++#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition) ++extern int part_num; // = NUM_PARTITIONS; ++//#endif ++#undef RECONFIG_PARTITION_SIZE ++ diff --git a/target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch b/target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch new file mode 100644 index 0000000000..80136b89a6 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0040-nand-hack-restore-write_page.patch @@ -0,0 +1,42 @@ +--- a/include/linux/mtd/rawnand.h ++++ b/include/linux/mtd/rawnand.h +@@ -885,6 +885,9 @@ struct nand_chip { + int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); + int (*erase)(struct mtd_info *mtd, int page); + int (*scan_bbt)(struct mtd_info *mtd); ++ int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, ++ uint32_t offset, int data_len, const uint8_t *buf, ++ int oob_required, int page, int raw); + int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -2753,9 +2753,14 @@ static int nand_do_write_ops(struct mtd_ + memset(chip->oob_poi, 0xff, mtd->oobsize); + } + +- ret = nand_write_page(mtd, chip, column, bytes, wbuf, +- oob_required, page, +- (ops->mode == MTD_OPS_RAW)); ++// if (chip->write_page) ++ ret = chip->write_page(mtd, chip, column, bytes, wbuf, ++ oob_required, page, ++ (ops->mode == MTD_OPS_RAW)); ++// else ++// ret = nand_write_page(mtd, chip, column, bytes, 
wbuf, ++// oob_required, page, ++// (ops->mode == MTD_OPS_RAW)); + if (ret) + break; + +@@ -4711,6 +4716,9 @@ int nand_scan_tail(struct mtd_info *mtd) + } + } + ++// if (!chip->write_page) ++// chip->write_page = nand_write_page; ++ + /* + * Check ECC mode, default to software if 3byte/512byte hardware ECC is + * selected and we have 256 byte pagesize fallback to software ECC diff --git a/target/linux/ramips/patches-4.14/0040-nand-hack.patch b/target/linux/ramips/patches-4.14/0040-nand-hack.patch new file mode 100644 index 0000000000..a9d16840f0 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0040-nand-hack.patch @@ -0,0 +1,65 @@ +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -1903,6 +1903,9 @@ static int nand_do_read_ops(struct mtd_i + __func__, buf); + + read_retry: ++#ifdef CONFIG_MTK_MTD_NAND ++ ret = chip->read_page(mtd, chip, bufpoi, page); ++#else + if (nand_standard_page_accessors(&chip->ecc)) + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); + +@@ -1922,6 +1925,7 @@ read_retry: + else + ret = chip->ecc.read_page(mtd, chip, bufpoi, + oob_required, page); ++#endif + if (ret < 0) { + if (use_bufpoi) + /* Invalidate page cache */ +@@ -3076,8 +3080,11 @@ int nand_erase_nand(struct mtd_info *mtd + (page + pages_per_block)) + chip->pagebuf = -1; + ++#ifdef CONFIG_MTK_MTD_NAND ++ status = chip->erase_mtk(mtd, page & chip->pagemask); ++#else + status = chip->erase(mtd, page & chip->pagemask); +- ++#endif + /* See if block erase succeeded */ + if (status & NAND_STATUS_FAIL) { + pr_debug("%s: failed erase, page 0x%08x\n", +@@ -4207,6 +4214,7 @@ int nand_scan_ident(struct mtd_info *mtd + * cmdfunc() both expect cmd_ctrl() to be populated, + * so we need to check that that's the case + */ ++ printk("%s:%s[%d]%p %p %p\n", __FILE__, __func__, __LINE__, chip->cmdfunc, chip->select_chip, chip->cmd_ctrl); + pr_err("chip.cmd_ctrl() callback is not provided"); + return -EINVAL; + } +--- a/drivers/mtd/nand/nand_device_list.h ++++ b/drivers/mtd/nand/nand_device_list.h +@@ -44,6 +44,8 @@ static const flashdev_info gen_FlashTabl + {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0}, + {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0}, + {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0}, ++ {0xC8DA, 0x909544, 5, 8, 256, 128, 2048, 64, 0x30C77fff, "F59L2G81A", 0}, ++ {0xC8DC, 0x909554, 5, 8, 512, 128, 2048, 64, 0x30C77fff, "F59L4G81A", 0}, + {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0}, + {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0}, + {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0}, +--- a/include/linux/mtd/rawnand.h ++++ b/include/linux/mtd/rawnand.h +@@ -896,6 +896,10 @@ struct nand_chip { + int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface *conf); + ++#ifdef CONFIG_MTK_MTD_NAND ++ int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf, int page); ++ int (*erase_mtk)(struct mtd_info *mtd, int page); ++#endif /* CONFIG_MTK_MTD_NAND */ + + int chip_delay; + unsigned int options; diff --git a/target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch b/target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch new file mode 100644 index 0000000000..e2643e3f25 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0041-DT-Add-documentation-for-spi-rt2880.patch @@ -0,0 +1,44 @@ +From 
da6015e7f19d749f135f7ac55c4ec47b06faa868 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Fri, 9 Aug 2013 20:12:59 +0200 +Subject: [PATCH 41/53] DT: Add documentation for spi-rt2880 + +Describe the SPI master found on the MIPS based Ralink RT2880 SoC. + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + .../devicetree/bindings/spi/spi-rt2880.txt | 28 ++++++++++++++++++++ + 1 file changed, 28 insertions(+) + create mode 100644 Documentation/devicetree/bindings/spi/spi-rt2880.txt + +--- /dev/null ++++ b/Documentation/devicetree/bindings/spi/spi-rt2880.txt +@@ -0,0 +1,28 @@ ++Ralink SoC RT2880 SPI master controller. ++ ++This SPI controller is found on most wireless SoCs made by ralink. ++ ++Required properties: ++- compatible : "ralink,rt2880-spi" ++- reg : The register base for the controller. ++- #address-cells : <1>, as required by generic SPI binding. ++- #size-cells : <0>, also as required by generic SPI binding. ++ ++Child nodes as per the generic SPI binding. ++ ++Example: ++ ++ spi@b00 { ++ compatible = "ralink,rt2880-spi"; ++ reg = <0xb00 0x100>; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ m25p80@0 { ++ compatible = "m25p80"; ++ reg = <0>; ++ spi-max-frequency = <10000000>; ++ }; ++ }; ++ diff --git a/target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch b/target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch new file mode 100644 index 0000000000..1dd9f40c39 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0042-SPI-ralink-add-Ralink-SoC-spi-driver.patch @@ -0,0 +1,574 @@ +From 683af4ebb91a1600df1946ac4769d916b8a1be65 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 11:15:12 +0100 +Subject: [PATCH 42/53] SPI: ralink: add Ralink SoC spi driver + +Add the driver needed to make SPI work on Ralink SoC. + +Signed-off-by: Gabor Juhos <juhosg@openwrt.org> +Acked-by: John Crispin <blogic@openwrt.org> +--- + drivers/spi/Kconfig | 6 + + drivers/spi/Makefile | 1 + + drivers/spi/spi-rt2880.c | 530 ++++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 537 insertions(+) + create mode 100644 drivers/spi/spi-rt2880.c + +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -563,6 +563,12 @@ config SPI_QUP + This driver can also be built as a module. If so, the module + will be called spi_qup. + ++config SPI_RT2880 ++ tristate "Ralink RT288x SPI Controller" ++ depends on RALINK ++ help ++ This selects a driver for the Ralink RT288x/RT305x SPI Controller. ++ + config SPI_S3C24XX + tristate "Samsung S3C24XX series SPI" + depends on ARCH_S3C24XX +--- a/drivers/spi/Makefile ++++ b/drivers/spi/Makefile +@@ -81,6 +81,7 @@ obj-$(CONFIG_SPI_QUP) += spi-qup.o + obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o + obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o + obj-$(CONFIG_SPI_RSPI) += spi-rspi.o ++obj-$(CONFIG_SPI_RT2880) += spi-rt2880.o + obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o + spi-s3c24xx-hw-y := spi-s3c24xx.o + spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o +--- /dev/null ++++ b/drivers/spi/spi-rt2880.c +@@ -0,0 +1,530 @@ ++/* ++ * spi-rt2880.c -- Ralink RT288x/RT305x SPI controller driver ++ * ++ * Copyright (C) 2011 Sergiy <piratfm@gmail.com> ++ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org> ++ * ++ * Some parts are based on spi-orion.c: ++ * Author: Shadi Ammouri <shadi@marvell.com> ++ * Copyright (C) 2007-2008 Marvell Ltd. 
++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/clk.h> ++#include <linux/err.h> ++#include <linux/delay.h> ++#include <linux/io.h> ++#include <linux/reset.h> ++#include <linux/spi/spi.h> ++#include <linux/platform_device.h> ++#include <linux/gpio.h> ++ ++#define DRIVER_NAME "spi-rt2880" ++ ++#define RAMIPS_SPI_STAT 0x00 ++#define RAMIPS_SPI_CFG 0x10 ++#define RAMIPS_SPI_CTL 0x14 ++#define RAMIPS_SPI_DATA 0x20 ++#define RAMIPS_SPI_ADDR 0x24 ++#define RAMIPS_SPI_BS 0x28 ++#define RAMIPS_SPI_USER 0x2C ++#define RAMIPS_SPI_TXFIFO 0x30 ++#define RAMIPS_SPI_RXFIFO 0x34 ++#define RAMIPS_SPI_FIFO_STAT 0x38 ++#define RAMIPS_SPI_MODE 0x3C ++#define RAMIPS_SPI_DEV_OFFSET 0x40 ++#define RAMIPS_SPI_DMA 0x80 ++#define RAMIPS_SPI_DMASTAT 0x84 ++#define RAMIPS_SPI_ARBITER 0xF0 ++ ++/* SPISTAT register bit field */ ++#define SPISTAT_BUSY BIT(0) ++ ++/* SPICFG register bit field */ ++#define SPICFG_ADDRMODE BIT(12) ++#define SPICFG_RXENVDIS BIT(11) ++#define SPICFG_RXCAP BIT(10) ++#define SPICFG_SPIENMODE BIT(9) ++#define SPICFG_MSBFIRST BIT(8) ++#define SPICFG_SPICLKPOL BIT(6) ++#define SPICFG_RXCLKEDGE_FALLING BIT(5) ++#define SPICFG_TXCLKEDGE_FALLING BIT(4) ++#define SPICFG_HIZSPI BIT(3) ++#define SPICFG_SPICLK_PRESCALE_MASK 0x7 ++#define SPICFG_SPICLK_DIV2 0 ++#define SPICFG_SPICLK_DIV4 1 ++#define SPICFG_SPICLK_DIV8 2 ++#define SPICFG_SPICLK_DIV16 3 ++#define SPICFG_SPICLK_DIV32 4 ++#define SPICFG_SPICLK_DIV64 5 ++#define SPICFG_SPICLK_DIV128 6 ++#define SPICFG_SPICLK_DISABLE 7 ++ ++/* SPICTL register bit field */ ++#define SPICTL_START BIT(4) ++#define SPICTL_HIZSDO BIT(3) ++#define SPICTL_STARTWR BIT(2) ++#define SPICTL_STARTRD BIT(1) ++#define SPICTL_SPIENA BIT(0) ++ ++/* SPIUSER register bit field */ ++#define SPIUSER_USERMODE BIT(21) ++#define SPIUSER_INSTR_PHASE BIT(20) ++#define SPIUSER_ADDR_PHASE_MASK 0x7 ++#define SPIUSER_ADDR_PHASE_OFFSET 17 ++#define SPIUSER_MODE_PHASE BIT(16) ++#define SPIUSER_DUMMY_PHASE_MASK 0x3 ++#define SPIUSER_DUMMY_PHASE_OFFSET 14 ++#define SPIUSER_DATA_PHASE_MASK 0x3 ++#define SPIUSER_DATA_PHASE_OFFSET 12 ++#define SPIUSER_DATA_READ (BIT(0) << SPIUSER_DATA_PHASE_OFFSET) ++#define SPIUSER_DATA_WRITE (BIT(1) << SPIUSER_DATA_PHASE_OFFSET) ++#define SPIUSER_ADDR_TYPE_OFFSET 9 ++#define SPIUSER_MODE_TYPE_OFFSET 6 ++#define SPIUSER_DUMMY_TYPE_OFFSET 3 ++#define SPIUSER_DATA_TYPE_OFFSET 0 ++#define SPIUSER_TRANSFER_MASK 0x7 ++#define SPIUSER_TRANSFER_SINGLE BIT(0) ++#define SPIUSER_TRANSFER_DUAL BIT(1) ++#define SPIUSER_TRANSFER_QUAD BIT(2) ++ ++#define SPIUSER_TRANSFER_TYPE(type) ( \ ++ (type << SPIUSER_ADDR_TYPE_OFFSET) | \ ++ (type << SPIUSER_MODE_TYPE_OFFSET) | \ ++ (type << SPIUSER_DUMMY_TYPE_OFFSET) | \ ++ (type << SPIUSER_DATA_TYPE_OFFSET) \ ++) ++ ++/* SPIFIFOSTAT register bit field */ ++#define SPIFIFOSTAT_TXEMPTY BIT(19) ++#define SPIFIFOSTAT_RXEMPTY BIT(18) ++#define SPIFIFOSTAT_TXFULL BIT(17) ++#define SPIFIFOSTAT_RXFULL BIT(16) ++#define SPIFIFOSTAT_FIFO_MASK 0xff ++#define SPIFIFOSTAT_TX_OFFSET 8 ++#define SPIFIFOSTAT_RX_OFFSET 0 ++ ++#define SPI_FIFO_DEPTH 16 ++ ++/* SPIMODE register bit field */ ++#define SPIMODE_MODE_OFFSET 24 ++#define SPIMODE_DUMMY_OFFSET 0 ++ ++/* SPIARB register bit field */ ++#define SPICTL_ARB_EN BIT(31) ++#define SPICTL_CSCTL1 BIT(16) ++#define SPI1_POR BIT(1) ++#define SPI0_POR BIT(0) ++ ++#define 
RT2880_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | \ ++ SPI_CS_HIGH) ++ ++static atomic_t hw_reset_count = ATOMIC_INIT(0); ++ ++struct rt2880_spi { ++ struct spi_master *master; ++ void __iomem *base; ++ u32 speed; ++ u16 wait_loops; ++ u16 mode; ++ struct clk *clk; ++}; ++ ++static inline struct rt2880_spi *spidev_to_rt2880_spi(struct spi_device *spi) ++{ ++ return spi_master_get_devdata(spi->master); ++} ++ ++static inline u32 rt2880_spi_read(struct rt2880_spi *rs, u32 reg) ++{ ++ return ioread32(rs->base + reg); ++} ++ ++static inline void rt2880_spi_write(struct rt2880_spi *rs, u32 reg, ++ const u32 val) ++{ ++ iowrite32(val, rs->base + reg); ++} ++ ++static inline void rt2880_spi_setbits(struct rt2880_spi *rs, u32 reg, u32 mask) ++{ ++ void __iomem *addr = rs->base + reg; ++ ++ iowrite32((ioread32(addr) | mask), addr); ++} ++ ++static inline void rt2880_spi_clrbits(struct rt2880_spi *rs, u32 reg, u32 mask) ++{ ++ void __iomem *addr = rs->base + reg; ++ ++ iowrite32((ioread32(addr) & ~mask), addr); ++} ++ ++static u32 rt2880_spi_baudrate_get(struct spi_device *spi, unsigned int speed) ++{ ++ struct rt2880_spi *rs = spidev_to_rt2880_spi(spi); ++ u32 rate; ++ u32 prescale; ++ ++ /* ++ * the supported rates are: 2, 4, 8, ... 128 ++ * round up as we look for equal or less speed ++ */ ++ rate = DIV_ROUND_UP(clk_get_rate(rs->clk), speed); ++ rate = roundup_pow_of_two(rate); ++ ++ /* Convert the rate to SPI clock divisor value. */ ++ prescale = ilog2(rate / 2); ++ ++ /* some tolerance. double and add 100 */ ++ rs->wait_loops = (8 * HZ * loops_per_jiffy) / ++ (clk_get_rate(rs->clk) / rate); ++ rs->wait_loops = (rs->wait_loops << 1) + 100; ++ rs->speed = speed; ++ ++ dev_dbg(&spi->dev, "speed: %lu/%u, rate: %u, prescal: %u, loops: %hu\n", ++ clk_get_rate(rs->clk) / rate, speed, rate, prescale, ++ rs->wait_loops); ++ ++ return prescale; ++} ++ ++static u32 get_arbiter_offset(struct spi_master *master) ++{ ++ u32 offset; ++ ++ offset = RAMIPS_SPI_ARBITER; ++ if (master->bus_num == 1) ++ offset -= RAMIPS_SPI_DEV_OFFSET; ++ ++ return offset; ++} ++ ++static void rt2880_spi_set_cs(struct spi_device *spi, bool enable) ++{ ++ struct rt2880_spi *rs = spidev_to_rt2880_spi(spi); ++ ++ if (enable) ++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_SPIENA); ++ else ++ rt2880_spi_clrbits(rs, RAMIPS_SPI_CTL, SPICTL_SPIENA); ++} ++ ++static int rt2880_spi_wait_ready(struct rt2880_spi *rs, int len) ++{ ++ int loop = rs->wait_loops * len; ++ ++ while ((rt2880_spi_read(rs, RAMIPS_SPI_STAT) & SPISTAT_BUSY) && --loop) ++ cpu_relax(); ++ ++ if (loop) ++ return 0; ++ ++ return -ETIMEDOUT; ++} ++ ++static void rt2880_dump_reg(struct spi_master *master) ++{ ++ struct rt2880_spi *rs = spi_master_get_devdata(master); ++ ++ dev_dbg(&master->dev, "stat: %08x, cfg: %08x, ctl: %08x, " \ ++ "data: %08x, arb: %08x\n", ++ rt2880_spi_read(rs, RAMIPS_SPI_STAT), ++ rt2880_spi_read(rs, RAMIPS_SPI_CFG), ++ rt2880_spi_read(rs, RAMIPS_SPI_CTL), ++ rt2880_spi_read(rs, RAMIPS_SPI_DATA), ++ rt2880_spi_read(rs, get_arbiter_offset(master))); ++} ++ ++static int rt2880_spi_transfer_one(struct spi_master *master, ++ struct spi_device *spi, struct spi_transfer *xfer) ++{ ++ struct rt2880_spi *rs = spi_master_get_devdata(master); ++ unsigned len; ++ const u8 *tx = xfer->tx_buf; ++ u8 *rx = xfer->rx_buf; ++ int err = 0; ++ ++ /* change clock speed */ ++ if (unlikely(rs->speed != xfer->speed_hz)) { ++ u32 reg; ++ reg = rt2880_spi_read(rs, RAMIPS_SPI_CFG); ++ reg &= ~SPICFG_SPICLK_PRESCALE_MASK; ++ reg |= rt2880_spi_baudrate_get(spi, 
xfer->speed_hz); ++ rt2880_spi_write(rs, RAMIPS_SPI_CFG, reg); ++ } ++ ++ if (tx) { ++ len = xfer->len; ++ while (len-- > 0) { ++ rt2880_spi_write(rs, RAMIPS_SPI_DATA, *tx++); ++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_STARTWR); ++ err = rt2880_spi_wait_ready(rs, 1); ++ if (err) { ++ dev_err(&spi->dev, "TX failed, err=%d\n", err); ++ goto out; ++ } ++ } ++ } ++ ++ if (rx) { ++ len = xfer->len; ++ while (len-- > 0) { ++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_STARTRD); ++ err = rt2880_spi_wait_ready(rs, 1); ++ if (err) { ++ dev_err(&spi->dev, "RX failed, err=%d\n", err); ++ goto out; ++ } ++ *rx++ = (u8) rt2880_spi_read(rs, RAMIPS_SPI_DATA); ++ } ++ } ++ ++out: ++ return err; ++} ++ ++/* copy from spi.c */ ++static void spi_set_cs(struct spi_device *spi, bool enable) ++{ ++ if (spi->mode & SPI_CS_HIGH) ++ enable = !enable; ++ ++ if (spi->cs_gpio >= 0) ++ gpio_set_value(spi->cs_gpio, !enable); ++ else if (spi->master->set_cs) ++ spi->master->set_cs(spi, !enable); ++} ++ ++static int rt2880_spi_setup(struct spi_device *spi) ++{ ++ struct spi_master *master = spi->master; ++ struct rt2880_spi *rs = spi_master_get_devdata(master); ++ u32 reg, old_reg, arbit_off; ++ ++ if ((spi->max_speed_hz > master->max_speed_hz) || ++ (spi->max_speed_hz < master->min_speed_hz)) { ++ dev_err(&spi->dev, "invalide requested speed %d Hz\n", ++ spi->max_speed_hz); ++ return -EINVAL; ++ } ++ ++ if (!(master->bits_per_word_mask & ++ BIT(spi->bits_per_word - 1))) { ++ dev_err(&spi->dev, "invalide bits_per_word %d\n", ++ spi->bits_per_word); ++ return -EINVAL; ++ } ++ ++ /* the hardware seems can't work on mode0 force it to mode3 */ ++ if ((spi->mode & (SPI_CPOL | SPI_CPHA)) == SPI_MODE_0) { ++ dev_warn(&spi->dev, "force spi mode3\n"); ++ spi->mode |= SPI_MODE_3; ++ } ++ ++ /* chip polarity */ ++ arbit_off = get_arbiter_offset(master); ++ reg = old_reg = rt2880_spi_read(rs, arbit_off); ++ if (spi->mode & SPI_CS_HIGH) { ++ switch (master->bus_num) { ++ case 1: ++ reg |= SPI1_POR; ++ break; ++ default: ++ reg |= SPI0_POR; ++ break; ++ } ++ } else { ++ switch (master->bus_num) { ++ case 1: ++ reg &= ~SPI1_POR; ++ break; ++ default: ++ reg &= ~SPI0_POR; ++ break; ++ } ++ } ++ ++ /* enable spi1 */ ++ if (master->bus_num == 1) ++ reg |= SPICTL_ARB_EN; ++ ++ if (reg != old_reg) ++ rt2880_spi_write(rs, arbit_off, reg); ++ ++ /* deselected the spi device */ ++ spi_set_cs(spi, false); ++ ++ rt2880_dump_reg(master); ++ ++ return 0; ++} ++ ++static int rt2880_spi_prepare_message(struct spi_master *master, ++ struct spi_message *msg) ++{ ++ struct rt2880_spi *rs = spi_master_get_devdata(master); ++ struct spi_device *spi = msg->spi; ++ u32 reg; ++ ++ if ((rs->mode == spi->mode) && (rs->speed == spi->max_speed_hz)) ++ return 0; ++ ++#if 0 ++ /* set spido to tri-state */ ++ rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_HIZSDO); ++#endif ++ ++ reg = rt2880_spi_read(rs, RAMIPS_SPI_CFG); ++ ++ reg &= ~(SPICFG_MSBFIRST | SPICFG_SPICLKPOL | ++ SPICFG_RXCLKEDGE_FALLING | ++ SPICFG_TXCLKEDGE_FALLING | ++ SPICFG_SPICLK_PRESCALE_MASK); ++ ++ /* MSB */ ++ if (!(spi->mode & SPI_LSB_FIRST)) ++ reg |= SPICFG_MSBFIRST; ++ ++ /* spi mode */ ++ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) { ++ case SPI_MODE_0: ++ reg |= SPICFG_TXCLKEDGE_FALLING; ++ break; ++ case SPI_MODE_1: ++ reg |= SPICFG_RXCLKEDGE_FALLING; ++ break; ++ case SPI_MODE_2: ++ reg |= SPICFG_SPICLKPOL | SPICFG_RXCLKEDGE_FALLING; ++ break; ++ case SPI_MODE_3: ++ reg |= SPICFG_SPICLKPOL | SPICFG_TXCLKEDGE_FALLING; ++ break; ++ } ++ rs->mode = spi->mode; ++ ++#if 0 ++ /* set 
spiclk and spiena to tri-state */ ++ reg |= SPICFG_HIZSPI; ++#endif ++ ++ /* clock divide */ ++ reg |= rt2880_spi_baudrate_get(spi, spi->max_speed_hz); ++ ++ rt2880_spi_write(rs, RAMIPS_SPI_CFG, reg); ++ ++ return 0; ++} ++ ++static int rt2880_spi_probe(struct platform_device *pdev) ++{ ++ struct spi_master *master; ++ struct rt2880_spi *rs; ++ void __iomem *base; ++ struct resource *r; ++ struct clk *clk; ++ int ret; ++ ++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ base = devm_ioremap_resource(&pdev->dev, r); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(clk)) { ++ dev_err(&pdev->dev, "unable to get SYS clock\n"); ++ return PTR_ERR(clk); ++ } ++ ++ ret = clk_prepare_enable(clk); ++ if (ret) ++ goto err_clk; ++ ++ master = spi_alloc_master(&pdev->dev, sizeof(*rs)); ++ if (master == NULL) { ++ dev_dbg(&pdev->dev, "master allocation failed\n"); ++ ret = -ENOMEM; ++ goto err_clk; ++ } ++ ++ master->dev.of_node = pdev->dev.of_node; ++ master->mode_bits = RT2880_SPI_MODE_BITS; ++ master->bits_per_word_mask = SPI_BPW_MASK(8); ++ master->min_speed_hz = clk_get_rate(clk) / 128; ++ master->max_speed_hz = clk_get_rate(clk) / 2; ++ master->flags = SPI_MASTER_HALF_DUPLEX; ++ master->setup = rt2880_spi_setup; ++ master->prepare_message = rt2880_spi_prepare_message; ++ master->set_cs = rt2880_spi_set_cs; ++ master->transfer_one = rt2880_spi_transfer_one, ++ ++ dev_set_drvdata(&pdev->dev, master); ++ ++ rs = spi_master_get_devdata(master); ++ rs->master = master; ++ rs->base = base; ++ rs->clk = clk; ++ ++ if (atomic_inc_return(&hw_reset_count) == 1) ++ device_reset(&pdev->dev); ++ ++ ret = devm_spi_register_master(&pdev->dev, master); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "devm_spi_register_master error.\n"); ++ goto err_master; ++ } ++ ++ return ret; ++ ++err_master: ++ spi_master_put(master); ++ kfree(master); ++err_clk: ++ clk_disable_unprepare(clk); ++ ++ return ret; ++} ++ ++static int rt2880_spi_remove(struct platform_device *pdev) ++{ ++ struct spi_master *master; ++ struct rt2880_spi *rs; ++ ++ master = dev_get_drvdata(&pdev->dev); ++ rs = spi_master_get_devdata(master); ++ ++ clk_disable_unprepare(rs->clk); ++ atomic_dec(&hw_reset_count); ++ ++ return 0; ++} ++ ++MODULE_ALIAS("platform:" DRIVER_NAME); ++ ++static const struct of_device_id rt2880_spi_match[] = { ++ { .compatible = "ralink,rt2880-spi" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, rt2880_spi_match); ++ ++static struct platform_driver rt2880_spi_driver = { ++ .driver = { ++ .name = DRIVER_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = rt2880_spi_match, ++ }, ++ .probe = rt2880_spi_probe, ++ .remove = rt2880_spi_remove, ++}; ++ ++module_platform_driver(rt2880_spi_driver); ++ ++MODULE_DESCRIPTION("Ralink SPI driver"); ++MODULE_AUTHOR("Sergiy <piratfm@gmail.com>"); ++MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch b/target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch new file mode 100644 index 0000000000..bc5418bab2 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0043-spi-add-mt7621-support.patch @@ -0,0 +1,524 @@ +From 87a5fcd57c577cd94b5b080deb98885077c13a42 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:49:07 +0100 +Subject: [PATCH 43/53] spi: add mt7621 support + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/spi/Kconfig | 6 + + drivers/spi/Makefile | 1 + + 
drivers/spi/spi-mt7621.c | 480 ++++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 487 insertions(+) + create mode 100644 drivers/spi/spi-mt7621.c + +--- a/drivers/spi/Kconfig ++++ b/drivers/spi/Kconfig +@@ -569,6 +569,12 @@ config SPI_RT2880 + help + This selects a driver for the Ralink RT288x/RT305x SPI Controller. + ++config SPI_MT7621 ++ tristate "MediaTek MT7621 SPI Controller" ++ depends on RALINK ++ help ++ This selects a driver for the MediaTek MT7621 SPI Controller. ++ + config SPI_S3C24XX + tristate "Samsung S3C24XX series SPI" + depends on ARCH_S3C24XX +--- a/drivers/spi/Makefile ++++ b/drivers/spi/Makefile +@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mp + obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o + obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o + obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o ++obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o + obj-$(CONFIG_SPI_MXS) += spi-mxs.o + obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o + obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o +--- /dev/null ++++ b/drivers/spi/spi-mt7621.c +@@ -0,0 +1,483 @@ ++/* ++ * spi-mt7621.c -- MediaTek MT7621 SPI controller driver ++ * ++ * Copyright (C) 2011 Sergiy <piratfm@gmail.com> ++ * Copyright (C) 2011-2013 Gabor Juhos <juhosg@openwrt.org> ++ * Copyright (C) 2014-2015 Felix Fietkau <nbd@nbd.name> ++ * ++ * Some parts are based on spi-orion.c: ++ * Author: Shadi Ammouri <shadi@marvell.com> ++ * Copyright (C) 2007-2008 Marvell Ltd. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/clk.h> ++#include <linux/err.h> ++#include <linux/delay.h> ++#include <linux/io.h> ++#include <linux/reset.h> ++#include <linux/spi/spi.h> ++#include <linux/of_device.h> ++#include <linux/platform_device.h> ++#include <linux/swab.h> ++ ++#include <ralink_regs.h> ++ ++#define SPI_BPW_MASK(bits) BIT((bits) - 1) ++ ++#define DRIVER_NAME "spi-mt7621" ++/* in usec */ ++#define RALINK_SPI_WAIT_MAX_LOOP 2000 ++ ++/* SPISTAT register bit field */ ++#define SPISTAT_BUSY BIT(0) ++ ++#define MT7621_SPI_TRANS 0x00 ++#define SPITRANS_BUSY BIT(16) ++ ++#define MT7621_SPI_OPCODE 0x04 ++#define MT7621_SPI_DATA0 0x08 ++#define MT7621_SPI_DATA4 0x18 ++#define SPI_CTL_TX_RX_CNT_MASK 0xff ++#define SPI_CTL_START BIT(8) ++ ++#define MT7621_SPI_POLAR 0x38 ++#define MT7621_SPI_MASTER 0x28 ++#define MT7621_SPI_MOREBUF 0x2c ++#define MT7621_SPI_SPACE 0x3c ++ ++#define MT7621_CPHA BIT(5) ++#define MT7621_CPOL BIT(4) ++#define MT7621_LSB_FIRST BIT(3) ++ ++#define RT2880_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH) ++ ++struct mt7621_spi; ++ ++struct mt7621_spi { ++ struct spi_master *master; ++ void __iomem *base; ++ unsigned int sys_freq; ++ unsigned int speed; ++ struct clk *clk; ++ spinlock_t lock; ++ ++ struct mt7621_spi_ops *ops; ++}; ++ ++static inline struct mt7621_spi *spidev_to_mt7621_spi(struct spi_device *spi) ++{ ++ return spi_master_get_devdata(spi->master); ++} ++ ++static inline u32 mt7621_spi_read(struct mt7621_spi *rs, u32 reg) ++{ ++ return ioread32(rs->base + reg); ++} ++ ++static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val) ++{ ++ iowrite32(val, rs->base + reg); ++} ++ ++static void mt7621_spi_reset(struct mt7621_spi *rs, int duplex) ++{ ++ u32 master = mt7621_spi_read(rs, MT7621_SPI_MASTER); ++ ++ master |= 7 << 29; ++ master |= 1 << 2; ++ if (duplex) ++ 
master |= 1 << 10; ++ else ++ master &= ~(1 << 10); ++ ++ mt7621_spi_write(rs, MT7621_SPI_MASTER, master); ++} ++ ++static void mt7621_spi_set_cs(struct spi_device *spi, int enable) ++{ ++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi); ++ int cs = spi->chip_select; ++ u32 polar = 0; ++ ++ mt7621_spi_reset(rs, cs); ++ if (enable) ++ polar = BIT(cs); ++ mt7621_spi_write(rs, MT7621_SPI_POLAR, polar); ++} ++ ++static int mt7621_spi_prepare(struct spi_device *spi, unsigned int speed) ++{ ++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi); ++ u32 rate; ++ u32 reg; ++ ++ dev_dbg(&spi->dev, "speed:%u\n", speed); ++ ++ rate = DIV_ROUND_UP(rs->sys_freq, speed); ++ dev_dbg(&spi->dev, "rate-1:%u\n", rate); ++ ++ if (rate > 4097) ++ return -EINVAL; ++ ++ if (rate < 2) ++ rate = 2; ++ ++ reg = mt7621_spi_read(rs, MT7621_SPI_MASTER); ++ reg &= ~(0xfff << 16); ++ reg |= (rate - 2) << 16; ++ rs->speed = speed; ++ ++ reg &= ~MT7621_LSB_FIRST; ++ if (spi->mode & SPI_LSB_FIRST) ++ reg |= MT7621_LSB_FIRST; ++ ++ reg &= ~(MT7621_CPHA | MT7621_CPOL); ++ switch(spi->mode & (SPI_CPOL | SPI_CPHA)) { ++ case SPI_MODE_0: ++ break; ++ case SPI_MODE_1: ++ reg |= MT7621_CPHA; ++ break; ++ case SPI_MODE_2: ++ reg |= MT7621_CPOL; ++ break; ++ case SPI_MODE_3: ++ reg |= MT7621_CPOL | MT7621_CPHA; ++ break; ++ } ++ mt7621_spi_write(rs, MT7621_SPI_MASTER, reg); ++ ++ return 0; ++} ++ ++static inline int mt7621_spi_wait_till_ready(struct spi_device *spi) ++{ ++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi); ++ int i; ++ ++ for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) { ++ u32 status; ++ ++ status = mt7621_spi_read(rs, MT7621_SPI_TRANS); ++ if ((status & SPITRANS_BUSY) == 0) { ++ return 0; ++ } ++ cpu_relax(); ++ udelay(1); ++ } ++ ++ return -ETIMEDOUT; ++} ++ ++static int mt7621_spi_transfer_half_duplex(struct spi_master *master, ++ struct spi_message *m) ++{ ++ struct mt7621_spi *rs = spi_master_get_devdata(master); ++ struct spi_device *spi = m->spi; ++ unsigned int speed = spi->max_speed_hz; ++ struct spi_transfer *t = NULL; ++ int status = 0; ++ int i, len = 0; ++ int rx_len = 0; ++ u32 data[9] = { 0 }; ++ u32 val; ++ ++ mt7621_spi_wait_till_ready(spi); ++ ++ list_for_each_entry(t, &m->transfers, transfer_list) { ++ const u8 *buf = t->tx_buf; ++ ++ if (t->rx_buf) ++ rx_len += t->len; ++ ++ if (!buf) ++ continue; ++ ++ if (t->speed_hz < speed) ++ speed = t->speed_hz; ++ ++ if (WARN_ON(len + t->len > 36)) { ++ status = -EIO; ++ goto msg_done; ++ } ++ ++ for (i = 0; i < t->len; i++, len++) ++ data[len / 4] |= buf[i] << (8 * (len & 3)); ++ } ++ ++ if (WARN_ON(rx_len > 32)) { ++ status = -EIO; ++ goto msg_done; ++ } ++ ++ if (mt7621_spi_prepare(spi, speed)) { ++ status = -EIO; ++ goto msg_done; ++ } ++ data[0] = swab32(data[0]); ++ if (len < 4) ++ data[0] >>= (4 - len) * 8; ++ ++ for (i = 0; i < len; i += 4) ++ mt7621_spi_write(rs, MT7621_SPI_OPCODE + i, data[i / 4]); ++ ++ val = (min_t(int, len, 4) * 8) << 24; ++ if (len > 4) ++ val |= (len - 4) * 8; ++ val |= (rx_len * 8) << 12; ++ mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val); ++ ++ mt7621_spi_set_cs(spi, 1); ++ ++ val = mt7621_spi_read(rs, MT7621_SPI_TRANS); ++ val |= SPI_CTL_START; ++ mt7621_spi_write(rs, MT7621_SPI_TRANS, val); ++ ++ mt7621_spi_wait_till_ready(spi); ++ ++ mt7621_spi_set_cs(spi, 0); ++ ++ for (i = 0; i < rx_len; i += 4) ++ data[i / 4] = mt7621_spi_read(rs, MT7621_SPI_DATA0 + i); ++ ++ m->actual_length = len + rx_len; ++ ++ len = 0; ++ list_for_each_entry(t, &m->transfers, transfer_list) { ++ u8 *buf = t->rx_buf; ++ ++ if (!buf) ++ continue; ++ 
++ for (i = 0; i < t->len; i++, len++) ++ buf[i] = data[len / 4] >> (8 * (len & 3)); ++ } ++ ++msg_done: ++ m->status = status; ++ spi_finalize_current_message(master); ++ ++ return 0; ++} ++ ++static int mt7621_spi_transfer_full_duplex(struct spi_master *master, ++ struct spi_message *m) ++{ ++ struct mt7621_spi *rs = spi_master_get_devdata(master); ++ struct spi_device *spi = m->spi; ++ unsigned int speed = spi->max_speed_hz; ++ struct spi_transfer *t = NULL; ++ int status = 0; ++ int i, len = 0; ++ int rx_len = 0; ++ u32 data[9] = { 0 }; ++ u32 val = 0; ++ ++ mt7621_spi_wait_till_ready(spi); ++ ++ list_for_each_entry(t, &m->transfers, transfer_list) { ++ const u8 *buf = t->tx_buf; ++ ++ if (t->rx_buf) ++ rx_len += t->len; ++ ++ if (!buf) ++ continue; ++ ++ if (WARN_ON(len + t->len > 16)) { ++ status = -EIO; ++ goto msg_done; ++ } ++ ++ for (i = 0; i < t->len; i++, len++) ++ data[len / 4] |= buf[i] << (8 * (len & 3)); ++ if (speed > t->speed_hz) ++ speed = t->speed_hz; ++ } ++ ++ if (WARN_ON(rx_len > 16)) { ++ status = -EIO; ++ goto msg_done; ++ } ++ ++ if (mt7621_spi_prepare(spi, speed)) { ++ status = -EIO; ++ goto msg_done; ++ } ++ ++ for (i = 0; i < len; i += 4) ++ mt7621_spi_write(rs, MT7621_SPI_DATA0 + i, data[i / 4]); ++ ++ val |= len * 8; ++ val |= (rx_len * 8) << 12; ++ mt7621_spi_write(rs, MT7621_SPI_MOREBUF, val); ++ ++ mt7621_spi_set_cs(spi, 1); ++ ++ val = mt7621_spi_read(rs, MT7621_SPI_TRANS); ++ val |= SPI_CTL_START; ++ mt7621_spi_write(rs, MT7621_SPI_TRANS, val); ++ ++ mt7621_spi_wait_till_ready(spi); ++ ++ mt7621_spi_set_cs(spi, 0); ++ ++ for (i = 0; i < rx_len; i += 4) ++ data[i / 4] = mt7621_spi_read(rs, MT7621_SPI_DATA4 + i); ++ ++ m->actual_length = rx_len; ++ ++ len = 0; ++ list_for_each_entry(t, &m->transfers, transfer_list) { ++ u8 *buf = t->rx_buf; ++ ++ if (!buf) ++ continue; ++ ++ for (i = 0; i < t->len; i++, len++) ++ buf[i] = data[len / 4] >> (8 * (len & 3)); ++ } ++ ++msg_done: ++ m->status = status; ++ spi_finalize_current_message(master); ++ ++ return 0; ++} ++ ++static int mt7621_spi_transfer_one_message(struct spi_master *master, ++ struct spi_message *m) ++{ ++ struct spi_device *spi = m->spi; ++ int cs = spi->chip_select; ++ ++ if (cs) ++ return mt7621_spi_transfer_full_duplex(master, m); ++ return mt7621_spi_transfer_half_duplex(master, m); ++} ++ ++static int mt7621_spi_setup(struct spi_device *spi) ++{ ++ struct mt7621_spi *rs = spidev_to_mt7621_spi(spi); ++ ++ if ((spi->max_speed_hz == 0) || ++ (spi->max_speed_hz > (rs->sys_freq / 2))) ++ spi->max_speed_hz = (rs->sys_freq / 2); ++ ++ if (spi->max_speed_hz < (rs->sys_freq / 4097)) { ++ dev_err(&spi->dev, "setup: requested speed is too low %d Hz\n", ++ spi->max_speed_hz); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id mt7621_spi_match[] = { ++ { .compatible = "ralink,mt7621-spi" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7621_spi_match); ++ ++static int mt7621_spi_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct spi_master *master; ++ struct mt7621_spi *rs; ++ unsigned long flags; ++ void __iomem *base; ++ struct resource *r; ++ int status = 0; ++ struct clk *clk; ++ struct mt7621_spi_ops *ops; ++ ++ match = of_match_device(mt7621_spi_match, &pdev->dev); ++ if (!match) ++ return -EINVAL; ++ ops = (struct mt7621_spi_ops *)match->data; ++ ++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ base = devm_ioremap_resource(&pdev->dev, r); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ clk = devm_clk_get(&pdev->dev, NULL); ++ 
if (IS_ERR(clk)) { ++ dev_err(&pdev->dev, "unable to get SYS clock, err=%d\n", ++ status); ++ return PTR_ERR(clk); ++ } ++ ++ status = clk_prepare_enable(clk); ++ if (status) ++ return status; ++ ++ master = spi_alloc_master(&pdev->dev, sizeof(*rs)); ++ if (master == NULL) { ++ dev_info(&pdev->dev, "master allocation failed\n"); ++ return -ENOMEM; ++ } ++ ++ master->mode_bits = RT2880_SPI_MODE_BITS; ++ ++ master->setup = mt7621_spi_setup; ++ master->transfer_one_message = mt7621_spi_transfer_one_message; ++ master->bits_per_word_mask = SPI_BPW_MASK(8); ++ master->dev.of_node = pdev->dev.of_node; ++ master->num_chipselect = 2; ++ ++ dev_set_drvdata(&pdev->dev, master); ++ ++ rs = spi_master_get_devdata(master); ++ rs->base = base; ++ rs->clk = clk; ++ rs->master = master; ++ rs->sys_freq = clk_get_rate(rs->clk); ++ rs->ops = ops; ++ dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq); ++ spin_lock_irqsave(&rs->lock, flags); ++ ++ device_reset(&pdev->dev); ++ ++ mt7621_spi_reset(rs, 0); ++ ++ return spi_register_master(master); ++} ++ ++static int mt7621_spi_remove(struct platform_device *pdev) ++{ ++ struct spi_master *master; ++ struct mt7621_spi *rs; ++ ++ master = dev_get_drvdata(&pdev->dev); ++ rs = spi_master_get_devdata(master); ++ ++ clk_disable(rs->clk); ++ spi_unregister_master(master); ++ ++ return 0; ++} ++ ++MODULE_ALIAS("platform:" DRIVER_NAME); ++ ++static struct platform_driver mt7621_spi_driver = { ++ .driver = { ++ .name = DRIVER_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = mt7621_spi_match, ++ }, ++ .probe = mt7621_spi_probe, ++ .remove = mt7621_spi_remove, ++}; ++ ++module_platform_driver(mt7621_spi_driver); ++ ++MODULE_DESCRIPTION("MT7621 SPI driver"); ++MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>"); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch b/target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch new file mode 100644 index 0000000000..471c8f40e0 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0044-i2c-MIPS-adds-ralink-I2C-driver.patch @@ -0,0 +1,507 @@ +From 723b8beaabf3c3c4b1ce69480141f1e926f3f3b2 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:52:56 +0100 +Subject: [PATCH 44/53] i2c: MIPS: adds ralink I2C driver + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + .../devicetree/bindings/i2c/i2c-ralink.txt | 27 ++ + drivers/i2c/busses/Kconfig | 4 + + drivers/i2c/busses/Makefile | 1 + + drivers/i2c/busses/i2c-ralink.c | 327 ++++++++++++++++++++ + 4 files changed, 359 insertions(+) + create mode 100644 Documentation/devicetree/bindings/i2c/i2c-ralink.txt + create mode 100644 drivers/i2c/busses/i2c-ralink.c + +--- /dev/null ++++ b/Documentation/devicetree/bindings/i2c/i2c-ralink.txt +@@ -0,0 +1,27 @@ ++I2C for Ralink platforms ++ ++Required properties : ++- compatible : Must be "link,rt3052-i2c" ++- reg: physical base address of the controller and length of memory mapped ++ region. ++- #address-cells = <1>; ++- #size-cells = <0>; ++ ++Optional properties: ++- Child nodes conforming to i2c bus binding ++ ++Example : ++ ++palmbus@10000000 { ++ i2c@900 { ++ compatible = "link,rt3052-i2c"; ++ reg = <0x900 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ hwmon@4b { ++ compatible = "national,lm92"; ++ reg = <0x4b>; ++ }; ++ }; ++}; +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -863,6 +863,11 @@ config I2C_RK3X + This driver can also be built as a module. 
If so, the module will + be called i2c-rk3x. + ++config I2C_RALINK ++ tristate "Ralink I2C Controller" ++ depends on RALINK && !SOC_MT7621 ++ select OF_I2C ++ + config HAVE_S3C2410_I2C + bool + help +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -84,6 +84,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o + obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o + obj-$(CONFIG_I2C_PXA) += i2c-pxa.o + obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o ++obj-$(CONFIG_I2C_RALINK) += i2c-ralink.o + obj-$(CONFIG_I2C_QUP) += i2c-qup.o + obj-$(CONFIG_I2C_RIIC) += i2c-riic.o + obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o +--- /dev/null ++++ b/drivers/i2c/busses/i2c-ralink.c +@@ -0,0 +1,435 @@ ++/* ++ * drivers/i2c/busses/i2c-ralink.c ++ * ++ * Copyright (C) 2013 Steven Liu <steven_liu@mediatek.com> ++ * Copyright (C) 2016 Michael Lee <igvtee@gmail.com> ++ * ++ * Improve driver for i2cdetect from i2c-tools to detect i2c devices on the bus. ++ * (C) 2014 Sittisak <sittisaks@hotmail.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include <linux/interrupt.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/reset.h> ++#include <linux/delay.h> ++#include <linux/slab.h> ++#include <linux/init.h> ++#include <linux/errno.h> ++#include <linux/platform_device.h> ++#include <linux/of_platform.h> ++#include <linux/i2c.h> ++#include <linux/io.h> ++#include <linux/err.h> ++#include <linux/clk.h> ++ ++#define REG_CONFIG_REG 0x00 ++#define REG_CLKDIV_REG 0x04 ++#define REG_DEVADDR_REG 0x08 ++#define REG_ADDR_REG 0x0C ++#define REG_DATAOUT_REG 0x10 ++#define REG_DATAIN_REG 0x14 ++#define REG_STATUS_REG 0x18 ++#define REG_STARTXFR_REG 0x1C ++#define REG_BYTECNT_REG 0x20 ++ ++/* REG_CONFIG_REG */ ++#define I2C_ADDRLEN_OFFSET 5 ++#define I2C_DEVADLEN_OFFSET 2 ++#define I2C_ADDRLEN_MASK 0x3 ++#define I2C_ADDR_DIS BIT(1) ++#define I2C_DEVADDR_DIS BIT(0) ++#define I2C_ADDRLEN_8 (7 << I2C_ADDRLEN_OFFSET) ++#define I2C_DEVADLEN_7 (6 << I2C_DEVADLEN_OFFSET) ++#define I2C_CONF_DEFAULT (I2C_ADDRLEN_8 | I2C_DEVADLEN_7) ++ ++/* REG_CLKDIV_REG */ ++#define I2C_CLKDIV_MASK 0xffff ++ ++/* REG_DEVADDR_REG */ ++#define I2C_DEVADDR_MASK 0x7f ++ ++/* REG_ADDR_REG */ ++#define I2C_ADDR_MASK 0xff ++ ++/* REG_STATUS_REG */ ++#define I2C_STARTERR BIT(4) ++#define I2C_ACKERR BIT(3) ++#define I2C_DATARDY BIT(2) ++#define I2C_SDOEMPTY BIT(1) ++#define I2C_BUSY BIT(0) ++ ++/* REG_STARTXFR_REG */ ++#define NOSTOP_CMD BIT(2) ++#define NODATA_CMD BIT(1) ++#define READ_CMD BIT(0) ++ ++/* REG_BYTECNT_REG */ ++#define BYTECNT_MAX 64 ++#define SET_BYTECNT(x) (x - 1) ++ ++/* timeout waiting for I2C devices to respond (clock streching) */ ++#define TIMEOUT_MS 1000 ++#define DELAY_INTERVAL_US 100 ++ ++struct rt_i2c { ++ void __iomem *base; ++ struct clk *clk; ++ struct device *dev; ++ struct i2c_adapter adap; ++ u32 cur_clk; ++ u32 clk_div; ++ u32 flags; ++}; ++ ++static void rt_i2c_w32(struct rt_i2c *i2c, u32 val, unsigned reg) ++{ ++ iowrite32(val, i2c->base + reg); ++} ++ ++static u32 rt_i2c_r32(struct rt_i2c *i2c, unsigned reg) ++{ ++ return ioread32(i2c->base + reg); ++} ++ ++static int 
poll_down_timeout(void __iomem *addr, u32 mask) ++{ ++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS); ++ ++ do { ++ if (!(readl_relaxed(addr) & mask)) ++ return 0; ++ ++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50); ++ } while (time_before(jiffies, timeout)); ++ ++ return (readl_relaxed(addr) & mask) ? -EAGAIN : 0; ++} ++ ++static int rt_i2c_wait_idle(struct rt_i2c *i2c) ++{ ++ int ret; ++ ++ ret = poll_down_timeout(i2c->base + REG_STATUS_REG, I2C_BUSY); ++ if (ret < 0) ++ dev_dbg(i2c->dev, "idle err(%d)\n", ret); ++ ++ return ret; ++} ++ ++static int poll_up_timeout(void __iomem *addr, u32 mask) ++{ ++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS); ++ u32 status; ++ ++ do { ++ status = readl_relaxed(addr); ++ ++ /* check error status */ ++ if (status & I2C_STARTERR) ++ return -EAGAIN; ++ else if (status & I2C_ACKERR) ++ return -ENXIO; ++ else if (status & mask) ++ return 0; ++ ++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50); ++ } while (time_before(jiffies, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static int rt_i2c_wait_rx_done(struct rt_i2c *i2c) ++{ ++ int ret; ++ ++ ret = poll_up_timeout(i2c->base + REG_STATUS_REG, I2C_DATARDY); ++ if (ret < 0) ++ dev_dbg(i2c->dev, "rx err(%d)\n", ret); ++ ++ return ret; ++} ++ ++static int rt_i2c_wait_tx_done(struct rt_i2c *i2c) ++{ ++ int ret; ++ ++ ret = poll_up_timeout(i2c->base + REG_STATUS_REG, I2C_SDOEMPTY); ++ if (ret < 0) ++ dev_dbg(i2c->dev, "tx err(%d)\n", ret); ++ ++ return ret; ++} ++ ++static void rt_i2c_reset(struct rt_i2c *i2c) ++{ ++ device_reset(i2c->adap.dev.parent); ++ barrier(); ++ rt_i2c_w32(i2c, i2c->clk_div, REG_CLKDIV_REG); ++} ++ ++static void rt_i2c_dump_reg(struct rt_i2c *i2c) ++{ ++ dev_dbg(i2c->dev, "conf %08x, clkdiv %08x, devaddr %08x, " \ ++ "addr %08x, dataout %08x, datain %08x, " \ ++ "status %08x, startxfr %08x, bytecnt %08x\n", ++ rt_i2c_r32(i2c, REG_CONFIG_REG), ++ rt_i2c_r32(i2c, REG_CLKDIV_REG), ++ rt_i2c_r32(i2c, REG_DEVADDR_REG), ++ rt_i2c_r32(i2c, REG_ADDR_REG), ++ rt_i2c_r32(i2c, REG_DATAOUT_REG), ++ rt_i2c_r32(i2c, REG_DATAIN_REG), ++ rt_i2c_r32(i2c, REG_STATUS_REG), ++ rt_i2c_r32(i2c, REG_STARTXFR_REG), ++ rt_i2c_r32(i2c, REG_BYTECNT_REG)); ++} ++ ++static int rt_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, ++ int num) ++{ ++ struct rt_i2c *i2c; ++ struct i2c_msg *pmsg; ++ unsigned char addr; ++ int i, j, ret; ++ u32 cmd; ++ ++ i2c = i2c_get_adapdata(adap); ++ ++ for (i = 0; i < num; i++) { ++ pmsg = &msgs[i]; ++ if (i == (num - 1)) ++ cmd = 0; ++ else ++ cmd = NOSTOP_CMD; ++ ++ dev_dbg(i2c->dev, "addr: 0x%x, len: %d, flags: 0x%x, stop: %d\n", ++ pmsg->addr, pmsg->len, pmsg->flags, ++ (cmd == 0)? 
1 : 0); ++ ++ /* wait hardware idle */ ++ if ((ret = rt_i2c_wait_idle(i2c))) ++ goto err_timeout; ++ ++ if (pmsg->flags & I2C_M_TEN) { ++ rt_i2c_w32(i2c, I2C_CONF_DEFAULT, REG_CONFIG_REG); ++ /* 10 bits address */ ++ addr = 0x78 | ((pmsg->addr >> 8) & 0x03); ++ rt_i2c_w32(i2c, addr & I2C_DEVADDR_MASK, ++ REG_DEVADDR_REG); ++ rt_i2c_w32(i2c, pmsg->addr & I2C_ADDR_MASK, ++ REG_ADDR_REG); ++ } else { ++ rt_i2c_w32(i2c, I2C_CONF_DEFAULT | I2C_ADDR_DIS, ++ REG_CONFIG_REG); ++ /* 7 bits address */ ++ rt_i2c_w32(i2c, pmsg->addr & I2C_DEVADDR_MASK, ++ REG_DEVADDR_REG); ++ } ++ ++ /* buffer length */ ++ if (pmsg->len == 0) ++ cmd |= NODATA_CMD; ++ else ++ rt_i2c_w32(i2c, SET_BYTECNT(pmsg->len), ++ REG_BYTECNT_REG); ++ ++ j = 0; ++ if (pmsg->flags & I2C_M_RD) { ++ cmd |= READ_CMD; ++ /* start transfer */ ++ barrier(); ++ rt_i2c_w32(i2c, cmd, REG_STARTXFR_REG); ++ do { ++ /* wait */ ++ if ((ret = rt_i2c_wait_rx_done(i2c))) ++ goto err_timeout; ++ /* read data */ ++ if (pmsg->len) ++ pmsg->buf[j] = rt_i2c_r32(i2c, ++ REG_DATAIN_REG); ++ j++; ++ } while (j < pmsg->len); ++ } else { ++ do { ++ /* write data */ ++ if (pmsg->len) ++ rt_i2c_w32(i2c, pmsg->buf[j], ++ REG_DATAOUT_REG); ++ /* start transfer */ ++ if (j == 0) { ++ barrier(); ++ rt_i2c_w32(i2c, cmd, REG_STARTXFR_REG); ++ } ++ /* wait */ ++ if ((ret = rt_i2c_wait_tx_done(i2c))) ++ goto err_timeout; ++ j++; ++ } while (j < pmsg->len); ++ } ++ } ++ /* the return value is number of executed messages */ ++ ret = i; ++ ++ return ret; ++ ++err_timeout: ++ rt_i2c_dump_reg(i2c); ++ rt_i2c_reset(i2c); ++ return ret; ++} ++ ++static u32 rt_i2c_func(struct i2c_adapter *a) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++} ++ ++static const struct i2c_algorithm rt_i2c_algo = { ++ .master_xfer = rt_i2c_master_xfer, ++ .functionality = rt_i2c_func, ++}; ++ ++static const struct of_device_id i2c_rt_dt_ids[] = { ++ { .compatible = "ralink,rt2880-i2c" }, ++ { /* sentinel */ } ++}; ++ ++MODULE_DEVICE_TABLE(of, i2c_rt_dt_ids); ++ ++static struct i2c_adapter_quirks rt_i2c_quirks = { ++ .max_write_len = BYTECNT_MAX, ++ .max_read_len = BYTECNT_MAX, ++}; ++ ++static int rt_i2c_init(struct rt_i2c *i2c) ++{ ++ u32 reg; ++ ++ /* i2c_sclk = periph_clk / ((2 * clk_div) + 5) */ ++ i2c->clk_div = (clk_get_rate(i2c->clk) - (5 * i2c->cur_clk)) / ++ (2 * i2c->cur_clk); ++ if (i2c->clk_div < 8) ++ i2c->clk_div = 8; ++ if (i2c->clk_div > I2C_CLKDIV_MASK) ++ i2c->clk_div = I2C_CLKDIV_MASK; ++ ++ /* check support combinde/repeated start message */ ++ rt_i2c_w32(i2c, NOSTOP_CMD, REG_STARTXFR_REG); ++ reg = rt_i2c_r32(i2c, REG_STARTXFR_REG) & NOSTOP_CMD; ++ ++ rt_i2c_reset(i2c); ++ ++ return reg; ++} ++ ++static int rt_i2c_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct rt_i2c *i2c; ++ struct i2c_adapter *adap; ++ const struct of_device_id *match; ++ int ret, restart; ++ ++ match = of_match_device(i2c_rt_dt_ids, &pdev->dev); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no memory resource found\n"); ++ return -ENODEV; ++ } ++ ++ i2c = devm_kzalloc(&pdev->dev, sizeof(struct rt_i2c), GFP_KERNEL); ++ if (!i2c) { ++ dev_err(&pdev->dev, "failed to allocate i2c_adapter\n"); ++ return -ENOMEM; ++ } ++ ++ i2c->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(i2c->base)) ++ return PTR_ERR(i2c->base); ++ ++ i2c->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(i2c->clk)) { ++ dev_err(&pdev->dev, "no clock defined\n"); ++ return -ENODEV; ++ } ++ clk_prepare_enable(i2c->clk); ++ i2c->dev = 
&pdev->dev; ++ ++ if (of_property_read_u32(pdev->dev.of_node, ++ "clock-frequency", &i2c->cur_clk)) ++ i2c->cur_clk = 100000; ++ ++ adap = &i2c->adap; ++ adap->owner = THIS_MODULE; ++ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; ++ adap->algo = &rt_i2c_algo; ++ adap->retries = 3; ++ adap->dev.parent = &pdev->dev; ++ i2c_set_adapdata(adap, i2c); ++ adap->dev.of_node = pdev->dev.of_node; ++ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name)); ++ adap->quirks = &rt_i2c_quirks; ++ ++ platform_set_drvdata(pdev, i2c); ++ ++ restart = rt_i2c_init(i2c); ++ ++ ret = i2c_add_adapter(adap); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to add adapter\n"); ++ clk_disable_unprepare(i2c->clk); ++ return ret; ++ } ++ ++ dev_info(&pdev->dev, "clock %uKHz, re-start %ssupport\n", ++ i2c->cur_clk/1000, restart ? "" : "not "); ++ ++ return ret; ++} ++ ++static int rt_i2c_remove(struct platform_device *pdev) ++{ ++ struct rt_i2c *i2c = platform_get_drvdata(pdev); ++ ++ i2c_del_adapter(&i2c->adap); ++ clk_disable_unprepare(i2c->clk); ++ ++ return 0; ++} ++ ++static struct platform_driver rt_i2c_driver = { ++ .probe = rt_i2c_probe, ++ .remove = rt_i2c_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "i2c-ralink", ++ .of_match_table = i2c_rt_dt_ids, ++ }, ++}; ++ ++static int __init i2c_rt_init (void) ++{ ++ return platform_driver_register(&rt_i2c_driver); ++} ++subsys_initcall(i2c_rt_init); ++ ++static void __exit i2c_rt_exit (void) ++{ ++ platform_driver_unregister(&rt_i2c_driver); ++} ++module_exit(i2c_rt_exit); ++ ++MODULE_AUTHOR("Steven Liu <steven_liu@mediatek.com>"); ++MODULE_DESCRIPTION("Ralink I2c host driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:Ralink-I2C"); diff --git a/target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch b/target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch new file mode 100644 index 0000000000..9cd8c4b5cf --- /dev/null +++ b/target/linux/ramips/patches-4.14/0045-i2c-add-mt7621-driver.patch @@ -0,0 +1,473 @@ +From d5c54ff3d1db0a4348fa04d8e78f3bf6063e3afc Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 7 Dec 2015 17:21:27 +0100 +Subject: [PATCH 45/53] i2c: add mt7621 driver + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/i2c/busses/Kconfig | 4 + + drivers/i2c/busses/Makefile | 1 + + drivers/i2c/busses/i2c-mt7621.c | 303 +++++++++++++++++++++++++++++++++++++++ + 3 files changed, 308 insertions(+) + create mode 100644 drivers/i2c/busses/i2c-mt7621.c + +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -868,6 +868,11 @@ config I2C_RALINK + depends on RALINK && !SOC_MT7621 + select OF_I2C + ++config I2C_MT7621 ++ tristate "MT7621/MT7628 I2C Controller" ++ depends on RALINK && (SOC_MT7620 || SOC_MT7621) ++ select OF_I2C ++ + config HAVE_S3C2410_I2C + bool + help +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -85,6 +85,7 @@ obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o + obj-$(CONFIG_I2C_PXA) += i2c-pxa.o + obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o + obj-$(CONFIG_I2C_RALINK) += i2c-ralink.o ++obj-$(CONFIG_I2C_MT7621) += i2c-mt7621.o + obj-$(CONFIG_I2C_QUP) += i2c-qup.o + obj-$(CONFIG_I2C_RIIC) += i2c-riic.o + obj-$(CONFIG_I2C_RK3X) += i2c-rk3x.o +--- /dev/null ++++ b/drivers/i2c/busses/i2c-mt7621.c +@@ -0,0 +1,433 @@ ++/* ++ * drivers/i2c/busses/i2c-mt7621.c ++ * ++ * Copyright (C) 2013 Steven Liu <steven_liu@mediatek.com> ++ * Copyright (C) 2016 Michael Lee <igvtee@gmail.com> ++ * ++ * Improve driver for i2cdetect from i2c-tools to 
detect i2c devices on the bus. ++ * (C) 2014 Sittisak <sittisaks@hotmail.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include <linux/interrupt.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/reset.h> ++#include <linux/delay.h> ++#include <linux/slab.h> ++#include <linux/init.h> ++#include <linux/errno.h> ++#include <linux/platform_device.h> ++#include <linux/of_platform.h> ++#include <linux/i2c.h> ++#include <linux/io.h> ++#include <linux/err.h> ++#include <linux/clk.h> ++ ++#define REG_SM0CFG0 0x08 ++#define REG_SM0DOUT 0x10 ++#define REG_SM0DIN 0x14 ++#define REG_SM0ST 0x18 ++#define REG_SM0AUTO 0x1C ++#define REG_SM0CFG1 0x20 ++#define REG_SM0CFG2 0x28 ++#define REG_SM0CTL0 0x40 ++#define REG_SM0CTL1 0x44 ++#define REG_SM0D0 0x50 ++#define REG_SM0D1 0x54 ++#define REG_PINTEN 0x5C ++#define REG_PINTST 0x60 ++#define REG_PINTCL 0x64 ++ ++/* REG_SM0CFG0 */ ++#define I2C_DEVADDR_MASK 0x7f ++ ++/* REG_SM0ST */ ++#define I2C_DATARDY BIT(2) ++#define I2C_SDOEMPTY BIT(1) ++#define I2C_BUSY BIT(0) ++ ++/* REG_SM0AUTO */ ++#define READ_CMD BIT(0) ++ ++/* REG_SM0CFG1 */ ++#define BYTECNT_MAX 64 ++#define SET_BYTECNT(x) (x - 1) ++ ++/* REG_SM0CFG2 */ ++#define AUTOMODE_EN BIT(0) ++ ++/* REG_SM0CTL0 */ ++#define ODRAIN_HIGH_SM0 BIT(31) ++#define VSYNC_SHIFT 28 ++#define VSYNC_MASK 0x3 ++#define VSYNC_PULSE (0x1 << VSYNC_SHIFT) ++#define VSYNC_RISING (0x2 << VSYNC_SHIFT) ++#define CLK_DIV_SHIFT 16 ++#define CLK_DIV_MASK 0xfff ++#define DEG_CNT_SHIFT 8 ++#define DEG_CNT_MASK 0xff ++#define WAIT_HIGH BIT(6) ++#define DEG_EN BIT(5) ++#define CS_STATUA BIT(4) ++#define SCL_STATUS BIT(3) ++#define SDA_STATUS BIT(2) ++#define SM0_EN BIT(1) ++#define SCL_STRECH BIT(0) ++ ++/* REG_SM0CTL1 */ ++#define ACK_SHIFT 16 ++#define ACK_MASK 0xff ++#define PGLEN_SHIFT 8 ++#define PGLEN_MASK 0x7 ++#define SM0_MODE_SHIFT 4 ++#define SM0_MODE_MASK 0x7 ++#define SM0_MODE_START 0x1 ++#define SM0_MODE_WRITE 0x2 ++#define SM0_MODE_STOP 0x3 ++#define SM0_MODE_READ_NACK 0x4 ++#define SM0_MODE_READ_ACK 0x5 ++#define SM0_TRI_BUSY BIT(0) ++ ++/* timeout waiting for I2C devices to respond (clock streching) */ ++#define TIMEOUT_MS 1000 ++#define DELAY_INTERVAL_US 100 ++ ++struct mtk_i2c { ++ void __iomem *base; ++ struct clk *clk; ++ struct device *dev; ++ struct i2c_adapter adap; ++ u32 cur_clk; ++ u32 clk_div; ++ u32 flags; ++}; ++ ++static void mtk_i2c_w32(struct mtk_i2c *i2c, u32 val, unsigned reg) ++{ ++ iowrite32(val, i2c->base + reg); ++} ++ ++static u32 mtk_i2c_r32(struct mtk_i2c *i2c, unsigned reg) ++{ ++ return ioread32(i2c->base + reg); ++} ++ ++static int poll_down_timeout(void __iomem *addr, u32 mask) ++{ ++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS); ++ ++ do { ++ if (!(readl_relaxed(addr) & mask)) ++ return 0; ++ ++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50); ++ } while (time_before(jiffies, timeout)); ++ ++ return (readl_relaxed(addr) & mask) ? 
-EAGAIN : 0; ++} ++ ++static int mtk_i2c_wait_idle(struct mtk_i2c *i2c) ++{ ++ int ret; ++ ++ ret = poll_down_timeout(i2c->base + REG_SM0ST, I2C_BUSY); ++ if (ret < 0) ++ dev_dbg(i2c->dev, "idle err(%d)\n", ret); ++ ++ return ret; ++} ++ ++static int poll_up_timeout(void __iomem *addr, u32 mask) ++{ ++ unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS); ++ u32 status; ++ ++ do { ++ status = readl_relaxed(addr); ++ if (status & mask) ++ return 0; ++ usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50); ++ } while (time_before(jiffies, timeout)); ++ ++ return -ETIMEDOUT; ++} ++ ++static int mtk_i2c_wait_rx_done(struct mtk_i2c *i2c) ++{ ++ int ret; ++ ++ ret = poll_up_timeout(i2c->base + REG_SM0ST, I2C_DATARDY); ++ if (ret < 0) ++ dev_dbg(i2c->dev, "rx err(%d)\n", ret); ++ ++ return ret; ++} ++ ++static int mtk_i2c_wait_tx_done(struct mtk_i2c *i2c) ++{ ++ int ret; ++ ++ ret = poll_up_timeout(i2c->base + REG_SM0ST, I2C_SDOEMPTY); ++ if (ret < 0) ++ dev_dbg(i2c->dev, "tx err(%d)\n", ret); ++ ++ return ret; ++} ++ ++static void mtk_i2c_reset(struct mtk_i2c *i2c) ++{ ++ u32 reg; ++ device_reset(i2c->adap.dev.parent); ++ barrier(); ++ ++ /* ctrl0 */ ++ reg = ODRAIN_HIGH_SM0 | VSYNC_PULSE | (i2c->clk_div << CLK_DIV_SHIFT) | ++ WAIT_HIGH | SM0_EN; ++ mtk_i2c_w32(i2c, reg, REG_SM0CTL0); ++ ++ /* auto mode */ ++ mtk_i2c_w32(i2c, AUTOMODE_EN, REG_SM0CFG2); ++} ++ ++static void mtk_i2c_dump_reg(struct mtk_i2c *i2c) ++{ ++ dev_dbg(i2c->dev, "cfg0 %08x, dout %08x, din %08x, " \ ++ "status %08x, auto %08x, cfg1 %08x, " \ ++ "cfg2 %08x, ctl0 %08x, ctl1 %08x\n", ++ mtk_i2c_r32(i2c, REG_SM0CFG0), ++ mtk_i2c_r32(i2c, REG_SM0DOUT), ++ mtk_i2c_r32(i2c, REG_SM0DIN), ++ mtk_i2c_r32(i2c, REG_SM0ST), ++ mtk_i2c_r32(i2c, REG_SM0AUTO), ++ mtk_i2c_r32(i2c, REG_SM0CFG1), ++ mtk_i2c_r32(i2c, REG_SM0CFG2), ++ mtk_i2c_r32(i2c, REG_SM0CTL0), ++ mtk_i2c_r32(i2c, REG_SM0CTL1)); ++} ++ ++static int mtk_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, ++ int num) ++{ ++ struct mtk_i2c *i2c; ++ struct i2c_msg *pmsg; ++ int i, j, ret; ++ u32 cmd; ++ ++ i2c = i2c_get_adapdata(adap); ++ ++ for (i = 0; i < num; i++) { ++ pmsg = &msgs[i]; ++ cmd = 0; ++ ++ dev_dbg(i2c->dev, "addr: 0x%x, len: %d, flags: 0x%x\n", ++ pmsg->addr, pmsg->len, pmsg->flags); ++ ++ /* wait hardware idle */ ++ if ((ret = mtk_i2c_wait_idle(i2c))) ++ goto err_timeout; ++ ++ if (pmsg->flags & I2C_M_TEN) { ++ dev_dbg(i2c->dev, "10 bits addr not supported\n"); ++ return -EINVAL; ++ } else { ++ /* 7 bits address */ ++ mtk_i2c_w32(i2c, pmsg->addr & I2C_DEVADDR_MASK, ++ REG_SM0CFG0); ++ } ++ ++ /* buffer length */ ++ if (pmsg->len == 0) { ++ dev_dbg(i2c->dev, "length is 0\n"); ++ return -EINVAL; ++ } else ++ mtk_i2c_w32(i2c, SET_BYTECNT(pmsg->len), ++ REG_SM0CFG1); ++ ++ j = 0; ++ if (pmsg->flags & I2C_M_RD) { ++ cmd |= READ_CMD; ++ /* start transfer */ ++ barrier(); ++ mtk_i2c_w32(i2c, cmd, REG_SM0AUTO); ++ do { ++ /* wait */ ++ if ((ret = mtk_i2c_wait_rx_done(i2c))) ++ goto err_timeout; ++ /* read data */ ++ if (pmsg->len) ++ pmsg->buf[j] = mtk_i2c_r32(i2c, ++ REG_SM0DIN); ++ j++; ++ } while (j < pmsg->len); ++ } else { ++ do { ++ /* write data */ ++ if (pmsg->len) ++ mtk_i2c_w32(i2c, pmsg->buf[j], ++ REG_SM0DOUT); ++ /* start transfer */ ++ if (j == 0) { ++ barrier(); ++ mtk_i2c_w32(i2c, cmd, REG_SM0AUTO); ++ } ++ /* wait */ ++ if ((ret = mtk_i2c_wait_tx_done(i2c))) ++ goto err_timeout; ++ j++; ++ } while (j < pmsg->len); ++ } ++ } ++ /* the return value is number of executed messages */ ++ ret = i; ++ ++ return ret; ++ ++err_timeout: 
++ mtk_i2c_dump_reg(i2c); ++ mtk_i2c_reset(i2c); ++ return ret; ++} ++ ++static u32 mtk_i2c_func(struct i2c_adapter *a) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++} ++ ++static const struct i2c_algorithm mtk_i2c_algo = { ++ .master_xfer = mtk_i2c_master_xfer, ++ .functionality = mtk_i2c_func, ++}; ++ ++static const struct of_device_id i2c_mtk_dt_ids[] = { ++ { .compatible = "mediatek,mt7621-i2c" }, ++ { /* sentinel */ } ++}; ++ ++MODULE_DEVICE_TABLE(of, i2c_mtk_dt_ids); ++ ++static struct i2c_adapter_quirks mtk_i2c_quirks = { ++ .max_write_len = BYTECNT_MAX, ++ .max_read_len = BYTECNT_MAX, ++}; ++ ++static void mtk_i2c_init(struct mtk_i2c *i2c) ++{ ++ i2c->clk_div = clk_get_rate(i2c->clk) / i2c->cur_clk; ++ if (i2c->clk_div > CLK_DIV_MASK) ++ i2c->clk_div = CLK_DIV_MASK; ++ ++ mtk_i2c_reset(i2c); ++} ++ ++static int mtk_i2c_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct mtk_i2c *i2c; ++ struct i2c_adapter *adap; ++ const struct of_device_id *match; ++ int ret; ++ ++ match = of_match_device(i2c_mtk_dt_ids, &pdev->dev); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no memory resource found\n"); ++ return -ENODEV; ++ } ++ ++ i2c = devm_kzalloc(&pdev->dev, sizeof(struct mtk_i2c), GFP_KERNEL); ++ if (!i2c) { ++ dev_err(&pdev->dev, "failed to allocate i2c_adapter\n"); ++ return -ENOMEM; ++ } ++ ++ i2c->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(i2c->base)) ++ return PTR_ERR(i2c->base); ++ ++ i2c->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(i2c->clk)) { ++ dev_err(&pdev->dev, "no clock defined\n"); ++ return -ENODEV; ++ } ++ clk_prepare_enable(i2c->clk); ++ i2c->dev = &pdev->dev; ++ ++ if (of_property_read_u32(pdev->dev.of_node, ++ "clock-frequency", &i2c->cur_clk)) ++ i2c->cur_clk = 100000; ++ ++ adap = &i2c->adap; ++ adap->owner = THIS_MODULE; ++ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; ++ adap->algo = &mtk_i2c_algo; ++ adap->retries = 3; ++ adap->dev.parent = &pdev->dev; ++ i2c_set_adapdata(adap, i2c); ++ adap->dev.of_node = pdev->dev.of_node; ++ strlcpy(adap->name, dev_name(&pdev->dev), sizeof(adap->name)); ++ adap->quirks = &mtk_i2c_quirks; ++ ++ platform_set_drvdata(pdev, i2c); ++ ++ mtk_i2c_init(i2c); ++ ++ ret = i2c_add_adapter(adap); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to add adapter\n"); ++ clk_disable_unprepare(i2c->clk); ++ return ret; ++ } ++ ++ dev_info(&pdev->dev, "clock %uKHz, re-start not support\n", ++ i2c->cur_clk/1000); ++ ++ return ret; ++} ++ ++static int mtk_i2c_remove(struct platform_device *pdev) ++{ ++ struct mtk_i2c *i2c = platform_get_drvdata(pdev); ++ ++ i2c_del_adapter(&i2c->adap); ++ clk_disable_unprepare(i2c->clk); ++ ++ return 0; ++} ++ ++static struct platform_driver mtk_i2c_driver = { ++ .probe = mtk_i2c_probe, ++ .remove = mtk_i2c_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "i2c-mt7621", ++ .of_match_table = i2c_mtk_dt_ids, ++ }, ++}; ++ ++static int __init i2c_mtk_init (void) ++{ ++ return platform_driver_register(&mtk_i2c_driver); ++} ++subsys_initcall(i2c_mtk_init); ++ ++static void __exit i2c_mtk_exit (void) ++{ ++ platform_driver_unregister(&mtk_i2c_driver); ++} ++module_exit(i2c_mtk_exit); ++ ++MODULE_AUTHOR("Steven Liu <steven_liu@mediatek.com>"); ++MODULE_DESCRIPTION("MT7621 I2c host driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:MT7621-I2C"); diff --git a/target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch 
b/target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch new file mode 100644 index 0000000000..d3a088b736 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0046-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch @@ -0,0 +1,4831 @@ +From 23147af14531cbdada194b94120ef8774f46292d Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Thu, 13 Nov 2014 19:08:40 +0100 +Subject: [PATCH 46/53] mmc: MIPS: ralink: add sdhci for mt7620a SoC + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/mmc/host/Kconfig | 2 + + drivers/mmc/host/Makefile | 1 + + drivers/mmc/host/mtk-mmc/Kconfig | 16 + + drivers/mmc/host/mtk-mmc/Makefile | 42 + + drivers/mmc/host/mtk-mmc/board.h | 137 ++ + drivers/mmc/host/mtk-mmc/dbg.c | 347 ++++ + drivers/mmc/host/mtk-mmc/dbg.h | 156 ++ + drivers/mmc/host/mtk-mmc/mt6575_sd.h | 1001 +++++++++++ + drivers/mmc/host/mtk-mmc/sd.c | 3060 ++++++++++++++++++++++++++++++++++ + 9 files changed, 4762 insertions(+) + create mode 100644 drivers/mmc/host/mtk-mmc/Kconfig + create mode 100644 drivers/mmc/host/mtk-mmc/Makefile + create mode 100644 drivers/mmc/host/mtk-mmc/board.h + create mode 100644 drivers/mmc/host/mtk-mmc/dbg.c + create mode 100644 drivers/mmc/host/mtk-mmc/dbg.h + create mode 100644 drivers/mmc/host/mtk-mmc/mt6575_sd.h + create mode 100644 drivers/mmc/host/mtk-mmc/sd.c + +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -899,3 +899,5 @@ config MMC_SDHCI_XENON + This selects Marvell Xenon eMMC/SD/SDIO SDHCI. + If you have a controller with this interface, say Y or M here. + If unsure, say N. ++ ++source "drivers/mmc/host/mtk-mmc/Kconfig" +--- a/drivers/mmc/host/Makefile ++++ b/drivers/mmc/host/Makefile +@@ -3,6 +3,7 @@ + # Makefile for MMC/SD host controller drivers + # + ++obj-$(CONFIG_MTK_MMC) += mtk-mmc/ + obj-$(CONFIG_MMC_ARMMMCI) += armmmci.o + armmmci-y := mmci.o + armmmci-$(CONFIG_MMC_QCOM_DML) += mmci_qcom_dml.o +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/Kconfig +@@ -0,0 +1,16 @@ ++config MTK_MMC ++ tristate "MTK SD/MMC" ++ depends on !MTD_NAND_RALINK ++ ++config MTK_AEE_KDUMP ++ bool "MTK AEE KDUMP" ++ depends on MTK_MMC ++ ++config MTK_MMC_CD_POLL ++ bool "Card Detect with Polling" ++ depends on MTK_MMC ++ ++config MTK_MMC_EMMC_8BIT ++ bool "eMMC 8-bit support" ++ depends on MTK_MMC && RALINK_MT7628 ++ +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/Makefile +@@ -0,0 +1,42 @@ ++# Copyright Statement: ++# ++# This software/firmware and related documentation ("MediaTek Software") are ++# protected under relevant copyright laws. The information contained herein ++# is confidential and proprietary to MediaTek Inc. and/or its licensors. ++# Without the prior written permission of MediaTek inc. and/or its licensors, ++# any reproduction, modification, use or disclosure of MediaTek Software, ++# and information contained herein, in whole or in part, shall be strictly prohibited. ++# ++# MediaTek Inc. (C) 2010. All rights reserved. ++# ++# BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++# THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++# RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++# AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. 
++# NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++# SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++# SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++# THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++# THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++# CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++# SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++# STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++# CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++# AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++# OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++# MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++# ++# The following software/firmware and/or related documentation ("MediaTek Software") ++# have been modified by MediaTek Inc. All revisions are subject to any receiver's ++# applicable license agreements with MediaTek Inc. ++ ++obj-$(CONFIG_MTK_MMC) += mtk_sd.o ++mtk_sd-objs := sd.o dbg.o ++ifeq ($(CONFIG_MTK_AEE_KDUMP),y) ++EXTRA_CFLAGS += -DMT6575_SD_DEBUG ++endif ++ ++clean: ++ @rm -f *.o modules.order .*.cmd +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/board.h +@@ -0,0 +1,137 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. 
++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++#ifndef __ARCH_ARM_MACH_BOARD_H ++#define __ARCH_ARM_MACH_BOARD_H ++ ++#include <generated/autoconf.h> ++#include <linux/pm.h> ++/* --- chhung */ ++// #include <mach/mt6575.h> ++// #include <board-custom.h> ++/* end of chhung */ ++ ++typedef void (*sdio_irq_handler_t)(void*); /* external irq handler */ ++typedef void (*pm_callback_t)(pm_message_t state, void *data); ++ ++#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */ ++#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */ ++#define MSDC_RST_PIN_EN (1 << 2) /* emmc reset pin is wired */ ++#define MSDC_SDIO_IRQ (1 << 3) /* use internal sdio irq (bus) */ ++#define MSDC_EXT_SDIO_IRQ (1 << 4) /* use external sdio irq */ ++#define MSDC_REMOVABLE (1 << 5) /* removable slot */ ++#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */ ++#define MSDC_HIGHSPEED (1 << 7) /* high-speed mode support */ ++#define MSDC_UHS1 (1 << 8) /* uhs-1 mode support */ ++#define MSDC_DDR (1 << 9) /* ddr mode support */ ++ ++ ++#define MSDC_SMPL_RISING (0) ++#define MSDC_SMPL_FALLING (1) ++ ++#define MSDC_CMD_PIN (0) ++#define MSDC_DAT_PIN (1) ++#define MSDC_CD_PIN (2) ++#define MSDC_WP_PIN (3) ++#define MSDC_RST_PIN (4) ++ ++enum { ++ MSDC_CLKSRC_48MHZ = 0, ++// MSDC_CLKSRC_26MHZ = 0, ++// MSDC_CLKSRC_197MHZ = 1, ++// MSDC_CLKSRC_208MHZ = 2 ++}; ++ ++struct msdc_hw { ++ unsigned char clk_src; /* host clock source */ ++ unsigned char cmd_edge; /* command latch edge */ ++ unsigned char data_edge; /* data latch edge */ ++ unsigned char clk_drv; /* clock pad driving */ ++ unsigned char cmd_drv; /* command pad driving */ ++ unsigned char dat_drv; /* data pad driving */ ++ unsigned long flags; /* hardware capability flags */ ++ unsigned long data_pins; /* data pins */ ++ unsigned long data_offset; /* data address offset */ ++ ++ /* config gpio pull mode */ ++ void (*config_gpio_pin)(int type, int pull); ++ ++ /* external power control for card */ ++ void (*ext_power_on)(void); ++ void (*ext_power_off)(void); ++ ++ /* external sdio irq operations */ ++ void (*request_sdio_eirq)(sdio_irq_handler_t sdio_irq_handler, void *data); ++ void (*enable_sdio_eirq)(void); ++ void (*disable_sdio_eirq)(void); ++ ++ /* external cd irq operations */ ++ void (*request_cd_eirq)(sdio_irq_handler_t cd_irq_handler, void *data); ++ void (*enable_cd_eirq)(void); ++ void (*disable_cd_eirq)(void); ++ int (*get_cd_status)(void); ++ ++ /* power management callback for external module */ ++ void (*register_pm)(pm_callback_t pm_cb, void *data); ++}; ++ ++extern struct msdc_hw msdc0_hw; ++extern struct msdc_hw msdc1_hw; ++extern struct msdc_hw msdc2_hw; ++extern struct msdc_hw msdc3_hw; ++ ++/*GPS driver*/ ++#define GPS_FLAG_FORCE_OFF 0x0001 ++struct mt3326_gps_hardware { ++ int (*ext_power_on)(int); ++ int (*ext_power_off)(int); ++}; ++extern struct mt3326_gps_hardware mt3326_gps_hw; ++ ++/* NAND driver */ ++struct mt6575_nand_host_hw { ++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ ++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ ++ unsigned int nfi_cs_num; /* NFI_CS_NUM */ ++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ ++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ ++ unsigned int nand_ecc_size; ++ unsigned int nand_ecc_bytes; ++ unsigned int nand_ecc_mode; ++}; ++extern struct 
mt6575_nand_host_hw mt6575_nand_hw; ++ ++#endif /* __ARCH_ARM_MACH_BOARD_H */ ++ +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/dbg.c +@@ -0,0 +1,347 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ * ++ * MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. 
++ */ ++ ++#include <linux/version.h> ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/kthread.h> ++#include <linux/delay.h> ++#include <linux/module.h> ++#include <linux/init.h> ++#include <linux/proc_fs.h> ++#include <linux/string.h> ++#include <linux/uaccess.h> ++// #include <mach/mt6575_gpt.h> /* --- by chhung */ ++#include "dbg.h" ++#include "mt6575_sd.h" ++#include <linux/seq_file.h> ++ ++static char cmd_buf[256]; ++ ++/* for debug zone */ ++unsigned int sd_debug_zone[4]={ ++ 0, ++ 0, ++ 0, ++ 0 ++}; ++ ++/* mode select */ ++u32 dma_size[4]={ ++ 512, ++ 512, ++ 512, ++ 512 ++}; ++msdc_mode drv_mode[4]={ ++ MODE_SIZE_DEP, /* using DMA or not depend on the size */ ++ MODE_SIZE_DEP, ++ MODE_SIZE_DEP, ++ MODE_SIZE_DEP ++}; ++ ++#if defined (MT6575_SD_DEBUG) ++/* for driver profile */ ++#define TICKS_ONE_MS (13000) ++u32 gpt_enable = 0; ++u32 sdio_pro_enable = 0; /* make sure gpt is enabled */ ++u32 sdio_pro_time = 0; /* no more than 30s */ ++struct sdio_profile sdio_perfomance = {0}; ++ ++#if 0 /* --- chhung */ ++void msdc_init_gpt(void) ++{ ++ GPT_CONFIG config; ++ ++ config.num = GPT6; ++ config.mode = GPT_FREE_RUN; ++ config.clkSrc = GPT_CLK_SRC_SYS; ++ config.clkDiv = GPT_CLK_DIV_1; /* 13MHz GPT6 */ ++ ++ if (GPT_Config(config) == FALSE ) ++ return; ++ ++ GPT_Start(GPT6); ++} ++#endif /* end of --- */ ++ ++u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32) ++{ ++ u32 ret = 0; ++ ++ if (new_H32 == old_H32) { ++ ret = new_L32 - old_L32; ++ } else if(new_H32 == (old_H32 + 1)) { ++ if (new_L32 > old_L32) { ++ printk("msdc old_L<0x%x> new_L<0x%x>\n", old_L32, new_L32); ++ } ++ ret = (0xffffffff - old_L32); ++ ret += new_L32; ++ } else { ++ printk("msdc old_H<0x%x> new_H<0x%x>\n", old_H32, new_H32); ++ } ++ ++ return ret; ++} ++ ++void msdc_sdio_profile(struct sdio_profile* result) ++{ ++ struct cmd_profile* cmd; ++ u32 i; ++ ++ printk("sdio === performance dump ===\n"); ++ printk("sdio === total execute tick<%d> time<%dms> Tx<%dB> Rx<%dB>\n", ++ result->total_tc, result->total_tc / TICKS_ONE_MS, ++ result->total_tx_bytes, result->total_rx_bytes); ++ ++ /* CMD52 Dump */ ++ cmd = &result->cmd52_rx; ++ printk("sdio === CMD52 Rx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc, ++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count); ++ cmd = &result->cmd52_tx; ++ printk("sdio === CMD52 Tx <%d>times tick<%d> Max<%d> Min<%d> Aver<%d>\n", cmd->count, cmd->tot_tc, ++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count); ++ ++ /* CMD53 Rx bytes + block mode */ ++ for (i=0; i<512; i++) { ++ cmd = &result->cmd53_rx_byte[i]; ++ if (cmd->count) { ++ printk("sdio<%6d><%3dB>_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc, ++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count, ++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10)); ++ } ++ } ++ for (i=0; i<100; i++) { ++ cmd = &result->cmd53_rx_blk[i]; ++ if (cmd->count) { ++ printk("sdio<%6d><%3d>B_Rx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc, ++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count, ++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10)); ++ } ++ } ++ ++ /* CMD53 Tx bytes + block mode */ ++ for (i=0; i<512; i++) { ++ cmd = &result->cmd53_tx_byte[i]; ++ if (cmd->count) { ++ printk("sdio<%6d><%3dB>_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc, ++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count, ++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10)); ++ } ++ } ++ for (i=0; i<100; i++) { ++ cmd = 
&result->cmd53_tx_blk[i]; ++ if (cmd->count) { ++ printk("sdio<%6d><%3d>B_Tx_<%9d><%9d><%6d><%6d>_<%9dB><%2dM>\n", cmd->count, i, cmd->tot_tc, ++ cmd->max_tc, cmd->min_tc, cmd->tot_tc/cmd->count, ++ cmd->tot_bytes, (cmd->tot_bytes/10)*13 / (cmd->tot_tc/10)); ++ } ++ } ++ ++ printk("sdio === performance dump done ===\n"); ++} ++ ++//========= sdio command table =========== ++void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks) ++{ ++ struct sdio_profile* result = &sdio_perfomance; ++ struct cmd_profile* cmd; ++ u32 block; ++ ++ if (sdio_pro_enable == 0) { ++ return; ++ } ++ ++ if (opcode == 52) { ++ cmd = bRx ? &result->cmd52_rx : &result->cmd52_tx; ++ } else if (opcode == 53) { ++ if (sizes < 512) { ++ cmd = bRx ? &result->cmd53_rx_byte[sizes] : &result->cmd53_tx_byte[sizes]; ++ } else { ++ block = sizes / 512; ++ if (block >= 99) { ++ printk("cmd53 error blocks\n"); ++ while(1); ++ } ++ cmd = bRx ? &result->cmd53_rx_blk[block] : &result->cmd53_tx_blk[block]; ++ } ++ } else { ++ return; ++ } ++ ++ /* update the members */ ++ if (ticks > cmd->max_tc){ ++ cmd->max_tc = ticks; ++ } ++ if (cmd->min_tc == 0 || ticks < cmd->min_tc) { ++ cmd->min_tc = ticks; ++ } ++ cmd->tot_tc += ticks; ++ cmd->tot_bytes += sizes; ++ cmd->count ++; ++ ++ if (bRx) { ++ result->total_rx_bytes += sizes; ++ } else { ++ result->total_tx_bytes += sizes; ++ } ++ result->total_tc += ticks; ++ ++ /* dump when total_tc > 30s */ ++ if (result->total_tc >= sdio_pro_time * TICKS_ONE_MS * 1000) { ++ msdc_sdio_profile(result); ++ memset(result, 0 , sizeof(struct sdio_profile)); ++ } ++} ++ ++//========== driver proc interface =========== ++static int msdc_debug_proc_read(struct seq_file *s, void *p) ++{ ++ seq_printf(s, "\n=========================================\n"); ++ seq_printf(s, "Index<0> + Id + Zone\n"); ++ seq_printf(s, "-> PWR<9> WRN<8> | FIO<7> OPS<6> FUN<5> CFG<4> | INT<3> RSP<2> CMD<1> DMA<0>\n"); ++ seq_printf(s, "-> echo 0 3 0x3ff >msdc_bebug -> host[3] debug zone set to 0x3ff\n"); ++ seq_printf(s, "-> MSDC[0] Zone: 0x%.8x\n", sd_debug_zone[0]); ++ seq_printf(s, "-> MSDC[1] Zone: 0x%.8x\n", sd_debug_zone[1]); ++ seq_printf(s, "-> MSDC[2] Zone: 0x%.8x\n", sd_debug_zone[2]); ++ seq_printf(s, "-> MSDC[3] Zone: 0x%.8x\n", sd_debug_zone[3]); ++ ++ seq_printf(s, "Index<1> + ID:4|Mode:4 + DMA_SIZE\n"); ++ seq_printf(s, "-> 0)PIO 1)DMA 2)SIZE\n"); ++ seq_printf(s, "-> echo 1 22 0x200 >msdc_bebug -> host[2] size mode, dma when >= 512\n"); ++ seq_printf(s, "-> MSDC[0] mode<%d> size<%d>\n", drv_mode[0], dma_size[0]); ++ seq_printf(s, "-> MSDC[1] mode<%d> size<%d>\n", drv_mode[1], dma_size[1]); ++ seq_printf(s, "-> MSDC[2] mode<%d> size<%d>\n", drv_mode[2], dma_size[2]); ++ seq_printf(s, "-> MSDC[3] mode<%d> size<%d>\n", drv_mode[3], dma_size[3]); ++ ++ seq_printf(s, "Index<3> + SDIO_PROFILE + TIME\n"); ++ seq_printf(s, "-> echo 3 1 0x1E >msdc_bebug -> enable sdio_profile, 30s\n"); ++ seq_printf(s, "-> SDIO_PROFILE<%d> TIME<%ds>\n", sdio_pro_enable, sdio_pro_time); ++ seq_printf(s, "=========================================\n\n"); ++ ++ return 0; ++} ++ ++static ssize_t msdc_debug_proc_write(struct file *file, ++ const char __user *buf, size_t count, loff_t *data) ++{ ++ int ret; ++ ++ int cmd, p1, p2; ++ int id, zone; ++ int mode, size; ++ ++ if (count == 0)return -1; ++ if(count > 255)count = 255; ++ ++ ret = copy_from_user(cmd_buf, buf, count); ++ if (ret < 0)return -1; ++ ++ cmd_buf[count] = '\0'; ++ printk("msdc Write %s\n", cmd_buf); ++ ++ sscanf(cmd_buf, "%x %x %x", &cmd, &p1, &p2); ++ ++ if(cmd == 
SD_TOOL_ZONE) { ++ id = p1; zone = p2; zone &= 0x3ff; ++ printk("msdc host_id<%d> zone<0x%.8x>\n", id, zone); ++ if(id >=0 && id<=3){ ++ sd_debug_zone[id] = zone; ++ } ++ else if(id == 4){ ++ sd_debug_zone[0] = sd_debug_zone[1] = zone; ++ sd_debug_zone[2] = sd_debug_zone[3] = zone; ++ } ++ else{ ++ printk("msdc host_id error when set debug zone\n"); ++ } ++ } else if (cmd == SD_TOOL_DMA_SIZE) { ++ id = p1>>4; mode = (p1&0xf); size = p2; ++ if(id >=0 && id<=3){ ++ drv_mode[id] = mode; ++ dma_size[id] = p2; ++ } ++ else if(id == 4){ ++ drv_mode[0] = drv_mode[1] = mode; ++ drv_mode[2] = drv_mode[3] = mode; ++ dma_size[0] = dma_size[1] = p2; ++ dma_size[2] = dma_size[3] = p2; ++ } ++ else{ ++ printk("msdc host_id error when select mode\n"); ++ } ++ } else if (cmd == SD_TOOL_SDIO_PROFILE) { ++ if (p1 == 1) { /* enable profile */ ++ if (gpt_enable == 0) { ++ // msdc_init_gpt(); /* --- by chhung */ ++ gpt_enable = 1; ++ } ++ sdio_pro_enable = 1; ++ if (p2 == 0) p2 = 1; if (p2 >= 30) p2 = 30; ++ sdio_pro_time = p2 ; ++ } else if (p1 == 0) { ++ /* todo */ ++ sdio_pro_enable = 0; ++ } ++ } ++ ++ return count; ++} ++ ++static int msdc_debug_show(struct inode *inode, struct file *file) ++{ ++ return single_open(file, msdc_debug_proc_read, NULL); ++} ++ ++static const struct file_operations msdc_debug_fops = { ++ .owner = THIS_MODULE, ++ .open = msdc_debug_show, ++ .read = seq_read, ++ .write = msdc_debug_proc_write, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++int msdc_debug_proc_init(void) ++{ ++ struct proc_dir_entry *de = proc_create("msdc_debug", 0667, NULL, &msdc_debug_fops); ++ ++ if (!de || IS_ERR(de)) ++ printk("!! Create MSDC debug PROC fail !!\n"); ++ ++ return 0 ; ++} ++EXPORT_SYMBOL_GPL(msdc_debug_proc_init); ++#endif +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/dbg.h +@@ -0,0 +1,156 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ * ++ * MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. 
RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++#ifndef __MT_MSDC_DEUBG__ ++#define __MT_MSDC_DEUBG__ ++ ++//========================== ++extern u32 sdio_pro_enable; ++/* for a type command, e.g. CMD53, 2 blocks */ ++struct cmd_profile { ++ u32 max_tc; /* Max tick count */ ++ u32 min_tc; ++ u32 tot_tc; /* total tick count */ ++ u32 tot_bytes; ++ u32 count; /* the counts of the command */ ++}; ++ ++/* dump when total_tc and total_bytes */ ++struct sdio_profile { ++ u32 total_tc; /* total tick count of CMD52 and CMD53 */ ++ u32 total_tx_bytes; /* total bytes of CMD53 Tx */ ++ u32 total_rx_bytes; /* total bytes of CMD53 Rx */ ++ ++ /*CMD52*/ ++ struct cmd_profile cmd52_tx; ++ struct cmd_profile cmd52_rx; ++ ++ /*CMD53 in byte unit */ ++ struct cmd_profile cmd53_tx_byte[512]; ++ struct cmd_profile cmd53_rx_byte[512]; ++ ++ /*CMD53 in block unit */ ++ struct cmd_profile cmd53_tx_blk[100]; ++ struct cmd_profile cmd53_rx_blk[100]; ++}; ++ ++//========================== ++typedef enum { ++ SD_TOOL_ZONE = 0, ++ SD_TOOL_DMA_SIZE = 1, ++ SD_TOOL_PM_ENABLE = 2, ++ SD_TOOL_SDIO_PROFILE = 3, ++} msdc_dbg; ++ ++typedef enum { ++ MODE_PIO = 0, ++ MODE_DMA = 1, ++ MODE_SIZE_DEP = 2, ++} msdc_mode; ++extern msdc_mode drv_mode[4]; ++extern u32 dma_size[4]; ++ ++/* Debug message event */ ++#define DBG_EVT_NONE (0) /* No event */ ++#define DBG_EVT_DMA (1 << 0) /* DMA related event */ ++#define DBG_EVT_CMD (1 << 1) /* MSDC CMD related event */ ++#define DBG_EVT_RSP (1 << 2) /* MSDC CMD RSP related event */ ++#define DBG_EVT_INT (1 << 3) /* MSDC INT event */ ++#define DBG_EVT_CFG (1 << 4) /* MSDC CFG event */ ++#define DBG_EVT_FUC (1 << 5) /* Function event */ ++#define DBG_EVT_OPS (1 << 6) /* Read/Write operation event */ ++#define DBG_EVT_FIO (1 << 7) /* FIFO operation event */ ++#define DBG_EVT_WRN (1 << 8) /* Warning event */ ++#define DBG_EVT_PWR (1 << 9) /* Power event */ ++#define DBG_EVT_ALL (0xffffffff) ++ ++#define DBG_EVT_MASK (DBG_EVT_ALL) ++ ++extern unsigned int sd_debug_zone[4]; ++#define TAG "msdc" ++#if 0 /* +++ chhung */ ++#define BUG_ON(x) \ ++do { \ ++ if (x) { \ ++ printk("[BUG] %s LINE:%d FILE:%s\n", #x, __LINE__, __FILE__); \ ++ while(1); \ ++ } \ ++}while(0) ++#endif /* end of +++ */ ++ ++#define N_MSG(evt, fmt, args...) ++/* ++do { \ ++ if ((DBG_EVT_##evt) & sd_debug_zone[host->id]) { \ ++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \ ++ host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \ ++ } \ ++} while(0) ++*/ ++ ++#define ERR_MSG(fmt, args...) \ ++do { \ ++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \ ++ host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \ ++} while(0); ++ ++#if 1 ++//defined CONFIG_MTK_MMC_CD_POLL ++#define INIT_MSG(fmt, args...) ++#define IRQ_MSG(fmt, args...) ++#else ++#define INIT_MSG(fmt, args...) 
\ ++do { \ ++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d> PID<%s><0x%x>\n", \ ++ host->id, ##args , __FUNCTION__, __LINE__, current->comm, current->pid); \ ++} while(0); ++ ++/* PID in ISR in not corrent */ ++#define IRQ_MSG(fmt, args...) \ ++do { \ ++ printk(KERN_ERR TAG"%d -> "fmt" <- %s() : L<%d>\n", \ ++ host->id, ##args , __FUNCTION__, __LINE__); \ ++} while(0); ++#endif ++ ++int msdc_debug_proc_init(void); ++ ++#if 0 /* --- chhung */ ++void msdc_init_gpt(void); ++extern void GPT_GetCounter64(UINT32 *cntL32, UINT32 *cntH32); ++#endif /* end of --- */ ++u32 msdc_time_calc(u32 old_L32, u32 old_H32, u32 new_L32, u32 new_H32); ++void msdc_performance(u32 opcode, u32 sizes, u32 bRx, u32 ticks); ++ ++#endif +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/mt6575_sd.h +@@ -0,0 +1,1002 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. 
++ */ ++ ++#ifndef MT6575_SD_H ++#define MT6575_SD_H ++ ++#include <linux/bitops.h> ++#include <linux/interrupt.h> ++#include <linux/mmc/host.h> ++ ++// #include <mach/mt6575_reg_base.h> /* --- by chhung */ ++ ++/*--------------------------------------------------------------------------*/ ++/* Common Macro */ ++/*--------------------------------------------------------------------------*/ ++#define REG_ADDR(x) ((volatile u32*)(base + OFFSET_##x)) ++ ++/*--------------------------------------------------------------------------*/ ++/* Common Definition */ ++/*--------------------------------------------------------------------------*/ ++#define MSDC_FIFO_SZ (128) ++#define MSDC_FIFO_THD (64) // (128) ++#define MSDC_NUM (4) ++ ++#define MSDC_MS (0) ++#define MSDC_SDMMC (1) ++ ++#define MSDC_MODE_UNKNOWN (0) ++#define MSDC_MODE_PIO (1) ++#define MSDC_MODE_DMA_BASIC (2) ++#define MSDC_MODE_DMA_DESC (3) ++#define MSDC_MODE_DMA_ENHANCED (4) ++#define MSDC_MODE_MMC_STREAM (5) ++ ++#define MSDC_BUS_1BITS (0) ++#define MSDC_BUS_4BITS (1) ++#define MSDC_BUS_8BITS (2) ++ ++#define MSDC_BRUST_8B (3) ++#define MSDC_BRUST_16B (4) ++#define MSDC_BRUST_32B (5) ++#define MSDC_BRUST_64B (6) ++ ++#define MSDC_PIN_PULL_NONE (0) ++#define MSDC_PIN_PULL_DOWN (1) ++#define MSDC_PIN_PULL_UP (2) ++#define MSDC_PIN_KEEP (3) ++ ++#define MSDC_MAX_SCLK (48000000) /* +/- by chhung */ ++#define MSDC_MIN_SCLK (260000) ++ ++#define MSDC_AUTOCMD12 (0x0001) ++#define MSDC_AUTOCMD23 (0x0002) ++#define MSDC_AUTOCMD19 (0x0003) ++ ++#define MSDC_EMMC_BOOTMODE0 (0) /* Pull low CMD mode */ ++#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */ ++ ++enum { ++ RESP_NONE = 0, ++ RESP_R1, ++ RESP_R2, ++ RESP_R3, ++ RESP_R4, ++ RESP_R5, ++ RESP_R6, ++ RESP_R7, ++ RESP_R1B ++}; ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Offset */ ++/*--------------------------------------------------------------------------*/ ++#define OFFSET_MSDC_CFG (0x0) ++#define OFFSET_MSDC_IOCON (0x04) ++#define OFFSET_MSDC_PS (0x08) ++#define OFFSET_MSDC_INT (0x0c) ++#define OFFSET_MSDC_INTEN (0x10) ++#define OFFSET_MSDC_FIFOCS (0x14) ++#define OFFSET_MSDC_TXDATA (0x18) ++#define OFFSET_MSDC_RXDATA (0x1c) ++#define OFFSET_SDC_CFG (0x30) ++#define OFFSET_SDC_CMD (0x34) ++#define OFFSET_SDC_ARG (0x38) ++#define OFFSET_SDC_STS (0x3c) ++#define OFFSET_SDC_RESP0 (0x40) ++#define OFFSET_SDC_RESP1 (0x44) ++#define OFFSET_SDC_RESP2 (0x48) ++#define OFFSET_SDC_RESP3 (0x4c) ++#define OFFSET_SDC_BLK_NUM (0x50) ++#define OFFSET_SDC_CSTS (0x58) ++#define OFFSET_SDC_CSTS_EN (0x5c) ++#define OFFSET_SDC_DCRC_STS (0x60) ++#define OFFSET_EMMC_CFG0 (0x70) ++#define OFFSET_EMMC_CFG1 (0x74) ++#define OFFSET_EMMC_STS (0x78) ++#define OFFSET_EMMC_IOCON (0x7c) ++#define OFFSET_SDC_ACMD_RESP (0x80) ++#define OFFSET_SDC_ACMD19_TRG (0x84) ++#define OFFSET_SDC_ACMD19_STS (0x88) ++#define OFFSET_MSDC_DMA_SA (0x90) ++#define OFFSET_MSDC_DMA_CA (0x94) ++#define OFFSET_MSDC_DMA_CTRL (0x98) ++#define OFFSET_MSDC_DMA_CFG (0x9c) ++#define OFFSET_MSDC_DBG_SEL (0xa0) ++#define OFFSET_MSDC_DBG_OUT (0xa4) ++#define OFFSET_MSDC_PATCH_BIT (0xb0) ++#define OFFSET_MSDC_PATCH_BIT1 (0xb4) ++#define OFFSET_MSDC_PAD_CTL0 (0xe0) ++#define OFFSET_MSDC_PAD_CTL1 (0xe4) ++#define OFFSET_MSDC_PAD_CTL2 (0xe8) ++#define OFFSET_MSDC_PAD_TUNE (0xec) ++#define OFFSET_MSDC_DAT_RDDLY0 (0xf0) ++#define OFFSET_MSDC_DAT_RDDLY1 (0xf4) ++#define OFFSET_MSDC_HW_DBG (0xf8) ++#define OFFSET_MSDC_VERSION (0x100) ++#define OFFSET_MSDC_ECO_VER (0x104) ++ 
++/*--------------------------------------------------------------------------*/ ++/* Register Address */ ++/*--------------------------------------------------------------------------*/ ++ ++/* common register */ ++#define MSDC_CFG REG_ADDR(MSDC_CFG) ++#define MSDC_IOCON REG_ADDR(MSDC_IOCON) ++#define MSDC_PS REG_ADDR(MSDC_PS) ++#define MSDC_INT REG_ADDR(MSDC_INT) ++#define MSDC_INTEN REG_ADDR(MSDC_INTEN) ++#define MSDC_FIFOCS REG_ADDR(MSDC_FIFOCS) ++#define MSDC_TXDATA REG_ADDR(MSDC_TXDATA) ++#define MSDC_RXDATA REG_ADDR(MSDC_RXDATA) ++#define MSDC_PATCH_BIT0 REG_ADDR(MSDC_PATCH_BIT) ++ ++/* sdmmc register */ ++#define SDC_CFG REG_ADDR(SDC_CFG) ++#define SDC_CMD REG_ADDR(SDC_CMD) ++#define SDC_ARG REG_ADDR(SDC_ARG) ++#define SDC_STS REG_ADDR(SDC_STS) ++#define SDC_RESP0 REG_ADDR(SDC_RESP0) ++#define SDC_RESP1 REG_ADDR(SDC_RESP1) ++#define SDC_RESP2 REG_ADDR(SDC_RESP2) ++#define SDC_RESP3 REG_ADDR(SDC_RESP3) ++#define SDC_BLK_NUM REG_ADDR(SDC_BLK_NUM) ++#define SDC_CSTS REG_ADDR(SDC_CSTS) ++#define SDC_CSTS_EN REG_ADDR(SDC_CSTS_EN) ++#define SDC_DCRC_STS REG_ADDR(SDC_DCRC_STS) ++ ++/* emmc register*/ ++#define EMMC_CFG0 REG_ADDR(EMMC_CFG0) ++#define EMMC_CFG1 REG_ADDR(EMMC_CFG1) ++#define EMMC_STS REG_ADDR(EMMC_STS) ++#define EMMC_IOCON REG_ADDR(EMMC_IOCON) ++ ++/* auto command register */ ++#define SDC_ACMD_RESP REG_ADDR(SDC_ACMD_RESP) ++#define SDC_ACMD19_TRG REG_ADDR(SDC_ACMD19_TRG) ++#define SDC_ACMD19_STS REG_ADDR(SDC_ACMD19_STS) ++ ++/* dma register */ ++#define MSDC_DMA_SA REG_ADDR(MSDC_DMA_SA) ++#define MSDC_DMA_CA REG_ADDR(MSDC_DMA_CA) ++#define MSDC_DMA_CTRL REG_ADDR(MSDC_DMA_CTRL) ++#define MSDC_DMA_CFG REG_ADDR(MSDC_DMA_CFG) ++ ++/* pad ctrl register */ ++#define MSDC_PAD_CTL0 REG_ADDR(MSDC_PAD_CTL0) ++#define MSDC_PAD_CTL1 REG_ADDR(MSDC_PAD_CTL1) ++#define MSDC_PAD_CTL2 REG_ADDR(MSDC_PAD_CTL2) ++ ++/* data read delay */ ++#define MSDC_DAT_RDDLY0 REG_ADDR(MSDC_DAT_RDDLY0) ++#define MSDC_DAT_RDDLY1 REG_ADDR(MSDC_DAT_RDDLY1) ++ ++/* debug register */ ++#define MSDC_DBG_SEL REG_ADDR(MSDC_DBG_SEL) ++#define MSDC_DBG_OUT REG_ADDR(MSDC_DBG_OUT) ++ ++/* misc register */ ++#define MSDC_PATCH_BIT REG_ADDR(MSDC_PATCH_BIT) ++#define MSDC_PATCH_BIT1 REG_ADDR(MSDC_PATCH_BIT1) ++#define MSDC_PAD_TUNE REG_ADDR(MSDC_PAD_TUNE) ++#define MSDC_HW_DBG REG_ADDR(MSDC_HW_DBG) ++#define MSDC_VERSION REG_ADDR(MSDC_VERSION) ++#define MSDC_ECO_VER REG_ADDR(MSDC_ECO_VER) /* ECO Version */ ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Mask */ ++/*--------------------------------------------------------------------------*/ ++ ++/* MSDC_CFG mask */ ++#define MSDC_CFG_MODE (0x1 << 0) /* RW */ ++#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */ ++#define MSDC_CFG_RST (0x1 << 2) /* RW */ ++#define MSDC_CFG_PIO (0x1 << 3) /* RW */ ++#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */ ++#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */ ++#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */ ++#define MSDC_CFG_CKSTB (0x1 << 7) /* R */ ++#define MSDC_CFG_CKDIV (0xff << 8) /* RW */ ++#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ ++ ++/* MSDC_IOCON mask */ ++#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ ++#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */ ++#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */ ++#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */ ++#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */ ++#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */ ++#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */ ++#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */ ++#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */ ++#define 
MSDC_IOCON_D3SPL (0x1 << 19) /* RW */ ++#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */ ++#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */ ++#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */ ++#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */ ++#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */ ++ ++/* MSDC_PS mask */ ++#define MSDC_PS_CDEN (0x1 << 0) /* RW */ ++#define MSDC_PS_CDSTS (0x1 << 1) /* R */ ++#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ ++#define MSDC_PS_DAT (0xff << 16) /* R */ ++#define MSDC_PS_CMD (0x1 << 24) /* R */ ++#define MSDC_PS_WP (0x1UL<< 31) /* R */ ++ ++/* MSDC_INT mask */ ++#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */ ++#define MSDC_INT_CDSC (0x1 << 1) /* W1C */ ++#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */ ++#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */ ++#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */ ++#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */ ++#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */ ++#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */ ++#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */ ++#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */ ++#define MSDC_INT_CSTA (0x1 << 11) /* R */ ++#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */ ++#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */ ++#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */ ++#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */ ++#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */ ++ ++/* MSDC_INTEN mask */ ++#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */ ++#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */ ++#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */ ++#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */ ++#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */ ++#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */ ++#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */ ++#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */ ++#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */ ++#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */ ++#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */ ++#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */ ++#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */ ++#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */ ++#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */ ++#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */ ++ ++/* MSDC_FIFOCS mask */ ++#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */ ++#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */ ++#define MSDC_FIFOCS_CLR (0x1UL<< 31) /* RW */ ++ ++/* SDC_CFG mask */ ++#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */ ++#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */ ++#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */ ++#define SDC_CFG_SDIO (0x1 << 19) /* RW */ ++#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */ ++#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */ ++#define SDC_CFG_DTOC (0xffUL << 24) /* RW */ ++ ++/* SDC_CMD mask */ ++#define SDC_CMD_OPC (0x3f << 0) /* RW */ ++#define SDC_CMD_BRK (0x1 << 6) /* RW */ ++#define SDC_CMD_RSPTYP (0x7 << 7) /* RW */ ++#define SDC_CMD_DTYP (0x3 << 11) /* RW */ ++#define SDC_CMD_DTYP (0x3 << 11) /* RW */ ++#define SDC_CMD_RW (0x1 << 13) /* RW */ ++#define SDC_CMD_STOP (0x1 << 14) /* RW */ ++#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */ ++#define SDC_CMD_BLKLEN (0xfff<< 16) /* RW */ ++#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */ ++#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */ ++ ++/* SDC_STS mask */ ++#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */ ++#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ ++#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ ++ ++/* SDC_DCRC_STS mask */ ++#define SDC_DCRC_STS_NEG (0xf << 8) /* RO */ ++#define SDC_DCRC_STS_POS (0xff << 0) /* RO */ ++ ++/* EMMC_CFG0 mask 
*/ ++#define EMMC_CFG0_BOOTSTART (0x1 << 0) /* W */ ++#define EMMC_CFG0_BOOTSTOP (0x1 << 1) /* W */ ++#define EMMC_CFG0_BOOTMODE (0x1 << 2) /* RW */ ++#define EMMC_CFG0_BOOTACKDIS (0x1 << 3) /* RW */ ++#define EMMC_CFG0_BOOTWDLY (0x7 << 12) /* RW */ ++#define EMMC_CFG0_BOOTSUPP (0x1 << 15) /* RW */ ++ ++/* EMMC_CFG1 mask */ ++#define EMMC_CFG1_BOOTDATTMC (0xfffff << 0) /* RW */ ++#define EMMC_CFG1_BOOTACKTMC (0xfffUL << 20) /* RW */ ++ ++/* EMMC_STS mask */ ++#define EMMC_STS_BOOTCRCERR (0x1 << 0) /* W1C */ ++#define EMMC_STS_BOOTACKERR (0x1 << 1) /* W1C */ ++#define EMMC_STS_BOOTDATTMO (0x1 << 2) /* W1C */ ++#define EMMC_STS_BOOTACKTMO (0x1 << 3) /* W1C */ ++#define EMMC_STS_BOOTUPSTATE (0x1 << 4) /* R */ ++#define EMMC_STS_BOOTACKRCV (0x1 << 5) /* W1C */ ++#define EMMC_STS_BOOTDATRCV (0x1 << 6) /* R */ ++ ++/* EMMC_IOCON mask */ ++#define EMMC_IOCON_BOOTRST (0x1 << 0) /* RW */ ++ ++/* SDC_ACMD19_TRG mask */ ++#define SDC_ACMD19_TRG_TUNESEL (0xf << 0) /* RW */ ++ ++/* MSDC_DMA_CTRL mask */ ++#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ ++#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ ++#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */ ++#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */ ++#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */ ++#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */ ++#define MSDC_DMA_CTRL_XFERSZ (0xffffUL << 16)/* RW */ ++ ++/* MSDC_DMA_CFG mask */ ++#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */ ++#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */ ++#define MSDC_DMA_CFG_BDCSERR (0x1 << 4) /* R */ ++#define MSDC_DMA_CFG_GPDCSERR (0x1 << 5) /* R */ ++ ++/* MSDC_PATCH_BIT mask */ ++#define MSDC_PATCH_BIT_WFLSMODE (0x1 << 0) /* RW */ ++#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */ ++#define MSDC_PATCH_BIT_CKGEN_CK (0x1 << 6) /* E2: Fixed to 1 */ ++#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */ ++#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */ ++#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */ ++#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */ ++#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */ ++#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */ ++#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */ ++#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ ++#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ ++ ++/* MSDC_PATCH_BIT1 mask */ ++#define MSDC_PATCH_BIT1_WRDAT_CRCS (0x7 << 3) ++#define MSDC_PATCH_BIT1_CMD_RSP (0x7 << 0) ++ ++/* MSDC_PAD_CTL0 mask */ ++#define MSDC_PAD_CTL0_CLKDRVN (0x7 << 0) /* RW */ ++#define MSDC_PAD_CTL0_CLKDRVP (0x7 << 4) /* RW */ ++#define MSDC_PAD_CTL0_CLKSR (0x1 << 8) /* RW */ ++#define MSDC_PAD_CTL0_CLKPD (0x1 << 16) /* RW */ ++#define MSDC_PAD_CTL0_CLKPU (0x1 << 17) /* RW */ ++#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */ ++#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */ ++#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */ ++#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL<< 24) /* RW */ ++ ++/* MSDC_PAD_CTL1 mask */ ++#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */ ++#define MSDC_PAD_CTL1_CMDDRVP (0x7 << 4) /* RW */ ++#define MSDC_PAD_CTL1_CMDSR (0x1 << 8) /* RW */ ++#define MSDC_PAD_CTL1_CMDPD (0x1 << 16) /* RW */ ++#define MSDC_PAD_CTL1_CMDPU (0x1 << 17) /* RW */ ++#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */ ++#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */ ++#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */ ++#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL<< 24) /* RW */ ++ ++/* MSDC_PAD_CTL2 mask */ ++#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */ ++#define MSDC_PAD_CTL2_DATDRVP (0x7 << 4) /* RW */ ++#define 
MSDC_PAD_CTL2_DATSR (0x1 << 8) /* RW */ ++#define MSDC_PAD_CTL2_DATPD (0x1 << 16) /* RW */ ++#define MSDC_PAD_CTL2_DATPU (0x1 << 17) /* RW */ ++#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */ ++#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */ ++#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */ ++#define MSDC_PAD_CTL2_DATRDSEL (0xffUL<< 24) /* RW */ ++ ++/* MSDC_PAD_TUNE mask */ ++#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */ ++#define MSDC_PAD_TUNE_DATRRDLY (0x1F << 8) /* RW */ ++#define MSDC_PAD_TUNE_CMDRDLY (0x1F << 16) /* RW */ ++#define MSDC_PAD_TUNE_CMDRRDLY (0x1FUL << 22) /* RW */ ++#define MSDC_PAD_TUNE_CLKTXDLY (0x1FUL << 27) /* RW */ ++ ++/* MSDC_DAT_RDDLY0/1 mask */ ++#define MSDC_DAT_RDDLY0_D0 (0x1F << 0) /* RW */ ++#define MSDC_DAT_RDDLY0_D1 (0x1F << 8) /* RW */ ++#define MSDC_DAT_RDDLY0_D2 (0x1F << 16) /* RW */ ++#define MSDC_DAT_RDDLY0_D3 (0x1F << 24) /* RW */ ++ ++#define MSDC_DAT_RDDLY1_D4 (0x1F << 0) /* RW */ ++#define MSDC_DAT_RDDLY1_D5 (0x1F << 8) /* RW */ ++#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */ ++#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */ ++ ++#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F<<10) ++#define MSDC_INT_DAT_LATCH_CK_SEL (0x7<<7) ++#define MSDC_CKGEN_MSDC_CK_SEL (0x1<<6) ++#define CARD_READY_FOR_DATA (1<<8) ++#define CARD_CURRENT_STATE(x) ((x&0x00001E00)>>9) ++ ++/*--------------------------------------------------------------------------*/ ++/* Descriptor Structure */ ++/*--------------------------------------------------------------------------*/ ++typedef struct { ++ u32 hwo:1; /* could be changed by hw */ ++ u32 bdp:1; ++ u32 rsv0:6; ++ u32 chksum:8; ++ u32 intr:1; ++ u32 rsv1:15; ++ void *next; ++ void *ptr; ++ u32 buflen:16; ++ u32 extlen:8; ++ u32 rsv2:8; ++ u32 arg; ++ u32 blknum; ++ u32 cmd; ++} gpd_t; ++ ++typedef struct { ++ u32 eol:1; ++ u32 rsv0:7; ++ u32 chksum:8; ++ u32 rsv1:1; ++ u32 blkpad:1; ++ u32 dwpad:1; ++ u32 rsv2:13; ++ void *next; ++ void *ptr; ++ u32 buflen:16; ++ u32 rsv3:16; ++} bd_t; ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Debugging Structure */ ++/*--------------------------------------------------------------------------*/ ++ ++typedef struct { ++ u32 msdc:1; ++ u32 ckpwn:1; ++ u32 rst:1; ++ u32 pio:1; ++ u32 ckdrven:1; ++ u32 start18v:1; ++ u32 pass18v:1; ++ u32 ckstb:1; ++ u32 ckdiv:8; ++ u32 ckmod:2; ++ u32 pad:14; ++} msdc_cfg_reg; ++typedef struct { ++ u32 sdr104cksel:1; ++ u32 rsmpl:1; ++ u32 dsmpl:1; ++ u32 ddlysel:1; ++ u32 ddr50ckd:1; ++ u32 dsplsel:1; ++ u32 pad1:10; ++ u32 d0spl:1; ++ u32 d1spl:1; ++ u32 d2spl:1; ++ u32 d3spl:1; ++ u32 d4spl:1; ++ u32 d5spl:1; ++ u32 d6spl:1; ++ u32 d7spl:1; ++ u32 riscsz:1; ++ u32 pad2:7; ++} msdc_iocon_reg; ++typedef struct { ++ u32 cden:1; ++ u32 cdsts:1; ++ u32 pad1:10; ++ u32 cddebounce:4; ++ u32 dat:8; ++ u32 cmd:1; ++ u32 pad2:6; ++ u32 wp:1; ++} msdc_ps_reg; ++typedef struct { ++ u32 mmcirq:1; ++ u32 cdsc:1; ++ u32 pad1:1; ++ u32 atocmdrdy:1; ++ u32 atocmdtmo:1; ++ u32 atocmdcrc:1; ++ u32 dmaqempty:1; ++ u32 sdioirq:1; ++ u32 cmdrdy:1; ++ u32 cmdtmo:1; ++ u32 rspcrc:1; ++ u32 csta:1; ++ u32 xfercomp:1; ++ u32 dxferdone:1; ++ u32 dattmo:1; ++ u32 datcrc:1; ++ u32 atocmd19done:1; ++ u32 pad2:15; ++} msdc_int_reg; ++typedef struct { ++ u32 mmcirq:1; ++ u32 cdsc:1; ++ u32 pad1:1; ++ u32 atocmdrdy:1; ++ u32 atocmdtmo:1; ++ u32 atocmdcrc:1; ++ u32 dmaqempty:1; ++ u32 sdioirq:1; ++ u32 cmdrdy:1; ++ u32 cmdtmo:1; ++ u32 rspcrc:1; ++ u32 csta:1; ++ u32 xfercomp:1; ++ u32 dxferdone:1; ++ u32 dattmo:1; ++ u32 
datcrc:1; ++ u32 atocmd19done:1; ++ u32 pad2:15; ++} msdc_inten_reg; ++typedef struct { ++ u32 rxcnt:8; ++ u32 pad1:8; ++ u32 txcnt:8; ++ u32 pad2:7; ++ u32 clr:1; ++} msdc_fifocs_reg; ++typedef struct { ++ u32 val; ++} msdc_txdat_reg; ++typedef struct { ++ u32 val; ++} msdc_rxdat_reg; ++typedef struct { ++ u32 sdiowkup:1; ++ u32 inswkup:1; ++ u32 pad1:14; ++ u32 buswidth:2; ++ u32 pad2:1; ++ u32 sdio:1; ++ u32 sdioide:1; ++ u32 intblkgap:1; ++ u32 pad4:2; ++ u32 dtoc:8; ++} sdc_cfg_reg; ++typedef struct { ++ u32 cmd:6; ++ u32 brk:1; ++ u32 rsptyp:3; ++ u32 pad1:1; ++ u32 dtype:2; ++ u32 rw:1; ++ u32 stop:1; ++ u32 goirq:1; ++ u32 blklen:12; ++ u32 atocmd:2; ++ u32 volswth:1; ++ u32 pad2:1; ++} sdc_cmd_reg; ++typedef struct { ++ u32 arg; ++} sdc_arg_reg; ++typedef struct { ++ u32 sdcbusy:1; ++ u32 cmdbusy:1; ++ u32 pad:29; ++ u32 swrcmpl:1; ++} sdc_sts_reg; ++typedef struct { ++ u32 val; ++} sdc_resp0_reg; ++typedef struct { ++ u32 val; ++} sdc_resp1_reg; ++typedef struct { ++ u32 val; ++} sdc_resp2_reg; ++typedef struct { ++ u32 val; ++} sdc_resp3_reg; ++typedef struct { ++ u32 num; ++} sdc_blknum_reg; ++typedef struct { ++ u32 sts; ++} sdc_csts_reg; ++typedef struct { ++ u32 sts; ++} sdc_cstsen_reg; ++typedef struct { ++ u32 datcrcsts:8; ++ u32 ddrcrcsts:4; ++ u32 pad:20; ++} sdc_datcrcsts_reg; ++typedef struct { ++ u32 bootstart:1; ++ u32 bootstop:1; ++ u32 bootmode:1; ++ u32 pad1:9; ++ u32 bootwaidly:3; ++ u32 bootsupp:1; ++ u32 pad2:16; ++} emmc_cfg0_reg; ++typedef struct { ++ u32 bootcrctmc:16; ++ u32 pad:4; ++ u32 bootacktmc:12; ++} emmc_cfg1_reg; ++typedef struct { ++ u32 bootcrcerr:1; ++ u32 bootackerr:1; ++ u32 bootdattmo:1; ++ u32 bootacktmo:1; ++ u32 bootupstate:1; ++ u32 bootackrcv:1; ++ u32 bootdatrcv:1; ++ u32 pad:25; ++} emmc_sts_reg; ++typedef struct { ++ u32 bootrst:1; ++ u32 pad:31; ++} emmc_iocon_reg; ++typedef struct { ++ u32 val; ++} msdc_acmd_resp_reg; ++typedef struct { ++ u32 tunesel:4; ++ u32 pad:28; ++} msdc_acmd19_trg_reg; ++typedef struct { ++ u32 val; ++} msdc_acmd19_sts_reg; ++typedef struct { ++ u32 addr; ++} msdc_dma_sa_reg; ++typedef struct { ++ u32 addr; ++} msdc_dma_ca_reg; ++typedef struct { ++ u32 start:1; ++ u32 stop:1; ++ u32 resume:1; ++ u32 pad1:5; ++ u32 mode:1; ++ u32 pad2:1; ++ u32 lastbuf:1; ++ u32 pad3:1; ++ u32 brustsz:3; ++ u32 pad4:1; ++ u32 xfersz:16; ++} msdc_dma_ctrl_reg; ++typedef struct { ++ u32 status:1; ++ u32 decsen:1; ++ u32 pad1:2; ++ u32 bdcsen:1; ++ u32 gpdcsen:1; ++ u32 pad2:26; ++} msdc_dma_cfg_reg; ++typedef struct { ++ u32 sel:16; ++ u32 pad2:16; ++} msdc_dbg_sel_reg; ++typedef struct { ++ u32 val; ++} msdc_dbg_out_reg; ++typedef struct { ++ u32 clkdrvn:3; ++ u32 rsv0:1; ++ u32 clkdrvp:3; ++ u32 rsv1:1; ++ u32 clksr:1; ++ u32 rsv2:7; ++ u32 clkpd:1; ++ u32 clkpu:1; ++ u32 clksmt:1; ++ u32 clkies:1; ++ u32 clktdsel:4; ++ u32 clkrdsel:8; ++} msdc_pad_ctl0_reg; ++typedef struct { ++ u32 cmddrvn:3; ++ u32 rsv0:1; ++ u32 cmddrvp:3; ++ u32 rsv1:1; ++ u32 cmdsr:1; ++ u32 rsv2:7; ++ u32 cmdpd:1; ++ u32 cmdpu:1; ++ u32 cmdsmt:1; ++ u32 cmdies:1; ++ u32 cmdtdsel:4; ++ u32 cmdrdsel:8; ++} msdc_pad_ctl1_reg; ++typedef struct { ++ u32 datdrvn:3; ++ u32 rsv0:1; ++ u32 datdrvp:3; ++ u32 rsv1:1; ++ u32 datsr:1; ++ u32 rsv2:7; ++ u32 datpd:1; ++ u32 datpu:1; ++ u32 datsmt:1; ++ u32 daties:1; ++ u32 dattdsel:4; ++ u32 datrdsel:8; ++} msdc_pad_ctl2_reg; ++typedef struct { ++ u32 wrrxdly:3; ++ u32 pad1:5; ++ u32 rdrxdly:8; ++ u32 pad2:16; ++} msdc_pad_tune_reg; ++typedef struct { ++ u32 dat0:5; ++ u32 rsv0:3; ++ u32 dat1:5; ++ u32 rsv1:3; ++ 
u32 dat2:5; ++ u32 rsv2:3; ++ u32 dat3:5; ++ u32 rsv3:3; ++} msdc_dat_rddly0; ++typedef struct { ++ u32 dat4:5; ++ u32 rsv4:3; ++ u32 dat5:5; ++ u32 rsv5:3; ++ u32 dat6:5; ++ u32 rsv6:3; ++ u32 dat7:5; ++ u32 rsv7:3; ++} msdc_dat_rddly1; ++typedef struct { ++ u32 dbg0sel:8; ++ u32 dbg1sel:6; ++ u32 pad1:2; ++ u32 dbg2sel:6; ++ u32 pad2:2; ++ u32 dbg3sel:6; ++ u32 pad3:2; ++} msdc_hw_dbg_reg; ++typedef struct { ++ u32 val; ++} msdc_version_reg; ++typedef struct { ++ u32 val; ++} msdc_eco_ver_reg; ++ ++struct msdc_regs { ++ msdc_cfg_reg msdc_cfg; /* base+0x00h */ ++ msdc_iocon_reg msdc_iocon; /* base+0x04h */ ++ msdc_ps_reg msdc_ps; /* base+0x08h */ ++ msdc_int_reg msdc_int; /* base+0x0ch */ ++ msdc_inten_reg msdc_inten; /* base+0x10h */ ++ msdc_fifocs_reg msdc_fifocs; /* base+0x14h */ ++ msdc_txdat_reg msdc_txdat; /* base+0x18h */ ++ msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */ ++ u32 rsv1[4]; ++ sdc_cfg_reg sdc_cfg; /* base+0x30h */ ++ sdc_cmd_reg sdc_cmd; /* base+0x34h */ ++ sdc_arg_reg sdc_arg; /* base+0x38h */ ++ sdc_sts_reg sdc_sts; /* base+0x3ch */ ++ sdc_resp0_reg sdc_resp0; /* base+0x40h */ ++ sdc_resp1_reg sdc_resp1; /* base+0x44h */ ++ sdc_resp2_reg sdc_resp2; /* base+0x48h */ ++ sdc_resp3_reg sdc_resp3; /* base+0x4ch */ ++ sdc_blknum_reg sdc_blknum; /* base+0x50h */ ++ u32 rsv2[1]; ++ sdc_csts_reg sdc_csts; /* base+0x58h */ ++ sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */ ++ sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */ ++ u32 rsv3[3]; ++ emmc_cfg0_reg emmc_cfg0; /* base+0x70h */ ++ emmc_cfg1_reg emmc_cfg1; /* base+0x74h */ ++ emmc_sts_reg emmc_sts; /* base+0x78h */ ++ emmc_iocon_reg emmc_iocon; /* base+0x7ch */ ++ msdc_acmd_resp_reg acmd_resp; /* base+0x80h */ ++ msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */ ++ msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */ ++ u32 rsv4[1]; ++ msdc_dma_sa_reg dma_sa; /* base+0x90h */ ++ msdc_dma_ca_reg dma_ca; /* base+0x94h */ ++ msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */ ++ msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */ ++ msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */ ++ msdc_dbg_out_reg dbg_out; /* base+0xa4h */ ++ u32 rsv5[2]; ++ u32 patch0; /* base+0xb0h */ ++ u32 patch1; /* base+0xb4h */ ++ u32 rsv6[10]; ++ msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */ ++ msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */ ++ msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */ ++ msdc_pad_tune_reg pad_tune; /* base+0xech */ ++ msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */ ++ msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */ ++ msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */ ++ u32 rsv7[1]; ++ msdc_version_reg version; /* base+0x100h */ ++ msdc_eco_ver_reg eco_ver; /* base+0x104h */ ++}; ++ ++struct scatterlist_ex { ++ u32 cmd; ++ u32 arg; ++ u32 sglen; ++ struct scatterlist *sg; ++}; ++ ++#define DMA_FLAG_NONE (0x00000000) ++#define DMA_FLAG_EN_CHKSUM (0x00000001) ++#define DMA_FLAG_PAD_BLOCK (0x00000002) ++#define DMA_FLAG_PAD_DWORD (0x00000004) ++ ++struct msdc_dma { ++ u32 flags; /* flags */ ++ u32 xfersz; /* xfer size in bytes */ ++ u32 sglen; /* size of scatter list */ ++ u32 blklen; /* block size */ ++ struct scatterlist *sg; /* I/O scatter list */ ++ struct scatterlist_ex *esg; /* extended I/O scatter list */ ++ u8 mode; /* dma mode */ ++ u8 burstsz; /* burst size */ ++ u8 intr; /* dma done interrupt */ ++ u8 padding; /* padding */ ++ u32 cmd; /* enhanced mode command */ ++ u32 arg; /* enhanced mode arg */ ++ u32 rsp; /* enhanced mode command response */ ++ u32 autorsp; /* auto command response */ ++ ++ gpd_t *gpd; /* pointer to gpd array */ ++ bd_t *bd; /* pointer to bd array */ 
++ dma_addr_t gpd_addr; /* the physical address of gpd array */ ++ dma_addr_t bd_addr; /* the physical address of bd array */ ++ u32 used_gpd; /* the number of used gpd elements */ ++ u32 used_bd; /* the number of used bd elements */ ++}; ++ ++struct msdc_host ++{ ++ struct msdc_hw *hw; ++ ++ struct mmc_host *mmc; /* mmc structure */ ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ struct mmc_request *mrq; ++ int cmd_rsp; ++ int cmd_rsp_done; ++ int cmd_r1b_done; ++ ++ int error; ++ spinlock_t lock; /* mutex */ ++ struct semaphore sem; ++ ++ u32 blksz; /* host block size */ ++ u32 base; /* host base address */ ++ int id; /* host id */ ++ int pwr_ref; /* core power reference count */ ++ ++ u32 xfer_size; /* total transferred size */ ++ ++ struct msdc_dma dma; /* dma channel */ ++ u32 dma_addr; /* dma transfer address */ ++ u32 dma_left_size; /* dma transfer left size */ ++ u32 dma_xfer_size; /* dma transfer size in bytes */ ++ int dma_xfer; /* dma transfer mode */ ++ ++ u32 timeout_ns; /* data timeout ns */ ++ u32 timeout_clks; /* data timeout clks */ ++ ++ atomic_t abort; /* abort transfer */ ++ ++ int irq; /* host interrupt */ ++ ++ struct tasklet_struct card_tasklet; ++#if 0 ++ struct work_struct card_workqueue; ++#else ++ struct delayed_work card_delaywork; ++#endif ++ ++ struct completion cmd_done; ++ struct completion xfer_done; ++ struct pm_message pm_state; ++ ++ u32 mclk; /* mmc subsystem clock */ ++ u32 hclk; /* host clock speed */ ++ u32 sclk; /* SD/MS clock speed */ ++ u8 core_clkon; /* Host core clock on ? */ ++ u8 card_clkon; /* Card clock on ? */ ++ u8 core_power; /* core power */ ++ u8 power_mode; /* host power mode */ ++ u8 card_inserted; /* card inserted ? */ ++ u8 suspend; /* host suspended ? */ ++ u8 reserved; ++ u8 app_cmd; /* for app command */ ++ u32 app_cmd_arg; ++ u64 starttime; ++}; ++ ++static inline unsigned int uffs(unsigned int x) ++{ ++ unsigned int r = 1; ++ ++ if (!x) ++ return 0; ++ if (!(x & 0xffff)) { ++ x >>= 16; ++ r += 16; ++ } ++ if (!(x & 0xff)) { ++ x >>= 8; ++ r += 8; ++ } ++ if (!(x & 0xf)) { ++ x >>= 4; ++ r += 4; ++ } ++ if (!(x & 3)) { ++ x >>= 2; ++ r += 2; ++ } ++ if (!(x & 1)) { ++ x >>= 1; ++ r += 1; ++ } ++ return r; ++} ++#define sdr_read8(reg) __raw_readb(reg) ++#define sdr_read16(reg) __raw_readw(reg) ++#define sdr_read32(reg) __raw_readl(reg) ++#define sdr_write8(reg,val) __raw_writeb(val,reg) ++#define sdr_write16(reg,val) __raw_writew(val,reg) ++#define sdr_write32(reg,val) __raw_writel(val,reg) ++ ++#define sdr_set_bits(reg,bs) ((*(volatile u32*)(reg)) |= (u32)(bs)) ++#define sdr_clr_bits(reg,bs) ((*(volatile u32*)(reg)) &= ~((u32)(bs))) ++ ++#define sdr_set_field(reg,field,val) \ ++ do { \ ++ volatile unsigned int tv = sdr_read32(reg); \ ++ tv &= ~(field); \ ++ tv |= ((val) << (uffs((unsigned int)field) - 1)); \ ++ sdr_write32(reg,tv); \ ++ } while(0) ++#define sdr_get_field(reg,field,val) \ ++ do { \ ++ volatile unsigned int tv = sdr_read32(reg); \ ++ val = ((tv & (field)) >> (uffs((unsigned int)field) - 1)); \ ++ } while(0) ++ ++#endif ++ +--- /dev/null ++++ b/drivers/mmc/host/mtk-mmc/sd.c +@@ -0,0 +1,3067 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. 
and/or its licensors,
++ * any reproduction, modification, use or disclosure of MediaTek Software,
++ * and information contained herein, in whole or in part, shall be strictly prohibited.
++ *
++ * MediaTek Inc. (C) 2010. All rights reserved.
++ *
++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES
++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE")
++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON
++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES,
++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF
++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT.
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE
++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR
++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH
++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES
++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES
++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK
++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR
++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND
++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE,
++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE,
++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO
++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE.
++ *
++ * The following software/firmware and/or related documentation ("MediaTek Software")
++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's
++ * applicable license agreements with MediaTek Inc.
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/init.h>
++#include <linux/spinlock.h>
++#include <linux/timer.h>
++#include <linux/ioport.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/blkdev.h>
++#include <linux/slab.h>
++#include <linux/mmc/host.h>
++#include <linux/mmc/card.h>
++#include <linux/mmc/core.h>
++#include <linux/mmc/mmc.h>
++#include <linux/mmc/sd.h>
++#include <linux/mmc/sdio.h>
++#include <linux/dma-mapping.h>
++
++/* +++ by chhung */
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/pm.h>
++#include <linux/of.h>
++
++#define MSDC_SMPL_FALLING (1)
++#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */
++#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */
++#define MSDC_REMOVABLE (1 << 5) /* removable slot */
++#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */
++#define MSDC_HIGHSPEED (1 << 7)
++
++//#define IRQ_SDC 14 //MT7620 /*FIXME*/
++#ifdef CONFIG_SOC_MT7621
++#define RALINK_SYSCTL_BASE 0xbe000000
++#define RALINK_MSDC_BASE 0xbe130000
++#else
++#define RALINK_SYSCTL_BASE 0xb0000000
++#define RALINK_MSDC_BASE 0xb0130000
++#endif
++#define IRQ_SDC 22 /*FIXME*/
++
++#include <asm/dma.h>
++/* end of +++ */
++
++
++#include <asm/mach-ralink/ralink_regs.h>
++
++#if 0 /* --- by chhung */
++#include <mach/board.h>
++#include <mach/mt6575_devs.h>
++#include <mach/mt6575_typedefs.h>
++#include <mach/mt6575_clock_manager.h>
++#include <mach/mt6575_pm_ldo.h>
++//#include <mach/mt6575_pll.h>
++//#include <mach/mt6575_gpio.h>
++//#include <mach/mt6575_gpt_sw.h>
++#include <asm/tcm.h>
++// #include <mach/mt6575_gpt.h>
++#endif /* end of --- */
++
++#include "mt6575_sd.h"
++#include "dbg.h"
++
++/* +++ by chhung */
++#include "board.h"
++/* end of +++ */
++
++#if 0 /* --- by chhung */
++#define isb() __asm__ __volatile__ ("" : : : "memory")
++#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
++ : : "r" (0) : "memory")
++#define dmb() __asm__ __volatile__ ("" : : : "memory")
++#endif /* end of --- */
++
++#define DRV_NAME "mtk-sd"
++
++#define HOST_MAX_NUM (1) /* +/- by chhung */
++
++#if defined (CONFIG_SOC_MT7620)
++#define HOST_MAX_MCLK (48000000) /* +/- by chhung */
++#elif defined (CONFIG_SOC_MT7621)
++#define HOST_MAX_MCLK (50000000) /* +/- by chhung */
++#endif
++#define HOST_MIN_MCLK (260000)
++
++#define HOST_MAX_BLKSZ (2048)
++
++#define MSDC_OCR_AVAIL (MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33)
++
++#define GPIO_PULL_DOWN (0)
++#define GPIO_PULL_UP (1)
++
++#if 0 /* --- by chhung */
++#define MSDC_CLKSRC_REG (0xf100000C)
++#define PDN_REG (0xF1000010)
++#endif /* end of --- */
++
++#define DEFAULT_DEBOUNCE (8) /* 8 cycles */
++#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk.
*/ ++ ++#define CMD_TIMEOUT (HZ/10) /* 100ms */ ++#define DAT_TIMEOUT (HZ/2 * 5) /* 500ms x5 */ ++ ++#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/ ++ ++#define MAX_GPD_NUM (1 + 1) /* one null gpd */ ++#define MAX_BD_NUM (1024) ++#define MAX_BD_PER_GPD (MAX_BD_NUM) ++ ++#define MAX_HW_SGMTS (MAX_BD_NUM) ++#define MAX_PHY_SGMTS (MAX_BD_NUM) ++#define MAX_SGMT_SZ (MAX_DMA_CNT) ++#define MAX_REQ_SZ (MAX_SGMT_SZ * 8) ++ ++#ifdef MT6575_SD_DEBUG ++static struct msdc_regs *msdc_reg[HOST_MAX_NUM]; ++#endif ++ ++static int mtk_sw_poll; ++ ++static int cd_active_low = 1; ++ ++//================================= ++#define PERI_MSDC0_PDN (15) ++//#define PERI_MSDC1_PDN (16) ++//#define PERI_MSDC2_PDN (17) ++//#define PERI_MSDC3_PDN (18) ++ ++struct msdc_host *msdc_6575_host[] = {NULL,NULL,NULL,NULL}; ++#if 0 /* --- by chhung */ ++/* gate means clock power down */ ++static int g_clk_gate = 0; ++#define msdc_gate_clock(id) \ ++ do { \ ++ g_clk_gate &= ~(1 << ((id) + PERI_MSDC0_PDN)); \ ++ } while(0) ++/* not like power down register. 1 means clock on. */ ++#define msdc_ungate_clock(id) \ ++ do { \ ++ g_clk_gate |= 1 << ((id) + PERI_MSDC0_PDN); \ ++ } while(0) ++ ++// do we need sync object or not ++void msdc_clk_status(int * status) ++{ ++ *status = g_clk_gate; ++} ++#endif /* end of --- */ ++ ++/* +++ by chhung */ ++struct msdc_hw msdc0_hw = { ++ .clk_src = 0, ++ .cmd_edge = MSDC_SMPL_FALLING, ++ .data_edge = MSDC_SMPL_FALLING, ++ .clk_drv = 4, ++ .cmd_drv = 4, ++ .dat_drv = 4, ++ .data_pins = 4, ++ .data_offset = 0, ++ .flags = MSDC_SYS_SUSPEND | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED, ++// .flags = MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE, ++}; ++ ++static struct resource mtk_sd_resources[] = { ++ [0] = { ++ .start = RALINK_MSDC_BASE, ++ .end = RALINK_MSDC_BASE+0x3fff, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_SDC, /*FIXME*/ ++ .end = IRQ_SDC, /*FIXME*/ ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device mtk_sd_device = { ++ .name = "mtk-sd", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(mtk_sd_resources), ++ .resource = mtk_sd_resources, ++}; ++/* end of +++ */ ++ ++static int msdc_rsp[] = { ++ 0, /* RESP_NONE */ ++ 1, /* RESP_R1 */ ++ 2, /* RESP_R2 */ ++ 3, /* RESP_R3 */ ++ 4, /* RESP_R4 */ ++ 1, /* RESP_R5 */ ++ 1, /* RESP_R6 */ ++ 1, /* RESP_R7 */ ++ 7, /* RESP_R1b */ ++}; ++ ++/* For Inhanced DMA */ ++#define msdc_init_gpd_ex(gpd,extlen,cmd,arg,blknum) \ ++ do { \ ++ ((gpd_t*)gpd)->extlen = extlen; \ ++ ((gpd_t*)gpd)->cmd = cmd; \ ++ ((gpd_t*)gpd)->arg = arg; \ ++ ((gpd_t*)gpd)->blknum = blknum; \ ++ }while(0) ++ ++#define msdc_init_bd(bd, blkpad, dwpad, dptr, dlen) \ ++ do { \ ++ BUG_ON(dlen > 0xFFFFUL); \ ++ ((bd_t*)bd)->blkpad = blkpad; \ ++ ((bd_t*)bd)->dwpad = dwpad; \ ++ ((bd_t*)bd)->ptr = (void*)dptr; \ ++ ((bd_t*)bd)->buflen = dlen; \ ++ }while(0) ++ ++#define msdc_txfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16) ++#define msdc_rxfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0) ++#define msdc_fifo_write32(v) sdr_write32(MSDC_TXDATA, (v)) ++#define msdc_fifo_write8(v) sdr_write8(MSDC_TXDATA, (v)) ++#define msdc_fifo_read32() sdr_read32(MSDC_RXDATA) ++#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA) ++ ++ ++#define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO) ++#define msdc_dma_off() sdr_set_bits(MSDC_CFG, MSDC_CFG_PIO) ++ ++#define msdc_retry(expr,retry,cnt) \ ++ do { \ ++ int backup = cnt; \ ++ while (retry) { \ ++ if (!(expr)) break; \ ++ 
if (cnt-- == 0) { \ ++ retry--; mdelay(1); cnt = backup; \ ++ } \ ++ } \ ++ WARN_ON(retry == 0); \ ++ } while(0) ++ ++#if 0 /* --- by chhung */ ++#define msdc_reset() \ ++ do { \ ++ int retry = 3, cnt = 1000; \ ++ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \ ++ dsb(); \ ++ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \ ++ } while(0) ++#else ++#define msdc_reset() \ ++ do { \ ++ int retry = 3, cnt = 1000; \ ++ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \ ++ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \ ++ } while(0) ++#endif /* end of +/- */ ++ ++#define msdc_clr_int() \ ++ do { \ ++ volatile u32 val = sdr_read32(MSDC_INT); \ ++ sdr_write32(MSDC_INT, val); \ ++ } while(0) ++ ++#define msdc_clr_fifo() \ ++ do { \ ++ int retry = 3, cnt = 1000; \ ++ sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \ ++ msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \ ++ } while(0) ++ ++#define msdc_irq_save(val) \ ++ do { \ ++ val = sdr_read32(MSDC_INTEN); \ ++ sdr_clr_bits(MSDC_INTEN, val); \ ++ } while(0) ++ ++#define msdc_irq_restore(val) \ ++ do { \ ++ sdr_set_bits(MSDC_INTEN, val); \ ++ } while(0) ++ ++/* clock source for host: global */ ++#if defined (CONFIG_SOC_MT7620) ++static u32 hclks[] = {48000000}; /* +/- by chhung */ ++#elif defined (CONFIG_SOC_MT7621) ++static u32 hclks[] = {50000000}; /* +/- by chhung */ ++#endif ++ ++//============================================ ++// the power for msdc host controller: global ++// always keep the VMC on. ++//============================================ ++#define msdc_vcore_on(host) \ ++ do { \ ++ INIT_MSG("[+]VMC ref. count<%d>", ++host->pwr_ref); \ ++ (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \ ++ } while (0) ++#define msdc_vcore_off(host) \ ++ do { \ ++ INIT_MSG("[-]VMC ref. count<%d>", --host->pwr_ref); \ ++ (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \ ++ } while (0) ++ ++//==================================== ++// the vdd output for card: global ++// always keep the VMCH on. ++//==================================== ++#define msdc_vdd_on(host) \ ++ do { \ ++ (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \ ++ } while (0) ++#define msdc_vdd_off(host) \ ++ do { \ ++ (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \ ++ } while (0) ++ ++#define sdc_is_busy() (sdr_read32(SDC_STS) & SDC_STS_SDCBUSY) ++#define sdc_is_cmd_busy() (sdr_read32(SDC_STS) & SDC_STS_CMDBUSY) ++ ++#define sdc_send_cmd(cmd,arg) \ ++ do { \ ++ sdr_write32(SDC_ARG, (arg)); \ ++ sdr_write32(SDC_CMD, (cmd)); \ ++ } while(0) ++ ++// can modify to read h/w register. ++//#define is_card_present(h) ((sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 
0 : 1); ++#define is_card_present(h) (((struct msdc_host*)(h))->card_inserted) ++ ++/* +++ by chhung */ ++#ifndef __ASSEMBLY__ ++#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) ++#else ++#define PHYSADDR(a) ((a) & 0x1fffffff) ++#endif ++/* end of +++ */ ++static unsigned int msdc_do_command(struct msdc_host *host, ++ struct mmc_command *cmd, ++ int tune, ++ unsigned long timeout); ++ ++static int msdc_tune_cmdrsp(struct msdc_host*host,struct mmc_command *cmd); ++ ++#ifdef MT6575_SD_DEBUG ++static void msdc_dump_card_status(struct msdc_host *host, u32 status) ++{ ++ static char *state[] = { ++ "Idle", /* 0 */ ++ "Ready", /* 1 */ ++ "Ident", /* 2 */ ++ "Stby", /* 3 */ ++ "Tran", /* 4 */ ++ "Data", /* 5 */ ++ "Rcv", /* 6 */ ++ "Prg", /* 7 */ ++ "Dis", /* 8 */ ++ "Reserved", /* 9 */ ++ "Reserved", /* 10 */ ++ "Reserved", /* 11 */ ++ "Reserved", /* 12 */ ++ "Reserved", /* 13 */ ++ "Reserved", /* 14 */ ++ "I/O mode", /* 15 */ ++ }; ++ if (status & R1_OUT_OF_RANGE) ++ N_MSG(RSP, "[CARD_STATUS] Out of Range"); ++ if (status & R1_ADDRESS_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] Address Error"); ++ if (status & R1_BLOCK_LEN_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] Block Len Error"); ++ if (status & R1_ERASE_SEQ_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] Erase Seq Error"); ++ if (status & R1_ERASE_PARAM) ++ N_MSG(RSP, "[CARD_STATUS] Erase Param"); ++ if (status & R1_WP_VIOLATION) ++ N_MSG(RSP, "[CARD_STATUS] WP Violation"); ++ if (status & R1_CARD_IS_LOCKED) ++ N_MSG(RSP, "[CARD_STATUS] Card is Locked"); ++ if (status & R1_LOCK_UNLOCK_FAILED) ++ N_MSG(RSP, "[CARD_STATUS] Lock/Unlock Failed"); ++ if (status & R1_COM_CRC_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] Command CRC Error"); ++ if (status & R1_ILLEGAL_COMMAND) ++ N_MSG(RSP, "[CARD_STATUS] Illegal Command"); ++ if (status & R1_CARD_ECC_FAILED) ++ N_MSG(RSP, "[CARD_STATUS] Card ECC Failed"); ++ if (status & R1_CC_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] CC Error"); ++ if (status & R1_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] Error"); ++ if (status & R1_UNDERRUN) ++ N_MSG(RSP, "[CARD_STATUS] Underrun"); ++ if (status & R1_OVERRUN) ++ N_MSG(RSP, "[CARD_STATUS] Overrun"); ++ if (status & R1_CID_CSD_OVERWRITE) ++ N_MSG(RSP, "[CARD_STATUS] CID/CSD Overwrite"); ++ if (status & R1_WP_ERASE_SKIP) ++ N_MSG(RSP, "[CARD_STATUS] WP Eraser Skip"); ++ if (status & R1_CARD_ECC_DISABLED) ++ N_MSG(RSP, "[CARD_STATUS] Card ECC Disabled"); ++ if (status & R1_ERASE_RESET) ++ N_MSG(RSP, "[CARD_STATUS] Erase Reset"); ++ if (status & R1_READY_FOR_DATA) ++ N_MSG(RSP, "[CARD_STATUS] Ready for Data"); ++ if (status & R1_SWITCH_ERROR) ++ N_MSG(RSP, "[CARD_STATUS] Switch error"); ++ if (status & R1_APP_CMD) ++ N_MSG(RSP, "[CARD_STATUS] App Command"); ++ ++ N_MSG(RSP, "[CARD_STATUS] '%s' State", state[R1_CURRENT_STATE(status)]); ++} ++ ++static void msdc_dump_ocr_reg(struct msdc_host *host, u32 resp) ++{ ++ if (resp & (1 << 7)) ++ N_MSG(RSP, "[OCR] Low Voltage Range"); ++ if (resp & (1 << 15)) ++ N_MSG(RSP, "[OCR] 2.7-2.8 volt"); ++ if (resp & (1 << 16)) ++ N_MSG(RSP, "[OCR] 2.8-2.9 volt"); ++ if (resp & (1 << 17)) ++ N_MSG(RSP, "[OCR] 2.9-3.0 volt"); ++ if (resp & (1 << 18)) ++ N_MSG(RSP, "[OCR] 3.0-3.1 volt"); ++ if (resp & (1 << 19)) ++ N_MSG(RSP, "[OCR] 3.1-3.2 volt"); ++ if (resp & (1 << 20)) ++ N_MSG(RSP, "[OCR] 3.2-3.3 volt"); ++ if (resp & (1 << 21)) ++ N_MSG(RSP, "[OCR] 3.3-3.4 volt"); ++ if (resp & (1 << 22)) ++ N_MSG(RSP, "[OCR] 3.4-3.5 volt"); ++ if (resp & (1 << 23)) ++ N_MSG(RSP, "[OCR] 3.5-3.6 volt"); ++ if (resp & (1 << 24)) ++ N_MSG(RSP, "[OCR] Switching to 1.8V Accepted (S18A)"); ++ 
if (resp & (1 << 30)) ++ N_MSG(RSP, "[OCR] Card Capacity Status (CCS)"); ++ if (resp & (1 << 31)) ++ N_MSG(RSP, "[OCR] Card Power Up Status (Idle)"); ++ else ++ N_MSG(RSP, "[OCR] Card Power Up Status (Busy)"); ++} ++ ++static void msdc_dump_rca_resp(struct msdc_host *host, u32 resp) ++{ ++ u32 status = (((resp >> 15) & 0x1) << 23) | ++ (((resp >> 14) & 0x1) << 22) | ++ (((resp >> 13) & 0x1) << 19) | ++ (resp & 0x1fff); ++ ++ N_MSG(RSP, "[RCA] 0x%.4x", resp >> 16); ++ msdc_dump_card_status(host, status); ++} ++ ++static void msdc_dump_io_resp(struct msdc_host *host, u32 resp) ++{ ++ u32 flags = (resp >> 8) & 0xFF; ++ char *state[] = {"DIS", "CMD", "TRN", "RFU"}; ++ ++ if (flags & (1 << 7)) ++ N_MSG(RSP, "[IO] COM_CRC_ERR"); ++ if (flags & (1 << 6)) ++ N_MSG(RSP, "[IO] Illgal command"); ++ if (flags & (1 << 3)) ++ N_MSG(RSP, "[IO] Error"); ++ if (flags & (1 << 2)) ++ N_MSG(RSP, "[IO] RFU"); ++ if (flags & (1 << 1)) ++ N_MSG(RSP, "[IO] Function number error"); ++ if (flags & (1 << 0)) ++ N_MSG(RSP, "[IO] Out of range"); ++ ++ N_MSG(RSP, "[IO] State: %s, Data:0x%x", state[(resp >> 12) & 0x3], resp & 0xFF); ++} ++#endif ++ ++static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) ++{ ++ u32 base = host->base; ++ u32 timeout, clk_ns; ++ ++ host->timeout_ns = ns; ++ host->timeout_clks = clks; ++ ++ clk_ns = 1000000000UL / host->sclk; ++ timeout = ns / clk_ns + clks; ++ timeout = timeout >> 16; /* in 65536 sclk cycle unit */ ++ timeout = timeout > 1 ? timeout - 1 : 0; ++ timeout = timeout > 255 ? 255 : timeout; ++ ++ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout); ++ ++ N_MSG(OPS, "Set read data timeout: %dns %dclks -> %d x 65536 cycles", ++ ns, clks, timeout + 1); ++} ++ ++/* msdc_eirq_sdio() will be called when EIRQ(for WIFI) */ ++static void msdc_eirq_sdio(void *data) ++{ ++ struct msdc_host *host = (struct msdc_host *)data; ++ ++ N_MSG(INT, "SDIO EINT"); ++ ++ mmc_signal_sdio_irq(host->mmc); ++} ++ ++/* msdc_eirq_cd will not be used! We not using EINT for card detection. */ ++static void msdc_eirq_cd(void *data) ++{ ++ struct msdc_host *host = (struct msdc_host *)data; ++ ++ N_MSG(INT, "CD EINT"); ++ ++#if 0 ++ tasklet_hi_schedule(&host->card_tasklet); ++#else ++ schedule_delayed_work(&host->card_delaywork, HZ); ++#endif ++} ++ ++#if 0 ++static void msdc_tasklet_card(unsigned long arg) ++{ ++ struct msdc_host *host = (struct msdc_host *)arg; ++#else ++static void msdc_tasklet_card(struct work_struct *work) ++{ ++ struct msdc_host *host = (struct msdc_host *)container_of(work, ++ struct msdc_host, card_delaywork.work); ++#endif ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ u32 inserted; ++ u32 status = 0; ++ //u32 change = 0; ++ ++ spin_lock(&host->lock); ++ ++ if (hw->get_cd_status) { // NULL ++ inserted = hw->get_cd_status(); ++ } else { ++ status = sdr_read32(MSDC_PS); ++ if (cd_active_low) ++ inserted = (status & MSDC_PS_CDSTS) ? 0 : 1; ++ else ++ inserted = (status & MSDC_PS_CDSTS) ? 
1 : 0; ++ } ++ if (host->mmc->caps & MMC_CAP_NEEDS_POLL) ++ inserted = 1; ++ ++#if 0 ++ change = host->card_inserted ^ inserted; ++ host->card_inserted = inserted; ++ ++ if (change && !host->suspend) { ++ if (inserted) { ++ host->mmc->f_max = HOST_MAX_MCLK; // work around ++ } ++ mmc_detect_change(host->mmc, msecs_to_jiffies(20)); ++ } ++#else /* Make sure: handle the last interrupt */ ++ host->card_inserted = inserted; ++ ++ if (!host->suspend) { ++ host->mmc->f_max = HOST_MAX_MCLK; ++ mmc_detect_change(host->mmc, msecs_to_jiffies(20)); ++ } ++ ++ IRQ_MSG("card found<%s>", inserted ? "inserted" : "removed"); ++#endif ++ ++ spin_unlock(&host->lock); ++} ++ ++#if 0 /* --- by chhung */ ++/* For E2 only */ ++static u8 clk_src_bit[4] = { ++ 0, 3, 5, 7 ++}; ++ ++static void msdc_select_clksrc(struct msdc_host* host, unsigned char clksrc) ++{ ++ u32 val; ++ u32 base = host->base; ++ ++ BUG_ON(clksrc > 3); ++ INIT_MSG("set clock source to <%d>", clksrc); ++ ++ val = sdr_read32(MSDC_CLKSRC_REG); ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ val &= ~(0x3 << clk_src_bit[host->id]); ++ val |= clksrc << clk_src_bit[host->id]; ++ } else { ++ val &= ~0x3; val |= clksrc; ++ } ++ sdr_write32(MSDC_CLKSRC_REG, val); ++ ++ host->hclk = hclks[clksrc]; ++ host->hw->clk_src = clksrc; ++} ++#endif /* end of --- */ ++ ++static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz) ++{ ++ //struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ u32 mode; ++ u32 flags; ++ u32 div; ++ u32 sclk; ++ u32 hclk = host->hclk; ++ //u8 clksrc = hw->clk_src; ++ ++ if (!hz) { // set mmc system clock to 0 ? ++ //ERR_MSG("set mclk to 0!!!"); ++ msdc_reset(); ++ return; ++ } ++ ++ msdc_irq_save(flags); ++ ++#if defined (CONFIG_MT7621_FPGA) || defined (CONFIG_MT7628_FPGA) ++ mode = 0x0; /* use divisor */ ++ if (hz >= (hclk >> 1)) { ++ div = 0; /* mean div = 1/2 */ ++ sclk = hclk >> 1; /* sclk = clk / 2 */ ++ } else { ++ div = (hclk + ((hz << 2) - 1)) / (hz << 2); ++ sclk = (hclk >> 2) / div; ++ } ++#else ++ if (ddr) { ++ mode = 0x2; /* ddr mode and use divisor */ ++ if (hz >= (hclk >> 2)) { ++ div = 1; /* mean div = 1/4 */ ++ sclk = hclk >> 2; /* sclk = clk / 4 */ ++ } else { ++ div = (hclk + ((hz << 2) - 1)) / (hz << 2); ++ sclk = (hclk >> 2) / div; ++ } ++ } else if (hz >= hclk) { /* bug fix */ ++ mode = 0x1; /* no divisor and divisor is ignored */ ++ div = 0; ++ sclk = hclk; ++ } else { ++ mode = 0x0; /* use divisor */ ++ if (hz >= (hclk >> 1)) { ++ div = 0; /* mean div = 1/2 */ ++ sclk = hclk >> 1; /* sclk = clk / 2 */ ++ } else { ++ div = (hclk + ((hz << 2) - 1)) / (hz << 2); ++ sclk = (hclk >> 2) / div; ++ } ++ } ++#endif ++ /* set clock mode and divisor */ ++ sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode); ++ sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div); ++ ++ /* wait clock stable */ ++ while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB)); ++ ++ host->sclk = sclk; ++ host->mclk = hz; ++ msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); // need? ++ ++ INIT_MSG("================"); ++ INIT_MSG("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>", hz/1000, hclk/1000, sclk/1000); ++ INIT_MSG("================"); ++ ++ msdc_irq_restore(flags); ++} ++ ++/* Fix me. when need to abort */ ++static void msdc_abort_data(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ struct mmc_command *stop = host->mrq->stop; ++ ++ ERR_MSG("Need to Abort. dma<%d>", host->dma_xfer); ++ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ ++ // need to check FIFO count 0 ? 
++ ++ if (stop) { /* try to stop, but may not success */ ++ ERR_MSG("stop when abort CMD<%d>", stop->opcode); ++ (void)msdc_do_command(host, stop, 0, CMD_TIMEOUT); ++ } ++ ++ //if (host->mclk >= 25000000) { ++ // msdc_set_mclk(host, 0, host->mclk >> 1); ++ //} ++} ++ ++#if 0 /* --- by chhung */ ++static void msdc_pin_config(struct msdc_host *host, int mode) ++{ ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN; ++ ++ /* Config WP pin */ ++ if (hw->flags & MSDC_WP_PIN_EN) { ++ if (hw->config_gpio_pin) /* NULL */ ++ hw->config_gpio_pin(MSDC_WP_PIN, pull); ++ } ++ ++ switch (mode) { ++ case MSDC_PIN_PULL_UP: ++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 1); /* Check & FIXME */ ++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */ ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 1); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 1); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0); ++ break; ++ case MSDC_PIN_PULL_DOWN: ++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */ ++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 1); /* Check & FIXME */ ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 1); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 1); ++ break; ++ case MSDC_PIN_PULL_NONE: ++ default: ++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPU, 0); /* Check & FIXME */ ++ //sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKPD, 0); /* Check & FIXME */ ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPU, 0); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDPD, 0); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPU, 0); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATPD, 0); ++ break; ++ } ++ ++ N_MSG(CFG, "Pins mode(%d), down(%d), up(%d)", ++ mode, MSDC_PIN_PULL_DOWN, MSDC_PIN_PULL_UP); ++} ++ ++void msdc_pin_reset(struct msdc_host *host, int mode) ++{ ++ struct msdc_hw *hw = (struct msdc_hw *)host->hw; ++ u32 base = host->base; ++ int pull = (mode == MSDC_PIN_PULL_UP) ? GPIO_PULL_UP : GPIO_PULL_DOWN; ++ ++ /* Config reset pin */ ++ if (hw->flags & MSDC_RST_PIN_EN) { ++ if (hw->config_gpio_pin) /* NULL */ ++ hw->config_gpio_pin(MSDC_RST_PIN, pull); ++ ++ if (mode == MSDC_PIN_PULL_UP) { ++ sdr_clr_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST); ++ } else { ++ sdr_set_bits(EMMC_IOCON, EMMC_IOCON_BOOTRST); ++ } ++ } ++} ++ ++static void msdc_core_power(struct msdc_host *host, int on) ++{ ++ N_MSG(CFG, "Turn %s %s power (copower: %d -> %d)", ++ on ? "on" : "off", "core", host->core_power, on); ++ ++ if (on && host->core_power == 0) { ++ msdc_vcore_on(host); ++ host->core_power = 1; ++ msleep(1); ++ } else if (!on && host->core_power == 1) { ++ msdc_vcore_off(host); ++ host->core_power = 0; ++ msleep(1); ++ } ++} ++ ++static void msdc_host_power(struct msdc_host *host, int on) ++{ ++ N_MSG(CFG, "Turn %s %s power ", on ? "on" : "off", "host"); ++ ++ if (on) { ++ //msdc_core_power(host, 1); // need do card detection. ++ msdc_pin_reset(host, MSDC_PIN_PULL_UP); ++ } else { ++ msdc_pin_reset(host, MSDC_PIN_PULL_DOWN); ++ //msdc_core_power(host, 0); ++ } ++} ++ ++static void msdc_card_power(struct msdc_host *host, int on) ++{ ++ N_MSG(CFG, "Turn %s %s power ", on ? 
"on" : "off", "card"); ++ ++ if (on) { ++ msdc_pin_config(host, MSDC_PIN_PULL_UP); ++ if (host->hw->ext_power_on) { ++ host->hw->ext_power_on(); ++ } else { ++ //msdc_vdd_on(host); // need todo card detection. ++ } ++ msleep(1); ++ } else { ++ if (host->hw->ext_power_off) { ++ host->hw->ext_power_off(); ++ } else { ++ //msdc_vdd_off(host); ++ } ++ msdc_pin_config(host, MSDC_PIN_PULL_DOWN); ++ msleep(1); ++ } ++} ++ ++static void msdc_set_power_mode(struct msdc_host *host, u8 mode) ++{ ++ N_MSG(CFG, "Set power mode(%d)", mode); ++ ++ if (host->power_mode == MMC_POWER_OFF && mode != MMC_POWER_OFF) { ++ msdc_host_power(host, 1); ++ msdc_card_power(host, 1); ++ } else if (host->power_mode != MMC_POWER_OFF && mode == MMC_POWER_OFF) { ++ msdc_card_power(host, 0); ++ msdc_host_power(host, 0); ++ } ++ host->power_mode = mode; ++} ++#endif /* end of --- */ ++ ++#ifdef CONFIG_PM ++/* ++ register as callback function of WIFI(combo_sdio_register_pm) . ++ can called by msdc_drv_suspend/resume too. ++*/ ++static void msdc_pm(pm_message_t state, void *data) ++{ ++ struct msdc_host *host = (struct msdc_host *)data; ++ int evt = state.event; ++ ++ if (evt == PM_EVENT_USER_RESUME || evt == PM_EVENT_USER_SUSPEND) { ++ INIT_MSG("USR_%s: suspend<%d> power<%d>", ++ evt == PM_EVENT_USER_RESUME ? "EVENT_USER_RESUME" : "EVENT_USER_SUSPEND", ++ host->suspend, host->power_mode); ++ } ++ ++ if (evt == PM_EVENT_SUSPEND || evt == PM_EVENT_USER_SUSPEND) { ++ if (host->suspend) /* already suspend */ /* default 0*/ ++ return; ++ ++ /* for memory card. already power off by mmc */ ++ if (evt == PM_EVENT_SUSPEND && host->power_mode == MMC_POWER_OFF) ++ return; ++ ++ host->suspend = 1; ++ host->pm_state = state; /* default PMSG_RESUME */ ++ ++ INIT_MSG("%s Suspend", evt == PM_EVENT_SUSPEND ? "PM" : "USR"); ++ if(host->hw->flags & MSDC_SYS_SUSPEND) /* set for card */ ++ (void)mmc_suspend_host(host->mmc); ++ else { ++ // host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* just for double confirm */ /* --- by chhung */ ++ mmc_remove_host(host->mmc); ++ } ++ } else if (evt == PM_EVENT_RESUME || evt == PM_EVENT_USER_RESUME) { ++ if (!host->suspend){ ++ //ERR_MSG("warning: already resume"); ++ return; ++ } ++ ++ /* No PM resume when USR suspend */ ++ if (evt == PM_EVENT_RESUME && host->pm_state.event == PM_EVENT_USER_SUSPEND) { ++ ERR_MSG("PM Resume when in USR Suspend"); /* won't happen. */ ++ return; ++ } ++ ++ host->suspend = 0; ++ host->pm_state = state; ++ ++ INIT_MSG("%s Resume", evt == PM_EVENT_RESUME ? "PM" : "USR"); ++ if(host->hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */ ++ (void)mmc_resume_host(host->mmc); ++ } ++ else { ++ // host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* --- by chhung */ ++ mmc_add_host(host->mmc); ++ } ++ } ++} ++#endif ++ ++/*--------------------------------------------------------------------------*/ ++/* mmc_host_ops members */ ++/*--------------------------------------------------------------------------*/ ++static unsigned int msdc_command_start(struct msdc_host *host, ++ struct mmc_command *cmd, ++ int tune, /* not used */ ++ unsigned long timeout) ++{ ++ u32 base = host->base; ++ u32 opcode = cmd->opcode; ++ u32 rawcmd; ++ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | ++ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | ++ MSDC_INT_ACMD19_DONE; ++ ++ u32 resp; ++ unsigned long tmo; ++ ++ /* Protocol layer does not provide response type, but our hardware needs ++ * to know exact type, not just size! 
++ */ ++ if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND) ++ resp = RESP_R3; ++ else if (opcode == MMC_SET_RELATIVE_ADDR || opcode == SD_SEND_RELATIVE_ADDR) ++ resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1; ++ else if (opcode == MMC_FAST_IO) ++ resp = RESP_R4; ++ else if (opcode == MMC_GO_IRQ_STATE) ++ resp = RESP_R5; ++ else if (opcode == MMC_SELECT_CARD) ++ resp = (cmd->arg != 0) ? RESP_R1B : RESP_NONE; ++ else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED) ++ resp = RESP_R1; /* SDIO workaround. */ ++ else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR)) ++ resp = RESP_R1; ++ else { ++ switch (mmc_resp_type(cmd)) { ++ case MMC_RSP_R1: ++ resp = RESP_R1; ++ break; ++ case MMC_RSP_R1B: ++ resp = RESP_R1B; ++ break; ++ case MMC_RSP_R2: ++ resp = RESP_R2; ++ break; ++ case MMC_RSP_R3: ++ resp = RESP_R3; ++ break; ++ case MMC_RSP_NONE: ++ default: ++ resp = RESP_NONE; ++ break; ++ } ++ } ++ ++ cmd->error = 0; ++ /* rawcmd : ++ * vol_swt << 30 | auto_cmd << 28 | blklen << 16 | go_irq << 15 | ++ * stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode ++ */ ++ rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16; ++ ++ if (opcode == MMC_READ_MULTIPLE_BLOCK) { ++ rawcmd |= (2 << 11); ++ } else if (opcode == MMC_READ_SINGLE_BLOCK) { ++ rawcmd |= (1 << 11); ++ } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) { ++ rawcmd |= ((2 << 11) | (1 << 13)); ++ } else if (opcode == MMC_WRITE_BLOCK) { ++ rawcmd |= ((1 << 11) | (1 << 13)); ++ } else if (opcode == SD_IO_RW_EXTENDED) { ++ if (cmd->data->flags & MMC_DATA_WRITE) ++ rawcmd |= (1 << 13); ++ if (cmd->data->blocks > 1) ++ rawcmd |= (2 << 11); ++ else ++ rawcmd |= (1 << 11); ++ } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) { ++ rawcmd |= (1 << 14); ++ } else if ((opcode == SD_APP_SEND_SCR) || ++ (opcode == SD_APP_SEND_NUM_WR_BLKS) || ++ (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) || ++ (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) || ++ (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) { ++ rawcmd |= (1 << 11); ++ } else if (opcode == MMC_STOP_TRANSMISSION) { ++ rawcmd |= (1 << 14); ++ rawcmd &= ~(0x0FFF << 16); ++ } ++ ++ N_MSG(CMD, "CMD<%d><0x%.8x> Arg<0x%.8x>", opcode , rawcmd, cmd->arg); ++ ++ tmo = jiffies + timeout; ++ ++ if (opcode == MMC_SEND_STATUS) { ++ for (;;) { ++ if (!sdc_is_cmd_busy()) ++ break; ++ ++ if (time_after(jiffies, tmo)) { ++ ERR_MSG("XXX cmd_busy timeout: before CMD<%d>", opcode); ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ goto end; ++ } ++ } ++ }else { ++ for (;;) { ++ if (!sdc_is_busy()) ++ break; ++ if (time_after(jiffies, tmo)) { ++ ERR_MSG("XXX sdc_busy timeout: before CMD<%d>", opcode); ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ goto end; ++ } ++ } ++ } ++ ++ //BUG_ON(in_interrupt()); ++ host->cmd = cmd; ++ host->cmd_rsp = resp; ++ ++ init_completion(&host->cmd_done); ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ sdc_send_cmd(rawcmd, cmd->arg); ++ ++end: ++ return cmd->error; ++} ++ ++static unsigned int msdc_command_resp(struct msdc_host *host, ++ struct mmc_command *cmd, ++ int tune, ++ unsigned long timeout) ++{ ++ u32 base = host->base; ++ u32 opcode = cmd->opcode; ++ //u32 rawcmd; ++ u32 resp; ++ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | ++ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | ++ MSDC_INT_ACMD19_DONE; ++ ++ resp = host->cmd_rsp; ++ ++ BUG_ON(in_interrupt()); ++ 
//init_completion(&host->cmd_done); ++ //sdr_set_bits(MSDC_INTEN, wints); ++ ++ spin_unlock(&host->lock); ++ if(!wait_for_completion_timeout(&host->cmd_done, 10*timeout)){ ++ ERR_MSG("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>", opcode, cmd->arg); ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ } ++ spin_lock(&host->lock); ++ ++ sdr_clr_bits(MSDC_INTEN, wints); ++ host->cmd = NULL; ++ ++//end: ++#ifdef MT6575_SD_DEBUG ++ switch (resp) { ++ case RESP_NONE: ++ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)", opcode, cmd->error, resp); ++ break; ++ case RESP_R2: ++ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= %.8x %.8x %.8x %.8x", ++ opcode, cmd->error, resp, cmd->resp[0], cmd->resp[1], ++ cmd->resp[2], cmd->resp[3]); ++ break; ++ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */ ++ N_MSG(RSP, "CMD_RSP(%d): %d RSP(%d)= 0x%.8x", ++ opcode, cmd->error, resp, cmd->resp[0]); ++ if (cmd->error == 0) { ++ switch (resp) { ++ case RESP_R1: ++ case RESP_R1B: ++ msdc_dump_card_status(host, cmd->resp[0]); ++ break; ++ case RESP_R3: ++ msdc_dump_ocr_reg(host, cmd->resp[0]); ++ break; ++ case RESP_R5: ++ msdc_dump_io_resp(host, cmd->resp[0]); ++ break; ++ case RESP_R6: ++ msdc_dump_rca_resp(host, cmd->resp[0]); ++ break; ++ } ++ } ++ break; ++ } ++#endif ++ ++ /* do we need to save card's RCA when SD_SEND_RELATIVE_ADDR */ ++ ++ if (!tune) { ++ return cmd->error; ++ } ++ ++ /* memory card CRC */ ++ if(host->hw->flags & MSDC_REMOVABLE && cmd->error == (unsigned int)(-EIO) ) { ++ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */ ++ msdc_abort_data(host); ++ } else { ++ /* do basic: reset*/ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ } ++ cmd->error = msdc_tune_cmdrsp(host,cmd); ++ } ++ ++ // check DAT0 ++ /* if (resp == RESP_R1B) { ++ while ((sdr_read32(MSDC_PS) & 0x10000) != 0x10000); ++ } */ ++ /* CMD12 Error Handle */ ++ ++ return cmd->error; ++} ++ ++static unsigned int msdc_do_command(struct msdc_host *host, ++ struct mmc_command *cmd, ++ int tune, ++ unsigned long timeout) ++{ ++ if (msdc_command_start(host, cmd, tune, timeout)) ++ goto end; ++ ++ if (msdc_command_resp(host, cmd, tune, timeout)) ++ goto end; ++ ++end: ++ ++ N_MSG(CMD, " return<%d> resp<0x%.8x>", cmd->error, cmd->resp[0]); ++ return cmd->error; ++} ++ ++/* The abort condition when PIO read/write ++ tmo: ++*/ ++static int msdc_pio_abort(struct msdc_host *host, struct mmc_data *data, unsigned long tmo) ++{ ++ int ret = 0; ++ u32 base = host->base; ++ ++ if (atomic_read(&host->abort)) { ++ ret = 1; ++ } ++ ++ if (time_after(jiffies, tmo)) { ++ data->error = (unsigned int)-ETIMEDOUT; ++ ERR_MSG("XXX PIO Data Timeout: CMD<%d>", host->mrq->cmd->opcode); ++ ret = 1; ++ } ++ ++ if(ret) { ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ ERR_MSG("msdc pio find abort"); ++ } ++ return ret; ++} ++ ++/* ++ Need to add a timeout, or WDT timeout, system reboot. 
++*/ ++// pio mode data read/write ++static int msdc_pio_read(struct msdc_host *host, struct mmc_data *data) ++{ ++ struct scatterlist *sg = data->sg; ++ u32 base = host->base; ++ u32 num = data->sg_len; ++ u32 *ptr; ++ u8 *u8ptr; ++ u32 left = 0; ++ u32 count, size = 0; ++ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ; ++ unsigned long tmo = jiffies + DAT_TIMEOUT; ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ while (num) { ++ left = sg_dma_len(sg); ++ ptr = sg_virt(sg); ++ while (left) { ++ if ((left >= MSDC_FIFO_THD) && (msdc_rxfifocnt() >= MSDC_FIFO_THD)) { ++ count = MSDC_FIFO_THD >> 2; ++ do { ++ *ptr++ = msdc_fifo_read32(); ++ } while (--count); ++ left -= MSDC_FIFO_THD; ++ } else if ((left < MSDC_FIFO_THD) && msdc_rxfifocnt() >= left) { ++ while (left > 3) { ++ *ptr++ = msdc_fifo_read32(); ++ left -= 4; ++ } ++ ++ u8ptr = (u8 *)ptr; ++ while(left) { ++ * u8ptr++ = msdc_fifo_read8(); ++ left--; ++ } ++ } ++ ++ if (msdc_pio_abort(host, data, tmo)) { ++ goto end; ++ } ++ } ++ size += sg_dma_len(sg); ++ sg = sg_next(sg); num--; ++ } ++end: ++ data->bytes_xfered += size; ++ N_MSG(FIO, " PIO Read<%d>bytes", size); ++ ++ sdr_clr_bits(MSDC_INTEN, wints); ++ if(data->error) ERR_MSG("read pio data->error<%d> left<%d> size<%d>", data->error, left, size); ++ return data->error; ++} ++ ++/* please make sure won't using PIO when size >= 512 ++ which means, memory card block read/write won't using pio ++ then don't need to handle the CMD12 when data error. ++*/ ++static int msdc_pio_write(struct msdc_host* host, struct mmc_data *data) ++{ ++ u32 base = host->base; ++ struct scatterlist *sg = data->sg; ++ u32 num = data->sg_len; ++ u32 *ptr; ++ u8 *u8ptr; ++ u32 left; ++ u32 count, size = 0; ++ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ; ++ unsigned long tmo = jiffies + DAT_TIMEOUT; ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ while (num) { ++ left = sg_dma_len(sg); ++ ptr = sg_virt(sg); ++ ++ while (left) { ++ if (left >= MSDC_FIFO_SZ && msdc_txfifocnt() == 0) { ++ count = MSDC_FIFO_SZ >> 2; ++ do { ++ msdc_fifo_write32(*ptr); ptr++; ++ } while (--count); ++ left -= MSDC_FIFO_SZ; ++ } else if (left < MSDC_FIFO_SZ && msdc_txfifocnt() == 0) { ++ while (left > 3) { ++ msdc_fifo_write32(*ptr); ptr++; ++ left -= 4; ++ } ++ ++ u8ptr = (u8*)ptr; ++ while(left){ ++ msdc_fifo_write8(*u8ptr); u8ptr++; ++ left--; ++ } ++ } ++ ++ if (msdc_pio_abort(host, data, tmo)) { ++ goto end; ++ } ++ } ++ size += sg_dma_len(sg); ++ sg = sg_next(sg); num--; ++ } ++end: ++ data->bytes_xfered += size; ++ N_MSG(FIO, " PIO Write<%d>bytes", size); ++ if(data->error) ERR_MSG("write pio data->error<%d>", data->error); ++ ++ sdr_clr_bits(MSDC_INTEN, wints); ++ return data->error; ++} ++ ++#if 0 /* --- by chhung */ ++// DMA resume / start / stop ++static void msdc_dma_resume(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_RESUME, 1); ++ ++ N_MSG(DMA, "DMA resume"); ++} ++#endif /* end of --- */ ++ ++static void msdc_dma_start(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ; ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ //dsb(); /* --- by chhung */ ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1); ++ ++ N_MSG(DMA, "DMA start"); ++} ++ ++static void msdc_dma_stop(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ //u32 retries=500; ++ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR ; ++ ++ N_MSG(DMA, "DMA status: 
0x%.8x",sdr_read32(MSDC_DMA_CFG)); ++ //while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS); ++ ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); ++ while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS); ++ ++ //dsb(); /* --- by chhung */ ++ sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */ ++ ++ N_MSG(DMA, "DMA stop"); ++} ++ ++#if 0 /* --- by chhung */ ++/* dump a gpd list */ ++static void msdc_dma_dump(struct msdc_host *host, struct msdc_dma *dma) ++{ ++ gpd_t *gpd = dma->gpd; ++ bd_t *bd = dma->bd; ++ bd_t *ptr; ++ int i = 0; ++ int p_to_v; ++ ++ if (dma->mode != MSDC_MODE_DMA_DESC) { ++ return; ++ } ++ ++ ERR_MSG("try to dump gpd and bd"); ++ ++ /* dump gpd */ ++ ERR_MSG(".gpd<0x%.8x> gpd_phy<0x%.8x>", (int)gpd, (int)dma->gpd_addr); ++ ERR_MSG("...hwo <%d>", gpd->hwo ); ++ ERR_MSG("...bdp <%d>", gpd->bdp ); ++ ERR_MSG("...chksum<0x%.8x>", gpd->chksum ); ++ //ERR_MSG("...intr <0x%.8x>", gpd->intr ); ++ ERR_MSG("...next <0x%.8x>", (int)gpd->next ); ++ ERR_MSG("...ptr <0x%.8x>", (int)gpd->ptr ); ++ ERR_MSG("...buflen<0x%.8x>", gpd->buflen ); ++ //ERR_MSG("...extlen<0x%.8x>", gpd->extlen ); ++ //ERR_MSG("...arg <0x%.8x>", gpd->arg ); ++ //ERR_MSG("...blknum<0x%.8x>", gpd->blknum ); ++ //ERR_MSG("...cmd <0x%.8x>", gpd->cmd ); ++ ++ /* dump bd */ ++ ERR_MSG(".bd<0x%.8x> bd_phy<0x%.8x> gpd_ptr<0x%.8x>", (int)bd, (int)dma->bd_addr, (int)gpd->ptr); ++ ptr = bd; ++ p_to_v = ((u32)bd - (u32)dma->bd_addr); ++ while (1) { ++ ERR_MSG(".bd[%d]", i); i++; ++ ERR_MSG("...eol <%d>", ptr->eol ); ++ ERR_MSG("...chksum<0x%.8x>", ptr->chksum ); ++ //ERR_MSG("...blkpad<0x%.8x>", ptr->blkpad ); ++ //ERR_MSG("...dwpad <0x%.8x>", ptr->dwpad ); ++ ERR_MSG("...next <0x%.8x>", (int)ptr->next ); ++ ERR_MSG("...ptr <0x%.8x>", (int)ptr->ptr ); ++ ERR_MSG("...buflen<0x%.8x>", (int)ptr->buflen ); ++ ++ if (ptr->eol == 1) { ++ break; ++ } ++ ++ /* find the next bd, virtual address of ptr->next */ ++ /* don't need to enable when use malloc */ ++ //BUG_ON( (ptr->next + p_to_v)!=(ptr+1) ); ++ //ERR_MSG(".next bd<0x%.8x><0x%.8x>", (ptr->next + p_to_v), (ptr+1)); ++ ptr++; ++ } ++ ++ ERR_MSG("dump gpd and bd finished"); ++} ++#endif /* end of --- */ ++ ++/* calc checksum */ ++static u8 msdc_dma_calcs(u8 *buf, u32 len) ++{ ++ u32 i, sum = 0; ++ for (i = 0; i < len; i++) { ++ sum += buf[i]; ++ } ++ return 0xFF - (u8)sum; ++} ++ ++/* gpd bd setup + dma registers */ ++static int msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma) ++{ ++ u32 base = host->base; ++ u32 sglen = dma->sglen; ++ //u32 i, j, num, bdlen, arg, xfersz; ++ u32 j, num, bdlen; ++ u8 blkpad, dwpad, chksum; ++ struct scatterlist *sg = dma->sg; ++ gpd_t *gpd; ++ bd_t *bd; ++ ++ switch (dma->mode) { ++ case MSDC_MODE_DMA_BASIC: ++ BUG_ON(dma->xfersz > 65535); ++ BUG_ON(dma->sglen != 1); ++ sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg))); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1); ++//#if defined (CONFIG_RALINK_MT7620) ++ if (ralink_soc == MT762X_SOC_MT7620A) ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg)); ++//#elif defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) ++ else ++ sdr_write32((volatile u32*)(RALINK_MSDC_BASE+0xa8), sg_dma_len(sg)); ++//#endif ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0); ++ break; ++ case MSDC_MODE_DMA_DESC: ++ blkpad = (dma->flags & DMA_FLAG_PAD_BLOCK) ? 1 : 0; ++ dwpad = (dma->flags & DMA_FLAG_PAD_DWORD) ? 1 : 0; ++ chksum = (dma->flags & DMA_FLAG_EN_CHKSUM) ? 
1 : 0; ++ ++ /* calculate the required number of gpd */ ++ num = (sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD; ++ BUG_ON(num !=1 ); ++ ++ gpd = dma->gpd; ++ bd = dma->bd; ++ bdlen = sglen; ++ ++ /* modify gpd*/ ++ //gpd->intr = 0; ++ gpd->hwo = 1; /* hw will clear it */ ++ gpd->bdp = 1; ++ gpd->chksum = 0; /* need to clear first. */ ++ gpd->chksum = (chksum ? msdc_dma_calcs((u8 *)gpd, 16) : 0); ++ ++ /* modify bd*/ ++ for (j = 0; j < bdlen; j++) { ++ msdc_init_bd(&bd[j], blkpad, dwpad, sg_dma_address(sg), sg_dma_len(sg)); ++ if(j == bdlen - 1) { ++ bd[j].eol = 1; /* the last bd */ ++ } else { ++ bd[j].eol = 0; ++ } ++ bd[j].chksum = 0; /* checksume need to clear first */ ++ bd[j].chksum = (chksum ? msdc_dma_calcs((u8 *)(&bd[j]), 16) : 0); ++ sg++; ++ } ++ ++ dma->used_gpd += 2; ++ dma->used_bd += bdlen; ++ ++ sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, chksum); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1); ++ ++ sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr)); ++ break; ++ ++ default: ++ break; ++ } ++ ++ N_MSG(DMA, "DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL)); ++ N_MSG(DMA, "DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG)); ++ N_MSG(DMA, "DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA)); ++ ++ return 0; ++} ++ ++static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, ++ struct scatterlist *sg, unsigned int sglen) ++{ ++ BUG_ON(sglen > MAX_BD_NUM); /* not support currently */ ++ ++ dma->sg = sg; ++ dma->flags = DMA_FLAG_EN_CHKSUM; ++ //dma->flags = DMA_FLAG_NONE; /* CHECKME */ ++ dma->sglen = sglen; ++ dma->xfersz = host->xfer_size; ++ dma->burstsz = MSDC_BRUST_64B; ++ ++ if (sglen == 1 && sg_dma_len(sg) <= MAX_DMA_CNT) ++ dma->mode = MSDC_MODE_DMA_BASIC; ++ else ++ dma->mode = MSDC_MODE_DMA_DESC; ++ ++ N_MSG(DMA, "DMA mode<%d> sglen<%d> xfersz<%d>", dma->mode, dma->sglen, dma->xfersz); ++ ++ msdc_dma_config(host, dma); ++ ++ /*if (dma->mode == MSDC_MODE_DMA_DESC) { ++ //msdc_dma_dump(host, dma); ++ } */ ++} ++ ++/* set block number before send command */ ++static void msdc_set_blknum(struct msdc_host *host, u32 blknum) ++{ ++ u32 base = host->base; ++ ++ sdr_write32(SDC_BLK_NUM, blknum); ++} ++ ++static int msdc_do_request(struct mmc_host*mmc, struct mmc_request*mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ u32 base = host->base; ++ //u32 intsts = 0; ++ unsigned int left=0; ++ int dma = 0, read = 1, dir = DMA_FROM_DEVICE, send_type=0; ++ ++ #define SND_DAT 0 ++ #define SND_CMD 1 ++ ++ BUG_ON(mmc == NULL); ++ BUG_ON(mrq == NULL); ++ ++ host->error = 0; ++ atomic_set(&host->abort, 0); ++ ++ cmd = mrq->cmd; ++ data = mrq->cmd->data; ++ ++#if 0 /* --- by chhung */ ++ //if(host->id ==1){ ++ N_MSG(OPS, "enable clock!"); ++ msdc_ungate_clock(host->id); ++ //} ++#endif /* end of --- */ ++ ++ if (!data) { ++ send_type=SND_CMD; ++ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) { ++ goto done; ++ } ++ } else { ++ BUG_ON(data->blksz > HOST_MAX_BLKSZ); ++ send_type=SND_DAT; ++ ++ data->error = 0; ++ read = data->flags & MMC_DATA_READ ? 
1 : 0; ++ host->data = data; ++ host->xfer_size = data->blocks * data->blksz; ++ host->blksz = data->blksz; ++ ++ /* deside the transfer mode */ ++ if (drv_mode[host->id] == MODE_PIO) { ++ host->dma_xfer = dma = 0; ++ } else if (drv_mode[host->id] == MODE_DMA) { ++ host->dma_xfer = dma = 1; ++ } else if (drv_mode[host->id] == MODE_SIZE_DEP) { ++ host->dma_xfer = dma = ((host->xfer_size >= dma_size[host->id]) ? 1 : 0); ++ } ++ ++ if (read) { ++ if ((host->timeout_ns != data->timeout_ns) || ++ (host->timeout_clks != data->timeout_clks)) { ++ msdc_set_timeout(host, data->timeout_ns, data->timeout_clks); ++ } ++ } ++ ++ msdc_set_blknum(host, data->blocks); ++ //msdc_clr_fifo(); /* no need */ ++ ++ if (dma) { ++ msdc_dma_on(); /* enable DMA mode first!! */ ++ init_completion(&host->xfer_done); ++ ++ /* start the command first*/ ++ if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0) ++ goto done; ++ ++ dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ (void)dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir); ++ msdc_dma_setup(host, &host->dma, data->sg, data->sg_len); ++ ++ /* then wait command done */ ++ if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0) ++ goto done; ++ ++ /* for read, the data coming too fast, then CRC error ++ start DMA no business with CRC. */ ++ //init_completion(&host->xfer_done); ++ msdc_dma_start(host); ++ ++ spin_unlock(&host->lock); ++ if(!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)){ ++ ERR_MSG("XXX CMD<%d> wait xfer_done<%d> timeout!!", cmd->opcode, data->blocks * data->blksz); ++ ERR_MSG(" DMA_SA = 0x%x", sdr_read32(MSDC_DMA_SA)); ++ ERR_MSG(" DMA_CA = 0x%x", sdr_read32(MSDC_DMA_CA)); ++ ERR_MSG(" DMA_CTRL = 0x%x", sdr_read32(MSDC_DMA_CTRL)); ++ ERR_MSG(" DMA_CFG = 0x%x", sdr_read32(MSDC_DMA_CFG)); ++ data->error = (unsigned int)-ETIMEDOUT; ++ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ } ++ spin_lock(&host->lock); ++ msdc_dma_stop(host); ++ } else { ++ /* Firstly: send command */ ++ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) { ++ goto done; ++ } ++ ++ /* Secondly: pio data phase */ ++ if (read) { ++ if (msdc_pio_read(host, data)){ ++ goto done; ++ } ++ } else { ++ if (msdc_pio_write(host, data)) { ++ goto done; ++ } ++ } ++ ++ /* For write case: make sure contents in fifo flushed to device */ ++ if (!read) { ++ while (1) { ++ left=msdc_txfifocnt(); ++ if (left == 0) { ++ break; ++ } ++ if (msdc_pio_abort(host, data, jiffies + DAT_TIMEOUT)) { ++ break; ++ /* Fix me: what about if data error, when stop ? how to? */ ++ } ++ } ++ } else { ++ /* Fix me: read case: need to check CRC error */ ++ } ++ ++ /* For write case: SDCBUSY and Xfer_Comp will assert when DAT0 not busy. ++ For read case : SDCBUSY and Xfer_Comp will assert when last byte read out from FIFO. ++ */ ++ ++ /* try not to wait xfer_comp interrupt. ++ the next command will check SDC_BUSY. ++ SDC_BUSY means xfer_comp assert ++ */ ++ ++ } // PIO mode ++ ++ /* Last: stop transfer */ ++ if (data->stop){ ++ if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0) { ++ goto done; ++ } ++ } ++ } ++ ++done: ++ if (data != NULL) { ++ host->data = NULL; ++ host->dma_xfer = 0; ++ if (dma != 0) { ++ msdc_dma_off(); ++ host->dma.used_bd = 0; ++ host->dma.used_gpd = 0; ++ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir); ++ } ++ host->blksz = 0; ++ ++#if 0 // don't stop twice! ++ if(host->hw->flags & MSDC_REMOVABLE && data->error) { ++ msdc_abort_data(host); ++ /* reset in IRQ, stop command has issued. 
-> No need */ ++ } ++#endif ++ ++ N_MSG(OPS, "CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>",cmd->opcode, (dma? "dma":"pio"), ++ (read ? "read ":"write") ,data->blksz, data->blocks, data->error); ++ } ++ ++#if 0 /* --- by chhung */ ++#if 1 ++ //if(host->id==1) { ++ if(send_type==SND_CMD) { ++ if(cmd->opcode == MMC_SEND_STATUS) { ++ if((cmd->resp[0] & CARD_READY_FOR_DATA) ||(CARD_CURRENT_STATE(cmd->resp[0]) != 7)){ ++ N_MSG(OPS,"disable clock, CMD13 IDLE"); ++ msdc_gate_clock(host->id); ++ } ++ } else { ++ N_MSG(OPS,"disable clock, CMD<%d>", cmd->opcode); ++ msdc_gate_clock(host->id); ++ } ++ } else { ++ if(read) { ++ N_MSG(OPS,"disable clock!!! Read CMD<%d>",cmd->opcode); ++ msdc_gate_clock(host->id); ++ } ++ } ++ //} ++#else ++ msdc_gate_clock(host->id); ++#endif ++#endif /* end of --- */ ++ ++ if (mrq->cmd->error) host->error = 0x001; ++ if (mrq->data && mrq->data->error) host->error |= 0x010; ++ if (mrq->stop && mrq->stop->error) host->error |= 0x100; ++ ++ //if (host->error) ERR_MSG("host->error<%d>", host->error); ++ ++ return host->error; ++} ++ ++static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host) ++{ ++ struct mmc_command cmd; ++ struct mmc_request mrq; ++ u32 err; ++ ++ memset(&cmd, 0, sizeof(struct mmc_command)); ++ cmd.opcode = MMC_APP_CMD; ++#if 0 /* bug: we meet mmc->card is null when ACMD6 */ ++ cmd.arg = mmc->card->rca << 16; ++#else ++ cmd.arg = host->app_cmd_arg; ++#endif ++ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; ++ ++ memset(&mrq, 0, sizeof(struct mmc_request)); ++ mrq.cmd = &cmd; cmd.mrq = &mrq; ++ cmd.data = NULL; ++ ++ err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT); ++ return err; ++} ++ ++static int msdc_tune_cmdrsp(struct msdc_host*host, struct mmc_command *cmd) ++{ ++ int result = -1; ++ u32 base = host->base; ++ u32 rsmpl, cur_rsmpl, orig_rsmpl; ++ u32 rrdly, cur_rrdly = 0xffffffff, orig_rrdly; ++ u32 skip = 1; ++ ++ /* ==== don't support 3.0 now ==== ++ 1: R_SMPL[1] ++ 2: PAD_CMD_RESP_RXDLY[26:22] ++ ==========================*/ ++ ++ // save the previous tune result ++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl); ++ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, orig_rrdly); ++ ++ rrdly = 0; ++ do { ++ for (rsmpl = 0; rsmpl < 2; rsmpl++) { ++ /* Lv1: R_SMPL[1] */ ++ cur_rsmpl = (orig_rsmpl + rsmpl) % 2; ++ if (skip == 1) { ++ skip = 0; ++ continue; ++ } ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl); ++ ++ if (host->app_cmd) { ++ result = msdc_app_cmd(host->mmc, host); ++ if (result) { ++ ERR_MSG("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>", ++ host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl); ++ continue; ++ } ++ } ++ result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune. ++ ERR_MSG("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>", cmd->opcode, ++ (result == 0) ? 
"PASS" : "FAIL", cur_rrdly, cur_rsmpl); ++ ++ if (result == 0) { ++ return 0; ++ } ++ if (result != (unsigned int)(-EIO)) { ++ ERR_MSG("TUNE_CMD<%d> Error<%d> not -EIO", cmd->opcode, result); ++ return result; ++ } ++ ++ /* should be EIO */ ++ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */ ++ msdc_abort_data(host); ++ } ++ } ++ ++ /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */ ++ cur_rrdly = (orig_rrdly + rrdly + 1) % 32; ++ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly); ++ }while (++rrdly < 32); ++ ++ return result; ++} ++ ++/* Support SD2.0 Only */ ++static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ u32 ddr=0; ++ u32 dcrc=0; ++ u32 rxdly, cur_rxdly0, cur_rxdly1; ++ u32 dsmpl, cur_dsmpl, orig_dsmpl; ++ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3; ++ u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7; ++ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3; ++ u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7; ++ int result = -1; ++ u32 skip = 1; ++ ++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl); ++ ++ /* Tune Method 2. */ ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); ++ ++ rxdly = 0; ++ do { ++ for (dsmpl = 0; dsmpl < 2; dsmpl++) { ++ cur_dsmpl = (orig_dsmpl + dsmpl) % 2; ++ if (skip == 1) { ++ skip = 0; ++ continue; ++ } ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl); ++ ++ if (host->app_cmd) { ++ result = msdc_app_cmd(host->mmc, host); ++ if (result) { ++ ERR_MSG("TUNE_BREAD app_cmd<%d> failed", host->mrq->cmd->opcode); ++ continue; ++ } ++ } ++ result = msdc_do_request(mmc,mrq); ++ ++ sdr_get_field(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc); /* RO */ ++ if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG; ++ ERR_MSG("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>", ++ (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc, ++ sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl); ++ ++ /* Fix me: result is 0, but dcrc is still exist */ ++ if (result == 0 && dcrc == 0) { ++ goto done; ++ } else { ++ /* there is a case: command timeout, and data phase not processed */ ++ if (mrq->data->error != 0 && mrq->data->error != (unsigned int)(-EIO)) { ++ ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>", ++ result, mrq->cmd->error, mrq->data->error); ++ goto done; ++ } ++ } ++ } ++ ++ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0); ++ cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1); ++ ++ /* E1 ECO. YD: Reverse */ ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F; ++ orig_dat4 = (cur_rxdly1 >> 24) & 0x1F; ++ orig_dat5 = (cur_rxdly1 >> 16) & 0x1F; ++ orig_dat6 = (cur_rxdly1 >> 8) & 0x1F; ++ orig_dat7 = (cur_rxdly1 >> 0) & 0x1F; ++ } else { ++ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F; ++ orig_dat4 = (cur_rxdly1 >> 0) & 0x1F; ++ orig_dat5 = (cur_rxdly1 >> 8) & 0x1F; ++ orig_dat6 = (cur_rxdly1 >> 16) & 0x1F; ++ orig_dat7 = (cur_rxdly1 >> 24) & 0x1F; ++ } ++ ++ if (ddr) { ++ cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0; ++ cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1; ++ cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? 
((orig_dat2 + 1) % 32) : orig_dat2; ++ cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3; ++ } else { ++ cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0; ++ cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1; ++ cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2; ++ cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3; ++ } ++ cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4; ++ cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5; ++ cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6; ++ cur_dat7 = (dcrc & (1 << 7)) ? ((orig_dat7 + 1) % 32) : orig_dat7; ++ ++ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0); ++ cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0); ++ ++ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0); ++ sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1); ++ ++ } while (++rxdly < 32); ++ ++done: ++ return result; ++} ++ ++static int msdc_tune_bwrite(struct mmc_host *mmc,struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ ++ u32 wrrdly, cur_wrrdly = 0xffffffff, orig_wrrdly; ++ u32 dsmpl, cur_dsmpl, orig_dsmpl; ++ u32 rxdly, cur_rxdly0; ++ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3; ++ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3; ++ int result = -1; ++ u32 skip = 1; ++ ++ // MSDC_IOCON_DDR50CKD need to check. [Fix me] ++ ++ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, orig_wrrdly); ++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl ); ++ ++ /* Tune Method 2. just DAT0 */ ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); ++ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0); ++ ++ /* E1 ECO. YD: Reverse */ ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F; ++ } else { ++ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F; ++ } ++ ++ rxdly = 0; ++ do { ++ wrrdly = 0; ++ do { ++ for (dsmpl = 0; dsmpl < 2; dsmpl++) { ++ cur_dsmpl = (orig_dsmpl + dsmpl) % 2; ++ if (skip == 1) { ++ skip = 0; ++ continue; ++ } ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl); ++ ++ if (host->app_cmd) { ++ result = msdc_app_cmd(host->mmc, host); ++ if (result) { ++ ERR_MSG("TUNE_BWRITE app_cmd<%d> failed", host->mrq->cmd->opcode); ++ continue; ++ } ++ } ++ result = msdc_do_request(mmc,mrq); ++ ++ ERR_MSG("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>", ++ result == 0 ? 
"PASS" : "FAIL", ++ cur_dsmpl, cur_wrrdly, cur_rxdly0); ++ ++ if (result == 0) { ++ goto done; ++ } ++ else { ++ /* there is a case: command timeout, and data phase not processed */ ++ if (mrq->data->error != (unsigned int)(-EIO)) { ++ ERR_MSG("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>", ++ result, mrq->cmd->error, mrq->data->error); ++ goto done; ++ } ++ } ++ } ++ cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32; ++ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly); ++ } while (++wrrdly < 32); ++ ++ cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */ ++ cur_dat1 = orig_dat1; ++ cur_dat2 = orig_dat2; ++ cur_dat3 = orig_dat3; ++ ++ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0); ++ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0); ++ } while (++rxdly < 32); ++ ++done: ++ return result; ++} ++ ++static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status) ++{ ++ struct mmc_command cmd; ++ struct mmc_request mrq; ++ u32 err; ++ ++ memset(&cmd, 0, sizeof(struct mmc_command)); ++ cmd.opcode = MMC_SEND_STATUS; ++ if (mmc->card) { ++ cmd.arg = mmc->card->rca << 16; ++ } else { ++ ERR_MSG("cmd13 mmc card is null"); ++ cmd.arg = host->app_cmd_arg; ++ } ++ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; ++ ++ memset(&mrq, 0, sizeof(struct mmc_request)); ++ mrq.cmd = &cmd; cmd.mrq = &mrq; ++ cmd.data = NULL; ++ ++ err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT); ++ ++ if (status) { ++ *status = cmd.resp[0]; ++ } ++ ++ return err; ++} ++ ++static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host) ++{ ++ u32 err = 0; ++ u32 status = 0; ++ ++ do { ++ err = msdc_get_card_status(mmc, host, &status); ++ if (err) return err; ++ /* need cmd12? */ ++ ERR_MSG("cmd<13> resp<0x%x>", status); ++ } while (R1_CURRENT_STATE(status) == 7); ++ ++ return err; ++} ++ ++/* failed when msdc_do_request */ ++static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ //u32 base = host->base; ++ int ret=0, read; ++ ++ cmd = mrq->cmd; ++ data = mrq->cmd->data; ++ ++ read = data->flags & MMC_DATA_READ ? 1 : 0; ++ ++ if (read) { ++ if (data->error == (unsigned int)(-EIO)) { ++ ret = msdc_tune_bread(mmc,mrq); ++ } ++ } else { ++ ret = msdc_check_busy(mmc, host); ++ if (ret){ ++ ERR_MSG("XXX cmd13 wait program done failed"); ++ return ret; ++ } ++ /* CRC and TO */ ++ /* Fix me: don't care card status? */ ++ ret = msdc_tune_bwrite(mmc,mrq); ++ } ++ ++ return ret; ++} ++ ++/* ops.request */ ++static void msdc_ops_request(struct mmc_host *mmc,struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ ++ //=== for sdio profile === ++#if 0 /* --- by chhung */ ++ u32 old_H32, old_L32, new_H32, new_L32; ++ u32 ticks = 0, opcode = 0, sizes = 0, bRx = 0; ++#endif /* end of --- */ ++ ++ if(host->mrq){ ++ ERR_MSG("XXX host->mrq<0x%.8x>", (int)host->mrq); ++ BUG(); ++ } ++ ++ if (!is_card_present(host) || host->power_mode == MMC_POWER_OFF) { ++ ERR_MSG("cmd<%d> card<%d> power<%d>", mrq->cmd->opcode, is_card_present(host), host->power_mode); ++ mrq->cmd->error = (unsigned int)-ENOMEDIUM; ++ ++#if 1 ++ mrq->done(mrq); // call done directly. ++#else ++ mrq->cmd->retries = 0; // please don't retry. 
++ mmc_request_done(mmc, mrq); ++#endif ++ ++ return; ++ } ++ ++ /* start to process */ ++ spin_lock(&host->lock); ++#if 0 /* --- by chhung */ ++ if (sdio_pro_enable) { //=== for sdio profile === ++ if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) { ++ GPT_GetCounter64(&old_L32, &old_H32); ++ } ++ } ++#endif /* end of --- */ ++ ++ host->mrq = mrq; ++ ++ if (msdc_do_request(mmc,mrq)) { ++ if(host->hw->flags & MSDC_REMOVABLE && ralink_soc == MT762X_SOC_MT7621AT && mrq->data && mrq->data->error) { ++ msdc_tune_request(mmc,mrq); ++ } ++ } ++ ++ /* ==== when request done, check if app_cmd ==== */ ++ if (mrq->cmd->opcode == MMC_APP_CMD) { ++ host->app_cmd = 1; ++ host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */ ++ } else { ++ host->app_cmd = 0; ++ //host->app_cmd_arg = 0; ++ } ++ ++ host->mrq = NULL; ++ ++#if 0 /* --- by chhung */ ++ //=== for sdio profile === ++ if (sdio_pro_enable) { ++ if (mrq->cmd->opcode == 52 || mrq->cmd->opcode == 53) { ++ GPT_GetCounter64(&new_L32, &new_H32); ++ ticks = msdc_time_calc(old_L32, old_H32, new_L32, new_H32); ++ ++ opcode = mrq->cmd->opcode; ++ if (mrq->cmd->data) { ++ sizes = mrq->cmd->data->blocks * mrq->cmd->data->blksz; ++ bRx = mrq->cmd->data->flags & MMC_DATA_READ ? 1 : 0 ; ++ } else { ++ bRx = mrq->cmd->arg & 0x80000000 ? 1 : 0; ++ } ++ ++ if (!mrq->cmd->error) { ++ msdc_performance(opcode, sizes, bRx, ticks); ++ } ++ } ++ } ++#endif /* end of --- */ ++ spin_unlock(&host->lock); ++ ++ mmc_request_done(mmc, mrq); ++ ++ return; ++} ++ ++/* called by ops.set_ios */ ++static void msdc_set_buswidth(struct msdc_host *host, u32 width) ++{ ++ u32 base = host->base; ++ u32 val = sdr_read32(SDC_CFG); ++ ++ val &= ~SDC_CFG_BUSWIDTH; ++ ++ switch (width) { ++ default: ++ case MMC_BUS_WIDTH_1: ++ width = 1; ++ val |= (MSDC_BUS_1BITS << 16); ++ break; ++ case MMC_BUS_WIDTH_4: ++ val |= (MSDC_BUS_4BITS << 16); ++ break; ++ case MMC_BUS_WIDTH_8: ++ val |= (MSDC_BUS_8BITS << 16); ++ break; ++ } ++ ++ sdr_write32(SDC_CFG, val); ++ ++ N_MSG(CFG, "Bus Width = %d", width); ++} ++ ++/* ops.set_ios */ ++static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct msdc_hw *hw=host->hw; ++ u32 base = host->base; ++ u32 ddr = 0; ++ ++#ifdef MT6575_SD_DEBUG ++ static char *vdd[] = { ++ "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v", ++ "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v", ++ "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v", ++ "3.40v", "3.50v", "3.60v" ++ }; ++ static char *power_mode[] = { ++ "OFF", "UP", "ON" ++ }; ++ static char *bus_mode[] = { ++ "UNKNOWN", "OPENDRAIN", "PUSHPULL" ++ }; ++ static char *timing[] = { ++ "LEGACY", "MMC_HS", "SD_HS" ++ }; ++ ++ printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)", ++ ios->clock / 1000, bus_mode[ios->bus_mode], ++ (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1, ++ power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]); ++#endif ++ ++ msdc_set_buswidth(host, ios->bus_width); ++ ++ /* Power control ??? 
*/ ++ switch (ios->power_mode) { ++ case MMC_POWER_OFF: ++ case MMC_POWER_UP: ++ // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */ ++ break; ++ case MMC_POWER_ON: ++ host->power_mode = MMC_POWER_ON; ++ break; ++ default: ++ break; ++ } ++ ++ /* Clock control */ ++ if (host->mclk != ios->clock) { ++ if(ios->clock > 25000000) { ++ //if (!(host->hw->flags & MSDC_REMOVABLE)) { ++ INIT_MSG("SD data latch edge<%d>", hw->data_edge); ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, hw->cmd_edge); ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, hw->data_edge); ++ //} /* for tuning debug */ ++ } else { /* default value */ ++ sdr_write32(MSDC_IOCON, 0x00000000); ++ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000); ++ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward ++ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000); ++ // sdr_write32(MSDC_PAD_TUNE, 0x00000000); ++ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward ++ } ++ msdc_set_mclk(host, ddr, ios->clock); ++ } ++} ++ ++/* ops.get_ro */ ++static int msdc_ops_get_ro(struct mmc_host *mmc) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ unsigned long flags; ++ int ro = 0; ++ ++ if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */ ++ spin_lock_irqsave(&host->lock, flags); ++ ro = (sdr_read32(MSDC_PS) >> 31); ++ spin_unlock_irqrestore(&host->lock, flags); ++ } ++ return ro; ++} ++ ++/* ops.get_cd */ ++static int msdc_ops_get_cd(struct mmc_host *mmc) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ unsigned long flags; ++ int present = 1; ++ ++ /* for sdio, MSDC_REMOVABLE not set, always return 1 */ ++ if (!(host->hw->flags & MSDC_REMOVABLE)) { ++ /* For sdio, read H/W always get<1>, but may timeout some times */ ++#if 1 ++ host->card_inserted = 1; ++ return 1; ++#else ++ host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0; ++ INIT_MSG("sdio ops_get_cd<%d>", host->card_inserted); ++ return host->card_inserted; ++#endif ++ } ++ ++ /* MSDC_CD_PIN_EN set for card */ ++ if (host->hw->flags & MSDC_CD_PIN_EN) { ++ spin_lock_irqsave(&host->lock, flags); ++#if 0 ++ present = host->card_inserted; /* why not read from H/W: Fix me*/ ++#else ++ // CD ++ if (cd_active_low) ++ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1; ++ else ++ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 1 : 0; ++ if (host->mmc->caps & MMC_CAP_NEEDS_POLL) ++ present = 1; ++ host->card_inserted = present; ++#endif ++ spin_unlock_irqrestore(&host->lock, flags); ++ } else { ++ present = 0; /* TODO? Check DAT3 pins for card detection */ ++ } ++ ++ INIT_MSG("ops_get_cd return<%d>", present); ++ return present; ++} ++ ++/* ops.enable_sdio_irq */ ++static void msdc_ops_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ u32 tmp; ++ ++ if (hw->flags & MSDC_EXT_SDIO_IRQ) { /* yes for sdio */ ++ if (enable) { ++ hw->enable_sdio_eirq(); /* combo_sdio_enable_eirq */ ++ } else { ++ hw->disable_sdio_eirq(); /* combo_sdio_disable_eirq */ ++ } ++ } else { ++ ERR_MSG("XXX "); /* so never enter here */ ++ tmp = sdr_read32(SDC_CFG); ++ /* FIXME. 
Need to interrupt gap detection */ ++ if (enable) { ++ tmp |= (SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP); ++ } else { ++ tmp &= ~(SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP); ++ } ++ sdr_write32(SDC_CFG, tmp); ++ } ++} ++ ++static struct mmc_host_ops mt_msdc_ops = { ++ .request = msdc_ops_request, ++ .set_ios = msdc_ops_set_ios, ++ .get_ro = msdc_ops_get_ro, ++ .get_cd = msdc_ops_get_cd, ++ .enable_sdio_irq = msdc_ops_enable_sdio_irq, ++}; ++ ++/*--------------------------------------------------------------------------*/ ++/* interrupt handler */ ++/*--------------------------------------------------------------------------*/ ++static irqreturn_t msdc_irq(int irq, void *dev_id) ++{ ++ struct msdc_host *host = (struct msdc_host *)dev_id; ++ struct mmc_data *data = host->data; ++ struct mmc_command *cmd = host->cmd; ++ u32 base = host->base; ++ ++ u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY | ++ MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY | ++ MSDC_INT_ACMD19_DONE; ++ u32 datsts = MSDC_INT_DATCRCERR |MSDC_INT_DATTMO; ++ ++ u32 intsts = sdr_read32(MSDC_INT); ++ u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts; ++ ++ sdr_write32(MSDC_INT, intsts); /* clear interrupts */ ++ /* MSG will cause fatal error */ ++ ++ /* card change interrupt */ ++ if (intsts & MSDC_INT_CDSC){ ++ if (mtk_sw_poll) ++ return IRQ_HANDLED; ++ IRQ_MSG("MSDC_INT_CDSC irq<0x%.8x>", intsts); ++#if 0 /* ---/+++ by chhung: fix slot mechanical bounce issue */ ++ tasklet_hi_schedule(&host->card_tasklet); ++#else ++ schedule_delayed_work(&host->card_delaywork, HZ); ++#endif ++ /* tuning when plug card ? */ ++ } ++ ++ /* sdio interrupt */ ++ if (intsts & MSDC_INT_SDIOIRQ){ ++ IRQ_MSG("XXX MSDC_INT_SDIOIRQ"); /* seems not sdio irq */ ++ //mmc_signal_sdio_irq(host->mmc); ++ } ++ ++ /* transfer complete interrupt */ ++ if (data != NULL) { ++ if (inten & MSDC_INT_XFER_COMPL) { ++ data->bytes_xfered = host->dma.xfersz; ++ complete(&host->xfer_done); ++ } ++ ++ if (intsts & datsts) { ++ /* do basic reset, or stop command will sdc_busy */ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ atomic_set(&host->abort, 1); /* For PIO mode exit */ ++ ++ if (intsts & MSDC_INT_DATTMO){ ++ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATTMO", host->mrq->cmd->opcode); ++ data->error = (unsigned int)-ETIMEDOUT; ++ } ++ else if (intsts & MSDC_INT_DATCRCERR){ ++ IRQ_MSG("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS)); ++ data->error = (unsigned int)-EIO; ++ } ++ ++ //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) { ++ if (host->dma_xfer) { ++ complete(&host->xfer_done); /* Read CRC come fast, XFER_COMPL not enabled */ ++ } /* PIO mode can't do complete, because not init */ ++ } ++ } ++ ++ /* command interrupts */ ++ if ((cmd != NULL) && (intsts & cmdsts)) { ++ if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) || ++ (intsts & MSDC_INT_ACMD19_DONE)) { ++ u32 *rsp = &cmd->resp[0]; ++ ++ switch (host->cmd_rsp) { ++ case RESP_NONE: ++ break; ++ case RESP_R2: ++ *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2); ++ *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0); ++ break; ++ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */ ++ if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE)) { ++ *rsp = sdr_read32(SDC_ACMD_RESP); ++ } else { ++ *rsp = sdr_read32(SDC_RESP0); ++ } ++ break; ++ } ++ } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) { ++ if(intsts & MSDC_INT_ACMDCRCERR){ ++ IRQ_MSG("XXX 
CMD<%d> MSDC_INT_ACMDCRCERR",cmd->opcode); ++ } ++ else { ++ IRQ_MSG("XXX CMD<%d> MSDC_INT_RSPCRCERR",cmd->opcode); ++ } ++ cmd->error = (unsigned int)-EIO; ++ } else if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) { ++ if(intsts & MSDC_INT_ACMDTMO){ ++ IRQ_MSG("XXX CMD<%d> MSDC_INT_ACMDTMO",cmd->opcode); ++ } ++ else { ++ IRQ_MSG("XXX CMD<%d> MSDC_INT_CMDTMO",cmd->opcode); ++ } ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ } ++ complete(&host->cmd_done); ++ } ++ ++ /* mmc irq interrupts */ ++ if (intsts & MSDC_INT_MMCIRQ) { ++ printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS)); ++ } ++ ++#ifdef MT6575_SD_DEBUG ++ { ++ msdc_int_reg *int_reg = (msdc_int_reg*)&intsts; ++ N_MSG(INT, "IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)", ++ intsts, ++ int_reg->mmcirq, ++ int_reg->cdsc, ++ int_reg->atocmdrdy, ++ int_reg->atocmdtmo, ++ int_reg->atocmdcrc, ++ int_reg->atocmd19done); ++ N_MSG(INT, "IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)", ++ intsts, ++ int_reg->sdioirq, ++ int_reg->cmdrdy, ++ int_reg->cmdtmo, ++ int_reg->rspcrc, ++ int_reg->csta); ++ N_MSG(INT, "IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)", ++ intsts, ++ int_reg->xfercomp, ++ int_reg->dxferdone, ++ int_reg->dattmo, ++ int_reg->datcrc, ++ int_reg->dmaqempty); ++ ++ } ++#endif ++ ++ return IRQ_HANDLED; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* platform_driver members */ ++/*--------------------------------------------------------------------------*/ ++/* called by msdc_drv_probe/remove */ ++static void msdc_enable_cd_irq(struct msdc_host *host, int enable) ++{ ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ ++ /* for sdio, not set */ ++ if ((hw->flags & MSDC_CD_PIN_EN) == 0) { ++ /* Pull down card detection pin since it is not avaiable */ ++ /* ++ if (hw->config_gpio_pin) ++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN); ++ */ ++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); ++ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC); ++ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP); ++ return; ++ } ++ ++ N_MSG(CFG, "CD IRQ Eanable(%d)", enable); ++ ++ if (enable) { ++ if (hw->enable_cd_eirq) { /* not set, never enter */ ++ hw->enable_cd_eirq(); ++ } else { ++ /* card detection circuit relies on the core power so that the core power ++ * shouldn't be turned off. Here adds a reference count to keep ++ * the core power alive. ++ */ ++ //msdc_vcore_on(host); //did in msdc_init_hw() ++ ++ if (hw->config_gpio_pin) /* NULL */ ++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP); ++ ++ sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE); ++ sdr_set_bits(MSDC_PS, MSDC_PS_CDEN); ++ sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC); ++ sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */ ++ } ++ } else { ++ if (hw->disable_cd_eirq) { ++ hw->disable_cd_eirq(); ++ } else { ++ if (hw->config_gpio_pin) /* NULL */ ++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN); ++ ++ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP); ++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); ++ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC); ++ ++ /* Here decreases a reference count to core power since card ++ * detection circuit is shutdown. 
++ */ ++ //msdc_vcore_off(host); ++ } ++ } ++} ++ ++/* called by msdc_drv_probe */ ++static void msdc_init_hw(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ struct msdc_hw *hw = host->hw; ++ ++#ifdef MT6575_SD_DEBUG ++ msdc_reg[host->id] = (struct msdc_regs *)host->base; ++#endif ++ ++ /* Power on */ ++#if 0 /* --- by chhung */ ++ msdc_vcore_on(host); ++ msdc_pin_reset(host, MSDC_PIN_PULL_UP); ++ msdc_select_clksrc(host, hw->clk_src); ++ enable_clock(PERI_MSDC0_PDN + host->id, "SD"); ++ msdc_vdd_on(host); ++#endif /* end of --- */ ++ /* Configure to MMC/SD mode */ ++ sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC); ++ ++ /* Reset */ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ ++ /* Disable card detection */ ++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); ++ ++ /* Disable and clear all interrupts */ ++ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN)); ++ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT)); ++ ++#if 1 ++ /* reset tuning parameter */ ++ sdr_write32(MSDC_PAD_CTL0, 0x00090000); ++ sdr_write32(MSDC_PAD_CTL1, 0x000A0000); ++ sdr_write32(MSDC_PAD_CTL2, 0x000A0000); ++ // sdr_write32(MSDC_PAD_TUNE, 0x00000000); ++ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward ++ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000); ++ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward ++ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000); ++ sdr_write32(MSDC_IOCON, 0x00000000); ++#if 0 // use MT7620 default value: 0x403c004f ++ sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/ ++#endif ++ ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ if (host->id == 1) { ++ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1); ++ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1); ++ ++ /* internal clock: latch read data */ ++ sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK); ++ } ++ } ++#endif ++ ++ /* for safety, should clear SDC_CFG.SDIO_INT_DET_EN & set SDC_CFG.SDIO in ++ pre-loader,uboot,kernel drivers. and SDC_CFG.SDIO_INT_DET_EN will be only ++ set when kernel driver wants to use SDIO bus interrupt */ ++ /* Configure to enable SDIO mode. 
this is a must, otherwise SDIO CMD5 fails */ ++ sdr_set_bits(SDC_CFG, SDC_CFG_SDIO); ++ ++ /* disable SDIO device interrupt detection */ ++ sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE); ++ ++ /* enable SMT for glitch filter */ ++ sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT); ++ sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT); ++ sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT); ++ ++#if 1 ++ /* set clk, cmd, dat pad driving */ ++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, hw->clk_drv); ++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, hw->clk_drv); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, hw->cmd_drv); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, hw->cmd_drv); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, hw->dat_drv); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, hw->dat_drv); ++#else ++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0); ++ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0); ++ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0); ++ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0); ++#endif ++ ++ /* set sampling edge */ ++ ++ /* write crc timeout detection */ ++ sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1); ++ ++ /* Configure to default data timeout */ ++ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC); ++ ++ msdc_set_buswidth(host, MMC_BUS_WIDTH_1); ++ ++ N_MSG(FUC, "init hardware done!"); ++} ++ ++/* called by msdc_drv_remove */ ++static void msdc_deinit_hw(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ ++ /* Disable and clear all interrupts */ ++ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN)); ++ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT)); ++ ++ /* Disable card detection */ ++ msdc_enable_cd_irq(host, 0); ++ // msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */ ++} ++ ++/* init gpd and bd list in msdc_drv_probe */ ++static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma) ++{ ++ gpd_t *gpd = dma->gpd; ++ bd_t *bd = dma->bd; ++ bd_t *ptr, *prev; ++ ++ /* we just support one gpd */ ++ int bdlen = MAX_BD_PER_GPD; ++ ++ /* init the 2 gpd */ ++ memset(gpd, 0, sizeof(gpd_t) * 2); ++ //gpd->next = (void *)virt_to_phys(gpd + 1); /* pointer to a null gpd, bug! 
kmalloc <-> virt_to_phys */ ++ //gpd->next = (dma->gpd_addr + 1); /* bug */ ++ gpd->next = (void *)((u32)dma->gpd_addr + sizeof(gpd_t)); ++ ++ //gpd->intr = 0; ++ gpd->bdp = 1; /* hwo, cs, bd pointer */ ++ //gpd->ptr = (void*)virt_to_phys(bd); ++ gpd->ptr = (void *)dma->bd_addr; /* physical address */ ++ ++ memset(bd, 0, sizeof(bd_t) * bdlen); ++ ptr = bd + bdlen - 1; ++ //ptr->eol = 1; /* 0 or 1 [Fix me]*/ ++ //ptr->next = 0; ++ ++ while (ptr != bd) { ++ prev = ptr - 1; ++ prev->next = (void *)(dma->bd_addr + sizeof(bd_t) *(ptr - bd)); ++ ptr = prev; ++ } ++} ++ ++static int msdc_drv_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ __iomem void *base; ++ struct mmc_host *mmc; ++ struct resource *mem; ++ struct msdc_host *host; ++ struct msdc_hw *hw; ++ int ret, irq; ++ ++ pdev->dev.platform_data = &msdc0_hw; ++ ++ if (of_property_read_bool(pdev->dev.of_node, "mtk,wp-en")) ++ msdc0_hw.flags |= MSDC_WP_PIN_EN; ++ ++ /* Allocate MMC host for this device */ ++ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); ++ if (!mmc) return -ENOMEM; ++ ++ hw = (struct msdc_hw*)pdev->dev.platform_data; ++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ irq = platform_get_irq(pdev, 0); ++ ++ //BUG_ON((!hw) || (!mem) || (irq < 0)); /* --- by chhung */ ++ ++ base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++ /* Set host parameters to mmc */ ++ mmc->ops = &mt_msdc_ops; ++ mmc->f_min = HOST_MIN_MCLK; ++ mmc->f_max = HOST_MAX_MCLK; ++ mmc->ocr_avail = MSDC_OCR_AVAIL; ++ ++ /* For sd card: MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED, ++ For sdio : MSDC_EXT_SDIO_IRQ | MSDC_HIGHSPEED */ ++ if (hw->flags & MSDC_HIGHSPEED) { ++ mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; ++ } ++ if (hw->data_pins == 4) { /* current data_pins are all 4*/ ++ mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (hw->data_pins == 8) { ++ mmc->caps |= MMC_CAP_8_BIT_DATA; ++ } ++ if ((hw->flags & MSDC_SDIO_IRQ) || (hw->flags & MSDC_EXT_SDIO_IRQ)) ++ mmc->caps |= MMC_CAP_SDIO_IRQ; /* yes for sdio */ ++ ++ cd_active_low = !of_property_read_bool(pdev->dev.of_node, "mediatek,cd-high"); ++ mtk_sw_poll = of_property_read_bool(pdev->dev.of_node, "mediatek,cd-poll"); ++ ++ if (mtk_sw_poll) ++ mmc->caps |= MMC_CAP_NEEDS_POLL; ++ ++ /* MMC core transfer sizes tunable parameters */ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0) ++ mmc->max_segs = MAX_HW_SGMTS; ++#else ++ mmc->max_hw_segs = MAX_HW_SGMTS; ++ mmc->max_phys_segs = MAX_PHY_SGMTS; ++#endif ++ mmc->max_seg_size = MAX_SGMT_SZ; ++ mmc->max_blk_size = HOST_MAX_BLKSZ; ++ mmc->max_req_size = MAX_REQ_SZ; ++ mmc->max_blk_count = mmc->max_req_size; ++ ++ host = mmc_priv(mmc); ++ host->hw = hw; ++ host->mmc = mmc; ++ host->id = pdev->id; ++ host->error = 0; ++ host->irq = irq; ++ host->base = (unsigned long) base; ++ host->mclk = 0; /* mclk: the request clock of mmc sub-system */ ++ host->hclk = hclks[hw->clk_src]; /* hclk: clock of clock source to msdc controller */ ++ host->sclk = 0; /* sclk: the really clock after divition */ ++ host->pm_state = PMSG_RESUME; ++ host->suspend = 0; ++ host->core_clkon = 0; ++ host->card_clkon = 0; ++ host->core_power = 0; ++ host->power_mode = MMC_POWER_OFF; ++// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 
0 : 1; ++ host->timeout_ns = 0; ++ host->timeout_clks = DEFAULT_DTOC * 65536; ++ ++ host->mrq = NULL; ++ //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */ ++ ++ host->dma.used_gpd = 0; ++ host->dma.used_bd = 0; ++ ++ /* using dma_alloc_coherent*/ /* todo: using 1, for all 4 slots */ ++ host->dma.gpd = dma_alloc_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), &host->dma.gpd_addr, GFP_KERNEL); ++ host->dma.bd = dma_alloc_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), &host->dma.bd_addr, GFP_KERNEL); ++ BUG_ON((!host->dma.gpd) || (!host->dma.bd)); ++ msdc_init_gpd_bd(host, &host->dma); ++ /*for emmc*/ ++ msdc_6575_host[pdev->id] = host; ++ ++#if 0 ++ tasklet_init(&host->card_tasklet, msdc_tasklet_card, (ulong)host); ++#else ++ INIT_DELAYED_WORK(&host->card_delaywork, msdc_tasklet_card); ++#endif ++ spin_lock_init(&host->lock); ++ msdc_init_hw(host); ++ ++ if (ralink_soc == MT762X_SOC_MT7621AT) ++ ret = request_irq((unsigned int)irq, msdc_irq, 0, dev_name(&pdev->dev), host); ++ else ++ ret = request_irq((unsigned int)irq, msdc_irq, IRQF_TRIGGER_LOW, dev_name(&pdev->dev), host); ++ ++ if (ret) goto release; ++ // mt65xx_irq_unmask(irq); /* --- by chhung */ ++ ++ if (hw->flags & MSDC_CD_PIN_EN) { /* not set for sdio */ ++ if (hw->request_cd_eirq) { /* not set for MT6575 */ ++ hw->request_cd_eirq(msdc_eirq_cd, (void*)host); /* msdc_eirq_cd will not be used! */ ++ } ++ } ++ ++ if (hw->request_sdio_eirq) /* set to combo_sdio_request_eirq() for WIFI */ ++ hw->request_sdio_eirq(msdc_eirq_sdio, (void*)host); /* msdc_eirq_sdio() will be called when EIRQ */ ++ ++ if (hw->register_pm) {/* yes for sdio */ ++#ifdef CONFIG_PM ++ hw->register_pm(msdc_pm, (void*)host); /* combo_sdio_register_pm() */ ++#endif ++ if(hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */ ++ ERR_MSG("MSDC_SYS_SUSPEND and register_pm both set"); ++ } ++ //mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* pm not controlled by system but by client. 
*/ /* --- by chhung */ ++ } ++ ++ platform_set_drvdata(pdev, mmc); ++ ++ ret = mmc_add_host(mmc); ++ if (ret) goto free_irq; ++ ++ /* Config card detection pin and enable interrupts */ ++ if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */ ++ msdc_enable_cd_irq(host, 1); ++ } else { ++ msdc_enable_cd_irq(host, 0); ++ } ++ ++ return 0; ++ ++free_irq: ++ free_irq(irq, host); ++release: ++ platform_set_drvdata(pdev, NULL); ++ msdc_deinit_hw(host); ++ ++#if 0 ++ tasklet_kill(&host->card_tasklet); ++#else ++ cancel_delayed_work_sync(&host->card_delaywork); ++#endif ++ ++ if (mem) ++ release_mem_region(mem->start, mem->end - mem->start + 1); ++ ++ mmc_free_host(mmc); ++ ++ return ret; ++} ++ ++/* 4 devices share one driver, using "drvdata" to tell them apart */ ++static int msdc_drv_remove(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc; ++ struct msdc_host *host; ++ struct resource *mem; ++ ++ mmc = platform_get_drvdata(pdev); ++ BUG_ON(!mmc); ++ ++ host = mmc_priv(mmc); ++ BUG_ON(!host); ++ ++ ERR_MSG("removed !!!"); ++ ++ platform_set_drvdata(pdev, NULL); ++ mmc_remove_host(host->mmc); ++ msdc_deinit_hw(host); ++ ++#if 0 ++ tasklet_kill(&host->card_tasklet); ++#else ++ cancel_delayed_work_sync(&host->card_delaywork); ++#endif ++ free_irq(host->irq, host); ++ ++ dma_free_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), host->dma.gpd, host->dma.gpd_addr); ++ dma_free_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), host->dma.bd, host->dma.bd_addr); ++ ++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ if (mem) ++ release_mem_region(mem->start, mem->end - mem->start + 1); ++ ++ mmc_free_host(host->mmc); ++ ++ return 0; ++} ++ ++/* Fix me: Power Flow */ ++#ifdef CONFIG_PM ++static int msdc_drv_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ int ret = 0; ++ struct mmc_host *mmc = platform_get_drvdata(pdev); ++ struct msdc_host *host = mmc_priv(mmc); ++ ++ if (mmc && state.event == PM_EVENT_SUSPEND && (host->hw->flags & MSDC_SYS_SUSPEND)) { /* will set for card */ ++ msdc_pm(state, (void*)host); ++ } ++ ++ return ret; ++} ++ ++static int msdc_drv_resume(struct platform_device *pdev) ++{ ++ int ret = 0; ++ struct mmc_host *mmc = platform_get_drvdata(pdev); ++ struct msdc_host *host = mmc_priv(mmc); ++ struct pm_message state; ++ ++ state.event = PM_EVENT_RESUME; ++ if (mmc && (host->hw->flags & MSDC_SYS_SUSPEND)) {/* will set for card */ ++ msdc_pm(state, (void*)host); ++ } ++ ++ /* This means WIFI is not controlled by PM */ ++ ++ return ret; ++} ++#endif ++ ++static const struct of_device_id mt7620_sdhci_match[] = { ++ { .compatible = "ralink,mt7620-sdhci" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7620_sdhci_match); ++ ++static struct platform_driver mt_msdc_driver = { ++ .probe = msdc_drv_probe, ++ .remove = msdc_drv_remove, ++#ifdef CONFIG_PM ++ .suspend = msdc_drv_suspend, ++ .resume = msdc_drv_resume, ++#endif ++ .driver = { ++ .name = DRV_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = mt7620_sdhci_match, ++ }, ++}; ++ ++/*--------------------------------------------------------------------------*/ ++/* module init/exit */ ++/*--------------------------------------------------------------------------*/ ++static int __init mt_msdc_init(void) ++{ ++ int ret; ++/* +++ by chhung */ ++ u32 reg; ++ ++#if defined (CONFIG_MTD_ANY_RALINK) ++ extern int ra_check_flash_type(void); ++ if(ra_check_flash_type() == 2) { /* NAND */ ++ printk("%s: !!!!! 
SDXC Module Initialize Fail !!!!!", __func__); ++ return 0; ++ } ++#endif ++ printk("MTK MSDC device init.\n"); ++ mtk_sd_device.dev.platform_data = &msdc0_hw; ++if (ralink_soc == MT762X_SOC_MT7620A || ralink_soc == MT762X_SOC_MT7621AT) { ++//#if defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++ reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3<<18); ++//#if defined (CONFIG_RALINK_MT7620) ++ if (ralink_soc == MT762X_SOC_MT7620A) ++ reg |= 0x1<<18; ++//#endif ++} else { ++//#elif defined (CONFIG_RALINK_MT7628) ++ /* TODO: maybe omitted when RAether already toggle AGPIO_CFG */ ++ reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x3c)); ++ reg |= 0x1e << 16; ++ sdr_write32((volatile u32*)(RALINK_SYSCTL_BASE + 0x3c), reg); ++ ++ reg = sdr_read32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60)) & ~(0x3<<10); ++#if defined (CONFIG_MTK_MMC_EMMC_8BIT) ++ reg |= 0x3<<26 | 0x3<<28 | 0x3<<30; ++ msdc0_hw.data_pins = 8, ++#endif ++//#endif ++} ++ sdr_write32((volatile u32*)(RALINK_SYSCTL_BASE + 0x60), reg); ++ //platform_device_register(&mtk_sd_device); ++/* end of +++ */ ++ ++ ret = platform_driver_register(&mt_msdc_driver); ++ if (ret) { ++ printk(KERN_ERR DRV_NAME ": Can't register driver"); ++ return ret; ++ } ++ printk(KERN_INFO DRV_NAME ": MediaTek MT6575 MSDC Driver\n"); ++ ++#if defined (MT6575_SD_DEBUG) ++ msdc_debug_proc_init(); ++#endif ++ return 0; ++} ++ ++static void __exit mt_msdc_exit(void) ++{ ++// platform_device_unregister(&mtk_sd_device); ++ platform_driver_unregister(&mt_msdc_driver); ++} ++ ++module_init(mt_msdc_init); ++module_exit(mt_msdc_exit); ++MODULE_LICENSE("GPL"); diff --git a/target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch b/target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch new file mode 100644 index 0000000000..b74a48a220 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0047-DMA-ralink-add-rt2880-dma-engine.patch @@ -0,0 +1,1757 @@ +From f1c4d9e622c800e1f38b3818f933ec7597d1ccfb Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:29:51 +0100 +Subject: [PATCH 47/53] DMA: ralink: add rt2880 dma engine + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/dma/Kconfig | 6 + + drivers/dma/Makefile | 1 + + drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++ + include/linux/dmaengine.h | 1 + + 4 files changed, 585 insertions(+) + create mode 100644 drivers/dma/ralink-gdma.c + +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -40,6 +40,18 @@ config ASYNC_TX_ENABLE_CHANNEL_SWITCH + config ARCH_HAS_ASYNC_TX_FIND_CHANNEL + bool + ++config DMA_RALINK ++ tristate "RALINK DMA support" ++ depends on RALINK && !SOC_RT288X ++ select DMA_ENGINE ++ select DMA_VIRTUAL_CHANNELS ++ ++config MTK_HSDMA ++ tristate "MTK HSDMA support" ++ depends on RALINK && SOC_MT7621 ++ select DMA_ENGINE ++ select DMA_VIRTUAL_CHANNELS ++ + config DMA_ENGINE + bool + +--- a/drivers/dma/Makefile ++++ b/drivers/dma/Makefile +@@ -71,6 +71,8 @@ obj-$(CONFIG_TI_EDMA) += edma.o + obj-$(CONFIG_XGENE_DMA) += xgene-dma.o + obj-$(CONFIG_ZX_DMA) += zx_dma.o + obj-$(CONFIG_ST_FDMA) += st_fdma.o ++obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o ++obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o + + obj-y += qcom/ + obj-y += xilinx/ +--- /dev/null ++++ b/drivers/dma/ralink-gdma.c +@@ -0,0 +1,928 @@ ++/* ++ * Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de> ++ * GDMA4740 DMAC support ++ * ++ * This program is free software; you can redistribute it 
and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ */ ++ ++#include <linux/dmaengine.h> ++#include <linux/dma-mapping.h> ++#include <linux/err.h> ++#include <linux/init.h> ++#include <linux/list.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/spinlock.h> ++#include <linux/irq.h> ++#include <linux/of_dma.h> ++#include <linux/reset.h> ++#include <linux/of_device.h> ++ ++#include "virt-dma.h" ++ ++#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10) ++#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10) ++ ++#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10) ++#define GDMA_REG_CTRL0_TX_MASK 0xffff ++#define GDMA_REG_CTRL0_TX_SHIFT 16 ++#define GDMA_REG_CTRL0_CURR_MASK 0xff ++#define GDMA_REG_CTRL0_CURR_SHIFT 8 ++#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7) ++#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6) ++#define GDMA_REG_CTRL0_BURST_MASK 0x7 ++#define GDMA_REG_CTRL0_BURST_SHIFT 3 ++#define GDMA_REG_CTRL0_DONE_INT BIT(2) ++#define GDMA_REG_CTRL0_ENABLE BIT(1) ++#define GDMA_REG_CTRL0_SW_MODE BIT(0) ++ ++#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10) ++#define GDMA_REG_CTRL1_SEG_MASK 0xf ++#define GDMA_REG_CTRL1_SEG_SHIFT 22 ++#define GDMA_REG_CTRL1_REQ_MASK 0x3f ++#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16 ++#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8 ++#define GDMA_REG_CTRL1_CONTINOUS BIT(14) ++#define GDMA_REG_CTRL1_NEXT_MASK 0x1f ++#define GDMA_REG_CTRL1_NEXT_SHIFT 3 ++#define GDMA_REG_CTRL1_COHERENT BIT(2) ++#define GDMA_REG_CTRL1_FAIL BIT(1) ++#define GDMA_REG_CTRL1_MASK BIT(0) ++ ++#define GDMA_REG_UNMASK_INT 0x200 ++#define GDMA_REG_DONE_INT 0x204 ++ ++#define GDMA_REG_GCT 0x220 ++#define GDMA_REG_GCT_CHAN_MASK 0x3 ++#define GDMA_REG_GCT_CHAN_SHIFT 3 ++#define GDMA_REG_GCT_VER_MASK 0x3 ++#define GDMA_REG_GCT_VER_SHIFT 1 ++#define GDMA_REG_GCT_ARBIT_RR BIT(0) ++ ++#define GDMA_REG_REQSTS 0x2a0 ++#define GDMA_REG_ACKSTS 0x2a4 ++#define GDMA_REG_FINSTS 0x2a8 ++ ++/* for RT305X gdma registers */ ++#define GDMA_RT305X_CTRL0_REQ_MASK 0xf ++#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT 12 ++#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT 8 ++ ++#define GDMA_RT305X_CTRL1_FAIL BIT(4) ++#define GDMA_RT305X_CTRL1_NEXT_MASK 0x7 ++#define GDMA_RT305X_CTRL1_NEXT_SHIFT 1 ++ ++#define GDMA_RT305X_STATUS_INT 0x80 ++#define GDMA_RT305X_STATUS_SIGNAL 0x84 ++#define GDMA_RT305X_GCT 0x88 ++ ++/* for MT7621 gdma registers */ ++#define GDMA_REG_PERF_START(x) (0x230 + (x) * 0x8) ++#define GDMA_REG_PERF_END(x) (0x234 + (x) * 0x8) ++ ++enum gdma_dma_transfer_size { ++ GDMA_TRANSFER_SIZE_4BYTE = 0, ++ GDMA_TRANSFER_SIZE_8BYTE = 1, ++ GDMA_TRANSFER_SIZE_16BYTE = 2, ++ GDMA_TRANSFER_SIZE_32BYTE = 3, ++ GDMA_TRANSFER_SIZE_64BYTE = 4, ++}; ++ ++struct gdma_dma_sg { ++ dma_addr_t src_addr; ++ dma_addr_t dst_addr; ++ u32 len; ++}; ++ ++struct gdma_dma_desc { ++ struct virt_dma_desc vdesc; ++ ++ enum dma_transfer_direction direction; ++ bool cyclic; ++ ++ u32 residue; ++ unsigned int num_sgs; ++ struct gdma_dma_sg sg[]; ++}; ++ ++struct gdma_dmaengine_chan { ++ struct virt_dma_chan vchan; ++ unsigned int id; ++ unsigned int slave_id; ++ ++ dma_addr_t fifo_addr; ++ enum gdma_dma_transfer_size burst_size; ++ ++ struct gdma_dma_desc *desc; ++ unsigned int next_sg; ++}; ++ ++struct gdma_dma_dev { ++ struct dma_device ddev; ++ struct device_dma_parameters dma_parms; ++ struct gdma_data *data; ++ void __iomem *base; ++ struct tasklet_struct 
task; ++ volatile unsigned long chan_issued; ++ atomic_t cnt; ++ ++ struct gdma_dmaengine_chan chan[]; ++}; ++ ++struct gdma_data ++{ ++ int chancnt; ++ u32 done_int_reg; ++ void (*init)(struct gdma_dma_dev *dma_dev); ++ int (*start_transfer)(struct gdma_dmaengine_chan *chan); ++}; ++ ++static struct gdma_dma_dev *gdma_dma_chan_get_dev( ++ struct gdma_dmaengine_chan *chan) ++{ ++ return container_of(chan->vchan.chan.device, struct gdma_dma_dev, ++ ddev); ++} ++ ++static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c) ++{ ++ return container_of(c, struct gdma_dmaengine_chan, vchan.chan); ++} ++ ++static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc) ++{ ++ return container_of(vdesc, struct gdma_dma_desc, vdesc); ++} ++ ++static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev, ++ unsigned int reg) ++{ ++ return readl(dma_dev->base + reg); ++} ++ ++static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev, ++ unsigned reg, uint32_t val) ++{ ++ writel(val, dma_dev->base + reg); ++} ++ ++static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs) ++{ ++ return kzalloc(sizeof(struct gdma_dma_desc) + ++ sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC); ++} ++ ++static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst) ++{ ++ if (maxburst < 2) ++ return GDMA_TRANSFER_SIZE_4BYTE; ++ else if (maxburst < 4) ++ return GDMA_TRANSFER_SIZE_8BYTE; ++ else if (maxburst < 8) ++ return GDMA_TRANSFER_SIZE_16BYTE; ++ else if (maxburst < 16) ++ return GDMA_TRANSFER_SIZE_32BYTE; ++ else ++ return GDMA_TRANSFER_SIZE_64BYTE; ++} ++ ++static int gdma_dma_config(struct dma_chan *c, ++ struct dma_slave_config *config) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ ++ if (config->device_fc) { ++ dev_err(dma_dev->ddev.dev, "not support flow controller\n"); ++ return -EINVAL; ++ } ++ ++ switch (config->direction) { ++ case DMA_MEM_TO_DEV: ++ if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) { ++ dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n"); ++ return -EINVAL; ++ } ++ chan->slave_id = config->slave_id; ++ chan->fifo_addr = config->dst_addr; ++ chan->burst_size = gdma_dma_maxburst(config->dst_maxburst); ++ break; ++ case DMA_DEV_TO_MEM: ++ if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) { ++ dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n"); ++ return -EINVAL; ++ } ++ chan->slave_id = config->slave_id; ++ chan->fifo_addr = config->src_addr; ++ chan->burst_size = gdma_dma_maxburst(config->src_maxburst); ++ break; ++ default: ++ dev_err(dma_dev->ddev.dev, "direction type %d error\n", ++ config->direction); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int gdma_dma_terminate_all(struct dma_chan *c) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ unsigned long flags, timeout; ++ LIST_HEAD(head); ++ int i = 0; ++ ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ chan->desc = NULL; ++ clear_bit(chan->id, &dma_dev->chan_issued); ++ vchan_get_all_descriptors(&chan->vchan, &head); ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++ ++ vchan_dma_desc_free_list(&chan->vchan, &head); ++ ++ /* wait dma transfer complete */ ++ timeout = jiffies + msecs_to_jiffies(5000); ++ while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) & ++ GDMA_REG_CTRL0_ENABLE) { ++ if (time_after_eq(jiffies, timeout)) { ++ dev_err(dma_dev->ddev.dev, "chan %d wait 
timeout\n", ++ chan->id); ++ /* restore to init value */ ++ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0); ++ break; ++ } ++ cpu_relax(); ++ i++; ++ } ++ ++ if (i) ++ dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n", ++ chan->id, i); ++ ++ return 0; ++} ++ ++static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id) ++{ ++ dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \ ++ "ctr1 %08x, intr %08x, signal %08x\n", id, ++ gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)), ++ gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT), ++ gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL)); ++} ++ ++static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan) ++{ ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ dma_addr_t src_addr, dst_addr; ++ struct gdma_dma_sg *sg; ++ uint32_t ctrl0, ctrl1; ++ ++ /* verify chan is already stopped */ ++ ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)); ++ if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) { ++ dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n", ++ chan->id, ctrl0); ++ rt305x_dump_reg(dma_dev, chan->id); ++ return -EINVAL; ++ } ++ ++ sg = &chan->desc->sg[chan->next_sg]; ++ if (chan->desc->direction == DMA_MEM_TO_DEV) { ++ src_addr = sg->src_addr; ++ dst_addr = chan->fifo_addr; ++ ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED | \ ++ (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \ ++ (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT); ++ } else if (chan->desc->direction == DMA_DEV_TO_MEM) { ++ src_addr = chan->fifo_addr; ++ dst_addr = sg->dst_addr; ++ ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED | \ ++ (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) | \ ++ (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT); ++ } else if (chan->desc->direction == DMA_MEM_TO_MEM) { ++ /* ++ * TODO: memcpy function have bugs. sometime it will copy ++ * more 8 bytes data when using dmatest verify. 
++ */ ++ src_addr = sg->src_addr; ++ dst_addr = sg->dst_addr; ++ ctrl0 = GDMA_REG_CTRL0_SW_MODE | \ ++ (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \ ++ (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT); ++ } else { ++ dev_err(dma_dev->ddev.dev, "direction type %d error\n", ++ chan->desc->direction); ++ return -EINVAL; ++ } ++ ++ ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \ ++ (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \ ++ GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE; ++ ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT; ++ ++ chan->next_sg++; ++ gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr); ++ gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr); ++ gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1); ++ ++ /* make sure next_sg is update */ ++ wmb(); ++ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0); ++ ++ return 0; ++} ++ ++static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id) ++{ ++ dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, " \ ++ "ctr1 %08x, unmask %08x, done %08x, " \ ++ "req %08x, ack %08x, fin %08x\n", id, ++ gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)), ++ gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT), ++ gdma_dma_read(dma_dev, GDMA_REG_DONE_INT), ++ gdma_dma_read(dma_dev, GDMA_REG_REQSTS), ++ gdma_dma_read(dma_dev, GDMA_REG_ACKSTS), ++ gdma_dma_read(dma_dev, GDMA_REG_FINSTS)); ++} ++ ++static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan) ++{ ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ dma_addr_t src_addr, dst_addr; ++ struct gdma_dma_sg *sg; ++ uint32_t ctrl0, ctrl1; ++ ++ /* verify chan is already stopped */ ++ ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)); ++ if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) { ++ dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n", ++ chan->id, ctrl0); ++ rt3883_dump_reg(dma_dev, chan->id); ++ return -EINVAL; ++ } ++ ++ sg = &chan->desc->sg[chan->next_sg]; ++ if (chan->desc->direction == DMA_MEM_TO_DEV) { ++ src_addr = sg->src_addr; ++ dst_addr = chan->fifo_addr; ++ ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED; ++ ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \ ++ (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT); ++ } else if (chan->desc->direction == DMA_DEV_TO_MEM) { ++ src_addr = chan->fifo_addr; ++ dst_addr = sg->dst_addr; ++ ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED; ++ ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \ ++ (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \ ++ GDMA_REG_CTRL1_COHERENT; ++ } else if (chan->desc->direction == DMA_MEM_TO_MEM) { ++ src_addr = sg->src_addr; ++ dst_addr = sg->dst_addr; ++ ctrl0 = GDMA_REG_CTRL0_SW_MODE; ++ ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) | \ ++ (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) | \ ++ GDMA_REG_CTRL1_COHERENT; ++ } else { ++ dev_err(dma_dev->ddev.dev, "direction type %d error\n", ++ chan->desc->direction); ++ return -EINVAL; ++ } ++ ++ ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | \ ++ (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) | \ ++ GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE; ++ ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT; ++ ++ chan->next_sg++; ++ gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr); ++ gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr); ++ gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1); ++ ++ /* make sure next_sg is update */ ++ wmb(); ++ gdma_dma_write(dma_dev, 
GDMA_REG_CTRL0(chan->id), ctrl0); ++ ++ return 0; ++} ++ ++static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev, ++ struct gdma_dmaengine_chan *chan) ++{ ++ return dma_dev->data->start_transfer(chan); ++} ++ ++static int gdma_next_desc(struct gdma_dmaengine_chan *chan) ++{ ++ struct virt_dma_desc *vdesc; ++ ++ vdesc = vchan_next_desc(&chan->vchan); ++ if (!vdesc) { ++ chan->desc = NULL; ++ return 0; ++ } ++ chan->desc = to_gdma_dma_desc(vdesc); ++ chan->next_sg = 0; ++ ++ return 1; ++} ++ ++static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev, ++ struct gdma_dmaengine_chan *chan) ++{ ++ struct gdma_dma_desc *desc; ++ unsigned long flags; ++ int chan_issued; ++ ++ chan_issued = 0; ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ desc = chan->desc; ++ if (desc) { ++ if (desc->cyclic) { ++ vchan_cyclic_callback(&desc->vdesc); ++ if (chan->next_sg == desc->num_sgs) ++ chan->next_sg = 0; ++ chan_issued = 1; ++ } else { ++ desc->residue -= desc->sg[chan->next_sg - 1].len; ++ if (chan->next_sg == desc->num_sgs) { ++ list_del(&desc->vdesc.node); ++ vchan_cookie_complete(&desc->vdesc); ++ chan_issued = gdma_next_desc(chan); ++ } else ++ chan_issued = 1; ++ } ++ } else ++ dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n", ++ chan->id); ++ if (chan_issued) ++ set_bit(chan->id, &dma_dev->chan_issued); ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++} ++ ++static irqreturn_t gdma_dma_irq(int irq, void *devid) ++{ ++ struct gdma_dma_dev *dma_dev = devid; ++ u32 done, done_reg; ++ unsigned int i; ++ ++ done_reg = dma_dev->data->done_int_reg; ++ done = gdma_dma_read(dma_dev, done_reg); ++ if (unlikely(!done)) ++ return IRQ_NONE; ++ ++ /* clean done bits */ ++ gdma_dma_write(dma_dev, done_reg, done); ++ ++ i = 0; ++ while (done) { ++ if (done & 0x1) { ++ gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]); ++ atomic_dec(&dma_dev->cnt); ++ } ++ done >>= 1; ++ i++; ++ } ++ ++ /* start only have work to do */ ++ if (dma_dev->chan_issued) ++ tasklet_schedule(&dma_dev->task); ++ ++ return IRQ_HANDLED; ++} ++ ++static void gdma_dma_issue_pending(struct dma_chan *c) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) { ++ if (gdma_next_desc(chan)) { ++ set_bit(chan->id, &dma_dev->chan_issued); ++ tasklet_schedule(&dma_dev->task); ++ } else ++ dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n", ++ chan->id); ++ } ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++} ++ ++static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg( ++ struct dma_chan *c, struct scatterlist *sgl, ++ unsigned int sg_len, enum dma_transfer_direction direction, ++ unsigned long flags, void *context) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_desc *desc; ++ struct scatterlist *sg; ++ unsigned int i; ++ ++ desc = gdma_dma_alloc_desc(sg_len); ++ if (!desc) { ++ dev_err(c->device->dev, "alloc sg decs error\n"); ++ return NULL; ++ } ++ desc->residue = 0; ++ ++ for_each_sg(sgl, sg, sg_len, i) { ++ if (direction == DMA_MEM_TO_DEV) ++ desc->sg[i].src_addr = sg_dma_address(sg); ++ else if (direction == DMA_DEV_TO_MEM) ++ desc->sg[i].dst_addr = sg_dma_address(sg); ++ else { ++ dev_err(c->device->dev, "direction type %d error\n", ++ direction); ++ goto free_desc; ++ } ++ ++ if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) { ++ dev_err(c->device->dev, "sg len 
too large %d\n", ++ sg_dma_len(sg)); ++ goto free_desc; ++ } ++ desc->sg[i].len = sg_dma_len(sg); ++ desc->residue += sg_dma_len(sg); ++ } ++ ++ desc->num_sgs = sg_len; ++ desc->direction = direction; ++ desc->cyclic = false; ++ ++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ++ ++free_desc: ++ kfree(desc); ++ return NULL; ++} ++ ++static struct dma_async_tx_descriptor * gdma_dma_prep_dma_memcpy( ++ struct dma_chan *c, dma_addr_t dest, dma_addr_t src, ++ size_t len, unsigned long flags) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_desc *desc; ++ unsigned int num_periods, i; ++ size_t xfer_count; ++ ++ if (len <= 0) ++ return NULL; ++ ++ chan->burst_size = gdma_dma_maxburst(len >> 2); ++ ++ xfer_count = GDMA_REG_CTRL0_TX_MASK; ++ num_periods = DIV_ROUND_UP(len, xfer_count); ++ ++ desc = gdma_dma_alloc_desc(num_periods); ++ if (!desc) { ++ dev_err(c->device->dev, "alloc memcpy decs error\n"); ++ return NULL; ++ } ++ desc->residue = len; ++ ++ for (i = 0; i < num_periods; i++) { ++ desc->sg[i].src_addr = src; ++ desc->sg[i].dst_addr = dest; ++ if (len > xfer_count) { ++ desc->sg[i].len = xfer_count; ++ } else { ++ desc->sg[i].len = len; ++ } ++ src += desc->sg[i].len; ++ dest += desc->sg[i].len; ++ len -= desc->sg[i].len; ++ } ++ ++ desc->num_sgs = num_periods; ++ desc->direction = DMA_MEM_TO_MEM; ++ desc->cyclic = false; ++ ++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ++} ++ ++static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic( ++ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, ++ size_t period_len, enum dma_transfer_direction direction, ++ unsigned long flags) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_desc *desc; ++ unsigned int num_periods, i; ++ ++ if (buf_len % period_len) ++ return NULL; ++ ++ if (period_len > GDMA_REG_CTRL0_TX_MASK) { ++ dev_err(c->device->dev, "cyclic len too large %d\n", ++ period_len); ++ return NULL; ++ } ++ ++ num_periods = buf_len / period_len; ++ desc = gdma_dma_alloc_desc(num_periods); ++ if (!desc) { ++ dev_err(c->device->dev, "alloc cyclic decs error\n"); ++ return NULL; ++ } ++ desc->residue = buf_len; ++ ++ for (i = 0; i < num_periods; i++) { ++ if (direction == DMA_MEM_TO_DEV) ++ desc->sg[i].src_addr = buf_addr; ++ else if (direction == DMA_DEV_TO_MEM) ++ desc->sg[i].dst_addr = buf_addr; ++ else { ++ dev_err(c->device->dev, "direction type %d error\n", ++ direction); ++ goto free_desc; ++ } ++ desc->sg[i].len = period_len; ++ buf_addr += period_len; ++ } ++ ++ desc->num_sgs = num_periods; ++ desc->direction = direction; ++ desc->cyclic = true; ++ ++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ++ ++free_desc: ++ kfree(desc); ++ return NULL; ++} ++ ++static enum dma_status gdma_dma_tx_status(struct dma_chan *c, ++ dma_cookie_t cookie, struct dma_tx_state *state) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct virt_dma_desc *vdesc; ++ enum dma_status status; ++ unsigned long flags; ++ struct gdma_dma_desc *desc; ++ ++ status = dma_cookie_status(c, cookie, state); ++ if (status == DMA_COMPLETE || !state) ++ return status; ++ ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ desc = chan->desc; ++ if (desc && (cookie == desc->vdesc.tx.cookie)) { ++ /* ++ * We never update edesc->residue in the cyclic case, so we ++ * can tell the remaining room to the end of the circular ++ * buffer. 
++ */ ++ if (desc->cyclic) ++ state->residue = desc->residue - ++ ((chan->next_sg - 1) * desc->sg[0].len); ++ else ++ state->residue = desc->residue; ++ } else if ((vdesc = vchan_find_desc(&chan->vchan, cookie))) ++ state->residue = to_gdma_dma_desc(vdesc)->residue; ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++ ++ dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue); ++ ++ return status; ++} ++ ++static void gdma_dma_free_chan_resources(struct dma_chan *c) ++{ ++ vchan_free_chan_resources(to_virt_chan(c)); ++} ++ ++static void gdma_dma_desc_free(struct virt_dma_desc *vdesc) ++{ ++ kfree(container_of(vdesc, struct gdma_dma_desc, vdesc)); ++} ++ ++static void gdma_dma_tasklet(unsigned long arg) ++{ ++ struct gdma_dma_dev *dma_dev = (struct gdma_dma_dev *)arg; ++ struct gdma_dmaengine_chan *chan; ++ static unsigned int last_chan; ++ unsigned int i, chan_mask; ++ ++ /* record last chan to round robin all chans */ ++ i = last_chan; ++ chan_mask = dma_dev->data->chancnt - 1; ++ do { ++ /* ++ * on mt7621. when verify with dmatest with all ++ * channel is enable. we need to limit only two ++ * channel is working at the same time. otherwise the ++ * data will have problem. ++ */ ++ if (atomic_read(&dma_dev->cnt) >= 2) { ++ last_chan = i; ++ break; ++ } ++ ++ if (test_and_clear_bit(i, &dma_dev->chan_issued)) { ++ chan = &dma_dev->chan[i]; ++ if (chan->desc) { ++ atomic_inc(&dma_dev->cnt); ++ gdma_start_transfer(dma_dev, chan); ++ } else ++ dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n", chan->id); ++ ++ if (!dma_dev->chan_issued) ++ break; ++ } ++ ++ i = (i + 1) & chan_mask; ++ } while (i != last_chan); ++} ++ ++static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev) ++{ ++ uint32_t gct; ++ ++ /* all chans round robin */ ++ gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR); ++ ++ gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT); ++ dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n", ++ (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK, ++ 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & ++ GDMA_REG_GCT_CHAN_MASK)); ++} ++ ++static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev) ++{ ++ uint32_t gct; ++ ++ /* all chans round robin */ ++ gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR); ++ ++ gct = gdma_dma_read(dma_dev, GDMA_REG_GCT); ++ dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n", ++ (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK, ++ 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & ++ GDMA_REG_GCT_CHAN_MASK)); ++} ++ ++static struct gdma_data rt305x_gdma_data = { ++ .chancnt = 8, ++ .done_int_reg = GDMA_RT305X_STATUS_INT, ++ .init = rt305x_gdma_init, ++ .start_transfer = rt305x_gdma_start_transfer, ++}; ++ ++static struct gdma_data rt3883_gdma_data = { ++ .chancnt = 16, ++ .done_int_reg = GDMA_REG_DONE_INT, ++ .init = rt3883_gdma_init, ++ .start_transfer = rt3883_gdma_start_transfer, ++}; ++ ++static const struct of_device_id gdma_of_match_table[] = { ++ { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data }, ++ { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data }, ++ { }, ++}; ++ ++static int gdma_dma_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct gdma_dmaengine_chan *chan; ++ struct gdma_dma_dev *dma_dev; ++ struct dma_device *dd; ++ unsigned int i; ++ struct resource *res; ++ int ret; ++ int irq; ++ void __iomem *base; ++ struct gdma_data *data; ++ ++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ if (ret) ++ return ret; ++ ++ match = 
of_match_device(gdma_of_match_table, &pdev->dev); ++ if (!match) ++ return -EINVAL; ++ data = (struct gdma_data *) match->data; ++ ++ dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev) + ++ (sizeof(struct gdma_dmaengine_chan) * data->chancnt), ++ GFP_KERNEL); ++ if (!dma_dev) { ++ dev_err(&pdev->dev, "alloc dma device failed\n"); ++ return -EINVAL; ++ } ++ dma_dev->data = data; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ dma_dev->base = base; ++ tasklet_init(&dma_dev->task, gdma_dma_tasklet, (unsigned long)dma_dev); ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "failed to get irq\n"); ++ return -EINVAL; ++ } ++ ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq, ++ 0, dev_name(&pdev->dev), dma_dev); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request irq\n"); ++ return ret; ++ } ++ ++ device_reset(&pdev->dev); ++ ++ dd = &dma_dev->ddev; ++ dma_cap_set(DMA_MEMCPY, dd->cap_mask); ++ dma_cap_set(DMA_SLAVE, dd->cap_mask); ++ dma_cap_set(DMA_CYCLIC, dd->cap_mask); ++ dd->device_free_chan_resources = gdma_dma_free_chan_resources; ++ dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy; ++ dd->device_prep_slave_sg = gdma_dma_prep_slave_sg; ++ dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic; ++ dd->device_config = gdma_dma_config; ++ dd->device_terminate_all = gdma_dma_terminate_all; ++ dd->device_tx_status = gdma_dma_tx_status; ++ dd->device_issue_pending = gdma_dma_issue_pending; ++ ++ dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); ++ dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); ++ dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ++ dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; ++ ++ dd->dev = &pdev->dev; ++ dd->dev->dma_parms = &dma_dev->dma_parms; ++ dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK); ++ INIT_LIST_HEAD(&dd->channels); ++ ++ for (i = 0; i < data->chancnt; i++) { ++ chan = &dma_dev->chan[i]; ++ chan->id = i; ++ chan->vchan.desc_free = gdma_dma_desc_free; ++ vchan_init(&chan->vchan, dd); ++ } ++ ++ /* init hardware */ ++ data->init(dma_dev); ++ ++ ret = dma_async_device_register(dd); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to register dma device\n"); ++ return ret; ++ } ++ ++ ret = of_dma_controller_register(pdev->dev.of_node, ++ of_dma_xlate_by_chan_id, dma_dev); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to register of dma controller\n"); ++ goto err_unregister; ++ } ++ ++ platform_set_drvdata(pdev, dma_dev); ++ ++ return 0; ++ ++err_unregister: ++ dma_async_device_unregister(dd); ++ return ret; ++} ++ ++static int gdma_dma_remove(struct platform_device *pdev) ++{ ++ struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev); ++ ++ tasklet_kill(&dma_dev->task); ++ of_dma_controller_free(pdev->dev.of_node); ++ dma_async_device_unregister(&dma_dev->ddev); ++ ++ return 0; ++} ++ ++static struct platform_driver gdma_dma_driver = { ++ .probe = gdma_dma_probe, ++ .remove = gdma_dma_remove, ++ .driver = { ++ .name = "gdma-rt2880", ++ .of_match_table = gdma_of_match_table, ++ }, ++}; ++module_platform_driver(gdma_dma_driver); ++ ++MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); ++MODULE_DESCRIPTION("Ralink/MTK DMA driver"); ++MODULE_LICENSE("GPL v2"); +--- a/include/linux/dmaengine.h ++++ b/include/linux/dmaengine.h +@@ -525,6 +525,7 @@ static inline void dma_set_unmap(struct + struct dmaengine_unmap_data * + dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t 
flags); + void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap); ++struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); + #else + static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, + struct dmaengine_unmap_data *unmap) +--- /dev/null ++++ b/drivers/dma/mtk-hsdma.c +@@ -0,0 +1,767 @@ ++/* ++ * Copyright (C) 2015, Michael Lee <igvtee@gmail.com> ++ * MTK HSDMA support ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ */ ++ ++#include <linux/dmaengine.h> ++#include <linux/dma-mapping.h> ++#include <linux/err.h> ++#include <linux/init.h> ++#include <linux/list.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/spinlock.h> ++#include <linux/irq.h> ++#include <linux/of_dma.h> ++#include <linux/reset.h> ++#include <linux/of_device.h> ++ ++#include "virt-dma.h" ++ ++#define HSDMA_BASE_OFFSET 0x800 ++ ++#define HSDMA_REG_TX_BASE 0x00 ++#define HSDMA_REG_TX_CNT 0x04 ++#define HSDMA_REG_TX_CTX 0x08 ++#define HSDMA_REG_TX_DTX 0x0c ++#define HSDMA_REG_RX_BASE 0x100 ++#define HSDMA_REG_RX_CNT 0x104 ++#define HSDMA_REG_RX_CRX 0x108 ++#define HSDMA_REG_RX_DRX 0x10c ++#define HSDMA_REG_INFO 0x200 ++#define HSDMA_REG_GLO_CFG 0x204 ++#define HSDMA_REG_RST_CFG 0x208 ++#define HSDMA_REG_DELAY_INT 0x20c ++#define HSDMA_REG_FREEQ_THRES 0x210 ++#define HSDMA_REG_INT_STATUS 0x220 ++#define HSDMA_REG_INT_MASK 0x228 ++#define HSDMA_REG_SCH_Q01 0x280 ++#define HSDMA_REG_SCH_Q23 0x284 ++ ++#define HSDMA_DESCS_MAX 0xfff ++#define HSDMA_DESCS_NUM 8 ++#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1) ++#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK) ++ ++/* HSDMA_REG_INFO */ ++#define HSDMA_INFO_INDEX_MASK 0xf ++#define HSDMA_INFO_INDEX_SHIFT 24 ++#define HSDMA_INFO_BASE_MASK 0xff ++#define HSDMA_INFO_BASE_SHIFT 16 ++#define HSDMA_INFO_RX_MASK 0xff ++#define HSDMA_INFO_RX_SHIFT 8 ++#define HSDMA_INFO_TX_MASK 0xff ++#define HSDMA_INFO_TX_SHIFT 0 ++ ++/* HSDMA_REG_GLO_CFG */ ++#define HSDMA_GLO_TX_2B_OFFSET BIT(31) ++#define HSDMA_GLO_CLK_GATE BIT(30) ++#define HSDMA_GLO_BYTE_SWAP BIT(29) ++#define HSDMA_GLO_MULTI_DMA BIT(10) ++#define HSDMA_GLO_TWO_BUF BIT(9) ++#define HSDMA_GLO_32B_DESC BIT(8) ++#define HSDMA_GLO_BIG_ENDIAN BIT(7) ++#define HSDMA_GLO_TX_DONE BIT(6) ++#define HSDMA_GLO_BT_MASK 0x3 ++#define HSDMA_GLO_BT_SHIFT 4 ++#define HSDMA_GLO_RX_BUSY BIT(3) ++#define HSDMA_GLO_RX_DMA BIT(2) ++#define HSDMA_GLO_TX_BUSY BIT(1) ++#define HSDMA_GLO_TX_DMA BIT(0) ++ ++#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT) ++#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT) ++#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT) ++#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT) ++ ++#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \ ++ HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES) ++ ++/* HSDMA_REG_RST_CFG */ ++#define HSDMA_RST_RX_SHIFT 16 ++#define HSDMA_RST_TX_SHIFT 0 ++ ++/* HSDMA_REG_DELAY_INT */ ++#define HSDMA_DELAY_INT_EN BIT(15) ++#define HSDMA_DELAY_PEND_OFFSET 8 ++#define HSDMA_DELAY_TIME_OFFSET 0 ++#define HSDMA_DELAY_TX_OFFSET 16 ++#define HSDMA_DELAY_RX_OFFSET 0 ++ ++#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \ ++ ((x) << HSDMA_DELAY_PEND_OFFSET)) ++#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \ ++ HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x)) ++ 
++/* HSDMA_REG_INT_STATUS */ ++#define HSDMA_INT_DELAY_RX_COH BIT(31) ++#define HSDMA_INT_DELAY_RX_INT BIT(30) ++#define HSDMA_INT_DELAY_TX_COH BIT(29) ++#define HSDMA_INT_DELAY_TX_INT BIT(28) ++#define HSDMA_INT_RX_MASK 0x3 ++#define HSDMA_INT_RX_SHIFT 16 ++#define HSDMA_INT_RX_Q0 BIT(16) ++#define HSDMA_INT_TX_MASK 0xf ++#define HSDMA_INT_TX_SHIFT 0 ++#define HSDMA_INT_TX_Q0 BIT(0) ++ ++/* tx/rx dma desc flags */ ++#define HSDMA_PLEN_MASK 0x3fff ++#define HSDMA_DESC_DONE BIT(31) ++#define HSDMA_DESC_LS0 BIT(30) ++#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16) ++#define HSDMA_DESC_TAG BIT(15) ++#define HSDMA_DESC_LS1 BIT(14) ++#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK) ++ ++/* align 4 bytes */ ++#define HSDMA_ALIGN_SIZE 3 ++/* align size 128bytes */ ++#define HSDMA_MAX_PLEN 0x3f80 ++ ++struct hsdma_desc { ++ u32 addr0; ++ u32 flags; ++ u32 addr1; ++ u32 unused; ++}; ++ ++struct mtk_hsdma_sg { ++ dma_addr_t src_addr; ++ dma_addr_t dst_addr; ++ u32 len; ++}; ++ ++struct mtk_hsdma_desc { ++ struct virt_dma_desc vdesc; ++ unsigned int num_sgs; ++ struct mtk_hsdma_sg sg[1]; ++}; ++ ++struct mtk_hsdma_chan { ++ struct virt_dma_chan vchan; ++ unsigned int id; ++ dma_addr_t desc_addr; ++ int tx_idx; ++ int rx_idx; ++ struct hsdma_desc *tx_ring; ++ struct hsdma_desc *rx_ring; ++ struct mtk_hsdma_desc *desc; ++ unsigned int next_sg; ++}; ++ ++struct mtk_hsdam_engine { ++ struct dma_device ddev; ++ struct device_dma_parameters dma_parms; ++ void __iomem *base; ++ struct tasklet_struct task; ++ volatile unsigned long chan_issued; ++ ++ struct mtk_hsdma_chan chan[1]; ++}; ++ ++static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev( ++ struct mtk_hsdma_chan *chan) ++{ ++ return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine, ++ ddev); ++} ++ ++static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c) ++{ ++ return container_of(c, struct mtk_hsdma_chan, vchan.chan); ++} ++ ++static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc( ++ struct virt_dma_desc *vdesc) ++{ ++ return container_of(vdesc, struct mtk_hsdma_desc, vdesc); ++} ++ ++static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg) ++{ ++ return readl(hsdma->base + reg); ++} ++ ++static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma, ++ unsigned reg, u32 val) ++{ ++ writel(val, hsdma->base + reg); ++} ++ ++static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ chan->tx_idx = 0; ++ chan->rx_idx = HSDMA_DESCS_NUM - 1; ++ ++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx); ++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx); ++ ++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG, ++ 0x1 << (chan->id + HSDMA_RST_TX_SHIFT)); ++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG, ++ 0x1 << (chan->id + HSDMA_RST_RX_SHIFT)); ++} ++ ++static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma) ++{ ++ dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \ ++ "tctx %08x, tdtx: %08x, rbase %08x, " \ ++ "rcnt %08x, rctx %08x, rdtx %08x\n", ++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE), ++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT), ++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX), ++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX), ++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE), ++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT), ++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX), ++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX)); ++ ++ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \ ++ "intr_stat %08x, intr_mask %08x\n", ++ mtk_hsdma_read(hsdma, 
HSDMA_REG_INFO), ++ mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG), ++ mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT), ++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS), ++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK)); ++} ++ ++static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ struct hsdma_desc *tx_desc; ++ struct hsdma_desc *rx_desc; ++ int i; ++ ++ dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n", ++ chan->tx_idx, chan->rx_idx); ++ ++ for (i = 0; i < HSDMA_DESCS_NUM; i++) { ++ tx_desc = &chan->tx_ring[i]; ++ rx_desc = &chan->rx_ring[i]; ++ ++ dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \ ++ "tx addr1: %08x, rx addr0 %08x, flags %08x\n", ++ i, tx_desc->addr0, tx_desc->flags, \ ++ tx_desc->addr1, rx_desc->addr0, rx_desc->flags); ++ } ++} ++ ++static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ int i; ++ ++ /* disable dma */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0); ++ ++ /* disable intr */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0); ++ ++ /* init desc value */ ++ for (i = 0; i < HSDMA_DESCS_NUM; i++) { ++ chan->tx_ring[i].addr0 = 0; ++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | ++ HSDMA_DESC_DONE; ++ } ++ for (i = 0; i < HSDMA_DESCS_NUM; i++) { ++ chan->rx_ring[i].addr0 = 0; ++ chan->rx_ring[i].flags = 0; ++ } ++ ++ /* reset */ ++ mtk_hsdma_reset_chan(hsdma, chan); ++ ++ /* enable intr */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0); ++ ++ /* enable dma */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT); ++} ++ ++static int mtk_hsdma_terminate_all(struct dma_chan *c) ++{ ++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); ++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan); ++ unsigned long timeout; ++ LIST_HEAD(head); ++ ++ spin_lock_bh(&chan->vchan.lock); ++ chan->desc = NULL; ++ clear_bit(chan->id, &hsdma->chan_issued); ++ vchan_get_all_descriptors(&chan->vchan, &head); ++ spin_unlock_bh(&chan->vchan.lock); ++ ++ vchan_dma_desc_free_list(&chan->vchan, &head); ++ ++ /* wait dma transfer complete */ ++ timeout = jiffies + msecs_to_jiffies(2000); ++ while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) & ++ (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) { ++ if (time_after_eq(jiffies, timeout)) { ++ hsdma_dump_desc(hsdma, chan); ++ mtk_hsdma_reset(hsdma, chan); ++ dev_err(hsdma->ddev.dev, "timeout, reset it\n"); ++ break; ++ } ++ cpu_relax(); ++ } ++ ++ return 0; ++} ++ ++static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ dma_addr_t src, dst; ++ size_t len, tlen; ++ struct hsdma_desc *tx_desc, *rx_desc; ++ struct mtk_hsdma_sg *sg; ++ unsigned int i; ++ int rx_idx; ++ ++ sg = &chan->desc->sg[0]; ++ len = sg->len; ++ chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN); ++ ++ /* tx desc */ ++ src = sg->src_addr; ++ for (i = 0; i < chan->desc->num_sgs; i++) { ++ if (len > HSDMA_MAX_PLEN) ++ tlen = HSDMA_MAX_PLEN; ++ else ++ tlen = len; ++ ++ if (i & 0x1) { ++ tx_desc->addr1 = src; ++ tx_desc->flags |= HSDMA_DESC_PLEN1(tlen); ++ } else { ++ tx_desc = &chan->tx_ring[chan->tx_idx]; ++ tx_desc->addr0 = src; ++ tx_desc->flags = HSDMA_DESC_PLEN0(tlen); ++ ++ /* update index */ ++ chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx); ++ } ++ ++ src += tlen; ++ len -= tlen; ++ } ++ if (i & 0x1) ++ tx_desc->flags |= HSDMA_DESC_LS0; ++ else ++ tx_desc->flags |= HSDMA_DESC_LS1; ++ ++ /* rx desc */ ++ rx_idx = HSDMA_NEXT_DESC(chan->rx_idx); ++ len = sg->len; ++ dst = sg->dst_addr; ++ for (i = 0; i < 
chan->desc->num_sgs; i++) { ++ rx_desc = &chan->rx_ring[rx_idx]; ++ if (len > HSDMA_MAX_PLEN) ++ tlen = HSDMA_MAX_PLEN; ++ else ++ tlen = len; ++ ++ rx_desc->addr0 = dst; ++ rx_desc->flags = HSDMA_DESC_PLEN0(tlen); ++ ++ dst += tlen; ++ len -= tlen; ++ ++ /* update index */ ++ rx_idx = HSDMA_NEXT_DESC(rx_idx); ++ } ++ ++ /* make sure desc and index all up to date */ ++ wmb(); ++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx); ++ ++ return 0; ++} ++ ++static int gdma_next_desc(struct mtk_hsdma_chan *chan) ++{ ++ struct virt_dma_desc *vdesc; ++ ++ vdesc = vchan_next_desc(&chan->vchan); ++ if (!vdesc) { ++ chan->desc = NULL; ++ return 0; ++ } ++ chan->desc = to_mtk_hsdma_desc(vdesc); ++ chan->next_sg = 0; ++ ++ return 1; ++} ++ ++static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ struct mtk_hsdma_desc *desc; ++ int chan_issued; ++ ++ chan_issued = 0; ++ spin_lock_bh(&chan->vchan.lock); ++ desc = chan->desc; ++ if (likely(desc)) { ++ if (chan->next_sg == desc->num_sgs) { ++ list_del(&desc->vdesc.node); ++ vchan_cookie_complete(&desc->vdesc); ++ chan_issued = gdma_next_desc(chan); ++ } ++ } else ++ dev_dbg(hsdma->ddev.dev, "no desc to complete\n"); ++ ++ if (chan_issued) ++ set_bit(chan->id, &hsdma->chan_issued); ++ spin_unlock_bh(&chan->vchan.lock); ++} ++ ++static irqreturn_t mtk_hsdma_irq(int irq, void *devid) ++{ ++ struct mtk_hsdam_engine *hsdma = devid; ++ u32 status; ++ ++ status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS); ++ if (unlikely(!status)) ++ return IRQ_NONE; ++ ++ if (likely(status & HSDMA_INT_RX_Q0)) ++ tasklet_schedule(&hsdma->task); ++ else ++ dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", ++ status); ++ /* clean intr bits */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status); ++ ++ return IRQ_HANDLED; ++} ++ ++static void mtk_hsdma_issue_pending(struct dma_chan *c) ++{ ++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); ++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan); ++ ++ spin_lock_bh(&chan->vchan.lock); ++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) { ++ if (gdma_next_desc(chan)) { ++ set_bit(chan->id, &hsdma->chan_issued); ++ tasklet_schedule(&hsdma->task); ++ } else ++ dev_dbg(hsdma->ddev.dev, "no desc to issue\n"); ++ } ++ spin_unlock_bh(&chan->vchan.lock); ++} ++ ++static struct dma_async_tx_descriptor * mtk_hsdma_prep_dma_memcpy( ++ struct dma_chan *c, dma_addr_t dest, dma_addr_t src, ++ size_t len, unsigned long flags) ++{ ++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c); ++ struct mtk_hsdma_desc *desc; ++ ++ if (len <= 0) ++ return NULL; ++ ++ desc = kzalloc(sizeof(struct mtk_hsdma_desc), GFP_ATOMIC); ++ if (!desc) { ++ dev_err(c->device->dev, "alloc memcpy decs error\n"); ++ return NULL; ++ } ++ ++ desc->sg[0].src_addr = src; ++ desc->sg[0].dst_addr = dest; ++ desc->sg[0].len = len; ++ ++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ++} ++ ++static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c, ++ dma_cookie_t cookie, struct dma_tx_state *state) ++{ ++ return dma_cookie_status(c, cookie, state); ++} ++ ++static void mtk_hsdma_free_chan_resources(struct dma_chan *c) ++{ ++ vchan_free_chan_resources(to_virt_chan(c)); ++} ++ ++static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc) ++{ ++ kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc)); ++} ++ ++static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma) ++{ ++ struct mtk_hsdma_chan *chan; ++ ++ if (test_and_clear_bit(0, &hsdma->chan_issued)) { ++ chan = &hsdma->chan[0]; ++ 
if (chan->desc) { ++ mtk_hsdma_start_transfer(hsdma, chan); ++ } else ++ dev_dbg(hsdma->ddev.dev,"chan 0 no desc to issue\n"); ++ } ++} ++ ++static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma) ++{ ++ struct mtk_hsdma_chan *chan; ++ int next_idx, drx_idx, cnt; ++ ++ chan = &hsdma->chan[0]; ++ next_idx = HSDMA_NEXT_DESC(chan->rx_idx); ++ drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX); ++ ++ cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK; ++ if (!cnt) ++ return; ++ ++ chan->next_sg += cnt; ++ chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK; ++ ++ /* update rx crx */ ++ wmb(); ++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx); ++ ++ mtk_hsdma_chan_done(hsdma, chan); ++} ++ ++static void mtk_hsdma_tasklet(unsigned long arg) ++{ ++ struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg; ++ ++ mtk_hsdma_rx(hsdma); ++ mtk_hsdma_tx(hsdma); ++} ++ ++static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ int i; ++ ++ chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev, ++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring), ++ &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO); ++ if (!chan->tx_ring) ++ goto no_mem; ++ ++ chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM]; ++ ++ /* init tx ring value */ ++ for (i = 0; i < HSDMA_DESCS_NUM; i++) ++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE; ++ ++ return 0; ++no_mem: ++ return -ENOMEM; ++} ++ ++static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma, ++ struct mtk_hsdma_chan *chan) ++{ ++ if (chan->tx_ring) { ++ dma_free_coherent(hsdma->ddev.dev, ++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring), ++ chan->tx_ring, chan->desc_addr); ++ chan->tx_ring = NULL; ++ chan->rx_ring = NULL; ++ } ++} ++ ++static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma) ++{ ++ struct mtk_hsdma_chan *chan; ++ int ret; ++ u32 reg; ++ ++ /* init desc */ ++ chan = &hsdma->chan[0]; ++ ret = mtk_hsdam_alloc_desc(hsdma, chan); ++ if (ret) ++ return ret; ++ ++ /* tx */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr); ++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM); ++ /* rx */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr + ++ (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM)); ++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM); ++ /* reset */ ++ mtk_hsdma_reset_chan(hsdma, chan); ++ ++ /* enable rx intr */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0); ++ ++ /* enable dma */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT); ++ ++ /* hardware info */ ++ reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO); ++ dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n", ++ (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK, ++ (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK); ++ ++ hsdma_dump_reg(hsdma); ++ ++ return ret; ++} ++ ++static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma) ++{ ++ struct mtk_hsdma_chan *chan; ++ ++ /* disable dma */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0); ++ ++ /* disable intr */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0); ++ ++ /* free desc */ ++ chan = &hsdma->chan[0]; ++ mtk_hsdam_free_desc(hsdma, chan); ++ ++ /* tx */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0); ++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0); ++ /* rx */ ++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0); ++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0); ++ /* reset */ ++ mtk_hsdma_reset_chan(hsdma, chan); ++} ++ ++static const struct of_device_id mtk_hsdma_of_match[] = { ++ { .compatible = "mediatek,mt7621-hsdma" }, ++ { }, 
++}; ++ ++static int mtk_hsdma_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct mtk_hsdma_chan *chan; ++ struct mtk_hsdam_engine *hsdma; ++ struct dma_device *dd; ++ struct resource *res; ++ int ret; ++ int irq; ++ void __iomem *base; ++ ++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ if (ret) ++ return ret; ++ ++ match = of_match_device(mtk_hsdma_of_match, &pdev->dev); ++ if (!match) ++ return -EINVAL; ++ ++ hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL); ++ if (!hsdma) { ++ dev_err(&pdev->dev, "alloc dma device failed\n"); ++ return -EINVAL; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ hsdma->base = base + HSDMA_BASE_OFFSET; ++ tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma); ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "failed to get irq\n"); ++ return -EINVAL; ++ } ++ ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq, ++ 0, dev_name(&pdev->dev), hsdma); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request irq\n"); ++ return ret; ++ } ++ ++ device_reset(&pdev->dev); ++ ++ dd = &hsdma->ddev; ++ dma_cap_set(DMA_MEMCPY, dd->cap_mask); ++ dd->copy_align = HSDMA_ALIGN_SIZE; ++ dd->device_free_chan_resources = mtk_hsdma_free_chan_resources; ++ dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy; ++ dd->device_terminate_all = mtk_hsdma_terminate_all; ++ dd->device_tx_status = mtk_hsdma_tx_status; ++ dd->device_issue_pending = mtk_hsdma_issue_pending; ++ dd->dev = &pdev->dev; ++ dd->dev->dma_parms = &hsdma->dma_parms; ++ dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN); ++ INIT_LIST_HEAD(&dd->channels); ++ ++ chan = &hsdma->chan[0]; ++ chan->id = 0; ++ chan->vchan.desc_free = mtk_hsdma_desc_free; ++ vchan_init(&chan->vchan, dd); ++ ++ /* init hardware */ ++ ret = mtk_hsdma_init(hsdma); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to alloc ring descs\n"); ++ return ret; ++ } ++ ++ ret = dma_async_device_register(dd); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to register dma device\n"); ++ return ret; ++ } ++ ++ ret = of_dma_controller_register(pdev->dev.of_node, ++ of_dma_xlate_by_chan_id, hsdma); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to register of dma controller\n"); ++ goto err_unregister; ++ } ++ ++ platform_set_drvdata(pdev, hsdma); ++ ++ return 0; ++ ++err_unregister: ++ dma_async_device_unregister(dd); ++ return ret; ++} ++ ++static int mtk_hsdma_remove(struct platform_device *pdev) ++{ ++ struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev); ++ ++ mtk_hsdma_uninit(hsdma); ++ ++ of_dma_controller_free(pdev->dev.of_node); ++ dma_async_device_unregister(&hsdma->ddev); ++ ++ return 0; ++} ++ ++static struct platform_driver mtk_hsdma_driver = { ++ .probe = mtk_hsdma_probe, ++ .remove = mtk_hsdma_remove, ++ .driver = { ++ .name = "hsdma-mt7621", ++ .of_match_table = mtk_hsdma_of_match, ++ }, ++}; ++module_platform_driver(mtk_hsdma_driver); ++ ++MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>"); ++MODULE_DESCRIPTION("MTK HSDMA driver"); ++MODULE_LICENSE("GPL v2"); diff --git a/target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch b/target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch new file mode 100644 index 0000000000..5f105c69e2 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0048-asoc-add-mt7620-support.patch @@ -0,0 +1,1046 @@ +From 7f29222b1731e8182ba94a331531dec18865a1e4 Mon Sep 17 
00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Sun, 27 Jul 2014 09:31:47 +0100 +Subject: [PATCH 48/53] asoc: add mt7620 support + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/ralink/of.c | 2 + + sound/soc/Kconfig | 1 + + sound/soc/Makefile | 1 + + sound/soc/ralink/Kconfig | 15 ++ + sound/soc/ralink/Makefile | 11 + + sound/soc/ralink/mt7620-i2s.c | 436 ++++++++++++++++++++++++++++++++++++++ + sound/soc/ralink/mt7620-wm8960.c | 233 ++++++++++++++++++++ + 7 files changed, 699 insertions(+) + create mode 100644 sound/soc/ralink/Kconfig + create mode 100644 sound/soc/ralink/Makefile + create mode 100644 sound/soc/ralink/mt7620-i2s.c + create mode 100644 sound/soc/ralink/mt7620-wm8960.c + +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -15,6 +15,7 @@ + #include <linux/of_fdt.h> + #include <linux/kernel.h> + #include <linux/bootmem.h> ++#include <linux/module.h> + #include <linux/of_platform.h> + #include <linux/of_address.h> + +@@ -26,6 +27,7 @@ + #include "common.h" + + __iomem void *rt_sysc_membase; ++EXPORT_SYMBOL(rt_sysc_membase); + __iomem void *rt_memc_membase; + + __iomem void *plat_of_remap_node(const char *node) +--- a/sound/soc/Kconfig ++++ b/sound/soc/Kconfig +@@ -59,6 +59,7 @@ source "sound/soc/mxs/Kconfig" + source "sound/soc/pxa/Kconfig" + source "sound/soc/qcom/Kconfig" + source "sound/soc/rockchip/Kconfig" ++source "sound/soc/ralink/Kconfig" + source "sound/soc/samsung/Kconfig" + source "sound/soc/sh/Kconfig" + source "sound/soc/sirf/Kconfig" +--- a/sound/soc/Makefile ++++ b/sound/soc/Makefile +@@ -40,6 +40,7 @@ obj-$(CONFIG_SND_SOC) += kirkwood/ + obj-$(CONFIG_SND_SOC) += pxa/ + obj-$(CONFIG_SND_SOC) += qcom/ + obj-$(CONFIG_SND_SOC) += rockchip/ ++obj-$(CONFIG_SND_SOC) += ralink/ + obj-$(CONFIG_SND_SOC) += samsung/ + obj-$(CONFIG_SND_SOC) += sh/ + obj-$(CONFIG_SND_SOC) += sirf/ +--- /dev/null ++++ b/sound/soc/ralink/Kconfig +@@ -0,0 +1,8 @@ ++config SND_RALINK_SOC_I2S ++ depends on RALINK && SND_SOC && !SOC_RT288X ++ select SND_SOC_GENERIC_DMAENGINE_PCM ++ select REGMAP_MMIO ++ tristate "SoC Audio (I2S protocol) for Ralink SoC" ++ help ++ Say Y if you want to use I2S protocol and I2S codec on Ralink/MediaTek ++ based boards. +--- /dev/null ++++ b/sound/soc/ralink/Makefile +@@ -0,0 +1,6 @@ ++# ++# Ralink/MediaTek Platform Support ++# ++snd-soc-ralink-i2s-objs := ralink-i2s.o ++ ++obj-$(CONFIG_SND_RALINK_SOC_I2S) += snd-soc-ralink-i2s.o +--- /dev/null ++++ b/sound/soc/ralink/ralink-i2s.c +@@ -0,0 +1,965 @@ ++/* ++ * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> ++ * Copyright (C) 2016 Michael Lee <igvtee@gmail.com> ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * ++ */ ++ ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/clk.h> ++#include <linux/regmap.h> ++#include <linux/reset.h> ++#include <linux/debugfs.h> ++#include <linux/of_device.h> ++#include <sound/pcm_params.h> ++#include <sound/dmaengine_pcm.h> ++ ++#include <asm/mach-ralink/ralink_regs.h> ++ ++#define DRV_NAME "ralink-i2s" ++ ++#define I2S_REG_CFG0 0x00 ++#define I2S_REG_INT_STATUS 0x04 ++#define I2S_REG_INT_EN 0x08 ++#define I2S_REG_FF_STATUS 0x0c ++#define I2S_REG_WREG 0x10 ++#define I2S_REG_RREG 0x14 ++#define I2S_REG_CFG1 0x18 ++#define I2S_REG_DIVCMP 0x20 ++#define I2S_REG_DIVINT 0x24 ++ ++/* I2S_REG_CFG0 */ ++#define I2S_REG_CFG0_EN BIT(31) ++#define I2S_REG_CFG0_DMA_EN BIT(30) ++#define I2S_REG_CFG0_BYTE_SWAP BIT(28) ++#define I2S_REG_CFG0_TX_EN BIT(24) ++#define I2S_REG_CFG0_RX_EN BIT(20) ++#define I2S_REG_CFG0_SLAVE BIT(16) ++#define I2S_REG_CFG0_RX_THRES 12 ++#define I2S_REG_CFG0_TX_THRES 4 ++#define I2S_REG_CFG0_THRES_MASK (0xf << I2S_REG_CFG0_RX_THRES) | \ ++ (4 << I2S_REG_CFG0_TX_THRES) ++#define I2S_REG_CFG0_DFT_THRES (4 << I2S_REG_CFG0_RX_THRES) | \ ++ (4 << I2S_REG_CFG0_TX_THRES) ++/* RT305x */ ++#define I2S_REG_CFG0_CLK_DIS BIT(8) ++#define I2S_REG_CFG0_TXCH_SWAP BIT(3) ++#define I2S_REG_CFG0_TXCH1_OFF BIT(2) ++#define I2S_REG_CFG0_TXCH0_OFF BIT(1) ++#define I2S_REG_CFG0_SLAVE_EN BIT(0) ++/* RT3883 */ ++#define I2S_REG_CFG0_RXCH_SWAP BIT(11) ++#define I2S_REG_CFG0_RXCH1_OFF BIT(10) ++#define I2S_REG_CFG0_RXCH0_OFF BIT(9) ++#define I2S_REG_CFG0_WS_INV BIT(0) ++/* MT7628 */ ++#define I2S_REG_CFG0_FMT_LE BIT(29) ++#define I2S_REG_CFG0_SYS_BE BIT(28) ++#define I2S_REG_CFG0_NORM_24 BIT(18) ++#define I2S_REG_CFG0_DATA_24 BIT(17) ++ ++/* I2S_REG_INT_STATUS */ ++#define I2S_REG_INT_RX_FAULT BIT(7) ++#define I2S_REG_INT_RX_OVRUN BIT(6) ++#define I2S_REG_INT_RX_UNRUN BIT(5) ++#define I2S_REG_INT_RX_THRES BIT(4) ++#define I2S_REG_INT_TX_FAULT BIT(3) ++#define I2S_REG_INT_TX_OVRUN BIT(2) ++#define I2S_REG_INT_TX_UNRUN BIT(1) ++#define I2S_REG_INT_TX_THRES BIT(0) ++#define I2S_REG_INT_TX_MASK 0xf ++#define I2S_REG_INT_RX_MASK 0xf0 ++ ++/* I2S_REG_INT_STATUS */ ++#define I2S_RX_AVCNT(x) ((x >> 4) & 0xf) ++#define I2S_TX_AVCNT(x) (x & 0xf) ++/* MT7628 */ ++#define MT7628_I2S_RX_AVCNT(x) ((x >> 8) & 0x1f) ++#define MT7628_I2S_TX_AVCNT(x) (x & 0x1f) ++ ++/* I2S_REG_CFG1 */ ++#define I2S_REG_CFG1_LBK BIT(31) ++#define I2S_REG_CFG1_EXTLBK BIT(30) ++/* RT3883 */ ++#define I2S_REG_CFG1_LEFT_J BIT(0) ++#define I2S_REG_CFG1_RIGHT_J BIT(1) ++#define I2S_REG_CFG1_FMT_MASK 0x3 ++ ++/* I2S_REG_DIVCMP */ ++#define I2S_REG_DIVCMP_CLKEN BIT(31) ++#define I2S_REG_DIVCMP_DIVCOMP_MASK 0x1ff ++ ++/* I2S_REG_DIVINT */ ++#define I2S_REG_DIVINT_MASK 0x3ff ++ ++/* BCLK dividers */ ++#define RALINK_I2S_DIVCMP 0 ++#define RALINK_I2S_DIVINT 1 ++ ++/* FIFO */ ++#define RALINK_I2S_FIFO_SIZE 32 ++ ++/* feature flags */ ++#define RALINK_FLAGS_TXONLY BIT(0) ++#define RALINK_FLAGS_LEFT_J BIT(1) ++#define RALINK_FLAGS_RIGHT_J BIT(2) ++#define RALINK_FLAGS_ENDIAN BIT(3) ++#define RALINK_FLAGS_24BIT BIT(4) ++ ++#define RALINK_I2S_INT_EN 0 ++ ++struct ralink_i2s_stats { ++ u32 dmafault; ++ u32 overrun; ++ u32 underrun; ++ u32 belowthres; ++}; ++ ++struct ralink_i2s { ++ struct device *dev; ++ void __iomem *regs; ++ struct clk *clk; ++ struct regmap *regmap; ++ u32 flags; ++ unsigned int fmt; ++ u16 txdma_req; ++ u16 rxdma_req; ++ ++ struct snd_dmaengine_dai_dma_data playback_dma_data; ++ struct snd_dmaengine_dai_dma_data capture_dma_data; ++ ++ struct dentry *dbg_dir; ++ struct 
dentry *dbg_stats; ++ struct ralink_i2s_stats txstats; ++ struct ralink_i2s_stats rxstats; ++}; ++ ++static void ralink_i2s_dump_regs(struct ralink_i2s *i2s) ++{ ++ u32 buf[10]; ++ int ret; ++ ++ ret = regmap_bulk_read(i2s->regmap, I2S_REG_CFG0, ++ buf, ARRAY_SIZE(buf)); ++ ++ dev_dbg(i2s->dev, "CFG0: %08x, INTSTAT: %08x, INTEN: %08x, " \ ++ "FFSTAT: %08x, WREG: %08x, RREG: %08x, " \ ++ "CFG1: %08x, DIVCMP: %08x, DIVINT: %08x\n", ++ buf[0], buf[1], buf[2], buf[3], buf[4], ++ buf[5], buf[6], buf[8], buf[9]); ++} ++ ++static int ralink_i2s_set_sysclk(struct snd_soc_dai *dai, ++ int clk_id, unsigned int freq, int dir) ++{ ++ return 0; ++} ++ ++static int ralink_i2s_set_sys_bclk(struct snd_soc_dai *dai, int width, int rate) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ unsigned long clk = clk_get_rate(i2s->clk); ++ int div; ++ uint32_t data; ++ ++ /* disable clock at slave mode */ ++ if ((i2s->fmt & SND_SOC_DAIFMT_MASTER_MASK) == ++ SND_SOC_DAIFMT_CBM_CFM) { ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_CLK_DIS, ++ I2S_REG_CFG0_CLK_DIS); ++ return 0; ++ } ++ ++ /* FREQOUT = FREQIN / (I2S_CLK_DIV + 1) */ ++ div = (clk / rate ) - 1; ++ ++ data = rt_sysc_r32(0x30); ++ data &= (0xff << 8); ++ data |= (0x1 << 15) | (div << 8); ++ rt_sysc_w32(data, 0x30); ++ ++ /* enable clock */ ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, I2S_REG_CFG0_CLK_DIS, 0); ++ ++ dev_dbg(i2s->dev, "clk: %lu, rate: %u, div: %d\n", ++ clk, rate, div); ++ ++ return 0; ++} ++ ++static int ralink_i2s_set_bclk(struct snd_soc_dai *dai, int width, int rate) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ unsigned long clk = clk_get_rate(i2s->clk); ++ int divint, divcomp; ++ ++ /* disable clock at slave mode */ ++ if ((i2s->fmt & SND_SOC_DAIFMT_MASTER_MASK) == ++ SND_SOC_DAIFMT_CBM_CFM) { ++ regmap_update_bits(i2s->regmap, I2S_REG_DIVCMP, ++ I2S_REG_DIVCMP_CLKEN, 0); ++ return 0; ++ } ++ ++ /* FREQOUT = FREQIN * (1/2) * (1/(DIVINT + DIVCOMP/512)) */ ++ clk = clk / (2 * 2 * width); ++ divint = clk / rate; ++ divcomp = ((clk % rate) * 512) / rate; ++ ++ if ((divint > I2S_REG_DIVINT_MASK) || ++ (divcomp > I2S_REG_DIVCMP_DIVCOMP_MASK)) ++ return -EINVAL; ++ ++ regmap_update_bits(i2s->regmap, I2S_REG_DIVINT, ++ I2S_REG_DIVINT_MASK, divint); ++ regmap_update_bits(i2s->regmap, I2S_REG_DIVCMP, ++ I2S_REG_DIVCMP_DIVCOMP_MASK, divcomp); ++ ++ /* enable clock */ ++ regmap_update_bits(i2s->regmap, I2S_REG_DIVCMP, I2S_REG_DIVCMP_CLKEN, ++ I2S_REG_DIVCMP_CLKEN); ++ ++ dev_dbg(i2s->dev, "clk: %lu, rate: %u, int: %d, comp: %d\n", ++ clk_get_rate(i2s->clk), rate, divint, divcomp); ++ ++ return 0; ++} ++ ++static int ralink_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ unsigned int cfg0 = 0, cfg1 = 0; ++ ++ /* set master/slave audio interface */ ++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ++ case SND_SOC_DAIFMT_CBM_CFM: ++ if (i2s->flags & RALINK_FLAGS_TXONLY) ++ cfg0 |= I2S_REG_CFG0_SLAVE_EN; ++ else ++ cfg0 |= I2S_REG_CFG0_SLAVE; ++ break; ++ case SND_SOC_DAIFMT_CBS_CFS: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ /* interface format */ ++ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ++ case SND_SOC_DAIFMT_I2S: ++ break; ++ case SND_SOC_DAIFMT_RIGHT_J: ++ if (i2s->flags & RALINK_FLAGS_RIGHT_J) { ++ cfg1 |= I2S_REG_CFG1_RIGHT_J; ++ break; ++ } ++ return -EINVAL; ++ case SND_SOC_DAIFMT_LEFT_J: ++ if (i2s->flags & RALINK_FLAGS_LEFT_J) { ++ cfg1 |= I2S_REG_CFG1_LEFT_J; ++ break; ++ } ++ return -EINVAL; ++ 
default: ++ return -EINVAL; ++ } ++ ++ /* clock inversion */ ++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ++ case SND_SOC_DAIFMT_NB_NF: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (i2s->flags & RALINK_FLAGS_TXONLY) { ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_SLAVE_EN, cfg0); ++ } else { ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_SLAVE, cfg0); ++ } ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG1, ++ I2S_REG_CFG1_FMT_MASK, cfg1); ++ i2s->fmt = fmt; ++ ++ return 0; ++} ++ ++static int ralink_i2s_startup(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ ++ if (dai->active) ++ return 0; ++ ++ /* setup status interrupt */ ++#if (RALINK_I2S_INT_EN) ++ regmap_write(i2s->regmap, I2S_REG_INT_EN, 0xff); ++#else ++ regmap_write(i2s->regmap, I2S_REG_INT_EN, 0x0); ++#endif ++ ++ /* enable */ ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_EN | I2S_REG_CFG0_DMA_EN | ++ I2S_REG_CFG0_THRES_MASK, ++ I2S_REG_CFG0_EN | I2S_REG_CFG0_DMA_EN | ++ I2S_REG_CFG0_DFT_THRES); ++ ++ return 0; ++} ++ ++static void ralink_i2s_shutdown(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ ++ /* If both streams are stopped, disable module and clock */ ++ if (dai->active) ++ return; ++ ++ /* ++ * datasheet mention when disable all control regs are cleared ++ * to initial values. need reinit at startup. ++ */ ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, I2S_REG_CFG0_EN, 0); ++} ++ ++static int ralink_i2s_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ int width; ++ int ret; ++ ++ width = params_width(params); ++ switch (width) { ++ case 16: ++ if (i2s->flags & RALINK_FLAGS_24BIT) ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_DATA_24, 0); ++ break; ++ case 24: ++ if (i2s->flags & RALINK_FLAGS_24BIT) { ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_DATA_24, ++ I2S_REG_CFG0_DATA_24); ++ break; ++ } ++ return -EINVAL; ++ default: ++ return -EINVAL; ++ } ++ ++ switch (params_channels(params)) { ++ case 2: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (i2s->flags & RALINK_FLAGS_ENDIAN) { ++ /* system endian */ ++#ifdef SNDRV_LITTLE_ENDIAN ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_SYS_BE, 0); ++#else ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_SYS_BE, ++ I2S_REG_CFG0_SYS_BE); ++#endif ++ ++ /* data endian */ ++ switch (params_format(params)) { ++ case SNDRV_PCM_FORMAT_S16_LE: ++ case SNDRV_PCM_FORMAT_S24_LE: ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_FMT_LE, ++ I2S_REG_CFG0_FMT_LE); ++ break; ++ case SNDRV_PCM_FORMAT_S16_BE: ++ case SNDRV_PCM_FORMAT_S24_BE: ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, ++ I2S_REG_CFG0_FMT_LE, 0); ++ break; ++ default: ++ return -EINVAL; ++ } ++ } ++ ++ /* setup bclk rate */ ++ if (i2s->flags & RALINK_FLAGS_TXONLY) ++ ret = ralink_i2s_set_sys_bclk(dai, width, params_rate(params)); ++ else ++ ret = ralink_i2s_set_bclk(dai, width, params_rate(params)); ++ ++ return ret; ++} ++ ++static int ralink_i2s_trigger(struct snd_pcm_substream *substream, int cmd, ++ struct snd_soc_dai *dai) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ unsigned int mask, val; ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ mask = 
I2S_REG_CFG0_TX_EN; ++ else ++ mask = I2S_REG_CFG0_RX_EN; ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ case SNDRV_PCM_TRIGGER_RESUME: ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ val = mask; ++ break; ++ case SNDRV_PCM_TRIGGER_STOP: ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ++ val = 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ regmap_update_bits(i2s->regmap, I2S_REG_CFG0, mask, val); ++ ++ return 0; ++} ++ ++static void ralink_i2s_init_dma_data(struct ralink_i2s *i2s, ++ struct resource *res) ++{ ++ struct snd_dmaengine_dai_dma_data *dma_data; ++ ++ /* Playback */ ++ dma_data = &i2s->playback_dma_data; ++ dma_data->addr = res->start + I2S_REG_WREG; ++ dma_data->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ dma_data->maxburst = 1; ++ dma_data->slave_id = i2s->txdma_req; ++ ++ if (i2s->flags & RALINK_FLAGS_TXONLY) ++ return; ++ ++ /* Capture */ ++ dma_data = &i2s->capture_dma_data; ++ dma_data->addr = res->start + I2S_REG_RREG; ++ dma_data->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ++ dma_data->maxburst = 1; ++ dma_data->slave_id = i2s->rxdma_req; ++} ++ ++static int ralink_i2s_dai_probe(struct snd_soc_dai *dai) ++{ ++ struct ralink_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ ++ snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data, ++ &i2s->capture_dma_data); ++ ++ return 0; ++} ++ ++static int ralink_i2s_dai_remove(struct snd_soc_dai *dai) ++{ ++ return 0; ++} ++ ++static const struct snd_soc_dai_ops ralink_i2s_dai_ops = { ++ .set_sysclk = ralink_i2s_set_sysclk, ++ .set_fmt = ralink_i2s_set_fmt, ++ .startup = ralink_i2s_startup, ++ .shutdown = ralink_i2s_shutdown, ++ .hw_params = ralink_i2s_hw_params, ++ .trigger = ralink_i2s_trigger, ++}; ++ ++static struct snd_soc_dai_driver ralink_i2s_dai = { ++ .name = DRV_NAME, ++ .probe = ralink_i2s_dai_probe, ++ .remove = ralink_i2s_dai_remove, ++ .ops = &ralink_i2s_dai_ops, ++ .capture = { ++ .stream_name = "I2S Capture", ++ .channels_min = 2, ++ .channels_max = 2, ++ .rate_min = 5512, ++ .rate_max = 192000, ++ .rates = SNDRV_PCM_RATE_CONTINUOUS, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, ++ }, ++ .playback = { ++ .stream_name = "I2S Playback", ++ .channels_min = 2, ++ .channels_max = 2, ++ .rate_min = 5512, ++ .rate_max = 192000, ++ .rates = SNDRV_PCM_RATE_CONTINUOUS, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, ++ }, ++ .symmetric_rates = 1, ++}; ++ ++static struct snd_pcm_hardware ralink_pcm_hardware = { ++ .info = SNDRV_PCM_INFO_MMAP | ++ SNDRV_PCM_INFO_MMAP_VALID | ++ SNDRV_PCM_INFO_INTERLEAVED | ++ SNDRV_PCM_INFO_BLOCK_TRANSFER, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE, ++ .channels_min = 2, ++ .channels_max = 2, ++ .period_bytes_min = PAGE_SIZE, ++ .period_bytes_max = PAGE_SIZE * 2, ++ .periods_min = 2, ++ .periods_max = 128, ++ .buffer_bytes_max = 128 * 1024, ++ .fifo_size = RALINK_I2S_FIFO_SIZE, ++}; ++ ++static const struct snd_dmaengine_pcm_config ralink_dmaengine_pcm_config = { ++ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, ++ .pcm_hardware = &ralink_pcm_hardware, ++ .prealloc_buffer_size = 256 * PAGE_SIZE, ++}; ++ ++static const struct snd_soc_component_driver ralink_i2s_component = { ++ .name = DRV_NAME, ++}; ++ ++static bool ralink_i2s_readable_reg(struct device *dev, unsigned int reg) ++{ ++ return true; ++} ++ ++static bool ralink_i2s_volatile_reg(struct device *dev, unsigned int reg) ++{ ++ switch (reg) { ++ case I2S_REG_INT_STATUS: ++ case I2S_REG_FF_STATUS: ++ return true; ++ } ++ return false; ++} ++ ++static bool ralink_i2s_writeable_reg(struct device *dev, unsigned int 
reg) ++{ ++ switch (reg) { ++ case I2S_REG_FF_STATUS: ++ case I2S_REG_RREG: ++ return false; ++ } ++ return true; ++} ++ ++static const struct regmap_config ralink_i2s_regmap_config = { ++ .reg_bits = 32, ++ .reg_stride = 4, ++ .val_bits = 32, ++ .writeable_reg = ralink_i2s_writeable_reg, ++ .readable_reg = ralink_i2s_readable_reg, ++ .volatile_reg = ralink_i2s_volatile_reg, ++ .max_register = I2S_REG_DIVINT, ++}; ++ ++#if (RALINK_I2S_INT_EN) ++static irqreturn_t ralink_i2s_irq(int irq, void *devid) ++{ ++ struct ralink_i2s *i2s = devid; ++ u32 status; ++ ++ regmap_read(i2s->regmap, I2S_REG_INT_STATUS, &status); ++ if (unlikely(!status)) ++ return IRQ_NONE; ++ ++ /* tx stats */ ++ if (status & I2S_REG_INT_TX_MASK) { ++ if (status & I2S_REG_INT_TX_THRES) ++ i2s->txstats.belowthres++; ++ if (status & I2S_REG_INT_TX_UNRUN) ++ i2s->txstats.underrun++; ++ if (status & I2S_REG_INT_TX_OVRUN) ++ i2s->txstats.overrun++; ++ if (status & I2S_REG_INT_TX_FAULT) ++ i2s->txstats.dmafault++; ++ } ++ ++ /* rx stats */ ++ if (status & I2S_REG_INT_RX_MASK) { ++ if (status & I2S_REG_INT_RX_THRES) ++ i2s->rxstats.belowthres++; ++ if (status & I2S_REG_INT_RX_UNRUN) ++ i2s->rxstats.underrun++; ++ if (status & I2S_REG_INT_RX_OVRUN) ++ i2s->rxstats.overrun++; ++ if (status & I2S_REG_INT_RX_FAULT) ++ i2s->rxstats.dmafault++; ++ } ++ ++ /* clean status bits */ ++ regmap_write(i2s->regmap, I2S_REG_INT_STATUS, status); ++ ++ return IRQ_HANDLED; ++} ++#endif ++ ++#if IS_ENABLED(CONFIG_DEBUG_FS) ++static int ralink_i2s_stats_show(struct seq_file *s, void *unused) ++{ ++ struct ralink_i2s *i2s = s->private; ++ ++ seq_printf(s, "tx stats\n"); ++ seq_printf(s, "\tbelow threshold\t%u\n", i2s->txstats.belowthres); ++ seq_printf(s, "\tunder run\t%u\n", i2s->txstats.underrun); ++ seq_printf(s, "\tover run\t%u\n", i2s->txstats.overrun); ++ seq_printf(s, "\tdma fault\t%u\n", i2s->txstats.dmafault); ++ ++ seq_printf(s, "rx stats\n"); ++ seq_printf(s, "\tbelow threshold\t%u\n", i2s->rxstats.belowthres); ++ seq_printf(s, "\tunder run\t%u\n", i2s->rxstats.underrun); ++ seq_printf(s, "\tover run\t%u\n", i2s->rxstats.overrun); ++ seq_printf(s, "\tdma fault\t%u\n", i2s->rxstats.dmafault); ++ ++ ralink_i2s_dump_regs(i2s); ++ ++ return 0; ++} ++ ++static int ralink_i2s_stats_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, ralink_i2s_stats_show, inode->i_private); ++} ++ ++static const struct file_operations ralink_i2s_stats_ops = { ++ .open = ralink_i2s_stats_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static inline int ralink_i2s_debugfs_create(struct ralink_i2s *i2s) ++{ ++ i2s->dbg_dir = debugfs_create_dir(dev_name(i2s->dev), NULL); ++ if (!i2s->dbg_dir) ++ return -ENOMEM; ++ ++ i2s->dbg_stats = debugfs_create_file("stats", S_IRUGO, ++ i2s->dbg_dir, i2s, &ralink_i2s_stats_ops); ++ if (!i2s->dbg_stats) { ++ debugfs_remove(i2s->dbg_dir); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static inline void ralink_i2s_debugfs_remove(struct ralink_i2s *i2s) ++{ ++ debugfs_remove(i2s->dbg_stats); ++ debugfs_remove(i2s->dbg_dir); ++} ++#else ++static inline int ralink_i2s_debugfs_create(struct ralink_i2s *i2s) ++{ ++ return 0; ++} ++ ++static inline void ralink_i2s_debugfs_remove(struct fsl_ssi_dbg *ssi_dbg) ++{ ++} ++#endif ++ ++/* ++ * TODO: these refclk setup functions should use ++ * clock framework instead. hardcode it now. 
++ */ ++static void rt3350_refclk_setup(void) ++{ ++ uint32_t data; ++ ++ /* set refclk output 12Mhz clock */ ++ data = rt_sysc_r32(0x2c); ++ data |= (0x1 << 8); ++ rt_sysc_w32(data, 0x2c); ++} ++ ++static void rt3883_refclk_setup(void) ++{ ++ uint32_t data; ++ ++ /* set refclk output 12Mhz clock */ ++ data = rt_sysc_r32(0x2c); ++ data &= ~(0x3 << 13); ++ data |= (0x1 << 13); ++ rt_sysc_w32(data, 0x2c); ++} ++ ++static void rt3552_refclk_setup(void) ++{ ++ uint32_t data; ++ ++ /* set refclk output 12Mhz clock */ ++ data = rt_sysc_r32(0x2c); ++ data &= ~(0xf << 8); ++ data |= (0x3 << 8); ++ rt_sysc_w32(data, 0x2c); ++} ++ ++static void mt7620_refclk_setup(void) ++{ ++ uint32_t data; ++ ++ /* set refclk output 12Mhz clock */ ++ data = rt_sysc_r32(0x2c); ++ data &= ~(0x7 << 9); ++ data |= 0x1 << 9; ++ rt_sysc_w32(data, 0x2c); ++} ++ ++static void mt7621_refclk_setup(void) ++{ ++ uint32_t data; ++ ++ /* set refclk output 12Mhz clock */ ++ data = rt_sysc_r32(0x2c); ++ data &= ~(0x1f << 18); ++ data |= (0x19 << 18); ++ data &= ~(0x1f << 12); ++ data |= (0x1 << 12); ++ data &= ~(0x7 << 9); ++ data |= (0x5 << 9); ++ rt_sysc_w32(data, 0x2c); ++} ++ ++static void mt7628_refclk_setup(void) ++{ ++ uint32_t data; ++ ++ /* set i2s and refclk digital pad */ ++ data = rt_sysc_r32(0x3c); ++ data |= 0x1f; ++ rt_sysc_w32(data, 0x3c); ++ ++ /* Adjust REFCLK0's driving strength */ ++ data = rt_sysc_r32(0x1354); ++ data &= ~(0x1 << 5); ++ rt_sysc_w32(data, 0x1354); ++ data = rt_sysc_r32(0x1364); ++ data |= ~(0x1 << 5); ++ rt_sysc_w32(data, 0x1364); ++ ++ /* set refclk output 12Mhz clock */ ++ data = rt_sysc_r32(0x2c); ++ data &= ~(0x7 << 9); ++ data |= 0x1 << 9; ++ rt_sysc_w32(data, 0x2c); ++} ++ ++struct rt_i2s_data { ++ u32 flags; ++ void (*refclk_setup)(void); ++}; ++ ++struct rt_i2s_data rt3050_i2s_data = { .flags = RALINK_FLAGS_TXONLY }; ++struct rt_i2s_data rt3350_i2s_data = { .flags = RALINK_FLAGS_TXONLY, ++ .refclk_setup = rt3350_refclk_setup }; ++struct rt_i2s_data rt3883_i2s_data = { ++ .flags = (RALINK_FLAGS_LEFT_J | RALINK_FLAGS_RIGHT_J), ++ .refclk_setup = rt3883_refclk_setup }; ++struct rt_i2s_data rt3352_i2s_data = { .refclk_setup = rt3552_refclk_setup}; ++struct rt_i2s_data mt7620_i2s_data = { .refclk_setup = mt7620_refclk_setup}; ++struct rt_i2s_data mt7621_i2s_data = { .refclk_setup = mt7621_refclk_setup}; ++struct rt_i2s_data mt7628_i2s_data = { ++ .flags = (RALINK_FLAGS_ENDIAN | RALINK_FLAGS_24BIT | ++ RALINK_FLAGS_LEFT_J), ++ .refclk_setup = mt7628_refclk_setup}; ++ ++static const struct of_device_id ralink_i2s_match_table[] = { ++ { .compatible = "ralink,rt3050-i2s", ++ .data = (void *)&rt3050_i2s_data }, ++ { .compatible = "ralink,rt3350-i2s", ++ .data = (void *)&rt3350_i2s_data }, ++ { .compatible = "ralink,rt3883-i2s", ++ .data = (void *)&rt3883_i2s_data }, ++ { .compatible = "ralink,rt3352-i2s", ++ .data = (void *)&rt3352_i2s_data }, ++ { .compatible = "mediatek,mt7620-i2s", ++ .data = (void *)&mt7620_i2s_data }, ++ { .compatible = "mediatek,mt7621-i2s", ++ .data = (void *)&mt7621_i2s_data }, ++ { .compatible = "mediatek,mt7628-i2s", ++ .data = (void *)&mt7628_i2s_data }, ++}; ++MODULE_DEVICE_TABLE(of, ralink_i2s_match_table); ++ ++static int ralink_i2s_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct device_node *np = pdev->dev.of_node; ++ struct ralink_i2s *i2s; ++ struct resource *res; ++ int irq, ret; ++ u32 dma_req; ++ struct rt_i2s_data *data; ++ ++ i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL); ++ if (!i2s) ++ return -ENOMEM; ++ 
++ platform_set_drvdata(pdev, i2s); ++ i2s->dev = &pdev->dev; ++ ++ match = of_match_device(ralink_i2s_match_table, &pdev->dev); ++ if (!match) ++ return -EINVAL; ++ data = (struct rt_i2s_data *)match->data; ++ i2s->flags = data->flags; ++ /* setup out 12Mhz refclk to codec as mclk */ ++ if (data->refclk_setup) ++ data->refclk_setup(); ++ ++ if (of_property_read_u32(np, "txdma-req", &dma_req)) { ++ dev_err(&pdev->dev, "no txdma-req define\n"); ++ return -EINVAL; ++ } ++ i2s->txdma_req = (u16)dma_req; ++ if (!(i2s->flags & RALINK_FLAGS_TXONLY)) { ++ if (of_property_read_u32(np, "rxdma-req", &dma_req)) { ++ dev_err(&pdev->dev, "no rxdma-req define\n"); ++ return -EINVAL; ++ } ++ i2s->rxdma_req = (u16)dma_req; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ i2s->regs = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(i2s->regs)) ++ return PTR_ERR(i2s->regs); ++ ++ i2s->regmap = devm_regmap_init_mmio(&pdev->dev, i2s->regs, ++ &ralink_i2s_regmap_config); ++ if (IS_ERR(i2s->regmap)) { ++ dev_err(&pdev->dev, "regmap init failed\n"); ++ return PTR_ERR(i2s->regmap); ++ } ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) { ++ dev_err(&pdev->dev, "failed to get irq\n"); ++ return -EINVAL; ++ } ++ ++#if (RALINK_I2S_INT_EN) ++ ret = devm_request_irq(&pdev->dev, irq, ralink_i2s_irq, ++ 0, dev_name(&pdev->dev), i2s); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to request irq\n"); ++ return ret; ++ } ++#endif ++ ++ i2s->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(i2s->clk)) { ++ dev_err(&pdev->dev, "no clock defined\n"); ++ return PTR_ERR(i2s->clk); ++ } ++ ++ ret = clk_prepare_enable(i2s->clk); ++ if (ret) ++ return ret; ++ ++ ralink_i2s_init_dma_data(i2s, res); ++ ++ device_reset(&pdev->dev); ++ ++ ret = ralink_i2s_debugfs_create(i2s); ++ if (ret) { ++ dev_err(&pdev->dev, "create debugfs failed\n"); ++ goto err_clk_disable; ++ } ++ ++ /* enable 24bits support */ ++ if (i2s->flags & RALINK_FLAGS_24BIT) { ++ ralink_i2s_dai.capture.formats |= SNDRV_PCM_FMTBIT_S24_LE; ++ ralink_i2s_dai.playback.formats |= SNDRV_PCM_FMTBIT_S24_LE; ++ } ++ ++ /* enable big endian support */ ++ if (i2s->flags & RALINK_FLAGS_ENDIAN) { ++ ralink_i2s_dai.capture.formats |= SNDRV_PCM_FMTBIT_S16_BE; ++ ralink_i2s_dai.playback.formats |= SNDRV_PCM_FMTBIT_S16_BE; ++ ralink_pcm_hardware.formats |= SNDRV_PCM_FMTBIT_S16_BE; ++ if (i2s->flags & RALINK_FLAGS_24BIT) { ++ ralink_i2s_dai.capture.formats |= ++ SNDRV_PCM_FMTBIT_S24_BE; ++ ralink_i2s_dai.playback.formats |= ++ SNDRV_PCM_FMTBIT_S24_BE; ++ ralink_pcm_hardware.formats |= ++ SNDRV_PCM_FMTBIT_S24_BE; ++ } ++ } ++ ++ /* disable capture support */ ++ if (i2s->flags & RALINK_FLAGS_TXONLY) ++ memset(&ralink_i2s_dai.capture, sizeof(ralink_i2s_dai.capture), ++ 0); ++ ++ ret = devm_snd_soc_register_component(&pdev->dev, &ralink_i2s_component, ++ &ralink_i2s_dai, 1); ++ if (ret) ++ goto err_debugfs; ++ ++ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, ++ &ralink_dmaengine_pcm_config, ++ SND_DMAENGINE_PCM_FLAG_COMPAT); ++ if (ret) ++ goto err_debugfs; ++ ++ dev_info(i2s->dev, "mclk %luKHz\n", clk_get_rate(i2s->clk) / 1000000); ++ ++ return 0; ++ ++err_debugfs: ++ ralink_i2s_debugfs_remove(i2s); ++ ++err_clk_disable: ++ clk_disable_unprepare(i2s->clk); ++ ++ return ret; ++} ++ ++static int ralink_i2s_remove(struct platform_device *pdev) ++{ ++ struct ralink_i2s *i2s = platform_get_drvdata(pdev); ++ ++ ralink_i2s_debugfs_remove(i2s); ++ clk_disable_unprepare(i2s->clk); ++ ++ return 0; ++} ++ ++static struct platform_driver ralink_i2s_driver = { ++ 
.probe = ralink_i2s_probe, ++ .remove = ralink_i2s_remove, ++ .driver = { ++ .name = DRV_NAME, ++ .of_match_table = ralink_i2s_match_table, ++ }, ++}; ++module_platform_driver(ralink_i2s_driver); ++ ++MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>"); ++MODULE_DESCRIPTION("Ralink/MediaTek I2S driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:" DRV_NAME); diff --git a/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch b/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch new file mode 100644 index 0000000000..b71d8f38e0 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch @@ -0,0 +1,22 @@ +From a7eb46e0ea4a11e4dfb56ab129bf816d1059a6c5 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 7 Dec 2015 17:31:08 +0100 +Subject: [PATCH 51/53] serial: add ugly custom baud rate hack + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/tty/serial/serial_core.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -411,6 +411,9 @@ uart_get_baud_rate(struct uart_port *por + break; + } + ++ if (tty_termios_baud_rate(termios) == 2500000) ++ return 250000; ++ + for (try = 0; try < 2; try++) { + baud = tty_termios_baud_rate(termios); + diff --git a/target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch b/target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch new file mode 100644 index 0000000000..6eb7f16b9d --- /dev/null +++ b/target/linux/ramips/patches-4.14/0052-pwm-add-mediatek-support.patch @@ -0,0 +1,217 @@ +From fc8f96309c21c1bc3276427309cd7d361347d66e Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 7 Dec 2015 17:16:50 +0100 +Subject: [PATCH 52/53] pwm: add mediatek support + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + drivers/pwm/Kconfig | 9 +++ + drivers/pwm/Makefile | 1 + + drivers/pwm/pwm-mediatek.c | 173 ++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 183 insertions(+) + create mode 100644 drivers/pwm/pwm-mediatek.c + +--- a/drivers/pwm/Kconfig ++++ b/drivers/pwm/Kconfig +@@ -302,6 +302,15 @@ config PWM_MEDIATEK + To compile this driver as a module, choose M here: the module + will be called pwm-mediatek. + ++config PWM_MEDIATEK_RAMIPS ++ tristate "Mediatek PWM support" ++ depends on RALINK && OF ++ help ++ Generic PWM framework driver for Mediatek ARM SoC. ++ ++ To compile this driver as a module, choose M here: the module ++ will be called pwm-mxs. ++ + config PWM_MXS + tristate "Freescale MXS PWM support" + depends on ARCH_MXS && OF +--- a/drivers/pwm/Makefile ++++ b/drivers/pwm/Makefile +@@ -28,6 +28,7 @@ obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-p + obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o + obj-$(CONFIG_PWM_MESON) += pwm-meson.o + obj-$(CONFIG_PWM_MEDIATEK) += pwm-mediatek.o ++obj-$(CONFIG_PWM_MEDIATEK_RAMIPS) += pwm-mediatek-ramips.o + obj-$(CONFIG_PWM_MTK_DISP) += pwm-mtk-disp.o + obj-$(CONFIG_PWM_MXS) += pwm-mxs.o + obj-$(CONFIG_PWM_OMAP_DMTIMER) += pwm-omap-dmtimer.o +--- /dev/null ++++ b/drivers/pwm/pwm-mediatek-ramips.c +@@ -0,0 +1,173 @@ ++/* ++ * Mediatek Pulse Width Modulator driver ++ * ++ * Copyright (C) 2015 John Crispin <blogic@openwrt.org> ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#include <linux/err.h> ++#include <linux/io.h> ++#include <linux/ioport.h> ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/of.h> ++#include <linux/platform_device.h> ++#include <linux/pwm.h> ++#include <linux/slab.h> ++#include <linux/types.h> ++ ++#define NUM_PWM 4 ++ ++/* PWM registers and bits definitions */ ++#define PWMCON 0x00 ++#define PWMHDUR 0x04 ++#define PWMLDUR 0x08 ++#define PWMGDUR 0x0c ++#define PWMWAVENUM 0x28 ++#define PWMDWIDTH 0x2c ++#define PWMTHRES 0x30 ++ ++/** ++ * struct mtk_pwm_chip - struct representing pwm chip ++ * ++ * @mmio_base: base address of pwm chip ++ * @chip: linux pwm chip representation ++ */ ++struct mtk_pwm_chip { ++ void __iomem *mmio_base; ++ struct pwm_chip chip; ++}; ++ ++static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip) ++{ ++ return container_of(chip, struct mtk_pwm_chip, chip); ++} ++ ++static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num, ++ unsigned long offset) ++{ ++ return ioread32(chip->mmio_base + 0x10 + (num * 0x40) + offset); ++} ++ ++static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip, ++ unsigned int num, unsigned long offset, ++ unsigned long val) ++{ ++ iowrite32(val, chip->mmio_base + 0x10 + (num * 0x40) + offset); ++} ++ ++static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, ++ int duty_ns, int period_ns) ++{ ++ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); ++ u32 resolution = 100 / 4; ++ u32 clkdiv = 0; ++ ++ while (period_ns / resolution > 8191) { ++ clkdiv++; ++ resolution *= 2; ++ } ++ ++ if (clkdiv > 7) ++ return -1; ++ ++ mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | BIT(3) | clkdiv); ++ mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution); ++ mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution); ++ return 0; ++} ++ ++static int mtk_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) ++{ ++ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); ++ u32 val; ++ ++ val = ioread32(pc->mmio_base); ++ val |= BIT(pwm->hwpwm); ++ iowrite32(val, pc->mmio_base); ++ ++ return 0; ++} ++ ++static void mtk_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) ++{ ++ struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip); ++ u32 val; ++ ++ val = ioread32(pc->mmio_base); ++ val &= ~BIT(pwm->hwpwm); ++ iowrite32(val, pc->mmio_base); ++} ++ ++static const struct pwm_ops mtk_pwm_ops = { ++ .config = mtk_pwm_config, ++ .enable = mtk_pwm_enable, ++ .disable = mtk_pwm_disable, ++ .owner = THIS_MODULE, ++}; ++ ++static int mtk_pwm_probe(struct platform_device *pdev) ++{ ++ struct mtk_pwm_chip *pc; ++ struct resource *r; ++ int ret; ++ ++ pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL); ++ if (!pc) ++ return -ENOMEM; ++ ++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); ++ if (IS_ERR(pc->mmio_base)) ++ return PTR_ERR(pc->mmio_base); ++ ++ platform_set_drvdata(pdev, pc); ++ ++ pc->chip.dev = &pdev->dev; ++ pc->chip.ops = &mtk_pwm_ops; ++ pc->chip.base = -1; ++ pc->chip.npwm = NUM_PWM; ++ ++ ret = pwmchip_add(&pc->chip); ++ if (ret < 0) ++ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret); ++ ++ return ret; ++} ++ ++static int mtk_pwm_remove(struct platform_device *pdev) ++{ ++ struct mtk_pwm_chip *pc = platform_get_drvdata(pdev); ++ int i; ++ ++ for (i = 0; i < NUM_PWM; i++) ++ pwm_disable(&pc->chip.pwms[i]); ++ ++ return pwmchip_remove(&pc->chip); ++} ++ ++static const struct of_device_id mtk_pwm_of_match[] = { ++ { .compatible = 
"mediatek,mt7628-pwm" }, ++ { } ++}; ++ ++MODULE_DEVICE_TABLE(of, mtk_pwm_of_match); ++ ++static struct platform_driver mtk_pwm_driver = { ++ .driver = { ++ .name = "mtk-pwm", ++ .owner = THIS_MODULE, ++ .of_match_table = mtk_pwm_of_match, ++ }, ++ .probe = mtk_pwm_probe, ++ .remove = mtk_pwm_remove, ++}; ++ ++module_platform_driver(mtk_pwm_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); ++MODULE_ALIAS("platform:mtk-pwm"); diff --git a/target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch b/target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch new file mode 100644 index 0000000000..6ee0845ffe --- /dev/null +++ b/target/linux/ramips/patches-4.14/0053-mtd-spi-nor-add-w25q256-3b-mode-switch.patch @@ -0,0 +1,214 @@ +mtd: spi-nor: add support for switching between 3-byte and 4-byte addressing on w25q256 flash + +On some devices the flash chip needs to be in 3-byte addressing mode during +reboot, otherwise the boot loader will fail to start. +This mode however does not allow regular reads/writes onto the upper 16M +half. W25Q256 has separate read commands for reading from >16M, however +it does not have any separate write commands. +This patch changes the code to leave the chip in 3-byte mode most of the +time and only switch during erase/write cycles that go to >16M +addresses. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -89,6 +89,10 @@ struct flash_info { + #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ + #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ + #define USE_CLSR BIT(14) /* use CLSR command */ ++#define SPI_NOR_4B_READ_OP BIT(15) /* ++ * Like SPI_NOR_4B_OPCODES, but for read ++ * op code only. ++ */ + }; + + #define JEDEC_MFR(info) ((info)->id[0]) +@@ -240,6 +244,15 @@ static inline u8 spi_nor_convert_3to4_er + ARRAY_SIZE(spi_nor_3to4_erase)); + } + ++static void spi_nor_set_4byte_read(struct spi_nor *nor, ++ const struct flash_info *info) ++{ ++ nor->addr_width = 3; ++ nor->ext_addr = 0; ++ nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); ++ nor->flags |= SNOR_F_4B_EXT_ADDR; ++} ++ + static void spi_nor_set_4byte_opcodes(struct spi_nor *nor, + const struct flash_info *info) + { +@@ -467,6 +480,36 @@ static int spi_nor_erase_sector(struct s + return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width); + } + ++static int spi_nor_check_ext_addr(struct spi_nor *nor, u32 addr) ++{ ++ bool ext_addr; ++ int ret; ++ u8 cmd; ++ ++ if (!(nor->flags & SNOR_F_4B_EXT_ADDR)) ++ return 0; ++ ++ ext_addr = !!(addr & 0xff000000); ++ if (nor->ext_addr == ext_addr) ++ return 0; ++ ++ cmd = ext_addr ? SPINOR_OP_EN4B : SPINOR_OP_EX4B; ++ write_enable(nor); ++ ret = nor->write_reg(nor, cmd, NULL, 0); ++ if (ret) ++ return ret; ++ ++ cmd = 0; ++ ret = nor->write_reg(nor, SPINOR_OP_WREAR, &cmd, 1); ++ if (ret) ++ return ret; ++ ++ nor->addr_width = 3 + ext_addr; ++ nor->ext_addr = ext_addr; ++ write_disable(nor); ++ return 0; ++} ++ + /* + * Erase an address range on the nor chip. The address range may extend + * one or more erase sectors. Return an error is there is a problem erasing. +@@ -492,6 +535,10 @@ static int spi_nor_erase(struct mtd_info + if (ret) + return ret; + ++ ret = spi_nor_check_ext_addr(nor, addr + len); ++ if (ret) ++ return ret; ++ + /* whole-chip erase? 
*/ + if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { + unsigned long timeout; +@@ -542,6 +589,7 @@ static int spi_nor_erase(struct mtd_info + write_disable(nor); + + erase_err: ++ spi_nor_check_ext_addr(nor, 0); + spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); + + instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE; +@@ -834,7 +882,9 @@ static int spi_nor_lock(struct mtd_info + if (ret) + return ret; + ++ spi_nor_check_ext_addr(nor, ofs + len); + ret = nor->flash_lock(nor, ofs, len); ++ spi_nor_check_ext_addr(nor, 0); + + spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK); + return ret; +@@ -849,7 +899,9 @@ static int spi_nor_unlock(struct mtd_inf + if (ret) + return ret; + ++ spi_nor_check_ext_addr(nor, ofs + len); + ret = nor->flash_unlock(nor, ofs, len); ++ spi_nor_check_ext_addr(nor, 0); + + spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK); + return ret; +@@ -1164,7 +1216,7 @@ static const struct flash_info spi_nor_i + { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, +- { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_READ_OP) }, + { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, + +@@ -1220,6 +1272,9 @@ static int spi_nor_read(struct mtd_info + if (ret) + return ret; + ++ if (nor->flags & SNOR_F_4B_EXT_ADDR) ++ nor->addr_width = 4; ++ + while (len) { + loff_t addr = from; + +@@ -1244,6 +1299,18 @@ static int spi_nor_read(struct mtd_info + ret = 0; + + read_err: ++ if (nor->flags & SNOR_F_4B_EXT_ADDR) { ++ u8 val = 0; ++ ++ if ((from + len) & 0xff000000) { ++ write_enable(nor); ++ nor->write_reg(nor, SPINOR_OP_WREAR, &val, 1); ++ write_disable(nor); ++ } ++ ++ nor->addr_width = 3; ++ } ++ + spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); + return ret; + } +@@ -1345,6 +1412,10 @@ static int spi_nor_write(struct mtd_info + if (ret) + return ret; + ++ ret = spi_nor_check_ext_addr(nor, to + len); ++ if (ret < 0) ++ return ret; ++ + for (i = 0; i < len; ) { + ssize_t written; + loff_t addr = to + i; +@@ -1392,6 +1463,7 @@ static int spi_nor_write(struct mtd_info + } + + write_err: ++ spi_nor_check_ext_addr(nor, 0); + spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); + return ret; + } +@@ -2808,8 +2880,10 @@ int spi_nor_scan(struct spi_nor *nor, co + } else if (mtd->size > 0x1000000) { + /* enable 4-byte addressing if the device exceeds 16MiB */ + nor->addr_width = 4; +- if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || +- info->flags & SPI_NOR_4B_OPCODES) ++ if (info->flags & SPI_NOR_4B_READ_OP) ++ spi_nor_set_4byte_read(nor, info); ++ else if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || ++ info->flags & SPI_NOR_4B_OPCODES) + spi_nor_set_4byte_opcodes(nor, info); + else + set_4byte(nor, info, 1); +--- a/include/linux/mtd/spi-nor.h ++++ b/include/linux/mtd/spi-nor.h +@@ -102,6 +102,7 @@ + /* Used for Macronix and Winbond flashes. */ + #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ + #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ ++#define SPINOR_OP_WREAR 0xc5 /* Write extended address register */ + + /* Used for Spansion flashes only. 
*/ + #define SPINOR_OP_BRWR 0x17 /* Bank register write */ +@@ -229,6 +230,7 @@ enum spi_nor_option_flags { + SNOR_F_S3AN_ADDR_DEFAULT = BIT(3), + SNOR_F_READY_XSR_RDY = BIT(4), + SNOR_F_USE_CLSR = BIT(5), ++ SNOR_F_4B_EXT_ADDR = BIT(6), + }; + + /** +@@ -280,6 +282,7 @@ struct spi_nor { + enum spi_nor_protocol reg_proto; + bool sst_write_second; + u32 flags; ++ u8 ext_addr; + u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; + + int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); diff --git a/target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch b/target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch new file mode 100644 index 0000000000..b20f081afe --- /dev/null +++ b/target/linux/ramips/patches-4.14/0054-mtd-add-chunked-read-io-to-m25p80.patch @@ -0,0 +1,124 @@ +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -1468,6 +1468,67 @@ write_err: + return ret; + } + ++static int spi_nor_chunked_write(struct mtd_info *mtd, loff_t _to, size_t _len, ++ size_t *_retlen, const u_char *_buf) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ u32 addr_width = nor->addr_width + !!(nor->flags & SNOR_F_4B_EXT_ADDR); ++ int chunk_size; ++ int retlen = 0; ++ int ret; ++ ++ chunk_size = nor->chunk_size; ++ if (!chunk_size) ++ chunk_size = _len; ++ ++ if (addr_width > 3) ++ chunk_size -= addr_width - 3; ++ ++ while (retlen < _len) { ++ size_t len = min_t(int, chunk_size, _len - retlen); ++ const u_char *buf = _buf + retlen; ++ loff_t to = _to + retlen; ++ ++ if (nor->flags & SNOR_F_SST) ++ ret = sst_write(mtd, to, len, &retlen, buf); ++ else ++ ret = spi_nor_write(mtd, to, len, &retlen, buf); ++ if (ret) ++ return ret; ++ } ++ ++ *_retlen += retlen; ++ return 0; ++} ++ ++static int spi_nor_chunked_read(struct mtd_info *mtd, loff_t _from, size_t _len, ++ size_t *_retlen, u_char *_buf) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ int chunk_size; ++ int ret; ++ ++ chunk_size = nor->chunk_size; ++ if (!chunk_size) ++ chunk_size = _len; ++ ++ *_retlen = 0; ++ while (*_retlen < _len) { ++ size_t len = min_t(int, chunk_size, _len - *_retlen); ++ u_char *buf = _buf + *_retlen; ++ loff_t from = _from + *_retlen; ++ int retlen = 0; ++ ++ ret = spi_nor_read(mtd, from, len, &retlen, buf); ++ if (ret) ++ return ret; ++ ++ *_retlen += retlen; ++ } ++ ++ return 0; ++} ++ + /** + * macronix_quad_enable() - set QE bit in Status Register. 
+ * @nor: pointer to a 'struct spi_nor' +@@ -2826,10 +2887,12 @@ int spi_nor_scan(struct spi_nor *nor, co + } + + /* sst nor chips use AAI word program */ +- if (info->flags & SST_WRITE) ++ if (info->flags & SST_WRITE) { + mtd->_write = sst_write; +- else ++ nor->flags |= SNOR_F_SST; ++ } else { + mtd->_write = spi_nor_write; ++ } + + if (info->flags & USE_FSR) + nor->flags |= SNOR_F_USE_FSR; +@@ -2848,11 +2911,20 @@ int spi_nor_scan(struct spi_nor *nor, co + mtd->writebufsize = nor->page_size; + + if (np) { ++ u32 val; ++ + /* If we were instantiated by DT, use it */ + if (of_property_read_bool(np, "m25p,fast-read")) + params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST; + else + params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; ++ ++ if (!of_property_read_u32(np, "m25p,chunked-io", &val)) { ++ dev_info(dev, "using chunked io (size=%d)\n", val); ++ mtd->_read = spi_nor_chunked_read; ++ mtd->_write = spi_nor_chunked_write; ++ nor->chunk_size = val; ++ } + } else { + /* If we weren't instantiated by DT, default to fast-read */ + params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST; +--- a/include/linux/mtd/spi-nor.h ++++ b/include/linux/mtd/spi-nor.h +@@ -231,6 +231,7 @@ enum spi_nor_option_flags { + SNOR_F_READY_XSR_RDY = BIT(4), + SNOR_F_USE_CLSR = BIT(5), + SNOR_F_4B_EXT_ADDR = BIT(6), ++ SNOR_F_SST = BIT(7), + }; + + /** +@@ -272,6 +273,7 @@ struct spi_nor { + struct mutex lock; + struct device *dev; + u32 page_size; ++ size_t chunk_size; + u8 addr_width; + u8 erase_opcode; + u8 read_opcode; diff --git a/target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch b/target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch new file mode 100644 index 0000000000..0e09e1d4e0 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0069-awake-rt305x-dwc2-controller.patch @@ -0,0 +1,15 @@ +--- a/drivers/usb/dwc2/platform.c ++++ b/drivers/usb/dwc2/platform.c +@@ -406,6 +406,12 @@ static int dwc2_driver_probe(struct plat + if (retval) + return retval; + ++ /* Enable USB port before any regs access */ ++ if (dwc2_readl(hsotg->regs + PCGCTL) & 0x0f) { ++ dwc2_writel(0x00, hsotg->regs + PCGCTL); ++ /* TODO: mdelay(25) here? vendor driver don't use it */ ++ } ++ + retval = dwc2_get_dr_mode(hsotg); + if (retval) + goto error; diff --git a/target/linux/ramips/patches-4.14/0070-weak_reordering.patch b/target/linux/ramips/patches-4.14/0070-weak_reordering.patch new file mode 100644 index 0000000000..8f8c42c47f --- /dev/null +++ b/target/linux/ramips/patches-4.14/0070-weak_reordering.patch @@ -0,0 +1,10 @@ +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -57,6 +57,7 @@ choice + select COMMON_CLK + select CLKSRC_MIPS_GIC + select HW_HAS_PCI ++ select WEAK_REORDERING_BEYOND_LLSC + endchoice + + choice diff --git a/target/linux/ramips/patches-4.14/0090-ethernet.patch b/target/linux/ramips/patches-4.14/0090-ethernet.patch new file mode 100644 index 0000000000..20f69fcc01 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0090-ethernet.patch @@ -0,0 +1,41 @@ +From b6f779ea9c329451b89404583b45b9eb00155b32 Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Wed, 18 Nov 2015 03:58:26 +0100 +Subject: [PATCH 510/513] net-next: mediatek: add Kconfig and Makefile + +This patch adds the Makefile and Kconfig required to make the driver build. 
+ +Signed-off-by: John Crispin <blogic@openwrt.org> +Signed-off-by: Felix Fietkau <nbd@nbd.name> +Signed-off-by: Michael Lee <igvtee@gmail.com> +--- + drivers/net/ethernet/Kconfig | 1 + + drivers/net/ethernet/Makefile | 1 + + drivers/net/ethernet/mtk/Kconfig | 62 ++++++++++++++++++++++++++++++++ + drivers/net/ethernet/mtk/Makefile | 20 +++++++++++ + 4 files changed, 84 insertions(+) + create mode 100644 drivers/net/ethernet/mtk/Kconfig + create mode 100644 drivers/net/ethernet/mtk/Makefile + +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -110,7 +110,7 @@ config LANTIQ_ETOP + Support for the MII0 inside the Lantiq SoC + + source "drivers/net/ethernet/marvell/Kconfig" +-source "drivers/net/ethernet/mediatek/Kconfig" ++source "drivers/net/ethernet/mtk/Kconfig" + source "drivers/net/ethernet/mellanox/Kconfig" + source "drivers/net/ethernet/micrel/Kconfig" + source "drivers/net/ethernet/microchip/Kconfig" +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -51,7 +51,7 @@ obj-$(CONFIG_JME) += jme.o + obj-$(CONFIG_KORINA) += korina.o + obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o + obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ +-obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ ++obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mtk/ + obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ + obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ + obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ diff --git a/target/linux/ramips/patches-4.14/0098-disable_cm.patch b/target/linux/ramips/patches-4.14/0098-disable_cm.patch new file mode 100644 index 0000000000..bc00619dff --- /dev/null +++ b/target/linux/ramips/patches-4.14/0098-disable_cm.patch @@ -0,0 +1,19 @@ +--- a/arch/mips/kernel/mips-cm.c ++++ b/arch/mips/kernel/mips-cm.c +@@ -237,6 +237,7 @@ int mips_cm_probe(void) + + /* disable CM regions */ + write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR); ++ /* + write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK); +@@ -244,7 +245,7 @@ int mips_cm_probe(void) + write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK); + write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR); + write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK); +- ++*/ + /* probe for an L2-only sync region */ + mips_cm_probe_l2sync(); + diff --git a/target/linux/ramips/patches-4.14/0099-pci-mt7620.patch b/target/linux/ramips/patches-4.14/0099-pci-mt7620.patch new file mode 100644 index 0000000000..997fb6a2b3 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0099-pci-mt7620.patch @@ -0,0 +1,10 @@ +--- a/arch/mips/pci/pci-mt7620.c ++++ b/arch/mips/pci/pci-mt7620.c +@@ -33,7 +33,6 @@ + #define RALINK_GPIOMODE 0x60 + + #define PPLL_CFG1 0x9c +-#define PDRV_SW_SET BIT(23) + + #define PPLL_DRV 0xa0 + #define PDRV_SW_SET (1<<31) diff --git a/target/linux/ramips/patches-4.14/0100-prom_fixes.patch b/target/linux/ramips/patches-4.14/0100-prom_fixes.patch new file mode 100644 index 0000000000..91ac3b22c4 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0100-prom_fixes.patch @@ -0,0 +1,66 @@ +From 67b7bff0fd364c194e653f69baa623ba2141bd4c Mon Sep 17 00:00:00 2001 +From: John Crispin <blogic@openwrt.org> +Date: Mon, 4 Aug 2014 18:46:02 +0200 +Subject: [PATCH 07/53] MIPS: ralink: copy the commandline from the devicetree + +Signed-off-by: John Crispin <blogic@openwrt.org> +--- + arch/mips/ralink/of.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -3,7 +3,7 @@ + * under the terms of the GNU General Public 
License version 2 as published + * by the Free Software Foundation. + * +- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> ++ * Copyright (C) 2008-2014 Imre Kaloz <kaloz@openwrt.org> + * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2013 John Crispin <john@phrozen.org> + */ +@@ -66,6 +66,19 @@ static int __init early_init_dt_find_mem + return 0; + } + ++static int chosen_dtb; ++ ++static int __init early_init_dt_find_chosen(unsigned long node, const char *uname, ++ int depth, void *data) ++{ ++ if (depth == 1 && !strcmp(uname, "chosen")) ++ chosen_dtb = 1; ++ ++ return 0; ++} ++ ++extern struct boot_param_header __image_dtb; ++ + void __init plat_mem_setup(void) + { + void *dtb = NULL; +@@ -82,7 +95,11 @@ void __init plat_mem_setup(void) + else if (__dtb_start != __dtb_end) + dtb = (void *)__dtb_start; + +- __dt_setup_arch(dtb); ++ __dt_setup_arch(&__image_dtb); ++ ++ of_scan_flat_dt(early_init_dt_find_chosen, NULL); ++ if (chosen_dtb) ++ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); + + strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); + +--- a/arch/mips/kernel/head.S ++++ b/arch/mips/kernel/head.S +@@ -85,6 +85,9 @@ EXPORT(__image_cmdline) + .fill 0x400 + #endif /* CONFIG_IMAGE_CMDLINE_HACK */ + ++ .ascii "OWRTDTB:" ++ EXPORT(__image_dtb) ++ .fill 0x4000 + __REF + + NESTED(kernel_entry, 16, sp) # kernel entry point diff --git a/target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch b/target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch new file mode 100644 index 0000000000..865ae79f43 --- /dev/null +++ b/target/linux/ramips/patches-4.14/0200-linkit_bootstrap.patch @@ -0,0 +1,97 @@ +--- a/drivers/misc/Makefile ++++ b/drivers/misc/Makefile +@@ -57,6 +57,7 @@ obj-$(CONFIG_CXL_BASE) += cxl/ + obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o + obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o + obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o ++obj-$(CONFIG_SOC_MT7620) += linkit.o + + lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o + lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o +--- /dev/null ++++ b/drivers/misc/linkit.c +@@ -0,0 +1,84 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * publishhed by the Free Software Foundation. 
++ * ++ * Copyright (C) 2015 John Crispin <blogic@openwrt.org> ++ */ ++ ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/of.h> ++#include <linux/mtd/mtd.h> ++#include <linux/gpio.h> ++ ++#define LINKIT_LATCH_GPIO 11 ++ ++struct linkit_hw_data { ++ char board[16]; ++ char rev[16]; ++}; ++ ++static void sanify_string(char *s) ++{ ++ int i; ++ ++ for (i = 0; i < 15; i++) ++ if (s[i] <= 0x20) ++ s[i] = '\0'; ++ s[15] = '\0'; ++} ++ ++static int linkit_probe(struct platform_device *pdev) ++{ ++ struct linkit_hw_data hw; ++ struct mtd_info *mtd; ++ size_t retlen; ++ int ret; ++ ++ mtd = get_mtd_device_nm("factory"); ++ if (IS_ERR(mtd)) ++ return PTR_ERR(mtd); ++ ++ ret = mtd_read(mtd, 0x400, sizeof(hw), &retlen, (u_char *) &hw); ++ put_mtd_device(mtd); ++ ++ sanify_string(hw.board); ++ sanify_string(hw.rev); ++ ++ dev_info(&pdev->dev, "Version : %s\n", hw.board); ++ dev_info(&pdev->dev, "Revision : %s\n", hw.rev); ++ ++ if (!strcmp(hw.board, "LINKITS7688")) { ++ dev_info(&pdev->dev, "setting up bootstrap latch\n"); ++ ++ if (devm_gpio_request(&pdev->dev, LINKIT_LATCH_GPIO, "bootstrap")) { ++ dev_err(&pdev->dev, "failed to setup bootstrap gpio\n"); ++ return -1; ++ } ++ gpio_direction_output(LINKIT_LATCH_GPIO, 0); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id linkit_match[] = { ++ { .compatible = "mediatek,linkit" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, linkit_match); ++ ++static struct platform_driver linkit_driver = { ++ .probe = linkit_probe, ++ .driver = { ++ .name = "mtk-linkit", ++ .owner = THIS_MODULE, ++ .of_match_table = linkit_match, ++ }, ++}; ++ ++int __init linkit_init(void) ++{ ++ return platform_driver_register(&linkit_driver); ++} ++late_initcall_sync(linkit_init); diff --git a/target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch b/target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch new file mode 100644 index 0000000000..991e19b6df --- /dev/null +++ b/target/linux/ramips/patches-4.14/100-mt7621-core-detect-hack.patch @@ -0,0 +1,61 @@ +There is a variant of MT7621 which contains only one CPU core instead of 2. +This is not reflected in the config register, so the kernel detects more +physical cores, which leads to a hang on SMP bringup. +Add a hack to detect missing cores. 
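The probe works by inspecting the per-VPE "launch" area that the boot loader fills in, instead of trusting the core count the SoC advertises: core 0 is always taken as present, and any other core only counts if its launch slot reached the ready state without ever having been started. A rough standalone restatement of that test is sketched here; the flag values and the struct layout are placeholders, and only the two-VPEs-per-core stride and the flag logic are taken from the hunk below.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag values; the real ones live in asm/mips-boards/launch.h. */
#define LAUNCH_FREADY	0x1	/* VPE reached the boot-loader spin loop */
#define LAUNCH_FGO	0x2	/* VPE has been told to start */
#define LAUNCH_FGONE	0x4	/* VPE already left the launch code */

struct launch_slot {		/* stand-in for struct cpulaunch */
	unsigned int flags;
};

/* Mirrors the presence test added to mt7621.c below: core 0 is always there,
 * other cores must be parked in the launch loop and not yet released. */
static bool core_present(const struct launch_slot *launch, int core)
{
	if (!core)
		return true;
	launch += core * 2;			/* 2 VPEs per core */
	if (!(launch->flags & LAUNCH_FREADY))
		return false;			/* never checked in: core is missing */
	if (launch->flags & (LAUNCH_FGO | LAUNCH_FGONE))
		return false;
	return true;
}

int main(void)
{
	struct launch_slot area[4] = {
		{ LAUNCH_FREADY }, { LAUNCH_FREADY },	/* core 0 */
		{ 0 }, { 0 },				/* core 1 never came up */
	};

	printf("core0=%d core1=%d\n", core_present(area, 0), core_present(area, 1));
	return 0;
}

Generic SMP bringup only sees this through the weak plat_cpu_core_present() hook added to smp-cps.c below, so platforms that do not override it keep the current behaviour.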
+ +Signed-off-by: Felix Fietkau <nbd@nbd.name> + +--- a/arch/mips/kernel/smp-cps.c ++++ b/arch/mips/kernel/smp-cps.c +@@ -47,6 +47,11 @@ static unsigned core_vpe_count(unsigned + return mips_cps_numvps(cluster, core); + } + ++bool __weak plat_cpu_core_present(int core) ++{ ++ return true; ++} ++ + static void __init cps_smp_setup(void) + { + unsigned int nclusters, ncores, nvpes, core_vpes; +@@ -64,6 +69,8 @@ static void __init cps_smp_setup(void) + + ncores = mips_cps_numcores(cl); + for (c = 0; c < ncores; c++) { ++ if (!plat_cpu_core_present(c)) ++ continue; + core_vpes = core_vpe_count(cl, c); + + if (c > 0) +--- a/arch/mips/ralink/mt7621.c ++++ b/arch/mips/ralink/mt7621.c +@@ -15,6 +15,7 @@ + #include <asm/mips-cps.h> + #include <asm/mach-ralink/ralink_regs.h> + #include <asm/mach-ralink/mt7621.h> ++#include <asm/mips-boards/launch.h> + + #include <pinmux.h> + +@@ -162,6 +163,20 @@ void __init ralink_of_remap(void) + panic("Failed to remap core resources"); + } + ++bool plat_cpu_core_present(int core) ++{ ++ struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); ++ ++ if (!core) ++ return true; ++ launch += core * 2; /* 2 VPEs per core */ ++ if (!(launch->flags & LAUNCH_FREADY)) ++ return false; ++ if (launch->flags & (LAUNCH_FGO | LAUNCH_FGONE)) ++ return false; ++ return true; ++} ++ + void prom_soc_init(struct ralink_soc_info *soc_info) + { + void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); diff --git a/target/linux/ramips/patches-4.14/101-mt7621-timer.patch b/target/linux/ramips/patches-4.14/101-mt7621-timer.patch new file mode 100644 index 0000000000..ea8be56942 --- /dev/null +++ b/target/linux/ramips/patches-4.14/101-mt7621-timer.patch @@ -0,0 +1,98 @@ +--- a/arch/mips/ralink/mt7621.c ++++ b/arch/mips/ralink/mt7621.c +@@ -16,6 +16,7 @@ + #include <asm/mach-ralink/ralink_regs.h> + #include <asm/mach-ralink/mt7621.h> + #include <asm/mips-boards/launch.h> ++#include <asm/delay.h> + + #include <pinmux.h> + +@@ -177,6 +178,58 @@ bool plat_cpu_core_present(int core) + return true; + } + ++#define LPS_PREC 8 ++/* ++* Re-calibration lpj(loop-per-jiffy). ++* (derived from kernel/calibrate.c) ++*/ ++static int udelay_recal(void) ++{ ++ unsigned int i, lpj = 0; ++ unsigned long ticks, loopbit; ++ int lps_precision = LPS_PREC; ++ ++ lpj = (1<<12); ++ ++ while ((lpj <<= 1) != 0) { ++ /* wait for "start of" clock tick */ ++ ticks = jiffies; ++ while (ticks == jiffies) ++ /* nothing */; ++ ++ /* Go .. 
*/ ++ ticks = jiffies; ++ __delay(lpj); ++ ticks = jiffies - ticks; ++ if (ticks) ++ break; ++ } ++ ++ /* ++ * Do a binary approximation to get lpj set to ++ * equal one clock (up to lps_precision bits) ++ */ ++ lpj >>= 1; ++ loopbit = lpj; ++ while (lps_precision-- && (loopbit >>= 1)) { ++ lpj |= loopbit; ++ ticks = jiffies; ++ while (ticks == jiffies) ++ /* nothing */; ++ ticks = jiffies; ++ __delay(lpj); ++ if (jiffies != ticks) /* longer than 1 tick */ ++ lpj &= ~loopbit; ++ } ++ printk(KERN_INFO "%d CPUs re-calibrate udelay(lpj = %d)\n", NR_CPUS, lpj); ++ ++ for(i=0; i< NR_CPUS; i++) ++ cpu_data[i].udelay_val = lpj; ++ ++ return 0; ++} ++device_initcall(udelay_recal); ++ + void prom_soc_init(struct ralink_soc_info *soc_info) + { + void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -58,6 +58,7 @@ choice + select CLKSRC_MIPS_GIC + select HW_HAS_PCI + select WEAK_REORDERING_BEYOND_LLSC ++ select GENERIC_CLOCKEVENTS_BROADCAST + endchoice + + choice +--- a/arch/mips/ralink/timer-gic.c ++++ b/arch/mips/ralink/timer-gic.c +@@ -12,6 +12,7 @@ + #include <linux/of.h> + #include <linux/clk-provider.h> + #include <linux/clocksource.h> ++#include <asm/time.h> + + #include "common.h" + +@@ -19,6 +20,8 @@ void __init plat_time_init(void) + { + ralink_of_remap(); + ++ mips_hpt_frequency = 880000000 / 2; ++ + of_clk_init(NULL); + timer_probe(); + } diff --git a/target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch b/target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch new file mode 100644 index 0000000000..408fc13e7a --- /dev/null +++ b/target/linux/ramips/patches-4.14/302-spi-nor-add-gd25q512.patch @@ -0,0 +1,14 @@ +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -1050,6 +1050,11 @@ static const struct flash_info spi_nor_i + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, ++ { ++ "gd25q512", INFO(0xc84020, 0, 64 * 1024, 1024, ++ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | ++ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4B_OPCODES) ++ }, + + /* Intel/Numonyx -- xxxs33b */ + { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, diff --git a/target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch b/target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch new file mode 100644 index 0000000000..7b13e68d8f --- /dev/null +++ b/target/linux/ramips/patches-4.14/303-spi-nor-enable-4B-opcodes-for-mx66l51235l.patch @@ -0,0 +1,64 @@ +--- a/drivers/mtd/spi-nor/spi-nor.c ++++ b/drivers/mtd/spi-nor/spi-nor.c +@@ -1083,7 +1083,7 @@ static const struct flash_info spi_nor_i + { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, + { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, +- { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, +@@ -1947,7 +1947,11 @@ static int spi_nor_read_sfdp(struct spi_ 
+ nor->read_dummy = 8; + + while (len) { +- ret = nor->read(nor, addr, len, (u8 *)buf); ++ size_t _len = len; ++ ++ if (nor->chunk_size) ++ _len = min(len, nor->chunk_size); ++ ret = nor->read(nor, addr, _len, (u8 *)buf); + if (!ret || ret > len) { + ret = -EIO; + goto read_err; +@@ -2791,6 +2795,7 @@ int spi_nor_scan(struct spi_nor *nor, co + struct device *dev = nor->dev; + struct mtd_info *mtd = &nor->mtd; + struct device_node *np = spi_nor_get_flash_node(nor); ++ u32 val; + int ret; + int i; + +@@ -2803,6 +2808,9 @@ int spi_nor_scan(struct spi_nor *nor, co + nor->read_proto = SNOR_PROTO_1_1_1; + nor->write_proto = SNOR_PROTO_1_1_1; + ++ if (np && !of_property_read_u32(np, "m25p,chunked-io", &val)) ++ nor->chunk_size = val; ++ + if (name) + info = spi_nor_match_id(name); + /* Try to auto-detect if chip name wasn't specified or not found */ +@@ -2916,19 +2924,16 @@ int spi_nor_scan(struct spi_nor *nor, co + mtd->writebufsize = nor->page_size; + + if (np) { +- u32 val; +- + /* If we were instantiated by DT, use it */ + if (of_property_read_bool(np, "m25p,fast-read")) + params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST; + else + params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; + +- if (!of_property_read_u32(np, "m25p,chunked-io", &val)) { +- dev_info(dev, "using chunked io (size=%d)\n", val); ++ if (nor->chunk_size) { ++ dev_info(dev, "using chunked io (size=%d)\n", nor->chunk_size); + mtd->_read = spi_nor_chunked_read; + mtd->_write = spi_nor_chunked_write; +- nor->chunk_size = val; + } + } else { + /* If we weren't instantiated by DT, default to fast-read */ diff --git a/target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch b/target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch new file mode 100644 index 0000000000..906f68adb5 --- /dev/null +++ b/target/linux/ramips/patches-4.14/998-mt7621-needs-jiffies.patch @@ -0,0 +1,10 @@ +--- a/arch/mips/ralink/mt7621.c ++++ b/arch/mips/ralink/mt7621.c +@@ -9,6 +9,7 @@ + + #include <linux/kernel.h> + #include <linux/init.h> ++#include <linux/jiffies.h> + + #include <asm/mipsregs.h> + #include <asm/smp-ops.h> diff --git a/target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch b/target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch new file mode 100644 index 0000000000..3310a6bdba --- /dev/null +++ b/target/linux/ramips/patches-4.14/999-fix-pci-init-mt7620.patch @@ -0,0 +1,21 @@ +--- a/arch/mips/pci/pci-mt7620.c ++++ b/arch/mips/pci/pci-mt7620.c +@@ -35,6 +35,7 @@ + #define PPLL_CFG1 0x9c + + #define PPLL_DRV 0xa0 ++#define PPLL_LD (1<<23) + #define PDRV_SW_SET (1<<31) + #define LC_CKDRVPD (1<<19) + #define LC_CKDRVOHZ (1<<18) +@@ -242,8 +243,8 @@ static int mt7620_pci_hw_init(struct pla + rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); + mdelay(100); + +- if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) { +- dev_err(&pdev->dev, "MT7620 PPLL unlock\n"); ++ if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) { ++ dev_err(&pdev->dev, "MT7620 PPLL is unlocked, aborting init\n"); + reset_control_assert(rstpcie0); + rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); + return -1; diff --git a/target/linux/ramips/rt288x/config-4.14 b/target/linux/ramips/rt288x/config-4.14 new file mode 100644 index 0000000000..724cbeb99a --- /dev/null +++ b/target/linux/ramips/rt288x/config-4.14 @@ -0,0 +1,229 @@ +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +# CONFIG_ARCH_HAS_SG_CHAIN is 
not set +# CONFIG_ARCH_HAS_STRICT_KERNEL_RWX is not set +# CONFIG_ARCH_HAS_STRICT_MODULE_RWX is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_ARCH_MMAP_RND_BITS_MAX=15 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15 +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_BLK_MQ_PCI=y +CONFIG_CEVT_R4K=y +# CONFIG_CEVT_SYSTICK_QUIRK is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMDLINE="rootfstype=squashfs,jffs2" +CONFIG_CMDLINE_BOOL=y +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_CPU_GENERIC_DUMP_TLB=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_RIXI=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32=y +# CONFIG_CPU_MIPS32_R1 is not set +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y +CONFIG_CPU_R4K_CACHE_TLB=y +CONFIG_CPU_R4K_FPU=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CPU_SUPPORTS_MSA=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is not set +# CONFIG_DRM_LIB_RANDOM is not set +# CONFIG_DTB_RT2880_EVAL is not set +CONFIG_DTB_RT_NONE=y +CONFIG_DTC=y +CONFIG_EARLY_PRINTK=y +CONFIG_EXPORTFS=y +CONFIG_FIXED_PHY=y +CONFIG_FUTEX_PI=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_RALINK=y +CONFIG_GPIO_SYSFS=y +# CONFIG_GRO_CELLS is not set +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HW_HAS_PCI=y +CONFIG_HZ_PERIODIC=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IP17XX_PHY=y +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_INTC=y 
+CONFIG_IRQ_MIPS_CPU=y +CONFIG_IRQ_WORK=y +CONFIG_LIBFDT=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_DEVICE=y +CONFIG_MIPS=y +CONFIG_MIPS_ASID_BITS=8 +CONFIG_MIPS_ASID_SHIFT=0 +CONFIG_MIPS_CLOCK_VSYSCALL=y +# CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND is not set +# CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_MIPS_CMDLINE_FROM_DTB=y +# CONFIG_MIPS_ELF_APPENDED_DTB is not set +# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set +CONFIG_MIPS_L1_CACHE_SHIFT=4 +CONFIG_MIPS_L1_CACHE_SHIFT_4=y +# CONFIG_MIPS_MACHINE is not set +# CONFIG_MIPS_NO_APPENDED_DTB is not set +CONFIG_MIPS_RAW_APPENDED_DTB=y +CONFIG_MIPS_SPRAM=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_LZMA_FW=y +CONFIG_MTD_SPLIT_UIMAGE_FW=y +CONFIG_MTD_SPLIT_WRGG_FW=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_KM=y +CONFIG_NET_MEDIATEK_MDIO=y +CONFIG_NET_MEDIATEK_MDIO_RT2880=y +CONFIG_NET_MEDIATEK_RT2880=y +CONFIG_NET_MEDIATEK_SOC=y +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NLS=m +CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y +# CONFIG_NO_IOPORT_MAP is not set +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DRIVERS_LEGACY=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +# CONFIG_PHY_RALINK_USB is not set +CONFIG_PINCTRL=y +CONFIG_PINCTRL_RT2880=y +# CONFIG_PINCTRL_SINGLE is not set +CONFIG_RALINK=y +CONFIG_RALINK_WDT=y +# CONFIG_RCU_NEED_SEGCBLIST is not set +# CONFIG_RCU_STALL_COMMON is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250_FSL is not set +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_SOC_MT7620 is not set +# CONFIG_SOC_MT7621 is not set +CONFIG_SOC_RT288X=y +# CONFIG_SOC_RT305X is not set +# CONFIG_SOC_RT3883 is not set +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MT7621 is not set +CONFIG_SPI_RT2880=y +CONFIG_SRCU=y +CONFIG_SWCONFIG=y +CONFIG_SWPHY=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y +CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y +CONFIG_SYS_SUPPORTS_MIPS16=y +CONFIG_THIN_ARCHIVES=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TINY_SRCU=y +CONFIG_USB=m +CONFIG_USB_COMMON=m +CONFIG_USB_EHCI_HCD=m +CONFIG_USB_EHCI_HCD_PLATFORM=m +CONFIG_USB_OHCI_HCD=m +CONFIG_USB_OHCI_HCD_PLATFORM=m +CONFIG_USB_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_WATCHDOG_CORE=y diff --git a/target/linux/ramips/rt305x/config-4.14 b/target/linux/ramips/rt305x/config-4.14 new file mode 100644 index 0000000000..ae843f3720 --- /dev/null +++ b/target/linux/ramips/rt305x/config-4.14 @@ -0,0 +1,198 @@ +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +# CONFIG_ARCH_HAS_SG_CHAIN is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_CEVT_R4K=y 
+CONFIG_CEVT_SYSTICK_QUIRK=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKEVT_RT3352=y +CONFIG_CLKSRC_MMIO=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMDLINE="rootfstype=squashfs,jffs2" +CONFIG_CMDLINE_BOOL=y +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_CPU_GENERIC_DUMP_TLB=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_RIXI=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32=y +# CONFIG_CPU_MIPS32_R1 is not set +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y +CONFIG_CPU_R4K_CACHE_TLB=y +CONFIG_CPU_R4K_FPU=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CPU_SUPPORTS_MSA=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_DEBUG_PINCTRL=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_DTB_RT305X_EVAL is not set +CONFIG_DTB_RT_NONE=y +CONFIG_DTC=y +CONFIG_EARLY_PRINTK=y +CONFIG_FIXED_PHY=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_RALINK=y +CONFIG_GPIO_SYSFS=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HZ_PERIODIC=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_INTC=y +CONFIG_IRQ_MIPS_CPU=y +CONFIG_IRQ_WORK=y +CONFIG_LIBFDT=y +CONFIG_MDIO_BOARDINFO=y +CONFIG_MIPS=y +CONFIG_MIPS_ASID_BITS=8 +CONFIG_MIPS_ASID_SHIFT=0 +CONFIG_MIPS_CLOCK_VSYSCALL=y +# CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND is not set +# CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_MIPS_CMDLINE_FROM_DTB=y +# CONFIG_MIPS_ELF_APPENDED_DTB is not set +# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_MIPS_MACHINE is not set +CONFIG_MIPS_NO_APPENDED_DTB=y +# CONFIG_MIPS_RAW_APPENDED_DTB is not set +CONFIG_MIPS_SPRAM=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=16384 +CONFIG_MTD_SPLIT_FIRMWARE=y 
+CONFIG_MTD_SPLIT_SEAMA_FW=y +CONFIG_MTD_SPLIT_UIMAGE_FW=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_KM=y +CONFIG_NET_MEDIATEK_ESW_RT3050=y +CONFIG_NET_MEDIATEK_RT3050=y +CONFIG_NET_MEDIATEK_SOC=y +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y +# CONFIG_NO_IOPORT_MAP is not set +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y +CONFIG_PCI_DRIVERS_LEGACY=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +CONFIG_PHY_RALINK_USB=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_RT2880=y +# CONFIG_PINCTRL_SINGLE is not set +CONFIG_RALINK=y +# CONFIG_RALINK_ILL_ACC is not set +CONFIG_RALINK_WDT=y +# CONFIG_RCU_STALL_COMMON is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250_FSL is not set +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_SOC_MT7620 is not set +# CONFIG_SOC_MT7621 is not set +# CONFIG_SOC_RT288X is not set +CONFIG_SOC_RT305X=y +# CONFIG_SOC_RT3883 is not set +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MT7621 is not set +CONFIG_SPI_RT2880=y +CONFIG_SRCU=y +CONFIG_SWCONFIG=y +CONFIG_SWPHY=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y +CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y +CONFIG_SYS_SUPPORTS_MIPS16=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_USB_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_WATCHDOG_CORE=y diff --git a/target/linux/ramips/rt3883/config-4.14 b/target/linux/ramips/rt3883/config-4.14 new file mode 100644 index 0000000000..2e2550a936 --- /dev/null +++ b/target/linux/ramips/rt3883/config-4.14 @@ -0,0 +1,231 @@ +CONFIG_AR8216_PHY=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_CLOCKSOURCE_DATA=y +CONFIG_ARCH_DISCARD_MEMBLOCK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +# CONFIG_ARCH_HAS_GCOV_PROFILE_ALL is not set +CONFIG_ARCH_HAS_RESET_CONTROLLER=y +# CONFIG_ARCH_HAS_SG_CHAIN is not set +# CONFIG_ARCH_HAS_STRICT_KERNEL_RWX is not set +# CONFIG_ARCH_HAS_STRICT_MODULE_RWX is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_ARCH_MMAP_RND_BITS_MAX=15 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=15 +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX is not set +# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +# CONFIG_ARCH_WANTS_THP_SWAP is not set +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_BLK_MQ_PCI=y +CONFIG_CEVT_R4K=y +# CONFIG_CEVT_SYSTICK_QUIRK is not set +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_CMDLINE="rootfstype=squashfs,jffs2" +CONFIG_CMDLINE_BOOL=y +# CONFIG_CMDLINE_OVERRIDE is not set +CONFIG_CPU_GENERIC_DUMP_TLB=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_CPU_HAS_RIXI=y +CONFIG_CPU_HAS_SYNC=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32=y +# CONFIG_CPU_MIPS32_R1 is not set +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MIPSR2=y +CONFIG_CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS=y +CONFIG_CPU_R4K_CACHE_TLB=y +CONFIG_CPU_R4K_FPU=y +CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y +CONFIG_CPU_SUPPORTS_HIGHMEM=y +CONFIG_CPU_SUPPORTS_MSA=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CSRC_R4K=y +CONFIG_DEBUG_PINCTRL=y +CONFIG_DMA_NONCOHERENT=y +# CONFIG_DMA_NOOP_OPS is not set +# CONFIG_DMA_VIRT_OPS is 
not set +# CONFIG_DRM_LIB_RANDOM is not set +# CONFIG_DTB_RT3883_EVAL is not set +CONFIG_DTB_RT_NONE=y +CONFIG_DTC=y +CONFIG_EARLY_PRINTK=y +CONFIG_ETHERNET_PACKET_MANGLE=y +CONFIG_EXPORTFS=y +CONFIG_FIXED_PHY=y +CONFIG_FUTEX_PI=y +CONFIG_GENERIC_ATOMIC64=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_RALINK=y +CONFIG_GPIO_SYSFS=y +# CONFIG_GRO_CELLS is not set +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDWARE_WATCHPOINTS=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +# CONFIG_HAVE_ARCH_BITREVERSE is not set +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_CBPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_COPY_THREAD_TLS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MEMBLOCK_NODE_MAP=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HW_HAS_PCI=y +CONFIG_HZ_PERIODIC=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_INTC=y +CONFIG_IRQ_MIPS_CPU=y +CONFIG_IRQ_WORK=y +CONFIG_LIBFDT=y +CONFIG_MDIO_BUS=y +CONFIG_MDIO_DEVICE=y +CONFIG_MFD_SYSCON=y +CONFIG_MIPS=y +CONFIG_MIPS_ASID_BITS=8 +CONFIG_MIPS_ASID_SHIFT=0 +CONFIG_MIPS_CLOCK_VSYSCALL=y +# CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND is not set +# CONFIG_MIPS_CMDLINE_DTB_EXTEND is not set +# CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER is not set +CONFIG_MIPS_CMDLINE_FROM_DTB=y +# CONFIG_MIPS_ELF_APPENDED_DTB is not set +# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set +CONFIG_MIPS_L1_CACHE_SHIFT=5 +# CONFIG_MIPS_MACHINE is not set +CONFIG_MIPS_NO_APPENDED_DTB=y +# CONFIG_MIPS_RAW_APPENDED_DTB is not set +CONFIG_MIPS_SPRAM=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MTD_CFI_INTELEXT is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=16384 +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_SEAMA_FW=y +CONFIG_MTD_SPLIT_UIMAGE_FW=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEED_PER_CPU_KM=y +CONFIG_NET_MEDIATEK_MDIO=y +CONFIG_NET_MEDIATEK_MDIO_RT2880=y +CONFIG_NET_MEDIATEK_RT3883=y +CONFIG_NET_MEDIATEK_SOC=y +CONFIG_NET_VENDOR_MEDIATEK=y +CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y +# CONFIG_NO_IOPORT_MAP is not set +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_NET=y 
+CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DRIVERS_LEGACY=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +CONFIG_PHY_RALINK_USB=y +CONFIG_PINCTRL=y +CONFIG_PINCTRL_RT2880=y +# CONFIG_PINCTRL_SINGLE is not set +CONFIG_RALINK=y +CONFIG_RALINK_WDT=y +# CONFIG_RCU_NEED_SEGCBLIST is not set +# CONFIG_RCU_STALL_COMMON is not set +CONFIG_REGMAP=y +CONFIG_REGMAP_MMIO=y +CONFIG_RESET_CONTROLLER=y +CONFIG_RTL8366_SMI=y +CONFIG_RTL8367B_PHY=y +CONFIG_RTL8367_PHY=y +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250_FSL is not set +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_OF_PLATFORM=y +# CONFIG_SOC_MT7620 is not set +# CONFIG_SOC_MT7621 is not set +# CONFIG_SOC_RT288X is not set +# CONFIG_SOC_RT305X is not set +CONFIG_SOC_RT3883=y +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MT7621 is not set +CONFIG_SPI_RT2880=y +CONFIG_SRCU=y +CONFIG_SWCONFIG=y +CONFIG_SWPHY=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYS_HAS_CPU_MIPS32_R1=y +CONFIG_SYS_HAS_CPU_MIPS32_R2=y +CONFIG_SYS_HAS_EARLY_PRINTK=y +CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y +CONFIG_SYS_SUPPORTS_ARBIT_HZ=y +CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y +CONFIG_SYS_SUPPORTS_MIPS16=y +CONFIG_THIN_ARCHIVES=y +CONFIG_TICK_CPU_ACCOUNTING=y +CONFIG_TINY_SRCU=y +CONFIG_USB_SUPPORT=y +CONFIG_USE_OF=y +CONFIG_WATCHDOG_CORE=y |