| author | John Crispin <john@phrozen.org> | 2018-05-06 10:20:11 +0200 |
|---|---|---|
| committer | John Crispin <john@phrozen.org> | 2018-05-07 08:06:51 +0200 |
| commit | 53c474abbdfef8eb3499e2d10c9ad491788b8a72 | |
| tree | acd19415420664f59bc63c1ceb4ad37bb7323027 /target/linux/ath79/files | |
| parent | 3dc523f232ff01d31d59345f5fa6de508d5059ef | |
ath79: add new OF only target for QCA MIPS silicon
This target aims to replace ar71xx in the mid-term. The big piece still
missing is making the MMIO/AHB wifi work using OF. NAND and mikrotik
subtargets will follow.
Signed-off-by: John Crispin <john@phrozen.org>
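Among the files added below, arch/mips/ath79/pci-ath9k-fixup.c replays a board calibration blob into the ath9k PCI BAR: the blob is a stream of 16-bit words, with the 0xa55a magic in word 0, two words the fixup skips, and then (register offset, value low half, value high half) triplets terminated by 0xffff. The following is a minimal host-side sketch of that layout and of the walk the fixup performs; it is illustrative only, the MMIO write is replaced by a printf and `demo_cal` is made-up data, not real calibration contents.

```c
/* Host-side sketch of the calibration walk done by pci-ath9k-fixup.c.
 * demo_cal is hypothetical data; real blobs live in the board's ART flash. */
#include <stdint.h>
#include <stdio.h>

static const uint16_t demo_cal[] = {
	0xa55a,			/* magic word checked by the fixup          */
	0x0000, 0x0000,		/* two words skipped (cal_data += 3)        */
	0x6000, 0x1234, 0x0001,	/* reg 0x6000 <- 0x00011234                 */
	0x6004, 0xbeef, 0xdead,	/* reg 0x6004 <- 0xdeadbeef                 */
	0xffff,			/* terminator                               */
};

int main(void)
{
	const uint16_t *cal_data = demo_cal;

	if (*cal_data != 0xa55a)
		return 1;		/* invalid calibration data */

	cal_data += 3;			/* set pointer to first reg address */
	while (*cal_data != 0xffff) {
		uint32_t reg = *cal_data++;
		uint32_t val = *cal_data++;

		val |= (uint32_t)(*cal_data++) << 16;
		/* the kernel fixup does __raw_writel(val, mem + reg) here */
		printf("write 0x%08x to BAR offset 0x%04x\n", val, reg);
	}
	return 0;
}
```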
Diffstat (limited to 'target/linux/ath79/files')
14 files changed, 4848 insertions, 0 deletions
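Another new file, drivers/mtd/tplinkpart.c, derives five partitions from the 512-byte TP-Link firmware header: the kernel starts right after the header at `offset`, the rootfs starts where a squashfs magic is found after the kernel (falling back to the header's rootfs offset), and "art" occupies the last 64 KiB of the flash. The sketch below reproduces only that offset arithmetic; the flash size, kernel length and rootfs offset are hypothetical example values, not taken from any real device.

```c
/* Sketch of the partition layout computed by tplinkpart.c, with made-up
 * flash geometry.  u-boot and art are marked read-only by the parser. */
#include <stdint.h>
#include <stdio.h>

#define TPLINK_ART_LEN     0x10000
#define TPLINK_KERNEL_OFFS 0x20000
#define TPLINK_HEADER_LEN  512		/* sizeof(struct tplink_fw_header) */

int main(void)
{
	uint32_t flash_size = 8 * 1024 * 1024;	/* hypothetical 8 MiB flash        */
	uint32_t offset     = TPLINK_KERNEL_OFFS;
	uint32_t kernel_len = 0x100000;		/* hypothetical header->kernel_len */
	uint32_t rootfs_ofs = 0x120000;		/* hypothetical header->rootfs_ofs */
	int rootfs_magic_found = 1;		/* pretend the squashfs magic matched */

	uint32_t squashfs_offset = offset + TPLINK_HEADER_LEN + kernel_len;
	uint32_t rootfs_offset = rootfs_magic_found ? squashfs_offset
						    : offset + rootfs_ofs;
	uint32_t art_offset = flash_size - TPLINK_ART_LEN;

	printf("u-boot   0x%08x len 0x%08x (read-only)\n", 0u, offset);
	printf("kernel   0x%08x len 0x%08x\n", offset, rootfs_offset - offset);
	printf("rootfs   0x%08x len 0x%08x\n", rootfs_offset, art_offset - rootfs_offset);
	printf("art      0x%08x len 0x%08x (read-only)\n", art_offset, (uint32_t)TPLINK_ART_LEN);
	printf("firmware 0x%08x len 0x%08x\n", offset, art_offset - offset);
	return 0;
}
```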
diff --git a/target/linux/ath79/files/arch/mips/ath79/pci-ath9k-fixup.c b/target/linux/ath79/files/arch/mips/ath79/pci-ath9k-fixup.c new file mode 100644 index 0000000000..2202351806 --- /dev/null +++ b/target/linux/ath79/files/arch/mips/ath79/pci-ath9k-fixup.c @@ -0,0 +1,126 @@ +/* + * Atheros AP94 reference board PCI initialization + * + * Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <linux/pci.h> +#include <linux/delay.h> + +#include <asm/mach-ath79/ar71xx_regs.h> +#include <asm/mach-ath79/ath79.h> + +struct ath9k_fixup { + u16 *cal_data; + unsigned slot; +}; + +static int ath9k_num_fixups; +static struct ath9k_fixup ath9k_fixups[2]; + +static void ath9k_pci_fixup(struct pci_dev *dev) +{ + void __iomem *mem; + u16 *cal_data = NULL; + u16 cmd; + u32 bar0; + u32 val; + unsigned i; + + for (i = 0; i < ath9k_num_fixups; i++) { + if (ath9k_fixups[i].cal_data == NULL) + continue; + + if (ath9k_fixups[i].slot != PCI_SLOT(dev->devfn)) + continue; + + cal_data = ath9k_fixups[i].cal_data; + break; + } + + if (cal_data == NULL) + return; + + if (*cal_data != 0xa55a) { + pr_err("pci %s: invalid calibration data\n", pci_name(dev)); + return; + } + + pr_info("pci %s: fixup device configuration\n", pci_name(dev)); + + mem = ioremap(AR71XX_PCI_MEM_BASE, 0x10000); + if (!mem) { + pr_err("pci %s: ioremap error\n", pci_name(dev)); + return; + } + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar0); + + switch (ath79_soc) { + case ATH79_SOC_AR7161: + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, + AR71XX_PCI_MEM_BASE); + break; + case ATH79_SOC_AR7240: + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0xffff); + break; + + case ATH79_SOC_AR7241: + case ATH79_SOC_AR7242: + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x1000ffff); + break; + case ATH79_SOC_AR9344: + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x1000ffff); + break; + + default: + BUG(); + } + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY; + pci_write_config_word(dev, PCI_COMMAND, cmd); + + /* set pointer to first reg address */ + cal_data += 3; + while (*cal_data != 0xffff) { + u32 reg; + reg = *cal_data++; + val = *cal_data++; + val |= (*cal_data++) << 16; + + __raw_writel(val, mem + reg); + udelay(100); + } + + pci_read_config_dword(dev, PCI_VENDOR_ID, &val); + dev->vendor = val & 0xffff; + dev->device = (val >> 16) & 0xffff; + + pci_read_config_dword(dev, PCI_CLASS_REVISION, &val); + dev->revision = val & 0xff; + dev->class = val >> 8; /* upper 3 bytes */ + + pci_read_config_word(dev, PCI_COMMAND, &cmd); + cmd &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY); + pci_write_config_word(dev, PCI_COMMAND, cmd); + + pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, bar0); + + iounmap(mem); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATHEROS, PCI_ANY_ID, ath9k_pci_fixup); + +void __init pci_enable_ath9k_fixup(unsigned slot, u16 *cal_data) +{ + if (ath9k_num_fixups >= ARRAY_SIZE(ath9k_fixups)) + return; + + ath9k_fixups[ath9k_num_fixups].slot = slot; + ath9k_fixups[ath9k_num_fixups].cal_data = cal_data; + ath9k_num_fixups++; +} diff --git a/target/linux/ath79/files/arch/mips/ath79/pci-ath9k-fixup.h b/target/linux/ath79/files/arch/mips/ath79/pci-ath9k-fixup.h new file mode 100644 index 0000000000..5794941f08 --- /dev/null +++ 
b/target/linux/ath79/files/arch/mips/ath79/pci-ath9k-fixup.h @@ -0,0 +1,6 @@ +#ifndef _PCI_ATH9K_FIXUP +#define _PCI_ATH9K_FIXUP + +void pci_enable_ath9k_fixup(unsigned slot, u16 *cal_data) __init; + +#endif /* _PCI_ATH9K_FIXUP */ diff --git a/target/linux/ath79/files/arch/mips/include/asm/fw/myloader/myloader.h b/target/linux/ath79/files/arch/mips/include/asm/fw/myloader/myloader.h new file mode 100644 index 0000000000..8a99d566d7 --- /dev/null +++ b/target/linux/ath79/files/arch/mips/include/asm/fw/myloader/myloader.h @@ -0,0 +1,34 @@ +/* + * Compex's MyLoader specific definitions + * + * Copyright (C) 2006-2008 Gabor Juhos <juhosg@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ + +#ifndef _ASM_MIPS_FW_MYLOADER_H +#define _ASM_MIPS_FW_MYLOADER_H + +#include <linux/myloader.h> + +struct myloader_info { + uint32_t vid; + uint32_t did; + uint32_t svid; + uint32_t sdid; + uint8_t macs[MYLO_ETHADDR_COUNT][6]; +}; + +#ifdef CONFIG_MYLOADER +extern struct myloader_info *myloader_get_info(void) __init; +#else +static inline struct myloader_info *myloader_get_info(void) +{ + return NULL; +} +#endif /* CONFIG_MYLOADER */ + +#endif /* _ASM_MIPS_FW_MYLOADER_H */ diff --git a/target/linux/ath79/files/drivers/mtd/tplinkpart.c b/target/linux/ath79/files/drivers/mtd/tplinkpart.c new file mode 100644 index 0000000000..1b94163b83 --- /dev/null +++ b/target/linux/ath79/files/drivers/mtd/tplinkpart.c @@ -0,0 +1,235 @@ +/* + * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/magic.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/version.h> + +#define TPLINK_NUM_PARTS 5 +#define TPLINK_HEADER_V1 0x01000000 +#define TPLINK_HEADER_V2 0x02000000 +#define MD5SUM_LEN 16 + +#define TPLINK_ART_LEN 0x10000 +#define TPLINK_KERNEL_OFFS 0x20000 +#define TPLINK_64K_KERNEL_OFFS 0x10000 + +struct tplink_fw_header { + uint32_t version; /* header version */ + char vendor_name[24]; + char fw_version[36]; + uint32_t hw_id; /* hardware id */ + uint32_t hw_rev; /* hardware revision */ + uint32_t unk1; + uint8_t md5sum1[MD5SUM_LEN]; + uint32_t unk2; + uint8_t md5sum2[MD5SUM_LEN]; + uint32_t unk3; + uint32_t kernel_la; /* kernel load address */ + uint32_t kernel_ep; /* kernel entry point */ + uint32_t fw_length; /* total length of the firmware */ + uint32_t kernel_ofs; /* kernel data offset */ + uint32_t kernel_len; /* kernel data length */ + uint32_t rootfs_ofs; /* rootfs data offset */ + uint32_t rootfs_len; /* rootfs data length */ + uint32_t boot_ofs; /* bootloader data offset */ + uint32_t boot_len; /* bootloader data length */ + uint8_t pad[360]; +} __attribute__ ((packed)); + +static struct tplink_fw_header * +tplink_read_header(struct mtd_info *mtd, size_t offset) +{ + struct tplink_fw_header *header; + size_t header_len; + size_t retlen; + int ret; + u32 t; + + header = vmalloc(sizeof(*header)); + if (!header) + goto err; + + header_len = sizeof(struct tplink_fw_header); + ret = mtd_read(mtd, offset, header_len, &retlen, + (unsigned char *) header); + if (ret) + goto err_free_header; + + if (retlen != header_len) + goto err_free_header; + + /* sanity checks */ + t = be32_to_cpu(header->version); + if ((t != TPLINK_HEADER_V1) && (t != TPLINK_HEADER_V2)) + goto err_free_header; + + t = be32_to_cpu(header->kernel_ofs); + if (t != header_len) + goto err_free_header; + + return header; + +err_free_header: + vfree(header); +err: + return NULL; +} + +static int tplink_check_rootfs_magic(struct mtd_info *mtd, size_t offset) +{ + u32 magic; + size_t retlen; + int ret; + + ret = mtd_read(mtd, offset, sizeof(magic), &retlen, + (unsigned char *) &magic); + if (ret) + return ret; + + if (retlen != sizeof(magic)) + return -EIO; + + if (le32_to_cpu(magic) != SQUASHFS_MAGIC && + magic != 0x19852003) + return -EINVAL; + + return 0; +} + +static int tplink_parse_partitions_offset(struct mtd_info *master, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) + struct mtd_partition **pparts, +#else + const struct mtd_partition **pparts, +#endif + struct mtd_part_parser_data *data, + size_t offset) +{ + struct mtd_partition *parts; + struct tplink_fw_header *header; + int nr_parts; + size_t art_offset; + size_t rootfs_offset; + size_t squashfs_offset; + int ret; + + nr_parts = TPLINK_NUM_PARTS; + parts = kzalloc(nr_parts * sizeof(struct mtd_partition), GFP_KERNEL); + if (!parts) { + ret = -ENOMEM; + goto err; + } + + header = tplink_read_header(master, offset); + if (!header) { + pr_notice("%s: no TP-Link header found\n", master->name); + ret = -ENODEV; + goto err_free_parts; + } + + squashfs_offset = offset + sizeof(struct tplink_fw_header) + + be32_to_cpu(header->kernel_len); + + ret = tplink_check_rootfs_magic(master, squashfs_offset); + if (ret == 0) + rootfs_offset = squashfs_offset; + else + rootfs_offset = offset + be32_to_cpu(header->rootfs_ofs); + + art_offset = master->size - TPLINK_ART_LEN; + + parts[0].name = "u-boot"; + 
parts[0].offset = 0; + parts[0].size = offset; + parts[0].mask_flags = MTD_WRITEABLE; + + parts[1].name = "kernel"; + parts[1].offset = offset; + parts[1].size = rootfs_offset - offset; + + parts[2].name = "rootfs"; + parts[2].offset = rootfs_offset; + parts[2].size = art_offset - rootfs_offset; + + parts[3].name = "art"; + parts[3].offset = art_offset; + parts[3].size = TPLINK_ART_LEN; + parts[3].mask_flags = MTD_WRITEABLE; + + parts[4].name = "firmware"; + parts[4].offset = offset; + parts[4].size = art_offset - offset; + + vfree(header); + + *pparts = parts; + return nr_parts; + +err_free_parts: + kfree(parts); +err: + *pparts = NULL; + return ret; +} + +static int tplink_parse_partitions(struct mtd_info *master, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) + struct mtd_partition **pparts, +#else + const struct mtd_partition **pparts, +#endif + struct mtd_part_parser_data *data) +{ + return tplink_parse_partitions_offset(master, pparts, data, + TPLINK_KERNEL_OFFS); +} + +static int tplink_parse_64k_partitions(struct mtd_info *master, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) + struct mtd_partition **pparts, +#else + const struct mtd_partition **pparts, +#endif + struct mtd_part_parser_data *data) +{ + return tplink_parse_partitions_offset(master, pparts, data, + TPLINK_64K_KERNEL_OFFS); +} + +static struct mtd_part_parser tplink_parser = { + .owner = THIS_MODULE, + .parse_fn = tplink_parse_partitions, + .name = "tp-link", +}; + +static struct mtd_part_parser tplink_64k_parser = { + .owner = THIS_MODULE, + .parse_fn = tplink_parse_64k_partitions, + .name = "tp-link-64k", +}; + +static int __init tplink_parser_init(void) +{ + register_mtd_parser(&tplink_parser); + register_mtd_parser(&tplink_64k_parser); + + return 0; +} + +module_init(tplink_parser_init); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); diff --git a/target/linux/ath79/files/drivers/net/dsa/mv88e6063.c b/target/linux/ath79/files/drivers/net/dsa/mv88e6063.c new file mode 100644 index 0000000000..5b6da713f4 --- /dev/null +++ b/target/linux/ath79/files/drivers/net/dsa/mv88e6063.c @@ -0,0 +1,289 @@ +/* + * net/dsa/mv88e6063.c - Driver for Marvell 88e6063 switch chips + * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> + * + * This driver was base on: net/dsa/mv88e6060.c + * net/dsa/mv88e6063.c - Driver for Marvell 88e6060 switch chips + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/version.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include <linux/version.h> + +#define REG_BASE 0x10 +#define REG_PHY(p) (REG_BASE + (p)) +#define REG_PORT(p) (REG_BASE + 8 + (p)) +#define REG_GLOBAL (REG_BASE + 0x0f) +#define NUM_PORTS 7 + +static int reg_read(struct dsa_switch *ds, int addr, int reg) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + return mdiobus_read(bus, addr, reg); +#else + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->dev); + return mdiobus_read(bus, addr, reg); +#endif +} + +#define REG_READ(addr, reg) \ + ({ \ + int __ret; \ + \ + __ret = reg_read(ds, addr, reg); \ + if (__ret < 0) \ + return __ret; \ + __ret; \ + }) + + +static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev); + return mdiobus_write(bus, addr, reg, val); +#else + struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->dev); + return mdiobus_write(bus, addr, reg, val); +#endif +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0) +static enum dsa_tag_protocol mv88e6063_get_tag_protocol(struct dsa_switch *ds) +{ + return DSA_TAG_PROTO_TRAILER; +} +#endif + +#define REG_WRITE(addr, reg, val) \ + ({ \ + int __ret; \ + \ + __ret = reg_write(ds, addr, reg, val); \ + if (__ret < 0) \ + return __ret; \ + }) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) +static char *mv88e6063_drv_probe(struct device *host_dev, int sw_addr) +#else +static const char *mv88e6063_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **_priv) +#endif +{ + struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev); + int ret; + + if (!bus) + return NULL; + + ret = mdiobus_read(bus, REG_PORT(0), 0x03); + if (ret >= 0) { + ret &= 0xfff0; + if (ret == 0x1530) + return "Marvell 88E6063"; + } + + return NULL; +} + +static int mv88e6063_switch_reset(struct dsa_switch *ds) +{ + int i; + int ret; + + /* + * Set all ports to the disabled state. + */ + for (i = 0; i < NUM_PORTS; i++) { + ret = REG_READ(REG_PORT(i), 0x04); + REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); + } + + /* + * Wait for transmit queues to drain. + */ + msleep(2); + + /* + * Reset the switch. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0xa130); + + /* + * Wait up to one second for reset to complete. + */ + for (i = 0; i < 1000; i++) { + ret = REG_READ(REG_GLOBAL, 0x00); + if ((ret & 0x8000) == 0x0000) + break; + + msleep(1); + } + if (i == 1000) + return -ETIMEDOUT; + + return 0; +} + +static int mv88e6063_setup_global(struct dsa_switch *ds) +{ + /* + * Disable discarding of frames with excessive collisions, + * set the maximum frame size to 1536 bytes, and mask all + * interrupt sources. + */ + REG_WRITE(REG_GLOBAL, 0x04, 0x0800); + + /* + * Enable automatic address learning, set the address + * database size to 1024 entries, and set the default aging + * time to 5 minutes. + */ + REG_WRITE(REG_GLOBAL, 0x0a, 0x2130); + + return 0; +} + +static int mv88e6063_setup_port(struct dsa_switch *ds, int p) +{ + int addr = REG_PORT(p); + + /* + * Do not force flow control, disable Ingress and Egress + * Header tagging, disable VLAN tunneling, and set the port + * state to Forwarding. Additionally, if this is the CPU + * port, enable Ingress and Egress Trailer tagging mode. + */ + REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 
0x4103 : 0x0003); + + /* + * Port based VLAN map: give each port its own address + * database, allow the CPU port to talk to each of the 'real' + * ports, and allow each of the 'real' ports to only talk to + * the CPU port. + */ + REG_WRITE(addr, 0x06, + ((p & 0xf) << 12) | + (dsa_is_cpu_port(ds, p) ? +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + ds->phys_port_mask : +#else + ds->enabled_port_mask : +#endif + (1 << ds->dst->cpu_port))); + + /* + * Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + REG_WRITE(addr, 0x0b, 1 << p); + + return 0; +} + +static int mv88e6063_setup(struct dsa_switch *ds) +{ + int i; + int ret; + + ret = mv88e6063_switch_reset(ds); + if (ret < 0) + return ret; + + /* @@@ initialise atu */ + + ret = mv88e6063_setup_global(ds); + if (ret < 0) + return ret; + + for (i = 0; i < NUM_PORTS; i++) { + ret = mv88e6063_setup_port(ds, i); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6063_set_addr(struct dsa_switch *ds, u8 *addr) +{ + REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]); + REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]); + REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]); + + return 0; +} + +static int mv88e6063_port_to_phy_addr(int port) +{ + if (port >= 0 && port <= NUM_PORTS) + return REG_PHY(port); + return -1; +} + +static int mv88e6063_phy_read(struct dsa_switch *ds, int port, int regnum) +{ + int addr; + + addr = mv88e6063_port_to_phy_addr(port); + if (addr == -1) + return 0xffff; + + return reg_read(ds, addr, regnum); +} + +static int +mv88e6063_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) +{ + int addr; + + addr = mv88e6063_port_to_phy_addr(port); + if (addr == -1) + return 0xffff; + + return reg_write(ds, addr, regnum, val); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0) +static struct dsa_switch_driver mv88e6063_switch_ops = { +#else +static struct dsa_switch_ops mv88e6063_switch_ops = { +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) + .tag_protocol = htons(ETH_P_TRAILER), +#else + .get_tag_protocol = mv88e6063_get_tag_protocol, +#endif + .probe = mv88e6063_drv_probe, + .setup = mv88e6063_setup, + .set_addr = mv88e6063_set_addr, + .phy_read = mv88e6063_phy_read, + .phy_write = mv88e6063_phy_write, +}; + +static int __init mv88e6063_init(void) +{ + register_switch_driver(&mv88e6063_switch_ops); + return 0; +} +module_init(mv88e6063_init); + +static void __exit mv88e6063_cleanup(void) +{ + unregister_switch_driver(&mv88e6063_switch_ops); +} +module_exit(mv88e6063_cleanup); diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/Kconfig b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/Kconfig new file mode 100644 index 0000000000..4df2d21e34 --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/Kconfig @@ -0,0 +1,25 @@ +config AG71XX + tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support" + depends on ATH79 + select PHYLIB + help + If you wish to compile a kernel for AR7XXX/91XXX and enable + ethernet support, then you should always answer Y to this. + +if AG71XX + +config AG71XX_DEBUG + bool "Atheros AR71xx built-in ethernet driver debugging" + default n + help + Atheros AR71xx built-in ethernet driver debugging messages. 
+ +config AG71XX_DEBUG_FS + bool "Atheros AR71xx built-in ethernet driver debugfs support" + depends on DEBUG_FS + default n + help + Say Y, if you need access to various statistics provided by + the ag71xx driver. + +endif diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/Makefile b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/Makefile new file mode 100644 index 0000000000..7f396ad46e --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/Makefile @@ -0,0 +1,14 @@ +# +# Makefile for the Atheros AR71xx built-in ethernet macs +# + +ag71xx-y += ag71xx_main.o +ag71xx-y += ag71xx_ethtool.o +ag71xx-y += ag71xx_phy.o +ag71xx-y += ag71xx_mdio.o +ag71xx-y += ag71xx_ar7240.o + +ag71xx-$(CONFIG_AG71XX_DEBUG_FS) += ag71xx_debugfs.o + +obj-$(CONFIG_AG71XX) += ag71xx.o + diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx.h b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx.h new file mode 100644 index 0000000000..22b22522a3 --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx.h @@ -0,0 +1,453 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef __AG71XX_H +#define __AG71XX_H + +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/types.h> +#include <linux/random.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/phy.h> +#include <linux/skbuff.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/reset.h> +#include <linux/of.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> + +#include <linux/bitops.h> + +#include <asm/mach-ath79/ar71xx_regs.h> +#include <asm/mach-ath79/ath79.h> + +#define AG71XX_DRV_NAME "ag71xx" + +/* + * For our NAPI weight bigger does *NOT* mean better - it means more + * D-cache misses and lots more wasted cycles than we'll ever + * possibly gain from saving instructions. + */ +#define AG71XX_NAPI_WEIGHT 32 +#define AG71XX_OOM_REFILL (1 + HZ/10) + +#define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE) +#define AG71XX_INT_TX (AG71XX_INT_TX_PS) +#define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF) + +#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX) +#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL) + +#define AG71XX_TX_MTU_LEN 1540 + +#define AG71XX_TX_RING_SPLIT 512 +#define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \ + AG71XX_TX_RING_SPLIT) +#define AG71XX_TX_RING_SIZE_DEFAULT 128 +#define AG71XX_RX_RING_SIZE_DEFAULT 256 + +#define AG71XX_TX_RING_SIZE_MAX 128 +#define AG71XX_RX_RING_SIZE_MAX 256 + +#ifdef CONFIG_AG71XX_DEBUG +#define DBG(fmt, args...) pr_debug(fmt, ## args) +#else +#define DBG(fmt, args...) 
do {} while (0) +#endif + +#define ag71xx_assert(_cond) \ +do { \ + if (_cond) \ + break; \ + printk("%s,%d: assertion failed\n", __FILE__, __LINE__); \ + BUG(); \ +} while (0) + +struct ag71xx_desc { + u32 data; + u32 ctrl; +#define DESC_EMPTY BIT(31) +#define DESC_MORE BIT(24) +#define DESC_PKTLEN_M 0xfff + u32 next; + u32 pad; +} __attribute__((aligned(4))); + +#define AG71XX_DESC_SIZE roundup(sizeof(struct ag71xx_desc), \ + L1_CACHE_BYTES) + +struct ag71xx_buf { + union { + struct sk_buff *skb; + void *rx_buf; + }; + union { + dma_addr_t dma_addr; + unsigned int len; + }; +}; + +struct ag71xx_ring { + struct ag71xx_buf *buf; + u8 *descs_cpu; + dma_addr_t descs_dma; + u16 desc_split; + u16 order; + unsigned int curr; + unsigned int dirty; +}; + +struct ag71xx_int_stats { + unsigned long rx_pr; + unsigned long rx_be; + unsigned long rx_of; + unsigned long tx_ps; + unsigned long tx_be; + unsigned long tx_ur; + unsigned long total; +}; + +struct ag71xx_napi_stats { + unsigned long napi_calls; + unsigned long rx_count; + unsigned long rx_packets; + unsigned long rx_packets_max; + unsigned long tx_count; + unsigned long tx_packets; + unsigned long tx_packets_max; + + unsigned long rx[AG71XX_NAPI_WEIGHT + 1]; + unsigned long tx[AG71XX_NAPI_WEIGHT + 1]; +}; + +struct ag71xx_debug { + struct dentry *debugfs_dir; + + struct ag71xx_int_stats int_stats; + struct ag71xx_napi_stats napi_stats; +}; + +struct ag71xx { + /* + * Critical data related to the per-packet data path are clustered + * early in this structure to help improve the D-cache footprint. + */ + struct ag71xx_ring rx_ring ____cacheline_aligned; + struct ag71xx_ring tx_ring ____cacheline_aligned; + + u16 desc_pktlen_mask; + u16 rx_buf_size; + u8 rx_buf_offset; + u8 tx_hang_workaround:1; + + struct net_device *dev; + struct platform_device *pdev; + spinlock_t lock; + struct napi_struct napi; + u32 msg_enable; + + /* + * From this point onwards we're not looking at per-packet fields. 
+ */ + void __iomem *mac_base; + void __iomem *mii_base; + struct regmap *mii_regmap; + + struct ag71xx_desc *stop_desc; + dma_addr_t stop_desc_dma; + + struct mii_bus *mii_bus; + struct phy_device *phy_dev; + void *phy_priv; + int phy_if_mode; + + unsigned int link; + unsigned int speed; + int duplex; + + struct delayed_work restart_work; + struct timer_list oom_timer; + + struct reset_control *mac_reset; + struct reset_control *phy_reset; + struct reset_control *mdio_reset; + + u32 fifodata[3]; + u32 plldata[3]; + u32 pllreg[3]; + struct regmap *pllregmap; + +#ifdef CONFIG_AG71XX_DEBUG_FS + struct ag71xx_debug debug; +#endif +}; + +extern struct ethtool_ops ag71xx_ethtool_ops; +void ag71xx_link_adjust(struct ag71xx *ag); + +int ag71xx_phy_connect(struct ag71xx *ag); +void ag71xx_phy_disconnect(struct ag71xx *ag); + +static inline int ag71xx_desc_empty(struct ag71xx_desc *desc) +{ + return (desc->ctrl & DESC_EMPTY) != 0; +} + +static inline struct ag71xx_desc * +ag71xx_ring_desc(struct ag71xx_ring *ring, int idx) +{ + return (struct ag71xx_desc *) &ring->descs_cpu[idx * AG71XX_DESC_SIZE]; +} + +static inline int +ag71xx_ring_size_order(int size) +{ + return fls(size - 1); +} + +/* Register offsets */ +#define AG71XX_REG_MAC_CFG1 0x0000 +#define AG71XX_REG_MAC_CFG2 0x0004 +#define AG71XX_REG_MAC_IPG 0x0008 +#define AG71XX_REG_MAC_HDX 0x000c +#define AG71XX_REG_MAC_MFL 0x0010 +#define AG71XX_REG_MII_CFG 0x0020 +#define AG71XX_REG_MII_CMD 0x0024 +#define AG71XX_REG_MII_ADDR 0x0028 +#define AG71XX_REG_MII_CTRL 0x002c +#define AG71XX_REG_MII_STATUS 0x0030 +#define AG71XX_REG_MII_IND 0x0034 +#define AG71XX_REG_MAC_IFCTL 0x0038 +#define AG71XX_REG_MAC_ADDR1 0x0040 +#define AG71XX_REG_MAC_ADDR2 0x0044 +#define AG71XX_REG_FIFO_CFG0 0x0048 +#define AG71XX_REG_FIFO_CFG1 0x004c +#define AG71XX_REG_FIFO_CFG2 0x0050 +#define AG71XX_REG_FIFO_CFG3 0x0054 +#define AG71XX_REG_FIFO_CFG4 0x0058 +#define AG71XX_REG_FIFO_CFG5 0x005c +#define AG71XX_REG_FIFO_RAM0 0x0060 +#define AG71XX_REG_FIFO_RAM1 0x0064 +#define AG71XX_REG_FIFO_RAM2 0x0068 +#define AG71XX_REG_FIFO_RAM3 0x006c +#define AG71XX_REG_FIFO_RAM4 0x0070 +#define AG71XX_REG_FIFO_RAM5 0x0074 +#define AG71XX_REG_FIFO_RAM6 0x0078 +#define AG71XX_REG_FIFO_RAM7 0x007c + +#define AG71XX_REG_TX_CTRL 0x0180 +#define AG71XX_REG_TX_DESC 0x0184 +#define AG71XX_REG_TX_STATUS 0x0188 +#define AG71XX_REG_RX_CTRL 0x018c +#define AG71XX_REG_RX_DESC 0x0190 +#define AG71XX_REG_RX_STATUS 0x0194 +#define AG71XX_REG_INT_ENABLE 0x0198 +#define AG71XX_REG_INT_STATUS 0x019c + +#define AG71XX_REG_FIFO_DEPTH 0x01a8 +#define AG71XX_REG_RX_SM 0x01b0 +#define AG71XX_REG_TX_SM 0x01b4 + +#define MAC_CFG1_TXE BIT(0) /* Tx Enable */ +#define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */ +#define MAC_CFG1_RXE BIT(2) /* Rx Enable */ +#define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */ +#define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */ +#define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */ +#define MAC_CFG1_LB BIT(8) /* Loopback mode */ +#define MAC_CFG1_SR BIT(31) /* Soft Reset */ + +#define MAC_CFG2_FDX BIT(0) +#define MAC_CFG2_CRC_EN BIT(1) +#define MAC_CFG2_PAD_CRC_EN BIT(2) +#define MAC_CFG2_LEN_CHECK BIT(4) +#define MAC_CFG2_HUGE_FRAME_EN BIT(5) +#define MAC_CFG2_IF_1000 BIT(9) +#define MAC_CFG2_IF_10_100 BIT(8) + +#define FIFO_CFG0_WTM BIT(0) /* Watermark Module */ +#define FIFO_CFG0_RXS BIT(1) /* Rx System Module */ +#define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */ +#define FIFO_CFG0_TXS BIT(3) /* Tx System Module */ +#define FIFO_CFG0_TXF BIT(4) /* Tx Fabric 
Module */ +#define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \ + | FIFO_CFG0_TXS | FIFO_CFG0_TXF) + +#define FIFO_CFG0_ENABLE_SHIFT 8 + +#define FIFO_CFG4_DE BIT(0) /* Drop Event */ +#define FIFO_CFG4_DV BIT(1) /* RX_DV Event */ +#define FIFO_CFG4_FC BIT(2) /* False Carrier */ +#define FIFO_CFG4_CE BIT(3) /* Code Error */ +#define FIFO_CFG4_CR BIT(4) /* CRC error */ +#define FIFO_CFG4_LM BIT(5) /* Length Mismatch */ +#define FIFO_CFG4_LO BIT(6) /* Length out of range */ +#define FIFO_CFG4_OK BIT(7) /* Packet is OK */ +#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */ +#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */ +#define FIFO_CFG4_DR BIT(10) /* Dribble */ +#define FIFO_CFG4_LE BIT(11) /* Long Event */ +#define FIFO_CFG4_CF BIT(12) /* Control Frame */ +#define FIFO_CFG4_PF BIT(13) /* Pause Frame */ +#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */ +#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */ +#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */ +#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */ + +#define FIFO_CFG5_DE BIT(0) /* Drop Event */ +#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */ +#define FIFO_CFG5_FC BIT(2) /* False Carrier */ +#define FIFO_CFG5_CE BIT(3) /* Code Error */ +#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */ +#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */ +#define FIFO_CFG5_OK BIT(6) /* Packet is OK */ +#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */ +#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */ +#define FIFO_CFG5_DR BIT(9) /* Dribble */ +#define FIFO_CFG5_CF BIT(10) /* Control Frame */ +#define FIFO_CFG5_PF BIT(11) /* Pause Frame */ +#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */ +#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */ +#define FIFO_CFG5_LE BIT(14) /* Long Event */ +#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */ +#define FIFO_CFG5_16 BIT(16) /* unknown */ +#define FIFO_CFG5_17 BIT(17) /* unknown */ +#define FIFO_CFG5_SF BIT(18) /* Short Frame */ +#define FIFO_CFG5_BM BIT(19) /* Byte Mode */ + +#define AG71XX_INT_TX_PS BIT(0) +#define AG71XX_INT_TX_UR BIT(1) +#define AG71XX_INT_TX_BE BIT(3) +#define AG71XX_INT_RX_PR BIT(4) +#define AG71XX_INT_RX_OF BIT(6) +#define AG71XX_INT_RX_BE BIT(7) + +#define MAC_IFCTL_SPEED BIT(16) + +#define MII_CFG_CLK_DIV_4 0 +#define MII_CFG_CLK_DIV_6 2 +#define MII_CFG_CLK_DIV_8 3 +#define MII_CFG_CLK_DIV_10 4 +#define MII_CFG_CLK_DIV_14 5 +#define MII_CFG_CLK_DIV_20 6 +#define MII_CFG_CLK_DIV_28 7 +#define MII_CFG_CLK_DIV_34 8 +#define MII_CFG_CLK_DIV_42 9 +#define MII_CFG_CLK_DIV_50 10 +#define MII_CFG_CLK_DIV_58 11 +#define MII_CFG_CLK_DIV_66 12 +#define MII_CFG_CLK_DIV_74 13 +#define MII_CFG_CLK_DIV_82 14 +#define MII_CFG_CLK_DIV_98 15 +#define MII_CFG_RESET BIT(31) + +#define MII_CMD_WRITE 0x0 +#define MII_CMD_READ 0x1 +#define MII_ADDR_SHIFT 8 +#define MII_IND_BUSY BIT(0) +#define MII_IND_INVALID BIT(2) + +#define TX_CTRL_TXE BIT(0) /* Tx Enable */ + +#define TX_STATUS_PS BIT(0) /* Packet Sent */ +#define TX_STATUS_UR BIT(1) /* Tx Underrun */ +#define TX_STATUS_BE BIT(3) /* Bus Error */ + +#define RX_CTRL_RXE BIT(0) /* Rx Enable */ + +#define RX_STATUS_PR BIT(0) /* Packet Received */ +#define RX_STATUS_OF BIT(2) /* Rx Overflow */ +#define RX_STATUS_BE BIT(3) /* Bus Error */ + +static inline void ag71xx_wr(struct ag71xx *ag, unsigned reg, u32 value) +{ + __raw_writel(value, ag->mac_base + reg); + /* flush write */ + (void) __raw_readl(ag->mac_base + reg); +} + +static inline u32 ag71xx_rr(struct ag71xx *ag, unsigned reg) +{ + return __raw_readl(ag->mac_base 
+ reg); +} + +static inline void ag71xx_sb(struct ag71xx *ag, unsigned reg, u32 mask) +{ + void __iomem *r; + + r = ag->mac_base + reg; + __raw_writel(__raw_readl(r) | mask, r); + /* flush write */ + (void) __raw_readl(r); +} + +static inline void ag71xx_cb(struct ag71xx *ag, unsigned reg, u32 mask) +{ + void __iomem *r; + + r = ag->mac_base + reg; + __raw_writel(__raw_readl(r) & ~mask, r); + /* flush write */ + (void) __raw_readl(r); +} + +static inline void ag71xx_int_enable(struct ag71xx *ag, u32 ints) +{ + ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints); +} + +static inline void ag71xx_int_disable(struct ag71xx *ag, u32 ints) +{ + ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints); +} + +#ifdef CONFIG_AG71XX_DEBUG_FS +int ag71xx_debugfs_root_init(void); +void ag71xx_debugfs_root_exit(void); +int ag71xx_debugfs_init(struct ag71xx *ag); +void ag71xx_debugfs_exit(struct ag71xx *ag); +void ag71xx_debugfs_update_int_stats(struct ag71xx *ag, u32 status); +void ag71xx_debugfs_update_napi_stats(struct ag71xx *ag, int rx, int tx); +#else +static inline int ag71xx_debugfs_root_init(void) { return 0; } +static inline void ag71xx_debugfs_root_exit(void) {} +static inline int ag71xx_debugfs_init(struct ag71xx *ag) { return 0; } +static inline void ag71xx_debugfs_exit(struct ag71xx *ag) {} +static inline void ag71xx_debugfs_update_int_stats(struct ag71xx *ag, + u32 status) {} +static inline void ag71xx_debugfs_update_napi_stats(struct ag71xx *ag, + int rx, int tx) {} +#endif /* CONFIG_AG71XX_DEBUG_FS */ + +int ag71xx_ar7240_init(struct ag71xx *ag, struct device_node *np); +void ag71xx_ar7240_cleanup(struct ag71xx *ag); +void ag71xx_ar7240_start(struct ag71xx *ag); + +int ag71xx_mdio_init(struct ag71xx *ag); +void ag71xx_mdio_cleanup(struct ag71xx *ag); +int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg); +int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg, u16 val); + +int ar7240sw_phy_read(struct mii_bus *mii, int addr, int reg); +int ar7240sw_phy_write(struct mii_bus *mii, int addr, int reg, u16 val); + +#endif /* _AG71XX_H */ diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_ar7240.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_ar7240.c new file mode 100644 index 0000000000..52c0f964cf --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_ar7240.c @@ -0,0 +1,1340 @@ +/* + * Driver for the built-in ethernet switch of the Atheros AR7240 SoC + * Copyright (c) 2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (c) 2010 Felix Fietkau <nbd@nbd.name> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ + +#include <linux/etherdevice.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/mii.h> +#include <linux/bitops.h> +#include <linux/switch.h> +#include "ag71xx.h" + +#define BITM(_count) (BIT(_count) - 1) +#define BITS(_shift, _count) (BITM(_count) << _shift) + +#define AR7240_REG_MASK_CTRL 0x00 +#define AR7240_MASK_CTRL_REVISION_M BITM(8) +#define AR7240_MASK_CTRL_VERSION_M BITM(8) +#define AR7240_MASK_CTRL_VERSION_S 8 +#define AR7240_MASK_CTRL_VERSION_AR7240 0x01 +#define AR7240_MASK_CTRL_VERSION_AR934X 0x02 +#define AR7240_MASK_CTRL_SOFT_RESET BIT(31) + +#define AR7240_REG_MAC_ADDR0 0x20 +#define AR7240_REG_MAC_ADDR1 0x24 + +#define AR7240_REG_FLOOD_MASK 0x2c +#define AR7240_FLOOD_MASK_BROAD_TO_CPU BIT(26) + +#define AR7240_REG_GLOBAL_CTRL 0x30 +#define AR7240_GLOBAL_CTRL_MTU_M BITM(11) +#define AR9340_GLOBAL_CTRL_MTU_M BITM(14) + +#define AR7240_REG_VTU 0x0040 +#define AR7240_VTU_OP BITM(3) +#define AR7240_VTU_OP_NOOP 0x0 +#define AR7240_VTU_OP_FLUSH 0x1 +#define AR7240_VTU_OP_LOAD 0x2 +#define AR7240_VTU_OP_PURGE 0x3 +#define AR7240_VTU_OP_REMOVE_PORT 0x4 +#define AR7240_VTU_ACTIVE BIT(3) +#define AR7240_VTU_FULL BIT(4) +#define AR7240_VTU_PORT BITS(8, 4) +#define AR7240_VTU_PORT_S 8 +#define AR7240_VTU_VID BITS(16, 12) +#define AR7240_VTU_VID_S 16 +#define AR7240_VTU_PRIO BITS(28, 3) +#define AR7240_VTU_PRIO_S 28 +#define AR7240_VTU_PRIO_EN BIT(31) + +#define AR7240_REG_VTU_DATA 0x0044 +#define AR7240_VTUDATA_MEMBER BITS(0, 10) +#define AR7240_VTUDATA_VALID BIT(11) + +#define AR7240_REG_ATU 0x50 +#define AR7240_ATU_FLUSH_ALL 0x1 + +#define AR7240_REG_AT_CTRL 0x5c +#define AR7240_AT_CTRL_AGE_TIME BITS(0, 15) +#define AR7240_AT_CTRL_AGE_EN BIT(17) +#define AR7240_AT_CTRL_LEARN_CHANGE BIT(18) +#define AR7240_AT_CTRL_RESERVED BIT(19) +#define AR7240_AT_CTRL_ARP_EN BIT(20) + +#define AR7240_REG_TAG_PRIORITY 0x70 + +#define AR7240_REG_SERVICE_TAG 0x74 +#define AR7240_SERVICE_TAG_M BITM(16) + +#define AR7240_REG_CPU_PORT 0x78 +#define AR7240_MIRROR_PORT_S 4 +#define AR7240_MIRROR_PORT_M BITM(4) +#define AR7240_CPU_PORT_EN BIT(8) + +#define AR7240_REG_MIB_FUNCTION0 0x80 +#define AR7240_MIB_TIMER_M BITM(16) +#define AR7240_MIB_AT_HALF_EN BIT(16) +#define AR7240_MIB_BUSY BIT(17) +#define AR7240_MIB_FUNC_S 24 +#define AR7240_MIB_FUNC_M BITM(3) +#define AR7240_MIB_FUNC_NO_OP 0x0 +#define AR7240_MIB_FUNC_FLUSH 0x1 +#define AR7240_MIB_FUNC_CAPTURE 0x3 + +#define AR7240_REG_MDIO_CTRL 0x98 +#define AR7240_MDIO_CTRL_DATA_M BITM(16) +#define AR7240_MDIO_CTRL_REG_ADDR_S 16 +#define AR7240_MDIO_CTRL_PHY_ADDR_S 21 +#define AR7240_MDIO_CTRL_CMD_WRITE 0 +#define AR7240_MDIO_CTRL_CMD_READ BIT(27) +#define AR7240_MDIO_CTRL_MASTER_EN BIT(30) +#define AR7240_MDIO_CTRL_BUSY BIT(31) + +#define AR7240_REG_PORT_BASE(_port) (0x100 + (_port) * 0x100) + +#define AR7240_REG_PORT_STATUS(_port) (AR7240_REG_PORT_BASE((_port)) + 0x00) +#define AR7240_PORT_STATUS_SPEED_S 0 +#define AR7240_PORT_STATUS_SPEED_M BITM(2) +#define AR7240_PORT_STATUS_SPEED_10 0 +#define AR7240_PORT_STATUS_SPEED_100 1 +#define AR7240_PORT_STATUS_SPEED_1000 2 +#define AR7240_PORT_STATUS_TXMAC BIT(2) +#define AR7240_PORT_STATUS_RXMAC BIT(3) +#define AR7240_PORT_STATUS_TXFLOW BIT(4) +#define AR7240_PORT_STATUS_RXFLOW BIT(5) +#define AR7240_PORT_STATUS_DUPLEX BIT(6) +#define AR7240_PORT_STATUS_LINK_UP BIT(8) +#define AR7240_PORT_STATUS_LINK_AUTO BIT(9) +#define AR7240_PORT_STATUS_LINK_PAUSE BIT(10) + +#define AR7240_REG_PORT_CTRL(_port) (AR7240_REG_PORT_BASE((_port)) + 0x04) +#define 
AR7240_PORT_CTRL_STATE_M BITM(3) +#define AR7240_PORT_CTRL_STATE_DISABLED 0 +#define AR7240_PORT_CTRL_STATE_BLOCK 1 +#define AR7240_PORT_CTRL_STATE_LISTEN 2 +#define AR7240_PORT_CTRL_STATE_LEARN 3 +#define AR7240_PORT_CTRL_STATE_FORWARD 4 +#define AR7240_PORT_CTRL_LEARN_LOCK BIT(7) +#define AR7240_PORT_CTRL_VLAN_MODE_S 8 +#define AR7240_PORT_CTRL_VLAN_MODE_KEEP 0 +#define AR7240_PORT_CTRL_VLAN_MODE_STRIP 1 +#define AR7240_PORT_CTRL_VLAN_MODE_ADD 2 +#define AR7240_PORT_CTRL_VLAN_MODE_DOUBLE_TAG 3 +#define AR7240_PORT_CTRL_IGMP_SNOOP BIT(10) +#define AR7240_PORT_CTRL_HEADER BIT(11) +#define AR7240_PORT_CTRL_MAC_LOOP BIT(12) +#define AR7240_PORT_CTRL_SINGLE_VLAN BIT(13) +#define AR7240_PORT_CTRL_LEARN BIT(14) +#define AR7240_PORT_CTRL_DOUBLE_TAG BIT(15) +#define AR7240_PORT_CTRL_MIRROR_TX BIT(16) +#define AR7240_PORT_CTRL_MIRROR_RX BIT(17) + +#define AR7240_REG_PORT_VLAN(_port) (AR7240_REG_PORT_BASE((_port)) + 0x08) + +#define AR7240_PORT_VLAN_DEFAULT_ID_S 0 +#define AR7240_PORT_VLAN_DEST_PORTS_S 16 +#define AR7240_PORT_VLAN_MODE_S 30 +#define AR7240_PORT_VLAN_MODE_PORT_ONLY 0 +#define AR7240_PORT_VLAN_MODE_PORT_FALLBACK 1 +#define AR7240_PORT_VLAN_MODE_VLAN_ONLY 2 +#define AR7240_PORT_VLAN_MODE_SECURE 3 + + +#define AR7240_REG_STATS_BASE(_port) (0x20000 + (_port) * 0x100) + +#define AR7240_STATS_RXBROAD 0x00 +#define AR7240_STATS_RXPAUSE 0x04 +#define AR7240_STATS_RXMULTI 0x08 +#define AR7240_STATS_RXFCSERR 0x0c +#define AR7240_STATS_RXALIGNERR 0x10 +#define AR7240_STATS_RXRUNT 0x14 +#define AR7240_STATS_RXFRAGMENT 0x18 +#define AR7240_STATS_RX64BYTE 0x1c +#define AR7240_STATS_RX128BYTE 0x20 +#define AR7240_STATS_RX256BYTE 0x24 +#define AR7240_STATS_RX512BYTE 0x28 +#define AR7240_STATS_RX1024BYTE 0x2c +#define AR7240_STATS_RX1518BYTE 0x30 +#define AR7240_STATS_RXMAXBYTE 0x34 +#define AR7240_STATS_RXTOOLONG 0x38 +#define AR7240_STATS_RXGOODBYTE 0x3c +#define AR7240_STATS_RXBADBYTE 0x44 +#define AR7240_STATS_RXOVERFLOW 0x4c +#define AR7240_STATS_FILTERED 0x50 +#define AR7240_STATS_TXBROAD 0x54 +#define AR7240_STATS_TXPAUSE 0x58 +#define AR7240_STATS_TXMULTI 0x5c +#define AR7240_STATS_TXUNDERRUN 0x60 +#define AR7240_STATS_TX64BYTE 0x64 +#define AR7240_STATS_TX128BYTE 0x68 +#define AR7240_STATS_TX256BYTE 0x6c +#define AR7240_STATS_TX512BYTE 0x70 +#define AR7240_STATS_TX1024BYTE 0x74 +#define AR7240_STATS_TX1518BYTE 0x78 +#define AR7240_STATS_TXMAXBYTE 0x7c +#define AR7240_STATS_TXOVERSIZE 0x80 +#define AR7240_STATS_TXBYTE 0x84 +#define AR7240_STATS_TXCOLLISION 0x8c +#define AR7240_STATS_TXABORTCOL 0x90 +#define AR7240_STATS_TXMULTICOL 0x94 +#define AR7240_STATS_TXSINGLECOL 0x98 +#define AR7240_STATS_TXEXCDEFER 0x9c +#define AR7240_STATS_TXDEFER 0xa0 +#define AR7240_STATS_TXLATECOL 0xa4 + +#define AR7240_PORT_CPU 0 +#define AR7240_NUM_PORTS 6 +#define AR7240_NUM_PHYS 5 + +#define AR7240_PHY_ID1 0x004d +#define AR7240_PHY_ID2 0xd041 + +#define AR934X_PHY_ID1 0x004d +#define AR934X_PHY_ID2 0xd042 + +#define AR7240_MAX_VLANS 16 + +#define AR934X_REG_OPER_MODE0 0x04 +#define AR934X_OPER_MODE0_MAC_GMII_EN BIT(6) +#define AR934X_OPER_MODE0_PHY_MII_EN BIT(10) + +#define AR934X_REG_OPER_MODE1 0x08 +#define AR934X_REG_OPER_MODE1_PHY4_MII_EN BIT(28) + +#define AR934X_REG_FLOOD_MASK 0x2c +#define AR934X_FLOOD_MASK_MC_DP(_p) BIT(16 + (_p)) +#define AR934X_FLOOD_MASK_BC_DP(_p) BIT(25 + (_p)) + +#define AR934X_REG_QM_CTRL 0x3c +#define AR934X_QM_CTRL_ARP_EN BIT(15) + +#define AR934X_REG_AT_CTRL 0x5c +#define AR934X_AT_CTRL_AGE_TIME BITS(0, 15) +#define AR934X_AT_CTRL_AGE_EN BIT(17) +#define 
AR934X_AT_CTRL_LEARN_CHANGE BIT(18) + +#define AR934X_MIB_ENABLE BIT(30) + +#define AR934X_REG_PORT_BASE(_port) (0x100 + (_port) * 0x100) + +#define AR934X_REG_PORT_VLAN1(_port) (AR934X_REG_PORT_BASE((_port)) + 0x08) +#define AR934X_PORT_VLAN1_DEFAULT_SVID_S 0 +#define AR934X_PORT_VLAN1_FORCE_DEFAULT_VID_EN BIT(12) +#define AR934X_PORT_VLAN1_PORT_TLS_MODE BIT(13) +#define AR934X_PORT_VLAN1_PORT_VLAN_PROP_EN BIT(14) +#define AR934X_PORT_VLAN1_PORT_CLONE_EN BIT(15) +#define AR934X_PORT_VLAN1_DEFAULT_CVID_S 16 +#define AR934X_PORT_VLAN1_FORCE_PORT_VLAN_EN BIT(28) +#define AR934X_PORT_VLAN1_ING_PORT_PRI_S 29 + +#define AR934X_REG_PORT_VLAN2(_port) (AR934X_REG_PORT_BASE((_port)) + 0x0c) +#define AR934X_PORT_VLAN2_PORT_VID_MEM_S 16 +#define AR934X_PORT_VLAN2_8021Q_MODE_S 30 +#define AR934X_PORT_VLAN2_8021Q_MODE_PORT_ONLY 0 +#define AR934X_PORT_VLAN2_8021Q_MODE_PORT_FALLBACK 1 +#define AR934X_PORT_VLAN2_8021Q_MODE_VLAN_ONLY 2 +#define AR934X_PORT_VLAN2_8021Q_MODE_SECURE 3 + +#define sw_to_ar7240(_dev) container_of(_dev, struct ar7240sw, swdev) + +struct ar7240sw_port_stat { + unsigned long rx_broadcast; + unsigned long rx_pause; + unsigned long rx_multicast; + unsigned long rx_fcs_error; + unsigned long rx_align_error; + unsigned long rx_runt; + unsigned long rx_fragments; + unsigned long rx_64byte; + unsigned long rx_128byte; + unsigned long rx_256byte; + unsigned long rx_512byte; + unsigned long rx_1024byte; + unsigned long rx_1518byte; + unsigned long rx_maxbyte; + unsigned long rx_toolong; + unsigned long rx_good_byte; + unsigned long rx_bad_byte; + unsigned long rx_overflow; + unsigned long filtered; + + unsigned long tx_broadcast; + unsigned long tx_pause; + unsigned long tx_multicast; + unsigned long tx_underrun; + unsigned long tx_64byte; + unsigned long tx_128byte; + unsigned long tx_256byte; + unsigned long tx_512byte; + unsigned long tx_1024byte; + unsigned long tx_1518byte; + unsigned long tx_maxbyte; + unsigned long tx_oversize; + unsigned long tx_byte; + unsigned long tx_collision; + unsigned long tx_abortcol; + unsigned long tx_multicol; + unsigned long tx_singlecol; + unsigned long tx_excdefer; + unsigned long tx_defer; + unsigned long tx_xlatecol; +}; + +struct ar7240sw { + struct mii_bus *mii_bus; + struct ag71xx_switch_platform_data *swdata; + struct switch_dev swdev; + int num_ports; + u8 ver; + bool vlan; + u16 vlan_id[AR7240_MAX_VLANS]; + u8 vlan_table[AR7240_MAX_VLANS]; + u8 vlan_tagged; + u16 pvid[AR7240_NUM_PORTS]; + char buf[80]; + + rwlock_t stats_lock; + struct ar7240sw_port_stat port_stats[AR7240_NUM_PORTS]; +}; + +struct ar7240sw_hw_stat { + char string[ETH_GSTRING_LEN]; + int sizeof_stat; + int reg; +}; + +static DEFINE_MUTEX(reg_mutex); + +static inline int sw_is_ar7240(struct ar7240sw *as) +{ + return as->ver == AR7240_MASK_CTRL_VERSION_AR7240; +} + +static inline int sw_is_ar934x(struct ar7240sw *as) +{ + return as->ver == AR7240_MASK_CTRL_VERSION_AR934X; +} + +static inline u32 ar7240sw_port_mask(struct ar7240sw *as, int port) +{ + return BIT(port); +} + +static inline u32 ar7240sw_port_mask_all(struct ar7240sw *as) +{ + return BIT(as->swdev.ports) - 1; +} + +static inline u32 ar7240sw_port_mask_but(struct ar7240sw *as, int port) +{ + return ar7240sw_port_mask_all(as) & ~BIT(port); +} + +static inline u16 mk_phy_addr(u32 reg) +{ + return 0x17 & ((reg >> 4) | 0x10); +} + +static inline u16 mk_phy_reg(u32 reg) +{ + return (reg << 1) & 0x1e; +} + +static inline u16 mk_high_addr(u32 reg) +{ + return (reg >> 7) & 0x1ff; +} + +static u32 __ar7240sw_reg_read(struct 
mii_bus *mii, u32 reg) +{ + unsigned long flags; + u16 phy_addr; + u16 phy_reg; + u32 hi, lo; + + reg = (reg & 0xfffffffc) >> 2; + phy_addr = mk_phy_addr(reg); + phy_reg = mk_phy_reg(reg); + + local_irq_save(flags); + ag71xx_mdio_mii_write(mii, 0x1f, 0x10, mk_high_addr(reg)); + lo = (u32) ag71xx_mdio_mii_read(mii, phy_addr, phy_reg); + hi = (u32) ag71xx_mdio_mii_read(mii, phy_addr, phy_reg + 1); + local_irq_restore(flags); + + return (hi << 16) | lo; +} + +static void __ar7240sw_reg_write(struct mii_bus *mii, u32 reg, u32 val) +{ + unsigned long flags; + u16 phy_addr; + u16 phy_reg; + + reg = (reg & 0xfffffffc) >> 2; + phy_addr = mk_phy_addr(reg); + phy_reg = mk_phy_reg(reg); + + local_irq_save(flags); + ag71xx_mdio_mii_write(mii, 0x1f, 0x10, mk_high_addr(reg)); + ag71xx_mdio_mii_write(mii, phy_addr, phy_reg + 1, (val >> 16)); + ag71xx_mdio_mii_write(mii, phy_addr, phy_reg, (val & 0xffff)); + local_irq_restore(flags); +} + +static u32 ar7240sw_reg_read(struct mii_bus *mii, u32 reg_addr) +{ + u32 ret; + + mutex_lock(®_mutex); + ret = __ar7240sw_reg_read(mii, reg_addr); + mutex_unlock(®_mutex); + + return ret; +} + +static void ar7240sw_reg_write(struct mii_bus *mii, u32 reg_addr, u32 reg_val) +{ + mutex_lock(®_mutex); + __ar7240sw_reg_write(mii, reg_addr, reg_val); + mutex_unlock(®_mutex); +} + +static u32 ar7240sw_reg_rmw(struct mii_bus *mii, u32 reg, u32 mask, u32 val) +{ + u32 t; + + mutex_lock(®_mutex); + t = __ar7240sw_reg_read(mii, reg); + t &= ~mask; + t |= val; + __ar7240sw_reg_write(mii, reg, t); + mutex_unlock(®_mutex); + + return t; +} + +static void ar7240sw_reg_set(struct mii_bus *mii, u32 reg, u32 val) +{ + u32 t; + + mutex_lock(®_mutex); + t = __ar7240sw_reg_read(mii, reg); + t |= val; + __ar7240sw_reg_write(mii, reg, t); + mutex_unlock(®_mutex); +} + +static int __ar7240sw_reg_wait(struct mii_bus *mii, u32 reg, u32 mask, u32 val, + unsigned timeout) +{ + int i; + + for (i = 0; i < timeout; i++) { + u32 t; + + t = __ar7240sw_reg_read(mii, reg); + if ((t & mask) == val) + return 0; + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static int ar7240sw_reg_wait(struct mii_bus *mii, u32 reg, u32 mask, u32 val, + unsigned timeout) +{ + int ret; + + mutex_lock(®_mutex); + ret = __ar7240sw_reg_wait(mii, reg, mask, val, timeout); + mutex_unlock(®_mutex); + return ret; +} + +int ar7240sw_phy_read(struct mii_bus *mii, int phy_addr, int reg_addr) +{ + u32 t, val = 0xffff; + int err; + + if (phy_addr >= AR7240_NUM_PHYS) + return 0xffff; + + mutex_lock(®_mutex); + t = (reg_addr << AR7240_MDIO_CTRL_REG_ADDR_S) | + (phy_addr << AR7240_MDIO_CTRL_PHY_ADDR_S) | + AR7240_MDIO_CTRL_MASTER_EN | + AR7240_MDIO_CTRL_BUSY | + AR7240_MDIO_CTRL_CMD_READ; + + __ar7240sw_reg_write(mii, AR7240_REG_MDIO_CTRL, t); + err = __ar7240sw_reg_wait(mii, AR7240_REG_MDIO_CTRL, + AR7240_MDIO_CTRL_BUSY, 0, 5); + if (!err) + val = __ar7240sw_reg_read(mii, AR7240_REG_MDIO_CTRL); + mutex_unlock(®_mutex); + + return val & AR7240_MDIO_CTRL_DATA_M; +} + +int ar7240sw_phy_write(struct mii_bus *mii, int phy_addr, int reg_addr, + u16 reg_val) +{ + u32 t; + int ret; + + if (phy_addr >= AR7240_NUM_PHYS) + return -EINVAL; + + mutex_lock(®_mutex); + t = (phy_addr << AR7240_MDIO_CTRL_PHY_ADDR_S) | + (reg_addr << AR7240_MDIO_CTRL_REG_ADDR_S) | + AR7240_MDIO_CTRL_MASTER_EN | + AR7240_MDIO_CTRL_BUSY | + AR7240_MDIO_CTRL_CMD_WRITE | + reg_val; + + __ar7240sw_reg_write(mii, AR7240_REG_MDIO_CTRL, t); + ret = __ar7240sw_reg_wait(mii, AR7240_REG_MDIO_CTRL, + AR7240_MDIO_CTRL_BUSY, 0, 5); + mutex_unlock(®_mutex); + + return 
ret; +} + +static int ar7240sw_capture_stats(struct ar7240sw *as) +{ + struct mii_bus *mii = as->mii_bus; + int port; + int ret; + + write_lock(&as->stats_lock); + + /* Capture the hardware statistics for all ports */ + ar7240sw_reg_rmw(mii, AR7240_REG_MIB_FUNCTION0, + (AR7240_MIB_FUNC_M << AR7240_MIB_FUNC_S), + (AR7240_MIB_FUNC_CAPTURE << AR7240_MIB_FUNC_S)); + + /* Wait for the capturing to complete. */ + ret = ar7240sw_reg_wait(mii, AR7240_REG_MIB_FUNCTION0, + AR7240_MIB_BUSY, 0, 10); + + if (ret) + goto unlock; + + for (port = 0; port < AR7240_NUM_PORTS; port++) { + unsigned int base; + struct ar7240sw_port_stat *stats; + + base = AR7240_REG_STATS_BASE(port); + stats = &as->port_stats[port]; + +#define READ_STAT(_r) ar7240sw_reg_read(mii, base + AR7240_STATS_ ## _r) + + stats->rx_good_byte += READ_STAT(RXGOODBYTE); + stats->tx_byte += READ_STAT(TXBYTE); + +#undef READ_STAT + } + + ret = 0; + +unlock: + write_unlock(&as->stats_lock); + return ret; +} + +static void ar7240sw_disable_port(struct ar7240sw *as, unsigned port) +{ + ar7240sw_reg_write(as->mii_bus, AR7240_REG_PORT_CTRL(port), + AR7240_PORT_CTRL_STATE_DISABLED); +} + +static void ar7240sw_setup(struct ar7240sw *as) +{ + struct mii_bus *mii = as->mii_bus; + + /* Enable CPU port, and disable mirror port */ + ar7240sw_reg_write(mii, AR7240_REG_CPU_PORT, + AR7240_CPU_PORT_EN | + (15 << AR7240_MIRROR_PORT_S)); + + /* Setup TAG priority mapping */ + ar7240sw_reg_write(mii, AR7240_REG_TAG_PRIORITY, 0xfa50); + + if (sw_is_ar934x(as)) { + /* Enable aging, MAC replacing */ + ar7240sw_reg_write(mii, AR934X_REG_AT_CTRL, + 0x2b /* 5 min age time */ | + AR934X_AT_CTRL_AGE_EN | + AR934X_AT_CTRL_LEARN_CHANGE); + /* Enable ARP frame acknowledge */ + ar7240sw_reg_set(mii, AR934X_REG_QM_CTRL, + AR934X_QM_CTRL_ARP_EN); + /* Enable Broadcast/Multicast frames transmitted to the CPU */ + ar7240sw_reg_set(mii, AR934X_REG_FLOOD_MASK, + AR934X_FLOOD_MASK_BC_DP(0) | + AR934X_FLOOD_MASK_MC_DP(0)); + + /* setup MTU */ + ar7240sw_reg_rmw(mii, AR7240_REG_GLOBAL_CTRL, + AR9340_GLOBAL_CTRL_MTU_M, + AR9340_GLOBAL_CTRL_MTU_M); + + /* Enable MIB counters */ + ar7240sw_reg_set(mii, AR7240_REG_MIB_FUNCTION0, + AR934X_MIB_ENABLE); + + } else { + /* Enable ARP frame acknowledge, aging, MAC replacing */ + ar7240sw_reg_write(mii, AR7240_REG_AT_CTRL, + AR7240_AT_CTRL_RESERVED | + 0x2b /* 5 min age time */ | + AR7240_AT_CTRL_AGE_EN | + AR7240_AT_CTRL_ARP_EN | + AR7240_AT_CTRL_LEARN_CHANGE); + /* Enable Broadcast frames transmitted to the CPU */ + ar7240sw_reg_set(mii, AR7240_REG_FLOOD_MASK, + AR7240_FLOOD_MASK_BROAD_TO_CPU); + + /* setup MTU */ + ar7240sw_reg_rmw(mii, AR7240_REG_GLOBAL_CTRL, + AR7240_GLOBAL_CTRL_MTU_M, + AR7240_GLOBAL_CTRL_MTU_M); + } + + /* setup Service TAG */ + ar7240sw_reg_rmw(mii, AR7240_REG_SERVICE_TAG, AR7240_SERVICE_TAG_M, 0); +} + +/* inspired by phy_poll_reset in drivers/net/phy/phy_device.c */ +static int +ar7240sw_phy_poll_reset(struct mii_bus *bus) +{ + const unsigned int sleep_msecs = 20; + int ret, elapsed, i; + + for (elapsed = sleep_msecs; elapsed <= 600; + elapsed += sleep_msecs) { + msleep(sleep_msecs); + for (i = 0; i < AR7240_NUM_PHYS; i++) { + ret = ar7240sw_phy_read(bus, i, MII_BMCR); + if (ret < 0) + return ret; + if (ret & BMCR_RESET) + break; + if (i == AR7240_NUM_PHYS - 1) { + usleep_range(1000, 2000); + return 0; + } + } + } + return -ETIMEDOUT; +} + +static int ar7240sw_reset(struct ar7240sw *as) +{ + struct mii_bus *mii = as->mii_bus; + int ret; + int i; + + /* Set all ports to disabled state. 
*/ + for (i = 0; i < AR7240_NUM_PORTS; i++) + ar7240sw_disable_port(as, i); + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 3000); + + /* Reset the switch. */ + ar7240sw_reg_write(mii, AR7240_REG_MASK_CTRL, + AR7240_MASK_CTRL_SOFT_RESET); + + ret = ar7240sw_reg_wait(mii, AR7240_REG_MASK_CTRL, + AR7240_MASK_CTRL_SOFT_RESET, 0, 1000); + + /* setup PHYs */ + for (i = 0; i < AR7240_NUM_PHYS; i++) { + ar7240sw_phy_write(mii, i, MII_ADVERTISE, + ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + ar7240sw_phy_write(mii, i, MII_BMCR, + BMCR_RESET | BMCR_ANENABLE); + } + ret = ar7240sw_phy_poll_reset(mii); + if (ret) + return ret; + + ar7240sw_setup(as); + return ret; +} + +static void ar7240sw_setup_port(struct ar7240sw *as, unsigned port, u8 portmask) +{ + struct mii_bus *mii = as->mii_bus; + u32 ctrl; + u32 vid, mode; + + ctrl = AR7240_PORT_CTRL_STATE_FORWARD | AR7240_PORT_CTRL_LEARN | + AR7240_PORT_CTRL_SINGLE_VLAN; + + if (port == AR7240_PORT_CPU) { + ar7240sw_reg_write(mii, AR7240_REG_PORT_STATUS(port), + AR7240_PORT_STATUS_SPEED_1000 | + AR7240_PORT_STATUS_TXFLOW | + AR7240_PORT_STATUS_RXFLOW | + AR7240_PORT_STATUS_TXMAC | + AR7240_PORT_STATUS_RXMAC | + AR7240_PORT_STATUS_DUPLEX); + } else { + ar7240sw_reg_write(mii, AR7240_REG_PORT_STATUS(port), + AR7240_PORT_STATUS_LINK_AUTO); + } + + /* Set the default VID for this port */ + if (as->vlan) { + vid = as->vlan_id[as->pvid[port]]; + mode = AR7240_PORT_VLAN_MODE_SECURE; + } else { + vid = port; + mode = AR7240_PORT_VLAN_MODE_PORT_ONLY; + } + + if (as->vlan) { + if (as->vlan_tagged & BIT(port)) + ctrl |= AR7240_PORT_CTRL_VLAN_MODE_ADD << + AR7240_PORT_CTRL_VLAN_MODE_S; + else + ctrl |= AR7240_PORT_CTRL_VLAN_MODE_STRIP << + AR7240_PORT_CTRL_VLAN_MODE_S; + } else { + ctrl |= AR7240_PORT_CTRL_VLAN_MODE_KEEP << + AR7240_PORT_CTRL_VLAN_MODE_S; + } + + if (!portmask) { + if (port == AR7240_PORT_CPU) + portmask = ar7240sw_port_mask_but(as, AR7240_PORT_CPU); + else + portmask = ar7240sw_port_mask(as, AR7240_PORT_CPU); + } + + /* allow the port to talk to all other ports, but exclude its + * own ID to prevent frames from being reflected back to the + * port that they came from */ + portmask &= ar7240sw_port_mask_but(as, port); + + ar7240sw_reg_write(mii, AR7240_REG_PORT_CTRL(port), ctrl); + if (sw_is_ar934x(as)) { + u32 vlan1, vlan2; + + vlan1 = (vid << AR934X_PORT_VLAN1_DEFAULT_CVID_S); + vlan2 = (portmask << AR934X_PORT_VLAN2_PORT_VID_MEM_S) | + (mode << AR934X_PORT_VLAN2_8021Q_MODE_S); + ar7240sw_reg_write(mii, AR934X_REG_PORT_VLAN1(port), vlan1); + ar7240sw_reg_write(mii, AR934X_REG_PORT_VLAN2(port), vlan2); + } else { + u32 vlan; + + vlan = vid | (mode << AR7240_PORT_VLAN_MODE_S) | + (portmask << AR7240_PORT_VLAN_DEST_PORTS_S); + + ar7240sw_reg_write(mii, AR7240_REG_PORT_VLAN(port), vlan); + } +} + +static int ar7240_set_addr(struct ar7240sw *as, u8 *addr) +{ + struct mii_bus *mii = as->mii_bus; + u32 t; + + t = (addr[4] << 8) | addr[5]; + ar7240sw_reg_write(mii, AR7240_REG_MAC_ADDR0, t); + + t = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]; + ar7240sw_reg_write(mii, AR7240_REG_MAC_ADDR1, t); + + return 0; +} + +static int +ar7240_set_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + as->vlan_id[val->port_vlan] = val->value.i; + return 0; +} + +static int +ar7240_get_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + val->value.i = 
as->vlan_id[val->port_vlan]; + return 0; +} + +static int +ar7240_set_pvid(struct switch_dev *dev, int port, int vlan) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + + /* make sure no invalid PVIDs get set */ + + if (vlan >= dev->vlans) + return -EINVAL; + + as->pvid[port] = vlan; + return 0; +} + +static int +ar7240_get_pvid(struct switch_dev *dev, int port, int *vlan) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + *vlan = as->pvid[port]; + return 0; +} + +static int +ar7240_get_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + u8 ports = as->vlan_table[val->port_vlan]; + int i; + + val->len = 0; + for (i = 0; i < as->swdev.ports; i++) { + struct switch_port *p; + + if (!(ports & (1 << i))) + continue; + + p = &val->value.ports[val->len++]; + p->id = i; + if (as->vlan_tagged & (1 << i)) + p->flags = (1 << SWITCH_PORT_FLAG_TAGGED); + else + p->flags = 0; + } + return 0; +} + +static int +ar7240_set_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + u8 *vt = &as->vlan_table[val->port_vlan]; + int i, j; + + *vt = 0; + for (i = 0; i < val->len; i++) { + struct switch_port *p = &val->value.ports[i]; + + if (p->flags & (1 << SWITCH_PORT_FLAG_TAGGED)) + as->vlan_tagged |= (1 << p->id); + else { + as->vlan_tagged &= ~(1 << p->id); + as->pvid[p->id] = val->port_vlan; + + /* make sure that an untagged port does not + * appear in other vlans */ + for (j = 0; j < AR7240_MAX_VLANS; j++) { + if (j == val->port_vlan) + continue; + as->vlan_table[j] &= ~(1 << p->id); + } + } + + *vt |= 1 << p->id; + } + return 0; +} + +static int +ar7240_set_vlan(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + as->vlan = !!val->value.i; + return 0; +} + +static int +ar7240_get_vlan(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + val->value.i = as->vlan; + return 0; +} + +static void +ar7240_vtu_op(struct ar7240sw *as, u32 op, u32 val) +{ + struct mii_bus *mii = as->mii_bus; + + if (ar7240sw_reg_wait(mii, AR7240_REG_VTU, AR7240_VTU_ACTIVE, 0, 5)) + return; + + if ((op & AR7240_VTU_OP) == AR7240_VTU_OP_LOAD) { + val &= AR7240_VTUDATA_MEMBER; + val |= AR7240_VTUDATA_VALID; + ar7240sw_reg_write(mii, AR7240_REG_VTU_DATA, val); + } + op |= AR7240_VTU_ACTIVE; + ar7240sw_reg_write(mii, AR7240_REG_VTU, op); +} + +static int +ar7240_hw_apply(struct switch_dev *dev) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + u8 portmask[AR7240_NUM_PORTS]; + int i, j; + + /* flush all vlan translation unit entries */ + ar7240_vtu_op(as, AR7240_VTU_OP_FLUSH, 0); + + memset(portmask, 0, sizeof(portmask)); + if (as->vlan) { + /* calculate the port destination masks and load vlans + * into the vlan translation unit */ + for (j = 0; j < AR7240_MAX_VLANS; j++) { + u8 vp = as->vlan_table[j]; + + if (!vp) + continue; + + for (i = 0; i < as->swdev.ports; i++) { + u8 mask = (1 << i); + if (vp & mask) + portmask[i] |= vp & ~mask; + } + + ar7240_vtu_op(as, + AR7240_VTU_OP_LOAD | + (as->vlan_id[j] << AR7240_VTU_VID_S), + as->vlan_table[j]); + } + } else { + /* vlan disabled: + * isolate all ports, but connect them to the cpu port */ + for (i = 0; i < as->swdev.ports; i++) { + if (i == AR7240_PORT_CPU) + continue; + + portmask[i] = 1 << AR7240_PORT_CPU; + portmask[AR7240_PORT_CPU] |= (1 << i); + } + } + + /* update the port destination mask registers and tag settings */ + for (i = 0; i < 
as->swdev.ports; i++) + ar7240sw_setup_port(as, i, portmask[i]); + + return 0; +} + +static int +ar7240_reset_switch(struct switch_dev *dev) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + ar7240sw_reset(as); + return 0; +} + +static int +ar7240_get_port_link(struct switch_dev *dev, int port, + struct switch_port_link *link) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + u32 status; + + if (port >= AR7240_NUM_PORTS) + return -EINVAL; + + status = ar7240sw_reg_read(mii, AR7240_REG_PORT_STATUS(port)); + link->aneg = !!(status & AR7240_PORT_STATUS_LINK_AUTO); + if (link->aneg) { + link->link = !!(status & AR7240_PORT_STATUS_LINK_UP); + if (!link->link) + return 0; + } else { + link->link = true; + } + + link->duplex = !!(status & AR7240_PORT_STATUS_DUPLEX); + link->tx_flow = !!(status & AR7240_PORT_STATUS_TXFLOW); + link->rx_flow = !!(status & AR7240_PORT_STATUS_RXFLOW); + switch (status & AR7240_PORT_STATUS_SPEED_M) { + case AR7240_PORT_STATUS_SPEED_10: + link->speed = SWITCH_PORT_SPEED_10; + break; + case AR7240_PORT_STATUS_SPEED_100: + link->speed = SWITCH_PORT_SPEED_100; + break; + case AR7240_PORT_STATUS_SPEED_1000: + link->speed = SWITCH_PORT_SPEED_1000; + break; + } + + return 0; +} + +static int +ar7240_get_port_stats(struct switch_dev *dev, int port, + struct switch_port_stats *stats) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + + if (port >= AR7240_NUM_PORTS) + return -EINVAL; + + ar7240sw_capture_stats(as); + + read_lock(&as->stats_lock); + stats->rx_bytes = as->port_stats[port].rx_good_byte; + stats->tx_bytes = as->port_stats[port].tx_byte; + read_unlock(&as->stats_lock); + + return 0; +} + +static int +ar7240_set_mirror_monitor_port(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + + int port = val->value.i; + + if (port > 15) + return -EINVAL; + + ar7240sw_reg_rmw(mii, AR7240_REG_CPU_PORT, + AR7240_MIRROR_PORT_M << AR7240_MIRROR_PORT_S, + port << AR7240_MIRROR_PORT_S); + + return 0; +} + +static int +ar7240_get_mirror_monitor_port(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + + u32 ret; + + ret = ar7240sw_reg_read(mii, AR7240_REG_CPU_PORT); + val->value.i = (ret >> AR7240_MIRROR_PORT_S) & AR7240_MIRROR_PORT_M; + + return 0; +} + +static int +ar7240_set_mirror_rx(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + + int port = val->port_vlan; + + if (port >= dev->ports) + return -EINVAL; + + if (val && val->value.i == 1) + ar7240sw_reg_set(mii, AR7240_REG_PORT_CTRL(port), + AR7240_PORT_CTRL_MIRROR_RX); + else + ar7240sw_reg_rmw(mii, AR7240_REG_PORT_CTRL(port), + AR7240_PORT_CTRL_MIRROR_RX, 0); + + return 0; +} + +static int +ar7240_get_mirror_rx(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + + u32 ctrl; + + int port = val->port_vlan; + + if (port >= dev->ports) + return -EINVAL; + + ctrl = ar7240sw_reg_read(mii, AR7240_REG_PORT_CTRL(port)); + + if ((ctrl & AR7240_PORT_CTRL_MIRROR_RX) == AR7240_PORT_CTRL_MIRROR_RX) + val->value.i = 1; + else + val->value.i = 0; + + return 0; +} + +static int +ar7240_set_mirror_tx(struct switch_dev *dev, const struct switch_attr *attr, + 
struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + + int port = val->port_vlan; + + if (port >= dev->ports) + return -EINVAL; + + if (val && val->value.i == 1) + ar7240sw_reg_set(mii, AR7240_REG_PORT_CTRL(port), + AR7240_PORT_CTRL_MIRROR_TX); + else + ar7240sw_reg_rmw(mii, AR7240_REG_PORT_CTRL(port), + AR7240_PORT_CTRL_MIRROR_TX, 0); + + return 0; +} + +static int +ar7240_get_mirror_tx(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar7240sw *as = sw_to_ar7240(dev); + struct mii_bus *mii = as->mii_bus; + + u32 ctrl; + + int port = val->port_vlan; + + if (port >= dev->ports) + return -EINVAL; + + ctrl = ar7240sw_reg_read(mii, AR7240_REG_PORT_CTRL(port)); + + if ((ctrl & AR7240_PORT_CTRL_MIRROR_TX) == AR7240_PORT_CTRL_MIRROR_TX) + val->value.i = 1; + else + val->value.i = 0; + + return 0; +} + +static struct switch_attr ar7240_globals[] = { + { + .type = SWITCH_TYPE_INT, + .name = "enable_vlan", + .description = "Enable VLAN mode", + .set = ar7240_set_vlan, + .get = ar7240_get_vlan, + .max = 1 + }, + { + .type = SWITCH_TYPE_INT, + .name = "mirror_monitor_port", + .description = "Mirror monitor port", + .set = ar7240_set_mirror_monitor_port, + .get = ar7240_get_mirror_monitor_port, + .max = 15 + }, +}; + +static struct switch_attr ar7240_port[] = { + { + .type = SWITCH_TYPE_INT, + .name = "enable_mirror_rx", + .description = "Enable mirroring of RX packets", + .set = ar7240_set_mirror_rx, + .get = ar7240_get_mirror_rx, + .max = 1 + }, + { + .type = SWITCH_TYPE_INT, + .name = "enable_mirror_tx", + .description = "Enable mirroring of TX packets", + .set = ar7240_set_mirror_tx, + .get = ar7240_get_mirror_tx, + .max = 1 + }, +}; + +static struct switch_attr ar7240_vlan[] = { + { + .type = SWITCH_TYPE_INT, + .name = "vid", + .description = "VLAN ID", + .set = ar7240_set_vid, + .get = ar7240_get_vid, + .max = 4094, + }, +}; + +static const struct switch_dev_ops ar7240_ops = { + .attr_global = { + .attr = ar7240_globals, + .n_attr = ARRAY_SIZE(ar7240_globals), + }, + .attr_port = { + .attr = ar7240_port, + .n_attr = ARRAY_SIZE(ar7240_port), + }, + .attr_vlan = { + .attr = ar7240_vlan, + .n_attr = ARRAY_SIZE(ar7240_vlan), + }, + .get_port_pvid = ar7240_get_pvid, + .set_port_pvid = ar7240_set_pvid, + .get_vlan_ports = ar7240_get_ports, + .set_vlan_ports = ar7240_set_ports, + .apply_config = ar7240_hw_apply, + .reset_switch = ar7240_reset_switch, + .get_port_link = ar7240_get_port_link, + .get_port_stats = ar7240_get_port_stats, +}; + +static struct ar7240sw * +ar7240_probe(struct ag71xx *ag, struct device_node *np) +{ + struct mii_bus *mii = ag->mii_bus; + struct ar7240sw *as; + struct switch_dev *swdev; + u32 ctrl; + u16 phy_id1; + u16 phy_id2; + int i; + + phy_id1 = ar7240sw_phy_read(mii, 0, MII_PHYSID1); + phy_id2 = ar7240sw_phy_read(mii, 0, MII_PHYSID2); + if ((phy_id1 != AR7240_PHY_ID1 || phy_id2 != AR7240_PHY_ID2) && + (phy_id1 != AR934X_PHY_ID1 || phy_id2 != AR934X_PHY_ID2)) { + pr_err("%s: unknown phy id '%04x:%04x'\n", + dev_name(&mii->dev), phy_id1, phy_id2); + return NULL; + } + + as = kzalloc(sizeof(*as), GFP_KERNEL); + if (!as) + return NULL; + + as->mii_bus = mii; + + swdev = &as->swdev; + + ctrl = ar7240sw_reg_read(mii, AR7240_REG_MASK_CTRL); + as->ver = (ctrl >> AR7240_MASK_CTRL_VERSION_S) & + AR7240_MASK_CTRL_VERSION_M; + + if (sw_is_ar7240(as)) { + swdev->name = "AR7240/AR9330 built-in switch"; + swdev->ports = AR7240_NUM_PORTS - 1; + } else if (sw_is_ar934x(as)) { + swdev->name = 
"AR934X built-in switch"; + + if (ag->phy_if_mode == PHY_INTERFACE_MODE_GMII) { + ar7240sw_reg_set(mii, AR934X_REG_OPER_MODE0, + AR934X_OPER_MODE0_MAC_GMII_EN); + } else if (ag->phy_if_mode == PHY_INTERFACE_MODE_MII) { + ar7240sw_reg_set(mii, AR934X_REG_OPER_MODE0, + AR934X_OPER_MODE0_PHY_MII_EN); + } else { + pr_err("%s: invalid PHY interface mode\n", + dev_name(&mii->dev)); + goto err_free; + } + + if (of_property_read_bool(np, "phy4-mii-enable")) { + ar7240sw_reg_set(mii, AR934X_REG_OPER_MODE1, + AR934X_REG_OPER_MODE1_PHY4_MII_EN); + swdev->ports = AR7240_NUM_PORTS - 1; + } else { + swdev->ports = AR7240_NUM_PORTS; + } + } else { + pr_err("%s: unsupported chip, ctrl=%08x\n", + dev_name(&mii->dev), ctrl); + goto err_free; + } + + swdev->cpu_port = AR7240_PORT_CPU; + swdev->vlans = AR7240_MAX_VLANS; + swdev->ops = &ar7240_ops; + + if (register_switch(&as->swdev, ag->dev) < 0) + goto err_free; + + pr_info("%s: Found an %s\n", dev_name(&mii->dev), swdev->name); + + /* initialize defaults */ + for (i = 0; i < AR7240_MAX_VLANS; i++) + as->vlan_id[i] = i; + + as->vlan_table[0] = ar7240sw_port_mask_all(as); + + return as; + +err_free: + kfree(as); + return NULL; +} + +void ag71xx_ar7240_start(struct ag71xx *ag) +{ + struct ar7240sw *as = ag->phy_priv; + + if (!as) + return; + + ar7240sw_reset(as); + + ar7240_set_addr(as, ag->dev->dev_addr); + ar7240_hw_apply(&as->swdev); +} + +int ag71xx_ar7240_init(struct ag71xx *ag, struct device_node *np) +{ + struct ar7240sw *as; + + as = ar7240_probe(ag, np); + if (!as) + return -ENODEV; + + ag->phy_priv = as; + ar7240sw_reset(as); + + rwlock_init(&as->stats_lock); + + return 0; +} + +void ag71xx_ar7240_cleanup(struct ag71xx *ag) +{ + struct ar7240sw *as = ag->phy_priv; + + if (!as) + return; + + unregister_switch(&as->swdev); + kfree(as); + ag->phy_priv = NULL; +} diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c new file mode 100644 index 0000000000..20cf1c15c8 --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c @@ -0,0 +1,285 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include <linux/debugfs.h> + +#include "ag71xx.h" + +static struct dentry *ag71xx_debugfs_root; + +static int ag71xx_debugfs_generic_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +void ag71xx_debugfs_update_int_stats(struct ag71xx *ag, u32 status) +{ + if (status) + ag->debug.int_stats.total++; + if (status & AG71XX_INT_TX_PS) + ag->debug.int_stats.tx_ps++; + if (status & AG71XX_INT_TX_UR) + ag->debug.int_stats.tx_ur++; + if (status & AG71XX_INT_TX_BE) + ag->debug.int_stats.tx_be++; + if (status & AG71XX_INT_RX_PR) + ag->debug.int_stats.rx_pr++; + if (status & AG71XX_INT_RX_OF) + ag->debug.int_stats.rx_of++; + if (status & AG71XX_INT_RX_BE) + ag->debug.int_stats.rx_be++; +} + +static ssize_t read_file_int_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ +#define PR_INT_STAT(_label, _field) \ + len += snprintf(buf + len, sizeof(buf) - len, \ + "%20s: %10lu\n", _label, ag->debug.int_stats._field); + + struct ag71xx *ag = file->private_data; + char buf[256]; + unsigned int len = 0; + + PR_INT_STAT("TX Packet Sent", tx_ps); + PR_INT_STAT("TX Underrun", tx_ur); + PR_INT_STAT("TX Bus Error", tx_be); + PR_INT_STAT("RX Packet Received", rx_pr); + PR_INT_STAT("RX Overflow", rx_of); + PR_INT_STAT("RX Bus Error", rx_be); + len += snprintf(buf + len, sizeof(buf) - len, "\n"); + PR_INT_STAT("Total", total); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +#undef PR_INT_STAT +} + +static const struct file_operations ag71xx_fops_int_stats = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_int_stats, + .owner = THIS_MODULE +}; + +void ag71xx_debugfs_update_napi_stats(struct ag71xx *ag, int rx, int tx) +{ + struct ag71xx_napi_stats *stats = &ag->debug.napi_stats; + + if (rx) { + stats->rx_count++; + stats->rx_packets += rx; + if (rx <= AG71XX_NAPI_WEIGHT) + stats->rx[rx]++; + if (rx > stats->rx_packets_max) + stats->rx_packets_max = rx; + } + + if (tx) { + stats->tx_count++; + stats->tx_packets += tx; + if (tx <= AG71XX_NAPI_WEIGHT) + stats->tx[tx]++; + if (tx > stats->tx_packets_max) + stats->tx_packets_max = tx; + } +} + +static ssize_t read_file_napi_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ag71xx *ag = file->private_data; + struct ag71xx_napi_stats *stats = &ag->debug.napi_stats; + char *buf; + unsigned int buflen; + unsigned int len = 0; + unsigned long rx_avg = 0; + unsigned long tx_avg = 0; + int ret; + int i; + + buflen = 2048; + buf = kmalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (stats->rx_count) + rx_avg = stats->rx_packets / stats->rx_count; + + if (stats->tx_count) + tx_avg = stats->tx_packets / stats->tx_count; + + len += snprintf(buf + len, buflen - len, "%3s %10s %10s\n", + "len", "rx", "tx"); + + for (i = 1; i <= AG71XX_NAPI_WEIGHT; i++) + len += snprintf(buf + len, buflen - len, + "%3d: %10lu %10lu\n", + i, stats->rx[i], stats->tx[i]); + + len += snprintf(buf + len, buflen - len, "\n"); + + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "sum", stats->rx_count, stats->tx_count); + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "avg", rx_avg, tx_avg); + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "max", stats->rx_packets_max, stats->tx_packets_max); + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "pkt", stats->rx_packets, stats->tx_packets); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + 
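+ /* the formatting buffer is only needed while building the text; free it once it has been copied to user space */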
kfree(buf); + + return ret; +} + +static const struct file_operations ag71xx_fops_napi_stats = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_napi_stats, + .owner = THIS_MODULE +}; + +#define DESC_PRINT_LEN 64 + +static ssize_t read_file_ring(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos, + struct ag71xx *ag, + struct ag71xx_ring *ring, + unsigned desc_reg) +{ + int ring_size = BIT(ring->order); + int ring_mask = ring_size - 1; + char *buf; + unsigned int buflen; + unsigned int len = 0; + unsigned long flags; + ssize_t ret; + int curr; + int dirty; + u32 desc_hw; + int i; + + buflen = (ring_size * DESC_PRINT_LEN); + buf = kmalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += snprintf(buf + len, buflen - len, + "Idx ... %-8s %-8s %-8s %-8s .\n", + "desc", "next", "data", "ctrl"); + + spin_lock_irqsave(&ag->lock, flags); + + curr = (ring->curr & ring_mask); + dirty = (ring->dirty & ring_mask); + desc_hw = ag71xx_rr(ag, desc_reg); + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + u32 desc_dma = ((u32) ring->descs_dma) + i * AG71XX_DESC_SIZE; + + len += snprintf(buf + len, buflen - len, + "%3d %c%c%c %08x %08x %08x %08x %c\n", + i, + (i == curr) ? 'C' : ' ', + (i == dirty) ? 'D' : ' ', + (desc_hw == desc_dma) ? 'H' : ' ', + desc_dma, + desc->next, + desc->data, + desc->ctrl, + (desc->ctrl & DESC_EMPTY) ? 'E' : '*'); + } + + spin_unlock_irqrestore(&ag->lock, flags); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return ret; +} + +static ssize_t read_file_tx_ring(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ag71xx *ag = file->private_data; + + return read_file_ring(file, user_buf, count, ppos, ag, &ag->tx_ring, + AG71XX_REG_TX_DESC); +} + +static const struct file_operations ag71xx_fops_tx_ring = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_tx_ring, + .owner = THIS_MODULE +}; + +static ssize_t read_file_rx_ring(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ag71xx *ag = file->private_data; + + return read_file_ring(file, user_buf, count, ppos, ag, &ag->rx_ring, + AG71XX_REG_RX_DESC); +} + +static const struct file_operations ag71xx_fops_rx_ring = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_rx_ring, + .owner = THIS_MODULE +}; + +void ag71xx_debugfs_exit(struct ag71xx *ag) +{ + debugfs_remove_recursive(ag->debug.debugfs_dir); +} + +int ag71xx_debugfs_init(struct ag71xx *ag) +{ + struct device *dev = &ag->pdev->dev; + + ag->debug.debugfs_dir = debugfs_create_dir(dev_name(dev), + ag71xx_debugfs_root); + if (!ag->debug.debugfs_dir) { + dev_err(dev, "unable to create debugfs directory\n"); + return -ENOENT; + } + + debugfs_create_file("int_stats", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_int_stats); + debugfs_create_file("napi_stats", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_napi_stats); + debugfs_create_file("tx_ring", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_tx_ring); + debugfs_create_file("rx_ring", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_rx_ring); + + return 0; +} + +int ag71xx_debugfs_root_init(void) +{ + if (ag71xx_debugfs_root) + return -EBUSY; + + ag71xx_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!ag71xx_debugfs_root) + return -ENOENT; + + return 0; +} + +void ag71xx_debugfs_root_exit(void) +{ + debugfs_remove(ag71xx_debugfs_root); + ag71xx_debugfs_root = NULL; +} diff --git 
a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c new file mode 100644 index 0000000000..6f37fda387 --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c @@ -0,0 +1,120 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include "ag71xx.h" + +static int ag71xx_ethtool_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct ag71xx *ag = netdev_priv(dev); + struct phy_device *phydev = ag->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_ioctl(phydev, cmd); +} + +static int ag71xx_ethtool_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct ag71xx *ag = netdev_priv(dev); + struct phy_device *phydev = ag->phy_dev; + + if (!phydev) + return -ENODEV; + + return phy_ethtool_ioctl(phydev, cmd); +} + +static u32 ag71xx_ethtool_get_msglevel(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + + return ag->msg_enable; +} + +static void ag71xx_ethtool_set_msglevel(struct net_device *dev, u32 msg_level) +{ + struct ag71xx *ag = netdev_priv(dev); + + ag->msg_enable = msg_level; +} + +static void ag71xx_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *er) +{ + struct ag71xx *ag = netdev_priv(dev); + + er->tx_max_pending = AG71XX_TX_RING_SIZE_MAX; + er->rx_max_pending = AG71XX_RX_RING_SIZE_MAX; + er->rx_mini_max_pending = 0; + er->rx_jumbo_max_pending = 0; + + er->tx_pending = BIT(ag->tx_ring.order); + er->rx_pending = BIT(ag->rx_ring.order); + er->rx_mini_pending = 0; + er->rx_jumbo_pending = 0; + + if (ag->tx_ring.desc_split) + er->tx_pending /= AG71XX_TX_RING_DS_PER_PKT; +} + +static int ag71xx_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *er) +{ + struct ag71xx *ag = netdev_priv(dev); + unsigned tx_size; + unsigned rx_size; + int err = 0; + + if (er->rx_mini_pending != 0|| + er->rx_jumbo_pending != 0 || + er->rx_pending == 0 || + er->tx_pending == 0) + return -EINVAL; + + tx_size = er->tx_pending < AG71XX_TX_RING_SIZE_MAX ? + er->tx_pending : AG71XX_TX_RING_SIZE_MAX; + + rx_size = er->rx_pending < AG71XX_RX_RING_SIZE_MAX ? 
+ er->rx_pending : AG71XX_RX_RING_SIZE_MAX; + + if (netif_running(dev)) { + err = dev->netdev_ops->ndo_stop(dev); + if (err) + return err; + } + + if (ag->tx_ring.desc_split) + tx_size *= AG71XX_TX_RING_DS_PER_PKT; + + ag->tx_ring.order = ag71xx_ring_size_order(tx_size); + ag->rx_ring.order = ag71xx_ring_size_order(rx_size); + + if (netif_running(dev)) + err = dev->netdev_ops->ndo_open(dev); + + return err; +} + +struct ethtool_ops ag71xx_ethtool_ops = { + .set_settings = ag71xx_ethtool_set_settings, + .get_settings = ag71xx_ethtool_get_settings, + .get_msglevel = ag71xx_ethtool_get_msglevel, + .set_msglevel = ag71xx_ethtool_set_msglevel, + .get_ringparam = ag71xx_ethtool_get_ringparam, + .set_ringparam = ag71xx_ethtool_set_ringparam, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, +}; diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c new file mode 100644 index 0000000000..9ccfb00c3a --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c @@ -0,0 +1,1611 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include <linux/sizes.h> +#include <linux/of_net.h> +#include <linux/of_address.h> +#include "ag71xx.h" + +#define AG71XX_DEFAULT_MSG_ENABLE \ + (NETIF_MSG_DRV \ + | NETIF_MSG_PROBE \ + | NETIF_MSG_LINK \ + | NETIF_MSG_TIMER \ + | NETIF_MSG_IFDOWN \ + | NETIF_MSG_IFUP \ + | NETIF_MSG_RX_ERR \ + | NETIF_MSG_TX_ERR) + +static int ag71xx_msg_level = -1; + +module_param_named(msg_level, ag71xx_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); + +#define ETH_SWITCH_HEADER_LEN 2 + +static int ag71xx_tx_packets(struct ag71xx *ag, bool flush); + +static inline unsigned int ag71xx_max_frame_len(unsigned int mtu) +{ + return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; +} + +static void ag71xx_dump_dma_regs(struct ag71xx *ag) +{ + DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_TX_CTRL), + ag71xx_rr(ag, AG71XX_REG_TX_DESC), + ag71xx_rr(ag, AG71XX_REG_TX_STATUS)); + + DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_RX_CTRL), + ag71xx_rr(ag, AG71XX_REG_RX_DESC), + ag71xx_rr(ag, AG71XX_REG_RX_STATUS)); +} + +static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr) +{ + DBG("%s: %s intr=%08x %s%s%s%s%s%s\n", + ag->dev->name, label, intr, + (intr & AG71XX_INT_TX_PS) ? "TXPS " : "", + (intr & AG71XX_INT_TX_UR) ? "TXUR " : "", + (intr & AG71XX_INT_TX_BE) ? "TXBE " : "", + (intr & AG71XX_INT_RX_PR) ? "RXPR " : "", + (intr & AG71XX_INT_RX_OF) ? "RXOF " : "", + (intr & AG71XX_INT_RX_BE) ? 
"RXBE " : ""); +} + +static void ag71xx_ring_tx_clean(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->tx_ring; + struct net_device *dev = ag->dev; + int ring_mask = BIT(ring->order) - 1; + u32 bytes_compl = 0, pkts_compl = 0; + + while (ring->curr != ring->dirty) { + struct ag71xx_desc *desc; + u32 i = ring->dirty & ring_mask; + + desc = ag71xx_ring_desc(ring, i); + if (!ag71xx_desc_empty(desc)) { + desc->ctrl = 0; + dev->stats.tx_errors++; + } + + if (ring->buf[i].skb) { + bytes_compl += ring->buf[i].len; + pkts_compl++; + dev_kfree_skb_any(ring->buf[i].skb); + } + ring->buf[i].skb = NULL; + ring->dirty++; + } + + /* flush descriptors */ + wmb(); + + netdev_completed_queue(dev, pkts_compl, bytes_compl); +} + +static void ag71xx_ring_tx_init(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->tx_ring; + int ring_size = BIT(ring->order); + int ring_mask = ring_size - 1; + int i; + + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + + desc->next = (u32) (ring->descs_dma + + AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); + + desc->ctrl = DESC_EMPTY; + ring->buf[i].skb = NULL; + } + + /* flush descriptors */ + wmb(); + + ring->curr = 0; + ring->dirty = 0; + netdev_reset_queue(ag->dev); +} + +static void ag71xx_ring_rx_clean(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + int ring_size = BIT(ring->order); + int i; + + if (!ring->buf) + return; + + for (i = 0; i < ring_size; i++) + if (ring->buf[i].rx_buf) { + dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr, + ag->rx_buf_size, DMA_FROM_DEVICE); + skb_free_frag(ring->buf[i].rx_buf); + } +} + +static int ag71xx_buffer_size(struct ag71xx *ag) +{ + return ag->rx_buf_size + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf, + int offset, + void *(*alloc)(unsigned int size)) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]); + void *data; + + data = alloc(ag71xx_buffer_size(ag)); + if (!data) + return false; + + buf->rx_buf = data; + buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size, + DMA_FROM_DEVICE); + desc->data = (u32) buf->dma_addr + offset; + return true; +} + +static int ag71xx_ring_rx_init(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + int ring_size = BIT(ring->order); + int ring_mask = BIT(ring->order) - 1; + unsigned int i; + int ret; + + ret = 0; + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + + desc->next = (u32) (ring->descs_dma + + AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); + + DBG("ag71xx: RX desc at %p, next is %08x\n", + desc, desc->next); + } + + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + + if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset, + netdev_alloc_frag)) { + ret = -ENOMEM; + break; + } + + desc->ctrl = DESC_EMPTY; + } + + /* flush descriptors */ + wmb(); + + ring->curr = 0; + ring->dirty = 0; + + return ret; +} + +static int ag71xx_ring_rx_refill(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + int ring_mask = BIT(ring->order) - 1; + unsigned int count; + int offset = ag->rx_buf_offset; + + count = 0; + for (; ring->curr - ring->dirty > 0; ring->dirty++) { + struct ag71xx_desc *desc; + unsigned int i; + + i = ring->dirty & ring_mask; + desc = ag71xx_ring_desc(ring, i); + + if (!ring->buf[i].rx_buf && + !ag71xx_fill_rx_buf(ag, &ring->buf[i], 
offset, + napi_alloc_frag)) + break; + + desc->ctrl = DESC_EMPTY; + count++; + } + + /* flush descriptors */ + wmb(); + + DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count); + + return count; +} + +static int ag71xx_rings_init(struct ag71xx *ag) +{ + struct ag71xx_ring *tx = &ag->tx_ring; + struct ag71xx_ring *rx = &ag->rx_ring; + int ring_size = BIT(tx->order) + BIT(rx->order); + int tx_size = BIT(tx->order); + + tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL); + if (!tx->buf) + return -ENOMEM; + + tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE, + &tx->descs_dma, GFP_ATOMIC); + if (!tx->descs_cpu) { + kfree(tx->buf); + tx->buf = NULL; + return -ENOMEM; + } + + rx->buf = &tx->buf[BIT(tx->order)]; + rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; + rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; + + ag71xx_ring_tx_init(ag); + return ag71xx_ring_rx_init(ag); +} + +static void ag71xx_rings_free(struct ag71xx *ag) +{ + struct ag71xx_ring *tx = &ag->tx_ring; + struct ag71xx_ring *rx = &ag->rx_ring; + int ring_size = BIT(tx->order) + BIT(rx->order); + + if (tx->descs_cpu) + dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE, + tx->descs_cpu, tx->descs_dma); + + kfree(tx->buf); + + tx->descs_cpu = NULL; + rx->descs_cpu = NULL; + tx->buf = NULL; + rx->buf = NULL; +} + +static void ag71xx_rings_cleanup(struct ag71xx *ag) +{ + ag71xx_ring_rx_clean(ag); + ag71xx_ring_tx_clean(ag); + ag71xx_rings_free(ag); + + netdev_reset_queue(ag->dev); +} + +static unsigned char *ag71xx_speed_str(struct ag71xx *ag) +{ + switch (ag->speed) { + case SPEED_1000: + return "1000"; + case SPEED_100: + return "100"; + case SPEED_10: + return "10"; + } + + return "?"; +} + +static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac) +{ + u32 t; + + t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16) + | (((u32) mac[3]) << 8) | ((u32) mac[2]); + + ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t); + + t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16); + ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t); +} + +static void ag71xx_dma_reset(struct ag71xx *ag) +{ + u32 val; + int i; + + ag71xx_dump_dma_regs(ag); + + /* stop RX and TX */ + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0); + ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0); + + /* + * give the hardware some time to really stop all rx/tx activity + * clearing the descriptors too early causes random memory corruption + */ + mdelay(1); + + /* clear descriptor addresses */ + ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma); + ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma); + + /* clear pending RX/TX interrupts */ + for (i = 0; i < 256; i++) { + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR); + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS); + } + + /* clear pending errors */ + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF); + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR); + + val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS); + if (val) + pr_alert("%s: unable to clear DMA Rx status: %08x\n", + ag->dev->name, val); + + val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS); + + /* mask out reserved bits */ + val &= ~0xff000000; + + if (val) + pr_alert("%s: unable to clear DMA Tx status: %08x\n", + ag->dev->name, val); + + ag71xx_dump_dma_regs(ag); +} + +#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \ + MAC_CFG1_SRX | MAC_CFG1_STX) + +#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT) + +#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV | FIFO_CFG4_FC | \ + 
FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \ + FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \ + FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \ + FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \ + FIFO_CFG4_VT) + +#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \ + FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \ + FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \ + FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \ + FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \ + FIFO_CFG5_17 | FIFO_CFG5_SF) + +static void ag71xx_hw_stop(struct ag71xx *ag) +{ + /* disable all interrupts and stop the rx/tx engine */ + ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0); + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0); + ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0); +} + +static void ag71xx_hw_setup(struct ag71xx *ag) +{ + struct device_node *np = ag->pdev->dev.of_node; + u32 init = MAC_CFG1_INIT; + + /* setup MAC configuration registers */ + if (of_property_read_bool(np, "flow-control")) + init |= MAC_CFG1_TFC | MAC_CFG1_RFC; + ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init); + + ag71xx_sb(ag, AG71XX_REG_MAC_CFG2, + MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK); + + /* setup max frame length to zero */ + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0); + + /* setup FIFO configuration registers */ + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT); +} + +static void ag71xx_hw_init(struct ag71xx *ag) +{ + ag71xx_hw_stop(ag); + + if (ag->phy_reset) { + reset_control_assert(ag->phy_reset); + msleep(50); + reset_control_deassert(ag->phy_reset); + msleep(200); + } + + ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR); + udelay(20); + + reset_control_assert(ag->mac_reset); + msleep(100); + reset_control_deassert(ag->mac_reset); + msleep(200); + + ag71xx_hw_setup(ag); + + ag71xx_dma_reset(ag); +} + +static void ag71xx_fast_reset(struct ag71xx *ag) +{ + struct net_device *dev = ag->dev; + u32 rx_ds; + u32 mii_reg; + + ag71xx_hw_stop(ag); + wmb(); + + mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG); + rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC); + + ag71xx_tx_packets(ag, true); + + reset_control_assert(ag->mac_reset); + udelay(10); + reset_control_deassert(ag->mac_reset); + udelay(10); + + ag71xx_dma_reset(ag); + ag71xx_hw_setup(ag); + ag->tx_ring.curr = 0; + ag->tx_ring.dirty = 0; + netdev_reset_queue(ag->dev); + + /* setup max frame length */ + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, + ag71xx_max_frame_len(ag->dev->mtu)); + + ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds); + ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); + ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg); + + ag71xx_hw_set_macaddr(ag, dev->dev_addr); +} + +static void ag71xx_hw_start(struct ag71xx *ag) +{ + /* start RX engine */ + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE); + + /* enable interrupts */ + ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT); + + netif_wake_queue(ag->dev); +} + +static void ath79_set_pllval(struct ag71xx *ag) +{ + u32 pll_reg = ag->pllreg[1]; + u32 pll_val; + + if (!ag->pllregmap) + return; + + switch (ag->speed) { + case SPEED_10: + pll_val = ag->plldata[2]; + break; + case SPEED_100: + pll_val = ag->plldata[1]; + break; + case SPEED_1000: + pll_val = ag->plldata[0]; + break; + default: + BUG(); + } + + if (pll_val) + regmap_write(ag->pllregmap, pll_reg, pll_val); +} + +static void ath79_set_pll(struct ag71xx *ag) +{ + u32 pll_cfg = 
ag->pllreg[0]; + u32 pll_shift = ag->pllreg[2]; + + if (!ag->pllregmap) + return; + + regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift); + udelay(100); + + ath79_set_pllval(ag); + + regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift); + udelay(100); + + regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0); + udelay(100); +} + +static void ath79_mii_ctrl_set_speed(struct ag71xx *ag) +{ + unsigned int mii_speed; + u32 t; + + if (!ag->mii_base) + return; + + switch (ag->speed) { + case SPEED_10: + mii_speed = AR71XX_MII_CTRL_SPEED_10; + break; + case SPEED_100: + mii_speed = AR71XX_MII_CTRL_SPEED_100; + break; + case SPEED_1000: + mii_speed = AR71XX_MII_CTRL_SPEED_1000; + break; + default: + BUG(); + } + + t = __raw_readl(ag->mii_base); + t &= ~(AR71XX_MII_CTRL_IF_MASK); + t |= (mii_speed & AR71XX_MII_CTRL_IF_MASK); + __raw_writel(t, ag->mii_base); +} + +static void +__ag71xx_link_adjust(struct ag71xx *ag, bool update) +{ + struct device_node *np = ag->pdev->dev.of_node; + u32 cfg2; + u32 ifctl; + u32 fifo5; + + if (!ag->link && update) { + ag71xx_hw_stop(ag); + netif_carrier_off(ag->dev); + if (netif_msg_link(ag)) + pr_info("%s: link down\n", ag->dev->name); + return; + } + + if (!of_device_is_compatible(np, "qca,ar9130-eth") && + !of_device_is_compatible(np, "qca,ar7100-eth")) + ag71xx_fast_reset(ag); + + cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2); + cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX); + cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0; + + ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL); + ifctl &= ~(MAC_IFCTL_SPEED); + + fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5); + fifo5 &= ~FIFO_CFG5_BM; + + switch (ag->speed) { + case SPEED_1000: + cfg2 |= MAC_CFG2_IF_1000; + fifo5 |= FIFO_CFG5_BM; + break; + case SPEED_100: + cfg2 |= MAC_CFG2_IF_10_100; + ifctl |= MAC_IFCTL_SPEED; + break; + case SPEED_10: + cfg2 |= MAC_CFG2_IF_10_100; + break; + default: + BUG(); + return; + } + + if (ag->tx_ring.desc_split) { + ag->fifodata[2] &= 0xffff; + ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16; + } + + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); + + if (update) { + if (of_device_is_compatible(np, "qca,ar7100-eth")) { + ath79_set_pll(ag); + ath79_mii_ctrl_set_speed(ag); + } else if (of_device_is_compatible(np, "qca,ar7242-eth")) { + ath79_set_pll(ag); + } else if (of_device_is_compatible(np, "qca,ar9130-eth")) { + } else if (of_device_is_compatible(np, "qca,ar9340-eth")) { + } else if (of_device_is_compatible(np, "qca,qca9550-eth")) { + } else if (of_device_is_compatible(np, "qca,qca9560-eth")) { + } + } + + ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5); + ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl); + + if (of_device_is_compatible(np, "qca,qca9530-eth")) { + /* + * The rx ring buffer can stall on small packets on QCA953x and + * QCA956x. Disabling the inline checksum engine fixes the stall. + * The wr, rr functions cannot be used since this hidden register + * is outside of the normal ag71xx register block. + */ + void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4); + if (dam) { + __raw_writel(__raw_readl(dam) & ~BIT(27), dam); + (void)__raw_readl(dam); + iounmap(dam); + } + } + + ag71xx_hw_start(ag); + + netif_carrier_on(ag->dev); + if (update && netif_msg_link(ag)) + pr_info("%s: link up (%sMbps/%s duplex)\n", + ag->dev->name, + ag71xx_speed_str(ag), + (DUPLEX_FULL == ag->duplex) ? 
"Full" : "Half"); + + DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2)); + + DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5)); + + DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_MAC_CFG2), + ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL)); +} + +void ag71xx_link_adjust(struct ag71xx *ag) +{ + __ag71xx_link_adjust(ag, true); +} + +static int ag71xx_hw_enable(struct ag71xx *ag) +{ + int ret; + + ret = ag71xx_rings_init(ag); + if (ret) + return ret; + + napi_enable(&ag->napi); + ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); + ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); + netif_start_queue(ag->dev); + + return 0; +} + +static void ag71xx_hw_disable(struct ag71xx *ag) +{ + unsigned long flags; + + spin_lock_irqsave(&ag->lock, flags); + + netif_stop_queue(ag->dev); + + ag71xx_hw_stop(ag); + ag71xx_dma_reset(ag); + + napi_disable(&ag->napi); + del_timer_sync(&ag->oom_timer); + + spin_unlock_irqrestore(&ag->lock, flags); + + ag71xx_rings_cleanup(ag); +} + +static int ag71xx_open(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + unsigned int max_frame_len; + int ret; + + netif_carrier_off(dev); + max_frame_len = ag71xx_max_frame_len(dev->mtu); + ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN); + + /* setup max frame length */ + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len); + ag71xx_hw_set_macaddr(ag, dev->dev_addr); + + ret = ag71xx_hw_enable(ag); + if (ret) + goto err; + + ag71xx_ar7240_start(ag); + phy_start(ag->phy_dev); + + return 0; + +err: + ag71xx_rings_cleanup(ag); + return ret; +} + +static int ag71xx_stop(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + + netif_carrier_off(dev); + phy_stop(ag->phy_dev); + ag71xx_hw_disable(ag); + + return 0; +} + +static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len) +{ + int i; + struct ag71xx_desc *desc; + int ring_mask = BIT(ring->order) - 1; + int ndesc = 0; + int split = ring->desc_split; + + if (!split) + split = len; + + while (len > 0) { + unsigned int cur_len = len; + + i = (ring->curr + ndesc) & ring_mask; + desc = ag71xx_ring_desc(ring, i); + + if (!ag71xx_desc_empty(desc)) + return -1; + + if (cur_len > split) { + cur_len = split; + + /* + * TX will hang if DMA transfers <= 4 bytes, + * make sure next segment is more than 4 bytes long. 
+ */ + if (len <= split + 4) + cur_len -= 4; + } + + desc->data = addr; + addr += cur_len; + len -= cur_len; + + if (len > 0) + cur_len |= DESC_MORE; + + /* prevent early tx attempt of this descriptor */ + if (!ndesc) + cur_len |= DESC_EMPTY; + + desc->ctrl = cur_len; + ndesc++; + } + + return ndesc; +} + +static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + struct ag71xx_ring *ring = &ag->tx_ring; + int ring_mask = BIT(ring->order) - 1; + int ring_size = BIT(ring->order); + struct ag71xx_desc *desc; + dma_addr_t dma_addr; + int i, n, ring_min; + + if (skb->len <= 4) { + DBG("%s: packet len is too small\n", ag->dev->name); + goto err_drop; + } + + dma_addr = dma_map_single(&dev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + i = ring->curr & ring_mask; + desc = ag71xx_ring_desc(ring, i); + + /* setup descriptor fields */ + n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask); + if (n < 0) + goto err_drop_unmap; + + i = (ring->curr + n - 1) & ring_mask; + ring->buf[i].len = skb->len; + ring->buf[i].skb = skb; + + netdev_sent_queue(dev, skb->len); + + skb_tx_timestamp(skb); + + desc->ctrl &= ~DESC_EMPTY; + ring->curr += n; + + /* flush descriptor */ + wmb(); + + ring_min = 2; + if (ring->desc_split) + ring_min *= AG71XX_TX_RING_DS_PER_PKT; + + if (ring->curr - ring->dirty >= ring_size - ring_min) { + DBG("%s: tx queue full\n", dev->name); + netif_stop_queue(dev); + } + + DBG("%s: packet injected into TX queue\n", ag->dev->name); + + /* enable TX engine */ + ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE); + + return NETDEV_TX_OK; + +err_drop_unmap: + dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE); + +err_drop: + dev->stats.tx_dropped++; + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ag71xx *ag = netdev_priv(dev); + int ret; + + switch (cmd) { + case SIOCETHTOOL: + if (ag->phy_dev == NULL) + break; + + spin_lock_irq(&ag->lock); + ret = phy_ethtool_ioctl(ag->phy_dev, (void *) ifr->ifr_data); + spin_unlock_irq(&ag->lock); + return ret; + + case SIOCSIFHWADDR: + if (copy_from_user + (dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr))) + return -EFAULT; + return 0; + + case SIOCGIFHWADDR: + if (copy_to_user + (ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr))) + return -EFAULT; + return 0; + + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (ag->phy_dev == NULL) + break; + + return phy_mii_ioctl(ag->phy_dev, ifr, cmd); + + default: + break; + } + + return -EOPNOTSUPP; +} + +static void ag71xx_oom_timer_handler(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct ag71xx *ag = netdev_priv(dev); + + napi_schedule(&ag->napi); +} + +static void ag71xx_tx_timeout(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + + if (netif_msg_tx_err(ag)) + pr_info("%s: tx timeout\n", ag->dev->name); + + schedule_delayed_work(&ag->restart_work, 1); +} + +static void ag71xx_restart_work_func(struct work_struct *work) +{ + struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work); + + rtnl_lock(); + ag71xx_hw_disable(ag); + ag71xx_hw_enable(ag); + if (ag->link) + __ag71xx_link_adjust(ag, false); + rtnl_unlock(); +} + +static bool ag71xx_check_dma_stuck(struct ag71xx *ag) +{ + unsigned long timestamp; + u32 rx_sm, tx_sm, rx_fd; + + timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start; + if 
(likely(time_before(jiffies, timestamp + HZ/10))) + return false; + + if (!netif_carrier_ok(ag->dev)) + return false; + + rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM); + if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6) + return true; + + tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM); + rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH); + if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) && + ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0) + return true; + + return false; +} + +static int ag71xx_tx_packets(struct ag71xx *ag, bool flush) +{ + struct ag71xx_ring *ring = &ag->tx_ring; + bool dma_stuck = false; + int ring_mask = BIT(ring->order) - 1; + int ring_size = BIT(ring->order); + int sent = 0; + int bytes_compl = 0; + int n = 0; + + DBG("%s: processing TX ring\n", ag->dev->name); + + while (ring->dirty + n != ring->curr) { + unsigned int i = (ring->dirty + n) & ring_mask; + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + struct sk_buff *skb = ring->buf[i].skb; + + if (!flush && !ag71xx_desc_empty(desc)) { + if (ag->tx_hang_workaround && + ag71xx_check_dma_stuck(ag)) { + schedule_delayed_work(&ag->restart_work, HZ / 2); + dma_stuck = true; + } + break; + } + + if (flush) + desc->ctrl |= DESC_EMPTY; + + n++; + if (!skb) + continue; + + dev_kfree_skb_any(skb); + ring->buf[i].skb = NULL; + + bytes_compl += ring->buf[i].len; + + sent++; + ring->dirty += n; + + while (n > 0) { + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS); + n--; + } + } + + DBG("%s: %d packets sent out\n", ag->dev->name, sent); + + if (!sent) + return 0; + + ag->dev->stats.tx_bytes += bytes_compl; + ag->dev->stats.tx_packets += sent; + + netdev_completed_queue(ag->dev, sent, bytes_compl); + if ((ring->curr - ring->dirty) < (ring_size * 3) / 4) + netif_wake_queue(ag->dev); + + if (!dma_stuck) + cancel_delayed_work(&ag->restart_work); + + return sent; +} + +static int ag71xx_rx_packets(struct ag71xx *ag, int limit) +{ + struct net_device *dev = ag->dev; + struct ag71xx_ring *ring = &ag->rx_ring; + unsigned int pktlen_mask = ag->desc_pktlen_mask; + unsigned int offset = ag->rx_buf_offset; + int ring_mask = BIT(ring->order) - 1; + int ring_size = BIT(ring->order); + struct sk_buff_head queue; + struct sk_buff *skb; + int done = 0; + + DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n", + dev->name, limit, ring->curr, ring->dirty); + + skb_queue_head_init(&queue); + + while (done < limit) { + unsigned int i = ring->curr & ring_mask; + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + int pktlen; + int err = 0; + + if (ag71xx_desc_empty(desc)) + break; + + if ((ring->dirty + ring_size) == ring->curr) { + ag71xx_assert(0); + break; + } + + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR); + + pktlen = desc->ctrl & pktlen_mask; + pktlen -= ETH_FCS_LEN; + + dma_unmap_single(&dev->dev, ring->buf[i].dma_addr, + ag->rx_buf_size, DMA_FROM_DEVICE); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += pktlen; + + skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag)); + if (!skb) { + skb_free_frag(ring->buf[i].rx_buf); + goto next; + } + + skb_reserve(skb, offset); + skb_put(skb, pktlen); + + if (err) { + dev->stats.rx_dropped++; + kfree_skb(skb); + } else { + skb->dev = dev; + skb->ip_summed = CHECKSUM_NONE; + __skb_queue_tail(&queue, skb); + } + +next: + ring->buf[i].rx_buf = NULL; + done++; + + ring->curr++; + } + + ag71xx_ring_rx_refill(ag); + + while ((skb = __skb_dequeue(&queue)) != NULL) { + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } + + DBG("%s: rx finish, curr=%u, dirty=%u, 
done=%d\n", + dev->name, ring->curr, ring->dirty, done); + + return done; +} + +static int ag71xx_poll(struct napi_struct *napi, int limit) +{ + struct ag71xx *ag = container_of(napi, struct ag71xx, napi); + struct net_device *dev = ag->dev; + struct ag71xx_ring *rx_ring = &ag->rx_ring; + int rx_ring_size = BIT(rx_ring->order); + unsigned long flags; + u32 status; + int tx_done; + int rx_done; + + tx_done = ag71xx_tx_packets(ag, false); + + DBG("%s: processing RX ring\n", dev->name); + rx_done = ag71xx_rx_packets(ag, limit); + + ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done); + + if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL) + goto oom; + + status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS); + if (unlikely(status & RX_STATUS_OF)) { + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF); + dev->stats.rx_fifo_errors++; + + /* restart RX */ + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE); + } + + if (rx_done < limit) { + if (status & RX_STATUS_PR) + goto more; + + status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS); + if (status & TX_STATUS_PS) + goto more; + + DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n", + dev->name, rx_done, tx_done, limit); + + napi_complete(napi); + + /* enable interrupts */ + spin_lock_irqsave(&ag->lock, flags); + ag71xx_int_enable(ag, AG71XX_INT_POLL); + spin_unlock_irqrestore(&ag->lock, flags); + return rx_done; + } + +more: + DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n", + dev->name, rx_done, tx_done, limit); + return limit; + +oom: + if (netif_msg_rx_err(ag)) + pr_info("%s: out of memory\n", dev->name); + + mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL); + napi_complete(napi); + return 0; +} + +static irqreturn_t ag71xx_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ag71xx *ag = netdev_priv(dev); + u32 status; + + status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS); + ag71xx_dump_intr(ag, "raw", status); + + if (unlikely(!status)) + return IRQ_NONE; + + if (unlikely(status & AG71XX_INT_ERR)) { + if (status & AG71XX_INT_TX_BE) { + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE); + dev_err(&dev->dev, "TX BUS error\n"); + } + if (status & AG71XX_INT_RX_BE) { + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE); + dev_err(&dev->dev, "RX BUS error\n"); + } + } + + if (likely(status & AG71XX_INT_POLL)) { + ag71xx_int_disable(ag, AG71XX_INT_POLL); + DBG("%s: enable polling mode\n", dev->name); + napi_schedule(&ag->napi); + } + + ag71xx_debugfs_update_int_stats(ag, status); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
+ */ +static void ag71xx_netpoll(struct net_device *dev) +{ + disable_irq(dev->irq); + ag71xx_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +static int ag71xx_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ag71xx *ag = netdev_priv(dev); + + dev->mtu = new_mtu; + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, + ag71xx_max_frame_len(dev->mtu)); + + return 0; +} + +static const struct net_device_ops ag71xx_netdev_ops = { + .ndo_open = ag71xx_open, + .ndo_stop = ag71xx_stop, + .ndo_start_xmit = ag71xx_hard_start_xmit, + .ndo_do_ioctl = ag71xx_do_ioctl, + .ndo_tx_timeout = ag71xx_tx_timeout, + .ndo_change_mtu = ag71xx_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ag71xx_netpoll, +#endif +}; + +static const char *ag71xx_get_phy_if_mode_name(phy_interface_t mode) +{ + switch (mode) { + case PHY_INTERFACE_MODE_MII: + return "MII"; + case PHY_INTERFACE_MODE_GMII: + return "GMII"; + case PHY_INTERFACE_MODE_RMII: + return "RMII"; + case PHY_INTERFACE_MODE_RGMII: + return "RGMII"; + case PHY_INTERFACE_MODE_SGMII: + return "SGMII"; + default: + break; + } + + return "unknown"; +} + +static void ag71xx_of_bit(struct device_node *np, const char *prop, + u32 *reg, u32 mask) +{ + u32 val; + + if (of_property_read_u32(np, prop, &val)) + return; + + if (val) + *reg |= mask; + else + *reg &= ~mask; +} + +static void ag71xx_setup_gmac_933x(struct device_node *np, void __iomem *base) +{ + u32 val = __raw_readl(base + AR933X_GMAC_REG_ETH_CFG); + + ag71xx_of_bit(np, "switch-phy-swap", &val, AR933X_ETH_CFG_SW_PHY_SWAP); + ag71xx_of_bit(np, "switch-phy-addr-swap", &val, + AR933X_ETH_CFG_SW_PHY_ADDR_SWAP); + + __raw_writel(val, base + AR933X_GMAC_REG_ETH_CFG); +} + +static int ag71xx_setup_gmac(struct device_node *np) +{ + struct device_node *np_dev; + void __iomem *base; + int err = 0; + + np = of_get_child_by_name(np, "gmac-config"); + if (!np) + return 0; + + np_dev = of_parse_phandle(np, "device", 0); + if (!np_dev) + goto out; + + base = of_iomap(np_dev, 0); + if (!base) { + pr_err("%pOF: can't map GMAC registers\n", np_dev); + err = -ENOMEM; + goto err_iomap; + } + + if (of_device_is_compatible(np_dev, "qca,ar9330-gmac")) + ag71xx_setup_gmac_933x(np_dev, base); + + iounmap(base); + +err_iomap: + of_node_put(np_dev); +out: + of_node_put(np); + return err; +} + +static int ag71xx_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct net_device *dev; + struct resource *res; + struct ag71xx *ag; + const void *mac_addr; + u32 max_frame_len; + int tx_size, err; + + if (!np) + return -ENODEV; + + dev = alloc_etherdev(sizeof(*ag)); + if (!dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + err = ag71xx_setup_gmac(np); + if (err) + return err; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ag = netdev_priv(dev); + ag->pdev = pdev; + ag->dev = dev; + ag->msg_enable = netif_msg_init(ag71xx_msg_level, + AG71XX_DEFAULT_MSG_ENABLE); + spin_lock_init(&ag->lock); + + ag->mac_reset = devm_reset_control_get(&pdev->dev, "mac"); + if (IS_ERR(ag->mac_reset)) { + dev_err(&pdev->dev, "missing mac reset\n"); + err = PTR_ERR(ag->mac_reset); + goto err_free; + } + + ag->phy_reset = devm_reset_control_get_optional(&pdev->dev, "phy"); + + if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) { + if (of_device_is_compatible(np, "qca,ar9130-eth") || + of_device_is_compatible(np, "qca,ar7100-eth")) { + 
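+ /* no fifo-data property in the DT; fall back to FIFO_CFG defaults (this branch covers AR7100/AR9130) */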
ag->fifodata[0] = 0x0fff0000; + ag->fifodata[1] = 0x00001fff; + } else { + ag->fifodata[0] = 0x0010ffff; + ag->fifodata[1] = 0x015500aa; + ag->fifodata[2] = 0x01f00140; + } + if (of_device_is_compatible(np, "qca,ar9130-eth")) + ag->fifodata[2] = 0x00780fff; + else if (of_device_is_compatible(np, "qca,ar7100-eth")) + ag->fifodata[2] = 0x008001ff; + } + + if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3)) + dev_dbg(&pdev->dev, "failed to read pll-data property\n"); + + if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3)) + dev_dbg(&pdev->dev, "failed to read pll-reg property\n"); + + ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle"); + if (IS_ERR(ag->pllregmap)) { + dev_dbg(&pdev->dev, "failed to read pll-handle property\n"); + ag->pllregmap = NULL; + } + + ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start, + res->end - res->start + 1); + if (!ag->mac_base) { + err = -ENOMEM; + goto err_free; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start, + res->end - res->start + 1); + if (!ag->mii_base) { + err = -ENOMEM; + goto err_free; + } + } + + dev->irq = platform_get_irq(pdev, 0); + err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt, + 0x0, dev_name(&pdev->dev), dev); + if (err) { + dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq); + goto err_free; + } + + dev->netdev_ops = &ag71xx_netdev_ops; + dev->ethtool_ops = &ag71xx_ethtool_ops; + + INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); + + init_timer(&ag->oom_timer); + ag->oom_timer.data = (unsigned long) dev; + ag->oom_timer.function = ag71xx_oom_timer_handler; + + tx_size = AG71XX_TX_RING_SIZE_DEFAULT; + ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); + + if (of_device_is_compatible(np, "qca,ar9340-eth") || + of_device_is_compatible(np, "qca,qca9530-eth") || + of_device_is_compatible(np, "qca,qca9550-eth") || + of_device_is_compatible(np, "qca,qca9560-eth")) + ag->desc_pktlen_mask = SZ_16K - 1; + else + ag->desc_pktlen_mask = SZ_4K - 1; + + if (ag->desc_pktlen_mask == SZ_16K - 1 && + !of_device_is_compatible(np, "qca,qca9550-eth") && + !of_device_is_compatible(np, "qca,qca9560-eth")) + max_frame_len = ag->desc_pktlen_mask; + else + max_frame_len = 1540; + + dev->min_mtu = 68; + dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0); + + if (of_device_is_compatible(np, "qca,ar7240-eth")) + ag->tx_hang_workaround = 1; + + ag->rx_buf_offset = NET_SKB_PAD; + if (!of_device_is_compatible(np, "qca,ar7100-eth") && + !of_device_is_compatible(np, "qca,ar9130-eth")) + ag->rx_buf_offset += NET_IP_ALIGN; + + if (of_device_is_compatible(np, "qca,ar7100-eth")) { + ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; + tx_size *= AG71XX_TX_RING_DS_PER_PKT; + } + ag->tx_ring.order = ag71xx_ring_size_order(tx_size); + + ag->stop_desc = dmam_alloc_coherent(&pdev->dev, + sizeof(struct ag71xx_desc), + &ag->stop_desc_dma, GFP_KERNEL); + if (!ag->stop_desc) + goto err_free; + + ag->stop_desc->data = 0; + ag->stop_desc->ctrl = 0; + ag->stop_desc->next = (u32) ag->stop_desc_dma; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + if (!mac_addr || !is_valid_ether_addr(dev->dev_addr)) { + dev_err(&pdev->dev, "invalid MAC address, using random address\n"); + eth_random_addr(dev->dev_addr); + } + + ag->phy_if_mode = of_get_phy_mode(np); + if (ag->phy_if_mode < 0) { + dev_err(&pdev->dev, "missing phy-mode property in DT\n"); + err = 
ag->phy_if_mode; + goto err_free; + } + + netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); + + ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0); + ag71xx_hw_init(ag); + ag71xx_mdio_init(ag); + + err = ag71xx_phy_connect(ag); + if (err) + goto err_mdio_free; + + err = ag71xx_debugfs_init(ag); + if (err) + goto err_phy_disconnect; + + platform_set_drvdata(pdev, dev); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "unable to register net device\n"); + platform_set_drvdata(pdev, NULL); + ag71xx_debugfs_exit(ag); + goto err_phy_disconnect; + } + + pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode:%s\n", + dev->name, (unsigned long) ag->mac_base, dev->irq, + ag71xx_get_phy_if_mode_name(ag->phy_if_mode)); + + return 0; + +err_phy_disconnect: + ag71xx_phy_disconnect(ag); +err_mdio_free: + ag71xx_mdio_cleanup(ag); +err_free: + free_netdev(dev); + return err; +} + +static int ag71xx_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct ag71xx *ag; + + if (!dev) + return 0; + + ag = netdev_priv(dev); + ag71xx_debugfs_exit(ag); + ag71xx_phy_disconnect(ag); + ag71xx_mdio_cleanup(ag); + unregister_netdev(dev); + free_irq(dev->irq, dev); + iounmap(ag->mac_base); + kfree(dev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static const struct of_device_id ag71xx_match[] = { + { .compatible = "qca,ar7100-eth" }, + { .compatible = "qca,ar7240-eth" }, + { .compatible = "qca,ar7241-eth" }, + { .compatible = "qca,ar7242-eth" }, + { .compatible = "qca,ar9130-eth" }, + { .compatible = "qca,ar9330-eth" }, + { .compatible = "qca,ar9340-eth" }, + { .compatible = "qca,qca9530-eth" }, + { .compatible = "qca,qca9550-eth" }, + { .compatible = "qca,qca9560-eth" }, + {} +}; + +static struct platform_driver ag71xx_driver = { + .probe = ag71xx_probe, + .remove = ag71xx_remove, + .driver = { + .name = AG71XX_DRV_NAME, + .of_match_table = ag71xx_match, + } +}; + +static int __init ag71xx_module_init(void) +{ + int ret; + + ret = ag71xx_debugfs_root_init(); + if (ret) + goto err_out; + + ret = platform_driver_register(&ag71xx_driver); + if (ret) + goto err_debugfs_exit; + + return 0; + +err_debugfs_exit: + ag71xx_debugfs_root_exit(); +err_out: + return ret; +} + +static void __exit ag71xx_module_exit(void) +{ + platform_driver_unregister(&ag71xx_driver); + ag71xx_debugfs_root_exit(); +} + +module_init(ag71xx_module_init); +module_exit(ag71xx_module_exit); + +MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); +MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org>"); +MODULE_AUTHOR("Felix Fietkau <nbd@nbd.name>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" AG71XX_DRV_NAME); diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c new file mode 100644 index 0000000000..8c1572b181 --- /dev/null +++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c @@ -0,0 +1,218 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c
new file mode 100644
index 0000000000..8c1572b181
--- /dev/null
+++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c
@@ -0,0 +1,218 @@
+/*
+ * Atheros AR71xx built-in ethernet mac driver
+ *
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Based on Atheros' AG7100 driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_mdio.h>
+#include "ag71xx.h"
+
+#define AG71XX_MDIO_RETRY	1000
+#define AG71XX_MDIO_DELAY	5
+
+static int bus_count;
+
+static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
+{
+	int i;
+
+	for (i = 0; i < AG71XX_MDIO_RETRY; i++) {
+		u32 busy;
+
+		udelay(AG71XX_MDIO_DELAY);
+
+		regmap_read(ag->mii_regmap, AG71XX_REG_MII_IND, &busy);
+		if (!busy)
+			return 0;
+
+		udelay(AG71XX_MDIO_DELAY);
+	}
+
+	pr_err("%s: MDIO operation timed out\n", ag->mii_bus->name);
+
+	return -ETIMEDOUT;
+}
+
+int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+	struct ag71xx *ag = bus->priv;
+	int err;
+	int ret;
+
+	err = ag71xx_mdio_wait_busy(ag);
+	if (err)
+		return 0xffff;
+
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_CMD, MII_CMD_WRITE);
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_ADDR,
+		     ((addr & 0xff) << MII_ADDR_SHIFT) | (reg & 0xff));
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_CMD, MII_CMD_READ);
+
+	err = ag71xx_mdio_wait_busy(ag);
+	if (err)
+		return 0xffff;
+
+	regmap_read(ag->mii_regmap, AG71XX_REG_MII_STATUS, &ret);
+	ret &= 0xffff;
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_CMD, MII_CMD_WRITE);
+
+	DBG("mii_read: addr=%04x, reg=%04x, value=%04x\n", addr, reg, ret);
+
+	return ret;
+}
+
+int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+	struct ag71xx *ag = bus->priv;
+
+	DBG("mii_write: addr=%04x, reg=%04x, value=%04x\n", addr, reg, val);
+
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_ADDR,
+		     ((addr & 0xff) << MII_ADDR_SHIFT) | (reg & 0xff));
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_CTRL, val);
+
+	ag71xx_mdio_wait_busy(ag);
+
+	return 0;
+}
+
+static int ar934x_mdio_clock_div(unsigned int rate)
+{
+	if (rate == 100 * 1000 * 1000)
+		return 6; /* 100 MHz clock divided by 20 => 5 MHz */
+	else if (rate == 25 * 1000 * 1000)
+		return 0; /* 25 MHz clock divided by 4 => 6.25 MHz */
+	else
+		return 3; /* 40 MHz clock divided by 8 => 5 MHz */
+}
+
+static int ag71xx_mdio_reset(struct mii_bus *bus)
+{
+	struct device_node *np = bus->dev.of_node;
+	struct ag71xx *ag = bus->priv;
+	struct device_node *np_ag = ag->pdev->dev.of_node;
+	bool builtin_switch;
+	u32 t;
+
+	builtin_switch = of_property_read_bool(np, "builtin-switch");
+
+	if (of_device_is_compatible(np_ag, "qca,ar7240-eth"))
+		t = MII_CFG_CLK_DIV_6;
+	else if (of_device_is_compatible(np_ag, "qca,ar9340-eth"))
+		t = MII_CFG_CLK_DIV_58;
+	else if (builtin_switch)
+		t = MII_CFG_CLK_DIV_10;
+	else
+		t = MII_CFG_CLK_DIV_28;
+
+	if (builtin_switch && of_device_is_compatible(np_ag, "qca,ar9340-eth")) {
+		struct clk *ref_clk = of_clk_get(np, 0);
+		int clock_rate;
+
+		if (WARN_ON_ONCE(!ref_clk))
+			clock_rate = 40 * 1000 * 1000;
+		else
+			clock_rate = clk_get_rate(ref_clk);
+
+		t = ar934x_mdio_clock_div(clock_rate);
+		clk_put(ref_clk);
+	}
+
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_CFG, t | MII_CFG_RESET);
+	udelay(100);
+
+	regmap_write(ag->mii_regmap, AG71XX_REG_MII_CFG, t);
+	udelay(100);
+
+	return 0;
+}
+
+int ag71xx_mdio_init(struct ag71xx *ag)
+{
+	struct device *parent = &ag->pdev->dev;
+	struct device_node *np;
+	struct mii_bus *mii_bus;
+	bool builtin_switch;
+	int i, err;
+
+	np = of_get_child_by_name(parent->of_node, "mdio-bus");
+	if (!np)
+		return -ENODEV;
+
+	if (!of_device_is_available(np)) {
+		err = 0;
+		goto err_out;
+	}
+
+	ag->mii_regmap = syscon_regmap_lookup_by_phandle(np, "regmap");
+	if (IS_ERR(ag->mii_regmap)) {
+		err = PTR_ERR(ag->mii_regmap);
+		goto err_out;
+	}
+
+	mii_bus = devm_mdiobus_alloc(parent);
+	if (!mii_bus) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
+	builtin_switch = of_property_read_bool(np, "builtin-switch");
+
+	mii_bus->name = "mdio";
+	if (builtin_switch) {
+		mii_bus->read = ar7240sw_phy_read;
+		mii_bus->write = ar7240sw_phy_write;
+	} else {
+		mii_bus->read = ag71xx_mdio_mii_read;
+		mii_bus->write = ag71xx_mdio_mii_write;
+	}
+	mii_bus->reset = ag71xx_mdio_reset;
+	mii_bus->priv = ag;
+	mii_bus->parent = parent;
+	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, bus_count++);
+
+	if (!builtin_switch &&
+	    of_property_read_u32(np, "phy-mask", &mii_bus->phy_mask))
+		mii_bus->phy_mask = 0;
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		mii_bus->irq[i] = PHY_POLL;
+
+	if (!IS_ERR(ag->mdio_reset)) {
+		reset_control_assert(ag->mdio_reset);
+		msleep(100);
+		reset_control_deassert(ag->mdio_reset);
+		msleep(200);
+	}
+
+	err = of_mdiobus_register(mii_bus, np);
+	if (err)
+		goto err_out;
+
+	ag->mii_bus = mii_bus;
+
+	if (builtin_switch)
+		ag71xx_ar7240_init(ag, np);
+
+	return 0;
+
+err_out:
+	of_node_put(np);
+	return err;
+}
+
+void ag71xx_mdio_cleanup(struct ag71xx *ag)
+{
+	if (!ag->mii_bus)
+		return;
+
+	ag71xx_ar7240_cleanup(ag);
+	mdiobus_unregister(ag->mii_bus);
+}
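
ag71xx_mdio_init() expects a child node named mdio-bus beneath the MAC node and reads its regmap syscon phandle, an optional builtin-switch flag (which switches the bus over to the ar7240sw accessors), an optional phy-mask and a reset named "mdio". A hypothetical child node, with invented labels and values, could look like the following; it is a sketch only, not taken from this commit.

	/* Hypothetical sketch only -- placed inside the ethernet node above. */
	mdio-bus {
		regmap = <&eth1>;	/* syscon exposing the MII registers */
		resets = <&rst 22>;
		reset-names = "mdio";

		phy4: ethernet-phy@4 {
			reg = <4>;
		};
	};
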
diff --git a/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c
new file mode 100644
index 0000000000..f9df38d530
--- /dev/null
+++ b/target/linux/ath79/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c
@@ -0,0 +1,92 @@
+/*
+ * Atheros AR71xx built-in ethernet mac driver
+ *
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * Based on Atheros' AG7100 driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/of_mdio.h>
+#include "ag71xx.h"
+
+static void ag71xx_phy_link_adjust(struct net_device *dev)
+{
+	struct ag71xx *ag = netdev_priv(dev);
+	struct phy_device *phydev = ag->phy_dev;
+	unsigned long flags;
+	int status_change = 0;
+
+	spin_lock_irqsave(&ag->lock, flags);
+
+	if (phydev->link) {
+		if (ag->duplex != phydev->duplex
+		    || ag->speed != phydev->speed) {
+			status_change = 1;
+		}
+	}
+
+	if (phydev->link != ag->link)
+		status_change = 1;
+
+	ag->link = phydev->link;
+	ag->duplex = phydev->duplex;
+	ag->speed = phydev->speed;
+
+	if (status_change)
+		ag71xx_link_adjust(ag);
+
+	spin_unlock_irqrestore(&ag->lock, flags);
+}
+
+int ag71xx_phy_connect(struct ag71xx *ag)
+{
+	struct device_node *np = ag->pdev->dev.of_node;
+	struct device_node *phy_node;
+	int ret;
+
+	if (of_phy_is_fixed_link(np)) {
+		ret = of_phy_register_fixed_link(np);
+		if (ret < 0) {
+			dev_err(&ag->pdev->dev,
+				"Failed to register fixed PHY link: %d\n", ret);
+			return ret;
+		}
+
+		phy_node = of_node_get(np);
+	} else {
+		phy_node = of_parse_phandle(np, "phy-handle", 0);
+	}
+
+	if (!phy_node) {
+		dev_err(&ag->pdev->dev,
+			"Could not find valid phy node\n");
+		return -ENODEV;
+	}
+
+	ag->phy_dev = of_phy_connect(ag->dev, phy_node, ag71xx_phy_link_adjust,
+				     0, ag->phy_if_mode);
+
+	of_node_put(phy_node);
+
+	if (!ag->phy_dev) {
+		dev_err(&ag->pdev->dev,
+			"Could not connect to PHY device\n");
+		return -ENODEV;
+	}
+
+	dev_info(&ag->pdev->dev, "connected to PHY at %s [uid=%08x, driver=%s]\n",
+		 phydev_name(ag->phy_dev),
+		 ag->phy_dev->phy_id, ag->phy_dev->drv->name);
+
+	return 0;
+}
+
+void ag71xx_phy_disconnect(struct ag71xx *ag)
+{
+	phy_disconnect(ag->phy_dev);
+}
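
ag71xx_phy_connect() accepts either a phy-handle pointing at a PHY on the MDIO bus or an in-node fixed-link description for MACs wired directly to a switch. A hypothetical fixed-link variant of the MAC node (compatible string and link parameters are placeholders, not taken from this commit) might read:

	/* Hypothetical sketch only. */
	ethernet@19000000 {
		compatible = "qca,ar9330-eth";
		phy-mode = "gmii";

		fixed-link {
			speed = <100>;
			full-duplex;
		};
	};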