From 53ab9865c2b91bc6a239b2adee800dc52875b6bc Mon Sep 17 00:00:00 2001
From: David Bauer
Date: Sun, 8 Dec 2019 21:44:23 +0100
Subject: ath79: add support for kernel 5.4

Signed-off-by: David Bauer
[refreshed]
Signed-off-by: Koen Vandeputte

* Sync the patches with the changes done for kernel 4.19
* Use KERNEL_TESTING_PATCHVER
* Refresh the configuration
* Fix multiple compile bugs in the patches
* Only add our own ag71xx files for kernel 4.19 and use the upstream version for 5.4.

Signed-off-by: Hauke Mehrtens
---
 .../drivers/net/ethernet/atheros/ag71xx/Kconfig    |   25 +
 .../drivers/net/ethernet/atheros/ag71xx/Makefile   |   13 +
 .../drivers/net/ethernet/atheros/ag71xx/ag71xx.h   |  454 +++++
 .../net/ethernet/atheros/ag71xx/ag71xx_debugfs.c   |  285 ++++
 .../net/ethernet/atheros/ag71xx/ag71xx_ethtool.c   |  108 ++
 .../net/ethernet/atheros/ag71xx/ag71xx_gmac.c      |  135 ++
 .../net/ethernet/atheros/ag71xx/ag71xx_main.c      | 1736 ++++++++++++++++++++
 .../net/ethernet/atheros/ag71xx/ag71xx_mdio.c      |  254 +++
 .../net/ethernet/atheros/ag71xx/ag71xx_phy.c       |   92 ++
 9 files changed, 3102 insertions(+)
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Kconfig
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Makefile
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx.h
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_gmac.c
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c
 create mode 100644 target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c

diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Kconfig b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Kconfig
new file mode 100644
index 0000000000..4df2d21e34
--- /dev/null
+++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Kconfig
@@ -0,0 +1,25 @@
+config AG71XX
+	tristate "Atheros AR7XXX/AR9XXX built-in ethernet mac support"
+	depends on ATH79
+	select PHYLIB
+	help
+	  If you wish to compile a kernel for AR7XXX/91XXX and enable
+	  ethernet support, then you should always answer Y to this.
+
+if AG71XX
+
+config AG71XX_DEBUG
+	bool "Atheros AR71xx built-in ethernet driver debugging"
+	default n
+	help
+	  Atheros AR71xx built-in ethernet driver debugging messages.
+
+config AG71XX_DEBUG_FS
+	bool "Atheros AR71xx built-in ethernet driver debugfs support"
+	depends on DEBUG_FS
+	default n
+	help
+	  Say Y, if you need access to various statistics provided by
+	  the ag71xx driver.
+ +endif diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Makefile b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Makefile new file mode 100644 index 0000000000..87add0d208 --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/Makefile @@ -0,0 +1,13 @@ +# +# Makefile for the Atheros AR71xx built-in ethernet macs +# + +ag71xx-y += ag71xx_main.o +ag71xx-y += ag71xx_gmac.o +ag71xx-y += ag71xx_ethtool.o +ag71xx-y += ag71xx_phy.o + +ag71xx-$(CONFIG_AG71XX_DEBUG_FS) += ag71xx_debugfs.o + +obj-$(CONFIG_AG71XX) += ag71xx_mdio.o +obj-$(CONFIG_AG71XX) += ag71xx.o diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx.h b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx.h new file mode 100644 index 0000000000..fde9db3745 --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx.h @@ -0,0 +1,454 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef __AG71XX_H +#define __AG71XX_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#define AG71XX_DRV_NAME "ag71xx" + +/* + * For our NAPI weight bigger does *NOT* mean better - it means more + * D-cache misses and lots more wasted cycles than we'll ever + * possibly gain from saving instructions. + */ +#define AG71XX_NAPI_WEIGHT 32 +#define AG71XX_OOM_REFILL (1 + HZ/10) + +#define AG71XX_INT_ERR (AG71XX_INT_RX_BE | AG71XX_INT_TX_BE) +#define AG71XX_INT_TX (AG71XX_INT_TX_PS) +#define AG71XX_INT_RX (AG71XX_INT_RX_PR | AG71XX_INT_RX_OF) + +#define AG71XX_INT_POLL (AG71XX_INT_RX | AG71XX_INT_TX) +#define AG71XX_INT_INIT (AG71XX_INT_ERR | AG71XX_INT_POLL) + +#define AG71XX_TX_MTU_LEN 1540 + +#define AG71XX_TX_RING_SPLIT 512 +#define AG71XX_TX_RING_DS_PER_PKT DIV_ROUND_UP(AG71XX_TX_MTU_LEN, \ + AG71XX_TX_RING_SPLIT) +#define AG71XX_TX_RING_SIZE_DEFAULT 128 +#define AG71XX_RX_RING_SIZE_DEFAULT 256 + +#define AG71XX_TX_RING_SIZE_MAX 128 +#define AG71XX_RX_RING_SIZE_MAX 256 + +#ifdef CONFIG_AG71XX_DEBUG +#define DBG(fmt, args...) pr_debug(fmt, ## args) +#else +#define DBG(fmt, args...) 
do {} while (0) +#endif + +#define ag71xx_assert(_cond) \ +do { \ + if (_cond) \ + break; \ + printk("%s,%d: assertion failed\n", __FILE__, __LINE__); \ + BUG(); \ +} while (0) + +struct ag71xx_desc { + u32 data; + u32 ctrl; +#define DESC_EMPTY BIT(31) +#define DESC_MORE BIT(24) +#define DESC_PKTLEN_M 0xfff + u32 next; + u32 pad; +} __attribute__((aligned(4))); + +#define AG71XX_DESC_SIZE roundup(sizeof(struct ag71xx_desc), \ + L1_CACHE_BYTES) + +struct ag71xx_buf { + union { + struct sk_buff *skb; + void *rx_buf; + }; + union { + dma_addr_t dma_addr; + unsigned int len; + }; +}; + +struct ag71xx_ring { + struct ag71xx_buf *buf; + u8 *descs_cpu; + dma_addr_t descs_dma; + u16 desc_split; + u16 order; + unsigned int curr; + unsigned int dirty; +}; + +struct ag71xx_int_stats { + unsigned long rx_pr; + unsigned long rx_be; + unsigned long rx_of; + unsigned long tx_ps; + unsigned long tx_be; + unsigned long tx_ur; + unsigned long total; +}; + +struct ag71xx_napi_stats { + unsigned long napi_calls; + unsigned long rx_count; + unsigned long rx_packets; + unsigned long rx_packets_max; + unsigned long tx_count; + unsigned long tx_packets; + unsigned long tx_packets_max; + + unsigned long rx[AG71XX_NAPI_WEIGHT + 1]; + unsigned long tx[AG71XX_NAPI_WEIGHT + 1]; +}; + +struct ag71xx_debug { + struct dentry *debugfs_dir; + + struct ag71xx_int_stats int_stats; + struct ag71xx_napi_stats napi_stats; +}; + +struct ag71xx { + /* + * Critical data related to the per-packet data path are clustered + * early in this structure to help improve the D-cache footprint. + */ + struct ag71xx_ring rx_ring ____cacheline_aligned; + struct ag71xx_ring tx_ring ____cacheline_aligned; + + int mac_idx; + + u16 desc_pktlen_mask; + u16 rx_buf_size; + u8 rx_buf_offset; + u8 tx_hang_workaround:1; + + struct net_device *dev; + struct platform_device *pdev; + spinlock_t lock; + struct napi_struct napi; + u32 msg_enable; + + /* + * From this point onwards we're not looking at per-packet fields. 
+ */ + void __iomem *mac_base; + void __iomem *mii_base; + + struct ag71xx_desc *stop_desc; + dma_addr_t stop_desc_dma; + + struct phy_device *phy_dev; + void *phy_priv; + int phy_if_mode; + + unsigned int link; + unsigned int speed; + int duplex; + + struct delayed_work restart_work; + struct timer_list oom_timer; + + struct reset_control *mac_reset; + struct reset_control *mdio_reset; + + u32 fifodata[3]; + u32 plldata[3]; + u32 pllreg[3]; + struct regmap *pllregmap; + +#ifdef CONFIG_AG71XX_DEBUG_FS + struct ag71xx_debug debug; +#endif +}; + +struct ag71xx_mdio { + struct reset_control *mdio_reset; + struct mii_bus *mii_bus; + struct regmap *mii_regmap; +}; + +extern struct ethtool_ops ag71xx_ethtool_ops; +void ag71xx_link_adjust(struct ag71xx *ag); + +int ag71xx_phy_connect(struct ag71xx *ag); +void ag71xx_phy_disconnect(struct ag71xx *ag); + +static inline int ag71xx_desc_empty(struct ag71xx_desc *desc) +{ + return (desc->ctrl & DESC_EMPTY) != 0; +} + +static inline struct ag71xx_desc * +ag71xx_ring_desc(struct ag71xx_ring *ring, int idx) +{ + return (struct ag71xx_desc *) &ring->descs_cpu[idx * AG71XX_DESC_SIZE]; +} + +static inline int +ag71xx_ring_size_order(int size) +{ + return fls(size - 1); +} + +/* Register offsets */ +#define AG71XX_REG_MAC_CFG1 0x0000 +#define AG71XX_REG_MAC_CFG2 0x0004 +#define AG71XX_REG_MAC_IPG 0x0008 +#define AG71XX_REG_MAC_HDX 0x000c +#define AG71XX_REG_MAC_MFL 0x0010 +#define AG71XX_REG_MII_CFG 0x0020 +#define AG71XX_REG_MII_CMD 0x0024 +#define AG71XX_REG_MII_ADDR 0x0028 +#define AG71XX_REG_MII_CTRL 0x002c +#define AG71XX_REG_MII_STATUS 0x0030 +#define AG71XX_REG_MII_IND 0x0034 +#define AG71XX_REG_MAC_IFCTL 0x0038 +#define AG71XX_REG_MAC_ADDR1 0x0040 +#define AG71XX_REG_MAC_ADDR2 0x0044 +#define AG71XX_REG_FIFO_CFG0 0x0048 +#define AG71XX_REG_FIFO_CFG1 0x004c +#define AG71XX_REG_FIFO_CFG2 0x0050 +#define AG71XX_REG_FIFO_CFG3 0x0054 +#define AG71XX_REG_FIFO_CFG4 0x0058 +#define AG71XX_REG_FIFO_CFG5 0x005c +#define AG71XX_REG_FIFO_RAM0 0x0060 +#define AG71XX_REG_FIFO_RAM1 0x0064 +#define AG71XX_REG_FIFO_RAM2 0x0068 +#define AG71XX_REG_FIFO_RAM3 0x006c +#define AG71XX_REG_FIFO_RAM4 0x0070 +#define AG71XX_REG_FIFO_RAM5 0x0074 +#define AG71XX_REG_FIFO_RAM6 0x0078 +#define AG71XX_REG_FIFO_RAM7 0x007c + +#define AG71XX_REG_TX_CTRL 0x0180 +#define AG71XX_REG_TX_DESC 0x0184 +#define AG71XX_REG_TX_STATUS 0x0188 +#define AG71XX_REG_RX_CTRL 0x018c +#define AG71XX_REG_RX_DESC 0x0190 +#define AG71XX_REG_RX_STATUS 0x0194 +#define AG71XX_REG_INT_ENABLE 0x0198 +#define AG71XX_REG_INT_STATUS 0x019c + +#define AG71XX_REG_FIFO_DEPTH 0x01a8 +#define AG71XX_REG_RX_SM 0x01b0 +#define AG71XX_REG_TX_SM 0x01b4 + +#define MAC_CFG1_TXE BIT(0) /* Tx Enable */ +#define MAC_CFG1_STX BIT(1) /* Synchronize Tx Enable */ +#define MAC_CFG1_RXE BIT(2) /* Rx Enable */ +#define MAC_CFG1_SRX BIT(3) /* Synchronize Rx Enable */ +#define MAC_CFG1_TFC BIT(4) /* Tx Flow Control Enable */ +#define MAC_CFG1_RFC BIT(5) /* Rx Flow Control Enable */ +#define MAC_CFG1_LB BIT(8) /* Loopback mode */ +#define MAC_CFG1_SR BIT(31) /* Soft Reset */ + +#define MAC_CFG2_FDX BIT(0) +#define MAC_CFG2_CRC_EN BIT(1) +#define MAC_CFG2_PAD_CRC_EN BIT(2) +#define MAC_CFG2_LEN_CHECK BIT(4) +#define MAC_CFG2_HUGE_FRAME_EN BIT(5) +#define MAC_CFG2_IF_1000 BIT(9) +#define MAC_CFG2_IF_10_100 BIT(8) + +#define FIFO_CFG0_WTM BIT(0) /* Watermark Module */ +#define FIFO_CFG0_RXS BIT(1) /* Rx System Module */ +#define FIFO_CFG0_RXF BIT(2) /* Rx Fabric Module */ +#define FIFO_CFG0_TXS BIT(3) /* Tx System Module */ +#define 
FIFO_CFG0_TXF BIT(4) /* Tx Fabric Module */ +#define FIFO_CFG0_ALL (FIFO_CFG0_WTM | FIFO_CFG0_RXS | FIFO_CFG0_RXF \ + | FIFO_CFG0_TXS | FIFO_CFG0_TXF) + +#define FIFO_CFG0_ENABLE_SHIFT 8 + +#define FIFO_CFG4_DE BIT(0) /* Drop Event */ +#define FIFO_CFG4_DV BIT(1) /* RX_DV Event */ +#define FIFO_CFG4_FC BIT(2) /* False Carrier */ +#define FIFO_CFG4_CE BIT(3) /* Code Error */ +#define FIFO_CFG4_CR BIT(4) /* CRC error */ +#define FIFO_CFG4_LM BIT(5) /* Length Mismatch */ +#define FIFO_CFG4_LO BIT(6) /* Length out of range */ +#define FIFO_CFG4_OK BIT(7) /* Packet is OK */ +#define FIFO_CFG4_MC BIT(8) /* Multicast Packet */ +#define FIFO_CFG4_BC BIT(9) /* Broadcast Packet */ +#define FIFO_CFG4_DR BIT(10) /* Dribble */ +#define FIFO_CFG4_LE BIT(11) /* Long Event */ +#define FIFO_CFG4_CF BIT(12) /* Control Frame */ +#define FIFO_CFG4_PF BIT(13) /* Pause Frame */ +#define FIFO_CFG4_UO BIT(14) /* Unsupported Opcode */ +#define FIFO_CFG4_VT BIT(15) /* VLAN tag detected */ +#define FIFO_CFG4_FT BIT(16) /* Frame Truncated */ +#define FIFO_CFG4_UC BIT(17) /* Unicast Packet */ + +#define FIFO_CFG5_DE BIT(0) /* Drop Event */ +#define FIFO_CFG5_DV BIT(1) /* RX_DV Event */ +#define FIFO_CFG5_FC BIT(2) /* False Carrier */ +#define FIFO_CFG5_CE BIT(3) /* Code Error */ +#define FIFO_CFG5_LM BIT(4) /* Length Mismatch */ +#define FIFO_CFG5_LO BIT(5) /* Length Out of Range */ +#define FIFO_CFG5_OK BIT(6) /* Packet is OK */ +#define FIFO_CFG5_MC BIT(7) /* Multicast Packet */ +#define FIFO_CFG5_BC BIT(8) /* Broadcast Packet */ +#define FIFO_CFG5_DR BIT(9) /* Dribble */ +#define FIFO_CFG5_CF BIT(10) /* Control Frame */ +#define FIFO_CFG5_PF BIT(11) /* Pause Frame */ +#define FIFO_CFG5_UO BIT(12) /* Unsupported Opcode */ +#define FIFO_CFG5_VT BIT(13) /* VLAN tag detected */ +#define FIFO_CFG5_LE BIT(14) /* Long Event */ +#define FIFO_CFG5_FT BIT(15) /* Frame Truncated */ +#define FIFO_CFG5_16 BIT(16) /* unknown */ +#define FIFO_CFG5_17 BIT(17) /* unknown */ +#define FIFO_CFG5_SF BIT(18) /* Short Frame */ +#define FIFO_CFG5_BM BIT(19) /* Byte Mode */ + +#define AG71XX_INT_TX_PS BIT(0) +#define AG71XX_INT_TX_UR BIT(1) +#define AG71XX_INT_TX_BE BIT(3) +#define AG71XX_INT_RX_PR BIT(4) +#define AG71XX_INT_RX_OF BIT(6) +#define AG71XX_INT_RX_BE BIT(7) + +#define MAC_IFCTL_SPEED BIT(16) + +#define MII_CFG_CLK_DIV_4 0 +#define MII_CFG_CLK_DIV_6 2 +#define MII_CFG_CLK_DIV_8 3 +#define MII_CFG_CLK_DIV_10 4 +#define MII_CFG_CLK_DIV_14 5 +#define MII_CFG_CLK_DIV_20 6 +#define MII_CFG_CLK_DIV_28 7 +#define MII_CFG_CLK_DIV_34 8 +#define MII_CFG_CLK_DIV_42 9 +#define MII_CFG_CLK_DIV_50 10 +#define MII_CFG_CLK_DIV_58 11 +#define MII_CFG_CLK_DIV_66 12 +#define MII_CFG_CLK_DIV_74 13 +#define MII_CFG_CLK_DIV_82 14 +#define MII_CFG_CLK_DIV_98 15 +#define MII_CFG_RESET BIT(31) + +#define MII_CMD_WRITE 0x0 +#define MII_CMD_READ 0x1 +#define MII_ADDR_SHIFT 8 +#define MII_IND_BUSY BIT(0) +#define MII_IND_INVALID BIT(2) + +#define TX_CTRL_TXE BIT(0) /* Tx Enable */ + +#define TX_STATUS_PS BIT(0) /* Packet Sent */ +#define TX_STATUS_UR BIT(1) /* Tx Underrun */ +#define TX_STATUS_BE BIT(3) /* Bus Error */ + +#define RX_CTRL_RXE BIT(0) /* Rx Enable */ + +#define RX_STATUS_PR BIT(0) /* Packet Received */ +#define RX_STATUS_OF BIT(2) /* Rx Overflow */ +#define RX_STATUS_BE BIT(3) /* Bus Error */ + +static inline void ag71xx_wr(struct ag71xx *ag, unsigned reg, u32 value) +{ + __raw_writel(value, ag->mac_base + reg); + /* flush write */ + (void) __raw_readl(ag->mac_base + reg); +} + +static inline u32 ag71xx_rr(struct ag71xx *ag, unsigned reg) +{ 
+ return __raw_readl(ag->mac_base + reg); +} + +static inline void ag71xx_sb(struct ag71xx *ag, unsigned reg, u32 mask) +{ + void __iomem *r; + + r = ag->mac_base + reg; + __raw_writel(__raw_readl(r) | mask, r); + /* flush write */ + (void) __raw_readl(r); +} + +static inline void ag71xx_cb(struct ag71xx *ag, unsigned reg, u32 mask) +{ + void __iomem *r; + + r = ag->mac_base + reg; + __raw_writel(__raw_readl(r) & ~mask, r); + /* flush write */ + (void) __raw_readl(r); +} + +static inline void ag71xx_int_enable(struct ag71xx *ag, u32 ints) +{ + ag71xx_sb(ag, AG71XX_REG_INT_ENABLE, ints); +} + +static inline void ag71xx_int_disable(struct ag71xx *ag, u32 ints) +{ + ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints); +} + +#ifdef CONFIG_AG71XX_DEBUG_FS +int ag71xx_debugfs_root_init(void); +void ag71xx_debugfs_root_exit(void); +int ag71xx_debugfs_init(struct ag71xx *ag); +void ag71xx_debugfs_exit(struct ag71xx *ag); +void ag71xx_debugfs_update_int_stats(struct ag71xx *ag, u32 status); +void ag71xx_debugfs_update_napi_stats(struct ag71xx *ag, int rx, int tx); +#else +static inline int ag71xx_debugfs_root_init(void) { return 0; } +static inline void ag71xx_debugfs_root_exit(void) {} +static inline int ag71xx_debugfs_init(struct ag71xx *ag) { return 0; } +static inline void ag71xx_debugfs_exit(struct ag71xx *ag) {} +static inline void ag71xx_debugfs_update_int_stats(struct ag71xx *ag, + u32 status) {} +static inline void ag71xx_debugfs_update_napi_stats(struct ag71xx *ag, + int rx, int tx) {} +#endif /* CONFIG_AG71XX_DEBUG_FS */ + +int ag71xx_ar7240_init(struct ag71xx *ag, struct device_node *np); +void ag71xx_ar7240_cleanup(struct ag71xx *ag); + +int ag71xx_setup_gmac(struct device_node *np); + +int ar7240sw_phy_read(struct mii_bus *mii, int addr, int reg); +int ar7240sw_phy_write(struct mii_bus *mii, int addr, int reg, u16 val); + +#endif /* _AG71XX_H */ diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c new file mode 100644 index 0000000000..20cf1c15c8 --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_debugfs.c @@ -0,0 +1,285 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include + +#include "ag71xx.h" + +static struct dentry *ag71xx_debugfs_root; + +static int ag71xx_debugfs_generic_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +void ag71xx_debugfs_update_int_stats(struct ag71xx *ag, u32 status) +{ + if (status) + ag->debug.int_stats.total++; + if (status & AG71XX_INT_TX_PS) + ag->debug.int_stats.tx_ps++; + if (status & AG71XX_INT_TX_UR) + ag->debug.int_stats.tx_ur++; + if (status & AG71XX_INT_TX_BE) + ag->debug.int_stats.tx_be++; + if (status & AG71XX_INT_RX_PR) + ag->debug.int_stats.rx_pr++; + if (status & AG71XX_INT_RX_OF) + ag->debug.int_stats.rx_of++; + if (status & AG71XX_INT_RX_BE) + ag->debug.int_stats.rx_be++; +} + +static ssize_t read_file_int_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ +#define PR_INT_STAT(_label, _field) \ + len += snprintf(buf + len, sizeof(buf) - len, \ + "%20s: %10lu\n", _label, ag->debug.int_stats._field); + + struct ag71xx *ag = file->private_data; + char buf[256]; + unsigned int len = 0; + + PR_INT_STAT("TX Packet Sent", tx_ps); + PR_INT_STAT("TX Underrun", tx_ur); + PR_INT_STAT("TX Bus Error", tx_be); + PR_INT_STAT("RX Packet Received", rx_pr); + PR_INT_STAT("RX Overflow", rx_of); + PR_INT_STAT("RX Bus Error", rx_be); + len += snprintf(buf + len, sizeof(buf) - len, "\n"); + PR_INT_STAT("Total", total); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +#undef PR_INT_STAT +} + +static const struct file_operations ag71xx_fops_int_stats = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_int_stats, + .owner = THIS_MODULE +}; + +void ag71xx_debugfs_update_napi_stats(struct ag71xx *ag, int rx, int tx) +{ + struct ag71xx_napi_stats *stats = &ag->debug.napi_stats; + + if (rx) { + stats->rx_count++; + stats->rx_packets += rx; + if (rx <= AG71XX_NAPI_WEIGHT) + stats->rx[rx]++; + if (rx > stats->rx_packets_max) + stats->rx_packets_max = rx; + } + + if (tx) { + stats->tx_count++; + stats->tx_packets += tx; + if (tx <= AG71XX_NAPI_WEIGHT) + stats->tx[tx]++; + if (tx > stats->tx_packets_max) + stats->tx_packets_max = tx; + } +} + +static ssize_t read_file_napi_stats(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ag71xx *ag = file->private_data; + struct ag71xx_napi_stats *stats = &ag->debug.napi_stats; + char *buf; + unsigned int buflen; + unsigned int len = 0; + unsigned long rx_avg = 0; + unsigned long tx_avg = 0; + int ret; + int i; + + buflen = 2048; + buf = kmalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (stats->rx_count) + rx_avg = stats->rx_packets / stats->rx_count; + + if (stats->tx_count) + tx_avg = stats->tx_packets / stats->tx_count; + + len += snprintf(buf + len, buflen - len, "%3s %10s %10s\n", + "len", "rx", "tx"); + + for (i = 1; i <= AG71XX_NAPI_WEIGHT; i++) + len += snprintf(buf + len, buflen - len, + "%3d: %10lu %10lu\n", + i, stats->rx[i], stats->tx[i]); + + len += snprintf(buf + len, buflen - len, "\n"); + + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "sum", stats->rx_count, stats->tx_count); + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "avg", rx_avg, tx_avg); + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "max", stats->rx_packets_max, stats->tx_packets_max); + len += snprintf(buf + len, buflen - len, "%3s: %10lu %10lu\n", + "pkt", stats->rx_packets, stats->tx_packets); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return 
ret; +} + +static const struct file_operations ag71xx_fops_napi_stats = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_napi_stats, + .owner = THIS_MODULE +}; + +#define DESC_PRINT_LEN 64 + +static ssize_t read_file_ring(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos, + struct ag71xx *ag, + struct ag71xx_ring *ring, + unsigned desc_reg) +{ + int ring_size = BIT(ring->order); + int ring_mask = ring_size - 1; + char *buf; + unsigned int buflen; + unsigned int len = 0; + unsigned long flags; + ssize_t ret; + int curr; + int dirty; + u32 desc_hw; + int i; + + buflen = (ring_size * DESC_PRINT_LEN); + buf = kmalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len += snprintf(buf + len, buflen - len, + "Idx ... %-8s %-8s %-8s %-8s .\n", + "desc", "next", "data", "ctrl"); + + spin_lock_irqsave(&ag->lock, flags); + + curr = (ring->curr & ring_mask); + dirty = (ring->dirty & ring_mask); + desc_hw = ag71xx_rr(ag, desc_reg); + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + u32 desc_dma = ((u32) ring->descs_dma) + i * AG71XX_DESC_SIZE; + + len += snprintf(buf + len, buflen - len, + "%3d %c%c%c %08x %08x %08x %08x %c\n", + i, + (i == curr) ? 'C' : ' ', + (i == dirty) ? 'D' : ' ', + (desc_hw == desc_dma) ? 'H' : ' ', + desc_dma, + desc->next, + desc->data, + desc->ctrl, + (desc->ctrl & DESC_EMPTY) ? 'E' : '*'); + } + + spin_unlock_irqrestore(&ag->lock, flags); + + ret = simple_read_from_buffer(user_buf, count, ppos, buf, len); + kfree(buf); + + return ret; +} + +static ssize_t read_file_tx_ring(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ag71xx *ag = file->private_data; + + return read_file_ring(file, user_buf, count, ppos, ag, &ag->tx_ring, + AG71XX_REG_TX_DESC); +} + +static const struct file_operations ag71xx_fops_tx_ring = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_tx_ring, + .owner = THIS_MODULE +}; + +static ssize_t read_file_rx_ring(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ag71xx *ag = file->private_data; + + return read_file_ring(file, user_buf, count, ppos, ag, &ag->rx_ring, + AG71XX_REG_RX_DESC); +} + +static const struct file_operations ag71xx_fops_rx_ring = { + .open = ag71xx_debugfs_generic_open, + .read = read_file_rx_ring, + .owner = THIS_MODULE +}; + +void ag71xx_debugfs_exit(struct ag71xx *ag) +{ + debugfs_remove_recursive(ag->debug.debugfs_dir); +} + +int ag71xx_debugfs_init(struct ag71xx *ag) +{ + struct device *dev = &ag->pdev->dev; + + ag->debug.debugfs_dir = debugfs_create_dir(dev_name(dev), + ag71xx_debugfs_root); + if (!ag->debug.debugfs_dir) { + dev_err(dev, "unable to create debugfs directory\n"); + return -ENOENT; + } + + debugfs_create_file("int_stats", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_int_stats); + debugfs_create_file("napi_stats", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_napi_stats); + debugfs_create_file("tx_ring", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_tx_ring); + debugfs_create_file("rx_ring", S_IRUGO, ag->debug.debugfs_dir, + ag, &ag71xx_fops_rx_ring); + + return 0; +} + +int ag71xx_debugfs_root_init(void) +{ + if (ag71xx_debugfs_root) + return -EBUSY; + + ag71xx_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); + if (!ag71xx_debugfs_root) + return -ENOENT; + + return 0; +} + +void ag71xx_debugfs_root_exit(void) +{ + debugfs_remove(ag71xx_debugfs_root); + ag71xx_debugfs_root = NULL; +} diff --git 
a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c new file mode 100644 index 0000000000..2cd7b1be83 --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_ethtool.c @@ -0,0 +1,108 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include "ag71xx.h" + +static u32 ag71xx_ethtool_get_msglevel(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + + return ag->msg_enable; +} + +static void ag71xx_ethtool_set_msglevel(struct net_device *dev, u32 msg_level) +{ + struct ag71xx *ag = netdev_priv(dev); + + ag->msg_enable = msg_level; +} + +static void ag71xx_ethtool_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *er) +{ + struct ag71xx *ag = netdev_priv(dev); + + er->tx_max_pending = AG71XX_TX_RING_SIZE_MAX; + er->rx_max_pending = AG71XX_RX_RING_SIZE_MAX; + er->rx_mini_max_pending = 0; + er->rx_jumbo_max_pending = 0; + + er->tx_pending = BIT(ag->tx_ring.order); + er->rx_pending = BIT(ag->rx_ring.order); + er->rx_mini_pending = 0; + er->rx_jumbo_pending = 0; + + if (ag->tx_ring.desc_split) + er->tx_pending /= AG71XX_TX_RING_DS_PER_PKT; +} + +static int ag71xx_ethtool_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *er) +{ + struct ag71xx *ag = netdev_priv(dev); + unsigned tx_size; + unsigned rx_size; + int err = 0; + + if (er->rx_mini_pending != 0|| + er->rx_jumbo_pending != 0 || + er->rx_pending == 0 || + er->tx_pending == 0) + return -EINVAL; + + tx_size = er->tx_pending < AG71XX_TX_RING_SIZE_MAX ? + er->tx_pending : AG71XX_TX_RING_SIZE_MAX; + + rx_size = er->rx_pending < AG71XX_RX_RING_SIZE_MAX ? 
+ er->rx_pending : AG71XX_RX_RING_SIZE_MAX; + + if (netif_running(dev)) { + err = dev->netdev_ops->ndo_stop(dev); + if (err) + return err; + } + + if (ag->tx_ring.desc_split) + tx_size *= AG71XX_TX_RING_DS_PER_PKT; + + ag->tx_ring.order = ag71xx_ring_size_order(tx_size); + ag->rx_ring.order = ag71xx_ring_size_order(rx_size); + + if (netif_running(dev)) + err = dev->netdev_ops->ndo_open(dev); + + return err; +} + +static int ag71xx_ethtool_nway_reset(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + struct phy_device *phydev = ag->phy_dev; + + if (!phydev) + return -ENODEV; + + return genphy_restart_aneg(phydev); +} + +struct ethtool_ops ag71xx_ethtool_ops = { + .get_msglevel = ag71xx_ethtool_get_msglevel, + .set_msglevel = ag71xx_ethtool_set_msglevel, + .get_ringparam = ag71xx_ethtool_get_ringparam, + .set_ringparam = ag71xx_ethtool_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_ts_info = ethtool_op_get_ts_info, + .nway_reset = ag71xx_ethtool_nway_reset, +}; diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_gmac.c b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_gmac.c new file mode 100644 index 0000000000..cc0a15d3a4 --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_gmac.c @@ -0,0 +1,135 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#include +#include +#include "ag71xx.h" + +static void ag71xx_of_set(struct device_node *np, const char *prop, + u32 *reg, u32 shift, u32 mask) +{ + u32 val; + + if (of_property_read_u32(np, prop, &val)) + return; + + *reg &= ~(mask << shift); + *reg |= ((val & mask) << shift); +} + +static void ag71xx_of_bit(struct device_node *np, const char *prop, + u32 *reg, u32 mask) +{ + u32 val; + + if (of_property_read_u32(np, prop, &val)) + return; + + if (val) + *reg |= mask; + else + *reg &= ~mask; +} + +static void ag71xx_setup_gmac_933x(struct device_node *np, void __iomem *base) +{ + u32 val = __raw_readl(base + AR933X_GMAC_REG_ETH_CFG); + + ag71xx_of_bit(np, "switch-phy-swap", &val, AR933X_ETH_CFG_SW_PHY_SWAP); + ag71xx_of_bit(np, "switch-phy-addr-swap", &val, + AR933X_ETH_CFG_SW_PHY_ADDR_SWAP); + + __raw_writel(val, base + AR933X_GMAC_REG_ETH_CFG); +} + +static void ag71xx_setup_gmac_934x(struct device_node *np, void __iomem *base) +{ + u32 val = __raw_readl(base + AR934X_GMAC_REG_ETH_CFG); + + ag71xx_of_bit(np, "rgmii-gmac0", &val, AR934X_ETH_CFG_RGMII_GMAC0); + ag71xx_of_bit(np, "mii-gmac0", &val, AR934X_ETH_CFG_MII_GMAC0); + ag71xx_of_bit(np, "mii-gmac0-slave", &val, AR934X_ETH_CFG_MII_GMAC0_SLAVE); + ag71xx_of_bit(np, "gmii-gmac0", &val, AR934X_ETH_CFG_GMII_GMAC0); + ag71xx_of_bit(np, "switch-phy-swap", &val, AR934X_ETH_CFG_SW_PHY_SWAP); + ag71xx_of_bit(np, "switch-only-mode", &val, + AR934X_ETH_CFG_SW_ONLY_MODE); + ag71xx_of_set(np, "rxdv-delay", &val, + AR934X_ETH_CFG_RDV_DELAY_SHIFT, 0x3); + ag71xx_of_set(np, "rxd-delay", &val, + AR934X_ETH_CFG_RXD_DELAY_SHIFT, 0x3); + ag71xx_of_set(np, "txd-delay", &val, + AR934X_ETH_CFG_TXD_DELAY_SHIFT, 0x3); + ag71xx_of_set(np, "txen-delay", &val, + AR934X_ETH_CFG_TXE_DELAY_SHIFT, 0x3); + + __raw_writel(val, base + AR934X_GMAC_REG_ETH_CFG); +} + +static void 
ag71xx_setup_gmac_955x(struct device_node *np, void __iomem *base) +{ + u32 val = __raw_readl(base + QCA955X_GMAC_REG_ETH_CFG); + + ag71xx_of_bit(np, "rgmii-enabled", &val, QCA955X_ETH_CFG_RGMII_EN); + ag71xx_of_bit(np, "ge0-sgmii", &val, QCA955X_ETH_CFG_GE0_SGMII); + ag71xx_of_set(np, "txen-delay", &val, QCA955X_ETH_CFG_TXE_DELAY_SHIFT, 0x3); + ag71xx_of_set(np, "txd-delay", &val, QCA955X_ETH_CFG_TXD_DELAY_SHIFT, 0x3); + ag71xx_of_set(np, "rxdv-delay", &val, QCA955X_ETH_CFG_RDV_DELAY_SHIFT, 0x3); + ag71xx_of_set(np, "rxd-delay", &val, QCA955X_ETH_CFG_RXD_DELAY_SHIFT, 0x3); + + __raw_writel(val, base + QCA955X_GMAC_REG_ETH_CFG); +} + +static void ag71xx_setup_gmac_956x(struct device_node *np, void __iomem *base) +{ + u32 val = __raw_readl(base + QCA956X_GMAC_REG_ETH_CFG); + + ag71xx_of_bit(np, "switch-phy-swap", &val, QCA956X_ETH_CFG_SW_PHY_SWAP); + ag71xx_of_bit(np, "switch-phy-addr-swap", &val, + QCA956X_ETH_CFG_SW_PHY_ADDR_SWAP); + + __raw_writel(val, base + QCA956X_GMAC_REG_ETH_CFG); +} + +int ag71xx_setup_gmac(struct device_node *np) +{ + struct device_node *np_dev; + void __iomem *base; + int err = 0; + + np = of_get_child_by_name(np, "gmac-config"); + if (!np) + return 0; + + np_dev = of_parse_phandle(np, "device", 0); + if (!np_dev) + goto out; + + base = of_iomap(np_dev, 0); + if (!base) { + pr_err("%pOF: can't map GMAC registers\n", np_dev); + err = -ENOMEM; + goto err_iomap; + } + + if (of_device_is_compatible(np_dev, "qca,ar9330-gmac")) + ag71xx_setup_gmac_933x(np, base); + else if (of_device_is_compatible(np_dev, "qca,ar9340-gmac")) + ag71xx_setup_gmac_934x(np, base); + else if (of_device_is_compatible(np_dev, "qca,qca9550-gmac")) + ag71xx_setup_gmac_955x(np, base); + else if (of_device_is_compatible(np_dev, "qca,qca9560-gmac")) + ag71xx_setup_gmac_956x(np, base); + + iounmap(base); + +err_iomap: + of_node_put(np_dev); +out: + of_node_put(np); + return err; +} diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c new file mode 100644 index 0000000000..2394ccc90d --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c @@ -0,0 +1,1736 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include "ag71xx.h" + +#define AG71XX_DEFAULT_MSG_ENABLE \ + (NETIF_MSG_DRV \ + | NETIF_MSG_PROBE \ + | NETIF_MSG_LINK \ + | NETIF_MSG_TIMER \ + | NETIF_MSG_IFDOWN \ + | NETIF_MSG_IFUP \ + | NETIF_MSG_RX_ERR \ + | NETIF_MSG_TX_ERR) + +static int ag71xx_msg_level = -1; + +module_param_named(msg_level, ag71xx_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); + +#define ETH_SWITCH_HEADER_LEN 2 + +static int ag71xx_tx_packets(struct ag71xx *ag, bool flush); + +static inline unsigned int ag71xx_max_frame_len(unsigned int mtu) +{ + return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN; +} + +static void ag71xx_dump_dma_regs(struct ag71xx *ag) +{ + DBG("%s: dma_tx_ctrl=%08x, dma_tx_desc=%08x, dma_tx_status=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_TX_CTRL), + ag71xx_rr(ag, AG71XX_REG_TX_DESC), + ag71xx_rr(ag, AG71XX_REG_TX_STATUS)); + + DBG("%s: dma_rx_ctrl=%08x, dma_rx_desc=%08x, dma_rx_status=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_RX_CTRL), + ag71xx_rr(ag, AG71XX_REG_RX_DESC), + ag71xx_rr(ag, AG71XX_REG_RX_STATUS)); +} + +static void ag71xx_dump_regs(struct ag71xx *ag) +{ + DBG("%s: mac_cfg1=%08x, mac_cfg2=%08x, ipg=%08x, hdx=%08x, mfl=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_MAC_CFG1), + ag71xx_rr(ag, AG71XX_REG_MAC_CFG2), + ag71xx_rr(ag, AG71XX_REG_MAC_IPG), + ag71xx_rr(ag, AG71XX_REG_MAC_HDX), + ag71xx_rr(ag, AG71XX_REG_MAC_MFL)); + DBG("%s: mac_ifctl=%08x, mac_addr1=%08x, mac_addr2=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL), + ag71xx_rr(ag, AG71XX_REG_MAC_ADDR1), + ag71xx_rr(ag, AG71XX_REG_MAC_ADDR2)); + DBG("%s: fifo_cfg0=%08x, fifo_cfg1=%08x, fifo_cfg2=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2)); + DBG("%s: fifo_cfg3=%08x, fifo_cfg4=%08x, fifo_cfg5=%08x\n", + ag->dev->name, + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4), + ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5)); +} + +static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr) +{ + DBG("%s: %s intr=%08x %s%s%s%s%s%s\n", + ag->dev->name, label, intr, + (intr & AG71XX_INT_TX_PS) ? "TXPS " : "", + (intr & AG71XX_INT_TX_UR) ? "TXUR " : "", + (intr & AG71XX_INT_TX_BE) ? "TXBE " : "", + (intr & AG71XX_INT_RX_PR) ? "RXPR " : "", + (intr & AG71XX_INT_RX_OF) ? "RXOF " : "", + (intr & AG71XX_INT_RX_BE) ? 
"RXBE " : ""); +} + +static void ag71xx_ring_tx_clean(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->tx_ring; + struct net_device *dev = ag->dev; + int ring_mask = BIT(ring->order) - 1; + u32 bytes_compl = 0, pkts_compl = 0; + + while (ring->curr != ring->dirty) { + struct ag71xx_desc *desc; + u32 i = ring->dirty & ring_mask; + + desc = ag71xx_ring_desc(ring, i); + if (!ag71xx_desc_empty(desc)) { + desc->ctrl = 0; + dev->stats.tx_errors++; + } + + if (ring->buf[i].skb) { + bytes_compl += ring->buf[i].len; + pkts_compl++; + dev_kfree_skb_any(ring->buf[i].skb); + } + ring->buf[i].skb = NULL; + ring->dirty++; + } + + /* flush descriptors */ + wmb(); + + netdev_completed_queue(dev, pkts_compl, bytes_compl); +} + +static void ag71xx_ring_tx_init(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->tx_ring; + int ring_size = BIT(ring->order); + int ring_mask = BIT(ring->order) - 1; + int i; + + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + + desc->next = (u32) (ring->descs_dma + + AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); + + desc->ctrl = DESC_EMPTY; + ring->buf[i].skb = NULL; + } + + /* flush descriptors */ + wmb(); + + ring->curr = 0; + ring->dirty = 0; + netdev_reset_queue(ag->dev); +} + +static void ag71xx_ring_rx_clean(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + int ring_size = BIT(ring->order); + int i; + + if (!ring->buf) + return; + + for (i = 0; i < ring_size; i++) + if (ring->buf[i].rx_buf) { + dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr, + ag->rx_buf_size, DMA_FROM_DEVICE); + skb_free_frag(ring->buf[i].rx_buf); + } +} + +static int ag71xx_buffer_size(struct ag71xx *ag) +{ + return ag->rx_buf_size + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf, + int offset, + void *(*alloc)(unsigned int size)) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]); + void *data; + + data = alloc(ag71xx_buffer_size(ag)); + if (!data) + return false; + + buf->rx_buf = data; + buf->dma_addr = dma_map_single(&ag->pdev->dev, data, ag->rx_buf_size, + DMA_FROM_DEVICE); + desc->data = (u32) buf->dma_addr + offset; + return true; +} + +static int ag71xx_ring_rx_init(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + int ring_size = BIT(ring->order); + int ring_mask = BIT(ring->order) - 1; + unsigned int i; + int ret; + + ret = 0; + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + + desc->next = (u32) (ring->descs_dma + + AG71XX_DESC_SIZE * ((i + 1) & ring_mask)); + + DBG("ag71xx: RX desc at %p, next is %08x\n", + desc, desc->next); + } + + for (i = 0; i < ring_size; i++) { + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + + if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], ag->rx_buf_offset, + netdev_alloc_frag)) { + ret = -ENOMEM; + break; + } + + desc->ctrl = DESC_EMPTY; + } + + /* flush descriptors */ + wmb(); + + ring->curr = 0; + ring->dirty = 0; + + return ret; +} + +static int ag71xx_ring_rx_refill(struct ag71xx *ag) +{ + struct ag71xx_ring *ring = &ag->rx_ring; + int ring_mask = BIT(ring->order) - 1; + unsigned int count; + int offset = ag->rx_buf_offset; + + count = 0; + for (; ring->curr - ring->dirty > 0; ring->dirty++) { + struct ag71xx_desc *desc; + unsigned int i; + + i = ring->dirty & ring_mask; + desc = ag71xx_ring_desc(ring, i); + + if (!ring->buf[i].rx_buf && + !ag71xx_fill_rx_buf(ag, 
&ring->buf[i], offset, + napi_alloc_frag)) + break; + + desc->ctrl = DESC_EMPTY; + count++; + } + + /* flush descriptors */ + wmb(); + + DBG("%s: %u rx descriptors refilled\n", ag->dev->name, count); + + return count; +} + +static int ag71xx_rings_init(struct ag71xx *ag) +{ + struct ag71xx_ring *tx = &ag->tx_ring; + struct ag71xx_ring *rx = &ag->rx_ring; + int ring_size = BIT(tx->order) + BIT(rx->order); + int tx_size = BIT(tx->order); + + tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL); + if (!tx->buf) + return -ENOMEM; + + tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, + &tx->descs_dma, GFP_KERNEL); + if (!tx->descs_cpu) { + kfree(tx->buf); + tx->buf = NULL; + return -ENOMEM; + } + + rx->buf = &tx->buf[tx_size]; + rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE; + rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE; + + ag71xx_ring_tx_init(ag); + return ag71xx_ring_rx_init(ag); +} + +static void ag71xx_rings_free(struct ag71xx *ag) +{ + struct ag71xx_ring *tx = &ag->tx_ring; + struct ag71xx_ring *rx = &ag->rx_ring; + int ring_size = BIT(tx->order) + BIT(rx->order); + + if (tx->descs_cpu) + dma_free_coherent(&ag->pdev->dev, ring_size * AG71XX_DESC_SIZE, + tx->descs_cpu, tx->descs_dma); + + kfree(tx->buf); + + tx->descs_cpu = NULL; + rx->descs_cpu = NULL; + tx->buf = NULL; + rx->buf = NULL; +} + +static void ag71xx_rings_cleanup(struct ag71xx *ag) +{ + ag71xx_ring_rx_clean(ag); + ag71xx_ring_tx_clean(ag); + ag71xx_rings_free(ag); + + netdev_reset_queue(ag->dev); +} + +static unsigned char *ag71xx_speed_str(struct ag71xx *ag) +{ + switch (ag->speed) { + case SPEED_1000: + return "1000"; + case SPEED_100: + return "100"; + case SPEED_10: + return "10"; + } + + return "?"; +} + +static void ag71xx_hw_set_macaddr(struct ag71xx *ag, unsigned char *mac) +{ + u32 t; + + t = (((u32) mac[5]) << 24) | (((u32) mac[4]) << 16) + | (((u32) mac[3]) << 8) | ((u32) mac[2]); + + ag71xx_wr(ag, AG71XX_REG_MAC_ADDR1, t); + + t = (((u32) mac[1]) << 24) | (((u32) mac[0]) << 16); + ag71xx_wr(ag, AG71XX_REG_MAC_ADDR2, t); +} + +static void ag71xx_dma_reset(struct ag71xx *ag) +{ + u32 val; + int i; + + ag71xx_dump_dma_regs(ag); + + /* stop RX and TX */ + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0); + ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0); + + /* + * give the hardware some time to really stop all rx/tx activity + * clearing the descriptors too early causes random memory corruption + */ + mdelay(1); + + /* clear descriptor addresses */ + ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->stop_desc_dma); + ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->stop_desc_dma); + + /* clear pending RX/TX interrupts */ + for (i = 0; i < 256; i++) { + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR); + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS); + } + + /* clear pending errors */ + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE | RX_STATUS_OF); + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE | TX_STATUS_UR); + + val = ag71xx_rr(ag, AG71XX_REG_RX_STATUS); + if (val) + pr_alert("%s: unable to clear DMA Rx status: %08x\n", + ag->dev->name, val); + + val = ag71xx_rr(ag, AG71XX_REG_TX_STATUS); + + /* mask out reserved bits */ + val &= ~0xff000000; + + if (val) + pr_alert("%s: unable to clear DMA Tx status: %08x\n", + ag->dev->name, val); + + ag71xx_dump_dma_regs(ag); +} + +#define MAC_CFG1_INIT (MAC_CFG1_RXE | MAC_CFG1_TXE | \ + MAC_CFG1_SRX | MAC_CFG1_STX) + +#define FIFO_CFG0_INIT (FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT) + +#define FIFO_CFG4_INIT (FIFO_CFG4_DE | FIFO_CFG4_DV 
| FIFO_CFG4_FC | \ + FIFO_CFG4_CE | FIFO_CFG4_CR | FIFO_CFG4_LM | \ + FIFO_CFG4_LO | FIFO_CFG4_OK | FIFO_CFG4_MC | \ + FIFO_CFG4_BC | FIFO_CFG4_DR | FIFO_CFG4_LE | \ + FIFO_CFG4_CF | FIFO_CFG4_PF | FIFO_CFG4_UO | \ + FIFO_CFG4_VT) + +#define FIFO_CFG5_INIT (FIFO_CFG5_DE | FIFO_CFG5_DV | FIFO_CFG5_FC | \ + FIFO_CFG5_CE | FIFO_CFG5_LO | FIFO_CFG5_OK | \ + FIFO_CFG5_MC | FIFO_CFG5_BC | FIFO_CFG5_DR | \ + FIFO_CFG5_CF | FIFO_CFG5_PF | FIFO_CFG5_VT | \ + FIFO_CFG5_LE | FIFO_CFG5_FT | FIFO_CFG5_16 | \ + FIFO_CFG5_17 | FIFO_CFG5_SF) + +static void ag71xx_hw_stop(struct ag71xx *ag) +{ + /* disable all interrupts and stop the rx/tx engine */ + ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, 0); + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, 0); + ag71xx_wr(ag, AG71XX_REG_TX_CTRL, 0); +} + +static void ag71xx_hw_setup(struct ag71xx *ag) +{ + struct device_node *np = ag->pdev->dev.of_node; + u32 init = MAC_CFG1_INIT; + + /* setup MAC configuration registers */ + if (of_property_read_bool(np, "flow-control")) + init |= MAC_CFG1_TFC | MAC_CFG1_RFC; + ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init); + + ag71xx_sb(ag, AG71XX_REG_MAC_CFG2, + MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK); + + /* setup max frame length to zero */ + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 0); + + /* setup FIFO configuration registers */ + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, ag->fifodata[0]); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, ag->fifodata[1]); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT); +} + +static void ag71xx_hw_init(struct ag71xx *ag) +{ + ag71xx_hw_stop(ag); + + ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR); + udelay(20); + + reset_control_assert(ag->mac_reset); + if (ag->mdio_reset) + reset_control_assert(ag->mdio_reset); + msleep(100); + reset_control_deassert(ag->mac_reset); + if (ag->mdio_reset) + reset_control_deassert(ag->mdio_reset); + msleep(200); + + ag71xx_hw_setup(ag); + + ag71xx_dma_reset(ag); +} + +static void ag71xx_fast_reset(struct ag71xx *ag) +{ + struct net_device *dev = ag->dev; + u32 rx_ds; + u32 mii_reg; + + ag71xx_hw_stop(ag); + wmb(); + + mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG); + rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC); + + ag71xx_tx_packets(ag, true); + + reset_control_assert(ag->mac_reset); + udelay(10); + reset_control_deassert(ag->mac_reset); + udelay(10); + + ag71xx_dma_reset(ag); + ag71xx_hw_setup(ag); + ag->tx_ring.curr = 0; + ag->tx_ring.dirty = 0; + netdev_reset_queue(ag->dev); + + /* setup max frame length */ + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, + ag71xx_max_frame_len(ag->dev->mtu)); + + ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds); + ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); + ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg); + + ag71xx_hw_set_macaddr(ag, dev->dev_addr); +} + +static void ag71xx_hw_start(struct ag71xx *ag) +{ + /* start RX engine */ + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE); + + /* enable interrupts */ + ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT); + + netif_wake_queue(ag->dev); +} + +static void ath79_set_pllval(struct ag71xx *ag) +{ + u32 pll_reg = ag->pllreg[1]; + u32 pll_val; + + if (!ag->pllregmap) + return; + + switch (ag->speed) { + case SPEED_10: + pll_val = ag->plldata[2]; + break; + case SPEED_100: + pll_val = ag->plldata[1]; + break; + case SPEED_1000: + pll_val = ag->plldata[0]; + break; + default: + BUG(); + } + + if (pll_val) + regmap_write(ag->pllregmap, pll_reg, pll_val); +} + +static void ath79_set_pll(struct ag71xx *ag) +{ + u32 pll_cfg = 
ag->pllreg[0]; + u32 pll_shift = ag->pllreg[2]; + + if (!ag->pllregmap) + return; + + regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 2 << pll_shift); + udelay(100); + + ath79_set_pllval(ag); + + regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 3 << pll_shift); + udelay(100); + + regmap_update_bits(ag->pllregmap, pll_cfg, 3 << pll_shift, 0); + udelay(100); +} + +static void ag71xx_bit_set(void __iomem *reg, u32 bit) +{ + u32 val; + + val = __raw_readl(reg) | bit; + __raw_writel(val, reg); + __raw_readl(reg); +} + +static void ag71xx_bit_clear(void __iomem *reg, u32 bit) +{ + u32 val; + + val = __raw_readl(reg) & ~bit; + __raw_writel(val, reg); + __raw_readl(reg); +} + +static void ag71xx_sgmii_init_qca955x(struct device_node *np) +{ + struct device_node *np_dev; + void __iomem *gmac_base; + u32 mr_an_status; + u32 sgmii_status; + u8 tries = 0; + int err = 0; + + np = of_get_child_by_name(np, "gmac-config"); + if (!np) + return; + + np_dev = of_parse_phandle(np, "device", 0); + if (!np_dev) + goto out; + + gmac_base = of_iomap(np_dev, 0); + if (!gmac_base) { + pr_err("%pOF: can't map GMAC registers\n", np_dev); + err = -ENOMEM; + goto err_iomap; + } + + mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS); + if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY)) + goto sgmii_out; + + /* SGMII reset sequence */ + __raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET, + gmac_base + QCA955X_GMAC_REG_SGMII_RESET); + __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET); + udelay(10); + + ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, + QCA955X_SGMII_RESET_HW_RX_125M_N); + udelay(10); + + ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, + QCA955X_SGMII_RESET_RX_125M_N); + udelay(10); + + ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, + QCA955X_SGMII_RESET_TX_125M_N); + udelay(10); + + ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, + QCA955X_SGMII_RESET_RX_CLK_N); + udelay(10); + + ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET, + QCA955X_SGMII_RESET_TX_CLK_N); + udelay(10); + + /* + * The following is what QCA has to say about what happens here: + * + * Across resets SGMII link status goes to weird state. + * If SGMII_DEBUG register reads other than 0x1f or 0x10, + * we are for sure in a bad state. + * + * Issue a PHY reset in MR_AN_CONTROL to keep going. 
+ */ + do { + ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL, + QCA955X_MR_AN_CONTROL_PHY_RESET | + QCA955X_MR_AN_CONTROL_AN_ENABLE); + udelay(200); + ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL, + QCA955X_MR_AN_CONTROL_PHY_RESET); + mdelay(300); + sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) & + QCA955X_SGMII_DEBUG_TX_STATE_MASK; + + if (tries++ >= 20) { + pr_err("ag71xx: max retries for SGMII fixup exceeded\n"); + break; + } + } while (!(sgmii_status == 0xf || sgmii_status == 0x10)); + +sgmii_out: + iounmap(gmac_base); +err_iomap: + of_node_put(np_dev); +out: + of_node_put(np); +} + +static void ath79_mii_ctrl_set_if(struct ag71xx *ag, unsigned int mii_if) +{ + u32 t; + + t = __raw_readl(ag->mii_base); + t &= ~(AR71XX_MII_CTRL_IF_MASK); + t |= (mii_if & AR71XX_MII_CTRL_IF_MASK); + __raw_writel(t, ag->mii_base); +} + +static void ath79_mii0_ctrl_set_if(struct ag71xx *ag) +{ + unsigned int mii_if; + + switch (ag->phy_if_mode) { + case PHY_INTERFACE_MODE_MII: + mii_if = AR71XX_MII0_CTRL_IF_MII; + break; + case PHY_INTERFACE_MODE_GMII: + mii_if = AR71XX_MII0_CTRL_IF_GMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + mii_if = AR71XX_MII0_CTRL_IF_RGMII; + break; + case PHY_INTERFACE_MODE_RMII: + mii_if = AR71XX_MII0_CTRL_IF_RMII; + break; + default: + WARN(1, "Impossible PHY mode defined.\n"); + return; + } + + ath79_mii_ctrl_set_if(ag, mii_if); +} + +static void ath79_mii1_ctrl_set_if(struct ag71xx *ag) +{ + unsigned int mii_if; + + switch (ag->phy_if_mode) { + case PHY_INTERFACE_MODE_RMII: + mii_if = AR71XX_MII1_CTRL_IF_RMII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + mii_if = AR71XX_MII1_CTRL_IF_RGMII; + break; + default: + WARN(1, "Impossible PHY mode defined.\n"); + return; + } + + ath79_mii_ctrl_set_if(ag, mii_if); +} + +static void ath79_mii_ctrl_set_speed(struct ag71xx *ag) +{ + unsigned int mii_speed; + u32 t; + + if (!ag->mii_base) + return; + + switch (ag->speed) { + case SPEED_10: + mii_speed = AR71XX_MII_CTRL_SPEED_10; + break; + case SPEED_100: + mii_speed = AR71XX_MII_CTRL_SPEED_100; + break; + case SPEED_1000: + mii_speed = AR71XX_MII_CTRL_SPEED_1000; + break; + default: + BUG(); + } + + t = __raw_readl(ag->mii_base); + t &= ~(AR71XX_MII_CTRL_SPEED_MASK << AR71XX_MII_CTRL_SPEED_SHIFT); + t |= mii_speed << AR71XX_MII_CTRL_SPEED_SHIFT; + __raw_writel(t, ag->mii_base); +} + +static void +__ag71xx_link_adjust(struct ag71xx *ag, bool update) +{ + struct device_node *np = ag->pdev->dev.of_node; + u32 cfg2; + u32 ifctl; + u32 fifo5; + + if (!ag->link && update) { + ag71xx_hw_stop(ag); + netif_carrier_off(ag->dev); + if (netif_msg_link(ag)) + pr_info("%s: link down\n", ag->dev->name); + return; + } + + if (!of_device_is_compatible(np, "qca,ar9130-eth") && + !of_device_is_compatible(np, "qca,ar7100-eth")) + ag71xx_fast_reset(ag); + + cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2); + cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX); + cfg2 |= (ag->duplex) ? 
MAC_CFG2_FDX : 0; + + ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL); + ifctl &= ~(MAC_IFCTL_SPEED); + + fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5); + fifo5 &= ~FIFO_CFG5_BM; + + switch (ag->speed) { + case SPEED_1000: + cfg2 |= MAC_CFG2_IF_1000; + fifo5 |= FIFO_CFG5_BM; + break; + case SPEED_100: + cfg2 |= MAC_CFG2_IF_10_100; + ifctl |= MAC_IFCTL_SPEED; + break; + case SPEED_10: + cfg2 |= MAC_CFG2_IF_10_100; + break; + default: + BUG(); + return; + } + + if (ag->tx_ring.desc_split) { + ag->fifodata[2] &= 0xffff; + ag->fifodata[2] |= ((2048 - ag->tx_ring.desc_split) / 4) << 16; + } + + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, ag->fifodata[2]); + + if (update) { + if (of_device_is_compatible(np, "qca,ar7100-eth") || + of_device_is_compatible(np, "qca,ar9130-eth")) { + ath79_set_pll(ag); + ath79_mii_ctrl_set_speed(ag); + } else if (of_device_is_compatible(np, "qca,ar7242-eth") || + of_device_is_compatible(np, "qca,ar9340-eth") || + of_device_is_compatible(np, "qca,qca9550-eth") || + of_device_is_compatible(np, "qca,qca9560-eth")) { + ath79_set_pllval(ag); + if (of_property_read_bool(np, "qca955x-sgmii-fixup")) + ag71xx_sgmii_init_qca955x(np); + } + } + + ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2); + ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5); + ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl); + + if (of_device_is_compatible(np, "qca,qca9530-eth") || + of_device_is_compatible(np, "qca,qca9560-eth")) { + /* + * The rx ring buffer can stall on small packets on QCA953x and + * QCA956x. Disabling the inline checksum engine fixes the stall. + * The wr, rr functions cannot be used since this hidden register + * is outside of the normal ag71xx register block. + */ + void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4); + if (dam) { + __raw_writel(__raw_readl(dam) & ~BIT(27), dam); + (void)__raw_readl(dam); + iounmap(dam); + } + } + + ag71xx_hw_start(ag); + + netif_carrier_on(ag->dev); + if (update && netif_msg_link(ag)) + pr_info("%s: link up (%sMbps/%s duplex)\n", + ag->dev->name, + ag71xx_speed_str(ag), + (DUPLEX_FULL == ag->duplex) ? 
"Full" : "Half"); + + ag71xx_dump_regs(ag); +} + +void ag71xx_link_adjust(struct ag71xx *ag) +{ + __ag71xx_link_adjust(ag, true); +} + +static int ag71xx_hw_enable(struct ag71xx *ag) +{ + int ret; + + ret = ag71xx_rings_init(ag); + if (ret) + return ret; + + napi_enable(&ag->napi); + ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma); + ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma); + netif_start_queue(ag->dev); + + return 0; +} + +static void ag71xx_hw_disable(struct ag71xx *ag) +{ + netif_stop_queue(ag->dev); + + ag71xx_hw_stop(ag); + ag71xx_dma_reset(ag); + + napi_disable(&ag->napi); + del_timer_sync(&ag->oom_timer); + + ag71xx_rings_cleanup(ag); +} + +static int ag71xx_open(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + unsigned int max_frame_len; + int ret; + + netif_carrier_off(dev); + max_frame_len = ag71xx_max_frame_len(dev->mtu); + ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN); + + /* setup max frame length */ + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len); + ag71xx_hw_set_macaddr(ag, dev->dev_addr); + + ret = ag71xx_hw_enable(ag); + if (ret) + goto err; + + phy_start(ag->phy_dev); + + return 0; + +err: + ag71xx_rings_cleanup(ag); + return ret; +} + +static int ag71xx_stop(struct net_device *dev) +{ + unsigned long flags; + struct ag71xx *ag = netdev_priv(dev); + + netif_carrier_off(dev); + phy_stop(ag->phy_dev); + + spin_lock_irqsave(&ag->lock, flags); + if (ag->link) { + ag->link = 0; + ag71xx_link_adjust(ag); + } + spin_unlock_irqrestore(&ag->lock, flags); + + ag71xx_hw_disable(ag); + + return 0; +} + +static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len) +{ + int i; + struct ag71xx_desc *desc; + int ring_mask = BIT(ring->order) - 1; + int ndesc = 0; + int split = ring->desc_split; + + if (!split) + split = len; + + while (len > 0) { + unsigned int cur_len = len; + + i = (ring->curr + ndesc) & ring_mask; + desc = ag71xx_ring_desc(ring, i); + + if (!ag71xx_desc_empty(desc)) + return -1; + + if (cur_len > split) { + cur_len = split; + + /* + * TX will hang if DMA transfers <= 4 bytes, + * make sure next segment is more than 4 bytes long. 
+ */ + if (len <= split + 4) + cur_len -= 4; + } + + desc->data = addr; + addr += cur_len; + len -= cur_len; + + if (len > 0) + cur_len |= DESC_MORE; + + /* prevent early tx attempt of this descriptor */ + if (!ndesc) + cur_len |= DESC_EMPTY; + + desc->ctrl = cur_len; + ndesc++; + } + + return ndesc; +} + +static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + struct ag71xx_ring *ring = &ag->tx_ring; + int ring_mask = BIT(ring->order) - 1; + int ring_size = BIT(ring->order); + struct ag71xx_desc *desc; + dma_addr_t dma_addr; + int i, n, ring_min; + + if (skb->len <= 4) { + DBG("%s: packet len is too small\n", ag->dev->name); + goto err_drop; + } + + dma_addr = dma_map_single(&ag->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + i = ring->curr & ring_mask; + desc = ag71xx_ring_desc(ring, i); + + /* setup descriptor fields */ + n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask); + if (n < 0) + goto err_drop_unmap; + + i = (ring->curr + n - 1) & ring_mask; + ring->buf[i].len = skb->len; + ring->buf[i].skb = skb; + + netdev_sent_queue(dev, skb->len); + + skb_tx_timestamp(skb); + + desc->ctrl &= ~DESC_EMPTY; + ring->curr += n; + + /* flush descriptor */ + wmb(); + + ring_min = 2; + if (ring->desc_split) + ring_min *= AG71XX_TX_RING_DS_PER_PKT; + + if (ring->curr - ring->dirty >= ring_size - ring_min) { + DBG("%s: tx queue full\n", dev->name); + netif_stop_queue(dev); + } + + DBG("%s: packet injected into TX queue\n", ag->dev->name); + + /* enable TX engine */ + ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE); + + return NETDEV_TX_OK; + +err_drop_unmap: + dma_unmap_single(&ag->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE); + +err_drop: + dev->stats.tx_dropped++; + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int ag71xx_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ag71xx *ag = netdev_priv(dev); + + + switch (cmd) { + case SIOCSIFHWADDR: + if (copy_from_user + (dev->dev_addr, ifr->ifr_data, sizeof(dev->dev_addr))) + return -EFAULT; + return 0; + + case SIOCGIFHWADDR: + if (copy_to_user + (ifr->ifr_data, dev->dev_addr, sizeof(dev->dev_addr))) + return -EFAULT; + return 0; + + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (ag->phy_dev == NULL) + break; + + return phy_mii_ioctl(ag->phy_dev, ifr, cmd); + + default: + break; + } + + return -EOPNOTSUPP; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) +static void ag71xx_oom_timer_handler(unsigned long data) +{ + struct net_device *dev = (struct net_device *) data; + struct ag71xx *ag = netdev_priv(dev); +#else +static void ag71xx_oom_timer_handler(struct timer_list *t) +{ + struct ag71xx *ag = from_timer(ag, t, oom_timer); +#endif + + napi_schedule(&ag->napi); +} + +static void ag71xx_tx_timeout(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + + if (netif_msg_tx_err(ag)) + pr_info("%s: tx timeout\n", ag->dev->name); + + schedule_delayed_work(&ag->restart_work, 1); +} + +static void ag71xx_restart_work_func(struct work_struct *work) +{ + struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work); + + rtnl_lock(); + ag71xx_hw_disable(ag); + ag71xx_hw_enable(ag); + if (ag->link) + __ag71xx_link_adjust(ag, false); + rtnl_unlock(); +} + +static bool ag71xx_check_dma_stuck(struct ag71xx *ag) +{ + unsigned long timestamp; + u32 rx_sm, tx_sm, rx_fd; + + timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start; + if (likely(time_before(jiffies, timestamp 
+ HZ/10))) + return false; + + if (!netif_carrier_ok(ag->dev)) + return false; + + rx_sm = ag71xx_rr(ag, AG71XX_REG_RX_SM); + if ((rx_sm & 0x7) == 0x3 && ((rx_sm >> 4) & 0x7) == 0x6) + return true; + + tx_sm = ag71xx_rr(ag, AG71XX_REG_TX_SM); + rx_fd = ag71xx_rr(ag, AG71XX_REG_FIFO_DEPTH); + if (((tx_sm >> 4) & 0x7) == 0 && ((rx_sm & 0x7) == 0) && + ((rx_sm >> 4) & 0x7) == 0 && rx_fd == 0) + return true; + + return false; +} + +static int ag71xx_tx_packets(struct ag71xx *ag, bool flush) +{ + struct ag71xx_ring *ring = &ag->tx_ring; + bool dma_stuck = false; + int ring_mask = BIT(ring->order) - 1; + int ring_size = BIT(ring->order); + int sent = 0; + int bytes_compl = 0; + int n = 0; + + DBG("%s: processing TX ring\n", ag->dev->name); + + while (ring->dirty + n != ring->curr) { + unsigned int i = (ring->dirty + n) & ring_mask; + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + struct sk_buff *skb = ring->buf[i].skb; + + if (!flush && !ag71xx_desc_empty(desc)) { + if (ag->tx_hang_workaround && + ag71xx_check_dma_stuck(ag)) { + schedule_delayed_work(&ag->restart_work, HZ / 2); + dma_stuck = true; + } + break; + } + + if (flush) + desc->ctrl |= DESC_EMPTY; + + n++; + if (!skb) + continue; + + dev_kfree_skb_any(skb); + ring->buf[i].skb = NULL; + + bytes_compl += ring->buf[i].len; + + sent++; + ring->dirty += n; + + while (n > 0) { + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS); + n--; + } + } + + DBG("%s: %d packets sent out\n", ag->dev->name, sent); + + if (!sent) + return 0; + + ag->dev->stats.tx_bytes += bytes_compl; + ag->dev->stats.tx_packets += sent; + + netdev_completed_queue(ag->dev, sent, bytes_compl); + if ((ring->curr - ring->dirty) < (ring_size * 3) / 4) + netif_wake_queue(ag->dev); + + if (!dma_stuck) + cancel_delayed_work(&ag->restart_work); + + return sent; +} + +static int ag71xx_rx_packets(struct ag71xx *ag, int limit) +{ + struct net_device *dev = ag->dev; + struct ag71xx_ring *ring = &ag->rx_ring; + unsigned int pktlen_mask = ag->desc_pktlen_mask; + unsigned int offset = ag->rx_buf_offset; + int ring_mask = BIT(ring->order) - 1; + int ring_size = BIT(ring->order); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) + struct list_head rx_list; + struct sk_buff *next; +#else + struct sk_buff_head queue; +#endif + struct sk_buff *skb; + int done = 0; + + DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n", + dev->name, limit, ring->curr, ring->dirty); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) + INIT_LIST_HEAD(&rx_list); +#else + skb_queue_head_init(&queue); +#endif + + while (done < limit) { + unsigned int i = ring->curr & ring_mask; + struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i); + int pktlen; + int err = 0; + + if (ag71xx_desc_empty(desc)) + break; + + if ((ring->dirty + ring_size) == ring->curr) { + ag71xx_assert(0); + break; + } + + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR); + + pktlen = desc->ctrl & pktlen_mask; + pktlen -= ETH_FCS_LEN; + + dma_unmap_single(&ag->pdev->dev, ring->buf[i].dma_addr, + ag->rx_buf_size, DMA_FROM_DEVICE); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += pktlen; + + skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag)); + if (!skb) { + skb_free_frag(ring->buf[i].rx_buf); + goto next; + } + + skb_reserve(skb, offset); + skb_put(skb, pktlen); + + if (err) { + dev->stats.rx_dropped++; + kfree_skb(skb); + } else { + skb->dev = dev; + skb->ip_summed = CHECKSUM_NONE; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) + list_add_tail(&skb->list, &rx_list); +#else + __skb_queue_tail(&queue, skb); +#endif + 
} + +next: + ring->buf[i].rx_buf = NULL; + done++; + + ring->curr++; + } + + ag71xx_ring_rx_refill(ag); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)) + list_for_each_entry_safe(skb, next, &rx_list, list) + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb_list(&rx_list); +#else + while ((skb = __skb_dequeue(&queue)) != NULL) { + skb->protocol = eth_type_trans(skb, dev); + netif_receive_skb(skb); + } +#endif + + DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n", + dev->name, ring->curr, ring->dirty, done); + + return done; +} + +static int ag71xx_poll(struct napi_struct *napi, int limit) +{ + struct ag71xx *ag = container_of(napi, struct ag71xx, napi); + struct net_device *dev = ag->dev; + struct ag71xx_ring *rx_ring = &ag->rx_ring; + int rx_ring_size = BIT(rx_ring->order); + unsigned long flags; + u32 status; + int tx_done; + int rx_done; + + tx_done = ag71xx_tx_packets(ag, false); + + DBG("%s: processing RX ring\n", dev->name); + rx_done = ag71xx_rx_packets(ag, limit); + + ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done); + + if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL) + goto oom; + + status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS); + if (unlikely(status & RX_STATUS_OF)) { + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF); + dev->stats.rx_fifo_errors++; + + /* restart RX */ + ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE); + } + + if (rx_done < limit) { + if (status & RX_STATUS_PR) + goto more; + + status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS); + if (status & TX_STATUS_PS) + goto more; + + DBG("%s: disable polling mode, rx=%d, tx=%d,limit=%d\n", + dev->name, rx_done, tx_done, limit); + + napi_complete(napi); + + /* enable interrupts */ + spin_lock_irqsave(&ag->lock, flags); + ag71xx_int_enable(ag, AG71XX_INT_POLL); + spin_unlock_irqrestore(&ag->lock, flags); + return rx_done; + } + +more: + DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n", + dev->name, rx_done, tx_done, limit); + return limit; + +oom: + if (netif_msg_rx_err(ag)) + pr_info("%s: out of memory\n", dev->name); + + mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL); + napi_complete(napi); + return 0; +} + +static irqreturn_t ag71xx_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct ag71xx *ag = netdev_priv(dev); + u32 status; + + status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS); + ag71xx_dump_intr(ag, "raw", status); + + if (unlikely(!status)) + return IRQ_NONE; + + if (unlikely(status & AG71XX_INT_ERR)) { + if (status & AG71XX_INT_TX_BE) { + ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_BE); + dev_err(&dev->dev, "TX BUS error\n"); + } + if (status & AG71XX_INT_RX_BE) { + ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_BE); + dev_err(&dev->dev, "RX BUS error\n"); + } + } + + if (likely(status & AG71XX_INT_POLL)) { + ag71xx_int_disable(ag, AG71XX_INT_POLL); + DBG("%s: enable polling mode\n", dev->name); + napi_schedule(&ag->napi); + } + + ag71xx_debugfs_update_int_stats(ag, status); + + return IRQ_HANDLED; +} + +static int ag71xx_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ag71xx *ag = netdev_priv(dev); + + dev->mtu = new_mtu; + ag71xx_wr(ag, AG71XX_REG_MAC_MFL, + ag71xx_max_frame_len(dev->mtu)); + + return 0; +} + +static const struct net_device_ops ag71xx_netdev_ops = { + .ndo_open = ag71xx_open, + .ndo_stop = ag71xx_stop, + .ndo_start_xmit = ag71xx_hard_start_xmit, + .ndo_do_ioctl = ag71xx_do_ioctl, + .ndo_tx_timeout = ag71xx_tx_timeout, + .ndo_change_mtu = ag71xx_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + 
.ndo_validate_addr = eth_validate_addr, +}; + +static int ag71xx_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct net_device *dev; + struct resource *res; + struct ag71xx *ag; + const void *mac_addr; + u32 max_frame_len; + int tx_size, err; + + if (!np) + return -ENODEV; + + dev = devm_alloc_etherdev(&pdev->dev, sizeof(*ag)); + if (!dev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + err = ag71xx_setup_gmac(np); + if (err) + return err; + + SET_NETDEV_DEV(dev, &pdev->dev); + + ag = netdev_priv(dev); + ag->pdev = pdev; + ag->dev = dev; + ag->msg_enable = netif_msg_init(ag71xx_msg_level, + AG71XX_DEFAULT_MSG_ENABLE); + spin_lock_init(&ag->lock); + + ag->mac_reset = devm_reset_control_get_exclusive(&pdev->dev, "mac"); + if (IS_ERR(ag->mac_reset)) { + dev_err(&pdev->dev, "missing mac reset\n"); + return PTR_ERR(ag->mac_reset); + } + + ag->mdio_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "mdio"); + + if (of_property_read_u32_array(np, "fifo-data", ag->fifodata, 3)) { + if (of_device_is_compatible(np, "qca,ar9130-eth") || + of_device_is_compatible(np, "qca,ar7100-eth")) { + ag->fifodata[0] = 0x0fff0000; + ag->fifodata[1] = 0x00001fff; + } else { + ag->fifodata[0] = 0x0010ffff; + ag->fifodata[1] = 0x015500aa; + ag->fifodata[2] = 0x01f00140; + } + if (of_device_is_compatible(np, "qca,ar9130-eth")) + ag->fifodata[2] = 0x00780fff; + else if (of_device_is_compatible(np, "qca,ar7100-eth")) + ag->fifodata[2] = 0x008001ff; + } + + if (of_property_read_u32_array(np, "pll-data", ag->plldata, 3)) + dev_dbg(&pdev->dev, "failed to read pll-data property\n"); + + if (of_property_read_u32_array(np, "pll-reg", ag->pllreg, 3)) + dev_dbg(&pdev->dev, "failed to read pll-reg property\n"); + + ag->pllregmap = syscon_regmap_lookup_by_phandle(np, "pll-handle"); + if (IS_ERR(ag->pllregmap)) { + dev_dbg(&pdev->dev, "failed to read pll-handle property\n"); + ag->pllregmap = NULL; + } + + ag->mac_base = devm_ioremap_nocache(&pdev->dev, res->start, + res->end - res->start + 1); + if (!ag->mac_base) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + ag->mii_base = devm_ioremap_nocache(&pdev->dev, res->start, + res->end - res->start + 1); + if (!ag->mii_base) + return -ENOMEM; + } + + dev->irq = platform_get_irq(pdev, 0); + err = devm_request_irq(&pdev->dev, dev->irq, ag71xx_interrupt, + 0x0, dev_name(&pdev->dev), dev); + if (err) { + dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq); + return err; + } + + dev->netdev_ops = &ag71xx_netdev_ops; + dev->ethtool_ops = &ag71xx_ethtool_ops; + + INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func); + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) + init_timer(&ag->oom_timer); + ag->oom_timer.data = (unsigned long) dev; + ag->oom_timer.function = ag71xx_oom_timer_handler; +#else + timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0); +#endif + + tx_size = AG71XX_TX_RING_SIZE_DEFAULT; + ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT); + + if (of_device_is_compatible(np, "qca,ar9340-eth") || + of_device_is_compatible(np, "qca,qca9530-eth") || + of_device_is_compatible(np, "qca,qca9550-eth") || + of_device_is_compatible(np, "qca,qca9560-eth")) + ag->desc_pktlen_mask = SZ_16K - 1; + else + ag->desc_pktlen_mask = SZ_4K - 1; + + if (ag->desc_pktlen_mask == SZ_16K - 1 && + !of_device_is_compatible(np, "qca,qca9550-eth") && + !of_device_is_compatible(np, "qca,qca9560-eth")) + 
max_frame_len = ag->desc_pktlen_mask; + else + max_frame_len = 1540; + + dev->min_mtu = 68; + dev->max_mtu = max_frame_len - ag71xx_max_frame_len(0); + + if (of_device_is_compatible(np, "qca,ar7240-eth") || + of_device_is_compatible(np, "qca,ar7241-eth") || + of_device_is_compatible(np, "qca,ar7242-eth") || + of_device_is_compatible(np, "qca,ar9330-eth") || + of_device_is_compatible(np, "qca,ar9340-eth") || + of_device_is_compatible(np, "qca,qca9530-eth") || + of_device_is_compatible(np, "qca,qca9550-eth") || + of_device_is_compatible(np, "qca,qca9560-eth")) + ag->tx_hang_workaround = 1; + + ag->rx_buf_offset = NET_SKB_PAD; + if (!of_device_is_compatible(np, "qca,ar7100-eth") && + !of_device_is_compatible(np, "qca,ar9130-eth")) + ag->rx_buf_offset += NET_IP_ALIGN; + + if (of_device_is_compatible(np, "qca,ar7100-eth")) { + ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT; + tx_size *= AG71XX_TX_RING_DS_PER_PKT; + } + ag->tx_ring.order = ag71xx_ring_size_order(tx_size); + + ag->stop_desc = dmam_alloc_coherent(&pdev->dev, + sizeof(struct ag71xx_desc), + &ag->stop_desc_dma, GFP_KERNEL); + if (!ag->stop_desc) + return -ENOMEM; + + ag->stop_desc->data = 0; + ag->stop_desc->ctrl = 0; + ag->stop_desc->next = (u32) ag->stop_desc_dma; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + memcpy(dev->dev_addr, mac_addr, ETH_ALEN); + if (!mac_addr || !is_valid_ether_addr(dev->dev_addr)) { + dev_err(&pdev->dev, "invalid MAC address, using random address\n"); + eth_random_addr(dev->dev_addr); + } + + ag->phy_if_mode = of_get_phy_mode(np); + if (ag->phy_if_mode < 0) { + dev_err(&pdev->dev, "missing phy-mode property in DT\n"); + return ag->phy_if_mode; + } + + if (of_property_read_u32(np, "qca,mac-idx", &ag->mac_idx)) + ag->mac_idx = -1; + if (ag->mii_base) + switch (ag->mac_idx) { + case 0: + ath79_mii0_ctrl_set_if(ag); + break; + case 1: + ath79_mii1_ctrl_set_if(ag); + break; + default: + break; + } + + netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT); + + ag71xx_dump_regs(ag); + + ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, 0); + + ag71xx_hw_init(ag); + + ag71xx_dump_regs(ag); + + /* + * populate current node to register mdio-bus as a subdevice. + * the mdio bus works independently on ar7241 and later chips + * and we need to load mdio1 before gmac0, which can be done + * by adding a "simple-mfd" compatible to gmac node. The + * following code checks OF_POPULATED_BUS flag before populating + * to avoid duplicated population. 
+ */ + if (!of_node_check_flag(np, OF_POPULATED_BUS)) { + err = of_platform_populate(np, NULL, NULL, &pdev->dev); + if (err) + return err; + } + + err = ag71xx_phy_connect(ag); + if (err) + return err; + + err = ag71xx_debugfs_init(ag); + if (err) + goto err_phy_disconnect; + + platform_set_drvdata(pdev, dev); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "unable to register net device\n"); + platform_set_drvdata(pdev, NULL); + ag71xx_debugfs_exit(ag); + goto err_phy_disconnect; + } + + pr_info("%s: Atheros AG71xx at 0x%08lx, irq %d, mode: %s\n", + dev->name, (unsigned long) ag->mac_base, dev->irq, + phy_modes(ag->phy_if_mode)); + + return 0; + +err_phy_disconnect: + ag71xx_phy_disconnect(ag); + return err; +} + +static int ag71xx_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct ag71xx *ag; + + if (!dev) + return 0; + + ag = netdev_priv(dev); + ag71xx_debugfs_exit(ag); + ag71xx_phy_disconnect(ag); + unregister_netdev(dev); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id ag71xx_match[] = { + { .compatible = "qca,ar7100-eth" }, + { .compatible = "qca,ar7240-eth" }, + { .compatible = "qca,ar7241-eth" }, + { .compatible = "qca,ar7242-eth" }, + { .compatible = "qca,ar9130-eth" }, + { .compatible = "qca,ar9330-eth" }, + { .compatible = "qca,ar9340-eth" }, + { .compatible = "qca,qca9530-eth" }, + { .compatible = "qca,qca9550-eth" }, + { .compatible = "qca,qca9560-eth" }, + {} +}; + +static struct platform_driver ag71xx_driver = { + .probe = ag71xx_probe, + .remove = ag71xx_remove, + .driver = { + .name = AG71XX_DRV_NAME, + .of_match_table = ag71xx_match, + } +}; + +static int __init ag71xx_module_init(void) +{ + int ret; + + ret = ag71xx_debugfs_root_init(); + if (ret) + goto err_out; + + ret = platform_driver_register(&ag71xx_driver); + if (ret) + goto err_debugfs_exit; + + return 0; + +err_debugfs_exit: + ag71xx_debugfs_root_exit(); +err_out: + return ret; +} + +static void __exit ag71xx_module_exit(void) +{ + platform_driver_unregister(&ag71xx_driver); + ag71xx_debugfs_root_exit(); +} + +module_init(ag71xx_module_init); +module_exit(ag71xx_module_exit); + +MODULE_AUTHOR("Gabor Juhos "); +MODULE_AUTHOR("Imre Kaloz "); +MODULE_AUTHOR("Felix Fietkau "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" AG71XX_DRV_NAME); diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c new file mode 100644 index 0000000000..a58ee3346b --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_mdio.c @@ -0,0 +1,254 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +#include +#include "ag71xx.h" + +#define AG71XX_MDIO_RETRY 1000 +#define AG71XX_MDIO_DELAY 5 + +static int bus_count; + +static int ag71xx_mdio_wait_busy(struct ag71xx_mdio *am) +{ + int i; + + for (i = 0; i < AG71XX_MDIO_RETRY; i++) { + u32 busy; + + udelay(AG71XX_MDIO_DELAY); + + regmap_read(am->mii_regmap, AG71XX_REG_MII_IND, &busy); + if (!busy) + return 0; + + udelay(AG71XX_MDIO_DELAY); + } + + pr_err("%s: MDIO operation timed out\n", am->mii_bus->name); + + return -ETIMEDOUT; +} + +static int ag71xx_mdio_mii_read(struct mii_bus *bus, int addr, int reg) +{ + struct ag71xx_mdio *am = bus->priv; + int err; + int ret; + + err = ag71xx_mdio_wait_busy(am); + if (err) + return 0xffff; + + regmap_write(am->mii_regmap, AG71XX_REG_MII_CMD, MII_CMD_WRITE); + regmap_write(am->mii_regmap, AG71XX_REG_MII_ADDR, + ((addr & 0xff) << MII_ADDR_SHIFT) | (reg & 0xff)); + regmap_write(am->mii_regmap, AG71XX_REG_MII_CMD, MII_CMD_READ); + + err = ag71xx_mdio_wait_busy(am); + if (err) + return 0xffff; + + regmap_read(am->mii_regmap, AG71XX_REG_MII_STATUS, &ret); + ret &= 0xffff; + regmap_write(am->mii_regmap, AG71XX_REG_MII_CMD, MII_CMD_WRITE); + + DBG("mii_read: addr=%04x, reg=%04x, value=%04x\n", addr, reg, ret); + + return ret; +} + +static int ag71xx_mdio_mii_write(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct ag71xx_mdio *am = bus->priv; + + DBG("mii_write: addr=%04x, reg=%04x, value=%04x\n", addr, reg, val); + + regmap_write(am->mii_regmap, AG71XX_REG_MII_ADDR, + ((addr & 0xff) << MII_ADDR_SHIFT) | (reg & 0xff)); + regmap_write(am->mii_regmap, AG71XX_REG_MII_CTRL, val); + + ag71xx_mdio_wait_busy(am); + + return 0; +} + +static const u32 ar71xx_mdio_div_table[] = { + 4, 4, 6, 8, 10, 14, 20, 28, +}; + +static const u32 ar7240_mdio_div_table[] = { + 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96, +}; + +static const u32 ar933x_mdio_div_table[] = { + 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98, +}; + +static int ag71xx_mdio_get_divider(struct device_node *np, u32 *div) +{ + struct clk *ref_clk = of_clk_get(np, 0); + unsigned long ref_clock; + u32 mdio_clock; + const u32 *table; + int ndivs, i; + + if (IS_ERR(ref_clk)) + return -EINVAL; + + ref_clock = clk_get_rate(ref_clk); + clk_put(ref_clk); + + if(of_property_read_u32(np, "qca,mdio-max-frequency", &mdio_clock)) { + if (of_property_read_bool(np, "builtin-switch")) + mdio_clock = 5000000; + else + mdio_clock = 2000000; + } + + if (of_device_is_compatible(np, "qca,ar9330-mdio") || + of_device_is_compatible(np, "qca,ar9340-mdio")) { + table = ar933x_mdio_div_table; + ndivs = ARRAY_SIZE(ar933x_mdio_div_table); + } else if (of_device_is_compatible(np, "qca,ar7240-mdio")) { + table = ar7240_mdio_div_table; + ndivs = ARRAY_SIZE(ar7240_mdio_div_table); + } else { + table = ar71xx_mdio_div_table; + ndivs = ARRAY_SIZE(ar71xx_mdio_div_table); + } + + for (i = 0; i < ndivs; i++) { + unsigned long t; + + t = ref_clock / table[i]; + if (t <= mdio_clock) { + *div = i; + return 0; + } + } + + return -ENOENT; +} + +static int ag71xx_mdio_reset(struct mii_bus *bus) +{ + struct device_node *np = bus->dev.of_node; + struct ag71xx_mdio *am = bus->priv; + bool builtin_switch; + u32 t; + + builtin_switch = of_property_read_bool(np, "builtin-switch"); + + if (ag71xx_mdio_get_divider(np, &t)) { + if (of_device_is_compatible(np, "qca,ar9340-mdio")) + t = MII_CFG_CLK_DIV_58; + else if (builtin_switch) + t = MII_CFG_CLK_DIV_10; + else + t = MII_CFG_CLK_DIV_28; + } + + regmap_write(am->mii_regmap, AG71XX_REG_MII_CFG, t | 
MII_CFG_RESET); + udelay(100); + + regmap_write(am->mii_regmap, AG71XX_REG_MII_CFG, t); + udelay(100); + + return 0; +} + +static int ag71xx_mdio_probe(struct platform_device *pdev) +{ + struct device *amdev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct ag71xx_mdio *am; + struct mii_bus *mii_bus; + bool builtin_switch; + int i, err; + + am = devm_kzalloc(amdev, sizeof(*am), GFP_KERNEL); + if (!am) + return -ENOMEM; + + am->mii_regmap = syscon_regmap_lookup_by_phandle(np, "regmap"); + if (IS_ERR(am->mii_regmap)) + return PTR_ERR(am->mii_regmap); + + mii_bus = devm_mdiobus_alloc(amdev); + if (!mii_bus) + return -ENOMEM; + + am->mdio_reset = devm_reset_control_get_exclusive(amdev, "mdio"); + builtin_switch = of_property_read_bool(np, "builtin-switch"); + + mii_bus->name = "ag71xx_mdio"; + mii_bus->read = ag71xx_mdio_mii_read; + mii_bus->write = ag71xx_mdio_mii_write; + mii_bus->reset = ag71xx_mdio_reset; + mii_bus->priv = am; + mii_bus->parent = amdev; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s.%d", np->name, bus_count++); + + if (!builtin_switch && + of_property_read_u32(np, "phy-mask", &mii_bus->phy_mask)) + mii_bus->phy_mask = 0; + + for (i = 0; i < PHY_MAX_ADDR; i++) + mii_bus->irq[i] = PHY_POLL; + + if (!IS_ERR(am->mdio_reset)) { + reset_control_assert(am->mdio_reset); + msleep(100); + reset_control_deassert(am->mdio_reset); + msleep(200); + } + + err = of_mdiobus_register(mii_bus, np); + if (err) + return err; + + am->mii_bus = mii_bus; + platform_set_drvdata(pdev, am); + + return 0; +} + +static int ag71xx_mdio_remove(struct platform_device *pdev) +{ + struct ag71xx_mdio *am = platform_get_drvdata(pdev); + + mdiobus_unregister(am->mii_bus); + return 0; +} + +static const struct of_device_id ag71xx_mdio_match[] = { + { .compatible = "qca,ar7240-mdio" }, + { .compatible = "qca,ar9330-mdio" }, + { .compatible = "qca,ar9340-mdio" }, + { .compatible = "qca,ath79-mdio" }, + {} +}; + +static struct platform_driver ag71xx_mdio_driver = { + .probe = ag71xx_mdio_probe, + .remove = ag71xx_mdio_remove, + .driver = { + .name = "ag71xx-mdio", + .of_match_table = ag71xx_mdio_match, + } +}; + +module_platform_driver(ag71xx_mdio_driver); +MODULE_LICENSE("GPL"); diff --git a/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c new file mode 100644 index 0000000000..ac1af26860 --- /dev/null +++ b/target/linux/ath79/files-4.19/drivers/net/ethernet/atheros/ag71xx/ag71xx_phy.c @@ -0,0 +1,92 @@ +/* + * Atheros AR71xx built-in ethernet mac driver + * + * Copyright (C) 2008-2010 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * Based on Atheros' AG7100 driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ */ + +#include +#include "ag71xx.h" + +static void ag71xx_phy_link_adjust(struct net_device *dev) +{ + struct ag71xx *ag = netdev_priv(dev); + struct phy_device *phydev = ag->phy_dev; + unsigned long flags; + int status_change = 0; + + spin_lock_irqsave(&ag->lock, flags); + + if (phydev->link) { + if (ag->duplex != phydev->duplex + || ag->speed != phydev->speed) { + status_change = 1; + } + } + + if (phydev->link != ag->link) + status_change = 1; + + ag->link = phydev->link; + ag->duplex = phydev->duplex; + ag->speed = phydev->speed; + + if (status_change) + ag71xx_link_adjust(ag); + + spin_unlock_irqrestore(&ag->lock, flags); +} + +int ag71xx_phy_connect(struct ag71xx *ag) +{ + struct device_node *np = ag->pdev->dev.of_node; + struct device_node *phy_node; + int ret; + + if (of_phy_is_fixed_link(np)) { + ret = of_phy_register_fixed_link(np); + if (ret < 0) { + dev_err(&ag->pdev->dev, + "Failed to register fixed PHY link: %d\n", ret); + return ret; + } + + phy_node = of_node_get(np); + } else { + phy_node = of_parse_phandle(np, "phy-handle", 0); + } + + if (!phy_node) { + dev_err(&ag->pdev->dev, + "Could not find valid phy node\n"); + return -ENODEV; + } + + ag->phy_dev = of_phy_connect(ag->dev, phy_node, ag71xx_phy_link_adjust, + 0, ag->phy_if_mode); + + of_node_put(phy_node); + + if (!ag->phy_dev) { + dev_err(&ag->pdev->dev, + "Could not connect to PHY device. Deferring probe.\n"); + return -EPROBE_DEFER; + } + + dev_info(&ag->pdev->dev, "connected to PHY at %s [uid=%08x, driver=%s]\n", + phydev_name(ag->phy_dev), + ag->phy_dev->phy_id, ag->phy_dev->drv->name); + + return 0; +} + +void ag71xx_phy_disconnect(struct ag71xx *ag) +{ + phy_disconnect(ag->phy_dev); +} -- cgit v1.2.3