path: root/backends/verilog/verilog_backend.cc
Commit message | Author | Age | Files | Lines
* Bugfix in partial mem write handling in verilog back-end | Clifford Wolf | 2016-08-20 | 1 | -42/+26
* Added missing support for mem read enable ports to verilog back-end | Clifford Wolf | 2016-08-18 | 1 | -6/+14
* Fixed upto handling in verilog back-end | Clifford Wolf | 2016-08-15 | 1 | -0/+3
* Added "write_verilog -defparam" | Clifford Wolf | 2016-07-30 | 1 | -2/+21
* Added "write_verilog -nodec -nostr" | Clifford Wolf | 2016-07-30 | 1 | -4/+27
* Added "yosys -D" feature | Clifford Wolf | 2016-04-21 | 1 | -1/+1
* Bugfix in write_verilog for RTLIL processes | Clifford Wolf | 2016-03-14 | 1 | -9/+20
* Bugfixes in writing of memories as Verilog | Clifford Wolf | 2015-09-25 | 1 | -7/+8
* Another block of spelling fixes | Larry Doolittle | 2015-08-14 | 1 | -2/+2
* Re-created command-reference-manual.tex, copied some doc fixes to online help | Clifford Wolf | 2015-08-14 | 1 | -3/+3
* Spell check (by Larry Doolittle) | Clifford Wolf | 2015-08-14 | 1 | -1/+1
* Fixed trailing whitespaces | Clifford Wolf | 2015-07-02 | 1 | -4/+4
* $mem cell in verilog backend : grouped writes by clock | luke whittlesey | 2015-06-08 | 1 | -54/+108
* Bug fix in $mem verilog backend + changed tests/bram flow of make test. | luke whittlesey | 2015-06-04 | 1 | -14/+16
* Some fixes for $mem in verilog back-end | Clifford Wolf | 2015-05-20 | 1 | -19/+23
* Merge pull request #63 from wluker/verilog-backend-mem | Clifford Wolf | 2015-05-11 | 1 | -1/+2
|\
| * Fixed bug in $mem cell verilog code generation. | luke whittlesey | 2015-05-11 | 1 | -11/+12
* | Disabled broken $mem support in verilog backend | Clifford Wolf | 2015-05-10 | 1 | -11/+11
|/
* Made changes recommended by Clifford Wolf ... | luke whittlesey | 2015-05-10 | 1 | -22/+11
* Verilog backend for $mem cells should now be able to handle different | luke whittlesey | 2015-05-08 | 1 | -50/+105
* Added support for $mem cells in the verilog backend. | luke whittlesey | 2015-05-07 | 1 | -1/+120
* Minor fixes in handling of "init" attribute | Clifford Wolf | 2015-04-09 | 1 | -7/+7
* Added "init" attribute support to verilog backend | Clifford Wolf | 2015-04-04 | 1 | -0/+5
* Added Verilog backend $dffsr support | Clifford Wolf | 2015-03-18 | 1 | -1/+51
* Fixed "write_verilog -attr2comment" handling of "*/" in strings | Clifford Wolf | 2015-02-13 | 1 | -2/+4
* Added dict/pool.sort() | Clifford Wolf | 2015-01-24 | 1 | -0/+2
* Cosmetic changes in verilog output format | Clifford Wolf | 2015-01-02 | 1 | -5/+10
* Replaced std::unordered_map as implementation for Yosys::dict | Clifford Wolf | 2014-12-26 | 1 | -25/+25
* Added Yosys::{dict,nodict,vector} container types | Clifford Wolf | 2014-12-26 | 1 | -1/+1
* Added $dffe support to write_verilog | Clifford Wolf | 2014-12-20 | 1 | -3/+14
* Fixed generation of temp names in verilog backend | Clifford Wolf | 2014-11-07 | 1 | -4/+5
* Renamed SIZE() to GetSize() because of name collision on Win32 | Clifford Wolf | 2014-10-10 | 1 | -1/+1
* namespace Yosys | Clifford Wolf | 2014-09-27 | 1 | -4/+3
* Various bug fixes (related to $macc model testing) | Clifford Wolf | 2014-09-06 | 1 | -1/+2
* Removed $bu0 cell type | Clifford Wolf | 2014-09-04 | 1 | -1/+0
* Using $pos models for $bu0 | Clifford Wolf | 2014-09-03 | 1 | -16/+1
* Changed backend-api from FILE to std::ostream | Clifford Wolf | 2014-08-23 | 1 | -230/+230
* Fixed AOI/OAI expr handling in verilog backend | Clifford Wolf | 2014-08-16 | 1 | -4/+4
* Added additional gate types: $_NAND_ $_NOR_ $_XNOR_ $_AOI3_ $_OAI3_ $_AOI4_ $... | Clifford Wolf | 2014-08-16 | 1 | -4/+40
* Renamed $_INV_ cell type to $_NOT_ | Clifford Wolf | 2014-08-15 | 1 | -1/+1
* Refactoring of CellType class | Clifford Wolf | 2014-08-14 | 1 | -10/+28
* Be more conservative with printing decimal numbers in verilog backend | Clifford Wolf | 2014-08-02 | 1 | -2/+3
* Improved verilog output for ordinary $mux cells | Clifford Wolf | 2014-08-02 | 1 | -3/+19
* More cleanups related to RTLIL::IdString usage | Clifford Wolf | 2014-08-02 | 1 | -6/+6
* Renamed port access function on RTLIL::Cell, added param access functions | Clifford Wolf | 2014-07-31 | 1 | -40/+40
* Added support for "upto" wires to Verilog front- and back-end | Clifford Wolf | 2014-07-28 | 1 | -9/+22
* Using log_assert() instead of assert() | Clifford Wolf | 2014-07-28 | 1 | -3/+2
* Refactoring: Renamed RTLIL::Design::modules to modules_ | Clifford Wolf | 2014-07-27 | 1 | -1/+1
* Refactoring: Renamed RTLIL::Module::cells to cells_ | Clifford Wolf | 2014-07-27 | 1 | -3/+3
* Refactoring: Renamed RTLIL::Module::wires to wires_ | Clifford Wolf | 2014-07-27 | 1 | -4/+4
From c7b0e371e1c5e2f6258decfeb948e0dda7109afc Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Wed, 24 Oct 2012 19:50:30 +0200
Subject: [PATCH 116/123] NET: MIPS: lantiq: update etop driver for devicetree
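
Convert the etop driver to probing via devicetree: the two DMA irqs, the
MAC address and the PHY mode are read from the device node, the PPE,
switch and EPHY clocks are taken from the clk framework, the two static
DMA channels are replaced by dedicated tx/rx channel structures, and the
embedded gigabit switch MDIO bus (AR9) and the internal EPHY (ASE) are
supported.

A minimal sketch of a matching device node is shown below; the register
range, interrupt numbers and MAC address are illustrative placeholders,
not values taken from this patch:

	ethernet@e180000 {
		compatible = "lantiq,etop-xway";
		reg = <0xe180000 0x40000>;	/* AR9 boards add a second range for the gbit switch */
		interrupts = <73 78>;		/* tx irq, rx irq */
		phy-mode = "rmii";
		mac-address = [ 00 11 22 33 44 55 ];
	};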

---
 drivers/net/ethernet/lantiq_etop.c |  470 +++++++++++++++++++++++++-----------
 1 file changed, 333 insertions(+), 137 deletions(-)

diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 003c5bc..dc5457a 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -12,7 +12,7 @@
  *   along with this program; if not, write to the Free Software
  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
  *
- *   Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ *   Copyright (C) 2011-12 John Crispin <blogic@openwrt.org>
  */
 
 #include <linux/kernel.h>
@@ -36,6 +36,10 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/of_net.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 #include <asm/checksum.h>
 
@@ -71,25 +75,56 @@
 #define ETOP_MII_REVERSE	0xe
 #define ETOP_PLEN_UNDER		0x40
 #define ETOP_CGEN		0x800
-
-/* use 2 static channels for TX/RX */
-#define LTQ_ETOP_TX_CHANNEL	1
-#define LTQ_ETOP_RX_CHANNEL	6
-#define IS_TX(x)		(x == LTQ_ETOP_TX_CHANNEL)
-#define IS_RX(x)		(x == LTQ_ETOP_RX_CHANNEL)
-
+#define ETOP_CFG_MII0		0x01
+
+#define LTQ_GBIT_MDIO_CTL	0xCC
+#define LTQ_GBIT_MDIO_DATA	0xd0
+#define LTQ_GBIT_GCTL0		0x68
+#define LTQ_GBIT_PMAC_HD_CTL	0x8c
+#define LTQ_GBIT_P0_CTL		0x4
+#define LTQ_GBIT_PMAC_RX_IPG	0xa8
+
+#define PMAC_HD_CTL_AS		(1 << 19)
+#define PMAC_HD_CTL_RXSH	(1 << 22)
+
+/* Switch Enable (0=disable, 1=enable) */
+#define GCTL0_SE		0x80000000
+/* Disable MDIO auto polling (0=disable, 1=enable) */
+#define PX_CTL_DMDIO		0x00400000
+
+/* register information for the gbit's MDIO bus */
+#define MDIO_XR9_REQUEST	0x00008000
+#define MDIO_XR9_READ		0x00000800
+#define MDIO_XR9_WRITE		0x00000400
+#define MDIO_XR9_REG_MASK	0x1f
+#define MDIO_XR9_ADDR_MASK	0x1f
+#define MDIO_XR9_RD_MASK	0xffff
+#define MDIO_XR9_REG_OFFSET	0
+#define MDIO_XR9_ADDR_OFFSET	5
+#define MDIO_XR9_WR_OFFSET	16
+
+#define LTQ_DMA_ETOP	((of_machine_is_compatible("lantiq,ase")) ? \
+			(INT_NUM_IM3_IRL0) : (INT_NUM_IM2_IRL0))
+
+/* the newer xway socs have an embedded 3/7 port gbit multiplexer */
 #define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
 #define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
 #define ltq_etop_w32_mask(x, y, z)	\
 		ltq_w32_mask(x, y, ltq_etop_membase + (z))
 
-#define DRV_VERSION	"1.0"
+#define ltq_gbit_r32(x)		ltq_r32(ltq_gbit_membase + (x))
+#define ltq_gbit_w32(x, y)	ltq_w32(x, ltq_gbit_membase + (y))
+#define ltq_gbit_w32_mask(x, y, z)	\
+		ltq_w32_mask(x, y, ltq_gbit_membase + (z))
+
+#define DRV_VERSION	"1.2"
 
 static void __iomem *ltq_etop_membase;
+static void __iomem *ltq_gbit_membase;
 
 struct ltq_etop_chan {
-	int idx;
 	int tx_free;
+	int irq;
 	struct net_device *netdev;
 	struct napi_struct napi;
 	struct ltq_dma_channel dma;
@@ -99,22 +134,35 @@ struct ltq_etop_chan {
 struct ltq_etop_priv {
 	struct net_device *netdev;
 	struct platform_device *pdev;
-	struct ltq_eth_data *pldata;
 	struct resource *res;
 
 	struct mii_bus *mii_bus;
 	struct phy_device *phydev;
 
-	struct ltq_etop_chan ch[MAX_DMA_CHAN];
-	int tx_free[MAX_DMA_CHAN >> 1];
+	struct ltq_etop_chan txch;
+	struct ltq_etop_chan rxch;
+
+	int tx_irq;
+	int rx_irq;
+
+	const void *mac;
+	int mii_mode;
 
 	spinlock_t lock;
+
+	struct clk *clk_ppe;
+	struct clk *clk_switch;
+	struct clk *clk_ephy;
+	struct clk *clk_ephycgu;
 };
 
+static int ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr,
+				int phy_reg, u16 phy_data);
+
 static int
 ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
 {
-	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
+	ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
 	if (!ch->skb[ch->dma.desc])
 		return -ENOMEM;
 	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
@@ -149,8 +197,11 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	skb_put(skb, len);
+	skb->dev = ch->netdev;
 	skb->protocol = eth_type_trans(skb, ch->netdev);
 	netif_receive_skb(skb);
+	ch->netdev->stats.rx_packets++;
+	ch->netdev->stats.rx_bytes += len;
 }
 
 static int
@@ -158,8 +209,10 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
 {
 	struct ltq_etop_chan *ch = container_of(napi,
 				struct ltq_etop_chan, napi);
+	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
 	int rx = 0;
 	int complete = 0;
+	unsigned long flags;
 
 	while ((rx < budget) && !complete) {
 		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
@@ -173,7 +226,9 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
 	}
 	if (complete || !rx) {
 		napi_complete(&ch->napi);
+		spin_lock_irqsave(&priv->lock, flags);
 		ltq_dma_ack_irq(&ch->dma);
+		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 	return rx;
 }
@@ -185,12 +240,14 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
 		container_of(napi, struct ltq_etop_chan, napi);
 	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
 	struct netdev_queue *txq =
-		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
+		netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
 	while ((ch->dma.desc_base[ch->tx_free].ctl &
 			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
+		ch->netdev->stats.tx_packets++;
+		ch->netdev->stats.tx_bytes += ch->skb[ch->tx_free]->len;
 		dev_kfree_skb_any(ch->skb[ch->tx_free]);
 		ch->skb[ch->tx_free] = NULL;
 		memset(&ch->dma.desc_base[ch->tx_free], 0,
@@ -203,7 +260,9 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
 	if (netif_tx_queue_stopped(txq))
 		netif_tx_start_queue(txq);
 	napi_complete(&ch->napi);
+	spin_lock_irqsave(&priv->lock, flags);
 	ltq_dma_ack_irq(&ch->dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
 	return 1;
 }
 
@@ -211,9 +270,10 @@ static irqreturn_t
 ltq_etop_dma_irq(int irq, void *_priv)
 {
 	struct ltq_etop_priv *priv = _priv;
-	int ch = irq - LTQ_DMA_CH0_INT;
-
-	napi_schedule(&priv->ch[ch].napi);
+	if (irq == priv->txch.dma.irq)
+		napi_schedule(&priv->txch.napi);
+	else
+		napi_schedule(&priv->rxch.napi);
 	return IRQ_HANDLED;
 }
 
@@ -225,7 +285,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
 	ltq_dma_free(&ch->dma);
 	if (ch->dma.irq)
 		free_irq(ch->dma.irq, priv);
-	if (IS_RX(ch->idx)) {
+	if (ch == &priv->txch) {
 		int desc;
 		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
@@ -236,23 +296,55 @@ static void
 ltq_etop_hw_exit(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
 
-	ltq_pmu_disable(PMU_PPE);
-	for (i = 0; i < MAX_DMA_CHAN; i++)
-		if (IS_TX(i) || IS_RX(i))
-			ltq_etop_free_channel(dev, &priv->ch[i]);
+	clk_disable(priv->clk_ppe);
+
+	if (of_machine_is_compatible("lantiq,ar9"))
+		clk_disable(priv->clk_switch);
+
+	if (of_machine_is_compatible("lantiq,ase")) {
+		clk_disable(priv->clk_ephy);
+		clk_disable(priv->clk_ephycgu);
+	}
+
+	ltq_etop_free_channel(dev, &priv->txch);
+	ltq_etop_free_channel(dev, &priv->rxch);
+}
+
+static void
+ltq_etop_gbit_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+
+	clk_enable(priv->clk_switch);
+
+	ltq_gbit_w32_mask(0, GCTL0_SE, LTQ_GBIT_GCTL0);
+	/** Disable MDIO auto polling mode */
+	ltq_gbit_w32_mask(0, PX_CTL_DMDIO, LTQ_GBIT_P0_CTL);
+	/* set 1522 packet size */
+	ltq_gbit_w32_mask(0x300, 0, LTQ_GBIT_GCTL0);
+	/* disable pmac & dmac headers */
+	ltq_gbit_w32_mask(PMAC_HD_CTL_AS | PMAC_HD_CTL_RXSH, 0,
+		LTQ_GBIT_PMAC_HD_CTL);
+	/* Due to traffic halt when burst length 8,
+		replace default IPG value with 0x3B */
+	ltq_gbit_w32(0x3B, LTQ_GBIT_PMAC_RX_IPG);
 }
 
 static int
 ltq_etop_hw_init(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
 
-	ltq_pmu_enable(PMU_PPE);
+	clk_enable(priv->clk_ppe);
 
-	switch (priv->pldata->mii_mode) {
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		ltq_etop_gbit_init(dev);
+		/* force the etops link to the gbit to MII */
+		priv->mii_mode = PHY_INTERFACE_MODE_MII;
+	}
+
+	switch (priv->mii_mode) {
 	case PHY_INTERFACE_MODE_RMII:
 		ltq_etop_w32_mask(ETOP_MII_MASK,
 			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
@@ -264,39 +356,68 @@ ltq_etop_hw_init(struct net_device *dev)
 		break;
 
 	default:
+		if (of_machine_is_compatible("lantiq,ase")) {
+			clk_enable(priv->clk_ephy);
+			/* disable external MII */
+			ltq_etop_w32_mask(0, ETOP_CFG_MII0, LTQ_ETOP_CFG);
+			/* enable clock for internal PHY */
+			clk_enable(priv->clk_ephycgu);
+			/* we need to write this magic to the internal phy to
+			   make it work */
+			ltq_etop_mdio_wr(NULL, 0x8, 0x12, 0xC020);
+			pr_info("Selected EPHY mode\n");
+			break;
+		}
 		netdev_err(dev, "unknown mii mode %d\n",
-			priv->pldata->mii_mode);
+			priv->mii_mode);
 		return -ENOTSUPP;
 	}
 
 	/* enable crc generation */
 	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
 
+	return 0;
+}
+
+static int
+ltq_etop_dma_init(struct net_device *dev)
+{
+	struct ltq_etop_priv *priv = netdev_priv(dev);
+	int tx = priv->tx_irq - LTQ_DMA_ETOP;
+	int rx = priv->rx_irq - LTQ_DMA_ETOP;
+	int err;
+
 	ltq_dma_init_port(DMA_PORT_ETOP);
 
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		int irq = LTQ_DMA_CH0_INT + i;
-		struct ltq_etop_chan *ch = &priv->ch[i];
-
-		ch->idx = ch->dma.nr = i;
-
-		if (IS_TX(i)) {
-			ltq_dma_alloc_tx(&ch->dma);
-			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
-				"etop_tx", priv);
-		} else if (IS_RX(i)) {
-			ltq_dma_alloc_rx(&ch->dma);
-			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
-					ch->dma.desc++)
-				if (ltq_etop_alloc_skb(ch))
-					return -ENOMEM;
-			ch->dma.desc = 0;
-			request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
-				"etop_rx", priv);
+	priv->txch.dma.nr = tx;
+	ltq_dma_alloc_tx(&priv->txch.dma);
+	err = request_irq(priv->tx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
+		"eth_tx", priv);
+	if (err) {
+		netdev_err(dev, "failed to allocate tx irq\n");
+		goto err_out;
+	}
+	priv->txch.dma.irq = priv->tx_irq;
+
+	priv->rxch.dma.nr = rx;
+	ltq_dma_alloc_rx(&priv->rxch.dma);
+	for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
+			priv->rxch.dma.desc++) {
+		if (ltq_etop_alloc_skb(&priv->rxch)) {
+			netdev_err(dev, "failed to allocate skbs\n");
+			err = -ENOMEM;
+			goto err_out;
 		}
-		ch->dma.irq = irq;
 	}
-	return 0;
+	priv->rxch.dma.desc = 0;
+	err = request_irq(priv->rx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
+		"eth_rx", priv);
+	if (err)
+		netdev_err(dev, "failed to allocate rx irq\n");
+	else
+		priv->rxch.dma.irq = priv->rx_irq;
+err_out:
+	return err;
 }
 
 static void
@@ -312,7 +433,10 @@ ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 
-	return phy_ethtool_gset(priv->phydev, cmd);
+	if (priv->phydev)
+		return phy_ethtool_gset(priv->phydev, cmd);
+	else
+		return 0;
 }
 
 static int
@@ -320,7 +444,10 @@ ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 
-	return phy_ethtool_sset(priv->phydev, cmd);
+	if (priv->phydev)
+		return phy_ethtool_sset(priv->phydev, cmd);
+	else
+		return 0;
 }
 
 static int
@@ -328,7 +455,10 @@ ltq_etop_nway_reset(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 
-	return phy_start_aneg(priv->phydev);
+	if (priv->phydev)
+		return phy_start_aneg(priv->phydev);
+	else
+		return 0;
 }
 
 static const struct ethtool_ops ltq_etop_ethtool_ops = {
@@ -339,6 +469,39 @@ static const struct ethtool_ops ltq_etop_ethtool_ops = {
 };
 
 static int
+ltq_etop_mdio_wr_xr9(struct mii_bus *bus, int phy_addr,
+		int phy_reg, u16 phy_data)
+{
+	u32 val = MDIO_XR9_REQUEST | MDIO_XR9_WRITE |
+		(phy_data << MDIO_XR9_WR_OFFSET) |
+		((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
+		((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
+
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	return 0;
+}
+
+static int
+ltq_etop_mdio_rd_xr9(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+	u32 val = MDIO_XR9_REQUEST | MDIO_XR9_READ |
+		((phy_addr & MDIO_XR9_ADDR_MASK) << MDIO_XR9_ADDR_OFFSET) |
+		((phy_reg & MDIO_XR9_REG_MASK) << MDIO_XR9_REG_OFFSET);
+
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	ltq_gbit_w32(val, LTQ_GBIT_MDIO_CTL);
+	while (ltq_gbit_r32(LTQ_GBIT_MDIO_CTL) & MDIO_XR9_REQUEST)
+		;
+	val = ltq_gbit_r32(LTQ_GBIT_MDIO_DATA) & MDIO_XR9_RD_MASK;
+	return val;
+}
+
+static int
 ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
 {
 	u32 val = MDIO_REQUEST |
@@ -379,14 +542,11 @@ ltq_etop_mdio_probe(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
-	int phy_addr;
 
-	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
-		if (priv->mii_bus->phy_map[phy_addr]) {
-			phydev = priv->mii_bus->phy_map[phy_addr];
-			break;
-		}
-	}
+	if (of_machine_is_compatible("lantiq,ase"))
+		phydev = priv->mii_bus->phy_map[8];
+	else
+		phydev = priv->mii_bus->phy_map[0];
 
 	if (!phydev) {
 		netdev_err(dev, "no PHY found\n");
@@ -394,7 +554,7 @@ ltq_etop_mdio_probe(struct net_device *dev)
 	}
 
 	phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
-			0, priv->pldata->mii_mode);
+			0, priv->mii_mode);
 
 	if (IS_ERR(phydev)) {
 		netdev_err(dev, "Could not attach to PHY\n");
@@ -408,6 +568,9 @@ ltq_etop_mdio_probe(struct net_device *dev)
 			      | SUPPORTED_Autoneg
 			      | SUPPORTED_MII
 			      | SUPPORTED_TP);
+	if (of_machine_is_compatible("lantiq,ar9"))
+		phydev->supported &= SUPPORTED_1000baseT_Half
+					| SUPPORTED_1000baseT_Full;
 
 	phydev->advertising = phydev->supported;
 	priv->phydev = phydev;
@@ -433,8 +596,13 @@ ltq_etop_mdio_init(struct net_device *dev)
 	}
 
 	priv->mii_bus->priv = dev;
-	priv->mii_bus->read = ltq_etop_mdio_rd;
-	priv->mii_bus->write = ltq_etop_mdio_wr;
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		priv->mii_bus->read = ltq_etop_mdio_rd_xr9;
+		priv->mii_bus->write = ltq_etop_mdio_wr_xr9;
+	} else {
+		priv->mii_bus->read = ltq_etop_mdio_rd;
+		priv->mii_bus->write = ltq_etop_mdio_wr;
+	}
 	priv->mii_bus->name = "ltq_mii";
 	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
 		priv->pdev->name, priv->pdev->id);
@@ -483,17 +651,19 @@ static int
 ltq_etop_open(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
+	unsigned long flags;
 
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		struct ltq_etop_chan *ch = &priv->ch[i];
+	napi_enable(&priv->txch.napi);
+	napi_enable(&priv->rxch.napi);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ltq_dma_open(&priv->txch.dma);
+	ltq_dma_open(&priv->rxch.dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (priv->phydev)
+		phy_start(priv->phydev);
 
-		if (!IS_TX(i) && (!IS_RX(i)))
-			continue;
-		ltq_dma_open(&ch->dma);
-		napi_enable(&ch->napi);
-	}
-	phy_start(priv->phydev);
 	netif_tx_start_all_queues(dev);
 	return 0;
 }
@@ -502,18 +672,19 @@ static int
 ltq_etop_stop(struct net_device *dev)
 {
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	int i;
+	unsigned long flags;
 
 	netif_tx_stop_all_queues(dev);
-	phy_stop(priv->phydev);
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		struct ltq_etop_chan *ch = &priv->ch[i];
+	if (priv->phydev)
+		phy_stop(priv->phydev);
+	napi_disable(&priv->txch.napi);
+	napi_disable(&priv->rxch.napi);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ltq_dma_close(&priv->txch.dma);
+	ltq_dma_close(&priv->rxch.dma);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
-		if (!IS_RX(i) && !IS_TX(i))
-			continue;
-		napi_disable(&ch->napi);
-		ltq_dma_close(&ch->dma);
-	}
 	return 0;
 }
 
@@ -523,16 +694,16 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
 	int queue = skb_get_queue_mapping(skb);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
 	struct ltq_etop_priv *priv = netdev_priv(dev);
-	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
-	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
-	int len;
+	struct ltq_dma_desc *desc =
+		&priv->txch.dma.desc_base[priv->txch.dma.desc];
 	unsigned long flags;
 	u32 byte_offset;
+	int len;
 
 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
 
-	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
-		dev_kfree_skb_any(skb);
+	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
+			priv->txch.skb[priv->txch.dma.desc]) {
 		netdev_err(dev, "tx ring full\n");
 		netif_tx_stop_queue(txq);
 		return NETDEV_TX_BUSY;
@@ -540,7 +711,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
 
 	/* dma needs to start on a 16 byte aligned address */
 	byte_offset = CPHYSADDR(skb->data) % 16;
-	ch->skb[ch->dma.desc] = skb;
+	priv->txch.skb[priv->txch.dma.desc] = skb;
 
 	dev->trans_start = jiffies;
 
@@ -550,11 +721,11 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
 		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
-	ch->dma.desc++;
-	ch->dma.desc %= LTQ_DESC_NUM;
+	priv->txch.dma.desc++;
+	priv->txch.dma.desc %= LTQ_DESC_NUM;
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
+	if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
 		netif_tx_stop_queue(txq);
 
 	return NETDEV_TX_OK;
@@ -633,34 +804,32 @@ ltq_etop_init(struct net_device *dev)
 	struct ltq_etop_priv *priv = netdev_priv(dev);
 	struct sockaddr mac;
 	int err;
-	bool random_mac = false;
 
 	ether_setup(dev);
 	dev->watchdog_timeo = 10 * HZ;
 	err = ltq_etop_hw_init(dev);
 	if (err)
 		goto err_hw;
+	err = ltq_etop_dma_init(dev);
+	if (err)
+		goto err_hw;
+
 	ltq_etop_change_mtu(dev, 1500);
 
-	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+	memcpy(&mac.sa_data, priv->mac, ETH_ALEN);
 	if (!is_valid_ether_addr(mac.sa_data)) {
 		pr_warn("etop: invalid MAC, using random\n");
-		eth_random_addr(mac.sa_data);
-		random_mac = true;
+		random_ether_addr(mac.sa_data);
 	}
 
 	err = ltq_etop_set_mac_address(dev, &mac);
 	if (err)
 		goto err_netdev;
-
-	/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
-	if (random_mac)
-		dev->addr_assign_type |= NET_ADDR_RANDOM;
-
 	ltq_etop_set_multicast_list(dev);
-	err = ltq_etop_mdio_init(dev);
-	if (err)
-		goto err_netdev;
+	if (!ltq_etop_mdio_init(dev))
+		dev->ethtool_ops = &ltq_etop_ethtool_ops;
+	else
+		pr_warn("etop: mdio probe failed\n");
 	return 0;
 
 err_netdev:
@@ -680,6 +849,9 @@ ltq_etop_tx_timeout(struct net_device *dev)
 	err = ltq_etop_hw_init(dev);
 	if (err)
 		goto err_hw;
+	err = ltq_etop_dma_init(dev);
+	if (err)
+		goto err_hw;
 	dev->trans_start = jiffies;
 	netif_wake_queue(dev);
 	return;
@@ -703,14 +875,19 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
 	.ndo_tx_timeout = ltq_etop_tx_timeout,
 };
 
-static int __init
+static int __devinit
 ltq_etop_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
 	struct ltq_etop_priv *priv;
-	struct resource *res;
+	struct resource *res, *gbit_res, irqres[2];
 	int err;
-	int i;
+
+	err = of_irq_to_resource_table(pdev->dev.of_node, irqres, 2);
+	if (err != 2) {
+		dev_err(&pdev->dev, "failed to get etop irqs\n");
+		return -EINVAL;
+	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
@@ -736,30 +913,58 @@ ltq_etop_probe(struct platform_device *pdev)
 		goto err_out;
 	}
 
-	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
-	if (!dev) {
-		err = -ENOMEM;
-		goto err_out;
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		gbit_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		if (!gbit_res) {
+			dev_err(&pdev->dev, "failed to get gbit resource\n");
+			err = -ENOENT;
+			goto err_out;
+		}
+		ltq_gbit_membase = devm_ioremap_nocache(&pdev->dev,
+			gbit_res->start, resource_size(gbit_res));
+		if (!ltq_gbit_membase) {
+			dev_err(&pdev->dev, "failed to remap gigabit switch %d\n",
+				pdev->id);
+			err = -ENOMEM;
+			goto err_out;
+		}
 	}
+
+	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
 	strcpy(dev->name, "eth%d");
 	dev->netdev_ops = &ltq_eth_netdev_ops;
-	dev->ethtool_ops = &ltq_etop_ethtool_ops;
 	priv = netdev_priv(dev);
 	priv->res = res;
 	priv->pdev = pdev;
-	priv->pldata = dev_get_platdata(&pdev->dev);
 	priv->netdev = dev;
+	priv->tx_irq = irqres[0].start;
+	priv->rx_irq = irqres[1].start;
+	priv->mii_mode = of_get_phy_mode(pdev->dev.of_node);
+	priv->mac = of_get_mac_address(pdev->dev.of_node);
+
+	priv->clk_ppe = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(priv->clk_ppe))
+		return PTR_ERR(priv->clk_ppe);
+	if (of_machine_is_compatible("lantiq,ar9")) {
+		priv->clk_switch = clk_get(&pdev->dev, "switch");
+		if (IS_ERR(priv->clk_switch))
+			return PTR_ERR(priv->clk_switch);
+	}
+	if (of_machine_is_compatible("lantiq,ase")) {
+		priv->clk_ephy = clk_get(&pdev->dev, "ephy");
+		if (IS_ERR(priv->clk_ephy))
+			return PTR_ERR(priv->clk_ephy);
+		priv->clk_ephycgu = clk_get(&pdev->dev, "ephycgu");
+		if (IS_ERR(priv->clk_ephycgu))
+			return PTR_ERR(priv->clk_ephycgu);
+	}
+
 	spin_lock_init(&priv->lock);
 
-	for (i = 0; i < MAX_DMA_CHAN; i++) {
-		if (IS_TX(i))
-			netif_napi_add(dev, &priv->ch[i].napi,
-				ltq_etop_poll_tx, 8);
-		else if (IS_RX(i))
-			netif_napi_add(dev, &priv->ch[i].napi,
-				ltq_etop_poll_rx, 32);
-		priv->ch[i].netdev = dev;
-	}
+	netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
+	netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
+	priv->txch.netdev = dev;
+	priv->rxch.netdev = dev;
 
 	err = register_netdev(dev);
 	if (err)
@@ -788,32 +993,23 @@ ltq_etop_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct of_device_id ltq_etop_match[] = {
+	{ .compatible = "lantiq,etop-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ltq_etop_match);
+
 static struct platform_driver ltq_mii_driver = {
+	.probe = ltq_etop_probe,
 	.remove = __devexit_p(ltq_etop_remove),
 	.driver = {
 		.name = "ltq_etop",
 		.owner = THIS_MODULE,
+		.of_match_table = ltq_etop_match,
 	},
 };
 
-int __init
-init_ltq_etop(void)
-{
-	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
-
-	if (ret)
-		pr_err("ltq_etop: Error registering platform driver!");
-	return ret;
-}
-
-static void __exit
-exit_ltq_etop(void)
-{
-	platform_driver_unregister(&ltq_mii_driver);
-}
-
-module_init(init_ltq_etop);
-module_exit(exit_ltq_etop);
+module_platform_driver(ltq_mii_driver);
 
 MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
 MODULE_DESCRIPTION("Lantiq SoC ETOP");
-- 
1.7.10.4