--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
 obj-$(CONFIG_ALX) += alx/
+obj-$(CONFIG_NET_AR231X) += ar231x/
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -6,7 +6,7 @@
 config NET_VENDOR_ATHEROS
 	bool "Atheros devices"
 	default y
-	depends on (PCI || ATH79)
+	depends on (PCI || ATH25 || ATH79)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -87,4 +87,10 @@ config ALX
 	  To compile this driver as a module, choose M here.  The module
 	  will be called alx.
 
+config NET_AR231X
+	tristate "Atheros AR231X built-in Ethernet support"
+	depends on ATH25
+	help
+	  Support for the AR231x/AR531x built-in Ethernet controller.
+
 endif # NET_VENDOR_ATHEROS
--- /dev/null
+++ b/drivers/net/ethernet/atheros/ar231x/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_AR231X) += ar231x.o
--- /dev/null
+++ b/drivers/net/ethernet/atheros/ar231x/ar231x.c
@@ -0,0 +1,1120 @@
+/*
+ * ar231x.c: Linux driver for the Atheros AR231x Ethernet device.
+ *
+ * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@nbd.name>
+ *
+ * Thanks to Atheros for providing hardware and documentation
+ * enabling me to write this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Additional credits:
+ * This code is taken from John Taylor's Sibyte driver and then
+ * modified for the AR2313.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#define AR2313_MTU                     1692
+#define AR2313_PRIOS                   1
+#define AR2313_QUEUES                  (2*AR2313_PRIOS)
+#define AR2313_DESCR_ENTRIES           64
+
+#ifndef min
+#define min(a, b)	(((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef SMP_CACHE_BYTES
+#define SMP_CACHE_BYTES	L1_CACHE_BYTES
+#endif
+
+#define AR2313_MBOX_SET_BIT  0x8
+
+#include "ar231x.h"
+
+/**
+ * New interrupt handler strategy:
+ *
+ * The old interrupt handler worked using the traditional method of
+ * replacing an skbuff with a new one when a packet arrives. However
+ * the rx rings do not need to contain a static number of buffer
+ * descriptors, thus it makes sense to move the memory allocation out
+ * of the main interrupt handler and do it in a bottom half handler
+ * and only allocate new buffers when the number of buffers in the
+ * ring is below a certain threshold. To avoid starving the NIC under
+ * heavy load it is, however, necessary to force allocation when
+ * hitting a minimum threshold. The strategy for allocation is as
+ * follows:
+ *
+ *     RX_LOW_BUF_THRES    - allocate buffers in the bottom half
+ *     RX_PANIC_LOW_THRES  - we are very low on buffers, allocate
+ *                           the buffers in the interrupt handler
+ *     RX_RING_THRES       - maximum number of buffers in the rx ring
+ *
+ * One advantageous side effect of this allocation approach is that
+ * the entire rx processing can be done without holding any spin lock,
+ * since the rx rings and registers are totally independent of the tx
+ * ring and its registers. This of course includes the allocation of
+ * new skbs. Thus start_xmit can run in parallel with rx processing
+ * and the memory allocation on SMP systems.
+ *
+ * Note that running the skb reallocation in a bottom half opens up
+ * another can of races which needs to be handled properly. In
+ * particular it can happen that the interrupt handler tries to run
+ * the reallocation while the bottom half is either running on another
+ * CPU or was interrupted on the same CPU. To get around this the
+ * driver uses bitops to prevent the reallocation routines from being
+ * reentered.
+ *
+ * TX handling can also be done without holding any spin lock, since
+ * tx_csm is only written to by the interrupt handler.
+ */
+
+/**
+ * Threshold values for RX buffer allocation - the low water marks for
+ * when to start refilling the rings are set to 75% of the ring
+ * sizes. It seems to make sense to refill the rings entirely from the
+ * interrupt handler once it gets below the panic threshold, that way
+ * we don't risk that the refilling is moved to another CPU when the
+ * one running the interrupt handler just got the slab code hot in its
+ * cache.
+ */
+#define RX_RING_SIZE		AR2313_DESCR_ENTRIES
+#define RX_PANIC_THRES	        (RX_RING_SIZE/4)
+#define RX_LOW_THRES	        ((3*RX_RING_SIZE)/4)
+#define CRC_LEN                 4
+#define RX_OFFSET               2
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define VLAN_HDR                4
+#else
+#define VLAN_HDR                0
+#endif
+
+#define AR2313_BUFSIZE		(AR2313_MTU + VLAN_HDR + ETH_HLEN + CRC_LEN + \
+				 RX_OFFSET)
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sameer Dekate <sdekate@arubanetworks.com>, Imre Kaloz <kaloz@openwrt.org>, Felix Fietkau <nbd@nbd.name>");
+MODULE_DESCRIPTION("AR231x Ethernet driver");
+#endif
+
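+/*
+ * Descriptors and packet buffers live in KSEG0/KSEG1 on these SoCs, so
+ * masking off the segment bits yields the physical address that the
+ * DMA engine expects.
+ */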
+#define virt_to_phys(x) ((u32)(x) & 0x1fffffff)
+
+/* prototypes */
+static void ar231x_halt(struct net_device *dev);
+static void rx_tasklet_func(unsigned long data);
+static void rx_tasklet_cleanup(struct net_device *dev);
+static void ar231x_multicast_list(struct net_device *dev);
+static void ar231x_tx_timeout(struct net_device *dev);
+
+static int ar231x_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum);
+static int ar231x_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+				u16 value);
+static int ar231x_mdiobus_reset(struct mii_bus *bus);
+static int ar231x_mdiobus_probe(struct net_device *dev);
+static void ar231x_adjust_link(struct net_device *dev);
+
+#ifndef ERR
+#define ERR(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+ar231x_netpoll(struct net_device *dev)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ar231x_interrupt(dev->irq, dev);
+	local_irq_restore(flags);
+}
+#endif
+
+static const struct net_device_ops ar231x_ops = {
+	.ndo_open		= ar231x_open,
+	.ndo_stop		= ar231x_close,
+	.ndo_start_xmit		= ar231x_start_xmit,
+	.ndo_set_rx_mode	= ar231x_multicast_list,
+	.ndo_do_ioctl		= ar231x_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_tx_timeout		= ar231x_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= ar231x_netpoll,
+#endif
+};
+
+static int ar231x_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct ar231x_private *sp;
+	struct resource *res;
+	unsigned long ar_eth_base;
+	char buf[64];
+
+	dev = alloc_etherdev(sizeof(struct ar231x_private));
+
+	if (dev == NULL) {
+		printk(KERN_ERR
+			   "ar231x: Unable to allocate net_device structure!\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	sp = netdev_priv(dev);
+	sp->dev = dev;
+	sp->pdev = pdev;
+	sp->cfg = pdev->dev.platform_data;
+
+	sprintf(buf, "eth%d_membase", pdev->id);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, buf);
+	if (!res)
+		return -ENODEV;
+
+	sp->link = 0;
+	ar_eth_base = res->start;
+
+	sprintf(buf, "eth%d_irq", pdev->id);
+	dev->irq = platform_get_irq_byname(pdev, buf);
+
+	spin_lock_init(&sp->lock);
+
+	dev->features |= NETIF_F_HIGHDMA;
+	dev->netdev_ops = &ar231x_ops;
+
+	tasklet_init(&sp->rx_tasklet, rx_tasklet_func, (unsigned long)dev);
+	tasklet_disable(&sp->rx_tasklet);
+
+	sp->eth_regs = ioremap_nocache(ar_eth_base, sizeof(*sp->eth_regs));
+	if (!sp->eth_regs) {
+		printk("Can't remap eth registers\n");
+		return -ENXIO;
+	}
+
+	/**
+	 * When there's only one MAC, PHY regs are typically on ENET0,
+	 * even though the MAC might be on ENET1.
+	 * So remap PHY regs separately.
+	 */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eth0_mii");
+	if (!res) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "eth1_mii");
+		if (!res)
+			return -ENODEV;
+	}
+	sp->phy_regs = ioremap_nocache(res->start, resource_size(res));
+	if (!sp->phy_regs) {
+		printk("Can't remap phy registers\n");
+		return -ENXIO;
+	}
+
+	sp->dma_regs = ioremap_nocache(ar_eth_base + 0x1000,
+				       sizeof(*sp->dma_regs));
+	if (!sp->dma_regs) {
+		printk("Can't remap DMA registers\n");
+		return -ENXIO;
+	}
+	dev->base_addr = ar_eth_base + 0x1000;
+
+	strncpy(sp->name, "Atheros AR231x", sizeof(sp->name) - 1);
+	sp->name[sizeof(sp->name) - 1] = '\0';
+	memcpy(dev->dev_addr, sp->cfg->macaddr, 6);
+
+	if (ar231x_init(dev)) {
+		/* ar231x_init() calls ar231x_init_cleanup() on error */
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	if (register_netdev(dev)) {
+		printk("%s: register_netdev failed\n", __func__);
+		return -ENODEV;
+	}
+
+	printk("%s: %s: %pM, irq %d\n", dev->name, sp->name, dev->dev_addr,
+	       dev->irq);
+
+	sp->mii_bus = mdiobus_alloc();
+	if (sp->mii_bus == NULL)
+		return -ENOMEM;
+
+	sp->mii_bus->priv = dev;
+	sp->mii_bus->read = ar231x_mdiobus_read;
+	sp->mii_bus->write = ar231x_mdiobus_write;
+	sp->mii_bus->reset = ar231x_mdiobus_reset;
+	sp->mii_bus->name = "ar231x_eth_mii";
+	snprintf(sp->mii_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+
+	mdiobus_register(sp->mii_bus);
+
+	if (ar231x_mdiobus_probe(dev) != 0) {
+		printk(KERN_ERR "%s: mdiobus_probe failed\n", dev->name);
+		rx_tasklet_cleanup(dev);
+		ar231x_init_cleanup(dev);
+		unregister_netdev(dev);
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void ar231x_multicast_list(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	unsigned int filter;
+
+	filter = sp->eth_regs->mac_control;
+
+	if (dev->flags & IFF_PROMISC)
+		filter |= MAC_CONTROL_PR;
+	else
+		filter &= ~MAC_CONTROL_PR;
+	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 0))
+		filter |= MAC_CONTROL_PM;
+	else
+		filter &= ~MAC_CONTROL_PM;
+
+	sp->eth_regs->mac_control = filter;
+}
+
+static void rx_tasklet_cleanup(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+
+	/**
+	 * Tasklet may be scheduled. Need to get it removed from the list
+	 * since we're about to free the struct.
+	 */
+
+	sp->unloading = 1;
+	tasklet_enable(&sp->rx_tasklet);
+	tasklet_kill(&sp->rx_tasklet);
+}
+
+static int ar231x_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct ar231x_private *sp = netdev_priv(dev);
+
+	rx_tasklet_cleanup(dev);
+	ar231x_init_cleanup(dev);
+	unregister_netdev(dev);
+	mdiobus_unregister(sp->mii_bus);
+	mdiobus_free(sp->mii_bus);
+	free_netdev(dev);
+	return 0;
+}
+
+/**
+ * Restart the AR2313 ethernet controller.
+ */
+static int ar231x_restart(struct net_device *dev)
+{
+	/* disable interrupts */
+	disable_irq(dev->irq);
+
+	/* stop mac */
+	ar231x_halt(dev);
+
+	/* initialize */
+	ar231x_init(dev);
+
+	/* enable interrupts */
+	enable_irq(dev->irq);
+
+	return 0;
+}
+
+static struct platform_driver ar231x_driver = {
+	.driver.name = "ar231x-eth",
+	.probe = ar231x_probe,
+	.remove = ar231x_remove,
+};
+
+module_platform_driver(ar231x_driver);
+
+static void ar231x_free_descriptors(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+
+	if (sp->rx_ring != NULL) {
+		kfree((void *)KSEG0ADDR(sp->rx_ring));
+		sp->rx_ring = NULL;
+		sp->tx_ring = NULL;
+	}
+}
+
+static int ar231x_allocate_descriptors(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	int size;
+	int j;
+	ar231x_descr_t *space;
+
+	if (sp->rx_ring != NULL) {
+		printk("%s: already done.\n", __func__);
+		return 0;
+	}
+
+	size = sizeof(ar231x_descr_t) * (AR2313_DESCR_ENTRIES * AR2313_QUEUES);
+	space = kmalloc(size, GFP_KERNEL);
+	if (space == NULL)
+		return 1;
+
+	/* invalidate cached lines so they cannot be written back over the descriptor area */
+	dma_cache_inv((unsigned int)space, size);
+
+	/* access the descriptors through KSEG1 (uncached) from now on */
+	space = (ar231x_descr_t *)KSEG1ADDR(space);
+
+	memset((void *)space, 0, size);
+
+	sp->rx_ring = space;
+	space += AR2313_DESCR_ENTRIES;
+
+	sp->tx_ring = space;
+	space += AR2313_DESCR_ENTRIES;
+
+	/* Initialize the transmit Descriptors */
+	for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+		ar231x_descr_t *td = &sp->tx_ring[j];
+
+		td->status = 0;
+		td->devcs = DMA_TX1_CHAINED;
+		td->addr = 0;
+		td->descr = virt_to_phys(&sp->tx_ring[DSC_NEXT(j)]);
+	}
+
+	return 0;
+}
+
+/**
+ * Generic cleanup handling data allocated during init. Used when the
+ * module is unloaded or if an error occurs during initialization.
+ */
+static void ar231x_init_cleanup(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	struct sk_buff *skb;
+	int j;
+
+	ar231x_free_descriptors(dev);
+
+	if (sp->eth_regs)
+		iounmap((void *)sp->eth_regs);
+	if (sp->dma_regs)
+		iounmap((void *)sp->dma_regs);
+	if (sp->phy_regs)
+		iounmap((void *)sp->phy_regs);
+
+	if (sp->rx_skb) {
+		for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+			skb = sp->rx_skb[j];
+			if (skb) {
+				sp->rx_skb[j] = NULL;
+				dev_kfree_skb(skb);
+			}
+		}
+		kfree(sp->rx_skb);
+		sp->rx_skb = NULL;
+	}
+
+	if (sp->tx_skb) {
+		for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+			skb = sp->tx_skb[j];
+			if (skb) {
+				sp->tx_skb[j] = NULL;
+				dev_kfree_skb(skb);
+			}
+		}
+		kfree(sp->tx_skb);
+		sp->tx_skb = NULL;
+	}
+}
+
+static int ar231x_reset_reg(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	unsigned int ethsal, ethsah;
+	unsigned int flags;
+
+	sp->cfg->reset_set(sp->cfg->reset_mac);
+	mdelay(10);
+	sp->cfg->reset_clear(sp->cfg->reset_mac);
+	mdelay(10);
+	sp->cfg->reset_set(sp->cfg->reset_phy);
+	mdelay(10);
+	sp->cfg->reset_clear(sp->cfg->reset_phy);
+	mdelay(10);
+
+	sp->dma_regs->bus_mode = (DMA_BUS_MODE_SWR);
+	mdelay(10);
+	sp->dma_regs->bus_mode =
+		((32 << DMA_BUS_MODE_PBL_SHIFT) | DMA_BUS_MODE_BLE);
+
+	/* enable interrupts */
+	sp->dma_regs->intr_ena = DMA_STATUS_AIS | DMA_STATUS_NIS |
+				 DMA_STATUS_RI | DMA_STATUS_TI |
+				 DMA_STATUS_FBE;
+	sp->dma_regs->xmt_base = virt_to_phys(sp->tx_ring);
+	sp->dma_regs->rcv_base = virt_to_phys(sp->rx_ring);
+	sp->dma_regs->control =
+		(DMA_CONTROL_SR | DMA_CONTROL_ST | DMA_CONTROL_SF);
+
+	sp->eth_regs->flow_control = (FLOW_CONTROL_FCE);
+	sp->eth_regs->vlan_tag = (0x8100);
+
+	/* Enable Ethernet Interface */
+	flags = (MAC_CONTROL_TE |	/* transmit enable */
+			 MAC_CONTROL_PM |	/* pass mcast */
+			 MAC_CONTROL_F |	/* full duplex */
+			 MAC_CONTROL_HBD);	/* heart beat disabled */
+
+	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
+		flags |= MAC_CONTROL_PR;
+	}
+	sp->eth_regs->mac_control = flags;
+
+	/* Set all Ethernet station address registers to their initial values */
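+	/* mac_addr[0] takes MAC bytes 5:4, mac_addr[1] takes bytes 3:0 */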
+	ethsah = (((u_int) (dev->dev_addr[5]) << 8) & (u_int) 0x0000FF00) |
+		 (((u_int) (dev->dev_addr[4]) << 0) & (u_int) 0x000000FF);
+
+	ethsal = (((u_int) (dev->dev_addr[3]) << 24) & (u_int) 0xFF000000) |
+		 (((u_int) (dev->dev_addr[2]) << 16) & (u_int) 0x00FF0000) |
+		 (((u_int) (dev->dev_addr[1]) << 8) & (u_int) 0x0000FF00) |
+		 (((u_int) (dev->dev_addr[0]) << 0) & (u_int) 0x000000FF);
+
+	sp->eth_regs->mac_addr[0] = ethsah;
+	sp->eth_regs->mac_addr[1] = ethsal;
+
+	mdelay(10);
+
+	return 0;
+}
+
+static int ar231x_init(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	int ecode = 0;
+
+	/* Allocate descriptors */
+	if (ar231x_allocate_descriptors(dev)) {
+		printk("%s: %s: ar231x_allocate_descriptors failed\n",
+		       dev->name, __func__);
+		ecode = -EAGAIN;
+		goto init_error;
+	}
+
+	/* Get the memory for the skb rings */
+	if (sp->rx_skb == NULL) {
+		sp->rx_skb =
+			kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES,
+				GFP_KERNEL);
+		if (!(sp->rx_skb)) {
+			printk("%s: %s: rx_skb kmalloc failed\n",
+			       dev->name, __func__);
+			ecode = -EAGAIN;
+			goto init_error;
+		}
+	}
+	memset(sp->rx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
+
+	if (sp->tx_skb == NULL) {
+		sp->tx_skb =
+			kmalloc(sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES,
+				GFP_KERNEL);
+		if (!(sp->tx_skb)) {
+			printk("%s: %s: tx_skb kmalloc failed\n",
+			       dev->name, __func__);
+			ecode = -EAGAIN;
+			goto init_error;
+		}
+	}
+	memset(sp->tx_skb, 0, sizeof(struct sk_buff *) * AR2313_DESCR_ENTRIES);
+
+	/**
+	 * Set tx_csm before we start receiving interrupts, otherwise
+	 * the interrupt handler might think it is supposed to process
+	 * tx ints before we are up and running, which may cause a null
+	 * pointer access in the int handler.
+	 */
+	sp->rx_skbprd = 0;
+	sp->cur_rx = 0;
+	sp->tx_prd = 0;
+	sp->tx_csm = 0;
+
+	/* Zero the stats before starting the interface */
+	memset(&dev->stats, 0, sizeof(dev->stats));
+
+	/**
+	 * We load the ring here as there seems to be no way to tell the
+	 * firmware to wipe the ring without re-initializing it.
+	 */
+	ar231x_load_rx_ring(dev, RX_RING_SIZE);
+
+	/* Init hardware */
+	ar231x_reset_reg(dev);
+
+	/* Get the IRQ */
+	ecode = request_irq(dev->irq, &ar231x_interrupt, 0,
+			    dev->name, dev);
+	if (ecode) {
+		printk(KERN_WARNING "%s: %s: Requested IRQ %d is busy\n",
+		       dev->name, __func__, dev->irq);
+		goto init_error;
+	}
+
+	tasklet_enable(&sp->rx_tasklet);
+
+	return 0;
+
+init_error:
+	ar231x_init_cleanup(dev);
+	return ecode;
+}
+
+/**
+ * Load the rx ring.
+ *
+ * Loading rings is safe without holding the spin lock since this is
+ * done only before the device is enabled, thus no interrupts are
+ * generated and the ring is not touched by the interrupt/tasklet handlers.
+ */
+static void ar231x_load_rx_ring(struct net_device *dev, int nr_bufs)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	short i, idx;
+
+	idx = sp->rx_skbprd;
+
+	for (i = 0; i < nr_bufs; i++) {
+		struct sk_buff *skb;
+		ar231x_descr_t *rd;
+
+		if (sp->rx_skb[idx])
+			break;
+
+		skb = netdev_alloc_skb_ip_align(dev, AR2313_BUFSIZE);
+		if (!skb) {
+			printk(KERN_WARNING "%s: rx skb allocation failed\n",
+			       __func__);
+			break;
+		}
+
+		/* netdev_alloc_skb_ip_align() reserved 2 bytes to align the IP header */
+		skb->dev = dev;
+		sp->rx_skb[idx] = skb;
+
+		rd = (ar231x_descr_t *)&sp->rx_ring[idx];
+
+		/* initialize dma descriptor */
+		rd->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
+					 DMA_RX1_CHAINED);
+		rd->addr = virt_to_phys(skb->data);
+		rd->descr = virt_to_phys(&sp->rx_ring[DSC_NEXT(idx)]);
+		rd->status = DMA_RX_OWN;
+
+		idx = DSC_NEXT(idx);
+	}
+
+	if (i)
+		sp->rx_skbprd = idx;
+}
+
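+/* Max packets handled per rx pass; if hit, the rx tasklet reschedules itself */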
+#define AR2313_MAX_PKTS_PER_CALL        64
+
+static int ar231x_rx_int(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	struct sk_buff *skb, *skb_new;
+	ar231x_descr_t *rxdesc;
+	unsigned int status;
+	u32 idx;
+	int pkts = 0;
+	int rval;
+
+	idx = sp->cur_rx;
+
+	/* process at most the entire ring and then wait for another int */
+	while (1) {
+		rxdesc = &sp->rx_ring[idx];
+		status = rxdesc->status;
+
+		if (status & DMA_RX_OWN) {
+			/* DMA engine still owns the descriptor, or it is not yet filled in */
+			rval = 0;
+			break;
+		}
+
+		if (++pkts > AR2313_MAX_PKTS_PER_CALL) {
+			rval = 1;
+			break;
+		}
+
+		if ((status & DMA_RX_ERROR) && !(status & DMA_RX_LONG)) {
+			dev->stats.rx_errors++;
+			dev->stats.rx_dropped++;
+
+			/* add statistics counters */
+			if (status & DMA_RX_ERR_CRC)
+				dev->stats.rx_crc_errors++;
+			if (status & DMA_RX_ERR_COL)
+				dev->stats.rx_over_errors++;
+			if (status & DMA_RX_ERR_LENGTH)
+				dev->stats.rx_length_errors++;
+			if (status & DMA_RX_ERR_RUNT)
+				dev->stats.rx_over_errors++;
+			if (status & DMA_RX_ERR_DESC)
+				dev->stats.rx_over_errors++;
+
+		} else {
+			/* alloc new buffer. */
+			skb_new = netdev_alloc_skb_ip_align(dev,
+							    AR2313_BUFSIZE);
+			if (skb_new != NULL) {
+				skb = sp->rx_skb[idx];
+				/* set skb */
+				skb_put(skb, ((status >> DMA_RX_LEN_SHIFT) &
+					0x3fff) - CRC_LEN);
+
+				dev->stats.rx_bytes += skb->len;
+				skb->protocol = eth_type_trans(skb, dev);
+				/* pass the packet to upper layers */
+				netif_rx(skb);
+
+				skb_new->dev = dev;
+				/* reset descriptor's curr_addr */
+				rxdesc->addr = virt_to_phys(skb_new->data);
+
+				dev->stats.rx_packets++;
+				sp->rx_skb[idx] = skb_new;
+			} else {
+				dev->stats.rx_dropped++;
+			}
+		}
+
+		rxdesc->devcs = ((AR2313_BUFSIZE << DMA_RX1_BSIZE_SHIFT) |
+						 DMA_RX1_CHAINED);
+		rxdesc->status = DMA_RX_OWN;
+
+		idx = DSC_NEXT(idx);
+	}
+
+	sp->cur_rx = idx;
+
+	return rval;
+}
+
+static void ar231x_tx_int(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	u32 idx;
+	struct sk_buff *skb;
+	ar231x_descr_t *txdesc;
+	unsigned int status = 0;
+
+	idx = sp->tx_csm;
+
+	while (idx != sp->tx_prd) {
+		txdesc = &sp->tx_ring[idx];
+		status = txdesc->status;
+
+		if (status & DMA_TX_OWN) {
+			/* ar231x dma still owns descr */
+			break;
+		}
+		/* done with this descriptor */
+		dma_unmap_single(&sp->pdev->dev, txdesc->addr,
+				 txdesc->devcs & DMA_TX1_BSIZE_MASK,
+				 DMA_TO_DEVICE);
+		txdesc->status = 0;
+
+		if (status & DMA_TX_ERROR) {
+			dev->stats.tx_errors++;
+			dev->stats.tx_dropped++;
+			if (status & DMA_TX_ERR_UNDER)
+				dev->stats.tx_fifo_errors++;
+			if (status & DMA_TX_ERR_HB)
+				dev->stats.tx_heartbeat_errors++;
+			if (status & (DMA_TX_ERR_LOSS | DMA_TX_ERR_LINK))
+				dev->stats.tx_carrier_errors++;
+			if (status & (DMA_TX_ERR_LATE | DMA_TX_ERR_COL |
+			    DMA_TX_ERR_JABBER | DMA_TX_ERR_DEFER))
+				dev->stats.tx_aborted_errors++;
+		} else {
+			/* transmit OK */
+			dev->stats.tx_packets++;
+		}
+
+		skb = sp->tx_skb[idx];
+		sp->tx_skb[idx] = NULL;
+		idx = DSC_NEXT(idx);
+		dev->stats.tx_bytes += skb->len;
+		dev_kfree_skb_irq(skb);
+	}
+
+	sp->tx_csm = idx;
+}
+
+static void rx_tasklet_func(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct ar231x_private *sp = netdev_priv(dev);
+
+	if (sp->unloading)
+		return;
+
+	if (ar231x_rx_int(dev)) {
+		tasklet_hi_schedule(&sp->rx_tasklet);
+	} else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&sp->lock, flags);
+		sp->dma_regs->intr_ena |= DMA_STATUS_RI;
+		spin_unlock_irqrestore(&sp->lock, flags);
+	}
+}
+
+static void rx_schedule(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+
+	sp->dma_regs->intr_ena &= ~DMA_STATUS_RI;
+
+	tasklet_hi_schedule(&sp->rx_tasklet);
+}
+
+static irqreturn_t ar231x_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct ar231x_private *sp = netdev_priv(dev);
+	unsigned int status, enabled;
+
+	/* clear interrupt */
+	/* Don't clear RI bit if currently disabled */
+	status = sp->dma_regs->status;
+	enabled = sp->dma_regs->intr_ena;
+	sp->dma_regs->status = status & enabled;
+
+	if (status & DMA_STATUS_NIS) {
+		/* normal status */
+		/**
+		 * Don't schedule rx processing if interrupt
+		 * is already disabled.
+		 */
+		if (status & enabled & DMA_STATUS_RI) {
+			/* receive interrupt */
+			rx_schedule(dev);
+		}
+		if (status & DMA_STATUS_TI) {
+			/* transmit interrupt */
+			ar231x_tx_int(dev);
+		}
+	}
+
+	/* abnormal status */
+	if (status & (DMA_STATUS_FBE | DMA_STATUS_TPS))
+		ar231x_restart(dev);
+
+	return IRQ_HANDLED;
+}
+
+static int ar231x_open(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	unsigned int ethsal, ethsah;
+
+	/* reset the hardware, in case the MAC address changed */
+	ethsah = (((u_int) (dev->dev_addr[5]) << 8) & (u_int) 0x0000FF00) |
+		 (((u_int) (dev->dev_addr[4]) << 0) & (u_int) 0x000000FF);
+
+	ethsal = (((u_int) (dev->dev_addr[3]) << 24) & (u_int) 0xFF000000) |
+		 (((u_int) (dev->dev_addr[2]) << 16) & (u_int) 0x00FF0000) |
+		 (((u_int) (dev->dev_addr[1]) << 8) & (u_int) 0x0000FF00) |
+		 (((u_int) (dev->dev_addr[0]) << 0) & (u_int) 0x000000FF);
+
+	sp->eth_regs->mac_addr[0] = ethsah;
+	sp->eth_regs->mac_addr[1] = ethsal;
+
+	mdelay(10);
+
+	dev->mtu = 1500;
+	netif_start_queue(dev);
+
+	sp->eth_regs->mac_control |= MAC_CONTROL_RE;
+
+	phy_start(sp->phy_dev);
+
+	return 0;
+}
+
+static void ar231x_tx_timeout(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sp->lock, flags);
+	ar231x_restart(dev);
+	spin_unlock_irqrestore(&sp->lock, flags);
+}
+
+static void ar231x_halt(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	int j;
+
+	tasklet_disable(&sp->rx_tasklet);
+
+	/* kill the MAC */
+	sp->eth_regs->mac_control &= ~(MAC_CONTROL_RE |	/* disable Receives */
+				       MAC_CONTROL_TE);	/* disable Transmits */
+	/* stop dma */
+	sp->dma_regs->control = 0;
+	sp->dma_regs->bus_mode = DMA_BUS_MODE_SWR;
+
+	/* place phy and MAC in reset */
+	sp->cfg->reset_set(sp->cfg->reset_mac);
+	sp->cfg->reset_set(sp->cfg->reset_phy);
+
+	/* free buffers on tx ring */
+	for (j = 0; j < AR2313_DESCR_ENTRIES; j++) {
+		struct sk_buff *skb;
+		ar231x_descr_t *txdesc;
+
+		txdesc = &sp->tx_ring[j];
+		txdesc->descr = 0;
+
+		skb = sp->tx_skb[j];
+		if (skb) {
+			dev_kfree_skb(skb);
+			sp->tx_skb[j] = NULL;
+		}
+	}
+}
+
+/**
+ * close should do nothing. Here's why: it is called when
+ * 'ifconfig bond0 down' is run. If it called free_irq, the irq
+ * would be gone for good; when bond0 is brought 'up' again,
+ * ar231x_open() does not call request_irq(). Worse, the call
+ * to ar231x_halt() generates a WDOG reset due to the write to
+ * the reset register, and the box reboots.
+ * Leaving this code disabled allows the system to resume when
+ * bond0 is brought up again.
+ */
+static int ar231x_close(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+#if 0
+	/* Disable interrupts */
+	disable_irq(dev->irq);
+
+	/**
+	 * Without (or before) releasing irq and stopping hardware, this
+	 * is an absolute non-sense, by the way. It will be reset instantly
+	 * by the first irq.
+	 */
+	netif_stop_queue(dev);
+
+	/* stop the MAC and DMA engines */
+	ar231x_halt(dev);
+
+	/* release the interrupt */
+	free_irq(dev->irq, dev);
+
+#endif
+
+	phy_stop(sp->phy_dev);
+
+	return 0;
+}
+
+static int ar231x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	ar231x_descr_t *td;
+	u32 idx;
+
+	idx = sp->tx_prd;
+	td = &sp->tx_ring[idx];
+
+	if (td->status & DMA_TX_OWN) {
+		/* free the skb and lie to the caller that we sent it out */
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+
+		/* restart transmitter in case locked */
+		sp->dma_regs->xmt_poll = 0;
+		return 0;
+	}
+
+	/* Setup the transmit descriptor. */
+	td->devcs = ((skb->len << DMA_TX1_BSIZE_SHIFT) |
+				 (DMA_TX1_LS | DMA_TX1_IC | DMA_TX1_CHAINED));
+	td->addr = dma_map_single(&sp->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+	td->status = DMA_TX_OWN;
+
+	/* kick transmitter last */
+	sp->dma_regs->xmt_poll = 0;
+
+	sp->tx_skb[idx] = skb;
+	idx = DSC_NEXT(idx);
+	sp->tx_prd = idx;
+
+	return 0;
+}
+
+static int ar231x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return phy_mii_ioctl(sp->phy_dev, ifr, cmd);
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static void ar231x_adjust_link(struct net_device *dev)
+{
+	struct ar231x_private *sp = netdev_priv(dev);
+	struct phy_device *phydev = sp->phy_dev;
+	u32 mc;
+
+	if (!phydev->link) {
+		if (sp->link) {
+			pr_info("%s: link down\n", dev->name);
+			sp->link = 0;
+		}
+		return;
+	}
+	sp->link = 1;
+
+	pr_info("%s: link up (%uMbps/%s duplex)\n", dev->name,
+		phydev->speed, phydev->duplex ? "full" : "half");
+
+	mc = sp->eth_regs->mac_control;
+	if (phydev->duplex)
+		mc = (mc | MAC_CONTROL_F) & ~MAC_CONTROL_DRO;
+	else
+		mc = (mc | MAC_CONTROL_DRO) & ~MAC_CONTROL_F;
+	sp->eth_regs->mac_control = mc;
+	sp->duplex = phydev->duplex;
+}
+
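+/* compose the MII address register value from the PHY and register numbers */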
+#define MII_ADDR(phy, reg) \
+	((reg << MII_ADDR_REG_SHIFT) | (phy << MII_ADDR_PHY_SHIFT))
+
+static int
+ar231x_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+	struct net_device *const dev = bus->priv;
+	struct ar231x_private *sp = netdev_priv(dev);
+	volatile MII *ethernet = sp->phy_regs;
+
+	ethernet->mii_addr = MII_ADDR(phy_addr, regnum);
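+	/* wait for the MII read transaction to complete */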
+	while (ethernet->mii_addr & MII_ADDR_BUSY)
+		;
+	return ethernet->mii_data >> MII_DATA_SHIFT;
+}
+
+static int
+ar231x_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+	struct net_device *const dev = bus->priv;
+	struct ar231x_private *sp = netdev_priv(dev);
+	volatile MII *ethernet = sp->phy_regs;
+
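+	/* wait for any previous MII transaction to finish */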
+	while (ethernet->mii_addr & MII_ADDR_BUSY)
+		;
+	ethernet->mii_data = value << MII_DATA_SHIFT;
+	ethernet->mii_addr = MII_ADDR(phy_addr, regnum) | MII_ADDR_WRITE;
+
+	return 0;
+}
+
+static int ar231x_mdiobus_reset(struct mii_bus *bus)
+{
+	struct net_device *const dev = bus->priv;
+
+	ar231x_reset_reg(dev);
+
+	return 0;
+}
+
+static int ar231x_mdiobus_probe(struct net_device *dev)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+	struct ar231x_private *const sp = netdev_priv(dev);
+	struct phy_device *phydev = NULL;
+
+	/* find the first (lowest address) PHY on the current MAC's MII bus */
+	phydev = phy_find_first(sp->mii_bus);
+	if (!phydev) {
+		printk(KERN_ERR "ar231x: %s: no PHY found\n", dev->name);
+		return -1;
+	}
+
+	/* now we are supposed to have a proper phydev, to attach to... */
+	BUG_ON(phydev->attached_dev);
+
+	phydev = phy_connect(dev, phydev_name(phydev), &ar231x_adjust_link,
+			     PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(phydev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phydev);
+	}
+
+	/* mask with MAC supported features */
+	linkmode_set_bit_array(phy_10_100_features_array,
+			       ARRAY_SIZE(phy_10_100_features_array),
+			       mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+
+	linkmode_and(phydev->supported, phydev->supported, mask);
+	linkmode_copy(phydev->advertising, phydev->supported);
+
+	sp->phy_dev = phydev;
+
+	printk(KERN_INFO "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+	       dev->name, phydev->drv->name, phydev_name(phydev));
+
+	return 0;
+}
+
--- /dev/null
+++ b/drivers/net/ethernet/atheros/ar231x/ar231x.h
@@ -0,0 +1,282 @@
+/*
+ * ar231x.h: Linux driver for the Atheros AR231x Ethernet device.
+ *
+ * Copyright (C) 2004 by Sameer Dekate <sdekate@arubanetworks.com>
+ * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2006-2009 Felix Fietkau <nbd@nbd.name>
+ *
+ * Thanks to Atheros for providing hardware and documentation
+ * enabling me to write this driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _AR2313_H_
+#define _AR2313_H_
+
+#include <linux/interrupt.h>
+#include <generated/autoconf.h>
+#include <linux/bitops.h>
+#include <ath25_platform.h>
+
+/* probe link timer - 5 secs */
+#define LINK_TIMER    (5*HZ)
+
+#define IS_DMA_TX_INT(X)   (((X) & (DMA_STATUS_TI)) != 0)
+#define IS_DMA_RX_INT(X)   (((X) & (DMA_STATUS_RI)) != 0)
+#define IS_DRIVER_OWNED(X) (((X) & (DMA_TX_OWN))    == 0)
+
+#define AR2313_TX_TIMEOUT (HZ/4)
+
+/* Rings */
+#define DSC_RING_ENTRIES_SIZE	(AR2313_DESCR_ENTRIES * sizeof(struct desc))
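+/* ring index increment with wraparound; assumes a power-of-two ring size */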
+#define DSC_NEXT(idx)	        ((idx + 1) & (AR2313_DESCR_ENTRIES - 1))
+
+#define AR2313_MBGET		2
+#define AR2313_MBSET		3
+#define AR2313_PCI_RECONFIG	4
+#define AR2313_PCI_DUMP		5
+#define AR2313_TEST_PANIC	6
+#define AR2313_TEST_NULLPTR	7
+#define AR2313_READ_DATA	8
+#define AR2313_WRITE_DATA	9
+#define AR2313_GET_VERSION	10
+#define AR2313_TEST_HANG	11
+#define AR2313_SYNC		12
+
+#define DMA_RX_ERR_CRC		BIT(1)
+#define DMA_RX_ERR_DRIB		BIT(2)
+#define DMA_RX_ERR_MII		BIT(3)
+#define DMA_RX_EV2		BIT(5)
+#define DMA_RX_ERR_COL		BIT(6)
+#define DMA_RX_LONG		BIT(7)
+#define DMA_RX_LS		BIT(8)	/* last descriptor */
+#define DMA_RX_FS		BIT(9)	/* first descriptor */
+#define DMA_RX_MF		BIT(10)	/* multicast frame */
+#define DMA_RX_ERR_RUNT		BIT(11)	/* runt frame */
+#define DMA_RX_ERR_LENGTH	BIT(12)	/* length error */
+#define DMA_RX_ERR_DESC		BIT(14)	/* descriptor error */
+#define DMA_RX_ERROR		BIT(15)	/* error summary */
+#define DMA_RX_LEN_MASK		0x3fff0000
+#define DMA_RX_LEN_SHIFT	16
+#define DMA_RX_FILT		BIT(30)
+#define DMA_RX_OWN		BIT(31)	/* desc owned by DMA controller */
+
+#define DMA_RX1_BSIZE_MASK	0x000007ff
+#define DMA_RX1_BSIZE_SHIFT	0
+#define DMA_RX1_CHAINED		BIT(24)
+#define DMA_RX1_RER		BIT(25)
+
+#define DMA_TX_ERR_UNDER	BIT(1)	/* underflow error */
+#define DMA_TX_ERR_DEFER	BIT(2)	/* excessive deferral */
+#define DMA_TX_COL_MASK		0x78
+#define DMA_TX_COL_SHIFT	3
+#define DMA_TX_ERR_HB		BIT(7)	/* heartbeat failure */
+#define DMA_TX_ERR_COL		BIT(8)	/* excessive collisions */
+#define DMA_TX_ERR_LATE		BIT(9)	/* late collision */
+#define DMA_TX_ERR_LINK		BIT(10)	/* no carrier */
+#define DMA_TX_ERR_LOSS		BIT(11)	/* loss of carrier */
+#define DMA_TX_ERR_JABBER	BIT(14)	/* transmit jabber timeout */
+#define DMA_TX_ERROR		BIT(15)	/* frame aborted */
+#define DMA_TX_OWN		BIT(31)	/* descr owned by DMA controller */
+
+#define DMA_TX1_BSIZE_MASK	0x000007ff
+#define DMA_TX1_BSIZE_SHIFT	0
+#define DMA_TX1_CHAINED		BIT(24)	/* chained descriptors */
+#define DMA_TX1_TER		BIT(25)	/* transmit end of ring */
+#define DMA_TX1_FS		BIT(29)	/* first segment */
+#define DMA_TX1_LS		BIT(30)	/* last segment */
+#define DMA_TX1_IC		BIT(31)	/* interrupt on completion */
+
+#define RCVPKT_LENGTH(X)	(X  >> 16)	/* Received pkt Length */
+
+#define MAC_CONTROL_RE		BIT(2)	/* receive enable */
+#define MAC_CONTROL_TE		BIT(3)	/* transmit enable */
+#define MAC_CONTROL_DC		BIT(5)	/* Deferral check */
+#define MAC_CONTROL_ASTP	BIT(8)	/* Auto pad strip */
+#define MAC_CONTROL_DRTY	BIT(10)	/* Disable retry */
+#define MAC_CONTROL_DBF		BIT(11)	/* Disable bcast frames */
+#define MAC_CONTROL_LCC		BIT(12)	/* late collision ctrl */
+#define MAC_CONTROL_HP		BIT(13)	/* Hash Perfect filtering */
+#define MAC_CONTROL_HASH	BIT(14)	/* Unicast hash filtering */
+#define MAC_CONTROL_HO		BIT(15)	/* Hash only filtering */
+#define MAC_CONTROL_PB		BIT(16)	/* Pass Bad frames */
+#define MAC_CONTROL_IF		BIT(17)	/* Inverse filtering */
+#define MAC_CONTROL_PR		BIT(18)	/* promis mode (valid frames only) */
+#define MAC_CONTROL_PM		BIT(19)	/* pass multicast */
+#define MAC_CONTROL_F		BIT(20)	/* full-duplex */
+#define MAC_CONTROL_DRO		BIT(23)	/* Disable Receive Own */
+#define MAC_CONTROL_HBD		BIT(28)	/* heart-beat disabled (MUST BE SET) */
+#define MAC_CONTROL_BLE		BIT(30)	/* big endian mode */
+#define MAC_CONTROL_RA		BIT(31)	/* rcv all (valid and invalid frames) */
+
+#define MII_ADDR_BUSY		BIT(0)
+#define MII_ADDR_WRITE		BIT(1)
+#define MII_ADDR_REG_SHIFT	6
+#define MII_ADDR_PHY_SHIFT	11
+#define MII_DATA_SHIFT		0
+
+#define FLOW_CONTROL_FCE	BIT(1)
+
+#define DMA_BUS_MODE_SWR	BIT(0)	/* software reset */
+#define DMA_BUS_MODE_BLE	BIT(7)	/* big endian mode */
+#define DMA_BUS_MODE_PBL_SHIFT	8	/* programmable burst length 32 */
+#define DMA_BUS_MODE_DBO	BIT(20)	/* big-endian descriptors */
+
+#define DMA_STATUS_TI		BIT(0)	/* transmit interrupt */
+#define DMA_STATUS_TPS		BIT(1)	/* transmit process stopped */
+#define DMA_STATUS_TU		BIT(2)	/* transmit buffer unavailable */
+#define DMA_STATUS_TJT		BIT(3)	/* transmit buffer timeout */
+#define DMA_STATUS_UNF		BIT(5)	/* transmit underflow */
+#define DMA_STATUS_RI		BIT(6)	/* receive interrupt */
+#define DMA_STATUS_RU		BIT(7)	/* receive buffer unavailable */
+#define DMA_STATUS_RPS		BIT(8)	/* receive process stopped */
+#define DMA_STATUS_ETI		BIT(10)	/* early transmit interrupt */
+#define DMA_STATUS_FBE		BIT(13)	/* fatal bus interrupt */
+#define DMA_STATUS_ERI		BIT(14)	/* early receive interrupt */
+#define DMA_STATUS_AIS		BIT(15)	/* abnormal interrupt summary */
+#define DMA_STATUS_NIS		BIT(16)	/* normal interrupt summary */
+#define DMA_STATUS_RS_SHIFT	17	/* receive process state */
+#define DMA_STATUS_TS_SHIFT	20	/* transmit process state */
+#define DMA_STATUS_EB_SHIFT	23	/* error bits */
+
+#define DMA_CONTROL_SR		BIT(1)	/* start receive */
+#define DMA_CONTROL_ST		BIT(13)	/* start transmit */
+#define DMA_CONTROL_SF		BIT(21)	/* store and forward */
+
+typedef struct {
+	volatile unsigned int status;	/* OWN, Device control and status. */
+	volatile unsigned int devcs;	/* pkt Control bits + Length */
+	volatile unsigned int addr;	/* Current Address. */
+	volatile unsigned int descr;	/* Next descriptor in chain. */
+} ar231x_descr_t;
+
+/**
+ * New Combo structure for Both Eth0 AND eth1
+ *
+ * Don't directly access MII related regs since phy chip could be actually
+ * connected to another ethernet block.
+ */
+typedef struct {
+	volatile unsigned int mac_control;	/* 0x00 */
+	volatile unsigned int mac_addr[2];	/* 0x04 - 0x08 */
+	volatile unsigned int mcast_table[2];	/* 0x0c - 0x10 */
+	volatile unsigned int __mii_addr;	/* 0x14 */
+	volatile unsigned int __mii_data;	/* 0x18 */
+	volatile unsigned int flow_control;	/* 0x1c */
+	volatile unsigned int vlan_tag;	/* 0x20 */
+	volatile unsigned int pad[7];	/* 0x24 - 0x3c */
+	volatile unsigned int ucast_table[8];	/* 0x40-0x5c */
+} ETHERNET_STRUCT;
+
+typedef struct {
+	volatile unsigned int mii_addr;
+	volatile unsigned int mii_data;
+} MII;
+
+/********************************************************************
+ * Interrupt controller
+ ********************************************************************/
+
+typedef struct {
+	volatile unsigned int wdog_control;	/* 0x08 */
+	volatile unsigned int wdog_timer;	/* 0x0c */
+	volatile unsigned int misc_status;	/* 0x10 */
+	volatile unsigned int misc_mask;	/* 0x14 */
+	volatile unsigned int global_status;	/* 0x18 */
+	volatile unsigned int reserved;	/* 0x1c */
+	volatile unsigned int reset_control;	/* 0x20 */
+} INTERRUPT;
+
+/********************************************************************
+ * DMA controller
+ ********************************************************************/
+typedef struct {
+	volatile unsigned int bus_mode;	/* 0x00 (CSR0) */
+	volatile unsigned int xmt_poll;	/* 0x04 (CSR1) */
+	volatile unsigned int rcv_poll;	/* 0x08 (CSR2) */
+	volatile unsigned int rcv_base;	/* 0x0c (CSR3) */
+	volatile unsigned int xmt_base;	/* 0x10 (CSR4) */
+	volatile unsigned int status;	/* 0x14 (CSR5) */
+	volatile unsigned int control;	/* 0x18 (CSR6) */
+	volatile unsigned int intr_ena;	/* 0x1c (CSR7) */
+	volatile unsigned int rcv_missed;	/* 0x20 (CSR8) */
+	volatile unsigned int reserved[11];	/* 0x24-0x4c (CSR9-19) */
+	volatile unsigned int cur_tx_buf_addr;	/* 0x50 (CSR20) */
+	volatile unsigned int cur_rx_buf_addr;	/* 0x54 (CSR21) */
+} DMA;
+
+/**
+ * Private data for the AR231x device.
+ *
+ * Elements are grouped so variables used by the tx handling goes
+ * together, and will go into the same cache lines etc. in order to
+ * avoid cache line contention between the rx and tx handling on SMP.
+ *
+ * Frequently accessed variables are put at the beginning of the
+ * struct to help the compiler generate better/shorter code.
+ */
+struct ar231x_private {
+	struct net_device *dev;
+	struct platform_device *pdev;
+	int version;
+	u32 mb[2];
+
+	volatile MII *phy_regs;
+	volatile ETHERNET_STRUCT *eth_regs;
+	volatile DMA *dma_regs;
+	struct ar231x_eth *cfg;
+
+	spinlock_t lock;			/* Serialise access to device */
+
+	/* RX and TX descriptors, must be adjacent */
+	ar231x_descr_t *rx_ring;
+	ar231x_descr_t *tx_ring;
+
+	struct sk_buff **rx_skb;
+	struct sk_buff **tx_skb;
+
+	/* RX elements */
+	u32 rx_skbprd;
+	u32 cur_rx;
+
+	/* TX elements */
+	u32 tx_prd;
+	u32 tx_csm;
+
+	/* Misc elements */
+	char name[48];
+	struct {
+		u32 address;
+		u32 length;
+		char *mapping;
+	} desc;
+
+	unsigned short link;		/* 0 - link down, 1 - link up */
+	unsigned short duplex;		/* 0 - half, 1 - full */
+
+	struct tasklet_struct rx_tasklet;
+	int unloading;
+
+	struct phy_device *phy_dev;
+	struct mii_bus *mii_bus;
+};
+
+/* Prototypes */
+static int ar231x_init(struct net_device *dev);
+#ifdef TX_TIMEOUT
+static void ar231x_tx_timeout(struct net_device *dev);
+#endif
+static int ar231x_restart(struct net_device *dev);
+static void ar231x_load_rx_ring(struct net_device *dev, int bufs);
+static irqreturn_t ar231x_interrupt(int irq, void *dev_id);
+static int ar231x_open(struct net_device *dev);
+static int ar231x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int ar231x_close(struct net_device *dev);
+static int ar231x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+static void ar231x_init_cleanup(struct net_device *dev);
+
+#endif	/* _AR2313_H_ */
--- a/arch/mips/ath25/ar2315_regs.h
+++ b/arch/mips/ath25/ar2315_regs.h
@@ -57,6 +57,9 @@
 #define AR2315_PCI_EXT_BASE	0x80000000	/* PCI external */
 #define AR2315_PCI_EXT_SIZE	0x40000000
 
+/* MII registers offset inside Ethernet MMR region */
+#define AR2315_ENET0_MII_BASE	(AR2315_ENET0_BASE + 0x14)
+
 /*
  * Configuration registers
  */
--- a/arch/mips/ath25/ar5312_regs.h
+++ b/arch/mips/ath25/ar5312_regs.h
@@ -64,6 +64,10 @@
 #define AR5312_AR5312_REV7	0x0057		/* AR5312 WMAC (AP30-040) */
 #define AR5312_AR2313_REV8	0x0058		/* AR2313 WMAC (AP43-030) */
 
+/* MII registers offset inside Ethernet MMR region */
+#define AR5312_ENET0_MII_BASE	(AR5312_ENET0_BASE + 0x14)
+#define AR5312_ENET1_MII_BASE	(AR5312_ENET1_BASE + 0x14)
+
 /* Reset/Timer Block Address Map */
 #define AR5312_TIMER		0x0000 /* countdown timer */
 #define AR5312_RELOAD		0x0004 /* timer reload value */
--- a/arch/mips/ath25/ar2315.c
+++ b/arch/mips/ath25/ar2315.c
@@ -136,6 +136,8 @@ static void ar2315_irq_dispatch(void)
 
 	if (pending & CAUSEF_IP3)
 		do_IRQ(AR2315_IRQ_WLAN0);
+	else if (pending & CAUSEF_IP4)
+		do_IRQ(AR2315_IRQ_ENET0);
 #ifdef CONFIG_PCI_AR2315
 	else if (pending & CAUSEF_IP5)
 		do_IRQ(AR2315_IRQ_LCBUS_PCI);
@@ -169,6 +171,29 @@ void __init ar2315_arch_init_irq(void)
 	ar2315_misc_irq_domain = domain;
 }
 
+static void ar2315_device_reset_set(u32 mask)
+{
+	u32 val;
+
+	val = ar2315_rst_reg_read(AR2315_RESET);
+	ar2315_rst_reg_write(AR2315_RESET, val | mask);
+}
+
+static void ar2315_device_reset_clear(u32 mask)
+{
+	u32 val;
+
+	val = ar2315_rst_reg_read(AR2315_RESET);
+	ar2315_rst_reg_write(AR2315_RESET, val & ~mask);
+}
+
+static struct ar231x_eth ar2315_eth_data = {
+	.reset_set = ar2315_device_reset_set,
+	.reset_clear = ar2315_device_reset_clear,
+	.reset_mac = AR2315_RESET_ENET0,
+	.reset_phy = AR2315_RESET_EPHY0,
+};
+
 static struct resource ar2315_gpio_res[] = {
 	{
 		.name = "ar2315-gpio",
@@ -205,6 +230,11 @@ void __init ar2315_init_devices(void)
 	ar2315_gpio_res[1].end = ar2315_gpio_res[1].start;
 	platform_device_register(&ar2315_gpio);
 
+	ar2315_eth_data.macaddr = ath25_board.config->enet0_mac;
+	ath25_add_ethernet(0, AR2315_ENET0_BASE, "eth0_mii",
+			   AR2315_ENET0_MII_BASE, AR2315_IRQ_ENET0,
+			   &ar2315_eth_data);
+
 	ath25_add_wmac(0, AR2315_WLAN0_BASE, AR2315_IRQ_WLAN0);
 }
 
--- a/arch/mips/ath25/ar5312.c
+++ b/arch/mips/ath25/ar5312.c
@@ -132,6 +132,10 @@ static void ar5312_irq_dispatch(void)
 
 	if (pending & CAUSEF_IP2)
 		do_IRQ(AR5312_IRQ_WLAN0);
+	else if (pending & CAUSEF_IP3)
+		do_IRQ(AR5312_IRQ_ENET0);
+	else if (pending & CAUSEF_IP4)
+		do_IRQ(AR5312_IRQ_ENET1);
 	else if (pending & CAUSEF_IP5)
 		do_IRQ(AR5312_IRQ_WLAN1);
 	else if (pending & CAUSEF_IP6)
@@ -163,6 +167,36 @@ void __init ar5312_arch_init_irq(void)
 	ar5312_misc_irq_domain = domain;
 }
 
+static void ar5312_device_reset_set(u32 mask)
+{
+	u32 val;
+
+	val = ar5312_rst_reg_read(AR5312_RESET);
+	ar5312_rst_reg_write(AR5312_RESET, val | mask);
+}
+
+static void ar5312_device_reset_clear(u32 mask)
+{
+	u32 val;
+
+	val = ar5312_rst_reg_read(AR5312_RESET);
+	ar5312_rst_reg_write(AR5312_RESET, val & ~mask);
+}
+
+static struct ar231x_eth ar5312_eth0_data = {
+	.reset_set = ar5312_device_reset_set,
+	.reset_clear = ar5312_device_reset_clear,
+	.reset_mac = AR5312_RESET_ENET0,
+	.reset_phy = AR5312_RESET_EPHY0,
+};
+
+static struct ar231x_eth ar5312_eth1_data = {
+	.reset_set = ar5312_device_reset_set,
+	.reset_clear = ar5312_device_reset_clear,
+	.reset_mac = AR5312_RESET_ENET1,
+	.reset_phy = AR5312_RESET_EPHY1,
+};
+
 static struct physmap_flash_data ar5312_flash_data = {
 	.width = 2,
 };
@@ -243,6 +277,7 @@ static void __init ar5312_flash_init(voi
 void __init ar5312_init_devices(void)
 {
 	struct ath25_boarddata *config;
+	u8 *c;
 
 	ar5312_flash_init();
 
@@ -266,8 +301,30 @@ void __init ar5312_init_devices(void)
 
 	platform_device_register(&ar5312_gpio);
 
+	/* Fix up MAC addresses if necessary */
+	if (is_broadcast_ether_addr(config->enet0_mac))
+		ether_addr_copy(config->enet0_mac, config->enet1_mac);
+
+	/* If ENET0 and ENET1 have the same MAC address, increment the
+	 * ENET1 address, propagating any carry into the higher bytes. */
+	if (ether_addr_equal(config->enet0_mac, config->enet1_mac)) {
+		c = config->enet1_mac + 5;
+		while ((c >= config->enet1_mac) && !(++(*c)))
+			c--;
+	}
+
 	switch (ath25_soc) {
 	case ATH25_SOC_AR5312:
+		ar5312_eth0_data.macaddr = config->enet0_mac;
+		ath25_add_ethernet(0, AR5312_ENET0_BASE, "eth0_mii",
+				   AR5312_ENET0_MII_BASE, AR5312_IRQ_ENET0,
+				   &ar5312_eth0_data);
+
+		ar5312_eth1_data.macaddr = config->enet1_mac;
+		ath25_add_ethernet(1, AR5312_ENET1_BASE, "eth1_mii",
+				   AR5312_ENET1_MII_BASE, AR5312_IRQ_ENET1,
+				   &ar5312_eth1_data);
+
 		if (!ath25_board.radio)
 			return;
 
@@ -276,8 +333,18 @@ void __init ar5312_init_devices(void)
 
 		ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0);
 		break;
+	/*
+	 * AR2312/3 ethernet uses the PHY of ENET0, but the MAC
+	 * of ENET1. Atheros calls it 'twisted' for a reason :)
+	 */
 	case ATH25_SOC_AR2312:
 	case ATH25_SOC_AR2313:
+		ar5312_eth1_data.reset_phy = ar5312_eth0_data.reset_phy;
+		ar5312_eth1_data.macaddr = config->enet0_mac;
+		ath25_add_ethernet(1, AR5312_ENET1_BASE, "eth0_mii",
+				   AR5312_ENET0_MII_BASE, AR5312_IRQ_ENET1,
+				   &ar5312_eth1_data);
+
 		if (!ath25_board.radio)
 			return;
 		break;
--- a/arch/mips/ath25/devices.h
+++ b/arch/mips/ath25/devices.h
@@ -33,6 +33,8 @@ extern struct ar231x_board_config ath25_
 extern void (*ath25_irq_dispatch)(void);
 
 int ath25_find_config(phys_addr_t offset, unsigned long size);
+int ath25_add_ethernet(int nr, u32 base, const char *mii_name, u32 mii_base,
+		       int irq, void *pdata);
 void ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk);
 int ath25_add_wmac(int nr, u32 base, int irq);
 
--- a/arch/mips/ath25/devices.c
+++ b/arch/mips/ath25/devices.c
@@ -13,6 +13,51 @@
 struct ar231x_board_config ath25_board;
 enum ath25_soc_type ath25_soc = ATH25_SOC_UNKNOWN;
 
+static struct resource ath25_eth0_res[] = {
+	{
+		.name = "eth0_membase",
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.name = "eth0_mii",
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.name = "eth0_irq",
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct resource ath25_eth1_res[] = {
+	{
+		.name = "eth1_membase",
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.name = "eth1_mii",
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.name = "eth1_irq",
+		.flags = IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device ath25_eth[] = {
+	{
+		.id = 0,
+		.name = "ar231x-eth",
+		.resource = ath25_eth0_res,
+		.num_resources = ARRAY_SIZE(ath25_eth0_res)
+	},
+	{
+		.id = 1,
+		.name = "ar231x-eth",
+		.resource = ath25_eth1_res,
+		.num_resources = ARRAY_SIZE(ath25_eth1_res)
+	}
+};
+
 static struct resource ath25_wmac0_res[] = {
 	{
 		.name = "wmac0_membase",
@@ -71,6 +116,25 @@ const char *get_system_type(void)
 	return soc_type_strings[ath25_soc];
 }
 
+int __init ath25_add_ethernet(int nr, u32 base, const char *mii_name,
+			      u32 mii_base, int irq, void *pdata)
+{
+	struct resource *res;
+
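+	/* fill in the membase, MII and IRQ resources in the order declared above */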
+	ath25_eth[nr].dev.platform_data = pdata;
+	res = &ath25_eth[nr].resource[0];
+	res->start = base;
+	res->end = base + 0x2000 - 1;
+	res++;
+	res->name = mii_name;
+	res->start = mii_base;
+	res->end = mii_base + 8 - 1;
+	res++;
+	res->start = irq;
+	res->end = irq;
+	return platform_device_register(&ath25_eth[nr]);
+}
+
 void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
 {
 #ifdef CONFIG_SERIAL_8250_CONSOLE
--- a/arch/mips/include/asm/mach-ath25/ath25_platform.h
+++ b/arch/mips/include/asm/mach-ath25/ath25_platform.h
@@ -71,4 +71,15 @@ struct ar231x_board_config {
 	const char *radio;
 };
 
+/*
+ * Platform device information for the Ethernet MAC
+ */
+struct ar231x_eth {
+	void (*reset_set)(u32);
+	void (*reset_clear)(u32);
+	u32 reset_mac;
+	u32 reset_phy;
+	char *macaddr;
+};
+
 #endif /* __ASM_MACH_ATH25_PLATFORM_H */