From cf93418a4bd5e69f069a65da92537bd4d6191223 Mon Sep 17 00:00:00 2001
From: John Crispin <blogic@openwrt.org>
Date: Sun, 27 Jul 2014 09:29:51 +0100
Subject: [PATCH 54/57] DMA: ralink: add rt2880 dma engine
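
Add a dmaengine driver for the GDMA engine found on Ralink SoCs such as
the MT7620. The driver is built on the virt-dma helpers and supports
slave scatter-gather and cyclic transfers on the controller's channels.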

Signed-off-by: John Crispin <blogic@openwrt.org>
---
 drivers/dma/Kconfig       |    8 +
 drivers/dma/Makefile      |    1 +
 drivers/dma/dmaengine.c   |   26 ++
 drivers/dma/ralink-gdma.c |  606 +++++++++++++++++++++++++++++++++++++++++++++
 include/linux/dmaengine.h |    1 +
 5 files changed, 642 insertions(+)
 create mode 100644 drivers/dma/ralink-gdma.c

--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -351,6 +351,14 @@ config MOXART_DMA
 	help
 	  Enable support for the MOXA ART SoC DMA controller.
 
+config DMA_RALINK
+	tristate "RALINK DMA support"
+	depends on RALINK && SOC_MT7620
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the DMA engine found on Ralink MT7620 SoCs.
+
 config DMA_ENGINE
 	bool
 
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -564,6 +564,32 @@ struct dma_chan *dma_get_any_slave_chann
 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 
 /**
+ * dma_get_slave_channel - try to get specific channel exclusively
+ * @chan: target channel
+ */
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
+{
+	int err = -EBUSY;
+
+	/* lock against __dma_request_channel */
+	mutex_lock(&dma_list_mutex);
+
+	if (chan->client_count == 0) {
+		err = dma_chan_get(chan);
+		if (err)
+			pr_debug("%s: failed to get %s: (%d)\n",
+				__func__, dma_chan_name(chan), err);
+	} else {
+		chan = NULL;
+	}
+
+	mutex_unlock(&dma_list_mutex);
+
+	return chan;
+}
+EXPORT_SYMBOL_GPL(dma_get_slave_channel);
+
+/**
  * __dma_request_channel - try to allocate an exclusive channel
  * @mask: capabilities that the channel must satisfy
  * @fn: optional callback to disposition available channels
--- /dev/null
+++ b/drivers/dma/ralink-gdma.c
@@ -0,0 +1,606 @@
+/*
+ *  Copyright (C) 2013, Lars-Peter Clausen <lars@metafoo.de>
+ *  GDMA4740 DMAC support
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
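+/*
+ * Register layout: every channel owns a 0x10-byte window holding its
+ * source address, destination address and two control words. The global
+ * interrupt mask/status and control registers sit from offset 0x200 up.
+ */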
+#define GDMA_NR_CHANS			16
+
+#define GDMA_REG_SRC_ADDR(x)		(0x00 + (x) * 0x10)
+#define GDMA_REG_DST_ADDR(x)		(0x04 + (x) * 0x10)
+
+#define GDMA_REG_CTRL0(x)		(0x08 + (x) * 0x10)
+#define GDMA_REG_CTRL0_TX_MASK		0xffff
+#define GDMA_REG_CTRL0_TX_SHIFT		16
+#define GDMA_REG_CTRL0_CURR_MASK	0xff
+#define GDMA_REG_CTRL0_CURR_SHIFT	8
+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED	BIT(7)
+#define GDMA_REG_CTRL0_DST_ADDR_FIXED	BIT(6)
+#define GDMA_REG_CTRL0_BURST_MASK	0x7
+#define GDMA_REG_CTRL0_BURST_SHIFT	3
+#define GDMA_REG_CTRL0_DONE_INT		BIT(2)
+#define GDMA_REG_CTRL0_ENABLE		BIT(1)
+#define GDMA_REG_CTRL0_HW_MODE		0
+
+#define GDMA_REG_CTRL1(x)		(0x0c + (x) * 0x10)
+#define GDMA_REG_CTRL1_SEG_MASK		0xf
+#define GDMA_REG_CTRL1_SEG_SHIFT	22
+#define GDMA_REG_CTRL1_REQ_MASK		0x3f
+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT	16
+#define GDMA_REG_CTRL1_DST_REQ_SHIFT	8
+#define GDMA_REG_CTRL1_CONTINUOUS	BIT(14)
+#define GDMA_REG_CTRL1_NEXT_MASK	0x1f
+#define GDMA_REG_CTRL1_NEXT_SHIFT	3
+#define GDMA_REG_CTRL1_COHERENT		BIT(2)
+#define GDMA_REG_CTRL1_FAIL		BIT(1)
+#define GDMA_REG_CTRL1_MASK		BIT(0)
+
+#define GDMA_REG_UNMASK_INT		0x200
+#define GDMA_REG_DONE_INT		0x204
+
+#define GDMA_REG_GCT			0x220
+#define GDMA_REG_GCT_CHAN_MASK		0x3
+#define GDMA_REG_GCT_CHAN_SHIFT		3
+#define GDMA_REG_GCT_VER_MASK		0x3
+#define GDMA_REG_GCT_VER_SHIFT		1
+#define GDMA_REG_GCT_ARBIT_RR		BIT(0)
+
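+/* Burst size encoding for the CTRL0 BURST field: 4 << n bytes per burst. */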
+enum gdma_dma_transfer_size {
+	GDMA_TRANSFER_SIZE_4BYTE	= 0,
+	GDMA_TRANSFER_SIZE_8BYTE	= 1,
+	GDMA_TRANSFER_SIZE_16BYTE	= 2,
+	GDMA_TRANSFER_SIZE_32BYTE	= 3,
+};
+
+struct gdma_dma_sg {
+	dma_addr_t addr;
+	unsigned int len;
+};
+
+struct gdma_dma_desc {
+	struct virt_dma_desc vdesc;
+
+	enum dma_transfer_direction direction;
+	bool cyclic;
+
+	unsigned int num_sgs;
+	struct gdma_dma_sg sg[];
+};
+
+struct gdma_dmaengine_chan {
+	struct virt_dma_chan vchan;
+	unsigned int id;
+
+	dma_addr_t fifo_addr;
+	unsigned int transfer_shift;
+
+	struct gdma_dma_desc *desc;
+	unsigned int next_sg;
+};
+
+struct gdma_dma_dev {
+	struct dma_device ddev;
+	void __iomem *base;
+	struct clk *clk;
+
+	struct gdma_dmaengine_chan chan[GDMA_NR_CHANS];
+};
+
+static struct gdma_dma_dev *gdma_dma_chan_get_dev(
+	struct gdma_dmaengine_chan *chan)
+{
+	return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
+		ddev);
+}
+
+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
+{
+	return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
+}
+
+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
+{
+	return container_of(vdesc, struct gdma_dma_desc, vdesc);
+}
+
+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
+	unsigned int reg)
+{
+	return readl(dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
+	unsigned reg, uint32_t val)
+{
+	writel(val, dma_dev->base + reg);
+}
+
+static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev,
+	unsigned int reg, uint32_t val, uint32_t mask)
+{
+	uint32_t tmp;
+
+	tmp = gdma_dma_read(dma_dev, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	gdma_dma_write(dma_dev, reg, tmp);
+}
+
+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs)
+{
+	return kzalloc(sizeof(struct gdma_dma_desc) +
+		sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC);
+}
+
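+/*
+ * Map a dma_slave_config maxburst value to the nearest burst size the
+ * CTRL0 BURST field can express.
+ */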
+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
+{
+	if (maxburst <= 7)
+		return GDMA_TRANSFER_SIZE_4BYTE;
+	else if (maxburst <= 15)
+		return GDMA_TRANSFER_SIZE_8BYTE;
+	else if (maxburst <= 31)
+		return GDMA_TRANSFER_SIZE_16BYTE;
+
+	return GDMA_TRANSFER_SIZE_32BYTE;
+}
+
+static int gdma_dma_slave_config(struct dma_chan *c,
+	const struct dma_slave_config *config)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	enum gdma_dma_transfer_size transfer_size;
+	uint32_t flags;
+	uint32_t ctrl0, ctrl1;
+
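+	/* Request line 32 appears to select the memory port; the
+	 * peripheral request line is taken from slave_id.
+	 */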
+	switch (config->direction) {
+	case DMA_MEM_TO_DEV:
+		ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_DST_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->dst_maxburst);
+		chan->fifo_addr = config->dst_addr;
+		break;
+
+	case DMA_DEV_TO_MEM:
+		ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT;
+		ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT;
+		flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
+		transfer_size = gdma_dma_maxburst(config->src_maxburst);
+		chan->fifo_addr = config->src_addr;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	chan->transfer_shift = 1 + transfer_size;
+
+	ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE;
+	ctrl0 |= GDMA_REG_CTRL0_DONE_INT;
+
+	ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT);
+	ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
+	ctrl1 |= GDMA_REG_CTRL1_FAIL;
+	ctrl1 &= ~GDMA_REG_CTRL1_CONTINUOUS;
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
+	gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
+
+	return 0;
+}
+
+static int gdma_dma_terminate_all(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+			GDMA_REG_CTRL0_ENABLE);
+	chan->desc = NULL;
+	vchan_get_all_descriptors(&chan->vchan, &head);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	vchan_dma_desc_free_list(&chan->vchan, &head);
+
+	return 0;
+}
+
+static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+	unsigned long arg)
+{
+	struct dma_slave_config *config = (struct dma_slave_config *)arg;
+
+	switch (cmd) {
+	case DMA_SLAVE_CONFIG:
+		return gdma_dma_slave_config(chan, config);
+	case DMA_TERMINATE_ALL:
+		return gdma_dma_terminate_all(chan);
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	dma_addr_t src_addr, dst_addr;
+	struct virt_dma_desc *vdesc;
+	struct gdma_dma_sg *sg;
+
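+	/* Disable the channel while its transfer registers are rewritten. */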
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0,
+			GDMA_REG_CTRL0_ENABLE);
+
+	if (!chan->desc) {
+		vdesc = vchan_next_desc(&chan->vchan);
+		if (!vdesc)
+			return 0;
+		chan->desc = to_gdma_dma_desc(vdesc);
+		chan->next_sg = 0;
+	}
+
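+	/* For cyclic transfers, wrap back to the first segment. */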
+	if (chan->next_sg == chan->desc->num_sgs)
+		chan->next_sg = 0;
+
+	sg = &chan->desc->sg[chan->next_sg];
+
+	if (chan->desc->direction == DMA_MEM_TO_DEV) {
+		src_addr = sg->addr;
+		dst_addr = chan->fifo_addr;
+	} else {
+		src_addr = chan->fifo_addr;
+		dst_addr = sg->addr;
+	}
+	gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
+	gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id),
+			(sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE,
+			GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT);
+	chan->next_sg++;
+	gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0, GDMA_REG_CTRL1_MASK);
+
+	return 0;
+}
+
+static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan)
+{
+	spin_lock(&chan->vchan.lock);
+	if (chan->desc) {
+		if (chan->desc->cyclic) {
+			vchan_cyclic_callback(&chan->desc->vdesc);
+		} else if (chan->next_sg == chan->desc->num_sgs) {
+			vchan_cookie_complete(&chan->desc->vdesc);
+			chan->desc = NULL;
+		}
+	}
+	gdma_dma_start_transfer(chan);
+	spin_unlock(&chan->vchan.lock);
+}
+
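+/*
+ * Acknowledge the pending interrupts, then run the per-channel
+ * completion handler for every channel whose done bit is set.
+ */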
+static irqreturn_t gdma_dma_irq(int irq, void *devid)
+{
+	struct gdma_dma_dev *dma_dev = devid;
+	uint32_t unmask, done;
+	unsigned int i;
+
+	unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT);
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask);
+	done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT);
+
+	for (i = 0; i < GDMA_NR_CHANS; ++i)
+		if (done & BIT(i))
+			gdma_dma_chan_irq(&dma_dev->chan[i]);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done);
+
+	return IRQ_HANDLED;
+}
+
+static void gdma_dma_issue_pending(struct dma_chan *c)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	if (vchan_issue_pending(&chan->vchan) && !chan->desc)
+		gdma_dma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
+	struct dma_chan *c, struct scatterlist *sgl,
+	unsigned int sg_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	struct scatterlist *sg;
+	unsigned int i;
+
+	desc = gdma_dma_alloc_desc(sg_len);
+	if (!desc)
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		desc->sg[i].addr = sg_dma_address(sg);
+		desc->sg[i].len = sg_dma_len(sg);
+	}
+
+	desc->num_sgs = sg_len;
+	desc->direction = direction;
+	desc->cyclic = false;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
+	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
+	size_t period_len, enum dma_transfer_direction direction,
+	unsigned long flags, void *context)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct gdma_dma_desc *desc;
+	unsigned int num_periods, i;
+
+	if (buf_len % period_len)
+		return NULL;
+
+	num_periods = buf_len / period_len;
+
+	desc = gdma_dma_alloc_desc(num_periods);
+	if (!desc)
+		return NULL;
+
+	for (i = 0; i < num_periods; i++) {
+		desc->sg[i].addr = buf_addr;
+		desc->sg[i].len = period_len;
+		buf_addr += period_len;
+	}
+
+	desc->num_sgs = num_periods;
+	desc->direction = direction;
+	desc->cyclic = true;
+
+	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+}
+
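+/*
+ * The residue is the sum of the lengths of the segments that have not
+ * been handed to the hardware yet plus, once a segment is in flight,
+ * the channel's current transfer count scaled by the transfer width.
+ */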
+static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan,
+	struct gdma_dma_desc *desc, unsigned int next_sg)
+{
+	struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
+	unsigned int residue, count;
+	unsigned int i;
+
+	residue = 0;
+
+	for (i = next_sg; i < desc->num_sgs; i++)
+		residue += desc->sg[i].len;
+
+	if (next_sg != 0) {
+		count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
+		count >>= GDMA_REG_CTRL0_CURR_SHIFT;
+		count &= GDMA_REG_CTRL0_CURR_MASK;
+		residue += count << chan->transfer_shift;
+	}
+
+	return residue;
+}
+
+static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
+	dma_cookie_t cookie, struct dma_tx_state *state)
+{
+	struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
+	struct virt_dma_desc *vdesc;
+	enum dma_status status;
+	unsigned long flags;
+
+	status = dma_cookie_status(c, cookie, state);
+	if (status == DMA_SUCCESS || !state)
+		return status;
+
+	spin_lock_irqsave(&chan->vchan.lock, flags);
+	vdesc = vchan_find_desc(&chan->vchan, cookie);
+	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie) {
+		state->residue = gdma_dma_desc_residue(chan, chan->desc,
+				chan->next_sg);
+	} else if (vdesc) {
+		state->residue = gdma_dma_desc_residue(chan,
+				to_gdma_dma_desc(vdesc), 0);
+	} else {
+		state->residue = 0;
+	}
+	spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+	return status;
+}
+
+static int gdma_dma_alloc_chan_resources(struct dma_chan *c)
+{
+	return 0;
+}
+
+static void gdma_dma_free_chan_resources(struct dma_chan *c)
+{
+	vchan_free_chan_resources(to_virt_chan(c));
+}
+
+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
+{
+	kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
+}
+
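+/* Translate the first DT dma-spec argument directly into a channel number. */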
+static struct dma_chan *
+of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
+			struct of_dma *ofdma)
+{
+	struct gdma_dma_dev *dma_dev = ofdma->of_dma_data;
+	unsigned int request = dma_spec->args[0];
+
+	if (request >= GDMA_NR_CHANS)
+		return NULL;
+
+	return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan));
+}
+
+static int gdma_dma_probe(struct platform_device *pdev)
+{
+	struct gdma_dmaengine_chan *chan;
+	struct gdma_dma_dev *dma_dev;
+	struct dma_device *dd;
+	unsigned int i;
+	struct resource *res;
+	uint32_t gct;
+	int ret;
+	int irq;
+
+	dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL);
+	if (!dma_dev)
+		return -ENOMEM;
+
+	dd = &dma_dev->ddev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dma_dev->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dma_dev->base))
+		return PTR_ERR(dma_dev->base);
+
+	dma_cap_set(DMA_SLAVE, dd->cap_mask);
+	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
+	dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources;
+	dd->device_free_chan_resources = gdma_dma_free_chan_resources;
+	dd->device_tx_status = gdma_dma_tx_status;
+	dd->device_issue_pending = gdma_dma_issue_pending;
+	dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
+	dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
+	dd->device_control = gdma_dma_control;
+	dd->dev = &pdev->dev;
+	dd->chancnt = GDMA_NR_CHANS;
+	INIT_LIST_HEAD(&dd->channels);
+
+	for (i = 0; i < dd->chancnt; i++) {
+		chan = &dma_dev->chan[i];
+		chan->id = i;
+		chan->vchan.desc_free = gdma_dma_desc_free;
+		vchan_init(&chan->vchan, dd);
+	}
+
+	ret = dma_async_device_register(dd);
+	if (ret)
+		return ret;
+
+	ret = of_dma_controller_register(pdev->dev.of_node,
+		of_dma_xlate_by_chan_id, dma_dev);
+	if (ret)
+		goto err_unregister;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		ret = irq;
+		goto err_of_dma;
+	}
+	ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev), dma_dev);
+	if (ret)
+		goto err_of_dma;
+
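+	/* Reset the interrupt mask and clear any stale done flags. */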
+	gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0);
+	gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1);
+
+	gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
+	dev_info(&pdev->dev, "revision: %d, channels: %d\n",
+		(gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
+		8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & GDMA_REG_GCT_CHAN_MASK));
+	platform_set_drvdata(pdev, dma_dev);
+
+	gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
+
+	return 0;
+
+err_of_dma:
+	of_dma_controller_free(pdev->dev.of_node);
+err_unregister:
+	dma_async_device_unregister(dd);
+	return ret;
+}
+
+static int gdma_dma_remove(struct platform_device *pdev)
+{
+	struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
+	int irq = platform_get_irq(pdev, 0);
+
+	free_irq(irq, dma_dev);
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&dma_dev->ddev);
+
+	return 0;
+}
+
+static const struct of_device_id gdma_of_match_table[] = {
+	{ .compatible = "ralink,rt2880-gdma" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, gdma_of_match_table);
+
+static struct platform_driver gdma_dma_driver = {
+	.probe = gdma_dma_probe,
+	.remove = gdma_dma_remove,
+	.driver = {
+		.name = "gdma-rt2880",
+		.owner = THIS_MODULE,
+		.of_match_table = gdma_of_match_table,
+	},
+};
+module_platform_driver(gdma_dma_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("GDMA4740 DMA driver");
+MODULE_LICENSE("GPL v2");
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1073,6 +1073,7 @@ struct dma_chan *dma_request_slave_chann
 						  const char *name);
 struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name);
 void dma_release_channel(struct dma_chan *chan);
+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
 #else
 static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 {