diff options
author | Biwen Li <biwen.li@nxp.com> | 2019-05-06 12:13:14 +0800 |
---|---|---|
committer | Petr Štetiar <ynezz@true.cz> | 2019-06-06 15:40:09 +0200 |
commit | 5159d71983e649a89568e46d9ff02731beedd571 (patch) | |
tree | 2c669f4d9651c1fe26955778e5fee119543a85ce /target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch | |
parent | 639d127b831a2af29a03ab07b262abf46ada3b4e (diff) | |
download | upstream-5159d71983e649a89568e46d9ff02731beedd571.tar.gz upstream-5159d71983e649a89568e46d9ff02731beedd571.tar.bz2 upstream-5159d71983e649a89568e46d9ff02731beedd571.zip |
layerscape: update patches-4.14 to LSDK 19.03
All patches of LSDK 19.03 were ported to the OpenWrt kernel.
We still used an all-in-one patch for each IP/feature for
OpenWrt.
Below are the changes this patch introduced.
- Updated original IP/feature patches to LSDK 19.03.
- Added new IP/feature patches for eTSEC/PTP/TMU.
- Squashed scattered patches into IP/feature patches.
- Updated config-4.14 correspondingly.
- Refreshed all patches.
More info about LSDK and the kernel:
- https://lsdk.github.io/components.html
- https://source.codeaurora.org/external/qoriq/qoriq-components/linux
Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch')
-rw-r--r-- | target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch | 511 |
1 files changed, 197 insertions, 314 deletions
diff --git a/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch index aee4ae2946..e39bae0d1d 100644 --- a/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.14/802-dma-support-layerscape.patch @@ -1,35 +1,40 @@ -From 731adfb43892a1d7fe00e2036200f33a9b61a589 Mon Sep 17 00:00:00 2001 +From 5cb4bc977d933323429050033da9c701b24df43e Mon Sep 17 00:00:00 2001 From: Biwen Li <biwen.li@nxp.com> -Date: Tue, 30 Oct 2018 18:26:02 +0800 -Subject: [PATCH 19/40] dma: support layerscape +Date: Wed, 17 Apr 2019 18:58:23 +0800 +Subject: [PATCH] dma: support layerscape +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + This is an integrated patch of dma for layerscape +Signed-off-by: Biwen Li <biwen.li@nxp.com> Signed-off-by: Catalin Horghidan <catalin.horghidan@nxp.com> Signed-off-by: Changming Huang <jerry.huang@nxp.com> Signed-off-by: Horia Geantă <horia.geanta@nxp.com> +Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com> Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com> +Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com> Signed-off-by: Peng Ma <peng.ma@nxp.com> Signed-off-by: Radu Alexe <radu.alexe@nxp.com> Signed-off-by: Rajiv Vishwakarma <rajiv.vishwakarma@nxp.com> Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com> Signed-off-by: Wen He <wen.he_1@nxp.com> Signed-off-by: Yuan Yao <yao.yuan@nxp.com> -Signed-off-by: Biwen Li <biwen.li@nxp.com> --- - .../devicetree/bindings/dma/fsl-qdma.txt | 51 + - drivers/dma/Kconfig | 33 +- - drivers/dma/Makefile | 3 + - drivers/dma/caam_dma.c | 462 ++++++ - drivers/dma/dpaa2-qdma/Kconfig | 8 + - drivers/dma/dpaa2-qdma/Makefile | 8 + - drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 ++++++++++++ - drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++ - drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++ - drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++ - 
drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 +++ - drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++ - 12 files changed, 4267 insertions(+), 1 deletion(-) - create mode 100644 Documentation/devicetree/bindings/dma/fsl-qdma.txt + drivers/dma/Kconfig | 33 +- + drivers/dma/Makefile | 3 + + drivers/dma/caam_dma.c | 462 ++++++++ + drivers/dma/dpaa2-qdma/Kconfig | 8 + + drivers/dma/dpaa2-qdma/Makefile | 8 + + drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 781 ++++++++++++++ + drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 181 ++++ + drivers/dma/dpaa2-qdma/dpdmai.c | 515 +++++++++ + drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 +++++++++ + drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++ + drivers/dma/fsl-edma.c | 66 +- + drivers/dma/fsl-qdma.c | 1278 +++++++++++++++++++++++ + 12 files changed, 4073 insertions(+), 5 deletions(-) create mode 100644 drivers/dma/caam_dma.c create mode 100644 drivers/dma/dpaa2-qdma/Kconfig create mode 100644 drivers/dma/dpaa2-qdma/Makefile @@ -40,60 +45,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h create mode 100644 drivers/dma/fsl-qdma.c ---- /dev/null -+++ b/Documentation/devicetree/bindings/dma/fsl-qdma.txt -@@ -0,0 +1,51 @@ -+* Freescale queue Direct Memory Access(qDMA) Controller -+ -+The qDMA supports channel virtualization by allowing DMA jobs to be enqueued into -+different command queues. Core can initiate a DMA transaction by preparing a command -+descriptor for each DMA job and enqueuing this job to a command queue. -+ -+* qDMA Controller -+Required properties: -+- compatible : -+ should be "fsl,ls1021a-qdma". -+- reg : Specifies base physical address(s) and size of the qDMA registers. -+ The 1st region is qDMA control register's address and size. -+ The 2nd region is status queue control register's address and size. -+ The 3rd region is virtual block control register's address and size. -+- interrupts : A list of interrupt-specifiers, one for each entry in -+ interrupt-names. 
-+- interrupt-names : Should contain: -+ "qdma-queue0" - the block0 interrupt -+ "qdma-queue1" - the block1 interrupt -+ "qdma-queue2" - the block2 interrupt -+ "qdma-queue3" - the block3 interrupt -+ "qdma-error" - the error interrupt -+- channels : Number of DMA channels supported -+- block-number : the virtual block number -+- block-offset : the offset of different virtual block -+- queues : the number of command queue per virtual block -+- status-sizes : status queue size of per virtual block -+- queue-sizes : command queue size of per virtual block, the size number based on queues -+- big-endian: If present registers and hardware scatter/gather descriptors -+ of the qDMA are implemented in big endian mode, otherwise in little -+ mode. -+ -+Examples: -+ qdma: qdma@8390000 { -+ compatible = "fsl,ls1021a-qdma"; -+ reg = <0x0 0x8388000 0x0 0x1000>, /* Controller regs */ -+ <0x0 0x8389000 0x0 0x1000>, /* Status regs */ -+ <0x0 0x838a000 0x0 0x2000>; /* Block regs */ -+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>, -+ <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>; -+ interrupt-names = "qdma-error", -+ "qdma-queue0", "qdma-queue1"; -+ channels = <8>; -+ block-number = <2>; -+ block-offset = <0x1000>; -+ queues = <2>; -+ status-sizes = <64>; -+ queue-sizes = <64 64>; -+ big-endian; -+ }; --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -129,6 +129,24 @@ config COH901318 @@ -659,7 +610,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o --- /dev/null +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c -@@ -0,0 +1,940 @@ +@@ -0,0 +1,781 @@ +/* + * drivers/dma/dpaa2-qdma/dpaa2-qdma.c + * @@ -693,6 +644,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/iommu.h> ++#include <linux/sys_soc.h> + +#include "../virt-dma.h" + @@ -765,9 +717,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + sizeof(struct dpaa2_fl_entry) * 3; + + comp_temp->qchan = 
dpaa2_chan; -+ comp_temp->sg_blk_num = 0; -+ INIT_LIST_HEAD(&comp_temp->sg_src_head); -+ INIT_LIST_HEAD(&comp_temp->sg_dst_head); + return comp_temp; + } + comp_temp = list_first_entry(&dpaa2_chan->comp_free, @@ -802,7 +751,8 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +/* first frame list for descriptor buffer */ +static void dpaa2_qdma_populate_first_framel( + struct dpaa2_fl_entry *f_list, -+ struct dpaa2_qdma_comp *dpaa2_comp) ++ struct dpaa2_qdma_comp *dpaa2_comp, ++ bool wrt_changed) +{ + struct dpaa2_qdma_sd_d *sdd; + @@ -811,7 +761,12 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + /* source and destination descriptor */ + sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */ + sdd++; -+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */ ++ ++ /* dest descriptor CMD */ ++ if (wrt_changed) ++ sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT); ++ else ++ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); + + memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); + /* first frame list to source descriptor */ @@ -855,11 +810,15 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + dma_addr_t src, size_t len, unsigned long flags) +{ + struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); ++ struct dpaa2_qdma_engine *dpaa2_qdma; + struct dpaa2_qdma_comp *dpaa2_comp; + struct dpaa2_fl_entry *f_list; ++ bool wrt_changed; + uint32_t format; + ++ dpaa2_qdma = dpaa2_chan->qdma; + dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan); ++ wrt_changed = dpaa2_qdma->qdma_wrtype_fixup; + +#ifdef LONG_FORMAT + format = QDMA_FD_LONG_FORMAT; @@ -873,7 +832,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + +#ifdef LONG_FORMAT + /* first frame list for descriptor buffer (logn format) */ -+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp); ++ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed); + + f_list++; +#endif @@ -883,155 +842,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + return 
vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags); +} + -+static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk( -+ struct dpaa2_qdma_comp *dpaa2_comp, -+ struct dpaa2_qdma_chan *dpaa2_chan) -+{ -+ struct qdma_sg_blk *sg_blk = NULL; -+ dma_addr_t phy_sgb; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags); -+ if (list_empty(&dpaa2_chan->sgb_free)) { -+ sg_blk = (struct qdma_sg_blk *)dma_pool_alloc( -+ dpaa2_chan->sg_blk_pool, -+ GFP_NOWAIT, &phy_sgb); -+ if (!sg_blk) { -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+ return sg_blk; -+ } -+ sg_blk->blk_virt_addr = (void *)(sg_blk + 1); -+ sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk); -+ } else { -+ sg_blk = list_first_entry(&dpaa2_chan->sgb_free, -+ struct qdma_sg_blk, list); -+ list_del(&sg_blk->list); -+ } -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+ -+ return sg_blk; -+} -+ -+static uint32_t dpaa2_qdma_populate_sg(struct device *dev, -+ struct dpaa2_qdma_chan *dpaa2_chan, -+ struct dpaa2_qdma_comp *dpaa2_comp, -+ struct scatterlist *dst_sg, u32 dst_nents, -+ struct scatterlist *src_sg, u32 src_nents) -+{ -+ struct dpaa2_qdma_sg *src_sge; -+ struct dpaa2_qdma_sg *dst_sge; -+ struct qdma_sg_blk *sg_blk; -+ struct qdma_sg_blk *sg_blk_dst; -+ dma_addr_t src; -+ dma_addr_t dst; -+ uint32_t num; -+ uint32_t blocks; -+ uint32_t len = 0; -+ uint32_t total_len = 0; -+ int i, j = 0; -+ -+ num = min(dst_nents, src_nents); -+ blocks = num / (NUM_SG_PER_BLK - 1); -+ if (num % (NUM_SG_PER_BLK - 1)) -+ blocks += 1; -+ if (dpaa2_comp->sg_blk_num < blocks) { -+ len = blocks - dpaa2_comp->sg_blk_num; -+ for (i = 0; i < len; i++) { -+ /* source sg blocks */ -+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan); -+ if (!sg_blk) -+ return 0; -+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head); -+ /* destination sg blocks */ -+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan); -+ if (!sg_blk) -+ return 0; -+ list_add_tail(&sg_blk->list, 
&dpaa2_comp->sg_dst_head); -+ } -+ } else { -+ len = dpaa2_comp->sg_blk_num - blocks; -+ for (i = 0; i < len; i++) { -+ spin_lock(&dpaa2_chan->queue_lock); -+ /* handle source sg blocks */ -+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head, -+ struct qdma_sg_blk, list); -+ list_del(&sg_blk->list); -+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free); -+ /* handle destination sg blocks */ -+ sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head, -+ struct qdma_sg_blk, list); -+ list_del(&sg_blk->list); -+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free); -+ spin_unlock(&dpaa2_chan->queue_lock); -+ } -+ } -+ dpaa2_comp->sg_blk_num = blocks; -+ -+ /* get the first source sg phy address */ -+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head, -+ struct qdma_sg_blk, list); -+ dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr; -+ /* get the first destinaiton sg phy address */ -+ sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head, -+ struct qdma_sg_blk, list); -+ dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr; -+ -+ for (i = 0; i < blocks; i++) { -+ src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr; -+ dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr; -+ -+ for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) { -+ len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg)); -+ if (0 == len) -+ goto fetch; -+ total_len += len; -+ src = sg_dma_address(src_sg); -+ dst = sg_dma_address(dst_sg); -+ -+ /* source SG */ -+ src_sge->addr_lo = src; -+ src_sge->addr_hi = (src >> 32); -+ src_sge->data_len.data_len_sl0 = len; -+ src_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ src_sge->ctrl.fmt = QDMA_SG_FMT_SDB; -+ /* destination SG */ -+ dst_sge->addr_lo = dst; -+ dst_sge->addr_hi = (dst >> 32); -+ dst_sge->data_len.data_len_sl0 = len; -+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB; -+fetch: -+ num--; -+ if (0 == num) { -+ src_sge->ctrl.f = QDMA_SG_F; -+ dst_sge->ctrl.f = QDMA_SG_F; -+ goto end; -+ } -+ dst_sg = sg_next(dst_sg); -+ src_sg = 
sg_next(src_sg); -+ src_sge++; -+ dst_sge++; -+ if (j == (NUM_SG_PER_BLK - 2)) { -+ /* for next blocks, extension */ -+ sg_blk = list_next_entry(sg_blk, list); -+ sg_blk_dst = list_next_entry(sg_blk_dst, list); -+ src_sge->addr_lo = sg_blk->blk_bus_addr; -+ src_sge->addr_hi = sg_blk->blk_bus_addr >> 32; -+ src_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE; -+ dst_sge->addr_lo = sg_blk_dst->blk_bus_addr; -+ dst_sge->addr_hi = -+ sg_blk_dst->blk_bus_addr >> 32; -+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE; -+ } -+ } -+ } -+ -+end: -+ return total_len; -+} -+ +static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ @@ -1245,7 +1055,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + ppriv->nctx.desired_cpu = 1; + ppriv->nctx.id = ppriv->rsp_fqid; + ppriv->nctx.cb = dpaa2_qdma_fqdan_cb; -+ err = dpaa2_io_service_register(NULL, &ppriv->nctx); ++ err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev); + if (err) { + dev_err(dev, "Notification register failed\n"); + goto err_service; @@ -1263,11 +1073,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + return 0; + +err_store: -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); +err_service: + ppriv--; + while (ppriv >= priv->ppriv) { -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); + dpaa2_io_store_destroy(ppriv->store); + ppriv--; + } @@ -1288,10 +1098,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv) +{ + struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; ++ struct device *dev = priv->dev; + int i; + + for (i = 0; i < priv->num_pairs; i++) { -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); ++ dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev); + ppriv++; + } +} @@ -1348,22 +1159,6 @@ Signed-off-by: 
Biwen Li <biwen.li@nxp.com> + return err; +} + -+static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan, -+ struct list_head *head) -+{ -+ struct qdma_sg_blk *sgb_tmp, *_sgb_tmp; -+ /* free the QDMA SG pool block */ -+ list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) { -+ sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *) -+ sgb_tmp->blk_virt_addr - 1); -+ sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr -+ - sizeof(*sgb_tmp); -+ dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr, -+ sgb_tmp->blk_bus_addr); -+ } -+ -+} -+ +static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan, + struct list_head *head) +{ @@ -1374,10 +1169,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + dma_pool_free(qchan->fd_pool, + comp_tmp->fd_virt_addr, + comp_tmp->fd_bus_addr); -+ /* free the SG source block on comp */ -+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head); -+ /* free the SG destination block on comp */ -+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head); + list_del(&comp_tmp->list); + kfree(comp_tmp); + } @@ -1395,9 +1186,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + qchan = &dpaa2_qdma->chans[i]; + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used); + dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free); -+ dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free); + dma_pool_destroy(qchan->fd_pool); -+ dma_pool_destroy(qchan->sg_blk_pool); + } +} + @@ -1418,15 +1207,10 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + dev, FD_POOL_SIZE, 32, 0); + if (!dpaa2_chan->fd_pool) + return -1; -+ dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool", -+ dev, SG_POOL_SIZE, 32, 0); -+ if (!dpaa2_chan->sg_blk_pool) -+ return -1; + + spin_lock_init(&dpaa2_chan->queue_lock); + INIT_LIST_HEAD(&dpaa2_chan->comp_used); + INIT_LIST_HEAD(&dpaa2_chan->comp_free); -+ INIT_LIST_HEAD(&dpaa2_chan->sgb_free); + } + return 0; +} @@ -1451,7 +1235,10 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + /* obtain a MC portal */ + err = 
fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io); + if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); ++ if (err == -ENXIO) ++ err = -EPROBE_DEFER; ++ else ++ dev_err(dev, "MC portal allocation failed\n"); + goto err_mcportal; + } + @@ -1500,6 +1287,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + goto err_reg; + } + ++ if (soc_device_match(soc_fixup_tuning)) ++ dpaa2_qdma->qdma_wrtype_fixup = true; ++ else ++ dpaa2_qdma->qdma_wrtype_fixup = false; ++ + dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask); + dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask); @@ -1602,7 +1394,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +MODULE_LICENSE("Dual BSD/GPL"); --- /dev/null +++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h -@@ -0,0 +1,227 @@ +@@ -0,0 +1,181 @@ +/* Copyright 2015 NXP Semiconductor Inc. + * + * Redistribution and use in source and binary forms, with or without @@ -1641,7 +1433,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + +#define DPAA2_QDMA_STORE_SIZE 16 +#define NUM_CH 8 -+#define NUM_SG_PER_BLK 16 + +#define QDMA_DMR_OFFSET 0x0 +#define QDMA_DQ_EN (0 << 30) @@ -1672,37 +1463,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +/* Destination descriptor command write transaction type for RBP=0: + coherent copy of cacheable memory */ +#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28) -+ -+#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */ -+#define QDMA_SG_FMT_FDS 0x1 /* frame data section */ -+#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */ -+#define QDMA_SG_SL_SHORT 0x1 /* short length */ -+#define QDMA_SG_SL_LONG 0x0 /* short length */ -+#define QDMA_SG_F 0x1 /* last sg entry */ -+struct dpaa2_qdma_sg { -+ uint32_t addr_lo; /* address 0:31 */ -+ uint32_t addr_hi:17; /* address 32:48 */ -+ uint32_t rsv:15; -+ union { -+ uint32_t data_len_sl0; /* SL=0, the long format */ -+ struct { -+ uint32_t len:17; /* SL=1, the short format */ -+ uint32_t reserve:3; -+ uint32_t sf:1; -+ uint32_t 
sr:1; -+ uint32_t size:10; /* buff size */ -+ } data_len_sl1; -+ } data_len; /* AVAIL_LENGTH */ -+ struct { -+ uint32_t bpid:14; -+ uint32_t ivp:1; -+ uint32_t mbt:1; -+ uint32_t offset:12; -+ uint32_t fmt:2; -+ uint32_t sl:1; -+ uint32_t f:1; -+ } ctrl; -+} __attribute__((__packed__)); ++#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28) + +#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */ +#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */ @@ -1710,8 +1471,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> +#define QMAN_FD_SL_DISABLE (0 << 14) /* short lengthe disabled */ +#define QMAN_FD_SL_ENABLE (1 << 14) /* short lengthe enabled */ + -+#define QDMA_SB_FRAME (0 << 28) /* single frame */ -+#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */ +#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */ +#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */ + @@ -1747,35 +1506,19 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + struct mutex dpaa2_queue_mutex; + spinlock_t queue_lock; + struct dma_pool *fd_pool; -+ struct dma_pool *sg_blk_pool; + + struct list_head comp_used; + struct list_head comp_free; + -+ struct list_head sgb_free; -+}; -+ -+struct qdma_sg_blk { -+ dma_addr_t blk_bus_addr; -+ void *blk_virt_addr; -+ struct list_head list; +}; + +struct dpaa2_qdma_comp { + dma_addr_t fd_bus_addr; + dma_addr_t fl_bus_addr; + dma_addr_t desc_bus_addr; -+ dma_addr_t sge_src_bus_addr; -+ dma_addr_t sge_dst_bus_addr; + void *fd_virt_addr; + void *fl_virt_addr; + void *desc_virt_addr; -+ void *sg_src_virt_addr; -+ void *sg_dst_virt_addr; -+ struct qdma_sg_blk *sg_blk; -+ uint32_t sg_blk_num; -+ struct list_head sg_src_head; -+ struct list_head sg_dst_head; + struct dpaa2_qdma_chan *qchan; + struct virt_dma_desc vdesc; + struct list_head list; @@ -1785,6 +1528,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + struct dma_device dma_dev; + u32 n_chans; + struct dpaa2_qdma_chan chans[NUM_CH]; ++ bool qdma_wrtype_fixup; 
+ + struct dpaa2_qdma_priv *priv; +}; @@ -1821,14 +1565,16 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + struct dpaa2_qdma_priv *priv; +}; + ++static struct soc_device_attribute soc_fixup_tuning[] = { ++ { .family = "QorIQ LX2160A"}, ++ { }, ++}; ++ +/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */ +#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \ + sizeof(struct dpaa2_fl_entry) * 3 + \ + sizeof(struct dpaa2_qdma_sd_d) * 2) + -+/* qdma_sg_blk + 16 SGs */ -+#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\ -+ sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK) +#endif /* __DPAA2_QDMA_H */ --- /dev/null +++ b/drivers/dma/dpaa2-qdma/dpdmai.c @@ -3097,6 +2843,143 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com> + MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid) + +#endif /* _FSL_DPDMAI_CMD_H */ +--- a/drivers/dma/fsl-edma.c ++++ b/drivers/dma/fsl-edma.c +@@ -146,6 +146,8 @@ struct fsl_edma_slave_config { + u32 dev_addr; + u32 burst; + u32 attr; ++ dma_addr_t dma_dev_addr; ++ enum dma_data_direction dma_dir; + }; + + struct fsl_edma_chan { +@@ -342,6 +344,53 @@ static int fsl_edma_resume(struct dma_ch + return 0; + } + ++static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan) ++{ ++ if (fsl_chan->fsc.dma_dir != DMA_NONE) ++ dma_unmap_resource(fsl_chan->vchan.chan.device->dev, ++ fsl_chan->fsc.dma_dev_addr, ++ fsl_chan->fsc.burst, fsl_chan->fsc.dma_dir, 0); ++ fsl_chan->fsc.dma_dir = DMA_NONE; ++} ++ ++static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan, ++ enum dma_transfer_direction dir) ++{ ++ struct device *dev = fsl_chan->vchan.chan.device->dev; ++ enum dma_data_direction dma_dir; ++ ++ switch (dir) { ++ case DMA_MEM_TO_DEV: ++ dma_dir = DMA_FROM_DEVICE; ++ break; ++ case DMA_DEV_TO_MEM: ++ dma_dir = DMA_TO_DEVICE; ++ break; ++ case DMA_DEV_TO_DEV: ++ dma_dir = DMA_BIDIRECTIONAL; ++ break; ++ default: ++ dma_dir = DMA_NONE; ++ break; ++ } ++ ++ /* Already mapped for this config? 
*/ ++ if (fsl_chan->fsc.dma_dir == dma_dir) ++ return true; ++ ++ fsl_edma_unprep_slave_dma(fsl_chan); ++ fsl_chan->fsc.dma_dev_addr = dma_map_resource(dev, ++ fsl_chan->fsc.dev_addr, ++ fsl_chan->fsc.burst, ++ dma_dir, 0); ++ if (dma_mapping_error(dev, fsl_chan->fsc.dma_dev_addr)) ++ return false; ++ ++ fsl_chan->fsc.dma_dir = dma_dir; ++ ++ return true; ++} ++ + static int fsl_edma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) + { +@@ -361,6 +410,7 @@ static int fsl_edma_slave_config(struct + } else { + return -EINVAL; + } ++ fsl_edma_unprep_slave_dma(fsl_chan); + return 0; + } + +@@ -553,6 +603,9 @@ static struct dma_async_tx_descriptor *f + if (!is_slave_direction(fsl_chan->fsc.dir)) + return NULL; + ++ if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir)) ++ return NULL; ++ + sg_len = buf_len / period_len; + fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); + if (!fsl_desc) +@@ -572,11 +625,11 @@ static struct dma_async_tx_descriptor *f + + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) { + src_addr = dma_buf_next; +- dst_addr = fsl_chan->fsc.dev_addr; ++ dst_addr = fsl_chan->fsc.dma_dev_addr; + soff = fsl_chan->fsc.addr_width; + doff = 0; + } else { +- src_addr = fsl_chan->fsc.dev_addr; ++ src_addr = fsl_chan->fsc.dma_dev_addr; + dst_addr = dma_buf_next; + soff = 0; + doff = fsl_chan->fsc.addr_width; +@@ -606,6 +659,9 @@ static struct dma_async_tx_descriptor *f + if (!is_slave_direction(fsl_chan->fsc.dir)) + return NULL; + ++ if (!fsl_edma_prep_slave_dma(fsl_chan, fsl_chan->fsc.dir)) ++ return NULL; ++ + fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); + if (!fsl_desc) + return NULL; +@@ -618,11 +674,11 @@ static struct dma_async_tx_descriptor *f + + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) { + src_addr = sg_dma_address(sg); +- dst_addr = fsl_chan->fsc.dev_addr; ++ dst_addr = fsl_chan->fsc.dma_dev_addr; + soff = fsl_chan->fsc.addr_width; + doff = 0; + } else { +- src_addr = fsl_chan->fsc.dev_addr; ++ src_addr = 
fsl_chan->fsc.dma_dev_addr; + dst_addr = sg_dma_address(sg); + soff = 0; + doff = fsl_chan->fsc.addr_width; +@@ -802,6 +858,7 @@ static void fsl_edma_free_chan_resources + fsl_edma_chan_mux(fsl_chan, 0, false); + fsl_chan->edesc = NULL; + vchan_get_all_descriptors(&fsl_chan->vchan, &head); ++ fsl_edma_unprep_slave_dma(fsl_chan); + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); + + vchan_dma_desc_free_list(&fsl_chan->vchan, &head); +@@ -937,6 +994,7 @@ static int fsl_edma_probe(struct platfor + fsl_chan->slave_id = 0; + fsl_chan->idle = true; + fsl_chan->vchan.desc_free = fsl_edma_free_desc; ++ fsl_chan->fsc.dma_dir = DMA_NONE; + vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); + + edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i)); --- /dev/null +++ b/drivers/dma/fsl-qdma.c @@ -0,0 +1,1278 @@ |