author | Biwen Li <biwen.li@nxp.com> | 2018-12-12 09:56:18 +0800 |
---|---|---|
committer | Hauke Mehrtens <hauke@hauke-m.de> | 2018-12-18 20:17:23 +0100 |
commit | 68904cb8fda3692c19fb39d3f99633c9d12efed7 (patch) | |
tree | 56b86c6463e7a554acc4c1e55d6260dc81f28268 /target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch | |
parent | 328530c6e7569d7be24e3524483f4453910003e9 (diff) | |
layerscape: drop kernel 4.9 support
This patch drops kernel 4.9 support from the layerscape target.
Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch')
-rw-r--r-- | target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch | 4372 |
1 file changed, 0 insertions, 4372 deletions
diff --git a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch b/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch deleted file mode 100644 index fb42fdaa5f..0000000000 --- a/target/linux/layerscape/patches-4.9/805-dma-support-layerscape.patch +++ /dev/null @@ -1,4372 +0,0 @@ -From d3d537ebe9884e7d945ab74bb02312d0c2c9b08d Mon Sep 17 00:00:00 2001 -From: Yangbo Lu <yangbo.lu@nxp.com> -Date: Thu, 5 Jul 2018 17:32:53 +0800 -Subject: [PATCH 17/32] dma: support layerscape - -This is an integrated patch for layerscape dma support. - -Signed-off-by: jiaheng.fan <jiaheng.fan@nxp.com> -Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> ---- - drivers/dma/Kconfig | 31 + - drivers/dma/Makefile | 3 + - drivers/dma/caam_dma.c | 563 ++++++++++ - drivers/dma/dpaa2-qdma/Kconfig | 8 + - drivers/dma/dpaa2-qdma/Makefile | 8 + - drivers/dma/dpaa2-qdma/dpaa2-qdma.c | 940 +++++++++++++++++ - drivers/dma/dpaa2-qdma/dpaa2-qdma.h | 227 +++++ - drivers/dma/dpaa2-qdma/dpdmai.c | 515 ++++++++++ - drivers/dma/dpaa2-qdma/fsl_dpdmai.h | 521 ++++++++++ - drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h | 222 ++++ - drivers/dma/fsl-qdma.c | 1243 +++++++++++++++++++++++ - 11 files changed, 4281 insertions(+) - create mode 100644 drivers/dma/caam_dma.c - create mode 100644 drivers/dma/dpaa2-qdma/Kconfig - create mode 100644 drivers/dma/dpaa2-qdma/Makefile - create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.c - create mode 100644 drivers/dma/dpaa2-qdma/dpaa2-qdma.h - create mode 100644 drivers/dma/dpaa2-qdma/dpdmai.c - create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai.h - create mode 100644 drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h - create mode 100644 drivers/dma/fsl-qdma.c - ---- a/drivers/dma/Kconfig -+++ b/drivers/dma/Kconfig -@@ -192,6 +192,20 @@ config FSL_EDMA - multiplexing capability for DMA request sources(slot). - This module can be found on Freescale Vybrid and LS-1 SoCs. - -+config FSL_QDMA -+ tristate "Freescale qDMA engine support" -+ select DMA_ENGINE -+ select DMA_VIRTUAL_CHANNELS -+ select DMA_ENGINE_RAID -+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH -+ help -+ Support the Freescale qDMA engine with command queue and legacy mode. -+ Channel virtualization is supported through enqueuing of DMA jobs to, -+ or dequeuing DMA jobs from, different work queues. -+ This module can be found on Freescale LS SoCs. -+ -+source drivers/dma/dpaa2-qdma/Kconfig -+ - config FSL_RAID - tristate "Freescale RAID engine Support" - depends on FSL_SOC && !ASYNC_TX_ENABLE_CHANNEL_SWITCH -@@ -564,6 +578,23 @@ config ZX_DMA - help - Support the DMA engine for ZTE ZX296702 platform devices. - -+config CRYPTO_DEV_FSL_CAAM_DMA -+ tristate "CAAM DMA engine support" -+ depends on CRYPTO_DEV_FSL_CAAM_JR -+ default y -+ select DMA_ENGINE -+ select ASYNC_CORE -+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH -+ help -+ Selecting this will offload the DMA operations for users of -+ the scatter gather memcopy API to the CAAM via job rings. The -+ CAAM is a hardware module that provides hardware acceleration to -+ cryptographic operations. It has a built-in DMA controller that can -+ be programmed to read/write cryptographic data. This module defines -+ a DMA driver that uses the DMA capabilities of the CAAM. -+ -+ To compile this as a module, choose M here: the module -+ will be called caam_dma. 
- - # driver files - source "drivers/dma/bestcomm/Kconfig" ---- a/drivers/dma/Makefile -+++ b/drivers/dma/Makefile -@@ -29,6 +29,8 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/ - obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o - obj-$(CONFIG_FSL_DMA) += fsldma.o - obj-$(CONFIG_FSL_EDMA) += fsl-edma.o -+obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o -+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma/ - obj-$(CONFIG_FSL_RAID) += fsl_raid.o - obj-$(CONFIG_HSU_DMA) += hsu/ - obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o -@@ -67,6 +69,7 @@ obj-$(CONFIG_TI_DMA_CROSSBAR) += ti-dma- - obj-$(CONFIG_TI_EDMA) += edma.o - obj-$(CONFIG_XGENE_DMA) += xgene-dma.o - obj-$(CONFIG_ZX_DMA) += zx296702_dma.o -+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_DMA) += caam_dma.o - - obj-y += qcom/ - obj-y += xilinx/ ---- /dev/null -+++ b/drivers/dma/caam_dma.c -@@ -0,0 +1,563 @@ -+/* -+ * caam support for SG DMA -+ * -+ * Copyright 2016 Freescale Semiconductor, Inc -+ * Copyright 2017 NXP -+ */ -+ -+#include <linux/module.h> -+#include <linux/platform_device.h> -+#include <linux/dma-mapping.h> -+#include <linux/interrupt.h> -+#include <linux/slab.h> -+#include <linux/debugfs.h> -+ -+#include <linux/dmaengine.h> -+#include "dmaengine.h" -+ -+#include "../crypto/caam/regs.h" -+#include "../crypto/caam/jr.h" -+#include "../crypto/caam/error.h" -+#include "../crypto/caam/intern.h" -+#include "../crypto/caam/desc_constr.h" -+#include "../crypto/caam/sg_sw_sec4.h" -+ -+#define DESC_DMA_MEMCPY_LEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / \ -+ CAAM_CMD_SZ) -+ -+/* This is max chunk size of a DMA transfer. If a buffer is larger than this -+ * value it is internally broken into chunks of max CAAM_DMA_CHUNK_SIZE bytes -+ * and for each chunk a DMA transfer request is issued. -+ * This value is the largest number on 16 bits that is a multiple of 256 bytes -+ * (the largest configurable CAAM DMA burst size). 
-+ */ -+#define CAAM_DMA_CHUNK_SIZE 65280 -+ -+struct caam_dma_sh_desc { -+ u32 desc[DESC_DMA_MEMCPY_LEN] ____cacheline_aligned; -+ dma_addr_t desc_dma; -+}; -+ -+/* caam dma extended descriptor */ -+struct caam_dma_edesc { -+ struct dma_async_tx_descriptor async_tx; -+ struct list_head node; -+ struct caam_dma_ctx *ctx; -+ dma_addr_t src_dma; -+ dma_addr_t dst_dma; -+ unsigned int src_len; -+ unsigned int dst_len; -+ struct sec4_sg_entry *sec4_sg; -+ u32 jd[] ____cacheline_aligned; -+}; -+ -+/* -+ * caam_dma_ctx - per jr/channel context -+ * @chan: dma channel used by async_tx API -+ * @node: list_head used to attach to the global dma_ctx_list -+ * @jrdev: Job Ring device -+ * @submit_q: queue of pending (submitted, but not enqueued) jobs -+ * @done_not_acked: jobs that have been completed by jr, but maybe not acked -+ * @edesc_lock: protects extended descriptor -+ */ -+struct caam_dma_ctx { -+ struct dma_chan chan; -+ struct list_head node; -+ struct device *jrdev; -+ struct list_head submit_q; -+ struct list_head done_not_acked; -+ spinlock_t edesc_lock; -+}; -+ -+static struct dma_device *dma_dev; -+static struct caam_dma_sh_desc *dma_sh_desc; -+static LIST_HEAD(dma_ctx_list); -+ -+static dma_cookie_t caam_dma_tx_submit(struct dma_async_tx_descriptor *tx) -+{ -+ struct caam_dma_edesc *edesc = NULL; -+ struct caam_dma_ctx *ctx = NULL; -+ dma_cookie_t cookie; -+ -+ edesc = container_of(tx, struct caam_dma_edesc, async_tx); -+ ctx = container_of(tx->chan, struct caam_dma_ctx, chan); -+ -+ spin_lock_bh(&ctx->edesc_lock); -+ -+ cookie = dma_cookie_assign(tx); -+ list_add_tail(&edesc->node, &ctx->submit_q); -+ -+ spin_unlock_bh(&ctx->edesc_lock); -+ -+ return cookie; -+} -+ -+static unsigned int caam_dma_sg_dma_len(struct scatterlist *sg, -+ unsigned int nents) -+{ -+ unsigned int len; -+ -+ for (len = 0; sg && nents; sg = sg_next(sg), nents--) -+ len += sg_dma_len(sg); -+ -+ return len; -+} -+ -+static struct caam_dma_edesc * -+caam_dma_sg_edesc_alloc(struct dma_chan *chan, -+ struct scatterlist *dst_sg, unsigned int dst_nents, -+ struct scatterlist *src_sg, unsigned int src_nents, -+ unsigned long flags) -+{ -+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx, -+ chan); -+ struct device *jrdev = ctx->jrdev; -+ struct caam_dma_edesc *edesc; -+ struct sec4_sg_entry *sec4_sg; -+ dma_addr_t sec4_sg_dma_src; -+ unsigned int sec4_sg_bytes; -+ -+ if (!dst_sg || !src_sg || !dst_nents || !src_nents) -+ return NULL; -+ -+ sec4_sg_bytes = (src_nents + dst_nents) * sizeof(*sec4_sg); -+ -+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN + sec4_sg_bytes, -+ GFP_DMA | GFP_NOWAIT); -+ if (!edesc) -+ return ERR_PTR(-ENOMEM); -+ -+ edesc->src_len = caam_dma_sg_dma_len(src_sg, src_nents); -+ edesc->dst_len = caam_dma_sg_dma_len(dst_sg, dst_nents); -+ if (edesc->src_len != edesc->dst_len) { -+ dev_err(jrdev, "%s: src(%u) and dst(%u) len mismatch.\n", -+ __func__, edesc->src_len, edesc->dst_len); -+ kfree(edesc); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ dma_async_tx_descriptor_init(&edesc->async_tx, chan); -+ edesc->async_tx.tx_submit = caam_dma_tx_submit; -+ edesc->async_tx.flags = flags; -+ edesc->async_tx.cookie = -EBUSY; -+ -+ /* Prepare SEC SGs */ -+ edesc->sec4_sg = (void *)edesc + offsetof(struct caam_dma_edesc, jd) + -+ DESC_JOB_IO_LEN; -+ -+ sec4_sg = edesc->sec4_sg; -+ sg_to_sec4_sg_last(src_sg, src_nents, sec4_sg, 0); -+ -+ sec4_sg += src_nents; -+ sg_to_sec4_sg_last(dst_sg, dst_nents, sec4_sg, 0); -+ -+ sec4_sg_dma_src = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, -+ 
DMA_TO_DEVICE); -+ if (dma_mapping_error(jrdev, sec4_sg_dma_src)) { -+ dev_err(jrdev, "error mapping segments to device\n"); -+ kfree(edesc); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ edesc->src_dma = sec4_sg_dma_src; -+ edesc->dst_dma = sec4_sg_dma_src + src_nents * sizeof(*sec4_sg); -+ edesc->ctx = ctx; -+ -+ return edesc; -+} -+ -+static void caam_jr_chan_free_edesc(struct caam_dma_edesc *edesc) -+{ -+ struct caam_dma_ctx *ctx = edesc->ctx; -+ struct caam_dma_edesc *_edesc = NULL; -+ -+ spin_lock_bh(&ctx->edesc_lock); -+ -+ list_add_tail(&edesc->node, &ctx->done_not_acked); -+ list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) { -+ if (async_tx_test_ack(&edesc->async_tx)) { -+ list_del(&edesc->node); -+ kfree(edesc); -+ } -+ } -+ -+ spin_unlock_bh(&ctx->edesc_lock); -+} -+ -+static void caam_dma_done(struct device *dev, u32 *hwdesc, u32 err, -+ void *context) -+{ -+ struct caam_dma_edesc *edesc = context; -+ struct caam_dma_ctx *ctx = edesc->ctx; -+ dma_async_tx_callback callback; -+ void *callback_param; -+ -+ if (err) -+ caam_jr_strstatus(ctx->jrdev, err); -+ -+ dma_run_dependencies(&edesc->async_tx); -+ -+ spin_lock_bh(&ctx->edesc_lock); -+ dma_cookie_complete(&edesc->async_tx); -+ spin_unlock_bh(&ctx->edesc_lock); -+ -+ callback = edesc->async_tx.callback; -+ callback_param = edesc->async_tx.callback_param; -+ -+ dma_descriptor_unmap(&edesc->async_tx); -+ -+ caam_jr_chan_free_edesc(edesc); -+ -+ if (callback) -+ callback(callback_param); -+} -+ -+static void caam_dma_sg_init_job_desc(struct caam_dma_edesc *edesc) -+{ -+ u32 *jd = edesc->jd; -+ u32 *sh_desc = dma_sh_desc->desc; -+ dma_addr_t desc_dma = dma_sh_desc->desc_dma; -+ -+ /* init the job descriptor */ -+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE); -+ -+ /* set SEQIN PTR */ -+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, LDST_SGF); -+ -+ /* set SEQOUT PTR */ -+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, LDST_SGF); -+ -+#ifdef DEBUG -+ print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1); -+#endif -+} -+ -+/* This function can be called from an interrupt context */ -+static struct dma_async_tx_descriptor * -+caam_dma_prep_sg(struct dma_chan *chan, struct scatterlist *dst_sg, -+ unsigned int dst_nents, struct scatterlist *src_sg, -+ unsigned int src_nents, unsigned long flags) -+{ -+ struct caam_dma_edesc *edesc; -+ -+ /* allocate extended descriptor */ -+ edesc = caam_dma_sg_edesc_alloc(chan, dst_sg, dst_nents, src_sg, -+ src_nents, flags); -+ if (IS_ERR_OR_NULL(edesc)) -+ return ERR_CAST(edesc); -+ -+ /* Initialize job descriptor */ -+ caam_dma_sg_init_job_desc(edesc); -+ -+ return &edesc->async_tx; -+} -+ -+static void caam_dma_memcpy_init_job_desc(struct caam_dma_edesc *edesc) -+{ -+ u32 *jd = edesc->jd; -+ u32 *sh_desc = dma_sh_desc->desc; -+ dma_addr_t desc_dma = dma_sh_desc->desc_dma; -+ -+ /* init the job descriptor */ -+ init_job_desc_shared(jd, desc_dma, desc_len(sh_desc), HDR_REVERSE); -+ -+ /* set SEQIN PTR */ -+ append_seq_in_ptr(jd, edesc->src_dma, edesc->src_len, 0); -+ -+ /* set SEQOUT PTR */ -+ append_seq_out_ptr(jd, edesc->dst_dma, edesc->dst_len, 0); -+ -+#ifdef DEBUG -+ print_hex_dump(KERN_ERR, "caam dma desc@" __stringify(__LINE__) ": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, jd, desc_bytes(jd), 1); -+#endif -+} -+ -+static struct dma_async_tx_descriptor * -+caam_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, -+ size_t len, unsigned long flags) -+{ -+ struct 
caam_dma_edesc *edesc; -+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx, -+ chan); -+ -+ edesc = kzalloc(sizeof(*edesc) + DESC_JOB_IO_LEN, GFP_DMA | GFP_NOWAIT); -+ if (!edesc) -+ return ERR_PTR(-ENOMEM); -+ -+ dma_async_tx_descriptor_init(&edesc->async_tx, chan); -+ edesc->async_tx.tx_submit = caam_dma_tx_submit; -+ edesc->async_tx.flags = flags; -+ edesc->async_tx.cookie = -EBUSY; -+ -+ edesc->src_dma = src; -+ edesc->src_len = len; -+ edesc->dst_dma = dst; -+ edesc->dst_len = len; -+ edesc->ctx = ctx; -+ -+ caam_dma_memcpy_init_job_desc(edesc); -+ -+ return &edesc->async_tx; -+} -+ -+/* This function can be called in an interrupt context */ -+static void caam_dma_issue_pending(struct dma_chan *chan) -+{ -+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx, -+ chan); -+ struct caam_dma_edesc *edesc, *_edesc; -+ -+ spin_lock_bh(&ctx->edesc_lock); -+ list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) { -+ if (caam_jr_enqueue(ctx->jrdev, edesc->jd, -+ caam_dma_done, edesc) < 0) -+ break; -+ list_del(&edesc->node); -+ } -+ spin_unlock_bh(&ctx->edesc_lock); -+} -+ -+static void caam_dma_free_chan_resources(struct dma_chan *chan) -+{ -+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx, -+ chan); -+ struct caam_dma_edesc *edesc, *_edesc; -+ -+ spin_lock_bh(&ctx->edesc_lock); -+ list_for_each_entry_safe(edesc, _edesc, &ctx->submit_q, node) { -+ list_del(&edesc->node); -+ kfree(edesc); -+ } -+ list_for_each_entry_safe(edesc, _edesc, &ctx->done_not_acked, node) { -+ list_del(&edesc->node); -+ kfree(edesc); -+ } -+ spin_unlock_bh(&ctx->edesc_lock); -+} -+ -+static int caam_dma_jr_chan_bind(void) -+{ -+ struct device *jrdev; -+ struct caam_dma_ctx *ctx; -+ int bonds = 0; -+ int i; -+ -+ for (i = 0; i < caam_jr_driver_probed(); i++) { -+ jrdev = caam_jridx_alloc(i); -+ if (IS_ERR(jrdev)) { -+ pr_err("job ring device %d allocation failed\n", i); -+ continue; -+ } -+ -+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); -+ if (!ctx) { -+ caam_jr_free(jrdev); -+ continue; -+ } -+ -+ ctx->chan.device = dma_dev; -+ ctx->chan.private = ctx; -+ -+ ctx->jrdev = jrdev; -+ -+ INIT_LIST_HEAD(&ctx->submit_q); -+ INIT_LIST_HEAD(&ctx->done_not_acked); -+ INIT_LIST_HEAD(&ctx->node); -+ spin_lock_init(&ctx->edesc_lock); -+ -+ dma_cookie_init(&ctx->chan); -+ -+ /* add the context of this channel to the context list */ -+ list_add_tail(&ctx->node, &dma_ctx_list); -+ -+ /* add this channel to the device chan list */ -+ list_add_tail(&ctx->chan.device_node, &dma_dev->channels); -+ -+ bonds++; -+ } -+ -+ return bonds; -+} -+ -+static inline void caam_jr_dma_free(struct dma_chan *chan) -+{ -+ struct caam_dma_ctx *ctx = container_of(chan, struct caam_dma_ctx, -+ chan); -+ -+ list_del(&ctx->node); -+ list_del(&chan->device_node); -+ caam_jr_free(ctx->jrdev); -+ kfree(ctx); -+} -+ -+static void set_caam_dma_desc(u32 *desc) -+{ -+ u32 *jmp_cmd; -+ -+ /* dma shared descriptor */ -+ init_sh_desc(desc, HDR_SHARE_NEVER | (1 << HDR_START_IDX_SHIFT)); -+ -+ /* REG1 = CAAM_DMA_CHUNK_SIZE */ -+ append_math_add_imm_u32(desc, REG1, ZERO, IMM, CAAM_DMA_CHUNK_SIZE); -+ -+ /* REG0 = SEQINLEN - CAAM_DMA_CHUNK_SIZE */ -+ append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, CAAM_DMA_CHUNK_SIZE); -+ -+ /* if (REG0 > 0) -+ * jmp to LABEL1 -+ */ -+ jmp_cmd = append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N | -+ JUMP_COND_MATH_Z); -+ -+ /* REG1 = SEQINLEN */ -+ append_math_sub(desc, REG1, SEQINLEN, ZERO, CAAM_CMD_SZ); -+ -+ /* LABEL1 */ -+ set_jump_tgt_here(desc, jmp_cmd); -+ -+ /* 
VARSEQINLEN = REG1 */ -+ append_math_add(desc, VARSEQINLEN, REG1, ZERO, CAAM_CMD_SZ); -+ -+ /* VARSEQOUTLEN = REG1 */ -+ append_math_add(desc, VARSEQOUTLEN, REG1, ZERO, CAAM_CMD_SZ); -+ -+ /* do FIFO STORE */ -+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_METADATA | LDST_VLF); -+ -+ /* do FIFO LOAD */ -+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | -+ FIFOLD_TYPE_IFIFO | LDST_VLF); -+ -+ /* if (REG0 > 0) -+ * jmp 0xF8 (after shared desc header) -+ */ -+ append_jump(desc, JUMP_TEST_INVALL | JUMP_COND_MATH_N | -+ JUMP_COND_MATH_Z | 0xF8); -+ -+#ifdef DEBUG -+ print_hex_dump(KERN_ERR, "caam dma shdesc@" __stringify(__LINE__) ": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); -+#endif -+} -+ -+static int __init caam_dma_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct device *ctrldev = dev->parent; -+ struct dma_chan *chan, *_chan; -+ u32 *sh_desc; -+ int err = -ENOMEM; -+ int bonds; -+ -+ if (!caam_jr_driver_probed()) { -+ dev_info(dev, "Defer probing after JR driver probing\n"); -+ return -EPROBE_DEFER; -+ } -+ -+ dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL); -+ if (!dma_dev) -+ return -ENOMEM; -+ -+ dma_sh_desc = kzalloc(sizeof(*dma_sh_desc), GFP_KERNEL | GFP_DMA); -+ if (!dma_sh_desc) -+ goto desc_err; -+ -+ sh_desc = dma_sh_desc->desc; -+ set_caam_dma_desc(sh_desc); -+ dma_sh_desc->desc_dma = dma_map_single(ctrldev, sh_desc, -+ desc_bytes(sh_desc), -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(ctrldev, dma_sh_desc->desc_dma)) { -+ dev_err(dev, "unable to map dma descriptor\n"); -+ goto map_err; -+ } -+ -+ INIT_LIST_HEAD(&dma_dev->channels); -+ -+ bonds = caam_dma_jr_chan_bind(); -+ if (!bonds) { -+ err = -ENODEV; -+ goto jr_bind_err; -+ } -+ -+ dma_dev->dev = dev; -+ dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; -+ dma_cap_set(DMA_SG, dma_dev->cap_mask); -+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); -+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); -+ dma_dev->device_tx_status = dma_cookie_status; -+ dma_dev->device_issue_pending = caam_dma_issue_pending; -+ dma_dev->device_prep_dma_sg = caam_dma_prep_sg; -+ dma_dev->device_prep_dma_memcpy = caam_dma_prep_memcpy; -+ dma_dev->device_free_chan_resources = caam_dma_free_chan_resources; -+ -+ err = dma_async_device_register(dma_dev); -+ if (err) { -+ dev_err(dev, "Failed to register CAAM DMA engine\n"); -+ goto jr_bind_err; -+ } -+ -+ dev_info(dev, "caam dma support with %d job rings\n", bonds); -+ -+ return err; -+ -+jr_bind_err: -+ list_for_each_entry_safe(chan, _chan, &dma_dev->channels, device_node) -+ caam_jr_dma_free(chan); -+ -+ dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, desc_bytes(sh_desc), -+ DMA_TO_DEVICE); -+map_err: -+ kfree(dma_sh_desc); -+desc_err: -+ kfree(dma_dev); -+ return err; -+} -+ -+static int caam_dma_remove(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct device *ctrldev = dev->parent; -+ struct caam_dma_ctx *ctx, *_ctx; -+ -+ dma_async_device_unregister(dma_dev); -+ -+ list_for_each_entry_safe(ctx, _ctx, &dma_ctx_list, node) { -+ list_del(&ctx->node); -+ caam_jr_free(ctx->jrdev); -+ kfree(ctx); -+ } -+ -+ dma_unmap_single(ctrldev, dma_sh_desc->desc_dma, -+ desc_bytes(dma_sh_desc->desc), DMA_TO_DEVICE); -+ -+ kfree(dma_sh_desc); -+ kfree(dma_dev); -+ -+ dev_info(dev, "caam dma support disabled\n"); -+ return 0; -+} -+ -+static const struct of_device_id caam_dma_match[] = { -+ { .compatible = "fsl,sec-v5.4-dma", }, -+ { .compatible = "fsl,sec-v5.0-dma", }, -+ { .compatible = "fsl,sec-v4.0-dma", }, -+ {}, 
-+}; -+MODULE_DEVICE_TABLE(of, caam_dma_match); -+ -+static struct platform_driver caam_dma_driver = { -+ .driver = { -+ .name = "caam-dma", -+ .of_match_table = caam_dma_match, -+ }, -+ .probe = caam_dma_probe, -+ .remove = caam_dma_remove, -+}; -+module_platform_driver(caam_dma_driver); -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("NXP CAAM support for SG DMA"); -+MODULE_AUTHOR("NXP Semiconductors"); ---- /dev/null -+++ b/drivers/dma/dpaa2-qdma/Kconfig -@@ -0,0 +1,8 @@ -+menuconfig FSL_DPAA2_QDMA -+ tristate "NXP DPAA2 QDMA" -+ depends on FSL_MC_BUS && FSL_MC_DPIO -+ select DMA_ENGINE -+ select DMA_VIRTUAL_CHANNELS -+ ---help--- -+ NXP Data Path Acceleration Architecture 2 QDMA driver, -+ using the NXP MC bus driver. ---- /dev/null -+++ b/drivers/dma/dpaa2-qdma/Makefile -@@ -0,0 +1,8 @@ -+# -+# Makefile for the NXP DPAA2 CAAM controllers -+# -+ccflags-y += -DVERSION=\"\" -+ -+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma.o -+ -+fsl-dpaa2-qdma-objs := dpaa2-qdma.o dpdmai.o ---- /dev/null -+++ b/drivers/dma/dpaa2-qdma/dpaa2-qdma.c -@@ -0,0 +1,940 @@ -+/* -+ * drivers/dma/dpaa2-qdma/dpaa2-qdma.c -+ * -+ * Copyright 2015-2017 NXP Semiconductor, Inc. -+ * Author: Changming Huang <jerry.huang@nxp.com> -+ * -+ * Driver for the NXP QDMA engine with QMan mode. -+ * Channel virtualization is supported through enqueuing of DMA jobs to, -+ * or dequeuing DMA jobs from different work queues with QMan portal. -+ * This module can be found on NXP LS2 SoCs. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ */ -+ -+#include <linux/init.h> -+#include <linux/module.h> -+#include <linux/interrupt.h> -+#include <linux/clk.h> -+#include <linux/dma-mapping.h> -+#include <linux/dmapool.h> -+#include <linux/slab.h> -+#include <linux/spinlock.h> -+#include <linux/of.h> -+#include <linux/of_device.h> -+#include <linux/of_address.h> -+#include <linux/of_irq.h> -+#include <linux/of_dma.h> -+#include <linux/types.h> -+#include <linux/delay.h> -+#include <linux/iommu.h> -+ -+#include "../virt-dma.h" -+ -+#include <linux/fsl/mc.h> -+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h" -+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h" -+#include "fsl_dpdmai_cmd.h" -+#include "fsl_dpdmai.h" -+#include "dpaa2-qdma.h" -+ -+static bool smmu_disable = true; -+ -+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan) -+{ -+ return container_of(chan, struct dpaa2_qdma_chan, vchan.chan); -+} -+ -+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd) -+{ -+ return container_of(vd, struct dpaa2_qdma_comp, vdesc); -+} -+ -+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan) -+{ -+ return 0; -+} -+ -+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan) -+{ -+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); -+ unsigned long flags; -+ LIST_HEAD(head); -+ -+ spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags); -+ vchan_get_all_descriptors(&dpaa2_chan->vchan, &head); -+ spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags); -+ -+ vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head); -+} -+ -+/* -+ * Request a command descriptor for enqueue. 
-+ */ -+static struct dpaa2_qdma_comp * -+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan) -+{ -+ struct dpaa2_qdma_comp *comp_temp = NULL; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags); -+ if (list_empty(&dpaa2_chan->comp_free)) { -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL); -+ if (!comp_temp) -+ goto err; -+ comp_temp->fd_virt_addr = dma_pool_alloc(dpaa2_chan->fd_pool, -+ GFP_NOWAIT, &comp_temp->fd_bus_addr); -+ if (!comp_temp->fd_virt_addr) -+ goto err; -+ -+ comp_temp->fl_virt_addr = -+ (void *)((struct dpaa2_fd *) -+ comp_temp->fd_virt_addr + 1); -+ comp_temp->fl_bus_addr = comp_temp->fd_bus_addr + -+ sizeof(struct dpaa2_fd); -+ comp_temp->desc_virt_addr = -+ (void *)((struct dpaa2_fl_entry *) -+ comp_temp->fl_virt_addr + 3); -+ comp_temp->desc_bus_addr = comp_temp->fl_bus_addr + -+ sizeof(struct dpaa2_fl_entry) * 3; -+ -+ comp_temp->qchan = dpaa2_chan; -+ comp_temp->sg_blk_num = 0; -+ INIT_LIST_HEAD(&comp_temp->sg_src_head); -+ INIT_LIST_HEAD(&comp_temp->sg_dst_head); -+ return comp_temp; -+ } -+ comp_temp = list_first_entry(&dpaa2_chan->comp_free, -+ struct dpaa2_qdma_comp, list); -+ list_del(&comp_temp->list); -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+ -+ comp_temp->qchan = dpaa2_chan; -+err: -+ return comp_temp; -+} -+ -+static void dpaa2_qdma_populate_fd(uint32_t format, -+ struct dpaa2_qdma_comp *dpaa2_comp) -+{ -+ struct dpaa2_fd *fd; -+ -+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr; -+ memset(fd, 0, sizeof(struct dpaa2_fd)); -+ -+ /* fd populated */ -+ dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr); -+ /* Bypass memory translation, Frame list format, short length disable */ -+ /* we need to disable BMT if fsl-mc use iova addr */ -+ if (smmu_disable) -+ dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE); -+ dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE); -+ -+ dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX); -+} -+ -+/* first frame list for descriptor buffer */ -+static void dpaa2_qdma_populate_first_framel( -+ struct dpaa2_fl_entry *f_list, -+ struct dpaa2_qdma_comp *dpaa2_comp) -+{ -+ struct dpaa2_qdma_sd_d *sdd; -+ -+ sdd = (struct dpaa2_qdma_sd_d *)dpaa2_comp->desc_virt_addr; -+ memset(sdd, 0, 2 * (sizeof(*sdd))); -+ /* source and destination descriptor */ -+ sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT); /* source descriptor CMD */ -+ sdd++; -+ sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT); /* dest descriptor CMD */ -+ -+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); -+ /* first frame list to source descriptor */ -+ -+ dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr); -+ dpaa2_fl_set_len(f_list, 0x20); -+ dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG); -+ -+ if (smmu_disable) -+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */ -+} -+ -+/* source and destination frame list */ -+static void dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list, -+ dma_addr_t dst, dma_addr_t src, size_t len, uint8_t fmt) -+{ -+ /* source frame list to source buffer */ -+ memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); -+ -+ -+ dpaa2_fl_set_addr(f_list, src); -+ dpaa2_fl_set_len(f_list, len); -+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); /* single buffer frame or scatter gather frame */ -+ if (smmu_disable) -+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */ -+ -+ f_list++; -+ /* destination frame list to destination buffer */ -+ 
memset(f_list, 0, sizeof(struct dpaa2_fl_entry)); -+ -+ dpaa2_fl_set_addr(f_list, dst); -+ dpaa2_fl_set_len(f_list, len); -+ dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG)); -+ dpaa2_fl_set_final(f_list, QDMA_FL_F); /* single buffer frame or scatter gather frame */ -+ if (smmu_disable) -+ f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE); /* bypass memory translation */ -+} -+ -+static struct dma_async_tx_descriptor *dpaa2_qdma_prep_memcpy( -+ struct dma_chan *chan, dma_addr_t dst, -+ dma_addr_t src, size_t len, unsigned long flags) -+{ -+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); -+ struct dpaa2_qdma_comp *dpaa2_comp; -+ struct dpaa2_fl_entry *f_list; -+ uint32_t format; -+ -+ dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan); -+ -+#ifdef LONG_FORMAT -+ format = QDMA_FD_LONG_FORMAT; -+#else -+ format = QDMA_FD_SHORT_FORMAT; -+#endif -+ /* populate Frame descriptor */ -+ dpaa2_qdma_populate_fd(format, dpaa2_comp); -+ -+ f_list = (struct dpaa2_fl_entry *)dpaa2_comp->fl_virt_addr; -+ -+#ifdef LONG_FORMAT -+ /* first frame list for descriptor buffer (logn format) */ -+ dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp); -+ -+ f_list++; -+#endif -+ -+ dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF); -+ -+ return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags); -+} -+ -+static struct qdma_sg_blk *dpaa2_qdma_get_sg_blk( -+ struct dpaa2_qdma_comp *dpaa2_comp, -+ struct dpaa2_qdma_chan *dpaa2_chan) -+{ -+ struct qdma_sg_blk *sg_blk = NULL; -+ dma_addr_t phy_sgb; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags); -+ if (list_empty(&dpaa2_chan->sgb_free)) { -+ sg_blk = (struct qdma_sg_blk *)dma_pool_alloc( -+ dpaa2_chan->sg_blk_pool, -+ GFP_NOWAIT, &phy_sgb); -+ if (!sg_blk) { -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+ return sg_blk; -+ } -+ sg_blk->blk_virt_addr = (void *)(sg_blk + 1); -+ sg_blk->blk_bus_addr = phy_sgb + sizeof(*sg_blk); -+ } else { -+ sg_blk = list_first_entry(&dpaa2_chan->sgb_free, -+ struct qdma_sg_blk, list); -+ list_del(&sg_blk->list); -+ } -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+ -+ return sg_blk; -+} -+ -+static uint32_t dpaa2_qdma_populate_sg(struct device *dev, -+ struct dpaa2_qdma_chan *dpaa2_chan, -+ struct dpaa2_qdma_comp *dpaa2_comp, -+ struct scatterlist *dst_sg, u32 dst_nents, -+ struct scatterlist *src_sg, u32 src_nents) -+{ -+ struct dpaa2_qdma_sg *src_sge; -+ struct dpaa2_qdma_sg *dst_sge; -+ struct qdma_sg_blk *sg_blk; -+ struct qdma_sg_blk *sg_blk_dst; -+ dma_addr_t src; -+ dma_addr_t dst; -+ uint32_t num; -+ uint32_t blocks; -+ uint32_t len = 0; -+ uint32_t total_len = 0; -+ int i, j = 0; -+ -+ num = min(dst_nents, src_nents); -+ blocks = num / (NUM_SG_PER_BLK - 1); -+ if (num % (NUM_SG_PER_BLK - 1)) -+ blocks += 1; -+ if (dpaa2_comp->sg_blk_num < blocks) { -+ len = blocks - dpaa2_comp->sg_blk_num; -+ for (i = 0; i < len; i++) { -+ /* source sg blocks */ -+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan); -+ if (!sg_blk) -+ return 0; -+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_src_head); -+ /* destination sg blocks */ -+ sg_blk = dpaa2_qdma_get_sg_blk(dpaa2_comp, dpaa2_chan); -+ if (!sg_blk) -+ return 0; -+ list_add_tail(&sg_blk->list, &dpaa2_comp->sg_dst_head); -+ } -+ } else { -+ len = dpaa2_comp->sg_blk_num - blocks; -+ for (i = 0; i < len; i++) { -+ spin_lock(&dpaa2_chan->queue_lock); -+ /* handle source sg blocks */ -+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head, -+ struct qdma_sg_blk, list); -+ 
list_del(&sg_blk->list); -+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free); -+ /* handle destination sg blocks */ -+ sg_blk = list_first_entry(&dpaa2_comp->sg_dst_head, -+ struct qdma_sg_blk, list); -+ list_del(&sg_blk->list); -+ list_add_tail(&sg_blk->list, &dpaa2_chan->sgb_free); -+ spin_unlock(&dpaa2_chan->queue_lock); -+ } -+ } -+ dpaa2_comp->sg_blk_num = blocks; -+ -+ /* get the first source sg phy address */ -+ sg_blk = list_first_entry(&dpaa2_comp->sg_src_head, -+ struct qdma_sg_blk, list); -+ dpaa2_comp->sge_src_bus_addr = sg_blk->blk_bus_addr; -+ /* get the first destinaiton sg phy address */ -+ sg_blk_dst = list_first_entry(&dpaa2_comp->sg_dst_head, -+ struct qdma_sg_blk, list); -+ dpaa2_comp->sge_dst_bus_addr = sg_blk_dst->blk_bus_addr; -+ -+ for (i = 0; i < blocks; i++) { -+ src_sge = (struct dpaa2_qdma_sg *)sg_blk->blk_virt_addr; -+ dst_sge = (struct dpaa2_qdma_sg *)sg_blk_dst->blk_virt_addr; -+ -+ for (j = 0; j < (NUM_SG_PER_BLK - 1); j++) { -+ len = min(sg_dma_len(dst_sg), sg_dma_len(src_sg)); -+ if (0 == len) -+ goto fetch; -+ total_len += len; -+ src = sg_dma_address(src_sg); -+ dst = sg_dma_address(dst_sg); -+ -+ /* source SG */ -+ src_sge->addr_lo = src; -+ src_sge->addr_hi = (src >> 32); -+ src_sge->data_len.data_len_sl0 = len; -+ src_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ src_sge->ctrl.fmt = QDMA_SG_FMT_SDB; -+ /* destination SG */ -+ dst_sge->addr_lo = dst; -+ dst_sge->addr_hi = (dst >> 32); -+ dst_sge->data_len.data_len_sl0 = len; -+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SDB; -+fetch: -+ num--; -+ if (0 == num) { -+ src_sge->ctrl.f = QDMA_SG_F; -+ dst_sge->ctrl.f = QDMA_SG_F; -+ goto end; -+ } -+ dst_sg = sg_next(dst_sg); -+ src_sg = sg_next(src_sg); -+ src_sge++; -+ dst_sge++; -+ if (j == (NUM_SG_PER_BLK - 2)) { -+ /* for next blocks, extension */ -+ sg_blk = list_next_entry(sg_blk, list); -+ sg_blk_dst = list_next_entry(sg_blk_dst, list); -+ src_sge->addr_lo = sg_blk->blk_bus_addr; -+ src_sge->addr_hi = sg_blk->blk_bus_addr >> 32; -+ src_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ src_sge->ctrl.fmt = QDMA_SG_FMT_SGTE; -+ dst_sge->addr_lo = sg_blk_dst->blk_bus_addr; -+ dst_sge->addr_hi = -+ sg_blk_dst->blk_bus_addr >> 32; -+ dst_sge->ctrl.sl = QDMA_SG_SL_LONG; -+ dst_sge->ctrl.fmt = QDMA_SG_FMT_SGTE; -+ } -+ } -+ } -+ -+end: -+ return total_len; -+} -+ -+static enum dma_status dpaa2_qdma_tx_status(struct dma_chan *chan, -+ dma_cookie_t cookie, struct dma_tx_state *txstate) -+{ -+ return dma_cookie_status(chan, cookie, txstate); -+} -+ -+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc) -+{ -+} -+ -+static void dpaa2_qdma_issue_pending(struct dma_chan *chan) -+{ -+ struct dpaa2_qdma_comp *dpaa2_comp; -+ struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan); -+ struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma; -+ struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv; -+ struct virt_dma_desc *vdesc; -+ struct dpaa2_fd *fd; -+ int err; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&dpaa2_chan->queue_lock, flags); -+ spin_lock(&dpaa2_chan->vchan.lock); -+ if (vchan_issue_pending(&dpaa2_chan->vchan)) { -+ vdesc = vchan_next_desc(&dpaa2_chan->vchan); -+ if (!vdesc) -+ goto err_enqueue; -+ dpaa2_comp = to_fsl_qdma_comp(vdesc); -+ -+ fd = (struct dpaa2_fd *)dpaa2_comp->fd_virt_addr; -+ -+ list_del(&vdesc->node); -+ list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used); -+ -+ /* TOBO: priority hard-coded to zero */ -+ err = dpaa2_io_service_enqueue_fq(NULL, -+ priv->tx_queue_attr[0].fqid, fd); -+ if (err) { -+ 
list_del(&dpaa2_comp->list); -+ list_add_tail(&dpaa2_comp->list, -+ &dpaa2_chan->comp_free); -+ } -+ -+ } -+err_enqueue: -+ spin_unlock(&dpaa2_chan->vchan.lock); -+ spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags); -+} -+ -+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_qdma_priv *priv; -+ struct dpaa2_qdma_priv_per_prio *ppriv; -+ uint8_t prio_def = DPDMAI_PRIO_NUM; -+ int err; -+ int i; -+ -+ priv = dev_get_drvdata(dev); -+ -+ priv->dev = dev; -+ priv->dpqdma_id = ls_dev->obj_desc.id; -+ -+ /*Get the handle for the DPDMAI this interface is associate with */ -+ err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpdmai_open() failed\n"); -+ return err; -+ } -+ dev_info(dev, "Opened dpdmai object successfully\n"); -+ -+ err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, -+ &priv->dpdmai_attr); -+ if (err) { -+ dev_err(dev, "dpdmai_get_attributes() failed\n"); -+ return err; -+ } -+ -+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) { -+ dev_err(dev, "DPDMAI major version mismatch\n" -+ "Found %u.%u, supported version is %u.%u\n", -+ priv->dpdmai_attr.version.major, -+ priv->dpdmai_attr.version.minor, -+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR); -+ } -+ -+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) { -+ dev_err(dev, "DPDMAI minor version mismatch\n" -+ "Found %u.%u, supported version is %u.%u\n", -+ priv->dpdmai_attr.version.major, -+ priv->dpdmai_attr.version.minor, -+ DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR); -+ } -+ -+ priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def); -+ ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL); -+ if (!ppriv) { -+ dev_err(dev, "kzalloc for ppriv failed\n"); -+ return -1; -+ } -+ priv->ppriv = ppriv; -+ -+ for (i = 0; i < priv->num_pairs; i++) { -+ err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, -+ i, &priv->rx_queue_attr[i]); -+ if (err) { -+ dev_err(dev, "dpdmai_get_rx_queue() failed\n"); -+ return err; -+ } -+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; -+ -+ err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, -+ i, &priv->tx_queue_attr[i]); -+ if (err) { -+ dev_err(dev, "dpdmai_get_tx_queue() failed\n"); -+ return err; -+ } -+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid; -+ ppriv->prio = i; -+ ppriv->priv = priv; -+ ppriv++; -+ } -+ -+ return 0; -+} -+ -+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx, -+ struct dpaa2_qdma_priv_per_prio, nctx); -+ struct dpaa2_qdma_priv *priv = ppriv->priv; -+ struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp; -+ struct dpaa2_qdma_chan *qchan; -+ const struct dpaa2_fd *fd; -+ const struct dpaa2_fd *fd_eq; -+ struct dpaa2_dq *dq; -+ int err; -+ int is_last = 0; -+ uint8_t status; -+ int i; -+ int found; -+ uint32_t n_chans = priv->dpaa2_qdma->n_chans; -+ -+ do { -+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid, -+ ppriv->store); -+ } while (err); -+ -+ while (!is_last) { -+ do { -+ dq = dpaa2_io_store_next(ppriv->store, &is_last); -+ } while (!is_last && !dq); -+ if (!dq) { -+ dev_err(priv->dev, "FQID returned no valid frames!\n"); -+ continue; -+ } -+ -+ /* obtain FD and process the error */ -+ fd = dpaa2_dq_fd(dq); -+ -+ status = dpaa2_fd_get_ctrl(fd) & 0xff; -+ if (status) -+ dev_err(priv->dev, "FD error occurred\n"); -+ found = 0; -+ for (i = 0; i < n_chans; i++) { -+ qchan = &priv->dpaa2_qdma->chans[i]; -+ 
spin_lock(&qchan->queue_lock); -+ if (list_empty(&qchan->comp_used)) { -+ spin_unlock(&qchan->queue_lock); -+ continue; -+ } -+ list_for_each_entry_safe(dpaa2_comp, _comp_tmp, -+ &qchan->comp_used, list) { -+ fd_eq = (struct dpaa2_fd *) -+ dpaa2_comp->fd_virt_addr; -+ -+ if (le64_to_cpu(fd_eq->simple.addr) == -+ le64_to_cpu(fd->simple.addr)) { -+ -+ list_del(&dpaa2_comp->list); -+ list_add_tail(&dpaa2_comp->list, -+ &qchan->comp_free); -+ -+ spin_lock(&qchan->vchan.lock); -+ vchan_cookie_complete( -+ &dpaa2_comp->vdesc); -+ spin_unlock(&qchan->vchan.lock); -+ found = 1; -+ break; -+ } -+ } -+ spin_unlock(&qchan->queue_lock); -+ if (found) -+ break; -+ } -+ } -+ -+ dpaa2_io_service_rearm(NULL, ctx); -+} -+ -+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv) -+{ -+ int err, i, num; -+ struct device *dev = priv->dev; -+ struct dpaa2_qdma_priv_per_prio *ppriv; -+ -+ num = priv->num_pairs; -+ ppriv = priv->ppriv; -+ for (i = 0; i < num; i++) { -+ ppriv->nctx.is_cdan = 0; -+ ppriv->nctx.desired_cpu = 1; -+ ppriv->nctx.id = ppriv->rsp_fqid; -+ ppriv->nctx.cb = dpaa2_qdma_fqdan_cb; -+ err = dpaa2_io_service_register(NULL, &ppriv->nctx); -+ if (err) { -+ dev_err(dev, "Notification register failed\n"); -+ goto err_service; -+ } -+ -+ ppriv->store = dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, -+ dev); -+ if (!ppriv->store) { -+ dev_err(dev, "dpaa2_io_store_create() failed\n"); -+ goto err_store; -+ } -+ -+ ppriv++; -+ } -+ return 0; -+ -+err_store: -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); -+err_service: -+ ppriv--; -+ while (ppriv >= priv->ppriv) { -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); -+ dpaa2_io_store_destroy(ppriv->store); -+ ppriv--; -+ } -+ return -1; -+} -+ -+static void __cold dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv) -+{ -+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; -+ int i; -+ -+ for (i = 0; i < priv->num_pairs; i++) { -+ dpaa2_io_store_destroy(ppriv->store); -+ ppriv++; -+ } -+} -+ -+static void __cold dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv) -+{ -+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; -+ int i; -+ -+ for (i = 0; i < priv->num_pairs; i++) { -+ dpaa2_io_service_deregister(NULL, &ppriv->nctx); -+ ppriv++; -+ } -+} -+ -+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv) -+{ -+ int err; -+ struct dpdmai_rx_queue_cfg rx_queue_cfg; -+ struct device *dev = priv->dev; -+ struct dpaa2_qdma_priv_per_prio *ppriv; -+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); -+ int i, num; -+ -+ num = priv->num_pairs; -+ ppriv = priv->ppriv; -+ for (i = 0; i < num; i++) { -+ rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX | -+ DPDMAI_QUEUE_OPT_DEST; -+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64; -+ rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO; -+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; -+ rx_queue_cfg.dest_cfg.priority = ppriv->prio; -+ err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, -+ rx_queue_cfg.dest_cfg.priority, &rx_queue_cfg); -+ if (err) { -+ dev_err(dev, "dpdmai_set_rx_queue() failed\n"); -+ return err; -+ } -+ -+ ppriv++; -+ } -+ -+ return 0; -+} -+ -+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv) -+{ -+ int err = 0; -+ struct device *dev = priv->dev; -+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); -+ struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv; -+ int i; -+ -+ for (i = 0; i < priv->num_pairs; i++) { -+ ppriv->nctx.qman64 = 0; -+ ppriv->nctx.dpio_id = 0; -+ ppriv++; -+ } -+ -+ err = dpdmai_reset(priv->mc_io, 0, 
ls_dev->mc_handle); -+ if (err) -+ dev_err(dev, "dpdmai_reset() failed\n"); -+ -+ return err; -+} -+ -+static void __cold dpaa2_dpdmai_free_pool(struct dpaa2_qdma_chan *qchan, -+ struct list_head *head) -+{ -+ struct qdma_sg_blk *sgb_tmp, *_sgb_tmp; -+ /* free the QDMA SG pool block */ -+ list_for_each_entry_safe(sgb_tmp, _sgb_tmp, head, list) { -+ sgb_tmp->blk_virt_addr = (void *)((struct qdma_sg_blk *) -+ sgb_tmp->blk_virt_addr - 1); -+ sgb_tmp->blk_bus_addr = sgb_tmp->blk_bus_addr -+ - sizeof(*sgb_tmp); -+ dma_pool_free(qchan->sg_blk_pool, sgb_tmp->blk_virt_addr, -+ sgb_tmp->blk_bus_addr); -+ } -+ -+} -+ -+static void __cold dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan, -+ struct list_head *head) -+{ -+ struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp; -+ /* free the QDMA comp resource */ -+ list_for_each_entry_safe(comp_tmp, _comp_tmp, -+ head, list) { -+ dma_pool_free(qchan->fd_pool, -+ comp_tmp->fd_virt_addr, -+ comp_tmp->fd_bus_addr); -+ /* free the SG source block on comp */ -+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_src_head); -+ /* free the SG destination block on comp */ -+ dpaa2_dpdmai_free_pool(qchan, &comp_tmp->sg_dst_head); -+ list_del(&comp_tmp->list); -+ kfree(comp_tmp); -+ } -+ -+} -+ -+static void __cold dpaa2_dpdmai_free_channels( -+ struct dpaa2_qdma_engine *dpaa2_qdma) -+{ -+ struct dpaa2_qdma_chan *qchan; -+ int num, i; -+ -+ num = dpaa2_qdma->n_chans; -+ for (i = 0; i < num; i++) { -+ qchan = &dpaa2_qdma->chans[i]; -+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used); -+ dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free); -+ dpaa2_dpdmai_free_pool(qchan, &qchan->sgb_free); -+ dma_pool_destroy(qchan->fd_pool); -+ dma_pool_destroy(qchan->sg_blk_pool); -+ } -+} -+ -+static int dpaa2_dpdmai_alloc_channels(struct dpaa2_qdma_engine *dpaa2_qdma) -+{ -+ struct dpaa2_qdma_chan *dpaa2_chan; -+ struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev; -+ int i; -+ -+ INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels); -+ for (i = 0; i < dpaa2_qdma->n_chans; i++) { -+ dpaa2_chan = &dpaa2_qdma->chans[i]; -+ dpaa2_chan->qdma = dpaa2_qdma; -+ dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc; -+ vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev); -+ -+ dpaa2_chan->fd_pool = dma_pool_create("fd_pool", -+ dev, FD_POOL_SIZE, 32, 0); -+ if (!dpaa2_chan->fd_pool) -+ return -1; -+ dpaa2_chan->sg_blk_pool = dma_pool_create("sg_blk_pool", -+ dev, SG_POOL_SIZE, 32, 0); -+ if (!dpaa2_chan->sg_blk_pool) -+ return -1; -+ -+ spin_lock_init(&dpaa2_chan->queue_lock); -+ INIT_LIST_HEAD(&dpaa2_chan->comp_used); -+ INIT_LIST_HEAD(&dpaa2_chan->comp_free); -+ INIT_LIST_HEAD(&dpaa2_chan->sgb_free); -+ } -+ return 0; -+} -+ -+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev) -+{ -+ struct dpaa2_qdma_priv *priv; -+ struct device *dev = &dpdmai_dev->dev; -+ struct dpaa2_qdma_engine *dpaa2_qdma; -+ int err; -+ -+ priv = kzalloc(sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ return -ENOMEM; -+ dev_set_drvdata(dev, priv); -+ priv->dpdmai_dev = dpdmai_dev; -+ -+ priv->iommu_domain = iommu_get_domain_for_dev(dev); -+ if (priv->iommu_domain) -+ smmu_disable = false; -+ -+ /* obtain a MC portal */ -+ err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_mcportal; -+ } -+ -+ /* DPDMAI initialization */ -+ err = dpaa2_qdma_setup(dpdmai_dev); -+ if (err) { -+ dev_err(dev, "dpaa2_dpdmai_setup() failed\n"); -+ goto err_dpdmai_setup; -+ } -+ -+ /* DPIO */ -+ err = dpaa2_qdma_dpio_setup(priv); -+ if (err) { -+ dev_err(dev, 
"dpaa2_dpdmai_dpio_setup() failed\n"); -+ goto err_dpio_setup; -+ } -+ -+ /* DPDMAI binding to DPIO */ -+ err = dpaa2_dpdmai_bind(priv); -+ if (err) { -+ dev_err(dev, "dpaa2_dpdmai_bind() failed\n"); -+ goto err_bind; -+ } -+ -+ /* DPDMAI enable */ -+ err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpdmai_enable() faile\n"); -+ goto err_enable; -+ } -+ -+ dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL); -+ if (!dpaa2_qdma) { -+ err = -ENOMEM; -+ goto err_eng; -+ } -+ -+ priv->dpaa2_qdma = dpaa2_qdma; -+ dpaa2_qdma->priv = priv; -+ -+ dpaa2_qdma->n_chans = NUM_CH; -+ -+ err = dpaa2_dpdmai_alloc_channels(dpaa2_qdma); -+ if (err) { -+ dev_err(dev, "QDMA alloc channels faile\n"); -+ goto err_reg; -+ } -+ -+ dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask); -+ dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask); -+ dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask); -+ -+ dpaa2_qdma->dma_dev.dev = dev; -+ dpaa2_qdma->dma_dev.device_alloc_chan_resources -+ = dpaa2_qdma_alloc_chan_resources; -+ dpaa2_qdma->dma_dev.device_free_chan_resources -+ = dpaa2_qdma_free_chan_resources; -+ dpaa2_qdma->dma_dev.device_tx_status = dpaa2_qdma_tx_status; -+ dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy; -+ dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending; -+ -+ err = dma_async_device_register(&dpaa2_qdma->dma_dev); -+ if (err) { -+ dev_err(dev, "Can't register NXP QDMA engine.\n"); -+ goto err_reg; -+ } -+ -+ return 0; -+ -+err_reg: -+ dpaa2_dpdmai_free_channels(dpaa2_qdma); -+ kfree(dpaa2_qdma); -+err_eng: -+ dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle); -+err_enable: -+ dpaa2_dpdmai_dpio_unbind(priv); -+err_bind: -+ dpaa2_dpmai_store_free(priv); -+ dpaa2_dpdmai_dpio_free(priv); -+err_dpio_setup: -+ dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle); -+err_dpdmai_setup: -+ fsl_mc_portal_free(priv->mc_io); -+err_mcportal: -+ kfree(priv->ppriv); -+ kfree(priv); -+ dev_set_drvdata(dev, NULL); -+ return err; -+} -+ -+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct dpaa2_qdma_priv *priv; -+ struct dpaa2_qdma_engine *dpaa2_qdma; -+ -+ dev = &ls_dev->dev; -+ priv = dev_get_drvdata(dev); -+ dpaa2_qdma = priv->dpaa2_qdma; -+ -+ dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); -+ dpaa2_dpdmai_dpio_unbind(priv); -+ dpaa2_dpmai_store_free(priv); -+ dpaa2_dpdmai_dpio_free(priv); -+ dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); -+ fsl_mc_portal_free(priv->mc_io); -+ dev_set_drvdata(dev, NULL); -+ dpaa2_dpdmai_free_channels(dpaa2_qdma); -+ -+ dma_async_device_unregister(&dpaa2_qdma->dma_dev); -+ kfree(priv); -+ kfree(dpaa2_qdma); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpdmai", -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_qdma_driver = { -+ .driver = { -+ .name = "dpaa2-qdma", -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_qdma_probe, -+ .remove = dpaa2_qdma_remove, -+ .match_id_table = dpaa2_qdma_id_table -+}; -+ -+static int __init dpaa2_qdma_driver_init(void) -+{ -+ return fsl_mc_driver_register(&(dpaa2_qdma_driver)); -+} -+late_initcall(dpaa2_qdma_driver_init); -+ -+static void __exit fsl_qdma_exit(void) -+{ -+ fsl_mc_driver_unregister(&(dpaa2_qdma_driver)); -+} -+module_exit(fsl_qdma_exit); -+ -+MODULE_DESCRIPTION("NXP DPAA2 qDMA driver"); -+MODULE_LICENSE("Dual BSD/GPL"); ---- /dev/null -+++ 
b/drivers/dma/dpaa2-qdma/dpaa2-qdma.h -@@ -0,0 +1,227 @@ -+/* Copyright 2015 NXP Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of NXP Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY NXP Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL NXP Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef __DPAA2_QDMA_H -+#define __DPAA2_QDMA_H -+ -+#define LONG_FORMAT 1 -+ -+#define DPAA2_QDMA_STORE_SIZE 16 -+#define NUM_CH 8 -+#define NUM_SG_PER_BLK 16 -+ -+#define QDMA_DMR_OFFSET 0x0 -+#define QDMA_DQ_EN (0 << 30) -+#define QDMA_DQ_DIS (1 << 30) -+ -+#define QDMA_DSR_M_OFFSET 0x10004 -+ -+struct dpaa2_qdma_sd_d { -+ uint32_t rsv:32; -+ union { -+ struct { -+ uint32_t ssd:12; /* souce stride distance */ -+ uint32_t sss:12; /* souce stride size */ -+ uint32_t rsv1:8; -+ } sdf; -+ struct { -+ uint32_t dsd:12; /* Destination stride distance */ -+ uint32_t dss:12; /* Destination stride size */ -+ uint32_t rsv2:8; -+ } ddf; -+ } df; -+ uint32_t rbpcmd; /* Route-by-port command */ -+ uint32_t cmd; -+} __attribute__((__packed__)); -+/* Source descriptor command read transaction type for RBP=0: -+ coherent copy of cacheable memory */ -+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28) -+/* Destination descriptor command write transaction type for RBP=0: -+ coherent copy of cacheable memory */ -+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28) -+ -+#define QDMA_SG_FMT_SDB 0x0 /* single data buffer */ -+#define QDMA_SG_FMT_FDS 0x1 /* frame data section */ -+#define QDMA_SG_FMT_SGTE 0x2 /* SGT extension */ -+#define QDMA_SG_SL_SHORT 0x1 /* short length */ -+#define QDMA_SG_SL_LONG 0x0 /* short length */ -+#define QDMA_SG_F 0x1 /* last sg entry */ -+struct dpaa2_qdma_sg { -+ uint32_t addr_lo; /* address 0:31 */ -+ uint32_t addr_hi:17; /* address 32:48 */ -+ uint32_t rsv:15; -+ union { -+ uint32_t data_len_sl0; /* SL=0, the long format */ -+ struct { -+ uint32_t len:17; /* SL=1, the short format */ -+ uint32_t reserve:3; -+ uint32_t sf:1; -+ uint32_t sr:1; -+ uint32_t size:10; /* buff size */ -+ } data_len_sl1; -+ } data_len; /* AVAIL_LENGTH */ -+ struct { -+ uint32_t bpid:14; -+ uint32_t ivp:1; -+ uint32_t mbt:1; -+ uint32_t offset:12; -+ uint32_t fmt:2; -+ uint32_t sl:1; -+ uint32_t f:1; -+ } ctrl; -+} __attribute__((__packed__)); -+ -+#define QMAN_FD_FMT_ENABLE (1) /* frame list table enable */ -+#define QMAN_FD_BMT_ENABLE (1 << 15) /* bypass memory translation */ -+#define QMAN_FD_BMT_DISABLE (0 << 15) /* bypass memory translation */ -+#define QMAN_FD_SL_DISABLE (0 << 14) /* short lengthe disabled */ -+#define QMAN_FD_SL_ENABLE (1 << 14) /* short lengthe enabled */ -+ -+#define QDMA_SB_FRAME (0 << 28) /* single frame */ -+#define QDMA_SG_FRAME (2 << 28) /* scatter gather frames */ -+#define QDMA_FINAL_BIT_DISABLE (0 << 31) /* final bit disable */ -+#define QDMA_FINAL_BIT_ENABLE (1 << 31) /* final bit enable */ -+ -+#define QDMA_FD_SHORT_FORMAT (1 << 11) /* short format */ -+#define QDMA_FD_LONG_FORMAT (0 << 11) /* long format */ -+#define QDMA_SER_DISABLE (0 << 8) /* no notification */ -+#define QDMA_SER_CTX (1 << 8) /* notification by FQD_CTX[fqid] */ -+#define QDMA_SER_DEST (2 << 8) /* notification by destination desc */ -+#define QDMA_SER_BOTH (3 << 8) /* soruce and dest notification */ -+#define QDMA_FD_SPF_ENALBE (1 << 30) /* source prefetch enable */ -+ -+#define QMAN_FD_VA_ENABLE (1 << 14) /* Address used is virtual address */ -+#define QMAN_FD_VA_DISABLE (0 << 14)/* Address used is a real address */ -+#define QMAN_FD_CBMT_ENABLE (1 << 15) /* Flow Context: 49bit physical address */ -+#define QMAN_FD_CBMT_DISABLE (0 << 15) /* Flow Context: 64bit virtual address */ -+#define QMAN_FD_SC_DISABLE (0 << 27) /* stashing control */ -+ -+#define QDMA_FL_FMT_SBF (0x0) /* Single buffer frame */ -+#define QDMA_FL_FMT_SGE 0x2 /* Scatter gather frame */ -+#define QDMA_FL_BMT_ENABLE (0x1 
<< 15)/* enable bypass memory translation */ -+#define QDMA_FL_BMT_DISABLE 0x0 /* enable bypass memory translation */ -+#define QDMA_FL_SL_LONG (0x0 << 2)/* long length */ -+#define QDMA_FL_SL_SHORT 0x1 /* short length */ -+#define QDMA_FL_F (0x1)/* last frame list bit */ -+/*Description of Frame list table structure*/ -+ -+struct dpaa2_qdma_chan { -+ struct virt_dma_chan vchan; -+ struct virt_dma_desc vdesc; -+ enum dma_status status; -+ struct dpaa2_qdma_engine *qdma; -+ -+ struct mutex dpaa2_queue_mutex; -+ spinlock_t queue_lock; -+ struct dma_pool *fd_pool; -+ struct dma_pool *sg_blk_pool; -+ -+ struct list_head comp_used; -+ struct list_head comp_free; -+ -+ struct list_head sgb_free; -+}; -+ -+struct qdma_sg_blk { -+ dma_addr_t blk_bus_addr; -+ void *blk_virt_addr; -+ struct list_head list; -+}; -+ -+struct dpaa2_qdma_comp { -+ dma_addr_t fd_bus_addr; -+ dma_addr_t fl_bus_addr; -+ dma_addr_t desc_bus_addr; -+ dma_addr_t sge_src_bus_addr; -+ dma_addr_t sge_dst_bus_addr; -+ void *fd_virt_addr; -+ void *fl_virt_addr; -+ void *desc_virt_addr; -+ void *sg_src_virt_addr; -+ void *sg_dst_virt_addr; -+ struct qdma_sg_blk *sg_blk; -+ uint32_t sg_blk_num; -+ struct list_head sg_src_head; -+ struct list_head sg_dst_head; -+ struct dpaa2_qdma_chan *qchan; -+ struct virt_dma_desc vdesc; -+ struct list_head list; -+}; -+ -+struct dpaa2_qdma_engine { -+ struct dma_device dma_dev; -+ u32 n_chans; -+ struct dpaa2_qdma_chan chans[NUM_CH]; -+ -+ struct dpaa2_qdma_priv *priv; -+}; -+ -+/* -+ * dpaa2_qdma_priv - driver private data -+ */ -+struct dpaa2_qdma_priv { -+ int dpqdma_id; -+ -+ struct iommu_domain *iommu_domain; -+ struct dpdmai_attr dpdmai_attr; -+ struct device *dev; -+ struct fsl_mc_io *mc_io; -+ struct fsl_mc_device *dpdmai_dev; -+ -+ struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; -+ struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_PRIO_NUM]; -+ -+ uint8_t num_pairs; -+ -+ struct dpaa2_qdma_engine *dpaa2_qdma; -+ struct dpaa2_qdma_priv_per_prio *ppriv; -+}; -+ -+struct dpaa2_qdma_priv_per_prio { -+ int req_fqid; -+ int rsp_fqid; -+ int prio; -+ -+ struct dpaa2_io_store *store; -+ struct dpaa2_io_notification_ctx nctx; -+ -+ struct dpaa2_qdma_priv *priv; -+}; -+ -+/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */ -+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \ -+ sizeof(struct dpaa2_fl_entry) * 3 + \ -+ sizeof(struct dpaa2_qdma_sd_d) * 2) -+ -+/* qdma_sg_blk + 16 SGs */ -+#define SG_POOL_SIZE (sizeof(struct qdma_sg_blk) +\ -+ sizeof(struct dpaa2_qdma_sg) * NUM_SG_PER_BLK) -+#endif /* __DPAA2_QDMA_H */ ---- /dev/null -+++ b/drivers/dma/dpaa2-qdma/dpdmai.c -@@ -0,0 +1,515 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include <linux/types.h> -+#include <linux/io.h> -+#include "fsl_dpdmai.h" -+#include "fsl_dpdmai_cmd.h" -+#include <linux/fsl/mc.h> -+ -+struct dpdmai_cmd_open { -+ __le32 dpdmai_id; -+}; -+ -+struct dpdmai_rsp_get_attributes { -+ __le32 id; -+ u8 num_of_priorities; -+ u8 pad0[3]; -+ __le16 major; -+ __le16 minor; -+}; -+ -+ -+struct dpdmai_cmd_queue { -+ __le32 dest_id; -+ u8 priority; -+ u8 queue; -+ u8 dest_type; -+ u8 pad; -+ __le64 user_ctx; -+ union { -+ __le32 options; -+ __le32 fqid; -+ }; -+}; -+ -+struct dpdmai_rsp_get_tx_queue { -+ __le64 pad; -+ __le32 fqid; -+}; -+ -+ -+int dpdmai_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpdmai_id, -+ uint16_t *token) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ struct dpdmai_cmd_open *cmd_params; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ -+ cmd_params = (struct dpdmai_cmd_open *)cmd.params; -+ cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = mc_cmd_hdr_read_token(&cmd); -+ return 0; -+} -+ -+int dpdmai_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpdmai_cfg *cfg, -+ uint16_t *token) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPDMAI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpdmai_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct fsl_mc_command cmd = { 0 
}; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmai_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmai_irq_cfg *irq_cfg) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmai_irq_cfg *irq_cfg) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ 
token); -+ DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPDMAI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmai_attr *attr) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ int err; -+ struct dpdmai_rsp_get_attributes *rsp_params; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params; -+ attr->id = le32_to_cpu(rsp_params->id); -+ attr->version.major = le16_to_cpu(rsp_params->major); -+ attr->version.minor = le16_to_cpu(rsp_params->minor); -+ attr->num_of_priorities = rsp_params->num_of_priorities; -+ -+ -+ return 0; -+} -+ -+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpdmai_rx_queue_cfg *cfg) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ struct dpdmai_cmd_queue *cmd_params; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE, -+ cmd_flags, -+ token); -+ -+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params; -+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); -+ cmd_params->priority = cfg->dest_cfg.priority; -+ cmd_params->queue = priority; -+ cmd_params->dest_type = cfg->dest_cfg.dest_type; -+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); -+ cmd_params->options = cpu_to_le32(cfg->options); -+ -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, struct dpdmai_rx_queue_attr *attr) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ struct dpdmai_cmd_queue *cmd_params; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE, -+ cmd_flags, -+ token); -+ -+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params; -+ cmd_params->queue = priority; -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); -+ attr->dest_cfg.priority = cmd_params->priority; -+ attr->dest_cfg.dest_type = cmd_params->dest_type; -+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); -+ attr->fqid = le32_to_cpu(cmd_params->fqid); -+ -+ return 0; -+} -+ -+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_tx_queue_attr *attr) -+{ -+ struct fsl_mc_command cmd = { 0 }; -+ struct dpdmai_cmd_queue *cmd_params; -+ struct dpdmai_rsp_get_tx_queue *rsp_params; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE, -+ cmd_flags, -+ token); -+ -+ cmd_params = (struct dpdmai_cmd_queue *)cmd.params; -+ cmd_params->queue = priority; -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ -+ rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; -+ attr->fqid = le32_to_cpu(rsp_params->fqid); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai.h -@@ -0,0 +1,521 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */
-+#ifndef __FSL_DPDMAI_H
-+#define __FSL_DPDMAI_H
-+
-+struct fsl_mc_io;
-+
-+/* Data Path DMA Interface API
-+ * Contains initialization APIs and runtime control APIs for DPDMAI
-+ */
-+
-+/* General DPDMAI macros */
-+
-+/**
-+ * Maximum number of Tx/Rx priorities per DPDMAI object
-+ */
-+#define DPDMAI_PRIO_NUM 2
-+
-+/**
-+ * All queues considered; see dpdmai_set_rx_queue()
-+ */
-+#define DPDMAI_ALL_QUEUES (uint8_t)(-1)
-+
-+/**
-+ * dpdmai_open() - Open a control session for the specified object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @dpdmai_id: DPDMAI unique ID
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * This function can be used to open a control session for an
-+ * already created object; an object may have been declared in
-+ * the DPL or by calling the dpdmai_create() function.
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent commands for
-+ * this specific object.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_open(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ int dpdmai_id,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmai_close() - Close the control session of the object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * After this function is called, no further operations are
-+ * allowed on the object without opening a new control session.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_close(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token);
-+
-+/**
-+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
-+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
-+ * configured with values 1-8; the entry following the last valid entry
-+ * should be configured with 0
-+ */
-+struct dpdmai_cfg {
-+ uint8_t priorities[DPDMAI_PRIO_NUM];
-+};
-+
-+/**
-+ * dpdmai_create() - Create the DPDMAI object
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @cfg: Configuration structure
-+ * @token: Returned token; use in subsequent API calls
-+ *
-+ * Create the DPDMAI object, allocate required resources and
-+ * perform required initialization.
-+ *
-+ * The object can be created either by declaring it in the
-+ * DPL file, or by calling this function.
-+ *
-+ * This function returns a unique authentication token,
-+ * associated with the specific object ID and the specific MC
-+ * portal; this token must be used in all subsequent calls to
-+ * this specific object. For objects that are created using the
-+ * DPL file, call dpdmai_open() function to get an authentication
-+ * token first.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_create(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ const struct dpdmai_cfg *cfg,
-+ uint16_t *token);
-+
-+/**
-+ * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ *
-+ * Return: '0' on Success; Error code otherwise.
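-+ *
-+ * A minimal sketch of the token lifecycle shared by this API (mc_io and
-+ * dpdmai_id are assumed to come from the caller; cmd_flags of 0 is used
-+ * purely for illustration):
-+ *
-+ *	uint16_t token;
-+ *
-+ *	if (!dpdmai_open(mc_io, 0, dpdmai_id, &token))
-+ *		dpdmai_close(mc_io, 0, token);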
-+ */ -+int dpdmai_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpdmai_is_enabled() - Check if the DPDMAI is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpdmai_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpdmai_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpdmai_set_irq() - Set IRQ information for the DPDMAI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpdmai_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmai_get_irq() - Get IRQ information from the DPDMAI -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpdmai_irq_cfg *irq_cfg); -+ -+/** -+ * dpdmai_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Interrupt state - enable = 1, disable = 0
-+ *
-+ * Allows GPP software to control when interrupts are generated.
-+ * Each interrupt can have up to 32 causes. The enable/disable setting
-+ * controls the overall interrupt state: if the interrupt is disabled,
-+ * no cause will trigger an interrupt.
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t en);
-+
-+/**
-+ * dpdmai_get_irq_enable() - Get overall interrupt state
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @en: Returned Interrupt state - enable = 1, disable = 0
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_enable(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint8_t *en);
-+
-+/**
-+ * dpdmai_set_irq_mask() - Set interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: event mask to trigger interrupt;
-+ * each bit:
-+ * 0 = ignore event
-+ * 1 = consider event for asserting IRQ
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_set_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t mask);
-+
-+/**
-+ * dpdmai_get_irq_mask() - Get interrupt mask.
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @mask: Returned event mask to trigger interrupt
-+ *
-+ * Every interrupt can have up to 32 causes and the interrupt model supports
-+ * masking/unmasking each cause independently
-+ *
-+ * Return: '0' on Success; Error code otherwise.
-+ */
-+int dpdmai_get_irq_mask(struct fsl_mc_io *mc_io,
-+ uint32_t cmd_flags,
-+ uint16_t token,
-+ uint8_t irq_index,
-+ uint32_t *mask);
-+
-+/**
-+ * dpdmai_get_irq_status() - Get the current status of any pending interrupts
-+ * @mc_io: Pointer to MC portal's I/O object
-+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
-+ * @token: Token of DPDMAI object
-+ * @irq_index: The interrupt index to configure
-+ * @status: Returned interrupts status - one bit per cause:
-+ * 0 = no interrupt pending
-+ * 1 = interrupt pending
-+ *
-+ * Return: '0' on Success; Error code otherwise.
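-+ *
-+ * A small sketch of the check-and-acknowledge pattern this call enables,
-+ * together with dpdmai_clear_irq_status() below (token and irq_index 0
-+ * are assumed to come from earlier setup):
-+ *
-+ *	uint32_t status = 0;
-+ *
-+ *	if (!dpdmai_get_irq_status(mc_io, 0, token, 0, &status) && status)
-+ *		dpdmai_clear_irq_status(mc_io, 0, token, 0, status);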
-+ */ -+int dpdmai_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpdmai_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpdmai_attr - Structure representing DPDMAI attributes -+ * @id: DPDMAI object ID -+ * @version: DPDMAI version -+ * @num_of_priorities: number of priorities -+ */ -+struct dpdmai_attr { -+ int id; -+ /** -+ * struct version - DPDMAI version -+ * @major: DPDMAI major version -+ * @minor: DPDMAI minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint8_t num_of_priorities; -+}; -+ -+/** -+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpdmai_attr *attr); -+ -+/** -+ * enum dpdmai_dest - DPDMAI destination types -+ * @DPDMAI_DEST_NONE: Unassigned destination; The queue is set in parked mode -+ * and does not generate FQDAN notifications; user is expected to dequeue -+ * from the queue based on polling or other user-defined method -+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON object; -+ * user is expected to dequeue from the DPCON channel -+ */ -+enum dpdmai_dest { -+ DPDMAI_DEST_NONE = 0, -+ DPDMAI_DEST_DPIO = 1, -+ DPDMAI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 'DPDMAI_DEST_NONE' option -+ */ -+struct dpdmai_dest_cfg { -+ enum dpdmai_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPDMAI queue modification options */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPDMAI_QUEUE_OPT_USER_CTX 0x00000001 -+ -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPDMAI_QUEUE_OPT_DEST 0x00000002 -+ -+/** -+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; -+ * valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is 
contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options' -+ */ -+struct dpdmai_rx_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpdmai_dest_cfg dest_cfg; -+ -+}; -+ -+/** -+ * dpdmai_set_rx_queue() - Set Rx queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation; use -+ * DPDMAI_ALL_QUEUES to configure all Rx queues -+ * identically. -+ * @cfg: Rx queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ const struct dpdmai_rx_queue_cfg *cfg); -+ -+/** -+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @fqid: Virtual FQID value to be used for dequeue operations -+ */ -+struct dpdmai_rx_queue_attr { -+ uint64_t user_ctx; -+ struct dpdmai_dest_cfg dest_cfg; -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation -+ * @attr: Returned Rx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_rx_queue_attr *attr); -+ -+/** -+ * struct dpdmai_tx_queue_attr - Structure representing attributes of Tx queues -+ * @fqid: Virtual FQID to be used for sending frames to DMA hardware -+ */ -+ -+struct dpdmai_tx_queue_attr { -+ uint32_t fqid; -+}; -+ -+/** -+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPDMAI object -+ * @priority: Select the queue relative to number of -+ * priorities configured at DPDMAI creation -+ * @attr: Returned Tx queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t priority, -+ struct dpdmai_tx_queue_attr *attr); -+ -+#endif /* __FSL_DPDMAI_H */ ---- /dev/null -+++ b/drivers/dma/dpaa2-qdma/fsl_dpdmai_cmd.h -@@ -0,0 +1,222 @@ -+/* Copyright 2013-2016 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPDMAI_CMD_H -+#define _FSL_DPDMAI_CMD_H -+ -+/* DPDMAI Version */ -+#define DPDMAI_VER_MAJOR 2 -+#define DPDMAI_VER_MINOR 2 -+ -+#define DPDMAI_CMD_BASE_VERSION 0 -+#define DPDMAI_CMD_ID_OFFSET 4 -+ -+/* Command IDs */ -+#define DPDMAI_CMDID_CLOSE ((0x800 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_OPEN ((0x80E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_CREATE ((0x90E << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_DESTROY ((0x900 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+ -+#define DPDMAI_CMDID_ENABLE ((0x002 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_DISABLE ((0x003 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_ATTR ((0x004 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_RESET ((0x005 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_IS_ENABLED ((0x006 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+ -+#define DPDMAI_CMDID_SET_IRQ ((0x010 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_IRQ ((0x011 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_SET_IRQ_ENABLE ((0x012 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_IRQ_ENABLE ((0x013 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_SET_IRQ_MASK ((0x014 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_IRQ_MASK ((0x015 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_IRQ_STATUS ((0x016 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS ((0x017 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+ -+#define DPDMAI_CMDID_SET_RX_QUEUE ((0x1A0 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_RX_QUEUE ((0x1A1 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+#define DPDMAI_CMDID_GET_TX_QUEUE ((0x1A2 << DPDMAI_CMD_ID_OFFSET) | DPDMAI_CMD_BASE_VERSION) -+ -+ -+#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ -+#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ -+ -+ -+#define 
MAKE_UMASK64(_width) \ -+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \ -+ (uint64_t)-1)) -+ -+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) -+{ -+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); -+} -+ -+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) -+{ -+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); -+} -+ -+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) -+ -+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) -+ -+#define MC_CMD_HDR_READ_TOKEN(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_OPEN(cmd, dpdmai_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpdmai_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[0]);\ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[1]);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_IRQ_STATUS(cmd, status) \ 
-+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->num_of_priorities); \ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_SET_RX_QUEUE(cmd, priority, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority); \ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpdmai_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_RX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_RX_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 48, 4, enum dpdmai_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_CMD_GET_TX_QUEUE(cmd, priority) \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, priority) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPDMAI_RSP_GET_TX_QUEUE(cmd, attr) \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->fqid) -+ -+#endif /* _FSL_DPDMAI_CMD_H */ ---- /dev/null -+++ b/drivers/dma/fsl-qdma.c -@@ -0,0 +1,1243 @@ -+/* -+ * drivers/dma/fsl-qdma.c -+ * -+ * Copyright 2014-2015 Freescale Semiconductor, Inc. -+ * -+ * Driver for the Freescale qDMA engine with software command queue mode. -+ * Channel virtualization is supported through enqueuing of DMA jobs to, -+ * or dequeuing DMA jobs from, different work queues. -+ * This module can be found on Freescale LS SoCs. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. 
-+ */
-+
-+#include <asm/cacheflush.h>
-+#include <linux/clk.h>
-+#include <linux/delay.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/dmapool.h>
-+#include <linux/init.h>
-+#include <linux/interrupt.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_address.h>
-+#include <linux/of_device.h>
-+#include <linux/of_dma.h>
-+#include <linux/of_irq.h>
-+#include <linux/slab.h>
-+#include <linux/spinlock.h>
-+
-+#include "virt-dma.h"
-+
-+#define FSL_QDMA_DMR 0x0
-+#define FSL_QDMA_DSR 0x4
-+#define FSL_QDMA_DEIER 0xe00
-+#define FSL_QDMA_DEDR 0xe04
-+#define FSL_QDMA_DECFDW0R 0xe10
-+#define FSL_QDMA_DECFDW1R 0xe14
-+#define FSL_QDMA_DECFDW2R 0xe18
-+#define FSL_QDMA_DECFDW3R 0xe1c
-+#define FSL_QDMA_DECFQIDR 0xe30
-+#define FSL_QDMA_DECBR 0xe34
-+
-+#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
-+#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
-+#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
-+#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
-+#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
-+#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
-+#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
-+#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))
-+
-+#define FSL_QDMA_SQDPAR 0x80c
-+#define FSL_QDMA_SQEPAR 0x814
-+#define FSL_QDMA_BSQMR 0x800
-+#define FSL_QDMA_BSQSR 0x804
-+#define FSL_QDMA_BSQICR 0x828
-+#define FSL_QDMA_CQMR 0xa00
-+#define FSL_QDMA_CQDSCR1 0xa08
-+#define FSL_QDMA_CQDSCR2 0xa0c
-+#define FSL_QDMA_CQIER 0xa10
-+#define FSL_QDMA_CQEDR 0xa14
-+#define FSL_QDMA_SQCCMR 0xa20
-+
-+#define FSL_QDMA_SQICR_ICEN
-+
-+#define FSL_QDMA_CQIDR_CQT 0xff000000
-+#define FSL_QDMA_CQIDR_SQPE 0x800000
-+#define FSL_QDMA_CQIDR_SQT 0x8000
-+
-+#define FSL_QDMA_BCQIER_CQTIE 0x8000
-+#define FSL_QDMA_BCQIER_CQPEIE 0x800000
-+#define FSL_QDMA_BSQICR_ICEN 0x80000000
-+#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
-+#define FSL_QDMA_CQIER_MEIE 0x80000000
-+#define FSL_QDMA_CQIER_TEIE 0x1
-+#define FSL_QDMA_SQCCMR_ENTER_WM 0x200000
-+
-+#define FSL_QDMA_QUEUE_MAX 8
-+
-+#define FSL_QDMA_BCQMR_EN 0x80000000
-+#define FSL_QDMA_BCQMR_EI 0x40000000
-+#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
-+#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)
-+
-+#define FSL_QDMA_BCQSR_QF 0x10000
-+#define FSL_QDMA_BCQSR_XOFF 0x1
-+
-+#define FSL_QDMA_BSQMR_EN 0x80000000
-+#define FSL_QDMA_BSQMR_DI 0x40000000
-+#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)
-+
-+#define FSL_QDMA_BSQSR_QE 0x20000
-+
-+#define FSL_QDMA_DMR_DQD 0x40000000
-+#define FSL_QDMA_DSR_DB 0x80000000
-+
-+#define FSL_QDMA_BASE_BUFFER_SIZE 96
-+#define FSL_QDMA_EXPECT_SG_ENTRY_NUM 16
-+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
-+#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
-+#define FSL_QDMA_QUEUE_NUM_MAX 8
-+
-+#define FSL_QDMA_CMD_RWTTYPE 0x4
-+#define FSL_QDMA_CMD_LWC 0x2
-+
-+#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
-+#define FSL_QDMA_CMD_NS_OFFSET 27
-+#define FSL_QDMA_CMD_DQOS_OFFSET 24
-+#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
-+#define FSL_QDMA_CMD_DSEN_OFFSET 19
-+#define FSL_QDMA_CMD_LWC_OFFSET 16
-+
-+#define FSL_QDMA_E_SG_TABLE 1
-+#define FSL_QDMA_E_DATA_BUFFER 0
-+#define FSL_QDMA_F_LAST_ENTRY 1
-+
-+u64 pre_addr, pre_queue;
-+
-+/* qDMA Command Descriptor Formats */
-+
-+/* Compound Command Descriptor Format */
-+struct fsl_qdma_ccdf {
-+ __le32 status; /* ser, status */
-+ __le32 cfg; /* format, offset */
-+ union {
-+ struct {
-+ __le32 addr_lo; /* low 32-bits of 40-bit address */
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u8 __reserved1[2];
-+ u8 cfg8b_w1; /* dd, queue */
-+ } __packed;
-+ __le64 data;
-+ };
-+} __packed;
-+
-+#define QDMA_CCDF_STATUS 20
-+#define QDMA_CCDF_OFFSET 20
-+#define QDMA_CCDF_MASK GENMASK(28, 20)
-+#define QDMA_CCDF_FOTMAT BIT(29)
-+#define QDMA_CCDF_SER BIT(30)
-+
-+static inline u64 qdma_ccdf_addr_get64(const struct fsl_qdma_ccdf *ccdf)
-+{
-+ return le64_to_cpu(ccdf->data) & 0xffffffffffLLU;
-+}
-+static inline u64 qdma_ccdf_get_queue(const struct fsl_qdma_ccdf *ccdf)
-+{
-+ return ccdf->cfg8b_w1 & 0xff;
-+}
-+static inline void qdma_ccdf_addr_set64(struct fsl_qdma_ccdf *ccdf, u64 addr)
-+{
-+ ccdf->addr_hi = upper_32_bits(addr);
-+ ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
-+}
-+static inline int qdma_ccdf_get_offset(const struct fsl_qdma_ccdf *ccdf)
-+{
-+ return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
-+}
-+static inline void qdma_ccdf_set_format(struct fsl_qdma_ccdf *ccdf, int offset)
-+{
-+ ccdf->cfg = cpu_to_le32(QDMA_CCDF_FOTMAT | offset);
-+}
-+static inline int qdma_ccdf_get_status(const struct fsl_qdma_ccdf *ccdf)
-+{
-+ return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
-+}
-+static inline void qdma_ccdf_set_ser(struct fsl_qdma_ccdf *ccdf, int status)
-+{
-+ ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
-+}
-+/* qDMA Compound S/G Format */
-+struct fsl_qdma_csgf {
-+ __le32 offset; /* offset */
-+ __le32 cfg; /* E bit, F bit, length */
-+ union {
-+ struct {
-+ __le32 addr_lo; /* low 32-bits of 40-bit address */
-+ u8 addr_hi; /* high 8-bits of 40-bit address */
-+ u8 __reserved1[3];
-+ };
-+ __le64 data;
-+ };
-+} __packed;
-+
-+#define QDMA_SG_FIN BIT(30)
-+#define QDMA_SG_EXT BIT(31)
-+#define QDMA_SG_LEN_MASK GENMASK(29, 0)
-+static inline u64 qdma_csgf_addr_get64(const struct fsl_qdma_csgf *sg)
-+{
-+ return be64_to_cpu(sg->data) & 0xffffffffffLLU;
-+}
-+static inline void qdma_csgf_addr_set64(struct fsl_qdma_csgf *sg, u64 addr)
-+{
-+ sg->addr_hi = upper_32_bits(addr);
-+ sg->addr_lo = cpu_to_le32(lower_32_bits(addr));
-+}
-+static inline void qdma_csgf_set_len(struct fsl_qdma_csgf *csgf, int len)
-+{
-+ csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
-+}
-+static inline void qdma_csgf_set_f(struct fsl_qdma_csgf *csgf, int len)
-+{
-+ csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
-+}
-+static inline void qdma_csgf_set_e(struct fsl_qdma_csgf *csgf, int len)
-+{
-+ csgf->cfg = cpu_to_le32(QDMA_SG_EXT | (len & QDMA_SG_LEN_MASK));
-+}
-+
-+/* qDMA Source Descriptor Format */
-+struct fsl_qdma_sdf {
-+ __le32 rev3;
-+ __le32 cfg; /* rev4, bit[0-11] - ssd, bit[12-23] sss */
-+ __le32 rev5;
-+ __le32 cmd;
-+} __packed;
-+
-+/* qDMA Destination Descriptor Format */
-+struct fsl_qdma_ddf {
-+ __le32 rev1;
-+ __le32 cfg; /* rev2, bit[0-11] - dsd, bit[12-23] - dss */
-+ __le32 rev3;
-+ __le32 cmd;
-+} __packed;
-+
-+struct fsl_qdma_chan {
-+ struct virt_dma_chan vchan;
-+ struct virt_dma_desc vdesc;
-+ enum dma_status status;
-+ u32 slave_id;
-+ struct fsl_qdma_engine *qdma;
-+ struct fsl_qdma_queue *queue;
-+ struct list_head qcomp;
-+};
-+
-+struct fsl_qdma_queue {
-+ struct fsl_qdma_ccdf *virt_head;
-+ struct fsl_qdma_ccdf *virt_tail;
-+ struct list_head comp_used;
-+ struct list_head comp_free;
-+ struct dma_pool *comp_pool;
-+ struct dma_pool *sg_pool;
-+ spinlock_t queue_lock;
-+ dma_addr_t bus_addr;
-+ u32 n_cq;
-+ u32 id;
-+ struct fsl_qdma_ccdf *cq;
-+};
-+
-+struct fsl_qdma_sg {
-+ dma_addr_t bus_addr;
-+ void *virt_addr;
-+};
-+
-+struct fsl_qdma_comp {
-+ dma_addr_t bus_addr;
-+ void *virt_addr;
-+ struct fsl_qdma_chan *qchan;
-+ struct fsl_qdma_sg *sg_block;
-+ struct virt_dma_desc vdesc;
-+ struct list_head list;
-+ u32 sg_block_src;
-+ u32 sg_block_dst;
-+};
-+
-+struct fsl_qdma_engine {
-+ struct dma_device dma_dev;
-+ void __iomem *ctrl_base;
-+ void __iomem *status_base;
-+ void __iomem *block_base;
-+ u32 n_chans;
-+ u32 n_queues;
-+ struct mutex fsl_qdma_mutex;
-+ int error_irq;
-+ int queue_irq;
-+ bool big_endian;
-+ struct fsl_qdma_queue *queue;
-+ struct fsl_qdma_queue *status;
-+ struct fsl_qdma_chan chans[];
-+};
-+
-+static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
-+{
-+ if (qdma->big_endian)
-+ return ioread32be(addr);
-+ else
-+ return ioread32(addr);
-+}
-+
-+static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
-+ void __iomem *addr)
-+{
-+ if (qdma->big_endian)
-+ iowrite32be(val, addr);
-+ else
-+ iowrite32(val, addr);
-+}
-+
-+static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
-+{
-+ return container_of(chan, struct fsl_qdma_chan, vchan.chan);
-+}
-+
-+static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
-+{
-+ return container_of(vd, struct fsl_qdma_comp, vdesc);
-+}
-+
-+static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
-+{
-+ /*
-+ * In QDMA mode, we don't need to do anything.
-+ */
-+ return 0;
-+}
-+
-+static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ unsigned long flags;
-+ LIST_HEAD(head);
-+
-+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
-+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-+
-+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
-+}
-+
-+static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
-+ dma_addr_t dst, dma_addr_t src, u32 len)
-+{
-+ struct fsl_qdma_ccdf *ccdf;
-+ struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest;
-+ struct fsl_qdma_sdf *sdf;
-+ struct fsl_qdma_ddf *ddf;
-+
-+ ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
-+ csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
-+ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
-+ csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
-+ sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
-+ ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
-+
-+ memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
-+ /* Head Command Descriptor (Frame Descriptor) */
-+ qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
-+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
-+ /* Status notification is enqueued to status queue. */
-+ /* Compound Command Descriptor (Frame List Table) */
-+ qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
-+ /* The length must be 32 for a Compound S/G Descriptor */
-+ qdma_csgf_set_len(csgf_desc, 32);
-+ qdma_csgf_addr_set64(csgf_src, src);
-+ qdma_csgf_set_len(csgf_src, len);
-+ qdma_csgf_addr_set64(csgf_dest, dst);
-+ qdma_csgf_set_len(csgf_dest, len);
-+ /* This entry is the last entry. */
-+ qdma_csgf_set_f(csgf_dest, len);
-+ /* Descriptor Buffer */
-+ sdf->cmd = cpu_to_le32(
-+ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
-+ ddf->cmd = cpu_to_le32(
-+ FSL_QDMA_CMD_RWTTYPE << FSL_QDMA_CMD_RWTTYPE_OFFSET);
-+ ddf->cmd |= cpu_to_le32(
-+ FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
-+}
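-+
-+/*
-+ * Layout of one completion buffer (FSL_QDMA_BASE_BUFFER_SIZE bytes) as
-+ * filled in above; each descriptor slot is 16 bytes:
-+ *
-+ *	+0	ccdf		frame descriptor, points at bus_addr + 16
-+ *	+16	csgf_desc	frame list entry, points at bus_addr + 64
-+ *	+32	csgf_src	source entry
-+ *	+48	csgf_dest	destination entry, carries the final (F) bit
-+ *	+64	sdf		source descriptor command
-+ *	+80	ddf		destination descriptor command
-+ */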
-+
-+static void fsl_qdma_comp_fill_sg(
-+ struct fsl_qdma_comp *fsl_comp,
-+ struct scatterlist *dst_sg, unsigned int dst_nents,
-+ struct scatterlist *src_sg, unsigned int src_nents)
-+{
-+ struct fsl_qdma_ccdf *ccdf;
-+ struct fsl_qdma_csgf *csgf_desc, *csgf_src, *csgf_dest, *csgf_sg;
-+ struct fsl_qdma_sdf *sdf;
-+ struct fsl_qdma_ddf *ddf;
-+ struct fsl_qdma_sg *sg_block, *temp;
-+ struct scatterlist *sg;
-+ u64 total_src_len = 0;
-+ u64 total_dst_len = 0;
-+ u32 i;
-+
-+ ccdf = (struct fsl_qdma_ccdf *)fsl_comp->virt_addr;
-+ csgf_desc = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 1;
-+ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 2;
-+ csgf_dest = (struct fsl_qdma_csgf *)fsl_comp->virt_addr + 3;
-+ sdf = (struct fsl_qdma_sdf *)fsl_comp->virt_addr + 4;
-+ ddf = (struct fsl_qdma_ddf *)fsl_comp->virt_addr + 5;
-+ memset(fsl_comp->virt_addr, 0, FSL_QDMA_BASE_BUFFER_SIZE);
-+ /* Head Command Descriptor (Frame Descriptor) */
-+ qdma_ccdf_addr_set64(ccdf, fsl_comp->bus_addr + 16);
-+ qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
-+ /* Status notification is enqueued to status queue. */
-+ qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
-+
-+ /* Compound Command Descriptor (Frame List Table) */
-+ qdma_csgf_addr_set64(csgf_desc, fsl_comp->bus_addr + 64);
-+ /* The length must be 32 for a Compound S/G Descriptor */
-+ qdma_csgf_set_len(csgf_desc, 32);
-+
-+ sg_block = fsl_comp->sg_block;
-+ qdma_csgf_addr_set64(csgf_src, sg_block->bus_addr);
-+ /* This entry links to the s/g entry. */
-+ qdma_csgf_set_e(csgf_src, 32);
-+
-+ temp = sg_block + fsl_comp->sg_block_src;
-+ qdma_csgf_addr_set64(csgf_dest, temp->bus_addr);
-+ /* This entry is the last entry. */
-+ qdma_csgf_set_f(csgf_dest, 32);
-+ /* This entry links to the s/g entry. */
-+ qdma_csgf_set_e(csgf_dest, 32);
-+
-+ for_each_sg(src_sg, sg, src_nents, i) {
-+ temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
-+ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
-+ i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
-+ qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg));
-+ qdma_csgf_set_len(csgf_sg, sg_dma_len(sg));
-+ total_src_len += sg_dma_len(sg);
-+
-+ if (i == src_nents - 1)
-+ qdma_csgf_set_f(csgf_sg, sg_dma_len(sg));
-+ if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
-+ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
-+ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
-+ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
-+ temp = sg_block +
-+ i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
-+ qdma_csgf_addr_set64(csgf_sg, temp->bus_addr);
-+ qdma_csgf_set_e(csgf_sg, sg_dma_len(sg));
-+ }
-+ }
-+
-+ sg_block += fsl_comp->sg_block_src;
-+ for_each_sg(dst_sg, sg, dst_nents, i) {
-+ temp = sg_block + i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
-+ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
-+ i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1);
-+ qdma_csgf_addr_set64(csgf_sg, sg_dma_address(sg));
-+ qdma_csgf_set_len(csgf_sg, sg_dma_len(sg));
-+ total_dst_len += sg_dma_len(sg);
-+
-+ if (i == dst_nents - 1)
-+ qdma_csgf_set_f(csgf_sg, sg_dma_len(sg));
-+ if (i % (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) ==
-+ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 2) {
-+ csgf_sg = (struct fsl_qdma_csgf *)temp->virt_addr +
-+ FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1;
-+ temp = sg_block +
-+ i / (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
-+ qdma_csgf_addr_set64(csgf_sg, temp->bus_addr);
-+ qdma_csgf_set_e(csgf_sg, sg_dma_len(sg));
-+ }
-+ }
-+
-+ if (total_src_len != total_dst_len)
-+ dev_err(&fsl_comp->qchan->vchan.chan.dev->device,
-+ "The data length for src and dst doesn't match.\n");
-+
-+ qdma_csgf_set_len(csgf_src, total_src_len);
-+ qdma_csgf_set_len(csgf_dest, total_dst_len);
-+
-+ /* Descriptor Buffer */
-+}
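-+
-+/*
-+ * Each s/g block holds FSL_QDMA_EXPECT_SG_ENTRY_NUM (16) entries: the
-+ * first 15 carry data buffers and the 16th is written with the E
-+ * (extension) bit set to chain to the next block, which is why the
-+ * loops above index with FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1.
-+ */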
-+
-+/*
-+ * Pre-request full command descriptor for enqueue.
-+ */
-+static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
-+{
-+ struct fsl_qdma_comp *comp_temp;
-+ int i;
-+
-+ for (i = 0; i < queue->n_cq; i++) {
-+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
-+ if (!comp_temp)
-+ return -1;
-+ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
-+ GFP_NOWAIT,
-+ &comp_temp->bus_addr);
-+ if (!comp_temp->virt_addr)
-+ return -1;
-+ list_add_tail(&comp_temp->list, &queue->comp_free);
-+ }
-+ return 0;
-+}
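-+
-+/*
-+ * Sizing example for the block arithmetic used below: with
-+ * FSL_QDMA_EXPECT_SG_ENTRY_NUM = 16, each block carries 15 data entries
-+ * plus one extension link, so e.g. src_nents = 40 yields
-+ * 40 / 15 + 1 = 3 blocks (room for up to 45 entries).
-+ */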
-+
-+/*
-+ * Request a command descriptor for enqueue.
-+ */
-+static struct fsl_qdma_comp *fsl_qdma_request_enqueue_desc(
-+ struct fsl_qdma_chan *fsl_chan,
-+ unsigned int dst_nents,
-+ unsigned int src_nents)
-+{
-+ struct fsl_qdma_comp *comp_temp;
-+ struct fsl_qdma_sg *sg_block;
-+ struct fsl_qdma_queue *queue = fsl_chan->queue;
-+ unsigned long flags;
-+ unsigned int dst_sg_entry_block, src_sg_entry_block, sg_entry_total, i;
-+
-+ spin_lock_irqsave(&queue->queue_lock, flags);
-+ if (list_empty(&queue->comp_free)) {
-+ spin_unlock_irqrestore(&queue->queue_lock, flags);
-+ comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
-+ if (!comp_temp)
-+ return NULL;
-+ comp_temp->virt_addr = dma_pool_alloc(queue->comp_pool,
-+ GFP_NOWAIT,
-+ &comp_temp->bus_addr);
-+ if (!comp_temp->virt_addr)
-+ return NULL;
-+ } else {
-+ comp_temp = list_first_entry(&queue->comp_free,
-+ struct fsl_qdma_comp,
-+ list);
-+ list_del(&comp_temp->list);
-+ spin_unlock_irqrestore(&queue->queue_lock, flags);
-+ }
-+
-+ if (dst_nents != 0)
-+ dst_sg_entry_block = dst_nents /
-+ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
-+ else
-+ dst_sg_entry_block = 0;
-+
-+ if (src_nents != 0)
-+ src_sg_entry_block = src_nents /
-+ (FSL_QDMA_EXPECT_SG_ENTRY_NUM - 1) + 1;
-+ else
-+ src_sg_entry_block = 0;
-+
-+ sg_entry_total = dst_sg_entry_block + src_sg_entry_block;
-+ if (sg_entry_total) {
-+ sg_block = kzalloc(sizeof(*sg_block) *
-+ sg_entry_total,
-+ GFP_KERNEL);
-+ if (!sg_block)
-+ return NULL;
-+ comp_temp->sg_block = sg_block;
-+ for (i = 0; i < sg_entry_total; i++) {
-+ sg_block->virt_addr = dma_pool_alloc(queue->sg_pool,
-+ GFP_NOWAIT,
-+ &sg_block->bus_addr);
-+ memset(sg_block->virt_addr, 0,
-+ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16);
-+ sg_block++;
-+ }
-+ }
-+
-+ comp_temp->sg_block_src = src_sg_entry_block;
-+ comp_temp->sg_block_dst = dst_sg_entry_block;
-+ comp_temp->qchan = fsl_chan;
-+
-+ return comp_temp;
-+}
-+
-+static struct fsl_qdma_queue *fsl_qdma_alloc_queue_resources(
-+ struct platform_device *pdev,
-+ unsigned int queue_num)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_queue *queue_head, *queue_temp;
-+ int ret, len, i;
-+ unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
-+
-+ if (queue_num > FSL_QDMA_QUEUE_MAX)
-+ queue_num = FSL_QDMA_QUEUE_MAX;
-+ len = sizeof(*queue_head) * queue_num;
-+ queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!queue_head)
-+ return NULL;
-+
-+ ret = of_property_read_u32_array(np, "queue-sizes", queue_size,
-+ queue_num);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get queue-sizes.\n");
-+ return NULL;
-+ }
-+
-+ for (i = 0; i < queue_num; i++) {
-+ if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
-+ || queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-+ dev_err(&pdev->dev, "Get wrong queue-sizes.\n");
-+ return NULL;
-+ }
-+ queue_temp = queue_head + i;
-+ queue_temp->cq = dma_alloc_coherent(&pdev->dev,
-+ sizeof(struct fsl_qdma_ccdf) *
-+ queue_size[i],
-+ &queue_temp->bus_addr,
-+ GFP_KERNEL);
-+ if (!queue_temp->cq)
-+ return NULL;
-+ queue_temp->n_cq = queue_size[i];
-+ queue_temp->id = i;
-+ queue_temp->virt_head = queue_temp->cq;
-+ queue_temp->virt_tail = queue_temp->cq;
-+ /*
-+ * The dma pool for queue command buffer
-+ */
-+ queue_temp->comp_pool = dma_pool_create("comp_pool",
-+ &pdev->dev,
-+ FSL_QDMA_BASE_BUFFER_SIZE,
-+ 16, 0);
-+ if (!queue_temp->comp_pool) {
-+ dma_free_coherent(&pdev->dev,
-+ sizeof(struct fsl_qdma_ccdf) *
-+ queue_size[i],
-+ queue_temp->cq,
-+ queue_temp->bus_addr);
-+ return NULL;
-+ }
-+ /*
-+ * The dma pool for the s/g entry blocks
-+ */
-+ queue_temp->sg_pool = dma_pool_create("sg_pool",
-+ &pdev->dev,
-+ FSL_QDMA_EXPECT_SG_ENTRY_NUM * 16,
-+ 64, 0);
-+ if (!queue_temp->sg_pool) {
-+ dma_free_coherent(&pdev->dev,
-+ sizeof(struct fsl_qdma_ccdf) *
-+ queue_size[i],
-+ queue_temp->cq,
-+ queue_temp->bus_addr);
-+ dma_pool_destroy(queue_temp->comp_pool);
-+ return NULL;
-+ }
-+ /*
-+ * List for queue command buffer
-+ */
-+ INIT_LIST_HEAD(&queue_temp->comp_used);
-+ INIT_LIST_HEAD(&queue_temp->comp_free);
-+ spin_lock_init(&queue_temp->queue_lock);
-+ }
-+
-+ return queue_head;
-+}
-+
-+static struct fsl_qdma_queue *fsl_qdma_prep_status_queue(
-+ struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_queue *status_head;
-+ unsigned int status_size;
-+ int ret;
-+
-+ ret = of_property_read_u32(np, "status-sizes", &status_size);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get status-sizes.\n");
-+ return NULL;
-+ }
-+ if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX
-+ || status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
-+ dev_err(&pdev->dev, "Invalid status-sizes value.\n");
-+ return NULL;
-+ }
-+ status_head = devm_kzalloc(&pdev->dev, sizeof(*status_head),
-+ GFP_KERNEL);
-+ if (!status_head)
-+ return NULL;
-+
-+ /*
-+ * Buffer for queue command
-+ */
-+ status_head->cq = dma_alloc_coherent(&pdev->dev,
-+ sizeof(struct fsl_qdma_ccdf) *
-+ status_size,
-+ &status_head->bus_addr,
-+ GFP_KERNEL);
-+ if (!status_head->cq)
-+ return NULL;
-+ status_head->n_cq = status_size;
-+ status_head->virt_head = status_head->cq;
-+ status_head->virt_tail = status_head->cq;
-+ status_head->comp_pool = NULL;
-+
-+ return status_head;
-+}
-+
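-+/*
-+ * Quiesce the engine: set DMR[DQD] to stop dequeueing, disable every
-+ * command queue, then poll DSR until the engine reports idle, giving
-+ * up after a bounded number of retries.
-+ */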
-+static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
-+{
-+ void __iomem *ctrl = fsl_qdma->ctrl_base;
-+ void __iomem *block = fsl_qdma->block_base;
-+ int i, count = 5;
-+ u32 reg;
-+
-+ /* Disable the command queue and wait for idle state. */
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
-+ reg |= FSL_QDMA_DMR_DQD;
-+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
-+ for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
-+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
-+
-+ while (1) {
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
-+ if (!(reg & FSL_QDMA_DSR_DB))
-+ break;
-+ if (count-- < 0)
-+ return -EBUSY;
-+ udelay(100);
-+ }
-+
-+ /* Disable status queue. */
-+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);
-+
-+ /*
-+ * Clear the command queue interrupt detect register for all queues.
-+ */
-+ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
-+
-+ return 0;
-+}
-+
-+static int fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma)
-+{
-+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-+ struct fsl_qdma_queue *fsl_status = fsl_qdma->status;
-+ struct fsl_qdma_queue *temp_queue;
-+ struct fsl_qdma_comp *fsl_comp;
-+ struct fsl_qdma_ccdf *status_addr;
-+ struct fsl_qdma_csgf *csgf_src;
-+ void __iomem *block = fsl_qdma->block_base;
-+ u32 reg, i;
-+ bool duplicate, duplicate_handle;
-+
-+ while (1) {
-+ duplicate = 0;
-+ duplicate_handle = 0;
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
-+ if (reg & FSL_QDMA_BSQSR_QE)
-+ return 0;
-+ status_addr = fsl_status->virt_head;
-+ if (qdma_ccdf_get_queue(status_addr) == pre_queue &&
-+ qdma_ccdf_addr_get64(status_addr) == pre_addr)
-+ duplicate = 1;
-+ i = qdma_ccdf_get_queue(status_addr);
-+ pre_queue = qdma_ccdf_get_queue(status_addr);
-+ pre_addr = qdma_ccdf_addr_get64(status_addr);
-+ temp_queue = fsl_queue + i;
-+ spin_lock(&temp_queue->queue_lock);
-+ if (list_empty(&temp_queue->comp_used)) {
-+ if (duplicate)
-+ duplicate_handle = 1;
-+ else {
-+ spin_unlock(&temp_queue->queue_lock);
-+ return -1;
-+ }
-+ } else {
-+ fsl_comp = list_first_entry(&temp_queue->comp_used,
-+ struct fsl_qdma_comp,
-+ list);
-+ csgf_src = (struct fsl_qdma_csgf *)fsl_comp->virt_addr
-+ + 2;
-+ if (fsl_comp->bus_addr + 16 != pre_addr) {
-+ if (duplicate)
-+ duplicate_handle = 1;
-+ else {
-+ spin_unlock(&temp_queue->queue_lock);
-+ return -1;
-+ }
-+ }
-+ }
-+
-+ if (duplicate_handle) {
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
-+ reg |= FSL_QDMA_BSQMR_DI;
-+ qdma_ccdf_addr_set64(status_addr, 0x0);
-+ fsl_status->virt_head++;
-+ if (fsl_status->virt_head == fsl_status->cq
-+ + fsl_status->n_cq)
-+ fsl_status->virt_head = fsl_status->cq;
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
-+ spin_unlock(&temp_queue->queue_lock);
-+ continue;
-+ }
-+ list_del(&fsl_comp->list);
-+
-+ reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
-+ reg |= FSL_QDMA_BSQMR_DI;
-+ qdma_ccdf_addr_set64(status_addr, 0x0);
-+ fsl_status->virt_head++;
-+ if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
-+ fsl_status->virt_head = fsl_status->cq;
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
-+ spin_unlock(&temp_queue->queue_lock);
-+
-+ spin_lock(&fsl_comp->qchan->vchan.lock);
-+ vchan_cookie_complete(&fsl_comp->vdesc);
-+ fsl_comp->qchan->status = DMA_COMPLETE;
-+ spin_unlock(&fsl_comp->qchan->vchan.lock);
-+ }
-+ return 0;
-+}
-+
-+static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
-+{
-+ struct fsl_qdma_engine *fsl_qdma = dev_id;
-+ unsigned int intr;
-+ void __iomem *status = fsl_qdma->status_base;
-+
-+ intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
-+
-+ if (intr)
-+ dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
-+
-+ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
-+ return IRQ_HANDLED;
-+}
-+
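-+/*
-+ * Per-block queue interrupt handler. A status-queue threshold
-+ * interrupt reaps completed descriptors; if reaping fails, the engine
-+ * is quiesced via DMR[DQD] and further queue interrupts are masked.
-+ */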
-+static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
-+{
-+ struct fsl_qdma_engine *fsl_qdma = dev_id;
-+ unsigned int intr, reg;
-+ void __iomem *block = fsl_qdma->block_base;
-+ void __iomem *ctrl = fsl_qdma->ctrl_base;
-+
-+ intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));
-+
-+ if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
-+ intr = fsl_qdma_queue_transfer_complete(fsl_qdma);
-+
-+ if (intr != 0) {
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
-+ reg |= FSL_QDMA_DMR_DQD;
-+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
-+ qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
-+ dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
-+ }
-+
-+ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int
-+fsl_qdma_irq_init(struct platform_device *pdev,
-+ struct fsl_qdma_engine *fsl_qdma)
-+{
-+ int ret;
-+
-+ fsl_qdma->error_irq = platform_get_irq_byname(pdev,
-+ "qdma-error");
-+ if (fsl_qdma->error_irq < 0) {
-+ dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
-+ return fsl_qdma->error_irq;
-+ }
-+
-+ fsl_qdma->queue_irq = platform_get_irq_byname(pdev, "qdma-queue");
-+ if (fsl_qdma->queue_irq < 0) {
-+ dev_err(&pdev->dev, "Can't get qdma queue irq.\n");
-+ return fsl_qdma->queue_irq;
-+ }
-+
-+ ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
-+ fsl_qdma_error_handler, 0, "qDMA error", fsl_qdma);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
-+ return ret;
-+ }
-+ ret = devm_request_irq(&pdev->dev, fsl_qdma->queue_irq,
-+ fsl_qdma_queue_handler, 0, "qDMA queue", fsl_qdma);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't register qDMA queue IRQ.\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
-+{
-+ struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
-+ struct fsl_qdma_queue *temp;
-+ void __iomem *ctrl = fsl_qdma->ctrl_base;
-+ void __iomem *status = fsl_qdma->status_base;
-+ void __iomem *block = fsl_qdma->block_base;
-+ int i, ret;
-+ u32 reg;
-+
-+ /* Try to halt the qDMA engine first. */
-+ ret = fsl_qdma_halt(fsl_qdma);
-+ if (ret) {
-+ dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
-+ return ret;
-+ }
-+
-+ /*
-+ * Clear the command queue interrupt detect register for all queues.
-+ */
-+ qdma_writel(fsl_qdma, 0xffffffff, block + FSL_QDMA_BCQIDR(0));
-+
-+ for (i = 0; i < fsl_qdma->n_queues; i++) {
-+ temp = fsl_queue + i;
-+ /*
-+ * Initialize Command Queue registers to point to the first
-+ * command descriptor in memory.
-+ * Dequeue Pointer Address Registers
-+ * Enqueue Pointer Address Registers
-+ */
-+ qdma_writel(fsl_qdma, temp->bus_addr,
-+ block + FSL_QDMA_BCQDPA_SADDR(i));
-+ qdma_writel(fsl_qdma, temp->bus_addr,
-+ block + FSL_QDMA_BCQEPA_SADDR(i));
-+
-+ /* Initialize the queue mode. */
-+ reg = FSL_QDMA_BCQMR_EN;
-+ reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq)-4);
-+ reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq)-6);
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
-+ }
-+
-+ /*
-+ * Workaround for erratum ERR010812:
-+ * XOFF must be enabled to avoid enqueue rejections.
-+ * Set SQCCMR ENTER_WM to 0x20.
-+ */
-+ qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
-+ block + FSL_QDMA_SQCCMR);
-+ /*
-+ * Initialize status queue registers to point to the first
-+ * command descriptor in memory.
-+ * Dequeue Pointer Address Registers
-+ * Enqueue Pointer Address Registers
-+ */
-+ qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
-+ block + FSL_QDMA_SQEPAR);
-+ qdma_writel(fsl_qdma, fsl_qdma->status->bus_addr,
-+ block + FSL_QDMA_SQDPAR);
-+ /* Initialize status queue interrupt. */
-+ qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
-+ block + FSL_QDMA_BCQIER(0));
-+ qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN | FSL_QDMA_BSQICR_ICST(5)
-+ | 0x8000,
-+ block + FSL_QDMA_BSQICR);
-+ qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE | FSL_QDMA_CQIER_TEIE,
-+ block + FSL_QDMA_CQIER);
-+ /* Initialize controller interrupt register. */
-+ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEDR);
-+ qdma_writel(fsl_qdma, 0xffffffff, status + FSL_QDMA_DEIER);
-+
-+ /* Initialize the status queue mode. */
-+ reg = FSL_QDMA_BSQMR_EN;
-+ reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status->n_cq)-6);
-+ qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
-+
-+ reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
-+ reg &= ~FSL_QDMA_DMR_DQD;
-+ qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
-+
-+ return 0;
-+}
-+
-+static struct dma_async_tx_descriptor *fsl_qdma_prep_dma_sg(
-+ struct dma_chan *chan,
-+ struct scatterlist *dst_sg, unsigned int dst_nents,
-+ struct scatterlist *src_sg, unsigned int src_nents,
-+ unsigned long flags)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_comp *fsl_comp;
-+
-+ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan,
-+ dst_nents,
-+ src_nents);
-+ fsl_qdma_comp_fill_sg(fsl_comp, dst_sg, dst_nents, src_sg, src_nents);
-+
-+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
-+}
-+
-+static struct dma_async_tx_descriptor *
-+fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
-+ dma_addr_t src, size_t len, unsigned long flags)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_comp *fsl_comp;
-+
-+ fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan, 0, 0);
-+ fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);
-+
-+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
-+}
-+
-+static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
-+{
-+ void __iomem *block = fsl_chan->qdma->block_base;
-+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-+ struct fsl_qdma_comp *fsl_comp;
-+ struct virt_dma_desc *vdesc;
-+ u32 reg;
-+
-+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
-+ if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
-+ return;
-+ vdesc = vchan_next_desc(&fsl_chan->vchan);
-+ if (!vdesc)
-+ return;
-+ list_del(&vdesc->node);
-+ fsl_comp = to_fsl_qdma_comp(vdesc);
-+
-+ memcpy(fsl_queue->virt_head++, fsl_comp->virt_addr, 16);
-+ if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
-+ fsl_queue->virt_head = fsl_queue->cq;
-+
-+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
-+ barrier();
-+ reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
-+ reg |= FSL_QDMA_BCQMR_EI;
-+ qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
-+ fsl_chan->status = DMA_IN_PROGRESS;
-+}
-+
-+static enum dma_status fsl_qdma_tx_status(struct dma_chan *chan,
-+ dma_cookie_t cookie, struct dma_tx_state *txstate)
-+{
-+ return dma_cookie_status(chan, cookie, txstate);
-+}
-+
-+static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
-+{
-+ struct fsl_qdma_comp *fsl_comp;
-+ struct fsl_qdma_queue *fsl_queue;
-+ struct fsl_qdma_sg *sg_block;
-+ unsigned long flags;
-+ unsigned int i;
-+
-+ fsl_comp = to_fsl_qdma_comp(vdesc);
-+ fsl_queue = fsl_comp->qchan->queue;
-+
-+ if (fsl_comp->sg_block) {
-+ for (i = 0; i < fsl_comp->sg_block_src +
-+ fsl_comp->sg_block_dst; i++) {
-+ sg_block = fsl_comp->sg_block + i;
-+ dma_pool_free(fsl_queue->sg_pool,
-+ sg_block->virt_addr,
-+ sg_block->bus_addr);
-+ }
-+ kfree(fsl_comp->sg_block);
-+ }
-+
-+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
-+ list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
-+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
-+}
-+
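-+/*
-+ * Submission takes both the per-queue lock and the virt-dma channel
-+ * lock: the queue lock serializes ring updates against the completion
-+ * path, while the channel lock protects the descriptor lists.
-+ */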
-+static void fsl_qdma_issue_pending(struct dma_chan *chan)
-+{
-+ struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
-+ struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
-+ unsigned long flags;
-+
-+ spin_lock_irqsave(&fsl_queue->queue_lock, flags);
-+ spin_lock(&fsl_chan->vchan.lock);
-+ if (vchan_issue_pending(&fsl_chan->vchan))
-+ fsl_qdma_enqueue_desc(fsl_chan);
-+ spin_unlock(&fsl_chan->vchan.lock);
-+ spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
-+}
-+
-+static int fsl_qdma_probe(struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_engine *fsl_qdma;
-+ struct fsl_qdma_chan *fsl_chan;
-+ struct resource *res;
-+ unsigned int len, chans, queues;
-+ int ret, i;
-+
-+ ret = of_property_read_u32(np, "channels", &chans);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get channels.\n");
-+ return ret;
-+ }
-+
-+ len = sizeof(*fsl_qdma) + sizeof(*fsl_chan) * chans;
-+ fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
-+ if (!fsl_qdma)
-+ return -ENOMEM;
-+
-+ ret = of_property_read_u32(np, "queues", &queues);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't get queues.\n");
-+ return ret;
-+ }
-+
-+ fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, queues);
-+ if (!fsl_qdma->queue)
-+ return -ENOMEM;
-+
-+ fsl_qdma->status = fsl_qdma_prep_status_queue(pdev);
-+ if (!fsl_qdma->status)
-+ return -ENOMEM;
-+
-+ fsl_qdma->n_chans = chans;
-+ fsl_qdma->n_queues = queues;
-+ mutex_init(&fsl_qdma->fsl_qdma_mutex);
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
-+ if (IS_ERR(fsl_qdma->ctrl_base))
-+ return PTR_ERR(fsl_qdma->ctrl_base);
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-+ fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
-+ if (IS_ERR(fsl_qdma->status_base))
-+ return PTR_ERR(fsl_qdma->status_base);
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-+ fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
-+ if (IS_ERR(fsl_qdma->block_base))
-+ return PTR_ERR(fsl_qdma->block_base);
-+
-+ ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-+ if (ret)
-+ return ret;
-+
-+ fsl_qdma->big_endian = of_property_read_bool(np, "big-endian");
-+ INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
-+ for (i = 0; i < fsl_qdma->n_chans; i++) {
-+ struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
-+
-+ fsl_chan->qdma = fsl_qdma;
-+ fsl_chan->queue = fsl_qdma->queue + i % fsl_qdma->n_queues;
-+ fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
-+ INIT_LIST_HEAD(&fsl_chan->qcomp);
-+ vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
-+ }
-+ for (i = 0; i < fsl_qdma->n_queues; i++)
-+ fsl_qdma_pre_request_enqueue_desc(fsl_qdma->queue + i);
-+
-+ dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);
-+ dma_cap_set(DMA_SG, fsl_qdma->dma_dev.cap_mask);
-+
-+ fsl_qdma->dma_dev.dev = &pdev->dev;
-+ fsl_qdma->dma_dev.device_alloc_chan_resources
-+ = fsl_qdma_alloc_chan_resources;
-+ fsl_qdma->dma_dev.device_free_chan_resources
-+ = fsl_qdma_free_chan_resources;
-+ fsl_qdma->dma_dev.device_tx_status = fsl_qdma_tx_status;
-+ fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
-+ fsl_qdma->dma_dev.device_prep_dma_sg = fsl_qdma_prep_dma_sg;
-+ fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
-+
-+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
-+
-+ platform_set_drvdata(pdev, fsl_qdma);
-+
-+ ret = dma_async_device_register(&fsl_qdma->dma_dev);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't register Freescale qDMA engine.\n");
-+ return ret;
-+ }
-+
-+ ret = fsl_qdma_reg_init(fsl_qdma);
-+ if (ret) {
-+ dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
-+ return ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static int fsl_qdma_remove(struct platform_device *pdev)
-+{
-+ struct device_node *np = pdev->dev.of_node;
-+ struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);
-+ struct fsl_qdma_queue *queue_temp;
-+ struct fsl_qdma_queue *status = fsl_qdma->status;
-+ struct fsl_qdma_comp *comp_temp, *_comp_temp;
-+ int i;
-+
-+ of_dma_controller_free(np);
-+ dma_async_device_unregister(&fsl_qdma->dma_dev);
-+
-+ /* Free descriptor areas */
-+ for (i = 0; i < fsl_qdma->n_queues; i++) {
-+ queue_temp = fsl_qdma->queue + i;
-+ list_for_each_entry_safe(comp_temp, _comp_temp,
-+ &queue_temp->comp_used, list) {
-+ dma_pool_free(queue_temp->comp_pool,
-+ comp_temp->virt_addr,
-+ comp_temp->bus_addr);
-+ list_del(&comp_temp->list);
-+ kfree(comp_temp);
-+ }
-+ list_for_each_entry_safe(comp_temp, _comp_temp,
-+ &queue_temp->comp_free, list) {
-+ dma_pool_free(queue_temp->comp_pool,
-+ comp_temp->virt_addr,
-+ comp_temp->bus_addr);
-+ list_del(&comp_temp->list);
-+ kfree(comp_temp);
-+ }
-+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
-+ queue_temp->n_cq, queue_temp->cq,
-+ queue_temp->bus_addr);
-+ dma_pool_destroy(queue_temp->comp_pool);
-+ }
-+
-+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_ccdf) *
-+ status->n_cq, status->cq, status->bus_addr);
-+ return 0;
-+}
-+
-+static const struct of_device_id fsl_qdma_dt_ids[] = {
-+ { .compatible = "fsl,ls1021a-qdma", },
-+ { /* sentinel */ }
-+};
-+MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);
-+
-+static struct platform_driver fsl_qdma_driver = {
-+ .driver = {
-+ .name = "fsl-qdma",
-+ .owner = THIS_MODULE,
-+ .of_match_table = fsl_qdma_dt_ids,
-+ },
-+ .probe = fsl_qdma_probe,
-+ .remove = fsl_qdma_remove,
-+};
-+
-+static int __init fsl_qdma_init(void)
-+{
-+ return platform_driver_register(&fsl_qdma_driver);
-+}
-+subsys_initcall(fsl_qdma_init);
-+
-+static void __exit fsl_qdma_exit(void)
-+{
-+ platform_driver_unregister(&fsl_qdma_driver);
-+}
-+module_exit(fsl_qdma_exit);
-+
-+MODULE_ALIAS("platform:fsl-qdma");
-+MODULE_DESCRIPTION("Freescale qDMA engine driver");
-+MODULE_LICENSE("GPL v2");
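For context on how the removed driver was consumed: nothing called the functions above directly. Clients went through the generic dmaengine API, which dispatches to the device_prep_dma_memcpy and device_issue_pending hooks registered in fsl_qdma_probe(). The following is a minimal sketch of one DMA_MEMCPY transaction under that assumption; the function name example_qdma_memcpy, the polling loop, and the pre-mapped dst/src/len arguments are illustrative, not part of the original patch.

#include <linux/dmaengine.h>
#include <linux/delay.h>

/* Sketch: issue one memcpy transaction through the dmaengine core.
 * dst, src and len are assumed to be DMA-mapped by the caller.
 */
static int example_qdma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;

	/* Ask the core for any channel advertising memcpy capability. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Resolves to fsl_qdma_prep_memcpy() for a qDMA channel. */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);
	/* Invokes fsl_qdma_issue_pending(), which sets BCQMR[EI]. */
	dma_async_issue_pending(chan);

	/* Poll for completion; a real user would install a callback. */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_COMPLETE)
		udelay(10);

	dma_release_channel(chan);
	return 0;
}

The device-tree contract the probe path expects follows directly from the property reads above: channels, queues, queue-sizes, status-sizes, an optional big-endian flag, three register regions, and the qdma-error/qdma-queue interrupts.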