Diffstat (limited to 'target/linux/layerscape/patches-5.4/804-crypto-0035-crypto-caam-qi-use-QBMan-NXP-SDK-driver.patch')
-rw-r--r--  target/linux/layerscape/patches-5.4/804-crypto-0035-crypto-caam-qi-use-QBMan-NXP-SDK-driver.patch | 342
1 file changed, 342 insertions(+), 0 deletions(-)
diff --git a/target/linux/layerscape/patches-5.4/804-crypto-0035-crypto-caam-qi-use-QBMan-NXP-SDK-driver.patch b/target/linux/layerscape/patches-5.4/804-crypto-0035-crypto-caam-qi-use-QBMan-NXP-SDK-driver.patch
new file mode 100644
index 0000000000..524ac40692
--- /dev/null
+++ b/target/linux/layerscape/patches-5.4/804-crypto-0035-crypto-caam-qi-use-QBMan-NXP-SDK-driver.patch
@@ -0,0 +1,342 @@
+From 28ef279c55a914372bf41587f6264e8e3e61e7d5 Mon Sep 17 00:00:00 2001
+From: Horia Geanta <horia.geanta@nxp.com>
+Date: Mon, 12 Jun 2017 19:42:34 +0300
+Subject: [PATCH] crypto: caam/qi - use QBMan (NXP) SDK driver
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Update caam/qi to work with QBMan from NXP SDK.
+
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+
+Squashed in the "crypto: caam/qi - fix FD congestion weight" fix.
+
+Resolved rebase conflicts:
+
+drivers/crypto/caam/qi.c:579
+ kept call to dev_err_ratelimited, but changed to fd->status
+drivers/crypto/caam/sg_sw_qm.h:96
+ kept changes from patch, but changed sg_count to len
+
+Signed-off-by: Vlad Pelin <vlad.pelin@nxp.com>
+Acked-by: Horia Geanta <horia.geanta@nxp.com>
+---
+ drivers/crypto/caam/Kconfig | 2 +-
+ drivers/crypto/caam/qi.c | 82 +++++++++++++++++++++---------------------
+ drivers/crypto/caam/qi.h | 2 +-
+ drivers/crypto/caam/sg_sw_qm.h | 46 ++++++++++++++++--------
+ 4 files changed, 74 insertions(+), 58 deletions(-)
+
+--- a/drivers/crypto/caam/Kconfig
++++ b/drivers/crypto/caam/Kconfig
+@@ -106,7 +106,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+
+ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ bool "Queue Interface as Crypto API backend"
+- depends on FSL_DPAA && NET
++ depends on FSL_SDK_DPA && NET
+ default y
+ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+ select CRYPTO_AUTHENC
+--- a/drivers/crypto/caam/qi.c
++++ b/drivers/crypto/caam/qi.c
+@@ -9,7 +9,7 @@
+
+ #include <linux/cpumask.h>
+ #include <linux/kthread.h>
+-#include <soc/fsl/qman.h>
++#include <linux/fsl_qman.h>
+
+ #include "regs.h"
+ #include "qi.h"
+@@ -107,23 +107,21 @@ static void *caam_iova_to_virt(struct io
+ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
+ {
+ struct qm_fd fd;
+- dma_addr_t addr;
+ int ret;
+ int num_retries = 0;
+
+- qm_fd_clear_fd(&fd);
+- qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
+-
+- addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
++ fd.cmd = 0;
++ fd.format = qm_fd_compound;
++ fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
++ fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+ DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(qidev, addr)) {
++ if (dma_mapping_error(qidev, fd.addr)) {
+ dev_err(qidev, "DMA mapping error for QI enqueue request\n");
+ return -EIO;
+ }
+- qm_fd_addr_set64(&fd, addr);
+
+ do {
+- ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
++ ret = qman_enqueue(req->drv_ctx->req_fq, &fd, 0);
+ if (likely(!ret))
+ return 0;
+
+@@ -139,7 +137,7 @@ int caam_qi_enqueue(struct device *qidev
+ EXPORT_SYMBOL(caam_qi_enqueue);
+
+ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
+- const union qm_mr_entry *msg)
++ const struct qm_mr_entry *msg)
+ {
+ const struct qm_fd *fd;
+ struct caam_drv_req *drv_req;
+@@ -148,7 +146,7 @@ static void caam_fq_ern_cb(struct qman_p
+
+ fd = &msg->ern.fd;
+
+- if (qm_fd_get_format(fd) != qm_fd_compound) {
++ if (fd->format != qm_fd_compound) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return;
+ }
+@@ -186,20 +184,22 @@ static struct qman_fq *create_caam_req_f
+ req_fq->cb.fqs = NULL;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
+- QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
++ QMAN_FQ_FLAG_TO_DCPORTAL | QMAN_FQ_FLAG_LOCKED,
++ req_fq);
+ if (ret) {
+ dev_err(qidev, "Failed to create session req FQ\n");
+ goto create_req_fq_fail;
+ }
+
+- memset(&opts, 0, sizeof(opts));
+- opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+- QM_INITFQ_WE_CONTEXTB |
+- QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+- opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+- qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
+- opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
+- qm_fqd_context_a_set64(&opts.fqd, hwdesc);
++ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
++ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
++ QM_INITFQ_WE_CGID;
++ opts.fqd.fq_ctrl = QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE;
++ opts.fqd.dest.channel = qm_channel_caam;
++ opts.fqd.dest.wq = 2;
++ opts.fqd.context_b = qman_fq_fqid(rsp_fq);
++ opts.fqd.context_a.hi = upper_32_bits(hwdesc);
++ opts.fqd.context_a.lo = lower_32_bits(hwdesc);
+ opts.fqd.cgid = qipriv.cgr.cgrid;
+
+ ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
+@@ -213,7 +213,7 @@ static struct qman_fq *create_caam_req_f
+ return req_fq;
+
+ init_req_fq_fail:
+- qman_destroy_fq(req_fq);
++ qman_destroy_fq(req_fq, 0);
+ create_req_fq_fail:
+ kfree(req_fq);
+ return ERR_PTR(ret);
+@@ -281,7 +281,7 @@ empty_fq:
+ if (ret)
+ dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
+
+- qman_destroy_fq(fq);
++ qman_destroy_fq(fq, 0);
+ kfree(fq);
+
+ return ret;
+@@ -298,7 +298,7 @@ static int empty_caam_fq(struct qman_fq
+ if (ret)
+ return ret;
+
+- if (!qm_mcr_np_get(&np, frm_cnt))
++ if (!np.frm_cnt)
+ break;
+
+ msleep(20);
+@@ -565,30 +565,28 @@ static enum qman_cb_dqrr_result caam_rsp
+ const struct qm_fd *fd;
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+ struct caam_drv_private *priv = dev_get_drvdata(qidev);
+- u32 status;
+
+ if (caam_qi_napi_schedule(p, caam_napi))
+ return qman_cb_dqrr_stop;
+
+ fd = &dqrr->fd;
+- status = be32_to_cpu(fd->status);
+- if (unlikely(status)) {
+- u32 ssrc = status & JRSTA_SSRC_MASK;
+- u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++ if (unlikely(fd->status)) {
++ u32 ssrc = fd->status & JRSTA_SSRC_MASK;
++ u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
+
+ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err_ratelimited(qidev,
+ "Error: %#x in CAAM response FD\n",
+- status);
++ fd->status);
+ }
+
+- if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
++ if (unlikely(fd->format != qm_fd_compound)) {
+ dev_err(qidev, "Non-compound FD from CAAM\n");
+ return qman_cb_dqrr_consume;
+ }
+
+- drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
++ drv_req = caam_iova_to_virt(priv->domain, fd->addr);
+ if (unlikely(!drv_req)) {
+ dev_err(qidev,
+ "Can't find original request for caam response\n");
+@@ -598,7 +596,7 @@ static enum qman_cb_dqrr_result caam_rsp
+ dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
+ sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
+
+- drv_req->cbk(drv_req, status);
++ drv_req->cbk(drv_req, fd->status);
+ return qman_cb_dqrr_consume;
+ }
+
+@@ -622,17 +620,18 @@ static int alloc_rsp_fq_cpu(struct devic
+ return -ENODEV;
+ }
+
+- memset(&opts, 0, sizeof(opts));
+- opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
+- QM_INITFQ_WE_CONTEXTB |
+- QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
+- opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
+- QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
+- qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
++ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
++ QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA |
++ QM_INITFQ_WE_CGID;
++ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH |
++ QM_FQCTRL_CGE;
++ opts.fqd.dest.channel = qman_affine_channel(cpu);
++ opts.fqd.dest.wq = 3;
+ opts.fqd.cgid = qipriv.cgr.cgrid;
+ opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
+ QM_STASHING_EXCL_DATA;
+- qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);
++ opts.fqd.context_a.stashing.data_cl = 1;
++ opts.fqd.context_a.stashing.context_cl = 1;
+
+ ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+ if (ret) {
+@@ -662,8 +661,7 @@ static int init_cgr(struct device *qidev
+
+ qipriv.cgr.cb = cgr_cb;
+ memset(&opts, 0, sizeof(opts));
+- opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
+- QM_CGR_WE_MODE);
++ opts.we_mask = QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE;
+ opts.cgr.cscn_en = QM_CGR_EN;
+ opts.cgr.mode = QMAN_CGR_MODE_FRAME;
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+--- a/drivers/crypto/caam/qi.h
++++ b/drivers/crypto/caam/qi.h
+@@ -9,7 +9,7 @@
+ #ifndef __QI_H__
+ #define __QI_H__
+
+-#include <soc/fsl/qman.h>
++#include <linux/fsl_qman.h>
+ #include "compat.h"
+ #include "desc.h"
+ #include "desc_constr.h"
+--- a/drivers/crypto/caam/sg_sw_qm.h
++++ b/drivers/crypto/caam/sg_sw_qm.h
+@@ -7,46 +7,61 @@
+ #ifndef __SG_SW_QM_H
+ #define __SG_SW_QM_H
+
+-#include <soc/fsl/qman.h>
++#include <linux/fsl_qman.h>
+ #include "regs.h"
+
++static inline void cpu_to_hw_sg(struct qm_sg_entry *qm_sg_ptr)
++{
++ dma_addr_t addr = qm_sg_ptr->opaque;
++
++ qm_sg_ptr->opaque = cpu_to_caam64(addr);
++ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
++}
++
+ static inline void __dma_to_qm_sg(struct qm_sg_entry *qm_sg_ptr, dma_addr_t dma,
+- u16 offset)
++ u32 len, u16 offset)
+ {
+- qm_sg_entry_set64(qm_sg_ptr, dma);
++ qm_sg_ptr->addr = dma;
++ qm_sg_ptr->length = len;
+ qm_sg_ptr->__reserved2 = 0;
+ qm_sg_ptr->bpid = 0;
+- qm_sg_ptr->offset = cpu_to_be16(offset & QM_SG_OFF_MASK);
++ qm_sg_ptr->__reserved3 = 0;
++ qm_sg_ptr->offset = offset & QM_SG_OFFSET_MASK;
++
++ cpu_to_hw_sg(qm_sg_ptr);
+ }
+
+ static inline void dma_to_qm_sg_one(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+ {
+- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+- qm_sg_entry_set_len(qm_sg_ptr, len);
++ qm_sg_ptr->extension = 0;
++ qm_sg_ptr->final = 0;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+ }
+
+ static inline void dma_to_qm_sg_one_last(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+ {
+- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+- qm_sg_entry_set_f(qm_sg_ptr, len);
++ qm_sg_ptr->extension = 0;
++ qm_sg_ptr->final = 1;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+ }
+
+ static inline void dma_to_qm_sg_one_ext(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len, u16 offset)
+ {
+- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+- qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | (len & QM_SG_LEN_MASK));
++ qm_sg_ptr->extension = 1;
++ qm_sg_ptr->final = 0;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+ }
+
+ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
+ dma_addr_t dma, u32 len,
+ u16 offset)
+ {
+- __dma_to_qm_sg(qm_sg_ptr, dma, offset);
+- qm_sg_ptr->cfg = cpu_to_be32(QM_SG_EXT | QM_SG_FIN |
+- (len & QM_SG_LEN_MASK));
++ qm_sg_ptr->extension = 1;
++ qm_sg_ptr->final = 1;
++ __dma_to_qm_sg(qm_sg_ptr, dma, len, offset);
+ }
+
+ /*
+@@ -79,7 +94,10 @@ static inline void sg_to_qm_sg_last(stru
+ struct qm_sg_entry *qm_sg_ptr, u16 offset)
+ {
+ qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
+- qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
++
++ qm_sg_ptr->sgt_efl = caam32_to_cpu(qm_sg_ptr->sgt_efl);
++ qm_sg_ptr->final = 1;
++ qm_sg_ptr->sgt_efl = cpu_to_caam32(qm_sg_ptr->sgt_efl);
+ }
+
+ #endif /* __SG_SW_QM_H */
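
The bulk of the patch is a mechanical translation between two QMan APIs: the upstream driver (soc/fsl/qman.h) hides the frame-descriptor layout behind accessors such as qm_fd_set_compound() and qm_fd_addr_set64(), while the NXP SDK driver (linux/fsl_qman.h) exposes the FD fields directly and adds a flags argument to qman_enqueue() (passed as 0 throughout this driver). The sketch below contrasts the two styles on the enqueue path. It is an illustration only: qm_fd_sdk and build_compound_fd() are simplified stand-in names invented here, not kernel definitions; only the calls quoted in the comments are taken from the patch itself.

/*
 * Sketch only: contrasts the upstream QMan accessor style with the
 * NXP SDK's direct-field style used by caam_qi_enqueue() above.
 * qm_fd_sdk and build_compound_fd() are simplified stand-ins, not
 * the real fsl_qman.h definitions.
 */
#include <stdint.h>

enum { qm_fd_compound = 1 };	/* stand-in for the SDK's FD format value */

struct qm_fd_sdk {		/* simplified fsl_qman.h-style frame descriptor */
	uint64_t addr;		/* bus address of the compound SG table */
	uint32_t format;	/* FD format field, assigned directly in SDK style */
	uint32_t cong_weight;	/* weight counted against the congestion group */
	uint32_t cmd;
};

/*
 * Upstream (soc/fsl/qman.h) style, per the removed lines of the patch:
 *
 *	qm_fd_clear_fd(&fd);
 *	qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));
 *	qm_fd_addr_set64(&fd, addr);
 *	ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
 *
 * NXP SDK (linux/fsl_qman.h) style, per the added lines: the fields are
 * assigned directly and qman_enqueue() takes an extra flags word.
 */
static void build_compound_fd(struct qm_fd_sdk *fd, uint64_t addr, uint32_t len)
{
	fd->cmd = 0;
	fd->format = qm_fd_compound;	/* compound FD: addr points at fd_sgt[] */
	fd->cong_weight = len;		/* FD length drives CGR accounting */
	fd->addr = addr;
}

int main(void)
{
	struct qm_fd_sdk fd;

	build_compound_fd(&fd, 0x80001000ULL, 64);	/* hypothetical values */
	return fd.format == qm_fd_compound ? 0 : 1;
}

The same direct-field pattern drives the sg_sw_qm.h hunks: the SDK's qm_sg_entry is filled in CPU byte order and converted once to CAAM (hardware) endianness by cpu_to_hw_sg(), which is why __dma_to_qm_sg() performs that conversion as its final step, and why sg_to_qm_sg_last() has to swap the entry back to CPU order before setting the final bit.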