author    Biwen Li <biwen.li@nxp.com>    2019-05-06 12:13:14 +0800
committer Petr Štetiar <ynezz@true.cz>   2019-06-06 15:40:09 +0200
commit    5159d71983e649a89568e46d9ff02731beedd571 (patch)
tree      2c669f4d9651c1fe26955778e5fee119543a85ce
parent    639d127b831a2af29a03ab07b262abf46ada3b4e (diff)
layerscape: update patches-4.14 to LSDK 19.03
All patches of LSDK 19.03 were ported to the OpenWrt kernel. We still use an all-in-one patch for each IP/feature for OpenWrt. Below are the changes this patch introduces:

- Updated original IP/feature patches to LSDK 19.03.
- Added new IP/feature patches for eTSEC/PTP/TMU.
- Squashed scattered patches into IP/feature patches.
- Updated config-4.14 correspondingly.
- Refreshed all patches.

More info about LSDK and the kernel:
- https://lsdk.github.io/components.html
- https://source.codeaurora.org/external/qoriq/qoriq-components/linux

Signed-off-by: Biwen Li <biwen.li@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Diffstat (limited to 'target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch')
-rw-r--r--    target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch    3798
1 file changed, 3350 insertions(+), 448 deletions(-)
diff --git a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
index ff0fe8daa4..0d26aca797 100644
--- a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
+++ b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
@@ -1,59 +1,69 @@
-From 936d5f485f2ff837cdd7d49839771bd3367e8b92 Mon Sep 17 00:00:00 2001
+From ba8e92b322a3763880fdc4d19e9c7085f5504be7 Mon Sep 17 00:00:00 2001
From: Biwen Li <biwen.li@nxp.com>
-Date: Tue, 30 Oct 2018 18:28:03 +0800
-Subject: [PATCH 37/40] sec: support layerscape
+Date: Tue, 23 Apr 2019 17:41:43 +0800
+Subject: [PATCH] sec: support layerscape
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
This is an integrated patch of sec for layerscape
Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Biwen Li <biwen.li@nxp.com>
+Signed-off-by: Carmen Iorga <carmen.iorga@nxp.com>
Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
Signed-off-by: Guanhua Gao <guanhua.gao@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
-Signed-off-by: Horia Geantă horia.geanta@nxp.com
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
-Signed-off-by: Biwen Li <biwen.li@nxp.com>
---
crypto/Kconfig | 20 +
crypto/Makefile | 1 +
+ crypto/chacha20poly1305.c | 2 -
crypto/tcrypt.c | 27 +-
crypto/testmgr.c | 244 ++
crypto/testmgr.h | 219 ++
- crypto/tls.c | 607 +++
+ crypto/tls.c | 607 ++++
drivers/crypto/Makefile | 2 +-
- drivers/crypto/caam/Kconfig | 57 +-
- drivers/crypto/caam/Makefile | 10 +-
- drivers/crypto/caam/caamalg.c | 131 +-
- drivers/crypto/caam/caamalg_desc.c | 761 +++-
- drivers/crypto/caam/caamalg_desc.h | 47 +-
- drivers/crypto/caam/caamalg_qi.c | 927 ++++-
- drivers/crypto/caam/caamalg_qi2.c | 5691 +++++++++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h | 274 ++
- drivers/crypto/caam/caamhash.c | 132 +-
+ drivers/crypto/caam/Kconfig | 85 +-
+ drivers/crypto/caam/Makefile | 26 +-
+ drivers/crypto/caam/caamalg.c | 468 +++-
+ drivers/crypto/caam/caamalg_desc.c | 903 +++++-
+ drivers/crypto/caam/caamalg_desc.h | 52 +-
+ drivers/crypto/caam/caamalg_qi.c | 1060 ++++++-
+ drivers/crypto/caam/caamalg_qi2.c | 5843 +++++++++++++++++++++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h | 276 ++
+ drivers/crypto/caam/caamhash.c | 192 +-
drivers/crypto/caam/caamhash_desc.c | 108 +
drivers/crypto/caam/caamhash_desc.h | 49 +
- drivers/crypto/caam/compat.h | 2 +
- drivers/crypto/caam/ctrl.c | 23 +-
- drivers/crypto/caam/desc.h | 62 +-
- drivers/crypto/caam/desc_constr.h | 52 +-
- drivers/crypto/caam/dpseci.c | 865 ++++
- drivers/crypto/caam/dpseci.h | 433 ++
+ drivers/crypto/caam/caampkc.c | 52 +-
+ drivers/crypto/caam/caamrng.c | 52 +-
+ drivers/crypto/caam/compat.h | 4 +
+ drivers/crypto/caam/ctrl.c | 194 +-
+ drivers/crypto/caam/desc.h | 89 +-
+ drivers/crypto/caam/desc_constr.h | 59 +-
+ drivers/crypto/caam/dpseci.c | 865 ++++++
+ drivers/crypto/caam/dpseci.h | 433 +++
drivers/crypto/caam/dpseci_cmd.h | 287 ++
- drivers/crypto/caam/error.c | 75 +-
+ drivers/crypto/caam/error.c | 81 +-
drivers/crypto/caam/error.h | 6 +-
- drivers/crypto/caam/intern.h | 1 +
- drivers/crypto/caam/jr.c | 42 +
+ drivers/crypto/caam/intern.h | 102 +-
+ drivers/crypto/caam/jr.c | 84 +
drivers/crypto/caam/jr.h | 2 +
drivers/crypto/caam/key_gen.c | 30 -
drivers/crypto/caam/key_gen.h | 30 +
- drivers/crypto/caam/qi.c | 85 +-
+ drivers/crypto/caam/qi.c | 134 +-
drivers/crypto/caam/qi.h | 2 +-
- drivers/crypto/caam/regs.h | 2 +
+ drivers/crypto/caam/regs.h | 76 +-
drivers/crypto/caam/sg_sw_qm.h | 46 +-
drivers/crypto/talitos.c | 8 +
- 37 files changed, 11006 insertions(+), 354 deletions(-)
+ include/crypto/chacha20.h | 1 +
+ 41 files changed, 12088 insertions(+), 733 deletions(-)
create mode 100644 crypto/tls.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.h
@@ -102,6 +112,17 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
obj-$(CONFIG_CRYPTO_LZO) += lzo.o
obj-$(CONFIG_CRYPTO_LZ4) += lz4.o
obj-$(CONFIG_CRYPTO_LZ4HC) += lz4hc.o
+--- a/crypto/chacha20poly1305.c
++++ b/crypto/chacha20poly1305.c
+@@ -22,8 +22,6 @@
+
+ #include "internal.h"
+
+-#define CHACHAPOLY_IV_SIZE 12
+-
+ struct chachapoly_instance_ctx {
+ struct crypto_skcipher_spawn chacha;
+ struct crypto_ahash_spawn poly;
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,7 +76,7 @@ static char *check[] = {
@@ -1316,10 +1337,16 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
-@@ -1,7 +1,11 @@
+@@ -1,7 +1,17 @@
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
++config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
++ tristate
++
++config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
++ tristate
++
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
@@ -1329,7 +1356,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
-@@ -12,9 +16,16 @@ config CRYPTO_DEV_FSL_CAAM
+@@ -12,9 +22,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
@@ -1347,7 +1374,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
default y
help
Enables the driver module for Job Rings which are part of
-@@ -25,9 +36,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
+@@ -25,9 +42,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
@@ -1359,7 +1386,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
range 2 9
default "9"
help
-@@ -45,7 +57,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+@@ -45,7 +63,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
@@ -1367,50 +1394,83 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
help
Enable the Job Ring's interrupt coalescing feature.
-@@ -75,7 +86,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
+@@ -74,9 +91,9 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THL
+ threshold. Range is 1-65535.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
- tristate "Register algorithm implementations with the Crypto API"
+- tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register algorithm implementations with the Crypto API"
default y
++ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AEAD
select CRYPTO_AUTHENC
-@@ -90,7 +100,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ select CRYPTO_BLKCIPHER
+@@ -85,13 +102,11 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ scatterlist crypto API (such as the linux native IPSec
+ stack) to the SEC4 via job ring.
+- To compile this as a module, choose M here: the module
+- will be called caamalg.
+-
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
- tristate "Queue Interface as Crypto API backend"
+- tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
++ bool "Queue Interface as Crypto API backend"
+ depends on FSL_SDK_DPA && NET
default y
++ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
-@@ -107,7 +117,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ help
+@@ -102,36 +117,26 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
+ assigned to the kernel should also be more than the number of
+ job rings.
+- To compile this as a module, choose M here: the module
+- will be called caamalg_qi.
+-
config CRYPTO_DEV_FSL_CAAM_AHASH_API
- tristate "Register hash algorithm implementations with Crypto API"
+- tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register hash algorithm implementations with Crypto API"
default y
++ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
select CRYPTO_HASH
help
-@@ -119,7 +128,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
+ Selecting this will offload ahash for users of the
+ scatterlist crypto API to the SEC4 via job ring.
+- To compile this as a module, choose M here: the module
+- will be called caamhash.
+-
config CRYPTO_DEV_FSL_CAAM_PKC_API
- tristate "Register public key cryptography implementations with Crypto API"
+- tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register public key cryptography implementations with Crypto API"
default y
select CRYPTO_RSA
help
-@@ -131,7 +139,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
+ Selecting this will allow SEC Public key support for RSA.
+ Supported cryptographic primitives: encryption, decryption,
+ signature and verification.
+- To compile this as a module, choose M here: the module
+- will be called caam_pkc.
config CRYPTO_DEV_FSL_CAAM_RNG_API
- tristate "Register caam device for hwrng API"
+- tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
++ bool "Register caam device for hwrng API"
default y
select CRYPTO_RNG
select HW_RANDOM
-@@ -142,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
- To compile this as a module, choose M here: the module
- will be called caamrng.
+@@ -139,16 +144,24 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
+ Selecting this will register the SEC4 hardware rng to
+ the hw_random API for suppying the kernel entropy pool.
+
+- To compile this as a module, choose M here: the module
+- will be called caamrng.
++endif # CRYPTO_DEV_FSL_CAAM_JR
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
@@ -1418,14 +1478,17 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
- help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
-+endif # CRYPTO_DEV_FSL_CAAM_JR
-+
+endif # CRYPTO_DEV_FSL_CAAM
-+
+
+-config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
+- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ select CRYPTO_DEV_FSL_CAAM_COMMON
++ select CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
++ select CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
@@ -1437,16 +1500,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
-
- config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
- def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
-- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
-+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
-+ CRYPTO_DEV_FSL_DPAA2_CAAM)
-+
-+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
-+ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
-+ CRYPTO_DEV_FSL_DPAA2_CAAM)
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
@@ -1458,21 +1511,30 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
-+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
- obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
-
- caam-objs := ctrl.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
+-
+-caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
-+caam_jr-objs := jr.o key_gen.o
- caam_pkc-y := caampkc.o pkc_desc.o
+-caam_pkc-y := caampkc.o pkc_desc.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
++
++caam-y := ctrl.o
++caam_jr-y := jr.o key_gen.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
++caam_jr-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caampkc.o pkc_desc.o
++
++caam-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += qi.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
- caam-objs += qi.o
+- caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
@@ -1480,7 +1542,16 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
-@@ -108,6 +108,7 @@ struct caam_ctx {
+@@ -71,6 +71,8 @@
+ #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
+ CAAM_CMD_SZ * 5)
+
++#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
++
+ #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
+ #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+@@ -108,6 +110,7 @@ struct caam_ctx {
dma_addr_t sh_desc_dec_dma;
dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
@@ -1488,7 +1559,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
-@@ -118,6 +119,7 @@ static int aead_null_set_sh_desc(struct
+@@ -118,6 +121,7 @@ static int aead_null_set_sh_desc(struct
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
@@ -1496,7 +1567,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
ctx->adata.keylen_pad;
-@@ -136,9 +138,10 @@ static int aead_null_set_sh_desc(struct
+@@ -136,9 +140,10 @@ static int aead_null_set_sh_desc(struct
/* aead_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -1509,7 +1580,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* Job Descriptor and Shared Descriptors
-@@ -154,9 +157,10 @@ static int aead_null_set_sh_desc(struct
+@@ -154,9 +159,10 @@ static int aead_null_set_sh_desc(struct
/* aead_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
@@ -1522,7 +1593,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return 0;
}
-@@ -168,6 +172,7 @@ static int aead_set_sh_desc(struct crypt
+@@ -168,6 +174,7 @@ static int aead_set_sh_desc(struct crypt
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
@@ -1530,7 +1601,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
u32 ctx1_iv_off = 0;
u32 *desc, *nonce = NULL;
u32 inl_mask;
-@@ -234,9 +239,9 @@ static int aead_set_sh_desc(struct crypt
+@@ -234,9 +241,9 @@ static int aead_set_sh_desc(struct crypt
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
@@ -1542,7 +1613,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
skip_enc:
/*
-@@ -266,9 +271,9 @@ skip_enc:
+@@ -266,9 +273,9 @@ skip_enc:
desc = ctx->sh_desc_dec;
cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, alg->caam.geniv, is_rfc3686,
@@ -1554,7 +1625,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
if (!alg->caam.geniv)
goto skip_givenc;
-@@ -300,9 +305,9 @@ skip_enc:
+@@ -300,9 +307,9 @@ skip_enc:
desc = ctx->sh_desc_enc;
cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
ctx->authsize, is_rfc3686, nonce,
@@ -1566,7 +1637,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
skip_givenc:
return 0;
-@@ -323,6 +328,7 @@ static int gcm_set_sh_desc(struct crypto
+@@ -323,6 +330,7 @@ static int gcm_set_sh_desc(struct crypto
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
@@ -1574,7 +1645,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
-@@ -344,9 +350,9 @@ static int gcm_set_sh_desc(struct crypto
+@@ -344,9 +352,9 @@ static int gcm_set_sh_desc(struct crypto
}
desc = ctx->sh_desc_enc;
@@ -1586,7 +1657,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* Job Descriptor and Shared Descriptors
-@@ -361,9 +367,9 @@ static int gcm_set_sh_desc(struct crypto
+@@ -361,9 +369,9 @@ static int gcm_set_sh_desc(struct crypto
}
desc = ctx->sh_desc_dec;
@@ -1598,7 +1669,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return 0;
}
-@@ -382,6 +388,7 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -382,6 +390,7 @@ static int rfc4106_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
@@ -1606,7 +1677,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
-@@ -403,9 +410,10 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -403,9 +412,10 @@ static int rfc4106_set_sh_desc(struct cr
}
desc = ctx->sh_desc_enc;
@@ -1619,7 +1690,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* Job Descriptor and Shared Descriptors
-@@ -420,9 +428,10 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -420,9 +430,10 @@ static int rfc4106_set_sh_desc(struct cr
}
desc = ctx->sh_desc_dec;
@@ -1632,7 +1703,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return 0;
}
-@@ -442,6 +451,7 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -442,6 +453,7 @@ static int rfc4543_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
@@ -1640,7 +1711,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
u32 *desc;
int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN -
ctx->cdata.keylen;
-@@ -463,9 +473,10 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -463,9 +475,10 @@ static int rfc4543_set_sh_desc(struct cr
}
desc = ctx->sh_desc_enc;
@@ -1653,7 +1724,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* Job Descriptor and Shared Descriptors
-@@ -480,9 +491,10 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -480,9 +493,10 @@ static int rfc4543_set_sh_desc(struct cr
}
desc = ctx->sh_desc_dec;
@@ -1666,7 +1737,67 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return 0;
}
-@@ -503,6 +515,7 @@ static int aead_setkey(struct crypto_aea
+@@ -498,11 +512,67 @@ static int rfc4543_setauthsize(struct cr
+ return 0;
+ }
+
++static int chachapoly_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ u32 *desc;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ desc = ctx->sh_desc_enc;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, true, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), ctx->dir);
++
++ desc = ctx->sh_desc_dec;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, false, false);
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), ctx->dir);
++
++ return 0;
++}
++
++static int chachapoly_setauthsize(struct crypto_aead *aead,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++
++ if (authsize != POLY1305_DIGEST_SIZE)
++ return -EINVAL;
++
++ ctx->authsize = authsize;
++ return chachapoly_set_sh_desc(aead);
++}
++
++static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
++
++ if (keylen != CHACHA20_KEY_SIZE + saltlen) {
++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ ctx->cdata.key_virt = key;
++ ctx->cdata.keylen = keylen - saltlen;
++
++ return chachapoly_set_sh_desc(aead);
++}
++
+ static int aead_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
@@ -1674,7 +1805,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
struct crypto_authenc_keys keys;
int ret = 0;
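An aside on the key blob the new chachapoly_setkey() above expects: everything past the 32-byte ChaCha20 key is treated as salt, so the rfc7539esp template (ivsize 8) must pass a 4-byte trailing salt, while plain rfc7539 (ivsize 12) passes the bare key. A minimal standalone sketch of that length rule; expected_keylen() is an illustrative helper, not driver API:

```c
#include <stdio.h>

#define CHACHA20_KEY_SIZE  32  /* raw ChaCha20 key */
#define CHACHAPOLY_IV_SIZE 12  /* full rfc7539 nonce */

/* Illustrative: the keylen that chachapoly_setkey() accepts. */
static unsigned int expected_keylen(unsigned int ivsize)
{
    unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

    return CHACHA20_KEY_SIZE + saltlen; /* key || salt */
}

int main(void)
{
    printf("rfc7539    keylen = %u\n", expected_keylen(12)); /* 32 */
    printf("rfc7539esp keylen = %u\n", expected_keylen(8));  /* 36 */
    return 0;
}
```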
-@@ -517,6 +530,27 @@ static int aead_setkey(struct crypto_aea
+@@ -517,6 +587,27 @@ static int aead_setkey(struct crypto_aea
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
@@ -1702,7 +1833,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
keys.authkeylen, CAAM_MAX_KEY_SIZE -
keys.enckeylen);
-@@ -527,12 +561,14 @@ static int aead_setkey(struct crypto_aea
+@@ -527,12 +618,14 @@ static int aead_setkey(struct crypto_aea
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
@@ -1718,7 +1849,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ctx->cdata.keylen = keys.enckeylen;
return aead_set_sh_desc(aead);
badkey:
-@@ -552,7 +588,7 @@ static int gcm_setkey(struct crypto_aead
+@@ -552,7 +645,7 @@ static int gcm_setkey(struct crypto_aead
#endif
memcpy(ctx->key, key, keylen);
@@ -1727,7 +1858,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ctx->cdata.keylen = keylen;
return gcm_set_sh_desc(aead);
-@@ -580,7 +616,7 @@ static int rfc4106_setkey(struct crypto_
+@@ -580,7 +673,7 @@ static int rfc4106_setkey(struct crypto_
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
@@ -1736,7 +1867,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return rfc4106_set_sh_desc(aead);
}
-@@ -606,7 +642,7 @@ static int rfc4543_setkey(struct crypto_
+@@ -606,7 +699,7 @@ static int rfc4543_setkey(struct crypto_
*/
ctx->cdata.keylen = keylen - 4;
dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
@@ -1745,7 +1876,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return rfc4543_set_sh_desc(aead);
}
-@@ -658,21 +694,21 @@ static int ablkcipher_setkey(struct cryp
+@@ -658,21 +751,21 @@ static int ablkcipher_setkey(struct cryp
cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
@@ -1770,7 +1901,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return 0;
}
-@@ -701,13 +737,13 @@ static int xts_ablkcipher_setkey(struct
+@@ -701,13 +794,13 @@ static int xts_ablkcipher_setkey(struct
desc = ctx->sh_desc_enc;
cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
@@ -1786,7 +1917,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return 0;
}
-@@ -987,9 +1023,6 @@ static void init_aead_job(struct aead_re
+@@ -987,9 +1080,6 @@ static void init_aead_job(struct aead_re
append_seq_out_ptr(desc, dst_dma,
req->assoclen + req->cryptlen - authsize,
out_options);
@@ -1796,7 +1927,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
}
static void init_gcm_job(struct aead_request *req,
-@@ -1004,6 +1037,7 @@ static void init_gcm_job(struct aead_req
+@@ -1004,6 +1094,7 @@ static void init_gcm_job(struct aead_req
unsigned int last;
init_aead_job(req, edesc, all_contig, encrypt);
@@ -1804,7 +1935,48 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* BUG This should not be specific to generic GCM. */
last = 0;
-@@ -1030,6 +1064,7 @@ static void init_authenc_job(struct aead
+@@ -1021,6 +1112,40 @@ static void init_gcm_job(struct aead_req
+ /* End of blank commands */
+ }
+
++static void init_chachapoly_job(struct aead_request *req,
++ struct aead_edesc *edesc, bool all_contig,
++ bool encrypt)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int assoclen = req->assoclen;
++ u32 *desc = edesc->hw_desc;
++ u32 ctx_iv_off = 4;
++
++ init_aead_job(req, edesc, all_contig, encrypt);
++
++ if (ivsize != CHACHAPOLY_IV_SIZE) {
++ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
++ ctx_iv_off += 4;
++
++ /*
++ * The associated data comes already with the IV but we need
++ * to skip it when we authenticate or encrypt...
++ */
++ assoclen -= ivsize;
++ }
++
++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
++
++ /*
++ * For IPsec load the IV further in the same register.
++ * For RFC7539 simply load the 12 bytes nonce in a single operation
++ */
++ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ctx_iv_off << LDST_OFFSET_SHIFT);
++}
++
+ static void init_authenc_job(struct aead_request *req,
+ struct aead_edesc *edesc,
+ bool all_contig, bool encrypt)
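The offset arithmetic in init_chachapoly_job() above is easy to misread, so here is the intended CONTEXT1 layout spelled out, assuming the LDST offsets are byte offsets into the register (consistent with the "CONTEXT1[223:128] = {NONCE, IV}" comment). A standalone sketch with illustrative names:

```c
#include <stdio.h>

int main(void)
{
    /* rfc7539: the whole 12-byte nonce loads at byte offset 4,
     * filling CONTEXT1[223:128] in one shot. */
    unsigned int rfc7539_iv_off = 4, rfc7539_ivsize = 12;

    /* rfc7539esp: the 4-byte salt already sits at offset 4, so the
     * 8-byte per-packet IV loads right after it, at offset 4 + 4. */
    unsigned int esp_iv_off = 4 + 4, esp_ivsize = 8;

    printf("rfc7539   : IV %2u bytes @ byte offset %u\n",
           rfc7539_ivsize, rfc7539_iv_off);
    printf("rfc7539esp: IV %2u bytes @ byte offset %u (after salt)\n",
           esp_ivsize, esp_iv_off);
    return 0;
}
```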
+@@ -1030,6 +1155,7 @@ static void init_authenc_job(struct aead
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -1812,7 +1984,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-@@ -1053,6 +1088,15 @@ static void init_authenc_job(struct aead
+@@ -1053,6 +1179,15 @@ static void init_authenc_job(struct aead
init_aead_job(req, edesc, all_contig, encrypt);
@@ -1828,7 +2000,176 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
append_load_as_imm(desc, req->iv, ivsize,
LDST_CLASS_1_CCB |
-@@ -3204,9 +3248,11 @@ struct caam_crypto_alg {
+@@ -1225,8 +1360,16 @@ static struct aead_edesc *aead_edesc_all
+ }
+ }
+
++ /*
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries.
++ */
+ sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
+- sec4_sg_len += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ if (mapped_dst_nents > 1)
++ sec4_sg_len += ALIGN(mapped_dst_nents, 4);
++ else
++ sec4_sg_len = ALIGN(sec4_sg_len, 4);
++
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
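To make the new padding rule above concrete: ALIGN(n, 4) rounds n up to a multiple of 4, so a destination scatterlist of 5 entries reserves 8 link-table slots and a prefetch of 4 entries at a time can never run off the table. A standalone sketch of the same arithmetic (ALIGN4 mirrors the kernel's ALIGN):

```c
#include <stdio.h>

#define ALIGN4(x) (((x) + 3U) & ~3U) /* kernel ALIGN(x, 4) */

int main(void)
{
    unsigned int mapped_src_nents = 3, mapped_dst_nents = 5;
    unsigned int sec4_sg_len;

    sec4_sg_len = mapped_src_nents > 1 ? mapped_src_nents : 0;
    if (mapped_dst_nents > 1)
        sec4_sg_len += ALIGN4(mapped_dst_nents); /* pad dst table */
    else
        sec4_sg_len = ALIGN4(sec4_sg_len);       /* pad src table */

    printf("sec4_sg_len = %u\n", sec4_sg_len);   /* 3 + 8 = 11 */
    return 0;
}
```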
+@@ -1307,6 +1450,72 @@ static int gcm_encrypt(struct aead_reque
+ return ret;
+ }
+
++static int chachapoly_encrypt(struct aead_request *req)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ bool all_contig;
++ u32 *desc;
++ int ret;
++
++ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
++ true);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ desc = edesc->hw_desc;
++
++ init_chachapoly_job(req, edesc, all_contig, true);
++ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
++ 1);
++
++ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ aead_unmap(jrdev, edesc, req);
++ kfree(edesc);
++ }
++
++ return ret;
++}
++
++static int chachapoly_decrypt(struct aead_request *req)
++{
++ struct aead_edesc *edesc;
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ struct device *jrdev = ctx->jrdev;
++ bool all_contig;
++ u32 *desc;
++ int ret;
++
++ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
++ false);
++ if (IS_ERR(edesc))
++ return PTR_ERR(edesc);
++
++ desc = edesc->hw_desc;
++
++ init_chachapoly_job(req, edesc, all_contig, false);
++ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
++ 1);
++
++ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
++ if (!ret) {
++ ret = -EINPROGRESS;
++ } else {
++ aead_unmap(jrdev, edesc, req);
++ kfree(edesc);
++ }
++
++ return ret;
++}
++
+ static int ipsec_gcm_encrypt(struct aead_request *req)
+ {
+ if (req->assoclen < 8)
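Both new request handlers follow the usual job-ring convention visible above: caam_jr_enqueue() returning 0 means the job was accepted and the caller reports -EINPROGRESS, with the real status delivered later through aead_encrypt_done()/aead_decrypt_done(); any other return means the job never queued, so the edesc is unmapped and freed synchronously. A toy model of that control flow (stub names are illustrative):

```c
#include <stdio.h>

#define EINPROGRESS 115

static int caam_jr_enqueue_stub(int fail) /* stand-in for caam_jr_enqueue() */
{
    return fail ? -5 /* -EIO */ : 0;
}

static int submit(int fail)
{
    int ret = caam_jr_enqueue_stub(fail);

    if (!ret)
        ret = -EINPROGRESS; /* completion callback reports the result */
    else
        printf("sync cleanup: unmap + free edesc\n");
    return ret;
}

int main(void)
{
    printf("queued -> %d\n", submit(0)); /* -115 */
    printf("failed -> %d\n", submit(1)); /* -5   */
    return 0;
}
```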
+@@ -1494,7 +1703,25 @@ static struct ablkcipher_edesc *ablkciph
+
+ sec4_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = sec4_sg_ents;
+- sec4_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++
++ /*
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
++ */
++ if (mapped_dst_nents > 1)
++ sec4_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ sec4_sg_ents = max(ALIGN(sec4_sg_ents, 4),
++ 1 + ALIGN(mapped_src_nents, 4));
++ else
++ sec4_sg_ents = ALIGN(sec4_sg_ents, 4);
++
+ sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
+
+ /*
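The src == dst branch above is the subtle case: input and output share one link table, and (per the surrounding allocation logic) the output view starts one entry in, after the IV. Padding only the combined table could leave the output view short, hence the max() of both candidates. A standalone check of that rule:

```c
#include <stdio.h>

#define ALIGN4(x)  (((x) + 3U) & ~3U)
#define MAX(a, b)  ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned int mapped_src_nents = 5;                 /* src == dst */
    unsigned int sec4_sg_ents = 1 + mapped_src_nents;  /* IV + S/G   */

    /* ALIGN4(6) = 8 leaves only 7 padded entries for the view that
     * starts at entry 1; 1 + ALIGN4(5) = 9 keeps both views safe. */
    sec4_sg_ents = MAX(ALIGN4(sec4_sg_ents),
                       1 + ALIGN4(mapped_src_nents));

    printf("sec4_sg_ents = %u\n", sec4_sg_ents); /* 9 */
    return 0;
}
```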
+@@ -3196,6 +3423,50 @@ static struct caam_aead_alg driver_aeads
+ .geniv = true,
+ },
+ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc7539(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539-chacha20-poly1305-"
++ "caam",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = chachapoly_encrypt,
++ .decrypt = chachapoly_decrypt,
++ .ivsize = CHACHAPOLY_IV_SIZE,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc7539esp(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539esp-chacha20-"
++ "poly1305-caam",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = chachapoly_encrypt,
++ .decrypt = chachapoly_decrypt,
++ .ivsize = 8,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
+ };
+
+ struct caam_crypto_alg {
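For quick reference, the two AEAD variants this hunk registers differ only in name and IV size; both use the full 16-byte Poly1305 tag and a blocksize of 1. A compact restatement of those driver_aeads entries (data copied from the hunk above; POLY1305_DIGEST_SIZE is 16):

```c
#include <stdio.h>

#define POLY1305_DIGEST_SIZE 16

static const struct {
    const char  *cra_name;
    unsigned int ivsize;
    unsigned int maxauthsize;
} chachapoly_algs[] = {
    { "rfc7539(chacha20,poly1305)",    12, POLY1305_DIGEST_SIZE },
    { "rfc7539esp(chacha20,poly1305)",  8, POLY1305_DIGEST_SIZE },
};

int main(void)
{
    for (unsigned int i = 0; i < 2; i++)
        printf("%-32s ivsize=%2u maxauthsize=%u\n",
               chachapoly_algs[i].cra_name,
               chachapoly_algs[i].ivsize,
               chachapoly_algs[i].maxauthsize);
    return 0;
}
```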
+@@ -3204,9 +3475,11 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
@@ -1841,7 +2182,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ctx->jrdev = caam_jr_alloc();
if (IS_ERR(ctx->jrdev)) {
-@@ -3214,10 +3260,16 @@ static int caam_init_common(struct caam_
+@@ -3214,10 +3487,16 @@ static int caam_init_common(struct caam_
return PTR_ERR(ctx->jrdev);
}
@@ -1859,7 +2200,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
caam_jr_free(ctx->jrdev);
-@@ -3245,7 +3297,7 @@ static int caam_cra_init(struct crypto_t
+@@ -3245,7 +3524,7 @@ static int caam_cra_init(struct crypto_t
container_of(alg, struct caam_crypto_alg, crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1868,7 +2209,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
}
static int caam_aead_init(struct crypto_aead *tfm)
-@@ -3255,14 +3307,15 @@ static int caam_aead_init(struct crypto_
+@@ -3255,14 +3534,15 @@ static int caam_aead_init(struct crypto_
container_of(alg, struct caam_aead_alg, aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
@@ -1886,6 +2227,166 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
caam_jr_free(ctx->jrdev);
}
+@@ -3276,7 +3556,7 @@ static void caam_aead_exit(struct crypto
+ caam_exit_common(crypto_aead_ctx(tfm));
+ }
+
+-static void __exit caam_algapi_exit(void)
++void caam_algapi_exit(void)
+ {
+
+ struct caam_crypto_alg *t_alg, *n;
+@@ -3355,56 +3635,52 @@ static void caam_aead_alg_init(struct ca
+ alg->exit = caam_aead_exit;
+ }
+
+-static int __init caam_algapi_init(void)
++int caam_algapi_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int i = 0, err = 0;
+- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
++ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
+-
+-
+ INIT_LIST_HEAD(&alg_list);
+
+ /*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ if (priv->era < 10) {
++ u32 cha_vid, cha_inst;
++
++ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
++ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
++ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++
++ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
++ CHA_ID_LS_DES_SHIFT;
++ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
++ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ ccha_inst = 0;
++ ptha_inst = 0;
++ } else {
++ u32 aesa, mdha;
++
++ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
++ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
++
++ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++
++ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
++ aes_inst = aesa & CHA_VER_NUM_MASK;
++ md_inst = mdha & CHA_VER_NUM_MASK;
++ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
++ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
++ }
+
+ /* If MD is present, limit digest size based on LP256 */
+- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
++ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+@@ -3426,10 +3702,10 @@ static int __init caam_algapi_init(void)
+ * Check support for AES modes not available
+ * on LP devices.
+ */
+- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+- OP_ALG_AAI_XTS)
+- continue;
++ if (aes_vid == CHA_VER_VID_AES_LP &&
++ (alg->class1_alg_type & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_XTS)
++ continue;
+
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
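The era split above changes where capabilities are read from: pre-era-10 parts expose one packed CHA ID/instantiation pair in the perfmon block, while era 10+ parts expose a per-accelerator version register (vreg.aesa, vreg.mdha, vreg.ccha, vreg.ptha) whose low byte counts instances and whose vendor/version ID byte sits higher up. A standalone decode sketch, assuming the CHA_VER_* mask/shift values added to regs.h by this patch:

```c
#include <stdio.h>
#include <stdint.h>

#define CHA_VER_VID_SHIFT 24
#define CHA_VER_VID_MASK  (0xffull << CHA_VER_VID_SHIFT)
#define CHA_VER_NUM_MASK  0xffull

int main(void)
{
    uint32_t aesa = 0x0a000001; /* hypothetical AESA version register */

    unsigned int aes_vid  = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
    unsigned int aes_inst = aesa & CHA_VER_NUM_MASK;

    printf("AES vid %u, %u instance(s)\n", aes_vid, aes_inst); /* 10, 1 */
    return 0;
}
```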
+@@ -3468,21 +3744,28 @@ static int __init caam_algapi_init(void)
+ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /* Skip CHACHA20 algorithms if not supported by device */
++ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
++ continue;
++
++ /* Skip POLY1305 algorithms if not supported by device */
++ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
++ continue;
++
+ /*
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
+- if (alg_aai == OP_ALG_AAI_GCM)
+- continue;
++ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
++ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD or MD size is not supported by device.
+ */
+- if (c2_alg_sel &&
+- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
+- continue;
++ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
++ (!md_inst || t_alg->aead.maxauthsize > md_limit))
++ continue;
+
+ caam_aead_alg_init(t_alg);
+
+@@ -3502,10 +3785,3 @@ static int __init caam_algapi_init(void)
+
+ return err;
+ }
+-
+-module_init(caam_algapi_init);
+-module_exit(caam_algapi_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for crypto API");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -45,16 +45,16 @@ static inline void append_dec_op1(u32 *d
@@ -2881,6 +3382,15 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* assoclen + cryptlen = seqinlen */
append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
+@@ -931,7 +1507,7 @@ void cnstr_shdsc_rfc4543_encap(u32 * con
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
++ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -966,10 +1542,13 @@ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap)
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
@@ -2915,7 +3425,165 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* assoclen + cryptlen = seqoutlen */
append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
-@@ -1075,7 +1666,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
+@@ -1001,7 +1592,7 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
++ (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP);
+
+ /* Will read assoclen + cryptlen bytes */
+ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+@@ -1035,6 +1626,138 @@ void cnstr_shdsc_rfc4543_decap(u32 * con
+ }
+ EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+
++/**
++ * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and
++ * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
++ * descriptor (non-protocol).
++ * @desc: pointer to buffer used for descriptor construction
++ * @cdata: pointer to block cipher transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
++ * OP_ALG_AAI_AEAD.
++ * @adata: pointer to authentication transform definitions
++ * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
++ * OP_ALG_AAI_AEAD.
++ * @ivsize: initialization vector size
++ * @icvsize: integrity check value (ICV) size (truncated or full)
++ * @encap: true if encapsulation, false if decapsulation
++ * @is_qi: true when called from caam/qi
++ */
++void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool encap,
++ const bool is_qi)
++{
++ u32 *key_jump_cmd, *wait_cmd;
++ u32 nfifo;
++ const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
++
++ /* Note: Context registers are saved. */
++ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++
++ /* skip key loading if they are loaded due to sharing */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
++ CLASS_1 | KEY_DEST_CLASS_REG);
++
++ /* For IPsec load the salt from keymat in the context register */
++ if (is_ipsec)
++ append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
++ LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
++ 4 << LDST_OFFSET_SHIFT);
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++
++ /* Class 2 and 1 operations: Poly & ChaCha */
++ if (encap) {
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_ENCRYPT);
++ } else {
++ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
++ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
++ OP_ALG_DECRYPT);
++ }
++
++ if (is_qi) {
++ u32 *wait_load_cmd;
++ u32 ctx1_iv_off = is_ipsec ? 8 : 4;
++
++ /* REG3 = assoclen */
++ append_seq_load(desc, 4, LDST_CLASS_DECO |
++ LDST_SRCDST_WORD_DECO_MATH3 |
++ 4 << LDST_OFFSET_SHIFT);
++
++ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_CALM | JUMP_COND_NCP |
++ JUMP_COND_NOP | JUMP_COND_NIP |
++ JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_load_cmd);
++
++ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT |
++ ctx1_iv_off << LDST_OFFSET_SHIFT);
++ }
++
++ /*
++ * MAGIC with NFIFO
++ * Read associated data from the input and send them to class1 and
++ * class2 alignment blocks. From class1 send data to output fifo and
++ * then write it to memory since we don't need to encrypt AD.
++ */
++ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
++ NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
++ append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
++ LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
++
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
++ FIFOLD_CLASS_CLASS1 | LDST_VLF);
++ append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
++ MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
++
++ /* IPsec - copy IV at the output */
++ if (is_ipsec)
++ append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
++ 0x2 << 25);
++
++ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
++ JUMP_COND_NOP | JUMP_TEST_ALL);
++ set_jump_tgt_here(desc, wait_cmd);
++
++ if (encap) {
++ /* Read and write cryptlen bytes */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
++ CAAM_CMD_SZ);
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
++
++ /* Write ICV */
++ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++ } else {
++ /* Read and write cryptlen bytes */
++ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
++ CAAM_CMD_SZ);
++ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
++
++ /* Load ICV for verification */
++ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
++ }
++
++ print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
++ 1);
++}
++EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
++
+ /*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+@@ -1053,7 +1776,8 @@ static inline void ablkcipher_append_src
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
++ * - OP_ALG_ALGSEL_CHACHA20
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+@@ -1075,7 +1799,7 @@ void cnstr_shdsc_ablkcipher_encap(u32 *
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -2924,7 +3592,17 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
-@@ -1140,7 +1731,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
+@@ -1118,7 +1842,8 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_enc
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
++ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
++ * - OP_ALG_ALGSEL_CHACHA20
+ * @ivsize: initialization vector size
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+@@ -1140,7 +1865,7 @@ void cnstr_shdsc_ablkcipher_decap(u32 *
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -2933,7 +3611,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
LDST_CLASS_IND_CCB |
-@@ -1209,7 +1800,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
+@@ -1209,7 +1934,7 @@ void cnstr_shdsc_ablkcipher_givencap(u32
/* Load Nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -2975,7 +3653,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
-@@ -43,46 +52,62 @@
+@@ -43,46 +52,67 @@
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -3046,6 +3724,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
- unsigned int icvsize);
+ unsigned int ivsize, unsigned int icvsize,
+ const bool is_qi);
++
++void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
++ struct alginfo *adata, unsigned int ivsize,
++ unsigned int icvsize, const bool encap,
++ const bool is_qi);
void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
@@ -3131,7 +3814,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
@@ -3156,12 +3839,17 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
-@@ -258,6 +284,468 @@ badkey:
+@@ -258,55 +284,139 @@ badkey:
return -EINVAL;
}
+-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+- const u8 *key, unsigned int keylen)
+static int tls_set_sh_desc(struct crypto_aead *tls)
-+{
+ {
+- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
@@ -3231,26 +3919,45 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
-+ struct device *jrdev = ctx->jrdev;
+ struct device *jrdev = ctx->jrdev;
+- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+- u32 ctx1_iv_off = 0;
+- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+- OP_ALG_AAI_CTR_MOD128);
+- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
-+ int ret = 0;
-+
+ int ret = 0;
+
+- memcpy(ctx->key, key, keylen);
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
-+#ifdef DEBUG
+ #ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+
-+ /*
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ #endif
+- /*
+- * AES-CTR needs to load IV in CONTEXT1 reg
+- * at an offset of 128bits (16bytes)
+- * CONTEXT1[255:128] = IV
+- */
+- if (ctr_mode)
+- ctx1_iv_off = 16;
+
+ /*
+- * RFC3686 specific:
+- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+- * | *key = {KEY, NONCE}
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
-+ */
+ */
+- if (is_rfc3686) {
+- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+- keylen -= CTR_RFC3686_NONCE_SIZE;
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
@@ -3266,14 +3973,25 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
-+ }
-+
+ }
+
+- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+- ctx->cdata.keylen = keylen;
+- ctx->cdata.key_virt = ctx->key;
+- ctx->cdata.key_inline = true;
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
-+
+
+- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
+- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+- is_rfc3686, ctx1_iv_off);
+- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+- is_rfc3686, ctx1_iv_off);
+- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
+- ivsize, is_rfc3686, ctx1_iv_off);
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
@@ -3293,39 +4011,44 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ goto badkey;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+@@ -327,42 +437,84 @@ static int ablkcipher_setkey(struct cryp
+ }
+ }
+
+- if (ctx->drv_ctx[GIVENCRYPT]) {
+- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
+- ctx->sh_desc_givenc);
+- if (ret) {
+- dev_err(jrdev, "driver givenc context update failed\n");
+- goto badkey;
+- }
+- }
+-
+ return ret;
+ badkey:
+- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
+ return -EINVAL;
+ }
+
+-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+- const u8 *key, unsigned int keylen)
+static int gcm_set_sh_desc(struct crypto_aead *aead)
-+{
+ {
+- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-+
+
+- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+- dev_err(jrdev, "key size mismatch\n");
+- goto badkey;
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
@@ -3354,8 +4077,8 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
-+ }
-+
+ }
+
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
@@ -3384,62 +4107,129 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
-+ memcpy(ctx->key, key, keylen);
+ memcpy(ctx->key, key, keylen);
+- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
-+ ctx->cdata.keylen = keylen;
-+
+ ctx->cdata.keylen = keylen;
+- ctx->cdata.key_virt = ctx->key;
+- ctx->cdata.key_inline = true;
+
+- /* xts ablkcipher encrypt, decrypt shared descriptors */
+- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
+
+ /* Now update the driver contexts with the new shared descriptor */
+ if (ctx->drv_ctx[ENCRYPT]) {
+@@ -370,7 +522,7 @@ static int xts_ablkcipher_setkey(struct
+ ctx->sh_desc_enc);
+ if (ret) {
+ dev_err(jrdev, "driver enc context update failed\n");
+- goto badkey;
+ return ret;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
+ }
+ }
+
+@@ -379,151 +531,829 @@ static int xts_ablkcipher_setkey(struct
+ ctx->sh_desc_dec);
+ if (ret) {
+ dev_err(jrdev, "driver dec context update failed\n");
+- goto badkey;
+ return ret;
-+ }
-+ }
-+
+ }
+ }
+
+- return ret;
+-badkey:
+- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+- return -EINVAL;
+ return 0;
-+}
-+
+ }
+
+-/*
+- * aead_edesc - s/w-extended aead descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @qm_sg_bytes: length of dma mapped h/w link table
+- * @qm_sg_dma: bus physical mapped address of h/w link table
+- * @assoclen: associated data length, in CAAM endianness
+- * @assoclen_dma: bus physical mapped address of req->assoclen
+- * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table, followed by IV
+- */
+-struct aead_edesc {
+- int src_nents;
+- int dst_nents;
+- dma_addr_t iv_dma;
+- int qm_sg_bytes;
+- dma_addr_t qm_sg_dma;
+- unsigned int assoclen;
+- dma_addr_t assoclen_dma;
+- struct caam_drv_req drv_req;
+- struct qm_sg_entry sgt[0];
+-};
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-+
+
+-/*
+- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+- * @src_nents: number of segments in input scatterlist
+- * @dst_nents: number of segments in output scatterlist
+- * @iv_dma: dma address of iv for checking continuity and link table
+- * @qm_sg_bytes: length of dma mapped h/w link table
+- * @qm_sg_dma: bus physical mapped address of h/w link table
+- * @drv_req: driver-specific request structure
+- * @sgt: the h/w link table, followed by IV
+- */
+-struct ablkcipher_edesc {
+- int src_nents;
+- int dst_nents;
+- dma_addr_t iv_dma;
+- int qm_sg_bytes;
+- dma_addr_t qm_sg_dma;
+- struct caam_drv_req drv_req;
+- struct qm_sg_entry sgt[0];
+-};
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
-+
-+ /*
+
+-static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
+- enum optype type)
+-{
+ /*
+- * This function is called on the fast path with values of 'type'
+- * known at compile time. Invalid arguments are not expected and
+- * thus no checks are made.
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
-+ */
+ */
+- struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
+- u32 *desc;
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-+
+
+- if (unlikely(!drv_ctx)) {
+- spin_lock(&ctx->lock);
+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
-+
+
+- /* Read again to check if some other core init drv_ctx */
+- drv_ctx = ctx->drv_ctx[type];
+- if (!drv_ctx) {
+- int cpu;
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
@@ -3450,7 +4240,13 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-+
+
+- if (type == ENCRYPT)
+- desc = ctx->sh_desc_enc;
+- else if (type == DECRYPT)
+- desc = ctx->sh_desc_dec;
+- else /* (type == GIVENCRYPT) */
+- desc = ctx->sh_desc_givenc;
+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
@@ -3622,13 +4418,162 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ return 0;
+}
+
- static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
- {
-@@ -414,6 +902,29 @@ struct aead_edesc {
- };
-
- /*
++static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
++ const char *alg_name = crypto_tfm_alg_name(tfm);
++ struct device *jrdev = ctx->jrdev;
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ u32 ctx1_iv_off = 0;
++ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
++ OP_ALG_AAI_CTR_MOD128);
++ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++ int ret = 0;
++
++ memcpy(ctx->key, key, keylen);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++ /*
++ * AES-CTR needs to load IV in CONTEXT1 reg
++ * at an offset of 128 bits (16 bytes)
++ * CONTEXT1[255:128] = IV
++ */
++ if (ctr_mode)
++ ctx1_iv_off = 16;
++
++ /*
++ * RFC3686 specific:
++ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
++ * | *key = {KEY, NONCE}
++ */
++ if (is_rfc3686) {
++ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
++ keylen -= CTR_RFC3686_NONCE_SIZE;
++ }
++
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
++ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
++ is_rfc3686, ctx1_iv_off);
++ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
++ ivsize, is_rfc3686, ctx1_iv_off);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[GIVENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
++ ctx->sh_desc_givenc);
++ if (ret) {
++ dev_err(jrdev, "driver givenc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
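
An aside on the key handling in the setkey above: plain CTR transfers keep the IV at a 16-byte offset in CONTEXT1, while rfc3686(ctr(aes)) additionally carves the 4-byte nonce out of the tail of the key. A minimal userspace sketch of that bookkeeping (the helper name is hypothetical; only CTR_RFC3686_NONCE_SIZE mirrors the kernel header value):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define CTR_RFC3686_NONCE_SIZE 4	/* nonce length fixed by RFC 3686 */

/*
 * Hypothetical helper mirroring ablkcipher_setkey() above: CTR mode keeps
 * the IV in CONTEXT1[255:128] (byte offset 16); for rfc3686(ctr(aes)) the
 * trailing 4 key bytes are really the nonce, so the IV slides past it and
 * the nonce is not counted as key material.
 */
static void ctr_key_layout(const char *alg_name, bool ctr_mode,
			   unsigned int *keylen, unsigned int *ctx1_iv_off)
{
	*ctx1_iv_off = 0;
	if (ctr_mode)
		*ctx1_iv_off = 16;		/* CONTEXT1[255:128] = IV */
	if (ctr_mode && strstr(alg_name, "rfc3686")) {
		*ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		*keylen -= CTR_RFC3686_NONCE_SIZE;	/* *key = {KEY, NONCE} */
	}
}

int main(void)
{
	unsigned int keylen = 16 + CTR_RFC3686_NONCE_SIZE, off;

	ctr_key_layout("rfc3686(ctr(aes))", true, &keylen, &off);
	printf("keylen=%u ctx1_iv_off=%u\n", keylen, off);	/* 16 and 20 */
	return 0;
}
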
++static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
++ struct device *jrdev = ctx->jrdev;
++ int ret = 0;
++
++ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
++ dev_err(jrdev, "key size mismatch\n");
++ goto badkey;
++ }
++
++ memcpy(ctx->key, key, keylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
++ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_inline = true;
++
++ /* xts ablkcipher encrypt, decrypt shared descriptors */
++ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
++ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
++ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
++
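
The length check above is the whole of the XTS key validation: the key is two concatenated AES keys, so only 32 or 64 bytes pass. A runnable reduction, with the AES size constants written out:

#include <errno.h>
#include <stdio.h>

#define AES_MIN_KEY_SIZE 16	/* AES-128 */
#define AES_MAX_KEY_SIZE 32	/* AES-256 */

/* XTS uses two concatenated AES keys, so only 32 or 64 bytes are valid. */
static int xts_check_keylen(unsigned int keylen)
{
	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE)
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned int lens[] = { 32, 48, 64 };

	for (unsigned int i = 0; i < 3; i++)
		printf("keylen %u -> %d\n", lens[i], xts_check_keylen(lens[i]));
	return 0;	/* 32 and 64 pass; 48 fails with -EINVAL */
}
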
++/*
++ * aead_edesc - s/w-extended aead descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @assoclen: associated data length, in CAAM endianness
++ * @assoclen_dma: bus physical mapped address of req->assoclen
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table, followed by IV
++ */
++struct aead_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ unsigned int assoclen;
++ dma_addr_t assoclen_dma;
++ struct caam_drv_req drv_req;
++ struct qm_sg_entry sgt[0];
++};
++
++/*
+ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
@@ -3652,13 +4597,100 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+};
+
+/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @dst_nents: number of segments in output scatterlist
-@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
- }
-
++ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
++ * @src_nents: number of segments in input scatterlist
++ * @dst_nents: number of segments in output scatterlist
++ * @iv_dma: dma address of iv for checking continuity and link table
++ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @qm_sg_dma: bus physical mapped address of h/w link table
++ * @drv_req: driver-specific request structure
++ * @sgt: the h/w link table, followed by IV
++ */
++struct ablkcipher_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++ struct caam_drv_req drv_req;
++ struct qm_sg_entry sgt[0];
++};
++
++static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
++ enum optype type)
++{
++ /*
++ * This function is called on the fast path with values of 'type'
++ * known at compile time. Invalid arguments are not expected and
++ * thus no checks are made.
++ */
++ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
++ u32 *desc;
++
++ if (unlikely(!drv_ctx)) {
++ spin_lock(&ctx->lock);
++
++ /* Read again to check if some other core init drv_ctx */
++ drv_ctx = ctx->drv_ctx[type];
++ if (!drv_ctx) {
++ int cpu;
++
++ if (type == ENCRYPT)
++ desc = ctx->sh_desc_enc;
++ else if (type == DECRYPT)
++ desc = ctx->sh_desc_dec;
++ else /* (type == GIVENCRYPT) */
++ desc = ctx->sh_desc_givenc;
++
++ cpu = smp_processor_id();
++ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
++ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
++ drv_ctx->op_type = type;
++
++ ctx->drv_ctx[type] = drv_ctx;
++ }
++
++ spin_unlock(&ctx->lock);
++ }
++
++ return drv_ctx;
++}
++
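
get_drv_ctx() above is a double-checked lazy initializer: an unlocked read on the fast path, then a re-check under the lock so concurrent callers do not both build a context. A userspace pthread sketch of the same shape (names hypothetical; the kernel version relies on the spinlock's ordering guarantees, which this sketch does not try to reproduce):

#include <pthread.h>
#include <stdlib.h>

enum optype { ENCRYPT, DECRYPT, GIVENCRYPT, NUM_OP };

struct drv_ctx { enum optype op_type; };

static struct drv_ctx *drv_ctx_tbl[NUM_OP];
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

static struct drv_ctx *get_ctx(enum optype type)
{
	struct drv_ctx *ctx = drv_ctx_tbl[type];	/* unlocked fast path */

	if (!ctx) {
		pthread_mutex_lock(&tbl_lock);
		ctx = drv_ctx_tbl[type];	/* another thread may have won */
		if (!ctx) {
			ctx = calloc(1, sizeof(*ctx));
			if (ctx)
				ctx->op_type = type;
			drv_ctx_tbl[type] = ctx;
		}
		pthread_mutex_unlock(&tbl_lock);
	}
	return ctx;
}

int main(void)
{
	return get_ctx(ENCRYPT) ? 0 : 1;	/* first call allocates */
}
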
++static void caam_unmap(struct device *dev, struct scatterlist *src,
++ struct scatterlist *dst, int src_nents,
++ int dst_nents, dma_addr_t iv_dma, int ivsize,
++ enum optype op_type, dma_addr_t qm_sg_dma,
++ int qm_sg_bytes)
++{
++ if (dst != src) {
++ if (src_nents)
++ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
++ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
++ } else {
++ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
++ }
++
++ if (iv_dma)
++ dma_unmap_single(dev, iv_dma, ivsize,
++ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
++ DMA_TO_DEVICE);
++ if (qm_sg_bytes)
++ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
++}
++
++static void aead_unmap(struct device *dev,
++ struct aead_edesc *edesc,
++ struct aead_request *req)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ int ivsize = crypto_aead_ivsize(aead);
++
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++}
++
+static void tls_unmap(struct device *dev,
+ struct tls_edesc *edesc,
+ struct aead_request *req)
@@ -3672,53 +4704,22 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ edesc->qm_sg_bytes);
+}
+
- static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
- qidev = caam_ctx->qidev;
-
- if (unlikely(status)) {
-+ u32 ssrc = status & JRSTA_SSRC_MASK;
-+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-+
- caam_jr_strstatus(qidev, status);
-- ecode = -EIO;
-+ /*
-+ * verify hw auth check passed else return -EBADMSG
-+ */
-+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
-+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
- }
-
- edesc = container_of(drv_req, typeof(*edesc), drv_req);
-@@ -785,6 +1319,260 @@ static int aead_decrypt(struct aead_requ
- return aead_crypt(req, false);
- }
-
-+static int ipsec_gcm_encrypt(struct aead_request *req)
++static void ablkcipher_unmap(struct device *dev,
++ struct ablkcipher_edesc *edesc,
++ struct ablkcipher_request *req)
+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-+
-+ return aead_crypt(req, true);
-+}
-+
-+static int ipsec_gcm_decrypt(struct aead_request *req)
-+{
-+ if (req->assoclen < 8)
-+ return -EINVAL;
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
-+ return aead_crypt(req, false);
++ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
++ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
++ edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
++static void aead_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
@@ -3727,56 +4728,56 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
++ u32 ssrc = status & JRSTA_SSRC_MASK;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
+ caam_jr_strstatus(qidev, status);
-+ ecode = -EIO;
++ /*
++ * verify the HW auth check passed; otherwise return -EBADMSG
++ */
++ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ tls_unmap(qidev, edesc, aead_req);
++ aead_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
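
The error path in aead_done() folds the job-ring status word into a crypto-API errno: an ICV-check failure means the authentication tag did not verify and must surface as -EBADMSG, anything else as -EIO. A sketch of the mapping; the mask and error-ID values below are illustrative stand-ins for the definitions in the driver's regs.h:

#include <errno.h>
#include <stdio.h>

/* Illustrative values; the authoritative ones live in the driver's regs.h. */
#define JRSTA_SSRC_MASK			0xf0000000
#define JRSTA_SSRC_CCB_ERROR		0x20000000
#define JRSTA_CCBERR_ERRID_MASK		0x000000ff
#define JRSTA_CCBERR_ERRID_ICVCHK	0x0000000a

/* ICV (auth tag) mismatch reports -EBADMSG; any other failure is -EIO. */
static int caam_status_to_errno(unsigned int status)
{
	if (!status)
		return 0;
	if ((status & JRSTA_SSRC_MASK) == JRSTA_SSRC_CCB_ERROR &&
	    (status & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
		return -EBADMSG;
	return -EIO;
}

int main(void)
{
	printf("%d %d\n", caam_status_to_errno(0x2000000a),
	       caam_status_to_errno(0x40000001));	/* -EBADMSG, -EIO */
	return 0;
}
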
+/*
-+ * allocate and map the tls extended descriptor
++ * allocate and map the aead extended descriptor
+ */
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
++static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
++ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int blocksize = crypto_aead_blocksize(aead);
-+ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
-+ u8 *iv;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
++ unsigned int authsize = ctx->authsize;
++ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
-+ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct tls_edesc *)drv_ctx;
++ return (struct aead_edesc *)drv_ctx;
+
-+ /* allocate space for base edesc, link tables and IV */
++ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
@@ -3786,7 +4787,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
@@ -3802,7 +4803,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
-+ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
@@ -3813,13 +4813,14 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ return ERR_PTR(src_nents);
+ }
+
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
++ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
++ req->cryptlen +
++ (encrypt ? authsize :
++ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ req->assoclen + req->cryptlen +
++ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
@@ -3836,7 +4837,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ mapped_src_nents = 0;
+ }
+
-+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
@@ -3846,51 +4847,95 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ }
+ }
+
++ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
++ ivsize = crypto_aead_ivsize(aead);
++
+ /*
-+ * Create S/G table: IV, src, dst.
++ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
+ */
-+ qm_sg_ents = 1 + mapped_src_nents +
-+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
++ if (mapped_dst_nents > 1)
++ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
++ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
++ else
++ qm_sg_ents = ALIGN(qm_sg_ents, 4);
++
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-+
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
-+ 0, 0);
++ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
++ CAAM_QI_MEMCACHE_SIZE)) {
++ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
++ qm_sg_ents, ivsize);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
++ 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
++ if (ivsize) {
++ u8 *iv = (u8 *)(sg_table + qm_sg_ents);
++
++ /* Make sure IV is located in a DMAable area */
++ memcpy(iv, req->iv, ivsize);
++
++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents,
++ dst_nents, 0, 0, 0, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
++ }
++
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = tls_done;
++ edesc->drv_req.cbk = aead_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
++ dev_err(qidev, "unable to map assoclen\n");
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
++ qi_cache_free(edesc);
++ return ERR_PTR(-ENOMEM);
++ }
+
++ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
++ qm_sg_index++;
++ if (ivsize) {
++ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
++ qm_sg_index++;
++ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, op_type, 0, 0);
++ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
++ iv_dma, ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
@@ -3898,65 +4943,458 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
++ out_len = req->assoclen + req->cryptlen +
++ (encrypt ? ctx->authsize : (-ctx->authsize));
++ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
-+
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
-+ if (req->dst == req->src)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table), out_len, 0);
-+ else if (mapped_dst_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+ else
++ if (req->dst == req->src) {
++ if (mapped_src_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
++ out_len, 0);
++ else
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (1 + !!ivsize) * sizeof(*sg_table),
++ out_len, 0);
++ } else if (mapped_dst_nents == 1) {
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
++ 0);
++ } else {
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
++ }
+
+ return edesc;
+}
-+
-+static int tls_crypt(struct aead_request *req, bool encrypt)
+
+- cpu = smp_processor_id();
+- drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
+- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
+- drv_ctx->op_type = type;
++static inline int aead_crypt(struct aead_request *req, bool encrypt)
+{
-+ struct tls_edesc *edesc;
++ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
-+
+
+- ctx->drv_ctx[type] = drv_ctx;
+- }
+ if (unlikely(caam_congested))
+ return -EAGAIN;
-+
-+ edesc = tls_edesc_alloc(req, encrypt);
+
+- spin_unlock(&ctx->lock);
++ /* allocate extended descriptor */
++ edesc = aead_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
++ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
-+ tls_unmap(ctx->qidev, edesc, req);
++ aead_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
-+ }
-+
+ }
+
+- return drv_ctx;
+ return ret;
+ }
+
+-static void caam_unmap(struct device *dev, struct scatterlist *src,
+- struct scatterlist *dst, int src_nents,
+- int dst_nents, dma_addr_t iv_dma, int ivsize,
+- enum optype op_type, dma_addr_t qm_sg_dma,
+- int qm_sg_bytes)
++static int aead_encrypt(struct aead_request *req)
+ {
+- if (dst != src) {
+- if (src_nents)
+- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+- } else {
+- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+- }
++ return aead_crypt(req, true);
+}
+
+- if (iv_dma)
+- dma_unmap_single(dev, iv_dma, ivsize,
+- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
+- DMA_TO_DEVICE);
+- if (qm_sg_bytes)
+- dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
++static int aead_decrypt(struct aead_request *req)
++{
++ return aead_crypt(req, false);
+ }
+
+-static void aead_unmap(struct device *dev,
+- struct aead_edesc *edesc,
+- struct aead_request *req)
++static int ipsec_gcm_encrypt(struct aead_request *req)
+ {
+- struct crypto_aead *aead = crypto_aead_reqtfm(req);
+- int ivsize = crypto_aead_ivsize(aead);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
+- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
++ return aead_crypt(req, true);
+ }
+
+-static void ablkcipher_unmap(struct device *dev,
+- struct ablkcipher_edesc *edesc,
+- struct ablkcipher_request *req)
++static int ipsec_gcm_decrypt(struct aead_request *req)
+ {
+- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
+- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
+- edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ return aead_crypt(req, false);
+ }
+
+-static void aead_done(struct caam_drv_req *drv_req, u32 status)
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
+ {
+ struct device *qidev;
+- struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+@@ -537,41 +1367,51 @@ static void aead_done(struct caam_drv_re
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+- aead_unmap(qidev, edesc, aead_req);
++ tls_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+ }
+
+ /*
+- * allocate and map the aead extended descriptor
++ * allocate and map the tls extended descriptor
+ */
+-static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+- bool encrypt)
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+ {
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int blocksize = crypto_aead_blocksize(aead);
++ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+- GFP_KERNEL : GFP_ATOMIC;
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+- struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+- unsigned int authsize = ctx->authsize;
+- int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
++ u8 *iv;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
+- return (struct aead_edesc *)drv_ctx;
++ return (struct tls_edesc *)drv_ctx;
+
+- /* allocate space for base edesc and hw desc commands, link tables */
++ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+@@ -581,7 +1421,7 @@ static struct aead_edesc *aead_edesc_all
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+- (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+@@ -597,6 +1437,7 @@ static struct aead_edesc *aead_edesc_all
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
++ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+@@ -607,14 +1448,13 @@ static struct aead_edesc *aead_edesc_all
+ return ERR_PTR(src_nents);
+ }
+
+- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+- req->cryptlen +
+- (encrypt ? authsize :
+- (-authsize)));
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
+- req->assoclen + req->cryptlen +
+- (encrypt ? authsize : (-authsize)));
++ req->cryptlen +
++ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+@@ -631,7 +1471,7 @@ static struct aead_edesc *aead_edesc_all
+ mapped_src_nents = 0;
+ }
+
+- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+@@ -641,80 +1481,51 @@ static struct aead_edesc *aead_edesc_all
+ }
+ }
+
+- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+- ivsize = crypto_aead_ivsize(aead);
+-
+ /*
+- * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
++ * Create S/G table: IV, src, dst.
+ * Input is not contiguous.
+ */
+- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
++ qm_sg_ents = 1 + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+- if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+- CAAM_QI_MEMCACHE_SIZE)) {
+- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+- qm_sg_ents, ivsize);
+- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+- 0, 0, 0, 0);
+
++ ivsize = crypto_aead_ivsize(aead);
++ iv = (u8 *)(sg_table + qm_sg_ents);
++ /* Make sure IV is located in a DMAable area */
++ memcpy(iv, req->iv, ivsize);
++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
++ 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+- if (ivsize) {
+- u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+-
+- /* Make sure IV is located in a DMAable area */
+- memcpy(iv, req->iv, ivsize);
+-
+- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+- if (dma_mapping_error(qidev, iv_dma)) {
+- dev_err(qidev, "unable to map IV\n");
+- caam_unmap(qidev, req->src, req->dst, src_nents,
+- dst_nents, 0, 0, 0, 0, 0);
+- qi_cache_free(edesc);
+- return ERR_PTR(-ENOMEM);
+- }
+- }
+-
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
+- edesc->drv_req.cbk = aead_done;
++ edesc->drv_req.cbk = tls_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
+- edesc->assoclen = cpu_to_caam32(req->assoclen);
+- edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
+- dev_err(qidev, "unable to map assoclen\n");
+- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+- iv_dma, ivsize, op_type, 0, 0);
+- qi_cache_free(edesc);
+- return ERR_PTR(-ENOMEM);
+- }
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
+
+- dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+- qm_sg_index++;
+- if (ivsize) {
+- dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+- qm_sg_index++;
+- }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
+- dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
+- iv_dma, ivsize, op_type, 0, 0);
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+@@ -722,35 +1533,29 @@ static struct aead_edesc *aead_edesc_all
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+- out_len = req->assoclen + req->cryptlen +
+- (encrypt ? ctx->authsize : (-ctx->authsize));
+- in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
+- if (req->dst == req->src) {
+- if (mapped_src_nents == 1)
+- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
+- out_len, 0);
+- else
+- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
+- (1 + !!ivsize) * sizeof(*sg_table),
+- out_len, 0);
+- } else if (mapped_dst_nents == 1) {
+- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
+- 0);
+- } else {
++ if (req->dst == req->src)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table), out_len, 0);
++ else if (mapped_dst_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
+- }
+
+ return edesc;
+ }
+
+-static inline int aead_crypt(struct aead_request *req, bool encrypt)
++static int tls_crypt(struct aead_request *req, bool encrypt)
+ {
+- struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
+@@ -758,31 +1563,29 @@ static inline int aead_crypt(struct aead
+ if (unlikely(caam_congested))
+ return -EAGAIN;
+
+- /* allocate extended descriptor */
+- edesc = aead_edesc_alloc(req, encrypt);
++ edesc = tls_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
+- /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+- aead_unmap(ctx->qidev, edesc, req);
++ tls_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+ }
+
+-static int aead_encrypt(struct aead_request *req)
+static int tls_encrypt(struct aead_request *req)
-+{
+ {
+- return aead_crypt(req, true);
+ return tls_crypt(req, true);
-+}
-+
+ }
+
+-static int aead_decrypt(struct aead_request *req)
+static int tls_decrypt(struct aead_request *req)
-+{
+ {
+- return aead_crypt(req, false);
+ return tls_crypt(req, false);
-+}
-+
+ }
+
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
- {
- struct ablkcipher_edesc *edesc;
-@@ -1308,6 +2096,61 @@ static struct caam_alg_template driver_a
+@@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+- qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
++ /*
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
++ */
++ if (mapped_dst_nents > 1)
++ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
++ 1 + ALIGN(mapped_src_nents, 4));
++ else
++ qm_sg_ents = ALIGN(qm_sg_ents, 4);
++
+ qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+ if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
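
This sizing logic, like its aead counterpart earlier, pads whichever S/G table a 4-entry hardware prefetch could overrun up to a multiple of four entries (plus one entry for the IV). A runnable reduction of the three cases, assuming the usual semantics of the kernel's ALIGN macro:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))

/*
 * Reduction of the qm_sg_ents sizing above: the CAAM reads S/G entries in
 * bursts of 4, so the table a burst might overrun is padded to a multiple
 * of 4 entries (the extra 1 accounts for the IV entry).
 */
static int qm_sg_entries(int src_nents, int dst_nents, int in_place)
{
	int ents = 1 + src_nents;		/* IV + input S/G */

	if (!in_place && dst_nents > 1)
		ents += ALIGN(dst_nents, 4);	/* pad the output S/G */
	else if (in_place && src_nents > 1)	/* overlapping S/Gs */
		ents = MAX(ALIGN(ents, 4), 1 + ALIGN(src_nents, 4));
	else
		ents = ALIGN(ents, 4);		/* pad the input S/G */

	return ents;
}

int main(void)
{
	printf("%d %d\n", qm_sg_entries(3, 0, 1), qm_sg_entries(2, 5, 0));
	return 0;	/* prints "5 11" */
}
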
+@@ -1308,6 +2128,61 @@ static struct caam_alg_template driver_a
};
static struct caam_aead_alg driver_aeads[] = {
@@ -4018,7 +5456,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* single-pass ipsec_esp descriptor */
{
.aead = {
-@@ -2118,6 +2961,26 @@ static struct caam_aead_alg driver_aeads
+@@ -2118,6 +2993,26 @@ static struct caam_aead_alg driver_aeads
.geniv = true,
}
},
@@ -4045,7 +5483,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
};
struct caam_crypto_alg {
-@@ -2126,9 +2989,20 @@ struct caam_crypto_alg {
+@@ -2126,9 +3021,21 @@ struct caam_crypto_alg {
struct caam_alg_entry caam;
};
@@ -4054,6 +5492,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ bool uses_dkp)
{
struct caam_drv_private *priv;
++ struct device *dev;
+ /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
+ static const u8 digest_size[] = {
+ MD5_DIGEST_SIZE,
@@ -4067,27 +5506,36 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* distribute tfms across job rings to ensure in-order
-@@ -2140,8 +3014,14 @@ static int caam_init_common(struct caam_
+@@ -2140,10 +3047,19 @@ static int caam_init_common(struct caam_
return PTR_ERR(ctx->jrdev);
}
+- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+- dev_err(ctx->jrdev, "unable to map key\n");
+ priv = dev_get_drvdata(ctx->jrdev->parent);
-+ if (priv->era >= 6 && uses_dkp)
++ if (priv->era >= 6 && uses_dkp) {
+ ctx->dir = DMA_BIDIRECTIONAL;
-+ else
++ dev = ctx->jrdev->parent;
++ } else {
+ ctx->dir = DMA_TO_DEVICE;
++ dev = ctx->jrdev;
++ }
+
- ctx->key_dma = dma_map_single(ctx->jrdev, ctx->key, sizeof(ctx->key),
-- DMA_TO_DEVICE);
++ ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
+ ctx->dir);
- if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
- dev_err(ctx->jrdev, "unable to map key\n");
++ if (dma_mapping_error(dev, ctx->key_dma)) {
++ dev_err(dev, "unable to map key\n");
caam_jr_free(ctx->jrdev);
-@@ -2152,7 +3032,22 @@ static int caam_init_common(struct caam_
+ return -ENOMEM;
+ }
+@@ -2152,8 +3068,23 @@ static int caam_init_common(struct caam_
ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
- priv = dev_get_drvdata(ctx->jrdev->parent);
+- ctx->qidev = priv->qidev;
+ if (ctx->adata.algtype) {
+ op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
+ >> OP_ALG_ALGSEL_SHIFT;
@@ -4104,10 +5552,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ ctx->authsize = 0;
+ }
+
- ctx->qidev = priv->qidev;
++ ctx->qidev = ctx->jrdev->parent;
spin_lock_init(&ctx->lock);
-@@ -2170,7 +3065,7 @@ static int caam_cra_init(struct crypto_t
+ ctx->drv_ctx[ENCRYPT] = NULL;
+@@ -2170,7 +3101,7 @@ static int caam_cra_init(struct crypto_t
crypto_alg);
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -4116,7 +5565,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
}
static int caam_aead_init(struct crypto_aead *tfm)
-@@ -2180,7 +3075,9 @@ static int caam_aead_init(struct crypto_
+@@ -2180,17 +3111,25 @@ static int caam_aead_init(struct crypto_
aead);
struct caam_ctx *ctx = crypto_aead_ctx(tfm);
@@ -4127,31 +5576,162 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
}
static void caam_exit_common(struct caam_ctx *ctx)
-@@ -2189,8 +3086,7 @@ static void caam_exit_common(struct caam
+ {
++ struct device *dev;
++
+ caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
- dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key),
- DMA_TO_DEVICE);
-+ dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
++ if (ctx->dir == DMA_BIDIRECTIONAL)
++ dev = ctx->jrdev->parent;
++ else
++ dev = ctx->jrdev;
++
++ dma_unmap_single(dev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
-@@ -2315,6 +3211,11 @@ static int __init caam_qi_algapi_init(vo
- if (!priv || !priv->qi_present)
- return -ENODEV;
+@@ -2206,7 +3145,7 @@ static void caam_aead_exit(struct crypto
+ }
-+ if (caam_dpaa2) {
-+ dev_info(ctrldev, "caam/qi frontend driver not suitable for DPAA 2.x, aborting...\n");
-+ return -ENODEV;
-+ }
-+
+ static struct list_head alg_list;
+-static void __exit caam_qi_algapi_exit(void)
++void caam_qi_algapi_exit(void)
+ {
+ struct caam_crypto_alg *t_alg, *n;
+ int i;
+@@ -2282,53 +3221,48 @@ static void caam_aead_alg_init(struct ca
+ alg->exit = caam_aead_exit;
+ }
+
+-static int __init caam_qi_algapi_init(void)
++int caam_qi_algapi_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int i = 0, err = 0;
+- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
++ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+ bool registered = false;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- of_node_put(dev_node);
+- if (!pdev)
+- return -ENODEV;
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv || !priv->qi_present)
+- return -ENODEV;
+-
INIT_LIST_HEAD(&alg_list);
/*
+ * Register crypto algorithms the device supports.
+ * First, detect presence and attributes of DES, AES, and MD blocks.
+ */
+- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
+- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
+- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ if (priv->era < 10) {
++ u32 cha_vid, cha_inst;
++
++ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
++ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
++ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++
++ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
++ CHA_ID_LS_DES_SHIFT;
++ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
++ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ } else {
++ u32 aesa, mdha;
++
++ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
++ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
++
++ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++
++ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
++ aes_inst = aesa & CHA_VER_NUM_MASK;
++ md_inst = mdha & CHA_VER_NUM_MASK;
++ }
+
+ /* If MD is present, limit digest size based on LP256 */
+- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
++ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
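
On era 10+ parts the packed cha_id_ls/cha_num_ls words give way to per-accelerator version registers, which is why the probe above branches on priv->era before extracting the same facts (DES/AES instantiation, MD version). The pre-era-10 path is ordinary mask-and-shift extraction; a sketch with illustrative field positions, not the real register layout:

#include <stdio.h>

/* Illustrative field layout for a packed pre-era-10 capability word. */
#define CHA_ID_LS_MD_MASK	0xf0000000
#define CHA_ID_LS_MD_SHIFT	28
#define CHA_ID_LS_AES_MASK	0x0000000f

static unsigned int field(unsigned int reg, unsigned int mask,
			  unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	unsigned int cha_vid = 0x40000003;	/* sample register value */

	/* same extraction the era < 10 branch above performs */
	printf("md_vid=%u aes_vid=%u\n",
	       field(cha_vid, CHA_ID_LS_MD_MASK, CHA_ID_LS_MD_SHIFT),
	       field(cha_vid, CHA_ID_LS_AES_MASK, 0));
	return 0;	/* md_vid=4 aes_vid=3 */
}
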
+@@ -2349,14 +3283,14 @@ static int __init caam_qi_algapi_init(vo
+ t_alg = caam_alg_alloc(alg);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+- dev_warn(priv->qidev, "%s alg allocation failed\n",
++ dev_warn(ctrldev, "%s alg allocation failed\n",
+ alg->driver_name);
+ continue;
+ }
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+- dev_warn(priv->qidev, "%s alg registration failed\n",
++ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+ continue;
+@@ -2388,8 +3322,7 @@ static int __init caam_qi_algapi_init(vo
+ * Check support for AES algorithms not available
+ * on LP devices.
+ */
+- if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
+- (alg_aai == OP_ALG_AAI_GCM))
++ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ continue;
+
+ /*
+@@ -2414,14 +3347,7 @@ static int __init caam_qi_algapi_init(vo
+ }
+
+ if (registered)
+- dev_info(priv->qidev, "algorithms registered in /proc/crypto\n");
++ dev_info(ctrldev, "algorithms registered in /proc/crypto\n");
+
+ return err;
+ }
+-
+-module_init(caam_qi_algapi_init);
+-module_exit(caam_qi_algapi_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("Support for crypto API using CAAM-QI backend");
+-MODULE_AUTHOR("Freescale Semiconductor");
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,5691 @@
+@@ -0,0 +1,5843 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
@@ -4179,13 +5759,6 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
-+#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM
-+bool caam_little_end;
-+EXPORT_SYMBOL(caam_little_end);
-+bool caam_imx;
-+EXPORT_SYMBOL(caam_imx);
-+#endif
-+
+/*
+ * This is a cache of buffers, from which the users of CAAM QI driver
+ * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
@@ -4639,7 +6212,15 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
-+ edesc->assoclen = cpu_to_caam32(req->assoclen);
++ if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
++ OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
++ /*
++ * The associated data already comes with the IV, but we need
++ * to skip it when we authenticate or encrypt...
++ */
++ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
++ else
++ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
@@ -4709,6 +6290,68 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ return edesc;
+}
+
++static int chachapoly_set_sh_desc(struct crypto_aead *aead)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ struct device *dev = ctx->dev;
++ struct caam_flc *flc;
++ u32 *desc;
++
++ if (!ctx->cdata.keylen || !ctx->authsize)
++ return 0;
++
++ flc = &ctx->flc[ENCRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, true, true);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
++
++ flc = &ctx->flc[DECRYPT];
++ desc = flc->sh_desc;
++ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
++ ctx->authsize, false, true);
++ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ ctx->dir);
++
++ return 0;
++}
++
++static int chachapoly_setauthsize(struct crypto_aead *aead,
++ unsigned int authsize)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++
++ if (authsize != POLY1305_DIGEST_SIZE)
++ return -EINVAL;
++
++ ctx->authsize = authsize;
++ return chachapoly_set_sh_desc(aead);
++}
++
++static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int ivsize = crypto_aead_ivsize(aead);
++ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
++
++ if (keylen != CHACHA20_KEY_SIZE + saltlen) {
++ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++ }
++
++ ctx->cdata.key_virt = key;
++ ctx->cdata.keylen = keylen - saltlen;
++
++ return chachapoly_set_sh_desc(aead);
++}
++
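
chachapoly_setkey() above serves both rfc7539 (12-byte per-request nonce, no salt) and rfc7539esp (8-byte nonce plus a 4-byte salt carried at the tail of the key), so the expected key length is always CHACHA20_KEY_SIZE plus whatever part of the 12-byte nonce is not supplied per request. The length rule in isolation:

#include <stdio.h>

#define CHACHA20_KEY_SIZE	32
#define CHACHAPOLY_IV_SIZE	12	/* full rfc7539 nonce */

/*
 * The salt is the part of the 12-byte nonce not carried in each request,
 * and it rides at the end of the key, as chachapoly_setkey() expects.
 */
static int chachapoly_keylen_ok(unsigned int keylen, unsigned int ivsize)
{
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	return keylen == CHACHA20_KEY_SIZE + saltlen;
}

int main(void)
{
	/* rfc7539: ivsize 12, no salt; rfc7539esp: ivsize 8, 4-byte salt */
	printf("%d %d\n", chachapoly_keylen_ok(32, 12),
	       chachapoly_keylen_ok(36, 8));	/* both 1 (valid) */
	return 0;
}
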
+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
@@ -5303,7 +6946,9 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128);
++ OP_ALG_AAI_CTR_MOD128) &&
++ ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
++ OP_ALG_ALGSEL_CHACHA20);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+#ifdef DEBUG
@@ -6119,7 +7764,23 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
-+ }
++ },
++ {
++ .skcipher = {
++ .base = {
++ .cra_name = "chacha20",
++ .cra_driver_name = "chacha20-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = skcipher_setkey,
++ .encrypt = skcipher_encrypt,
++ .decrypt = skcipher_decrypt,
++ .min_keysize = CHACHA20_KEY_SIZE,
++ .max_keysize = CHACHA20_KEY_SIZE,
++ .ivsize = CHACHA20_IV_SIZE,
++ },
++ .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
++ },
+};
+
+static struct caam_aead_alg driver_aeads[] = {
@@ -7236,6 +8897,50 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ {
+ .aead = {
+ .base = {
++ .cra_name = "rfc7539(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539-chacha20-poly1305-"
++ "caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = CHACHAPOLY_IV_SIZE,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
++ {
++ .aead = {
++ .base = {
++ .cra_name = "rfc7539esp(chacha20,poly1305)",
++ .cra_driver_name = "rfc7539esp-chacha20-"
++ "poly1305-caam-qi2",
++ .cra_blocksize = 1,
++ },
++ .setkey = chachapoly_setkey,
++ .setauthsize = chachapoly_setauthsize,
++ .encrypt = aead_encrypt,
++ .decrypt = aead_decrypt,
++ .ivsize = 8,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ },
++ .caam = {
++ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
++ OP_ALG_AAI_AEAD,
++ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
++ OP_ALG_AAI_AEAD,
++ },
++ },
++ {
++ .aead = {
++ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
@@ -9040,7 +10745,8 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
-+ err = dpaa2_io_service_register(NULL, nctx);
++ ppriv->dpio = dpaa2_io_service_select(cpu);
++ err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
@@ -9058,6 +10764,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
++ err = -ENOMEM;
+ goto err;
+ }
+
@@ -9072,7 +10779,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
+ }
+
+ for_each_online_cpu(cpu) {
@@ -9088,11 +10795,12 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
++ struct device *dev = priv->dev;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
@@ -9190,7 +10898,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+
+ /* Retry while portal is busy */
+ do {
-+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
++ err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
@@ -9258,7 +10966,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
-+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
++ err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
@@ -9399,21 +11107,31 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+
+ i = 0;
+ for_each_online_cpu(cpu) {
-+ dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", i,
-+ priv->rx_queue_attr[i].fqid,
-+ priv->tx_queue_attr[i].fqid);
++ u8 j;
++
++ j = i % priv->num_pairs;
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
-+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
-+ ppriv->prio = i;
++ ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
++
++ /*
++ * Allow all cores to enqueue, while only some of them
++ * will take part in dequeuing.
++ */
++ if (++i > priv->num_pairs)
++ continue;
++
++ ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
++ ppriv->prio = j;
++
++ dev_info(dev, "pair %d: rx queue %d, tx queue %d\n", j,
++ priv->rx_queue_attr[j].fqid,
++ priv->tx_queue_attr[j].fqid);
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
-+ if (++i == priv->num_pairs)
-+ break;
+ }
+
+ return 0;
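
The rewritten loop above separates enqueue from dequeue resources: every online CPU gets a request FQ, wrapping around with i % num_pairs, but only the first num_pairs CPUs also take a response FQ and a NAPI instance. A standalone sketch of the assignment:

#include <stdio.h>

/*
 * Mirror of the loop above: every CPU may enqueue (its tx pair wraps
 * around), while only the first num_pairs CPUs also dequeue responses.
 */
static void assign_queues(int num_cpus, int num_pairs)
{
	int i = 0;

	for (int cpu = 0; cpu < num_cpus; cpu++) {
		int j = i % num_pairs;

		printf("cpu %d: tx pair %d", cpu, j);
		if (++i <= num_pairs)
			printf(", rx pair %d (dequeues)", j);
		printf("\n");
	}
}

int main(void)
{
	assign_queues(8, 4);	/* 8 CPUs sharing 4 queue pairs */
	return 0;
}
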
@@ -9538,12 +11256,13 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
++ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
-+ if (err < 0) {
++ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
@@ -9551,7 +11270,8 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
-+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
++ if (err != -EPROBE_DEFER)
++ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
@@ -9585,6 +11305,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ (alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /* Skip CHACHA20 algorithms if not supported by device */
++ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
++ !priv->sec_attr.ccha_acc_num)
++ continue;
++
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
@@ -9617,11 +11342,22 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ (c1_alg_sel == OP_ALG_ALGSEL_AES))
+ continue;
+
++ /* Skip CHACHA20 algorithms if not supported by device */
++ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
++ !priv->sec_attr.ccha_acc_num)
++ continue;
++
++ /* Skip POLY1305 algorithms if not supported by device */
++ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
++ !priv->sec_attr.ptha_acc_num)
++ continue;
++
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
-+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
++ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
++ !priv->sec_attr.md_acc_num)
+ continue;
+
+ t_alg->caam.dev = dev;
@@ -9761,7 +11497,8 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
-+ int err = 0, i, id;
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err = 0, i;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
@@ -9791,23 +11528,18 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
-+ /*
-+ * There is no guarantee that preemption is disabled here,
-+ * thus take action.
-+ */
-+ preempt_disable();
-+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
++ ppriv = this_cpu_ptr(priv->ppriv);
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
-+ err = dpaa2_io_service_enqueue_fq(NULL,
-+ priv->tx_queue_attr[id].fqid,
++ err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
++
++ cpu_relax();
+ }
-+ preempt_enable();
+
-+ if (unlikely(err < 0)) {
-+ dev_err(dev, "Error enqueuing frame: %d\n", err);
++ if (unlikely(err)) {
++ dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
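
The enqueue path now retries a busy portal a bounded number of times with cpu_relax() between attempts, instead of the old preempt-disable dance. The retry skeleton, with a stub standing in for dpaa2_io_service_enqueue_fq():

#include <errno.h>
#include <stdio.h>

#define NUM_RETRIES	8	/* the kernel allows num_tx_queues << 1 tries */

static int attempts_left = 3;

/* Stub: pretend the portal is busy for the first couple of attempts. */
static int stub_enqueue(void)
{
	return --attempts_left > 0 ? -EBUSY : 0;
}

static int enqueue_with_retry(void)
{
	int i, err = -EBUSY;

	for (i = 0; i < NUM_RETRIES; i++) {
		err = stub_enqueue();
		if (err != -EBUSY)
			break;	/* success or a hard error: stop retrying */
		/* cpu_relax() would go here in kernel context */
	}
	return err;
}

int main(void)
{
	printf("enqueue -> %d\n", enqueue_with_retry());	/* 0 */
	return 0;
}
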
@@ -9845,7 +11577,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+module_fsl_mc_driver(dpaa2_caam_driver);
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,274 @@
+@@ -0,0 +1,276 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
@@ -9953,6 +11685,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
++ * @dpio: portal used for data path operations
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
@@ -9963,6 +11696,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
++ struct dpaa2_io *dpio;
+};
+
+/*
@@ -10122,7 +11856,15 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+#endif /* _CAAMALG_QI2_H_ */
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
-@@ -62,6 +62,7 @@
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+@@ -62,6 +63,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
@@ -10130,7 +11872,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define CAAM_CRA_PRIORITY 3000
-@@ -71,14 +72,6 @@
+@@ -71,14 +73,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
@@ -10145,7 +11887,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
-@@ -107,6 +100,7 @@ struct caam_hash_ctx {
+@@ -107,6 +101,7 @@ struct caam_hash_ctx {
dma_addr_t sh_desc_update_first_dma;
dma_addr_t sh_desc_fin_dma;
dma_addr_t sh_desc_digest_dma;
@@ -10153,7 +11895,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
struct device *jrdev;
u8 key[CAAM_MAX_HASH_KEY_SIZE];
int ctx_len;
-@@ -218,7 +212,7 @@ static inline int buf_map_to_sec4_sg(str
+@@ -218,7 +213,7 @@ static inline int buf_map_to_sec4_sg(str
}
/* Map state->caam_ctx, and add it to link table */
@@ -10162,7 +11904,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
struct caam_hash_state *state, int ctx_len,
struct sec4_sg_entry *sec4_sg, u32 flag)
{
-@@ -234,68 +228,22 @@ static inline int ctx_map_to_sec4_sg(u32
+@@ -234,68 +229,22 @@ static inline int ctx_map_to_sec4_sg(u32
return 0;
}
@@ -10237,7 +11979,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -304,9 +252,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -304,9 +253,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
@@ -10250,7 +11992,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -315,9 +264,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -315,9 +265,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
@@ -10263,7 +12005,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc,
-@@ -326,9 +276,10 @@ static int ahash_set_sh_desc(struct cryp
+@@ -326,9 +277,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
@@ -10276,7 +12018,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -421,6 +372,7 @@ static int ahash_setkey(struct crypto_ah
+@@ -421,6 +373,7 @@ static int ahash_setkey(struct crypto_ah
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
@@ -10284,7 +12026,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
int ret;
u8 *hashed_key = NULL;
-@@ -441,16 +393,26 @@ static int ahash_setkey(struct crypto_ah
+@@ -441,16 +394,26 @@ static int ahash_setkey(struct crypto_ah
key = hashed_key;
}
@@ -10320,7 +12062,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
kfree(hashed_key);
return ahash_set_sh_desc(ahash);
-@@ -773,7 +735,7 @@ static int ahash_update_ctx(struct ahash
+@@ -773,7 +736,7 @@ static int ahash_update_ctx(struct ahash
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
@@ -10329,7 +12071,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
-@@ -871,9 +833,8 @@ static int ahash_final_ctx(struct ahash_
+@@ -871,9 +834,8 @@ static int ahash_final_ctx(struct ahash_
desc = edesc->hw_desc;
edesc->sec4_sg_bytes = sec4_sg_bytes;
@@ -10340,7 +12082,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
-@@ -967,7 +928,7 @@ static int ahash_finup_ctx(struct ahash_
+@@ -967,7 +929,7 @@ static int ahash_finup_ctx(struct ahash_
edesc->src_nents = src_nents;
@@ -10349,7 +12091,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
-@@ -1126,7 +1087,6 @@ static int ahash_final_no_ctx(struct aha
+@@ -1126,7 +1088,6 @@ static int ahash_final_no_ctx(struct aha
dev_err(jrdev, "unable to map dst\n");
goto unmap;
}
@@ -10357,7 +12099,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#ifdef DEBUG
print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
-@@ -1208,7 +1168,6 @@ static int ahash_update_no_ctx(struct ah
+@@ -1208,7 +1169,6 @@ static int ahash_update_no_ctx(struct ah
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
@@ -10365,7 +12107,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state);
if (ret)
-@@ -1420,7 +1379,6 @@ static int ahash_update_first(struct aha
+@@ -1420,7 +1380,6 @@ static int ahash_update_first(struct aha
}
edesc->src_nents = src_nents;
@@ -10373,7 +12115,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
to_hash);
-@@ -1722,6 +1680,7 @@ static int caam_hash_cra_init(struct cry
+@@ -1722,6 +1681,7 @@ static int caam_hash_cra_init(struct cry
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
dma_addr_t dma_addr;
@@ -10381,7 +12123,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* Get a Job ring from Job Ring driver to ensure in-order
-@@ -1733,10 +1692,13 @@ static int caam_hash_cra_init(struct cry
+@@ -1733,10 +1693,13 @@ static int caam_hash_cra_init(struct cry
return PTR_ERR(ctx->jrdev);
}
@@ -10396,7 +12138,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
caam_jr_free(ctx->jrdev);
-@@ -1771,7 +1733,7 @@ static void caam_hash_cra_exit(struct cr
+@@ -1771,11 +1734,11 @@ static void caam_hash_cra_exit(struct cr
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
offsetof(struct caam_hash_ctx,
sh_desc_update_dma),
@@ -10405,6 +12147,95 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
caam_jr_free(ctx->jrdev);
}
+-static void __exit caam_algapi_hash_exit(void)
++void caam_algapi_hash_exit(void)
+ {
+ struct caam_hash_alg *t_alg, *n;
+
+@@ -1834,56 +1797,38 @@ caam_hash_alloc(struct caam_hash_templat
+ return t_alg;
+ }
+
+-static int __init caam_algapi_hash_init(void)
++int caam_algapi_hash_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+ int i = 0, err = 0;
+- struct caam_drv_private *priv;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ unsigned int md_limit = SHA512_DIGEST_SIZE;
+- u32 cha_inst, cha_vid;
+-
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
++ u32 md_inst, md_vid;
+
+ /*
+ * Register crypto algorithms the device supports. First, identify
+ * presence and attributes of MD block.
+ */
+- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
++ if (priv->era < 10) {
++ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
++ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
++ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
++ } else {
++ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
++
++ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
++ md_inst = mdha & CHA_VER_NUM_MASK;
++ }
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
++ if (!md_inst)
+ return -ENODEV;
+
+ /* Limit digest size based on LP256 */
+- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
++ if (md_vid == CHA_VER_VID_MD_LP256)
+ md_limit = SHA256_DIGEST_SIZE;
+
+ INIT_LIST_HEAD(&hash_list);
+@@ -1934,10 +1879,3 @@ static int __init caam_algapi_hash_init(
+
+ return err;
+ }
+-
+-module_init(caam_algapi_hash_init);
+-module_exit(caam_algapi_hash_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for ahash functions of crypto API");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG");
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,108 @@
@@ -10568,6 +12399,171 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
+#endif /* _CAAMHASH_DESC_H_ */
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for Public Key Cryptography
+ *
+ * Copyright 2016 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
+ * all the desired key parameters, input and output pointers.
+@@ -1017,46 +1018,22 @@ static struct akcipher_alg caam_rsa = {
+ };
+
+ /* Public Key Cryptography module initialization handler */
+-static int __init caam_pkc_init(void)
++int caam_pkc_init(struct device *ctrldev)
+ {
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
+- u32 cha_inst, pk_inst;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
++ u32 pk_inst;
+ int err;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
+-
+ /* Determine public key hardware accelerator presence. */
+- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+- pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
++ if (priv->era < 10)
++ pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
++ CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
++ else
++ pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
+
+ /* Do not register algorithms if PKHA is not present. */
+ if (!pk_inst)
+- return -ENODEV;
++ return 0;
+
+ err = crypto_register_akcipher(&caam_rsa);
+ if (err)
+@@ -1068,14 +1045,7 @@ static int __init caam_pkc_init(void)
+ return err;
+ }
+
+-static void __exit caam_pkc_exit(void)
++void caam_pkc_exit(void)
+ {
+ crypto_unregister_akcipher(&caam_rsa);
+ }
+-
+-module_init(caam_pkc_init);
+-module_exit(caam_pkc_exit);
+-
+-MODULE_LICENSE("Dual BSD/GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
+-MODULE_AUTHOR("Freescale Semiconductor");
+--- a/drivers/crypto/caam/caamrng.c
++++ b/drivers/crypto/caam/caamrng.c
+@@ -2,6 +2,7 @@
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+ * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+@@ -294,49 +295,29 @@ static struct hwrng caam_rng = {
+ .read = caam_read,
+ };
+
+-static void __exit caam_rng_exit(void)
++void caam_rng_exit(void)
+ {
+ caam_jr_free(rng_ctx->jrdev);
+ hwrng_unregister(&caam_rng);
+ kfree(rng_ctx);
+ }
+
+-static int __init caam_rng_init(void)
++int caam_rng_init(struct device *ctrldev)
+ {
+ struct device *dev;
+- struct device_node *dev_node;
+- struct platform_device *pdev;
+- struct device *ctrldev;
+- struct caam_drv_private *priv;
++ u32 rng_inst;
++ struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+ int err;
+
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+- if (!dev_node) {
+- dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+- if (!dev_node)
+- return -ENODEV;
+- }
+-
+- pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
+- return -ENODEV;
+- }
+-
+- ctrldev = &pdev->dev;
+- priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+-
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
+-
+ /* Check for an instantiated RNG before registration */
+- if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+- return -ENODEV;
++ if (priv->era < 10)
++ rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
++ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
++ else
++ rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
++
++ if (!rng_inst)
++ return 0;
+
+ dev = caam_jr_alloc();
+ if (IS_ERR(dev)) {
+@@ -361,10 +342,3 @@ free_caam_alloc:
+ caam_jr_free(dev);
+ return err;
+ }
+-
+-module_init(caam_rng_init);
+-module_exit(caam_rng_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG");
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
@@ -10578,7 +12574,13 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
-@@ -38,6 +39,7 @@
+@@ -34,10 +35,13 @@
+ #include <crypto/des.h>
+ #include <crypto/sha.h>
+ #include <crypto/md5.h>
++#include <crypto/chacha20.h>
++#include <crypto/poly1305.h>
+ #include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
@@ -10588,7 +12590,26 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#include <crypto/internal/rsa.h>
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
-@@ -27,6 +27,8 @@ EXPORT_SYMBOL(caam_imx);
+@@ -2,6 +2,7 @@
+ * Controller-level driver, kernel property detection, initialization
+ *
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ */
+
+ #include <linux/device.h>
+@@ -16,17 +17,15 @@
+ #include "desc_constr.h"
+ #include "ctrl.h"
+
+-bool caam_little_end;
+-EXPORT_SYMBOL(caam_little_end);
+ bool caam_dpaa2;
+ EXPORT_SYMBOL(caam_dpaa2);
+-bool caam_imx;
+-EXPORT_SYMBOL(caam_imx);
+
+ #ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif
@@ -10597,7 +12618,61 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/*
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
-@@ -332,6 +334,9 @@ static int caam_remove(struct platform_d
+@@ -105,7 +104,7 @@ static inline int run_descriptor_deco0(s
+ struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+ struct caam_deco __iomem *deco = ctrlpriv->deco;
+ unsigned int timeout = 100000;
+- u32 deco_dbg_reg, flags;
++ u32 deco_dbg_reg, deco_state, flags;
+ int i;
+
+
+@@ -148,13 +147,22 @@ static inline int run_descriptor_deco0(s
+ timeout = 10000000;
+ do {
+ deco_dbg_reg = rd_reg32(&deco->desc_dbg);
++
++ if (ctrlpriv->era < 10)
++ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
++ DESC_DBG_DECO_STAT_SHIFT;
++ else
++ deco_state = (rd_reg32(&deco->dbg_exec) &
++ DESC_DER_DECO_STAT_MASK) >>
++ DESC_DER_DECO_STAT_SHIFT;
++
+ /*
+ * If an error occurred in the descriptor, then
+ * the DECO status field will be set to 0x0D
+ */
+- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+- DESC_DBG_DECO_STAT_HOST_ERR)
++ if (deco_state == DECO_STAT_HOST_ERR)
+ break;
++
+ cpu_relax();
+ } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+
+@@ -316,15 +324,15 @@ static int caam_remove(struct platform_d
+ of_platform_depopulate(ctrldev);
+
+ #ifdef CONFIG_CAAM_QI
+- if (ctrlpriv->qidev)
+- caam_qi_shutdown(ctrlpriv->qidev);
++ if (ctrlpriv->qi_init)
++ caam_qi_shutdown(ctrldev);
+ #endif
+
+ /*
+ * De-initialize RNG state handles initialized by this driver.
+- * In case of DPAA 2.x, RNG is managed by MC firmware.
++ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+- if (!caam_dpaa2 && ctrlpriv->rng4_sh_init)
++ if (!ctrlpriv->mc_en && ctrlpriv->rng4_sh_init)
+ deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+ /* Shut down debug views */
+@@ -332,6 +340,9 @@ static int caam_remove(struct platform_d
debugfs_remove_recursive(ctrlpriv->dfs_root);
#endif
@@ -10607,7 +12682,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* Unmap controller region */
iounmap(ctrl);
-@@ -433,6 +438,10 @@ static int caam_probe(struct platform_de
+@@ -433,6 +444,10 @@ static int caam_probe(struct platform_de
{.family = "Freescale i.MX"},
{},
};
@@ -10618,19 +12693,223 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
-@@ -615,6 +624,8 @@ static int caam_probe(struct platform_de
- goto iounmap_ctrl;
+@@ -442,7 +457,7 @@ static int caam_probe(struct platform_de
+ struct caam_perfmon *perfmon;
+ #endif
+ u32 scfgr, comp_params;
+- u32 cha_vid_ls;
++ u8 rng_vid;
+ int pg_size;
+ int BLOCK_OFFSET = 0;
+
+@@ -454,15 +469,54 @@ static int caam_probe(struct platform_de
+ dev_set_drvdata(dev, ctrlpriv);
+ nprop = pdev->dev.of_node;
+
++ /* Get configuration properties from device tree */
++ /* First, get register page */
++ ctrl = of_iomap(nprop, 0);
++ if (!ctrl) {
++ dev_err(dev, "caam: of_iomap() failed\n");
++ return -ENOMEM;
++ }
++
++ caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
++ (CSTA_PLEND | CSTA_ALT_PLEND));
+ caam_imx = (bool)soc_device_match(imx_soc);
+
++ comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
++ caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
++ ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
++
++#ifdef CONFIG_CAAM_QI
++ /* If (DPAA 1.x) QI present, check whether dependencies are available */
++ if (ctrlpriv->qi_present && !caam_dpaa2) {
++ ret = qman_is_probed();
++ if (!ret) {
++ ret = -EPROBE_DEFER;
++ goto iounmap_ctrl;
++ } else if (ret < 0) {
++ dev_err(dev, "failing probe due to qman probe error\n");
++ ret = -ENODEV;
++ goto iounmap_ctrl;
++ }
++
++ ret = qman_portals_probed();
++ if (!ret) {
++ ret = -EPROBE_DEFER;
++ goto iounmap_ctrl;
++ } else if (ret < 0) {
++ dev_err(dev, "failing probe due to qman portals probe error\n");
++ ret = -ENODEV;
++ goto iounmap_ctrl;
++ }
++ }
++#endif
++
+ /* Enable clocking */
+ clk = caam_drv_identify_clk(&pdev->dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM ipg clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_ipg = clk;
+
+@@ -471,7 +525,7 @@ static int caam_probe(struct platform_de
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM mem clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_mem = clk;
+
+@@ -480,7 +534,7 @@ static int caam_probe(struct platform_de
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM aclk clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_aclk = clk;
+
+@@ -490,7 +544,7 @@ static int caam_probe(struct platform_de
+ ret = PTR_ERR(clk);
+ dev_err(&pdev->dev,
+ "can't identify CAAM emi_slow clk: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
+ }
+ ctrlpriv->caam_emi_slow = clk;
+ }
+@@ -498,7 +552,7 @@ static int caam_probe(struct platform_de
+ ret = clk_prepare_enable(ctrlpriv->caam_ipg);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
+- return ret;
++ goto iounmap_ctrl;
}
+ ret = clk_prepare_enable(ctrlpriv->caam_mem);
+@@ -523,25 +577,10 @@ static int caam_probe(struct platform_de
+ }
+ }
+
+- /* Get configuration properties from device tree */
+- /* First, get register page */
+- ctrl = of_iomap(nprop, 0);
+- if (ctrl == NULL) {
+- dev_err(dev, "caam: of_iomap() failed\n");
+- ret = -ENOMEM;
+- goto disable_caam_emi_slow;
+- }
+-
+- caam_little_end = !(bool)(rd_reg32(&ctrl->perfmon.status) &
+- (CSTA_PLEND | CSTA_ALT_PLEND));
+-
+- /* Finding the page size for using the CTPR_MS register */
+- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+-
+ /* Allocating the BLOCK_OFFSET based on the supported page size on
+ * the platform
+ */
++ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+ if (pg_size == 0)
+ BLOCK_OFFSET = PG_SIZE_4K;
+ else
+@@ -563,11 +602,14 @@ static int caam_probe(struct platform_de
+ /*
+ * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+ * long pointers in master configuration register.
+- * In case of DPAA 2.x, Management Complex firmware performs
++ * In case of SoCs with Management Complex, MC f/w performs
+ * the configuration.
+ */
+- caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
+- if (!caam_dpaa2)
++ np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
++ ctrlpriv->mc_en = !!np;
++ of_node_put(np);
++
++ if (!ctrlpriv->mc_en)
+ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK | MCFGR_LONG_PTR,
+ MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
+ MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+@@ -612,14 +654,11 @@ static int caam_probe(struct platform_de
+ }
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
+- goto iounmap_ctrl;
++ goto disable_caam_emi_slow;
+ }
+
+- ret = of_platform_populate(nprop, caam_match, NULL, dev);
+- if (ret) {
+- dev_err(dev, "JR platform devices creation error\n");
+- goto iounmap_ctrl;
+- }
+ ctrlpriv->era = caam_get_era();
++ ctrlpriv->domain = iommu_get_domain_for_dev(dev);
+
+ #ifdef CONFIG_DEBUG_FS
+ /*
+@@ -633,21 +672,7 @@ static int caam_probe(struct platform_de
+ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+ #endif
+
+- ring = 0;
+- for_each_available_child_of_node(nprop, np)
+- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+- ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
+- ((__force uint8_t *)ctrl +
+- (ring + JR_BLOCK_NUMBER) *
+- BLOCK_OFFSET
+- );
+- ctrlpriv->total_jobrs++;
+- ring++;
+- }
+-
+ /* Check to see if (DPAA 1.x) QI present. If so, enable */
+- ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);
+ if (ctrlpriv->qi_present && !caam_dpaa2) {
+ ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
+ ((__force uint8_t *)ctrl +
+@@ -664,6 +689,25 @@ static int caam_probe(struct platform_de
+ #endif
+ }
+
++ ret = of_platform_populate(nprop, caam_match, NULL, dev);
++ if (ret) {
++ dev_err(dev, "JR platform devices creation error\n");
++ goto shutdown_qi;
++ }
++
++ ring = 0;
++ for_each_available_child_of_node(nprop, np)
++ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
++ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
++ ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
++ ((__force uint8_t *)ctrl +
++ (ring + JR_BLOCK_NUMBER) *
++ BLOCK_OFFSET
++ );
++ ctrlpriv->total_jobrs++;
++ ring++;
++ }
+
- ret = of_platform_populate(nprop, caam_match, NULL, dev);
- if (ret) {
- dev_err(dev, "JR platform devices creation error\n");
-@@ -671,6 +682,16 @@ static int caam_probe(struct platform_de
+ /* If no QI and no rings specified, quit and go home */
+ if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
+ dev_err(dev, "no queues configured, terminating\n");
+@@ -671,15 +715,29 @@ static int caam_probe(struct platform_de
goto caam_remove;
}
+- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+ caam_dma_pdev_info.parent = dev;
+ caam_dma_pdev_info.dma_mask = dma_get_mask(dev);
+ caam_dma_dev = platform_device_register_full(&caam_dma_pdev_info);
@@ -10641,21 +12920,73 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ set_dma_ops(&caam_dma_dev->dev, get_dma_ops(dev));
+ }
+
- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
++ if (ctrlpriv->era < 10)
++ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
++ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
++ else
++ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
++ CHA_VER_VID_SHIFT;
/*
-@@ -746,7 +767,7 @@ static int caam_probe(struct platform_de
+ * If SEC has RNG version >= 4 and RNG state handle has not been
+ * already instantiated, do RNG instantiation
+- * In case of DPAA 2.x, RNG is managed by MC firmware.
++ * In case of SoCs with Management Complex, RNG is managed by MC f/w.
+ */
+- if (!caam_dpaa2 &&
+- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
++ if (!ctrlpriv->mc_en && rng_vid >= 4) {
+ ctrlpriv->rng4_sh_init =
+ rd_reg32(&ctrl->r4tst[0].rdsta);
+ /*
+@@ -746,10 +804,9 @@ static int caam_probe(struct platform_de
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+- caam_dpaa2 ? "yes" : "no");
+ ctrlpriv->era);
- dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present,
- caam_dpaa2 ? "yes" : "no");
++ dev_info(dev, "job rings = %d, qi = %d\n",
++ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+ #ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("rq_dequeued", S_IRUSR | S_IRGRP | S_IROTH,
+@@ -816,8 +873,11 @@ caam_remove:
+ caam_remove(pdev);
+ return ret;
+
+-iounmap_ctrl:
+- iounmap(ctrl);
++shutdown_qi:
++#ifdef CONFIG_CAAM_QI
++ if (ctrlpriv->qi_init)
++ caam_qi_shutdown(dev);
++#endif
+ disable_caam_emi_slow:
+ if (ctrlpriv->caam_emi_slow)
+ clk_disable_unprepare(ctrlpriv->caam_emi_slow);
+@@ -827,6 +887,8 @@ disable_caam_mem:
+ clk_disable_unprepare(ctrlpriv->caam_mem);
+ disable_caam_ipg:
+ clk_disable_unprepare(ctrlpriv->caam_ipg);
++iounmap_ctrl:
++ iounmap(ctrl);
+ return ret;
+ }
+
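
The same era split drives the DECO polling change earlier in this file: the 4-bit DECO status moves from the DxDDR debug register (desc_dbg) to the new DxDER exec register (dbg_exec) at era 10. A sketch of that check, using the masks the patch adds to regs.h and taking raw register words instead of rd_reg32() reads:

#include <stdbool.h>
#include <stdint.h>

#define DESC_DBG_DECO_STAT_MASK   0x00F00000
#define DESC_DBG_DECO_STAT_SHIFT  20
#define DESC_DER_DECO_STAT_MASK   0x000F0000
#define DESC_DER_DECO_STAT_SHIFT  16
#define DECO_STAT_HOST_ERR        0xD

/* Era < 10 keeps the DECO state in DxDDR; era 10+ moves it to DxDER. */
static bool deco_host_error(int era, uint32_t desc_dbg, uint32_t dbg_exec)
{
    uint32_t state;

    if (era < 10)
        state = (desc_dbg & DESC_DBG_DECO_STAT_MASK) >>
                DESC_DBG_DECO_STAT_SHIFT;
    else
        state = (dbg_exec & DESC_DER_DECO_STAT_MASK) >>
                DESC_DER_DECO_STAT_SHIFT;

    return state == DECO_STAT_HOST_ERR;
}
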
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
-@@ -42,6 +42,7 @@
+@@ -4,6 +4,7 @@
+ * Definitions to support CAAM descriptor instruction generation
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ */
+
+ #ifndef DESC_H
+@@ -42,6 +43,7 @@
#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
@@ -10663,7 +12994,28 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define CMD_STORE (0x0a << CMD_SHIFT)
#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
-@@ -355,6 +356,7 @@
+@@ -242,6 +244,7 @@
+ #define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
++#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+ /* Offset in source/destination */
+@@ -284,6 +287,12 @@
+ #define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+ #define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
++/* Special Length definitions when dst=sm, nfifo-{sm,m} */
++#define LDLEN_MATH0 0
++#define LDLEN_MATH1 1
++#define LDLEN_MATH2 2
++#define LDLEN_MATH3 3
++
+ /*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+@@ -355,6 +364,7 @@
#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
@@ -10671,7 +13023,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* Other types. Need to OR in last/flush bits as desired */
#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
-@@ -408,6 +410,7 @@
+@@ -408,6 +418,7 @@
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
@@ -10679,7 +13031,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
-@@ -444,6 +447,18 @@
+@@ -444,6 +455,18 @@
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
#define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
#define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
@@ -10698,7 +13050,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
-@@ -1093,6 +1108,22 @@
+@@ -1093,6 +1116,22 @@
/* MacSec protinfos */
#define OP_PCL_MACSEC 0x0001
@@ -10721,7 +13073,60 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* PKI unidirectional protocol protinfo bits */
#define OP_PCL_PKPROT_TEST 0x0008
#define OP_PCL_PKPROT_DECRYPT 0x0004
-@@ -1440,10 +1471,11 @@
+@@ -1105,6 +1144,12 @@
+ #define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
+ #define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
+
++/* version register fields */
++#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
++#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
++#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
++#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
++
+ #define OP_ALG_ALGSEL_SHIFT 16
+ #define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+ #define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+@@ -1124,6 +1169,8 @@
+ #define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+ #define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+ #define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
++#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
++#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
+
+ #define OP_ALG_AAI_SHIFT 4
+ #define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
+@@ -1171,6 +1218,11 @@
+ #define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+ #define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
++/* Chacha20 AAI set */
++#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
++#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
++#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
++
+ /* hmac/smac AAI set */
+ #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+ #define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+@@ -1359,6 +1411,7 @@
+ #define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+ #define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+ #define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
++#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
+
+ #define MOVE_DEST_SHIFT 16
+ #define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+@@ -1385,6 +1438,10 @@
+
+ #define MOVELEN_MRSEL_SHIFT 0
+ #define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
++#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
++#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
++#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
++#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+ /*
+ * MATH Command Constructs
+@@ -1440,10 +1497,11 @@
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
@@ -10734,7 +13139,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
/* Destination selectors */
#define MATH_DEST_SHIFT 8
-@@ -1452,6 +1484,7 @@
+@@ -1452,6 +1510,7 @@
#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
@@ -10742,7 +13147,15 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
-@@ -1624,4 +1657,31 @@
+@@ -1560,6 +1619,7 @@
+ #define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
++#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+ #define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+@@ -1624,4 +1684,31 @@
/* Frame Descriptor Command for Replacement Job Descriptor */
#define FD_CMD_REPLACE_JOB_DESC 0x20000000
@@ -10794,15 +13207,16 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
-@@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * co
+@@ -189,6 +189,8 @@ static inline u32 *append_##cmd(u32 * co
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
+APPEND_CMD_RET(moveb, MOVEB)
++APPEND_CMD_RET(move_len, MOVE_LEN)
static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
-@@ -271,7 +272,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
+@@ -271,7 +273,7 @@ APPEND_SEQ_PTR_INTLEN(in, IN)
APPEND_SEQ_PTR_INTLEN(out, OUT)
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
@@ -10811,7 +13225,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
unsigned int len, u32 options) \
{ \
PRINT_POS; \
-@@ -312,7 +313,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
+@@ -312,7 +314,7 @@ APPEND_CMD_PTR_LEN(seq_out_ptr, SEQ_OUT_
* from length of immediate data provided, e.g., split keys
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
@@ -10820,7 +13234,20 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
-@@ -452,7 +453,7 @@ struct alginfo {
+@@ -327,7 +329,11 @@ static inline void append_##cmd##_imm_##
+ u32 options) \
+ { \
+ PRINT_POS; \
+- append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
++ if (options & LDST_LEN_MASK) \
++ append_cmd(desc, CMD_##op | IMMEDIATE | options); \
++ else \
++ append_cmd(desc, CMD_##op | IMMEDIATE | options | \
++ sizeof(type)); \
+ append_cmd(desc, immediate); \
+ }
+ APPEND_CMD_RAW_IMM(load, LOAD, u32);
+@@ -452,7 +458,7 @@ struct alginfo {
unsigned int keylen_pad;
union {
dma_addr_t key_dma;
@@ -10829,7 +13256,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
};
bool key_inline;
};
-@@ -496,4 +497,45 @@ static inline int desc_inline_query(unsi
+@@ -496,4 +502,45 @@ static inline int desc_inline_query(unsi
return (rem_bytes >= 0) ? 0 : -1;
}
@@ -12471,7 +14898,20 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+#endif /* _DPSECI_CMD_H_ */
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
-@@ -108,6 +108,54 @@ static const struct {
+@@ -50,6 +50,12 @@ void caam_dump_sg(const char *level, con
+ #endif /* DEBUG */
+ EXPORT_SYMBOL(caam_dump_sg);
+
++bool caam_little_end;
++EXPORT_SYMBOL(caam_little_end);
++
++bool caam_imx;
++EXPORT_SYMBOL(caam_imx);
++
+ static const struct {
+ u8 value;
+ const char *error_text;
+@@ -108,6 +114,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
@@ -12526,7 +14966,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
static const char * const cha_id_list[] = {
"",
"AES",
-@@ -236,6 +284,27 @@ static void report_deco_status(struct de
+@@ -236,6 +290,27 @@ static void report_deco_status(struct de
status, error, idx_str, idx, err_str, err_err_code);
}
@@ -12554,7 +14994,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
-@@ -250,7 +319,7 @@ static void report_cond_code_status(stru
+@@ -250,7 +325,7 @@ static void report_cond_code_status(stru
status, error, __func__);
}
@@ -12563,7 +15003,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
-@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jr
+@@ -262,7 +337,7 @@ void caam_jr_strstatus(struct device *jr
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
@@ -12572,7 +15012,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
-@@ -288,4 +357,4 @@ void caam_jr_strstatus(struct device *jr
+@@ -288,4 +363,4 @@ void caam_jr_strstatus(struct device *jr
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
@@ -12595,17 +15035,140 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
int rowsize, int groupsize, struct scatterlist *sg,
--- a/drivers/crypto/caam/intern.h
+++ b/drivers/crypto/caam/intern.h
-@@ -84,6 +84,7 @@ struct caam_drv_private {
+@@ -65,10 +65,6 @@ struct caam_drv_private_jr {
+ * Driver-private storage for a single CAAM block instance
+ */
+ struct caam_drv_private {
+-#ifdef CONFIG_CAAM_QI
+- struct device *qidev;
+-#endif
+-
+ /* Physical-presence section */
+ struct caam_ctrl __iomem *ctrl; /* controller region */
+ struct caam_deco __iomem *deco; /* DECO/CCB views */
+@@ -76,14 +72,21 @@ struct caam_drv_private {
+ struct caam_queue_if __iomem *qi; /* QI control region */
+ struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
+
++ struct iommu_domain *domain;
++
+ /*
+ * Detected geometry block. Filled in from device tree if powerpc,
+ * or from register-based version detection code
+ */
+ u8 total_jobrs; /* Total Job Rings in device */
u8 qi_present; /* Nonzero if QI present in device */
++#ifdef CONFIG_CAAM_QI
++ u8 qi_init; /* Nonzero if QI has been initialized */
++#endif
++ u8 mc_en; /* Nonzero if MC f/w is active */
int secvio_irq; /* Security violation interrupt number */
int virt_en; /* Virtualization enabled in CAAM */
+ int era; /* CAAM Era (internal HW revision) */
#define RNG4_MAX_HANDLES 2
/* RNG4 block */
+@@ -108,8 +111,95 @@ struct caam_drv_private {
+ #endif
+ };
+
+-void caam_jr_algapi_init(struct device *dev);
+-void caam_jr_algapi_remove(struct device *dev);
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API
++
++int caam_algapi_init(struct device *dev);
++void caam_algapi_exit(void);
++
++#else
++
++static inline int caam_algapi_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_algapi_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API
++
++int caam_algapi_hash_init(struct device *dev);
++void caam_algapi_hash_exit(void);
++
++#else
++
++static inline int caam_algapi_hash_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_algapi_hash_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API
++
++int caam_pkc_init(struct device *dev);
++void caam_pkc_exit(void);
++
++#else
++
++static inline int caam_pkc_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_pkc_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API
++
++int caam_rng_init(struct device *dev);
++void caam_rng_exit(void);
++
++#else
++
++static inline int caam_rng_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_rng_exit(void)
++{
++}
++
++#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */
++
++#ifdef CONFIG_CAAM_QI
++
++int caam_qi_algapi_init(struct device *dev);
++void caam_qi_algapi_exit(void);
++
++#else
++
++static inline int caam_qi_algapi_init(struct device *dev)
++{
++ return 0;
++}
++
++static inline void caam_qi_algapi_exit(void)
++{
++}
++
++#endif /* CONFIG_CAAM_QI */
+
+ #ifdef CONFIG_DEBUG_FS
+ static int caam_debugfs_u64_get(void *data, u64 *val)
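
The stub pairs above exist so the jr.c changes that follow can call every sub-module unconditionally: when a sub-driver is compiled out, its init collapses to return 0. Registration itself happens once per system, keyed on a mutex-guarded device count. A userspace sketch of that register-once pattern, assuming a pthread mutex in place of the kernel mutex and one empty stub standing in for the whole caam_algapi_init()/..._exit() family:

#include <pthread.h>

struct device;

/* Stubs for caam_algapi_init(), caam_algapi_hash_init(), ... */
static int  algapi_init(struct device *dev) { (void)dev; return 0; }
static void algapi_exit(void)               { }

static pthread_mutex_t algs_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int active_devs;

static void register_algs(struct device *dev)
{
    pthread_mutex_lock(&algs_lock);
    if (++active_devs == 1)     /* first job ring probed */
        algapi_init(dev);       /* return values ignored, as in the patch */
    pthread_mutex_unlock(&algs_lock);
}

static void unregister_algs(void)
{
    pthread_mutex_lock(&algs_lock);
    if (--active_devs == 0)     /* last job ring removed */
        algapi_exit();
    pthread_mutex_unlock(&algs_lock);
}
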
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
-@@ -23,6 +23,14 @@ struct jr_driver_data {
+@@ -23,6 +23,52 @@ struct jr_driver_data {
static struct jr_driver_data driver_data;
@@ -12617,10 +15180,58 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+}
+EXPORT_SYMBOL(caam_jr_driver_probed);
+
++static DEFINE_MUTEX(algs_lock);
++static unsigned int active_devs;
++
++static void register_algs(struct device *dev)
++{
++ mutex_lock(&algs_lock);
++
++ if (++active_devs != 1)
++ goto algs_unlock;
++
++ caam_algapi_init(dev);
++ caam_algapi_hash_init(dev);
++ caam_pkc_init(dev);
++ caam_rng_init(dev);
++ caam_qi_algapi_init(dev);
++
++algs_unlock:
++ mutex_unlock(&algs_lock);
++}
++
++static void unregister_algs(void)
++{
++ mutex_lock(&algs_lock);
++
++ if (--active_devs != 0)
++ goto algs_unlock;
++
++ caam_qi_algapi_exit();
++
++ caam_rng_exit();
++ caam_pkc_exit();
++ caam_algapi_hash_exit();
++ caam_algapi_exit();
++
++algs_unlock:
++ mutex_unlock(&algs_lock);
++}
++
static int caam_reset_hw_jr(struct device *dev)
{
struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
-@@ -119,6 +127,8 @@ static int caam_jr_remove(struct platfor
+@@ -108,6 +154,9 @@ static int caam_jr_remove(struct platfor
+ return -EBUSY;
+ }
+
++ /* Unregister JR-based RNG & crypto algorithms */
++ unregister_algs();
++
+ /* Remove the node from Physical JobR list maintained by driver */
+ spin_lock(&driver_data.jr_alloc_lock);
+ list_del(&jrpriv->list_node);
+@@ -119,6 +168,8 @@ static int caam_jr_remove(struct platfor
dev_err(jrdev, "Failed to shut down job ring\n");
irq_dispose_mapping(jrpriv->irq);
@@ -12629,7 +15240,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return ret;
}
-@@ -282,6 +292,36 @@ struct device *caam_jr_alloc(void)
+@@ -282,6 +333,36 @@ struct device *caam_jr_alloc(void)
EXPORT_SYMBOL(caam_jr_alloc);
/**
@@ -12666,10 +15277,11 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
* caam_jr_free() - Free the Job Ring
* @rdev - points to the dev that identifies the Job ring to
* be released.
-@@ -539,6 +579,8 @@ static int caam_jr_probe(struct platform
+@@ -539,6 +620,9 @@ static int caam_jr_probe(struct platform
atomic_set(&jrpriv->tfm_count, 0);
++ register_algs(jrdev->parent);
+ jr_driver_probed++;
+
return 0;
@@ -12776,7 +15388,32 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#include "regs.h"
#include "qi.h"
-@@ -105,23 +105,21 @@ static struct kmem_cache *qi_cache;
+@@ -58,11 +58,9 @@ static DEFINE_PER_CPU(int, last_cpu);
+ /*
+ * caam_qi_priv - CAAM QI backend private params
+ * @cgr: QMan congestion group
+- * @qi_pdev: platform device for QI backend
+ */
+ struct caam_qi_priv {
+ struct qman_cgr cgr;
+- struct platform_device *qi_pdev;
+ };
+
+ static struct caam_qi_priv qipriv ____cacheline_aligned;
+@@ -102,26 +100,34 @@ static int mod_init_cpu;
+ */
+ static struct kmem_cache *qi_cache;
+
++static void *caam_iova_to_virt(struct iommu_domain *domain,
++ dma_addr_t iova_addr)
++{
++ phys_addr_t phys_addr;
++
++ phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
++
++ return phys_to_virt(phys_addr);
++}
++
int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
struct qm_fd fd;
@@ -12806,7 +15443,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
if (likely(!ret))
return 0;
-@@ -137,7 +135,7 @@ int caam_qi_enqueue(struct device *qidev
+@@ -137,20 +143,21 @@ int caam_qi_enqueue(struct device *qidev
EXPORT_SYMBOL(caam_qi_enqueue);
static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
@@ -12815,7 +15452,8 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
{
const struct qm_fd *fd;
struct caam_drv_req *drv_req;
-@@ -145,7 +143,7 @@ static void caam_fq_ern_cb(struct qman_p
+ struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
++ struct caam_drv_private *priv = dev_get_drvdata(qidev);
fd = &msg->ern.fd;
@@ -12824,7 +15462,13 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
dev_err(qidev, "Non-compound FD from CAAM\n");
return;
}
-@@ -180,20 +178,22 @@ static struct qman_fq *create_caam_req_f
+
+- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
++ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
+ if (!drv_req) {
+ dev_err(qidev,
+ "Can't find original request for CAAM response\n");
+@@ -180,20 +187,22 @@ static struct qman_fq *create_caam_req_f
req_fq->cb.fqs = NULL;
ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
@@ -12856,7 +15500,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
opts.fqd.cgid = qipriv.cgr.cgrid;
ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
-@@ -207,7 +207,7 @@ static struct qman_fq *create_caam_req_f
+@@ -207,7 +216,7 @@ static struct qman_fq *create_caam_req_f
return req_fq;
init_req_fq_fail:
@@ -12865,7 +15509,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
create_req_fq_fail:
kfree(req_fq);
return ERR_PTR(ret);
-@@ -275,7 +275,7 @@ empty_fq:
+@@ -275,7 +284,7 @@ empty_fq:
if (ret)
dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
@@ -12874,7 +15518,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
kfree(fq);
return ret;
-@@ -292,7 +292,7 @@ static int empty_caam_fq(struct qman_fq
+@@ -292,7 +301,7 @@ static int empty_caam_fq(struct qman_fq
if (ret)
return ret;
@@ -12883,11 +15527,29 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
break;
msleep(20);
-@@ -572,22 +572,27 @@ static enum qman_cb_dqrr_result caam_rsp
+@@ -495,7 +504,7 @@ EXPORT_SYMBOL(caam_drv_ctx_rel);
+ int caam_qi_shutdown(struct device *qidev)
+ {
+ int i, ret;
+- struct caam_qi_priv *priv = dev_get_drvdata(qidev);
++ struct caam_qi_priv *priv = &qipriv;
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+
+@@ -528,7 +537,6 @@ int caam_qi_shutdown(struct device *qide
+ /* Now that we're done with the CGRs, restore the cpus allowed mask */
+ set_cpus_allowed_ptr(current, &old_cpumask);
+
+- platform_device_unregister(priv->qi_pdev);
+ return ret;
+ }
+
+@@ -572,22 +580,28 @@ static enum qman_cb_dqrr_result caam_rsp
struct caam_drv_req *drv_req;
const struct qm_fd *fd;
struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
- u32 status;
++ struct caam_drv_private *priv = dev_get_drvdata(qidev);
if (caam_qi_napi_schedule(p, caam_napi))
return qman_cb_dqrr_stop;
@@ -12899,25 +15561,25 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
+ if (unlikely(fd->status)) {
+ u32 ssrc = fd->status & JRSTA_SSRC_MASK;
+ u8 err_id = fd->status & JRSTA_CCBERR_ERRID_MASK;
-+
+
+- if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
+ if (ssrc != JRSTA_SSRC_CCB_ERROR ||
+ err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(qidev, "Error: %#x in CAAM response FD\n",
+ fd->status);
+ }
-
-- if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
++
+ if (unlikely(fd->format != qm_fd_compound)) {
dev_err(qidev, "Non-compound FD from CAAM\n");
return qman_cb_dqrr_consume;
}
- drv_req = (struct caam_drv_req *)phys_to_virt(qm_fd_addr_get64(fd));
-+ drv_req = (struct caam_drv_req *)phys_to_virt(fd->addr);
++ drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
if (unlikely(!drv_req)) {
dev_err(qidev,
"Can't find original request for caam response\n");
-@@ -597,7 +602,7 @@ static enum qman_cb_dqrr_result caam_rsp
+@@ -597,7 +611,7 @@ static enum qman_cb_dqrr_result caam_rsp
dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);
@@ -12926,7 +15588,7 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
return qman_cb_dqrr_consume;
}
-@@ -621,17 +626,18 @@ static int alloc_rsp_fq_cpu(struct devic
+@@ -621,17 +635,18 @@ static int alloc_rsp_fq_cpu(struct devic
return -ENODEV;
}
@@ -12953,7 +15615,19 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret) {
-@@ -662,8 +668,7 @@ static int init_cgr(struct device *qidev
+@@ -650,9 +665,8 @@ static int init_cgr(struct device *qidev
+ {
+ int ret;
+ struct qm_mcc_initcgr opts;
+- const u64 cpus = *(u64 *)qman_affine_cpus();
+- const int num_cpus = hweight64(cpus);
+- const u64 val = num_cpus * MAX_RSP_FQ_BACKLOG_PER_CPU;
++ const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
++ MAX_RSP_FQ_BACKLOG_PER_CPU;
+
+ ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
+ if (ret) {
+@@ -662,8 +676,7 @@ static int init_cgr(struct device *qidev
qipriv.cgr.cb = cgr_cb;
memset(&opts, 0, sizeof(opts));
@@ -12963,6 +15637,81 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
opts.cgr.cscn_en = QM_CGR_EN;
opts.cgr.mode = QMAN_CGR_MODE_FRAME;
qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);
+@@ -708,15 +721,10 @@ static void free_rsp_fqs(void)
+ int caam_qi_init(struct platform_device *caam_pdev)
+ {
+ int err, i;
+- struct platform_device *qi_pdev;
+ struct device *ctrldev = &caam_pdev->dev, *qidev;
+ struct caam_drv_private *ctrlpriv;
+ const cpumask_t *cpus = qman_affine_cpus();
+ struct cpumask old_cpumask = current->cpus_allowed;
+- static struct platform_device_info qi_pdev_info = {
+- .name = "caam_qi",
+- .id = PLATFORM_DEVID_NONE
+- };
+
+ /*
+ * QMAN requires CGRs to be removed from same CPU+portal from where it
+@@ -728,24 +736,13 @@ int caam_qi_init(struct platform_device
+ mod_init_cpu = cpumask_first(cpus);
+ set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
+
+- qi_pdev_info.parent = ctrldev;
+- qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
+- qi_pdev = platform_device_register_full(&qi_pdev_info);
+- if (IS_ERR(qi_pdev))
+- return PTR_ERR(qi_pdev);
+- set_dma_ops(&qi_pdev->dev, get_dma_ops(ctrldev));
+-
+ ctrlpriv = dev_get_drvdata(ctrldev);
+- qidev = &qi_pdev->dev;
+-
+- qipriv.qi_pdev = qi_pdev;
+- dev_set_drvdata(qidev, &qipriv);
++ qidev = ctrldev;
+
+ /* Initialize the congestion detection */
+ err = init_cgr(qidev);
+ if (err) {
+ dev_err(qidev, "CGR initialization failed: %d\n", err);
+- platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+@@ -754,7 +751,6 @@ int caam_qi_init(struct platform_device
+ if (err) {
+ dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
+ free_rsp_fqs();
+- platform_device_unregister(qi_pdev);
+ return err;
+ }
+
+@@ -777,15 +773,11 @@ int caam_qi_init(struct platform_device
+ napi_enable(irqtask);
+ }
+
+- /* Hook up QI device to parent controlling caam device */
+- ctrlpriv->qidev = qidev;
+-
+ qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
+ SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(qidev, "Can't allocate CAAM cache\n");
+ free_rsp_fqs();
+- platform_device_unregister(qi_pdev);
+ return -ENOMEM;
+ }
+
+@@ -795,6 +787,8 @@ int caam_qi_init(struct platform_device
+ debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
+ &times_congested, &caam_fops_u64_ro);
+ #endif
++
++ ctrlpriv->qi_init = 1;
+ dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
+ return 0;
+ }
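
One behavioural detail in the dequeue-callback rework above: a nonzero frame-descriptor status is now logged only when it is not a CCB ICV-check failure, since integrity mismatches already reach callers through the normal crypto completion path and would otherwise flood the log. A sketch of that triage; the JRSTA_* names mirror regs.h, but treat the numeric values here as illustrative:

#include <stdbool.h>
#include <stdint.h>

#define JRSTA_SSRC_MASK           0xf0000000  /* status source nibble */
#define JRSTA_SSRC_CCB_ERROR      0x20000000  /* CCB-reported error */
#define JRSTA_CCBERR_ERRID_MASK   0x000000ff
#define JRSTA_CCBERR_ERRID_ICVCHK 0x0000000a  /* ICV check failed */

/* true if the status deserves a dev_err(); ICV failures stay quiet */
static bool fd_status_needs_logging(uint32_t status)
{
    if (!status)
        return false;   /* success */

    return (status & JRSTA_SSRC_MASK) != JRSTA_SSRC_CCB_ERROR ||
           (status & JRSTA_CCBERR_ERRID_MASK) != JRSTA_CCBERR_ERRID_ICVCHK;
}
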
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -9,7 +9,7 @@
@@ -12976,7 +15725,128 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#include "desc_constr.h"
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
-@@ -627,6 +627,8 @@ struct caam_job_ring {
+@@ -3,6 +3,7 @@
+ * CAAM hardware register-level view
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright 2018 NXP
+ */
+
+ #ifndef REGS_H
+@@ -211,6 +212,47 @@ struct jr_outentry {
+ u32 jrstatus; /* Status for completed descriptor */
+ } __packed;
+
++/* Version registers (Era 10+) e80-eff */
++struct version_regs {
++ u32 crca; /* CRCA_VERSION */
++ u32 afha; /* AFHA_VERSION */
++ u32 kfha; /* KFHA_VERSION */
++ u32 pkha; /* PKHA_VERSION */
++ u32 aesa; /* AESA_VERSION */
++ u32 mdha; /* MDHA_VERSION */
++ u32 desa; /* DESA_VERSION */
++ u32 snw8a; /* SNW8A_VERSION */
++ u32 snw9a; /* SNW9A_VERSION */
++ u32 zuce; /* ZUCE_VERSION */
++ u32 zuca; /* ZUCA_VERSION */
++ u32 ccha; /* CCHA_VERSION */
++ u32 ptha; /* PTHA_VERSION */
++ u32 rng; /* RNG_VERSION */
++ u32 trng; /* TRNG_VERSION */
++ u32 aaha; /* AAHA_VERSION */
++ u32 rsvd[10];
++ u32 sr; /* SR_VERSION */
++ u32 dma; /* DMA_VERSION */
++ u32 ai; /* AI_VERSION */
++ u32 qi; /* QI_VERSION */
++ u32 jr; /* JR_VERSION */
++ u32 deco; /* DECO_VERSION */
++};
++
++/* Version registers bitfields */
++
++/* Number of CHAs instantiated */
++#define CHA_VER_NUM_MASK 0xffull
++/* CHA Miscellaneous Information */
++#define CHA_VER_MISC_SHIFT 8
++#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
++/* CHA Revision Number */
++#define CHA_VER_REV_SHIFT 16
++#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
++/* CHA Version ID */
++#define CHA_VER_VID_SHIFT 24
++#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
++
+ /*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ * CAAM Global Status/Component Version IDs
+@@ -223,15 +265,13 @@ struct jr_outentry {
+ #define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
+
+ /*
+- * CHA version IDs / instantiation bitfields
++ * CHA version IDs / instantiation bitfields (< Era 10)
+ * Defined for use with the cha_id fields in perfmon, but the same shift/mask
+ * selectors can be used to pull out the number of instantiated blocks within
+ * cha_num fields in perfmon because the locations are the same.
+ */
+ #define CHA_ID_LS_AES_SHIFT 0
+ #define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+-#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
+-#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
+
+ #define CHA_ID_LS_DES_SHIFT 4
+ #define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
+@@ -241,9 +281,6 @@ struct jr_outentry {
+
+ #define CHA_ID_LS_MD_SHIFT 12
+ #define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
+-#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
+-#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
+-#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
+
+ #define CHA_ID_LS_RNG_SHIFT 16
+ #define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
+@@ -269,6 +306,13 @@ struct jr_outentry {
+ #define CHA_ID_MS_JR_SHIFT 28
+ #define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
+
++/* Specific CHA version IDs */
++#define CHA_VER_VID_AES_LP 0x3ull
++#define CHA_VER_VID_AES_HP 0x4ull
++#define CHA_VER_VID_MD_LP256 0x0ull
++#define CHA_VER_VID_MD_LP512 0x1ull
++#define CHA_VER_VID_MD_HP 0x2ull
++
+ struct sec_vid {
+ u16 ip_id;
+ u8 maj_rev;
+@@ -473,8 +517,10 @@ struct caam_ctrl {
+ struct rng4tst r4tst[2];
+ };
+
+- u32 rsvd9[448];
++ u32 rsvd9[416];
+
++ /* Version registers - introduced with era 10 e80-eff */
++ struct version_regs vreg;
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+ };
+@@ -564,8 +610,10 @@ struct caam_job_ring {
+ u32 rsvd11;
+ u32 jrcommand; /* JRCRx - JobR command */
+
+- u32 rsvd12[932];
++ u32 rsvd12[900];
+
++ /* Version registers - introduced with era 10 e80-eff */
++ struct version_regs vreg;
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+ };
+@@ -627,6 +675,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
@@ -12985,6 +15855,28 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
+@@ -870,13 +920,19 @@ struct caam_deco {
+ u32 rsvd29[48];
+ u32 descbuf[64]; /* DxDESB - Descriptor buffer */
+ u32 rscvd30[193];
+-#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+ #define DESC_DBG_DECO_STAT_VALID 0x80000000
+ #define DESC_DBG_DECO_STAT_MASK 0x00F00000
++#define DESC_DBG_DECO_STAT_SHIFT 20
+ u32 desc_dbg; /* DxDDR - DECO Debug Register */
+- u32 rsvd31[126];
++ u32 rsvd31[13];
++#define DESC_DER_DECO_STAT_MASK 0x000F0000
++#define DESC_DER_DECO_STAT_SHIFT 16
++ u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
++ u32 rsvd32[112];
+ };
+
++#define DECO_STAT_HOST_ERR 0xD
++
+ #define DECO_JQCR_WHL 0x20000000
+ #define DECO_JQCR_FOUR 0x10000000
+
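
The version_regs block added above gives every era-10+ accelerator a uniform 32-bit layout: instance count, misc info, revision and version ID, one byte each from LSB upward, which is why the single set of CHA_VER_* masks serves all of them. A worked decode against a hypothetical RNG_VERSION read (the masks are the hunk's; the unsigned-long-long arithmetic only mirrors the kernel's ull constants):

#include <stdint.h>
#include <stdio.h>

#define CHA_VER_NUM_MASK   0xffull                       /* instances */
#define CHA_VER_MISC_SHIFT 8
#define CHA_VER_MISC_MASK  (0xffull << CHA_VER_MISC_SHIFT)
#define CHA_VER_REV_SHIFT  16
#define CHA_VER_REV_MASK   (0xffull << CHA_VER_REV_SHIFT)
#define CHA_VER_VID_SHIFT  24
#define CHA_VER_VID_MASK   (0xffull << CHA_VER_VID_SHIFT)

int main(void)
{
    uint32_t rng = 0x04010002;  /* hypothetical RNG_VERSION value */

    printf("vid %llu rev %llu misc %llu inst %llu\n",
           (rng & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT,    /* 4 */
           (rng & CHA_VER_REV_MASK) >> CHA_VER_REV_SHIFT,    /* 1 */
           (rng & CHA_VER_MISC_MASK) >> CHA_VER_MISC_SHIFT,  /* 0 */
           rng & CHA_VER_NUM_MASK);                          /* 2 */
    return 0;
}
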
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -34,46 +34,61 @@
@@ -13091,3 +15983,13 @@ Signed-off-by: Biwen Li <biwen.li@nxp.com>
if (ret > 1) {
tbl_off += ret;
sync_needed = true;
+--- a/include/crypto/chacha20.h
++++ b/include/crypto/chacha20.h
+@@ -13,6 +13,7 @@
+ #define CHACHA20_IV_SIZE 16
+ #define CHACHA20_KEY_SIZE 32
+ #define CHACHA20_BLOCK_SIZE 64
++#define CHACHAPOLY_IV_SIZE 12
+
+ struct chacha20_ctx {
+ u32 key[8];