Diffstat (limited to 'target/linux/layerscape')
-rw-r--r--  target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch |  147
-rw-r--r--  target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch            |    6
-rw-r--r--  target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch            | 1166
3 files changed, 300 insertions, 1019 deletions
diff --git a/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch b/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch
index 9ccacba3ae..1656ddf3e8 100644
--- a/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch
+++ b/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch
@@ -3166,7 +3166,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
netdev_err(net_dev, "dpni_enable() failed\n");
-@@ -1047,51 +1355,20 @@ static int dpaa2_eth_open(struct net_dev
+@@ -1047,48 +1355,17 @@ static int dpaa2_eth_open(struct net_dev
link_state_err:
enable_err:
@@ -3181,8 +3181,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
- */
-static u32 drain_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch)
-+static int dpaa2_eth_stop(struct net_device *net_dev)
- {
+-{
- u32 drained = 0, total = 0;
-
- do {
@@ -3193,11 +3192,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
-
- return total;
-}
-+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-+ int dpni_enabled = 0;
-+ int retries = 10, i;
-+ int err = 0;
-
+-
-static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
-{
- struct dpaa2_eth_channel *ch;
@@ -3212,20 +3207,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
- return drained;
-}
-
--static int dpaa2_eth_stop(struct net_device *net_dev)
--{
-- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ static int dpaa2_eth_stop(struct net_device *net_dev)
+ {
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- int dpni_enabled;
- int retries = 10;
- u32 drained;
--
-- netif_tx_stop_all_queues(net_dev);
-- netif_carrier_off(net_dev);
-+ netif_tx_stop_all_queues(net_dev);
-+ netif_carrier_off(net_dev);
++ int dpni_enabled = 0;
++ int retries = 10, i;
++ int err = 0;
- /* Loop while dpni_disable() attempts to drain the egress FQs
- * and confirm them back to us.
+ netif_tx_stop_all_queues(net_dev);
+ netif_carrier_off(net_dev);
@@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev
} while (dpni_enabled && --retries);
if (!retries) {
@@ -4496,7 +4489,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
-@@ -2020,49 +2966,107 @@ static int dpaa2_eth_set_hash(struct net
+@@ -2020,12 +2966,10 @@ static int dpaa2_eth_set_hash(struct net
}
key->type = DPKG_EXTRACT_FROM_HDR;
@@ -4506,27 +4499,37 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
- key->extract.from_hdr.field = hash_fields[i].cls_field;
+ key->extract.from_hdr.field = dist_fields[i].cls_field;
cls_cfg.num_extracts++;
-+ }
-+
-+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-+ if (!dma_mem)
-+ return -ENOMEM;
-+
-+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
-+ if (err) {
-+ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
+-
+- priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
+ }
+
+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
+@@ -2035,36 +2979,96 @@ static int dpaa2_eth_set_hash(struct net
+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
+ if (err) {
+ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
+- goto err_prep_key;
+ goto free_key;
-+ }
-+
-+ /* Prepare for setting the rx dist */
+ }
+
+- memset(&dist_cfg, 0, sizeof(dist_cfg));
+-
+ /* Prepare for setting the rx dist */
+- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
+- DPAA2_CLASSIFIER_DMA_SIZE,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
+ key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, key_iova)) {
-+ dev_err(dev, "DMA mapping failed\n");
-+ err = -ENOMEM;
+ dev_err(dev, "DMA mapping failed\n");
+ err = -ENOMEM;
+- goto err_dma_map;
+ goto free_key;
-+ }
-+
+ }
+
+- dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
+- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ if (type == DPAA2_ETH_RX_DIST_HASH) {
+ if (dpaa2_eth_has_legacy_dist(priv))
+ err = config_legacy_hash_key(priv, key_iova);
@@ -4535,17 +4538,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+ } else {
+ err = config_cls_key(priv, key_iova);
+ }
-+
+
+- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
+- dma_unmap_single(dev, dist_cfg.key_cfg_iova,
+- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
+- if (err)
+- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
+ dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
+ DMA_TO_DEVICE);
+ if (!err && type == DPAA2_ETH_RX_DIST_HASH)
+ priv->rx_hash_fields = rx_hash_fields;
-+
+
+-err_dma_map:
+-err_prep_key:
+free_key:
-+ kfree(dma_mem);
-+ return err;
-+}
-+
+ kfree(dma_mem);
+ return err;
+ }
+
+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -4571,70 +4581,43 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ int err;
-
-- priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
++
+ /* Check if we actually support Rx flow classification */
+ if (dpaa2_eth_has_legacy_dist(priv)) {
+ dev_dbg(dev, "Rx cls not supported by current MC version\n");
+ return -EOPNOTSUPP;
- }
-
-- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
-- if (!dma_mem)
-- return -ENOMEM;
--
-- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
-- if (err) {
-- dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
-- goto err_prep_key;
++ }
++
+ if (!dpaa2_eth_fs_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled in DPNI options\n");
+ return -EOPNOTSUPP;
- }
-
-- memset(&dist_cfg, 0, sizeof(dist_cfg));
--
-- /* Prepare for setting the rx dist */
-- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
-- DPAA2_CLASSIFIER_DMA_SIZE,
-- DMA_TO_DEVICE);
-- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
-- dev_err(dev, "DMA mapping failed\n");
-- err = -ENOMEM;
-- goto err_dma_map;
++ }
++
+ if (!dpaa2_eth_hash_enabled(priv)) {
+ dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
+ return -EOPNOTSUPP;
- }
-
-- dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
-- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
++ }
++
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
-
-- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
-- dma_unmap_single(dev, dist_cfg.key_cfg_iova,
-- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
++
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
- if (err)
-- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);
++ if (err)
+ return err;
-
--err_dma_map:
--err_prep_key:
-- kfree(dma_mem);
-- return err;
++
+out:
+ priv->rx_cls_enabled = 1;
+
+ return 0;
- }
-
++}
++
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
+ * frame queues and channels
+ */
@@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr
pools_params.num_dpbp = 1;
pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
diff --git a/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch b/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch
index ea39a1433c..988afe10ca 100644
--- a/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch
+++ b/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch
@@ -1312,7 +1312,7 @@ Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
xhci->quirks |= XHCI_BROKEN_PORT_PED;
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -1972,10 +1972,12 @@ static int finish_td(struct xhci_hcd *xh
+@@ -1976,10 +1976,12 @@ static int finish_td(struct xhci_hcd *xh
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
@@ -1325,7 +1325,7 @@ Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
u32 trb_comp_code;
int ep_index;
-@@ -1998,14 +2000,30 @@ static int finish_td(struct xhci_hcd *xh
+@@ -2002,14 +2004,30 @@ static int finish_td(struct xhci_hcd *xh
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code)) {
@@ -1363,7 +1363,7 @@ Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com>
while (ep_ring->dequeue != td->last_trb)
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
-@@ -1597,13 +1597,38 @@ static int xhci_urb_dequeue(struct usb_h
+@@ -1595,13 +1595,38 @@ static int xhci_urb_dequeue(struct usb_h
ret = -ENOMEM;
goto done;
}
diff --git a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
index 0d26aca797..95e6894b93 100644
--- a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
+++ b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch
@@ -3839,17 +3839,12 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
-@@ -258,55 +284,139 @@ badkey:
+@@ -258,6 +284,468 @@ badkey:
return -EINVAL;
}
--static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-- const u8 *key, unsigned int keylen)
+static int tls_set_sh_desc(struct crypto_aead *tls)
- {
-- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-- const char *alg_name = crypto_tfm_alg_name(tfm);
++{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
@@ -3919,45 +3914,26 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
- struct device *jrdev = ctx->jrdev;
-- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-- u32 ctx1_iv_off = 0;
-- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-- OP_ALG_AAI_CTR_MOD128);
-- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++ struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
- int ret = 0;
-
-- memcpy(ctx->key, key, keylen);
++ int ret = 0;
++
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
- #ifdef DEBUG
++#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- #endif
-- /*
-- * AES-CTR needs to load IV in CONTEXT1 reg
-- * at an offset of 128bits (16bytes)
-- * CONTEXT1[255:128] = IV
-- */
-- if (ctr_mode)
-- ctx1_iv_off = 16;
-
- /*
-- * RFC3686 specific:
-- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-- * | *key = {KEY, NONCE}
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
- */
-- if (is_rfc3686) {
-- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-- keylen -= CTR_RFC3686_NONCE_SIZE;
++ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
@@ -3973,25 +3949,14 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
- }
-
-- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-- ctx->cdata.keylen = keylen;
-- ctx->cdata.key_virt = ctx->key;
-- ctx->cdata.key_inline = true;
++ }
++
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
-
-- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
-- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-- is_rfc3686, ctx1_iv_off);
-- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-- is_rfc3686, ctx1_iv_off);
-- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
-- ivsize, is_rfc3686, ctx1_iv_off);
++
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
@@ -4011,44 +3976,39 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ goto badkey;
-
- /* Now update the driver contexts with the new shared descriptor */
- if (ctx->drv_ctx[ENCRYPT]) {
-@@ -327,42 +437,84 @@ static int ablkcipher_setkey(struct cryp
- }
- }
-
-- if (ctx->drv_ctx[GIVENCRYPT]) {
-- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
-- ctx->sh_desc_givenc);
-- if (ret) {
-- dev_err(jrdev, "driver givenc context update failed\n");
-- goto badkey;
-- }
-- }
--
- return ret;
- badkey:
-- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
--static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-- const u8 *key, unsigned int keylen)
++ return -EINVAL;
++}
++
+static int gcm_set_sh_desc(struct crypto_aead *aead)
- {
-- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-- struct device *jrdev = ctx->jrdev;
-- int ret = 0;
++{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-
-- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-- dev_err(jrdev, "key size mismatch\n");
-- goto badkey;
++
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
@@ -4077,8 +4037,8 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
- }
-
++ }
++
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
@@ -4107,129 +4067,62 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
- memcpy(ctx->key, key, keylen);
-- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
- ctx->cdata.keylen = keylen;
-- ctx->cdata.key_virt = ctx->key;
-- ctx->cdata.key_inline = true;
-
-- /* xts ablkcipher encrypt, decrypt shared descriptors */
-- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
-- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++ ctx->cdata.keylen = keylen;
++
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
-
- /* Now update the driver contexts with the new shared descriptor */
- if (ctx->drv_ctx[ENCRYPT]) {
-@@ -370,7 +522,7 @@ static int xts_ablkcipher_setkey(struct
- ctx->sh_desc_enc);
- if (ret) {
- dev_err(jrdev, "driver enc context update failed\n");
-- goto badkey;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
- }
- }
-
-@@ -379,151 +531,829 @@ static int xts_ablkcipher_setkey(struct
- ctx->sh_desc_dec);
- if (ret) {
- dev_err(jrdev, "driver dec context update failed\n");
-- goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
- }
- }
-
-- return ret;
--badkey:
-- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-- return -EINVAL;
++ }
++ }
++
+ return 0;
- }
-
--/*
-- * aead_edesc - s/w-extended aead descriptor
-- * @src_nents: number of segments in input scatterlist
-- * @dst_nents: number of segments in output scatterlist
-- * @iv_dma: dma address of iv for checking continuity and link table
-- * @qm_sg_bytes: length of dma mapped h/w link table
-- * @qm_sg_dma: bus physical mapped address of h/w link table
-- * @assoclen: associated data length, in CAAM endianness
-- * @assoclen_dma: bus physical mapped address of req->assoclen
-- * @drv_req: driver-specific request structure
-- * @sgt: the h/w link table, followed by IV
-- */
--struct aead_edesc {
-- int src_nents;
-- int dst_nents;
-- dma_addr_t iv_dma;
-- int qm_sg_bytes;
-- dma_addr_t qm_sg_dma;
-- unsigned int assoclen;
-- dma_addr_t assoclen_dma;
-- struct caam_drv_req drv_req;
-- struct qm_sg_entry sgt[0];
--};
++}
++
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-
--/*
-- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
-- * @src_nents: number of segments in input scatterlist
-- * @dst_nents: number of segments in output scatterlist
-- * @iv_dma: dma address of iv for checking continuity and link table
-- * @qm_sg_bytes: length of dma mapped h/w link table
-- * @qm_sg_dma: bus physical mapped address of h/w link table
-- * @drv_req: driver-specific request structure
-- * @sgt: the h/w link table, followed by IV
-- */
--struct ablkcipher_edesc {
-- int src_nents;
-- int dst_nents;
-- dma_addr_t iv_dma;
-- int qm_sg_bytes;
-- dma_addr_t qm_sg_dma;
-- struct caam_drv_req drv_req;
-- struct qm_sg_entry sgt[0];
--};
++
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
-
--static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
-- enum optype type)
--{
- /*
-- * This function is called on the fast path with values of 'type'
-- * known at compile time. Invalid arguments are not expected and
-- * thus no checks are made.
++
++ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
- */
-- struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
-- u32 *desc;
++ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-
-- if (unlikely(!drv_ctx)) {
-- spin_lock(&ctx->lock);
++
+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
-
-- /* Read again to check if some other core init drv_ctx */
-- drv_ctx = ctx->drv_ctx[type];
-- if (!drv_ctx) {
-- int cpu;
++
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
@@ -4240,13 +4133,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-
-- if (type == ENCRYPT)
-- desc = ctx->sh_desc_enc;
-- else if (type == DECRYPT)
-- desc = ctx->sh_desc_dec;
-- else /* (type == GIVENCRYPT) */
-- desc = ctx->sh_desc_givenc;
++
+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
@@ -4418,162 +4305,13 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ return 0;
+}
+
-+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-+ const char *alg_name = crypto_tfm_alg_name(tfm);
-+ struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+ u32 ctx1_iv_off = 0;
-+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128);
-+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
-+ int ret = 0;
-+
-+ memcpy(ctx->key, key, keylen);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+ /*
-+ * AES-CTR needs to load IV in CONTEXT1 reg
-+ * at an offset of 128bits (16bytes)
-+ * CONTEXT1[255:128] = IV
-+ */
-+ if (ctr_mode)
-+ ctx1_iv_off = 16;
-+
-+ /*
-+ * RFC3686 specific:
-+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-+ * | *key = {KEY, NONCE}
-+ */
-+ if (is_rfc3686) {
-+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-+ keylen -= CTR_RFC3686_NONCE_SIZE;
-+ }
-+
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
-+ ctx->cdata.key_inline = true;
-+
-+ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
-+ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
-+ ivsize, is_rfc3686, ctx1_iv_off);
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[GIVENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
-+ ctx->sh_desc_givenc);
-+ if (ret) {
-+ dev_err(jrdev, "driver givenc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
-+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct device *jrdev = ctx->jrdev;
-+ int ret = 0;
-+
-+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-+ dev_err(jrdev, "key size mismatch\n");
-+ goto badkey;
-+ }
-+
-+ memcpy(ctx->key, key, keylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
-+ ctx->cdata.key_inline = true;
-+
-+ /* xts ablkcipher encrypt, decrypt shared descriptors */
-+ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
-+ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
-+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+/*
-+ * aead_edesc - s/w-extended aead descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @assoclen: associated data length, in CAAM endianness
-+ * @assoclen_dma: bus physical mapped address of req->assoclen
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct aead_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ unsigned int assoclen;
-+ dma_addr_t assoclen_dma;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
-+
-+/*
+ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+ {
+@@ -414,6 +902,29 @@ struct aead_edesc {
+ };
+
+ /*
+ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
@@ -4597,100 +4335,13 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+};
+
+/*
-+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct ablkcipher_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
-+
-+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
-+ enum optype type)
-+{
-+ /*
-+ * This function is called on the fast path with values of 'type'
-+ * known at compile time. Invalid arguments are not expected and
-+ * thus no checks are made.
-+ */
-+ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
-+ u32 *desc;
-+
-+ if (unlikely(!drv_ctx)) {
-+ spin_lock(&ctx->lock);
-+
-+ /* Read again to check if some other core init drv_ctx */
-+ drv_ctx = ctx->drv_ctx[type];
-+ if (!drv_ctx) {
-+ int cpu;
-+
-+ if (type == ENCRYPT)
-+ desc = ctx->sh_desc_enc;
-+ else if (type == DECRYPT)
-+ desc = ctx->sh_desc_dec;
-+ else /* (type == GIVENCRYPT) */
-+ desc = ctx->sh_desc_givenc;
-+
-+ cpu = smp_processor_id();
-+ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-+ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
-+ drv_ctx->op_type = type;
-+
-+ ctx->drv_ctx[type] = drv_ctx;
-+ }
-+
-+ spin_unlock(&ctx->lock);
-+ }
-+
-+ return drv_ctx;
-+}
-+
-+static void caam_unmap(struct device *dev, struct scatterlist *src,
-+ struct scatterlist *dst, int src_nents,
-+ int dst_nents, dma_addr_t iv_dma, int ivsize,
-+ enum optype op_type, dma_addr_t qm_sg_dma,
-+ int qm_sg_bytes)
-+{
-+ if (dst != src) {
-+ if (src_nents)
-+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-+ } else {
-+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-+ }
-+
-+ if (iv_dma)
-+ dma_unmap_single(dev, iv_dma, ivsize,
-+ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
-+ DMA_TO_DEVICE);
-+ if (qm_sg_bytes)
-+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+}
-+
-+static void aead_unmap(struct device *dev,
-+ struct aead_edesc *edesc,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(aead);
-+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+}
-+
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ }
+
+static void tls_unmap(struct device *dev,
+ struct tls_edesc *edesc,
+ struct aead_request *req)
@@ -4704,22 +4355,80 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ edesc->qm_sg_bytes);
+}
+
-+static void ablkcipher_unmap(struct device *dev,
-+ struct ablkcipher_edesc *edesc,
-+ struct ablkcipher_request *req)
+ static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
++ u32 ssrc = status & JRSTA_SSRC_MASK;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
+ caam_jr_strstatus(qidev, status);
+- ecode = -EIO;
++ /*
++ * verify hw auth check passed else return -EBADMSG
++ */
++ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
+@@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
+ */
+- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
++ if (mapped_dst_nents > 1)
++ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
++ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
++ else
++ qm_sg_ents = ALIGN(qm_sg_ents, 4);
++
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+@@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
+ return aead_crypt(req, false);
+ }
+
++static int ipsec_gcm_encrypt(struct aead_request *req)
+{
-+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ return aead_crypt(req, true);
+}
+
-+static void aead_done(struct caam_drv_req *drv_req, u32 status)
++static int ipsec_gcm_decrypt(struct aead_request *req)
++{
++ if (req->assoclen < 8)
++ return -EINVAL;
++
++ return aead_crypt(req, false);
++}
++
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
-+ struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
@@ -4728,56 +4437,56 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
-+ u32 ssrc = status & JRSTA_SSRC_MASK;
-+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-+
+ caam_jr_strstatus(qidev, status);
-+ /*
-+ * verify hw auth check passed else return -EBADMSG
-+ */
-+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
-+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ aead_unmap(qidev, edesc, aead_req);
++ tls_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
+/*
-+ * allocate and map the aead extended descriptor
++ * allocate and map the tls extended descriptor
+ */
-+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-+ bool encrypt)
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int blocksize = crypto_aead_blocksize(aead);
++ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
-+ unsigned int authsize = ctx->authsize;
-+ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
++ u8 *iv;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
+
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct aead_edesc *)drv_ctx;
++ return (struct tls_edesc *)drv_ctx;
+
-+ /* allocate space for base edesc and hw desc commands, link tables */
++ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
@@ -4787,7 +4496,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
@@ -4803,6 +4512,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
++ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
@@ -4813,14 +4523,13 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ return ERR_PTR(src_nents);
+ }
+
-+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize :
-+ (-authsize)));
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : (-authsize)));
++ req->cryptlen +
++ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
@@ -4837,7 +4546,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ mapped_src_nents = 0;
+ }
+
-+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
@@ -4847,95 +4556,51 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ }
+ }
+
-+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
-+ ivsize = crypto_aead_ivsize(aead);
-+
+ /*
-+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
++ * Create S/G table: IV, src, dst.
+ * Input is not contiguous.
-+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-+ * the end of the table by allocating more S/G entries. Logic:
-+ * if (src != dst && output S/G)
-+ * pad output S/G, if needed
-+ * else if (src == dst && S/G)
-+ * overlapping S/Gs; pad one of them
-+ * else if (input S/G) ...
-+ * pad input S/G, if needed
+ */
-+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
-+ if (mapped_dst_nents > 1)
-+ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
-+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
-+ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
-+ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
-+ else
-+ qm_sg_ents = ALIGN(qm_sg_ents, 4);
-+
++ qm_sg_ents = 1 + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-+ CAAM_QI_MEMCACHE_SIZE)) {
-+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
-+ qm_sg_ents, ivsize);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0, 0);
++
++ ivsize = crypto_aead_ivsize(aead);
++ iv = (u8 *)(sg_table + qm_sg_ents);
++ /* Make sure IV is located in a DMAable area */
++ memcpy(iv, req->iv, ivsize);
++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
++ 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
-+ if (ivsize) {
-+ u8 *iv = (u8 *)(sg_table + qm_sg_ents);
-+
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents,
-+ dst_nents, 0, 0, 0, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = aead_done;
++ edesc->drv_req.cbk = tls_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
-+ edesc->assoclen = cpu_to_caam32(req->assoclen);
-+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
-+ dev_err(qidev, "unable to map assoclen\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
+
-+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-+ qm_sg_index++;
-+ if (ivsize) {
-+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-+ qm_sg_index++;
-+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
-+ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
@@ -4943,431 +4608,64 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com>
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
-+ out_len = req->assoclen + req->cryptlen +
-+ (encrypt ? ctx->authsize : (-ctx->authsize));
-+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
-+ if (req->dst == req->src) {
-+ if (mapped_src_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-+ out_len, 0);
-+ else
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (1 + !!ivsize) * sizeof(*sg_table),
-+ out_len, 0);
-+ } else if (mapped_dst_nents == 1) {
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
-+ 0);
-+ } else {
++ if (req->dst == req->src)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table), out_len, 0);
++ else if (mapped_dst_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
-+ }
+
+ return edesc;
+}
-
-- cpu = smp_processor_id();
-- drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
-- drv_ctx->op_type = type;
-+static inline int aead_crypt(struct aead_request *req, bool encrypt)
++
++static int tls_crypt(struct aead_request *req, bool encrypt)
+{
-+ struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
-
-- ctx->drv_ctx[type] = drv_ctx;
-- }
++
+ if (unlikely(caam_congested))
+ return -EAGAIN;
-
-- spin_unlock(&ctx->lock);
-+ /* allocate extended descriptor */
-+ edesc = aead_edesc_alloc(req, encrypt);
++
++ edesc = tls_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
-+ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
-+ aead_unmap(ctx->qidev, edesc, req);
++ tls_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
- }
-
-- return drv_ctx;
-+ return ret;
- }
-
--static void caam_unmap(struct device *dev, struct scatterlist *src,
-- struct scatterlist *dst, int src_nents,
-- int dst_nents, dma_addr_t iv_dma, int ivsize,
-- enum optype op_type, dma_addr_t qm_sg_dma,
-- int qm_sg_bytes)
-+static int aead_encrypt(struct aead_request *req)
- {
-- if (dst != src) {
-- if (src_nents)
-- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-- } else {
-- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-- }
-+ return aead_crypt(req, true);
-+}
-
-- if (iv_dma)
-- dma_unmap_single(dev, iv_dma, ivsize,
-- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
-- DMA_TO_DEVICE);
-- if (qm_sg_bytes)
-- dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+static int aead_decrypt(struct aead_request *req)
-+{
-+ return aead_crypt(req, false);
- }
-
--static void aead_unmap(struct device *dev,
-- struct aead_edesc *edesc,
-- struct aead_request *req)
-+static int ipsec_gcm_encrypt(struct aead_request *req)
- {
-- struct crypto_aead *aead = crypto_aead_reqtfm(req);
-- int ivsize = crypto_aead_ivsize(aead);
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-
-- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-- edesc->qm_sg_dma, edesc->qm_sg_bytes);
-- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+ return aead_crypt(req, true);
- }
-
--static void ablkcipher_unmap(struct device *dev,
-- struct ablkcipher_edesc *edesc,
-- struct ablkcipher_request *req)
-+static int ipsec_gcm_decrypt(struct aead_request *req)
- {
-- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-
-- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-- edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+ return aead_crypt(req, false);
- }
-
--static void aead_done(struct caam_drv_req *drv_req, u32 status)
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
- {
- struct device *qidev;
-- struct aead_edesc *edesc;
-+ struct tls_edesc *edesc;
- struct aead_request *aead_req = drv_req->app_ctx;
- struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
-@@ -537,41 +1367,51 @@ static void aead_done(struct caam_drv_re
- }
-
- edesc = container_of(drv_req, typeof(*edesc), drv_req);
-- aead_unmap(qidev, edesc, aead_req);
-+ tls_unmap(qidev, edesc, aead_req);
-
- aead_request_complete(aead_req, ecode);
- qi_cache_free(edesc);
- }
-
- /*
-- * allocate and map the aead extended descriptor
-+ * allocate and map the tls extended descriptor
- */
--static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-- bool encrypt)
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
- {
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int blocksize = crypto_aead_blocksize(aead);
-+ unsigned int padsize, authsize;
- struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- typeof(*alg), aead);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-- GFP_KERNEL : GFP_ATOMIC;
-+ GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-- struct aead_edesc *edesc;
-+ struct tls_edesc *edesc;
- dma_addr_t qm_sg_dma, iv_dma = 0;
- int ivsize = 0;
-- unsigned int authsize = ctx->authsize;
-- int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
-+ u8 *iv;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
- int in_len, out_len;
- struct qm_sg_entry *sg_table, *fd_sgt;
- struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
+ }
-
- drv_ctx = get_drv_ctx(ctx, op_type);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-- return (struct aead_edesc *)drv_ctx;
-+ return (struct tls_edesc *)drv_ctx;
-
-- /* allocate space for base edesc and hw desc commands, link tables */
-+ /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (unlikely(!edesc)) {
- dev_err(qidev, "could not allocate extended descriptor\n");
-@@ -581,7 +1421,7 @@ static struct aead_edesc *aead_edesc_all
- if (likely(req->src == req->dst)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
-- (encrypt ? authsize : 0));
-+ (encrypt ? authsize : 0));
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
-@@ -597,6 +1437,7 @@ static struct aead_edesc *aead_edesc_all
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-+ dst = req->dst;
- } else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
-@@ -607,14 +1448,13 @@ static struct aead_edesc *aead_edesc_all
- return ERR_PTR(src_nents);
- }
-
-- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-- req->cryptlen +
-- (encrypt ? authsize :
-- (-authsize)));
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-- req->assoclen + req->cryptlen +
-- (encrypt ? authsize : (-authsize)));
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
- qi_cache_free(edesc);
- return ERR_PTR(dst_nents);
- }
-@@ -631,7 +1471,7 @@ static struct aead_edesc *aead_edesc_all
- mapped_src_nents = 0;
- }
-
-- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
-+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
-@@ -641,80 +1481,51 @@ static struct aead_edesc *aead_edesc_all
- }
- }
-
-- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
-- ivsize = crypto_aead_ivsize(aead);
--
- /*
-- * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
-+ * Create S/G table: IV, src, dst.
- * Input is not contiguous.
- */
-- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
-+ qm_sg_ents = 1 + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- sg_table = &edesc->sgt[0];
- qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-- if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-- CAAM_QI_MEMCACHE_SIZE)) {
-- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
-- qm_sg_ents, ivsize);
-- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-- 0, 0, 0, 0);
+
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
-+ 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
-- if (ivsize) {
-- u8 *iv = (u8 *)(sg_table + qm_sg_ents);
--
-- /* Make sure IV is located in a DMAable area */
-- memcpy(iv, req->iv, ivsize);
--
-- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-- if (dma_mapping_error(qidev, iv_dma)) {
-- dev_err(qidev, "unable to map IV\n");
-- caam_unmap(qidev, req->src, req->dst, src_nents,
-- dst_nents, 0, 0, 0, 0, 0);
-- qi_cache_free(edesc);
-- return ERR_PTR(-ENOMEM);
-- }
-- }
--
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
- edesc->iv_dma = iv_dma;
- edesc->drv_req.app_ctx = req;
-- edesc->drv_req.cbk = aead_done;
-+ edesc->drv_req.cbk = tls_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
-- edesc->assoclen = cpu_to_caam32(req->assoclen);
-- edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
-- DMA_TO_DEVICE);
-- if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
-- dev_err(qidev, "unable to map assoclen\n");
-- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-- iv_dma, ivsize, op_type, 0, 0);
-- qi_cache_free(edesc);
-- return ERR_PTR(-ENOMEM);
-- }
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
-
-- dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-- qm_sg_index++;
-- if (ivsize) {
-- dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-- qm_sg_index++;
-- }
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
- qm_sg_index += mapped_src_nents;
-
- if (mapped_dst_nents > 1)
-- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
-
- qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
-- dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-- iv_dma, ivsize, op_type, 0, 0);
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, op_type, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-@@ -722,35 +1533,29 @@ static struct aead_edesc *aead_edesc_all
- edesc->qm_sg_dma = qm_sg_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
-
-- out_len = req->assoclen + req->cryptlen +
-- (encrypt ? ctx->authsize : (-ctx->authsize));
-- in_len = 4 + ivsize + req->assoclen + req->cryptlen;
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
-
- fd_sgt = &edesc->drv_req.fd_sgt[0];
++ return ret;
++}
+
- dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
-
-- if (req->dst == req->src) {
-- if (mapped_src_nents == 1)
-- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-- out_len, 0);
-- else
-- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-- (1 + !!ivsize) * sizeof(*sg_table),
-- out_len, 0);
-- } else if (mapped_dst_nents == 1) {
-- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
-- 0);
-- } else {
-+ if (req->dst == req->src)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table), out_len, 0);
-+ else if (mapped_dst_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+ else
- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
- qm_sg_index, out_len, 0);
-- }
-
- return edesc;
- }
-
--static inline int aead_crypt(struct aead_request *req, bool encrypt)
-+static int tls_crypt(struct aead_request *req, bool encrypt)
- {
-- struct aead_edesc *edesc;
-+ struct tls_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ret;
-@@ -758,31 +1563,29 @@ static inline int aead_crypt(struct aead
- if (unlikely(caam_congested))
- return -EAGAIN;
-
-- /* allocate extended descriptor */
-- edesc = aead_edesc_alloc(req, encrypt);
-+ edesc = tls_edesc_alloc(req, encrypt);
- if (IS_ERR_OR_NULL(edesc))
- return PTR_ERR(edesc);
-
-- /* Create and submit job descriptor */
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
-- aead_unmap(ctx->qidev, edesc, req);
-+ tls_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
- }
-
--static int aead_encrypt(struct aead_request *req)
+static int tls_encrypt(struct aead_request *req)
- {
-- return aead_crypt(req, true);
++{
+ return tls_crypt(req, true);
- }
-
--static int aead_decrypt(struct aead_request *req)
++}
++
+static int tls_decrypt(struct aead_request *req)
- {
-- return aead_crypt(req, false);
++{
+ return tls_crypt(req, false);
- }
-
++}
++
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ {
+ struct ablkcipher_edesc *edesc;
@@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;