aboutsummaryrefslogtreecommitdiffstats
path: root/package/kernel/lantiq/ltq-deu/src
diff options
context:
space:
mode:
Diffstat (limited to 'package/kernel/lantiq/ltq-deu/src')
-rw-r--r--package/kernel/lantiq/ltq-deu/src/Makefile8
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c1668
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_arc4.c108
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_async_aes.c10
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_async_des.c12
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_des.c390
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_deu.c4
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_deu.h4
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_deu_ar9.h8
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_deu_danube.h8
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_deu_vr9.c2
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_md5.c79
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_md5_hmac.c189
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_sha1.c56
-rw-r--r--package/kernel/lantiq/ltq-deu/src/ifxmips_sha1_hmac.c278
15 files changed, 1969 insertions, 855 deletions
diff --git a/package/kernel/lantiq/ltq-deu/src/Makefile b/package/kernel/lantiq/ltq-deu/src/Makefile
index 93ae7ca922f..13f3dccaee6 100644
--- a/package/kernel/lantiq/ltq-deu/src/Makefile
+++ b/package/kernel/lantiq/ltq-deu/src/Makefile
@@ -7,18 +7,18 @@ endif
ifeq ($(BUILD_VARIANT),ar9)
CFLAGS_MODULE = -DCONFIG_AR9 -DCONFIG_CRYPTO_DEV_DEU -DCONFIG_CRYPTO_DEV_SPEED_TEST -DCONFIG_CRYPTO_DEV_DES \
- -DCONFIG_CRYPTO_DEV_AES -DCONFIG_CRYPTO_DEV_SHA1 -DCONFIG_CRYPTO_DEV_MD5 -DCONFIG_CRYPTO_DEV_ARC4 \
+ -DCONFIG_CRYPTO_DEV_AES -DCONFIG_CRYPTO_DEV_SHA1 -DCONFIG_CRYPTO_DEV_MD5 \
-DCONFIG_CRYPTO_DEV_SHA1_HMAC -DCONFIG_CRYPTO_DEV_MD5_HMAC
obj-m = ltq_deu_ar9.o
- ltq_deu_ar9-objs = ifxmips_deu.o ifxmips_deu_ar9.o ifxmips_des.o ifxmips_aes.o ifxmips_arc4.o \
+ ltq_deu_ar9-objs = ifxmips_deu.o ifxmips_deu_ar9.o ifxmips_des.o ifxmips_aes.o \
ifxmips_sha1.o ifxmips_md5.o ifxmips_sha1_hmac.o ifxmips_md5_hmac.o
endif
ifeq ($(BUILD_VARIANT),vr9)
CFLAGS_MODULE = -DCONFIG_VR9 -DCONFIG_CRYPTO_DEV_DEU -DCONFIG_CRYPTO_DEV_SPEED_TEST -DCONFIG_CRYPTO_DEV_DES \
- -DCONFIG_CRYPTO_DEV_AES -DCONFIG_CRYPTO_DEV_SHA1 -DCONFIG_CRYPTO_DEV_MD5 -DCONFIG_CRYPTO_DEV_ARC4 \
+ -DCONFIG_CRYPTO_DEV_AES -DCONFIG_CRYPTO_DEV_SHA1 -DCONFIG_CRYPTO_DEV_MD5 \
-DCONFIG_CRYPTO_DEV_SHA1_HMAC -DCONFIG_CRYPTO_DEV_MD5_HMAC
obj-m = ltq_deu_vr9.o
- ltq_deu_vr9-objs = ifxmips_deu.o ifxmips_deu_vr9.o ifxmips_des.o ifxmips_aes.o ifxmips_arc4.o \
+ ltq_deu_vr9-objs = ifxmips_deu.o ifxmips_deu_vr9.o ifxmips_des.o ifxmips_aes.o \
ifxmips_sha1.o ifxmips_md5.o ifxmips_sha1_hmac.o ifxmips_md5_hmac.o
endif
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
index 76abfafb4e6..2aa4b095935 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
@@ -57,6 +57,14 @@
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <crypto/algapi.h>
+#include <crypto/b128ops.h>
+#include <crypto/gcm.h>
+#include <crypto/gf128mul.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/xts.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
#include "ifxmips_deu.h"
@@ -83,9 +91,12 @@ spinlock_t aes_lock;
#define AES_MIN_KEY_SIZE 16
#define AES_MAX_KEY_SIZE 32
#define AES_BLOCK_SIZE 16
+#define AES_BLOCK_WORDS 4
#define CTR_RFC3686_NONCE_SIZE 4
#define CTR_RFC3686_IV_SIZE 8
+#define CTR_RFC3686_MIN_KEY_SIZE (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
#define CTR_RFC3686_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)
+#define AES_CBCMAC_DBN_TEMP_SIZE 128
#ifdef CRYPTO_DEBUG
extern char debug_level;
@@ -112,8 +123,18 @@ extern void ifx_deu_aes (void *ctx_arg, uint8_t *out_arg, const uint8_t *in_arg,
struct aes_ctx {
int key_length;
- u32 buf[AES_MAX_KEY_SIZE];
+ u8 buf[AES_MAX_KEY_SIZE];
+ u8 tweakkey[AES_MAX_KEY_SIZE];
u8 nonce[CTR_RFC3686_NONCE_SIZE];
+ u8 lastbuffer[4 * XTS_BLOCK_SIZE];
+ int use_tweak;
+ u32 byte_count;
+ u32 dbn;
+ int started;
+ u32 (*temp)[AES_BLOCK_WORDS];
+ u8 block[AES_BLOCK_SIZE];
+ u8 hash[AES_BLOCK_SIZE];
+ struct gf128mul_4k *gf128;
};
extern int disable_deudma;
@@ -130,18 +151,17 @@ extern int disable_multiblock;
int aes_set_key (struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
{
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
- unsigned long *flags = (unsigned long *) &tfm->crt_flags;
//printk("set_key in %s\n", __FILE__);
//aes_chip_init();
if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->key_length = key_len;
+ ctx->use_tweak = 0;
DPRINTF(0, "ctx @%p, key_len %d, ctx->key_length %d\n", ctx, key_len, ctx->key_length);
memcpy ((u8 *) (ctx->buf), in_key, key_len);
@@ -149,35 +169,37 @@ int aes_set_key (struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
}
-/*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
+/*! \fn int aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
* \ingroup IFX_AES_FUNCTIONS
- * \brief main interface to AES hardware
- * \param ctx_arg crypto algo context
- * \param out_arg output bytestream
- * \param in_arg input bytestream
- * \param iv_arg initialization vector
- * \param nbytes length of bytestream
- * \param encdec 1 for encrypt; 0 for decrypt
- * \param mode operation mode such as ebc, cbc, ctr
- *
-*/
-void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
- u8 *iv_arg, size_t nbytes, int encdec, int mode)
+ * \brief sets the AES keys for skcipher
+ * \param tfm linux crypto skcipher
+ * \param in_key input key
+ * \param key_len key lengths of 16, 24 and 32 bytes supported
+ * \return -EINVAL - bad key length, 0 - SUCCESS
+*/
+int aes_set_key_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
+{
+ return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+/*! \fn void aes_set_key_skcipher (void *ctx_arg)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief sets the AES key to the hardware, requires spinlock to be set by caller
+ * \param ctx_arg crypto algo context
+ * \return
+*/
+void aes_set_key_hw (void *ctx_arg)
{
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
- u32 *in_key = ctx->buf;
- unsigned long flag;
- /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+ u8 *in_key = ctx->buf;
int key_len = ctx->key_length;
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
- int i = 0;
- int byte_cnt = nbytes;
-
+ if (ctx->use_tweak) in_key = ctx->tweakkey;
- CRTCL_SECT_START;
/* 128, 192 or 256 bit key length */
aes->controlr.K = key_len / 8 - 2;
if (key_len == 128 / 8) {
@@ -206,8 +228,7 @@ void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
}
else {
printk (KERN_ERR "[%s %s %d]: Invalid key_len : %d\n", __FILE__, __func__, __LINE__, key_len);
- CRTCL_SECT_END;
- return;// -EINVAL;
+ return; //-EINVAL;
}
/* let HW pre-process DEcryption key in any case (even if
@@ -215,6 +236,36 @@ void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
checked in decryption routine! */
aes->controlr.PNK = 1;
+}
+
+
+/*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec, int mode)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief main interface to AES hardware
+ * \param ctx_arg crypto algo context
+ * \param out_arg output bytestream
+ * \param in_arg input bytestream
+ * \param iv_arg initialization vector
+ * \param nbytes length of bytestream
+ * \param encdec 1 for encrypt; 0 for decrypt
+ * \param mode operation mode such as ebc, cbc, ctr
+ *
+*/
+void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
+ u8 *iv_arg, size_t nbytes, int encdec, int mode)
+
+{
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+ volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
+ //struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
+ unsigned long flag;
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+ int i = 0;
+ int byte_cnt = nbytes;
+
+ CRTCL_SECT_START;
+
+ aes_set_key_hw (ctx_arg);
aes->controlr.E_D = !encdec; //encryption
aes->controlr.O = mode; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
@@ -251,23 +302,24 @@ void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
/* To handle all non-aligned bytes (not aligned to 16B size) */
if (byte_cnt) {
- aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
- aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
- aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
- aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3)); /* start crypto */
+ u8 temparea[16] = {0,};
+
+ memcpy(temparea, ((u32 *) in_arg + (i * 4)), byte_cnt);
+
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 0));
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 1));
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 2));
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) temparea + 3)); /* start crypto */
while (aes->controlr.BUS) {
}
- *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
- *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
- *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
- *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;
-
- /* to ensure that the extended pages are clean */
- memset (out_arg + (i * 16) + (nbytes % AES_BLOCK_SIZE), 0,
- (AES_BLOCK_SIZE - (nbytes % AES_BLOCK_SIZE)));
+ *((volatile u32 *) temparea + 0) = aes->OD3R;
+ *((volatile u32 *) temparea + 1) = aes->OD2R;
+ *((volatile u32 *) temparea + 2) = aes->OD1R;
+ *((volatile u32 *) temparea + 3) = aes->OD0R;
+ memcpy(((u32 *) out_arg + (i * 4)), temparea, byte_cnt);
}
//tc.chen : copy iv_arg back
@@ -294,7 +346,6 @@ void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
int ctr_rfc3686_aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
{
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
- unsigned long *flags = (unsigned long *)&tfm->crt_flags;
//printk("ctr_rfc3686_aes_set_key in %s\n", __FILE__);
@@ -304,17 +355,32 @@ int ctr_rfc3686_aes_set_key (struct crypto_tfm *tfm, const uint8_t *in_key, unsi
key_len -= CTR_RFC3686_NONCE_SIZE; // remove 4 bytes of nonce
if (key_len != 16 && key_len != 24 && key_len != 32) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->key_length = key_len;
+ ctx->use_tweak = 0;
memcpy ((u8 *) (ctx->buf), in_key, key_len);
return 0;
}
+/*!
+ * \fn int ctr_rfc3686_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief sets RFC3686 key for skcipher
+ * \param tfm linux crypto skcipher
+ * \param in_key input key
+ * \param key_len key lengths of 20, 28 and 36 bytes supported; last 4 bytes is nonce
+ * \return 0 - SUCCESS
+ * -EINVAL - bad key length
+*/
+int ctr_rfc3686_aes_set_key_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
+{
+ return ctr_rfc3686_aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
/*! \fn void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
* \ingroup IFX_AES_FUNCTIONS
* \brief main interface with deu hardware in DMA mode
@@ -423,11 +489,11 @@ void ifx_deu_aes_ctr (void *ctx, uint8_t *dst, const uint8_t *src,
/*! \fn void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
* \ingroup IFX_AES_FUNCTIONS
- * \brief encrypt AES_BLOCK_SIZE of data
- * \param tfm linux crypto algo transform
- * \param out output bytestream
- * \param in input bytestream
-*/
+ * \brief encrypt AES_BLOCK_SIZE of data
+ * \param tfm linux crypto algo transform
+ * \param out output bytestream
+ * \param in input bytestream
+*/
void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
{
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -437,11 +503,11 @@ void aes_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
/*! \fn void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
* \ingroup IFX_AES_FUNCTIONS
- * \brief decrypt AES_BLOCK_SIZE of data
- * \param tfm linux crypto algo transform
- * \param out output bytestream
- * \param in input bytestream
-*/
+ * \brief decrypt AES_BLOCK_SIZE of data
+ * \param tfm linux crypto algo transform
+ * \param out output bytestream
+ * \param in input bytestream
+*/
void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
{
struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -449,14 +515,14 @@ void aes_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
CRYPTO_DIR_DECRYPT, 0);
}
-/*
- * \brief AES function mappings
+/*
+ * \brief AES function mappings
*/
struct crypto_alg ifxdeu_aes_alg = {
.cra_name = "aes",
.cra_driver_name = "ifxdeu-aes",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_module = THIS_MODULE,
@@ -472,115 +538,91 @@ struct crypto_alg ifxdeu_aes_alg = {
}
};
-/*! \fn int ecb_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int ecb_aes_encrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief ECB AES encrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief ECB AES encrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ecb_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int enc_bytes;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ unsigned int enc_bytes, nbytes;
+
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = enc_bytes = walk.nbytes)) {
enc_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
NULL, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-/*! \fn int ecb_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int ecb_aes_decrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief ECB AES decrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief ECB AES decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ecb_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int dec_bytes;
+ unsigned int dec_bytes, nbytes;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = dec_bytes = walk.nbytes)) {
dec_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
NULL, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-/*
+/*
* \brief AES function mappings
*/
-struct crypto_alg ifxdeu_ecb_aes_alg = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ifxdeu-ecb(aes)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_ecb_aes_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aes_set_key,
- .encrypt = ecb_aes_encrypt,
- .decrypt = ecb_aes_decrypt,
- }
- }
+struct skcipher_alg ifxdeu_ecb_aes_alg = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "ifxdeu-ecb(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ecb_aes_alg.base.cra_list),
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ecb_aes_encrypt,
+ .decrypt = ecb_aes_decrypt,
};
-
-/*! \fn int cbc_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int ecb_aes_encrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief CBC AES encrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief CBC AES encrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int cbc_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int enc_bytes;
+ unsigned int enc_bytes, nbytes;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = enc_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
@@ -588,32 +630,26 @@ int cbc_aes_encrypt(struct blkcipher_desc *desc,
ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-/*! \fn int cbc_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int cbc_aes_decrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief CBC AES decrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief CBC AES decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int cbc_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int dec_bytes;
+ unsigned int dec_bytes, nbytes;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = dec_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
@@ -621,7 +657,7 @@ int cbc_aes_decrypt(struct blkcipher_desc *desc,
ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -630,142 +666,572 @@ int cbc_aes_decrypt(struct blkcipher_desc *desc,
/*
* \brief AES function mappings
*/
-struct crypto_alg ifxdeu_cbc_aes_alg = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "ifxdeu-cbc(aes)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_cbc_aes_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = cbc_aes_encrypt,
- .decrypt = cbc_aes_decrypt,
+struct skcipher_alg ifxdeu_cbc_aes_alg = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "ifxdeu-cbc(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_cbc_aes_alg.base.cra_list),
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = cbc_aes_encrypt,
+ .decrypt = cbc_aes_decrypt,
+};
+
+/*! \fn void ifx_deu_aes_xts (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, size_t nbytes, int encdec)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief main interface to AES hardware for XTS impl
+ * \param ctx_arg crypto algo context
+ * \param out_arg output bytestream
+ * \param in_arg input bytestream
+ * \param iv_arg initialization vector
+ * \param nbytes length of bytestream
+ * \param encdec 1 for encrypt; 0 for decrypt
+ *
+*/
+void ifx_deu_aes_xts (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
+ u8 *iv_arg, size_t nbytes, int encdec)
+{
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+ volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
+ //struct aes_ctx *ctx = (struct aes_ctx *)ctx_arg;
+ unsigned long flag;
+ /*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
+ u8 oldiv[16];
+ int i = 0;
+ int byte_cnt = nbytes;
+
+ CRTCL_SECT_START;
+
+ aes_set_key_hw (ctx_arg);
+
+ aes->controlr.E_D = !encdec; //encryption
+ aes->controlr.O = 1; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR - CBC mode for xts
+
+ i = 0;
+ while (byte_cnt >= 16) {
+
+ if (!encdec) {
+ if (((byte_cnt % 16) > 0) && (byte_cnt < (2*XTS_BLOCK_SIZE))) {
+ memcpy(oldiv, iv_arg, 16);
+ gf128mul_x_ble((le128 *)iv_arg, (le128 *)iv_arg);
+ }
+ u128_xor((u128 *)((u32 *) in_arg + (i * 4) + 0), (u128 *)((u32 *) in_arg + (i * 4) + 0), (u128 *)iv_arg);
+ }
+
+ aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
+ aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
+ aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
+ aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
+
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3)); /* start crypto */
+
+ while (aes->controlr.BUS) {
+ // this will not take long
+ }
+
+ *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
+ *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
+ *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
+ *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;
+
+ if (encdec) {
+ u128_xor((u128 *)((volatile u32 *) out_arg + (i * 4) + 0), (u128 *)((volatile u32 *) out_arg + (i * 4) + 0), (u128 *)iv_arg);
}
+ gf128mul_x_ble((le128 *)iv_arg, (le128 *)iv_arg);
+ i++;
+ byte_cnt -= 16;
+ }
+
+ if (byte_cnt) {
+ u8 state[XTS_BLOCK_SIZE] = {0,};
+
+ if (!encdec) memcpy(iv_arg, oldiv, 16);
+
+ aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) iv_arg);
+ aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 1));
+ aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 2));
+ aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) iv_arg + 3));
+
+ memcpy(state, ((u32 *) in_arg + (i * 4) + 0), byte_cnt);
+ memcpy((state + byte_cnt), (out_arg + ((i - 1) * 16) + byte_cnt), (XTS_BLOCK_SIZE - byte_cnt));
+ if (!encdec) {
+ u128_xor((u128 *)state, (u128 *)state, (u128 *)iv_arg);
+ }
+
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) state + 0));
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) state + 1));
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) state + 2));
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) state + 3)); /* start crypto */
+
+ memcpy(((u32 *) out_arg + (i * 4) + 0), ((u32 *) out_arg + ((i - 1) * 4) + 0), byte_cnt);
+
+ while (aes->controlr.BUS) {
+ // this will not take long
+ }
+
+ *((volatile u32 *) out_arg + ((i-1) * 4) + 0) = aes->OD3R;
+ *((volatile u32 *) out_arg + ((i-1) * 4) + 1) = aes->OD2R;
+ *((volatile u32 *) out_arg + ((i-1) * 4) + 2) = aes->OD1R;
+ *((volatile u32 *) out_arg + ((i-1) * 4) + 3) = aes->OD0R;
+
+ if (encdec) {
+ u128_xor((u128 *)((volatile u32 *) out_arg + ((i-1) * 4) + 0), (u128 *)((volatile u32 *) out_arg + ((i-1) * 4) + 0), (u128 *)iv_arg);
+ }
+ }
+
+ CRTCL_SECT_END;
+}
+
+/*! \fn int xts_aes_encrypt(struct skcipher_req *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief XTS AES encrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int xts_aes_encrypt(struct skcipher_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ int err;
+ unsigned int enc_bytes, nbytes, processed;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ ctx->use_tweak = 1;
+ aes_encrypt(req->base.tfm, walk.iv, walk.iv);
+ ctx->use_tweak = 0;
+ processed = 0;
+
+ while ((nbytes = walk.nbytes) && (walk.nbytes >= (XTS_BLOCK_SIZE * 2)) ) {
+ u8 *iv = walk.iv;
+ if (nbytes == walk.total) {
+ enc_bytes = nbytes;
+ } else {
+ enc_bytes = nbytes & ~(XTS_BLOCK_SIZE - 1);
+ if ((req->cryptlen - processed - enc_bytes) < (XTS_BLOCK_SIZE)) {
+ if (enc_bytes > (2 * XTS_BLOCK_SIZE)) {
+ enc_bytes -= XTS_BLOCK_SIZE;
+ } else {
+ break;
+ }
+ }
+ }
+ ifx_deu_aes_xts(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ iv, enc_bytes, CRYPTO_DIR_ENCRYPT);
+ err = skcipher_walk_done(&walk, nbytes - enc_bytes);
+ processed += enc_bytes;
+ }
+
+ if ((walk.nbytes)) {
+ u8 *iv = walk.iv;
+ nbytes = req->cryptlen - processed;
+ scatterwalk_map_and_copy(ctx->lastbuffer, req->src, (req->cryptlen - nbytes), nbytes, 0);
+ ifx_deu_aes_xts(ctx, ctx->lastbuffer, ctx->lastbuffer,
+ iv, nbytes, CRYPTO_DIR_ENCRYPT);
+ scatterwalk_map_and_copy(ctx->lastbuffer, req->dst, (req->cryptlen - nbytes), nbytes, 1);
+ skcipher_request_complete(req, 0);
+ }
+
+ return err;
+}
+
+/*! \fn int xts_aes_decrypt(struct skcipher_req *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief XTS AES decrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int xts_aes_decrypt(struct skcipher_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ int err;
+ unsigned int dec_bytes, nbytes, processed;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ if (req->cryptlen < XTS_BLOCK_SIZE)
+ return -EINVAL;
+
+ ctx->use_tweak = 1;
+ aes_encrypt(req->base.tfm, walk.iv, walk.iv);
+ ctx->use_tweak = 0;
+ processed = 0;
+
+ while ((nbytes = walk.nbytes) && (walk.nbytes >= (XTS_BLOCK_SIZE * 2))) {
+ u8 *iv = walk.iv;
+ if (nbytes == walk.total) {
+ dec_bytes = nbytes;
+ } else {
+ dec_bytes = nbytes & ~(XTS_BLOCK_SIZE - 1);
+ if ((req->cryptlen - processed - dec_bytes) < (XTS_BLOCK_SIZE)) {
+ if (dec_bytes > (2 * XTS_BLOCK_SIZE)) {
+ dec_bytes -= XTS_BLOCK_SIZE;
+ } else {
+ break;
+ }
+ }
+ }
+ ifx_deu_aes_xts(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ iv, dec_bytes, CRYPTO_DIR_DECRYPT);
+ err = skcipher_walk_done(&walk, nbytes - dec_bytes);
+ processed += dec_bytes;
+ }
+
+ if ((walk.nbytes)) {
+ u8 *iv = walk.iv;
+ nbytes = req->cryptlen - processed;
+ scatterwalk_map_and_copy(ctx->lastbuffer, req->src, (req->cryptlen - nbytes), nbytes, 0);
+ ifx_deu_aes_xts(ctx, ctx->lastbuffer, ctx->lastbuffer,
+ iv, nbytes, CRYPTO_DIR_DECRYPT);
+ scatterwalk_map_and_copy(ctx->lastbuffer, req->dst, (req->cryptlen - nbytes), nbytes, 1);
+ skcipher_request_complete(req, 0);
}
+
+ return err;
+}
+
+/*! \fn int xts_aes_set_key_skcipher (struct crypto_tfm *tfm, const uint8_t *in_key, unsigned int key_len)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief sets the AES keys for XTS
+ * \param tfm linux crypto algo transform
+ * \param in_key input key
+ * \param key_len key lengths of 16, 24 and 32 bytes supported
+ * \return -EINVAL - bad key length, 0 - SUCCESS
+*/
+int xts_aes_set_key_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(crypto_skcipher_tfm(tfm));
+ unsigned int keylen = (key_len / 2);
+
+ if (key_len % 2) return -EINVAL;
+
+ if (keylen != 16 && keylen != 24 && keylen != 32) {
+ return -EINVAL;
+ }
+
+ ctx->key_length = keylen;
+ ctx->use_tweak = 0;
+ DPRINTF(0, "ctx @%p, key_len %d, ctx->key_length %d\n", ctx, key_len, ctx->key_length);
+ memcpy ((u8 *) (ctx->buf), in_key, keylen);
+ memcpy ((u8 *) (ctx->tweakkey), in_key + keylen, keylen);
+
+ return 0;
+}
+
+/*
+ * \brief AES function mappings
+*/
+struct skcipher_alg ifxdeu_xts_aes_alg = {
+ .base.cra_name = "xts(aes)",
+ .base.cra_driver_name = "ifxdeu-xts(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = XTS_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_xts_aes_alg.base.cra_list),
+ .min_keysize = AES_MIN_KEY_SIZE * 2,
+ .max_keysize = AES_MAX_KEY_SIZE * 2,
+ .ivsize = XTS_BLOCK_SIZE,
+ .walksize = 2 * XTS_BLOCK_SIZE,
+ .setkey = xts_aes_set_key_skcipher,
+ .encrypt = xts_aes_encrypt,
+ .decrypt = xts_aes_decrypt,
};
+/*! \fn int ofb_aes_encrypt(struct skcipher_req *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief OFB AES encrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int ofb_aes_encrypt(struct skcipher_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ int err;
+ unsigned int enc_bytes, nbytes;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
+ }
-/*! \fn int ctr_basic_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+ return err;
+}
+
+/*! \fn int ofb_aes_decrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief Counter mode AES encrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief OFB AES decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ctr_basic_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ofb_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int enc_bytes;
+ unsigned int dec_bytes, nbytes;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = enc_bytes = walk.nbytes)) {
- u8 *iv = walk.iv;
+ while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_ofb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+/*
+ * \brief AES function mappings
+*/
+struct skcipher_alg ifxdeu_ofb_aes_alg = {
+ .base.cra_name = "ofb(aes)",
+ .base.cra_driver_name = "ifxdeu-ofb(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ofb_aes_alg.base.cra_list),
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .walksize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ofb_aes_encrypt,
+ .decrypt = ofb_aes_decrypt,
+};
+
+/*! \fn int cfb_aes_encrypt(struct skcipher_req *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief CFB AES encrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int cfb_aes_encrypt(struct skcipher_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ int err;
+ unsigned int enc_bytes, nbytes;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+/*! \fn int cfb_aes_decrypt(struct skcipher_req *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief CFB AES decrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int cfb_aes_decrypt(struct skcipher_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ int err;
+ unsigned int dec_bytes, nbytes;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_cfb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ return err;
+}
+
+/*
+ * \brief AES function mappings
+*/
+struct skcipher_alg ifxdeu_cfb_aes_alg = {
+ .base.cra_name = "cfb(aes)",
+ .base.cra_driver_name = "ifxdeu-cfb(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_cfb_aes_alg.base.cra_list),
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .walksize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = cfb_aes_encrypt,
+ .decrypt = cfb_aes_decrypt,
+};
+
+/*! \fn int ctr_basic_aes_encrypt(struct skcipher_req *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief Counter mode AES encrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int ctr_basic_aes_encrypt(struct skcipher_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ int err;
+ unsigned int enc_bytes, nbytes;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
enc_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+ walk.iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-/*! \fn int ctr_basic_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int ctr_basic_aes_encrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief Counter mode AES decrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief Counter mode AES decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ctr_basic_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ctr_basic_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int dec_bytes;
+ unsigned int dec_bytes, nbytes;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
- while ((nbytes = dec_bytes = walk.nbytes)) {
- u8 *iv = walk.iv;
+ while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
dec_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+ walk.iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-/*
+/*
* \brief AES function mappings
*/
-struct crypto_alg ifxdeu_ctr_basic_aes_alg = {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ifxdeu-ctr(aes)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_ctr_basic_aes_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = aes_set_key,
- .encrypt = ctr_basic_aes_encrypt,
- .decrypt = ctr_basic_aes_decrypt,
- }
- }
+struct skcipher_alg ifxdeu_ctr_basic_aes_alg = {
+ .base.cra_name = "ctr(aes)",
+ .base.cra_driver_name = "ifxdeu-ctr(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ctr_basic_aes_alg.base.cra_list),
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .walksize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_skcipher,
+ .encrypt = ctr_basic_aes_encrypt,
+ .decrypt = ctr_basic_aes_decrypt,
};
-
-/*! \fn int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int ctr_rfc3686_aes_encrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief Counter mode AES (rfc3686) encrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief Counter mode AES (rfc3686) encrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ctr_rfc3686_aes_encrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, bsize = nbytes;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes, enc_bytes;
+ int err;
u8 rfc3686_iv[16];
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
-
+ err = skcipher_walk_virt(&walk, req, false);
+ nbytes = walk.nbytes;
+
/* set up counter block */
memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
memcpy(rfc3686_iv + CTR_RFC3686_NONCE_SIZE, walk.iv, CTR_RFC3686_IV_SIZE);
@@ -774,54 +1240,40 @@ int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc,
*(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- /* scatterlist source is the same size as request size, just process once */
- if (nbytes == walk.nbytes) {
- ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
- nbytes -= walk.nbytes;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- return err;
- }
-
- while ((nbytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
- ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
-
- nbytes -= walk.nbytes;
- bsize -= walk.nbytes;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
}
/* to handle remaining bytes < AES_BLOCK_SIZE */
if (walk.nbytes) {
ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
-
+
return err;
}
-/*! \fn int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn int ctr_rfc3686_aes_decrypt(struct skcipher_req *req)
* \ingroup IFX_AES_FUNCTIONS
- * \brief Counter mode AES (rfc3686) decrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+ * \brief Counter mode AES (rfc3686) decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ctr_rfc3686_aes_decrypt(struct skcipher_request *req)
{
- struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
- int err, bsize = nbytes;
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes, dec_bytes;
+ int err;
u8 rfc3686_iv[16];
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
+ nbytes = walk.nbytes;
/* set up counter block */
memcpy(rfc3686_iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
@@ -831,86 +1283,634 @@ int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc,
*(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- /* scatterlist source is the same size as request size, just process once */
- if (nbytes == walk.nbytes) {
- ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
- nbytes -= walk.nbytes;
- err = blkcipher_walk_done(desc, &walk, nbytes);
- return err;
- }
-
- while ((nbytes = walk.nbytes) % (walk.nbytes >= AES_BLOCK_SIZE)) {
- ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
-
- nbytes -= walk.nbytes;
- bsize -= walk.nbytes;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
}
/* to handle remaining bytes < AES_BLOCK_SIZE */
if (walk.nbytes) {
ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
- err = blkcipher_walk_done(desc, &walk, 0);
+ rfc3686_iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
}
return err;
}
-/*
+/*
* \brief AES function mappings
*/
-struct crypto_alg ifxdeu_ctr_rfc3686_aes_alg = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "ifxdeu-ctr-rfc3686(aes)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aes_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_ctr_rfc3686_aes_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .setkey = ctr_rfc3686_aes_set_key,
- .encrypt = ctr_rfc3686_aes_encrypt,
- .decrypt = ctr_rfc3686_aes_decrypt,
+struct skcipher_alg ifxdeu_ctr_rfc3686_aes_alg = {
+ .base.cra_name = "rfc3686(ctr(aes))",
+ .base.cra_driver_name = "ifxdeu-ctr-rfc3686(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ctr_rfc3686_aes_alg.base.cra_list),
+ .min_keysize = CTR_RFC3686_MIN_KEY_SIZE,
+ .max_keysize = CTR_RFC3686_MAX_KEY_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .walksize = AES_BLOCK_SIZE,
+ .setkey = ctr_rfc3686_aes_set_key_skcipher,
+ .encrypt = ctr_rfc3686_aes_encrypt,
+ .decrypt = ctr_rfc3686_aes_decrypt,
+};
+
+static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final);
+
+/*! \fn static void aes_cbcmac_transform(struct shash_desc *desc, u8 const *in)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief save input block to context
+ * \param desc linux crypto shash descriptor
+ * \param in 16-byte block of input
+*/
+static void aes_cbcmac_transform(struct shash_desc *desc, u8 const *in)
+{
+ struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ if ( ((mctx->dbn)+1) > AES_CBCMAC_DBN_TEMP_SIZE )
+ {
+ //printk("aes_cbcmac_DBN_TEMP_SIZE exceeded\n");
+ aes_cbcmac_final_impl(desc, (u8 *)mctx->hash, false);
+ }
+
+ memcpy(&mctx->temp[mctx->dbn], in, 16); //dbn workaround
+ mctx->dbn += 1;
+}
+
+/*! \fn int aes_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief sets cbcmac aes key
+ * \param tfm linux crypto shash transform
+ * \param key input key
+ * \param keylen key
+*/
+static int aes_cbcmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
+{
+ return aes_set_key(crypto_shash_tfm(tfm), key, keylen);
+
+ return 0;
+}
+
+/*! \fn void aes_cbcmac_init(struct shash_desc *desc)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief initialize md5 hmac context
+ * \param desc linux crypto shash descriptor
+*/
+static int aes_cbcmac_init(struct shash_desc *desc)
+{
+
+ struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+ mctx->dbn = 0; //dbn workaround
+ mctx->started = 0;
+ mctx->byte_count = 0;
+ memset(mctx->hash, 0, AES_BLOCK_SIZE);
+
+ return 0;
+}
+
+/*! \fn void aes_cbcmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief on-the-fly cbcmac aes computation
+ * \param desc linux crypto shash descriptor
+ * \param data input data
+ * \param len size of input data
+*/
+static int aes_cbcmac_update(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+ struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x0f);
+
+ mctx->byte_count += len;
+
+ if (avail > len) {
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+ data, len);
+ return 0;
+ }
+
+ memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
+ data, avail);
+
+ aes_cbcmac_transform(desc, mctx->block);
+ data += avail;
+ len -= avail;
+
+ while (len >= sizeof(mctx->block)) {
+ memcpy(mctx->block, data, sizeof(mctx->block));
+ aes_cbcmac_transform(desc, mctx->block);
+ data += sizeof(mctx->block);
+ len -= sizeof(mctx->block);
+ }
+
+ memcpy(mctx->block, data, len);
+ return 0;
+}
+
+/*! \fn static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief compute final or intermediate md5 hmac value
+ * \param desc linux crypto shash descriptor
+ * \param out final cbcmac aes output value
+ * \param in finalize or intermediate processing
+*/
+static int aes_cbcmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
+{
+ struct aes_ctx *mctx = crypto_shash_ctx(desc->tfm);
+ const unsigned int offset = mctx->byte_count & 0x0f;
+ char *p = (char *)mctx->block + offset;
+ volatile struct aes_t *aes = (volatile struct aes_t *) AES_START;
+ unsigned long flag;
+ int i = 0;
+ int dbn;
+ u32 *in = mctx->temp[0];
+
+ CRTCL_SECT_START;
+
+ aes_set_key_hw (mctx);
+
+ aes->controlr.E_D = !CRYPTO_DIR_ENCRYPT; //encryption
+ aes->controlr.O = 1; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
+
+ //aes->controlr.F = 128; //default; only for CFB and OFB modes; change only for customer-specific apps
+
+ //printk("\ndbn = %d\n", mctx->dbn);
+
+ if (mctx->started) {
+ aes->IV3R = DEU_ENDIAN_SWAP(*(u32 *) mctx->hash);
+ aes->IV2R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 1));
+ aes->IV1R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 2));
+ aes->IV0R = DEU_ENDIAN_SWAP(*((u32 *) mctx->hash + 3));
+ } else {
+ mctx->started = 1;
+ aes->IV3R = 0;
+ aes->IV2R = 0;
+ aes->IV1R = 0;
+ aes->IV0R = 0;
+ }
+
+ i = 0;
+ for (dbn = 0; dbn < mctx->dbn; dbn++)
+ {
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 0));
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 1));
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 2));
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in + (i * 4) + 3)); /* start crypto */
+
+ while (aes->controlr.BUS) {
+ // this will not take long
+ }
+
+ in += 4;
+ }
+
+ *((u32 *) mctx->hash) = DEU_ENDIAN_SWAP(aes->IV3R);
+ *((u32 *) mctx->hash + 1) = DEU_ENDIAN_SWAP(aes->IV2R);
+ *((u32 *) mctx->hash + 2) = DEU_ENDIAN_SWAP(aes->IV1R);
+ *((u32 *) mctx->hash + 3) = DEU_ENDIAN_SWAP(aes->IV0R);
+
+ if (hash_final && offset) {
+ aes->controlr.O = 0; //0 ECB 1 CBC 2 OFB 3 CFB 4 CTR
+ crypto_xor(mctx->block, mctx->hash, offset);
+
+ memcpy(p, mctx->hash + offset, (AES_BLOCK_SIZE - offset));
+
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 0));
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 1));
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 2));
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) mctx->block + 3)); /* start crypto */
+
+ while (aes->controlr.BUS) {
+ // this will not take long
}
+
+ *((u32 *) mctx->hash) = DEU_ENDIAN_SWAP(aes->OD3R);
+ *((u32 *) mctx->hash + 1) = DEU_ENDIAN_SWAP(aes->OD2R);
+ *((u32 *) mctx->hash + 2) = DEU_ENDIAN_SWAP(aes->OD1R);
+ *((u32 *) mctx->hash + 3) = DEU_ENDIAN_SWAP(aes->OD0R);
}
+
+ CRTCL_SECT_END;
+
+ if (hash_final) {
+ memcpy(out, mctx->hash, AES_BLOCK_SIZE);
+ /* reset the context after we finish with the hash */
+ aes_cbcmac_init(desc);
+ } else {
+ mctx->dbn = 0;
+ }
+ return 0;
+}
+
+/*! \fn static int aes_cbcmac_final(struct crypto_tfm *tfm, u8 *out)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief call aes_cbcmac_final_impl with hash_final true
+ * \param tfm linux crypto algo transform
+ * \param out final md5 hmac output value
+*/
+static int aes_cbcmac_final(struct shash_desc *desc, u8 *out)
+{
+ return aes_cbcmac_final_impl(desc, out, true);
+}
+
+/*! \fn void aes_cbcmac_init_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief initialize pointers in aes_ctx
+ * \param tfm linux crypto shash transform
+*/
+static int aes_cbcmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
+ mctx->temp = kzalloc(AES_BLOCK_SIZE * AES_CBCMAC_DBN_TEMP_SIZE, GFP_KERNEL);
+ if (IS_ERR(mctx->temp)) return PTR_ERR(mctx->temp);
+
+ return 0;
+}
+
+/*! \fn void aes_cbcmac_exit_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief free pointers in aes_ctx
+ * \param tfm linux crypto shash transform
+*/
+static void aes_cbcmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct aes_ctx *mctx = crypto_tfm_ctx(tfm);
+ kfree(mctx->temp);
+}
+
+/*
+ * \brief aes_cbcmac function mappings
+*/
+static struct shash_alg ifxdeu_cbcmac_aes_alg = {
+ .digestsize = AES_BLOCK_SIZE,
+ .init = aes_cbcmac_init,
+ .update = aes_cbcmac_update,
+ .final = aes_cbcmac_final,
+ .setkey = aes_cbcmac_setkey,
+ .descsize = sizeof(struct aes_ctx),
+ .base = {
+ .cra_name = "cbcmac(aes)",
+ .cra_driver_name= "ifxdeu-cbcmac(aes)",
+ .cra_priority = 400,
+ .cra_ctxsize = sizeof(struct aes_ctx),
+ .cra_flags = CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = 1,
+ .cra_module = THIS_MODULE,
+ .cra_init = aes_cbcmac_init_tfm,
+ .cra_exit = aes_cbcmac_exit_tfm,
+ }
};
+/*! \fn int aes_set_key_aead (struct crypto_aead *aead, const uint8_t *in_key, unsigned int key_len)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief sets the AES keys for aead gcm
+ * \param aead linux crypto aead
+ * \param in_key input key
+ * \param key_len key lengths of 16, 24 and 32 bytes supported
+ * \return -EINVAL - bad key length, 0 - SUCCESS
+*/
+int aes_set_key_aead (struct crypto_aead *aead, const u8 *in_key, unsigned int key_len)
+{
+ struct aes_ctx *ctx = crypto_aead_ctx(aead);
+ int err;
+
+ err = aes_set_key(&aead->base, in_key, key_len);
+ if (err) return err;
+
+ memset(ctx->block, 0, sizeof(ctx->block));
+ memset(ctx->lastbuffer, 0, AES_BLOCK_SIZE);
+ ifx_deu_aes_ctr(ctx, ctx->block, ctx->block,
+ ctx->lastbuffer, AES_BLOCK_SIZE, CRYPTO_DIR_ENCRYPT, 0);
+ if (ctx->gf128) gf128mul_free_4k(ctx->gf128);
+ ctx->gf128 = gf128mul_init_4k_lle((be128 *)ctx->block);
+
+ return err;
+}
+
+/*! \fn int gcm_aes_setauthsize (struct crypto_aead *aead, unsigned int authsize)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief sets the AES keys for aead gcm
+ * \param aead linux crypto aead
+ * \param in_key input authsize
+ * \return -EINVAL - bad authsize length, 0 - SUCCESS
+*/
+int gcm_aes_setauthsize (struct crypto_aead *aead, unsigned int authsize)
+{
+ return crypto_gcm_check_authsize(authsize);
+}
+
+/*! \fn int gcm_aes_encrypt(struct aead_request *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief GCM AES encrypt using linux crypto aead
+ * \param req aead request
+ * \return err
+*/
+int gcm_aes_encrypt(struct aead_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ struct skcipher_request request;
+ int err;
+ unsigned int enc_bytes, nbytes;
+ be128 lengths;
+ u8 iv[AES_BLOCK_SIZE];
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64(req->cryptlen * 8);
+
+ memset(ctx->hash, 0, sizeof(ctx->hash));
+ memset(ctx->block, 0, sizeof(ctx->block));
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+ *(__be32 *)((void *)iv + GCM_AES_IV_SIZE) = cpu_to_be32(1);
+ ifx_deu_aes_ctr(ctx, ctx->block, ctx->block,
+ iv, 16, CRYPTO_DIR_ENCRYPT, 0);
+
+ request.cryptlen = req->cryptlen + req->assoclen;
+ request.src = req->src;
+ request.dst = req->dst;
+ request.base = req->base;
+
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = AES_BLOCK_SIZE;
+
+ if (req->assoclen && (req->assoclen < AES_BLOCK_SIZE))
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = req->assoclen;
+
+ err = skcipher_walk_virt(&walk, &request, false);
+
+ //process assoc data if available
+ if (req->assoclen > 0) {
+ unsigned int assoc_remain, ghashlen;
+
+ assoc_remain = req->assoclen;
+ ghashlen = min(req->assoclen, walk.nbytes);
+ while ((nbytes = enc_bytes = ghashlen) && (ghashlen >= AES_BLOCK_SIZE)) {
+ u8 *temp;
+ if (nbytes > req->assoclen) nbytes = enc_bytes = req->assoclen;
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr, enc_bytes);
+ assoc_remain -= enc_bytes;
+ temp = walk.dst.virt.addr;
+ while (enc_bytes > 0) {
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ enc_bytes -= AES_BLOCK_SIZE;
+ temp += 16;
+ }
+ if (assoc_remain < AES_BLOCK_SIZE) walk.stride = assoc_remain;
+ if (assoc_remain == 0) walk.stride = AES_BLOCK_SIZE;
+ enc_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, (walk.nbytes - enc_bytes));
+ ghashlen = min(assoc_remain, walk.nbytes);
+ }
+
+ if ((enc_bytes = ghashlen)) {
+ memcpy(ctx->lastbuffer, walk.src.virt.addr, enc_bytes);
+ memset(ctx->lastbuffer + enc_bytes, 0, (AES_BLOCK_SIZE - enc_bytes));
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr, ghashlen);
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ walk.stride = AES_BLOCK_SIZE;
+ err = skcipher_walk_done(&walk, (walk.nbytes - ghashlen));
+ }
+ }
+
+ //crypt and hash
+ while ((nbytes = enc_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ u8 *temp;
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ temp = walk.dst.virt.addr;
+ while (enc_bytes) {
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ enc_bytes -= AES_BLOCK_SIZE;
+ temp += 16;
+ }
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* crypt and hash remaining bytes < AES_BLOCK_SIZE */
+ if ((enc_bytes = walk.nbytes)) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ memcpy(ctx->lastbuffer, walk.dst.virt.addr, enc_bytes);
+ memset(ctx->lastbuffer + enc_bytes, 0, (AES_BLOCK_SIZE - enc_bytes));
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ //finalize and copy hash
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)&lengths);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->block);
+ scatterwalk_map_and_copy(ctx->hash, req->dst, req->cryptlen + req->assoclen, crypto_aead_authsize(crypto_aead_reqtfm(req)), 1);
+
+ aead_request_complete(req, 0);
+
+ return err;
+}
+
+/*! \fn int gcm_aes_decrypt(struct aead_request *req)
+ * \ingroup IFX_AES_FUNCTIONS
+ * \brief GCM AES decrypt using linux crypto aead
+ * \param req aead request
+ * \return err
+*/
+int gcm_aes_decrypt(struct aead_request *req)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ struct skcipher_request request;
+ int err;
+ unsigned int dec_bytes, nbytes, authsize;
+ be128 lengths;
+ u8 iv[AES_BLOCK_SIZE];
+
+ authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+
+ lengths.a = cpu_to_be64(req->assoclen * 8);
+ lengths.b = cpu_to_be64((req->cryptlen - authsize) * 8);
+
+ memset(ctx->hash, 0, sizeof(ctx->hash));
+ memset(ctx->block, 0, sizeof(ctx->block));
+ memcpy(iv, req->iv, GCM_AES_IV_SIZE);
+ *(__be32 *)((void *)iv + GCM_AES_IV_SIZE) = cpu_to_be32(1);
+ ifx_deu_aes_ctr(ctx, ctx->block, ctx->block,
+ iv, 16, CRYPTO_DIR_ENCRYPT, 0);
+
+ request.cryptlen = req->cryptlen + req->assoclen - authsize;
+ request.src = req->src;
+ request.dst = req->dst;
+ request.base = req->base;
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = AES_BLOCK_SIZE;
+
+ if (req->assoclen && (req->assoclen < AES_BLOCK_SIZE))
+ crypto_skcipher_alg(crypto_skcipher_reqtfm(&request))->walksize = req->assoclen;
+
+ err = skcipher_walk_virt(&walk, &request, false);
+
+ //process assoc data if available
+ if (req->assoclen > 0) {
+ unsigned int assoc_remain, ghashlen;
+
+ assoc_remain = req->assoclen;
+ ghashlen = min(req->assoclen, walk.nbytes);
+ while ((nbytes = dec_bytes = ghashlen) && (ghashlen >= AES_BLOCK_SIZE)) {
+ u8 *temp;
+ if (nbytes > req->assoclen) nbytes = dec_bytes = req->assoclen;
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr, dec_bytes);
+ assoc_remain -= dec_bytes;
+ temp = walk.dst.virt.addr;
+ while (dec_bytes > 0) {
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ dec_bytes -= AES_BLOCK_SIZE;
+ temp += 16;
+ }
+ if (assoc_remain < AES_BLOCK_SIZE) walk.stride = assoc_remain;
+ if (assoc_remain == 0) walk.stride = AES_BLOCK_SIZE;
+ dec_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);
+ err = skcipher_walk_done(&walk, (walk.nbytes - dec_bytes));
+ ghashlen = min(assoc_remain, walk.nbytes);
+ }
+
+ if ((dec_bytes = ghashlen)) {
+ memcpy(ctx->lastbuffer, walk.src.virt.addr, dec_bytes);
+ memset(ctx->lastbuffer + dec_bytes, 0, (AES_BLOCK_SIZE - dec_bytes));
+ memcpy(walk.dst.virt.addr, walk.src.virt.addr, ghashlen);
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ walk.stride = AES_BLOCK_SIZE;
+ err = skcipher_walk_done(&walk, (walk.nbytes - ghashlen));
+ }
+ }
+
+ //crypt and hash
+ while ((nbytes = dec_bytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ u8 *temp;
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
+ temp = walk.src.virt.addr;
+ while (dec_bytes) {
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)temp);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ dec_bytes -= AES_BLOCK_SIZE;
+ temp += 16;
+ }
+ dec_bytes = nbytes - (nbytes % AES_BLOCK_SIZE);
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ /* crypt and hash remaining bytes < AES_BLOCK_SIZE */
+ if ((dec_bytes = walk.nbytes)) {
+ memcpy(ctx->lastbuffer, walk.src.virt.addr, dec_bytes);
+ memset(ctx->lastbuffer + dec_bytes, 0, (AES_BLOCK_SIZE - dec_bytes));
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->lastbuffer);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ iv, walk.nbytes, CRYPTO_DIR_DECRYPT, 0);
+ err = skcipher_walk_done(&walk, 0);
+ }
+
+ //finalize and copy hash
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)&lengths);
+ gf128mul_4k_lle((be128 *)ctx->hash, ctx->gf128);
+ u128_xor((u128 *)ctx->hash, (u128 *)ctx->hash, (u128 *)ctx->block);
+
+ scatterwalk_map_and_copy(ctx->lastbuffer, req->src, req->cryptlen + req->assoclen - authsize, authsize, 0);
+ err = crypto_memneq(ctx->lastbuffer, ctx->hash, authsize) ? -EBADMSG : 0;
+
+ aead_request_complete(req, 0);
+
+ return err;
+}
+
+/*! \fn void aes_gcm_exit_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_aes_cbcmac_FUNCTIONS
+ * \brief free pointers in aes_ctx
+ * \param tfm linux crypto shash transform
+*/
+static void aes_gcm_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct aes_ctx *ctx = crypto_tfm_ctx(tfm);
+ if (ctx->gf128) gf128mul_free_4k(ctx->gf128);
+}
+
+/*
+ * \brief AES function mappings
+*/
+struct aead_alg ifxdeu_gcm_aes_alg = {
+ .base.cra_name = "gcm(aes)",
+ .base.cra_driver_name = "ifxdeu-gcm(aes)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct aes_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_gcm_aes_alg.base.cra_list),
+ .base.cra_exit = aes_gcm_exit_tfm,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ .setkey = aes_set_key_aead,
+ .encrypt = gcm_aes_encrypt,
+ .decrypt = gcm_aes_decrypt,
+ .setauthsize = gcm_aes_setauthsize,
+};
/*! \fn int ifxdeu_init_aes (void)
* \ingroup IFX_AES_FUNCTIONS
- * \brief function to initialize AES driver
- * \return ret
-*/
+ * \brief function to initialize AES driver
+ * \return ret
+*/
int ifxdeu_init_aes (void)
{
int ret = -ENOSYS;
+ aes_chip_init();
if ((ret = crypto_register_alg(&ifxdeu_aes_alg)))
goto aes_err;
- if ((ret = crypto_register_alg(&ifxdeu_ecb_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ifxdeu_ecb_aes_alg)))
goto ecb_aes_err;
- if ((ret = crypto_register_alg(&ifxdeu_cbc_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ifxdeu_cbc_aes_alg)))
goto cbc_aes_err;
- if ((ret = crypto_register_alg(&ifxdeu_ctr_basic_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ifxdeu_xts_aes_alg)))
+ goto xts_aes_err;
+
+ if ((ret = crypto_register_skcipher(&ifxdeu_ofb_aes_alg)))
+ goto ofb_aes_err;
+
+ if ((ret = crypto_register_skcipher(&ifxdeu_cfb_aes_alg)))
+ goto cfb_aes_err;
+
+ if ((ret = crypto_register_skcipher(&ifxdeu_ctr_basic_aes_alg)))
goto ctr_basic_aes_err;
- if ((ret = crypto_register_alg(&ifxdeu_ctr_rfc3686_aes_alg)))
+ if ((ret = crypto_register_skcipher(&ifxdeu_ctr_rfc3686_aes_alg)))
goto ctr_rfc3686_aes_err;
- aes_chip_init ();
+ if ((ret = crypto_register_shash(&ifxdeu_cbcmac_aes_alg)))
+ goto cbcmac_aes_err;
+
+ if ((ret = crypto_register_aead(&ifxdeu_gcm_aes_alg)))
+ goto gcm_aes_err;
CRTCL_SECT_INIT;
@@ -918,20 +1918,40 @@ int ifxdeu_init_aes (void)
printk (KERN_NOTICE "IFX DEU AES initialized%s%s.\n", disable_multiblock ? "" : " (multiblock)", disable_deudma ? "" : " (DMA)");
return ret;
+gcm_aes_err:
+ crypto_unregister_aead(&ifxdeu_gcm_aes_alg);
+ printk (KERN_ERR "IFX gcm_aes initialization failed!\n");
+ return ret;
+cbcmac_aes_err:
+ crypto_unregister_shash(&ifxdeu_cbcmac_aes_alg);
+ printk (KERN_ERR "IFX cbcmac_aes initialization failed!\n");
+ return ret;
ctr_rfc3686_aes_err:
- crypto_unregister_alg(&ifxdeu_ctr_rfc3686_aes_alg);
+ crypto_unregister_skcipher(&ifxdeu_ctr_rfc3686_aes_alg);
printk (KERN_ERR "IFX ctr_rfc3686_aes initialization failed!\n");
return ret;
ctr_basic_aes_err:
- crypto_unregister_alg(&ifxdeu_ctr_basic_aes_alg);
+ crypto_unregister_skcipher(&ifxdeu_ctr_basic_aes_alg);
printk (KERN_ERR "IFX ctr_basic_aes initialization failed!\n");
return ret;
+cfb_aes_err:
+ crypto_unregister_skcipher(&ifxdeu_cfb_aes_alg);
+ printk (KERN_ERR "IFX cfb_aes initialization failed!\n");
+ return ret;
+ofb_aes_err:
+ crypto_unregister_skcipher(&ifxdeu_ofb_aes_alg);
+ printk (KERN_ERR "IFX ofb_aes initialization failed!\n");
+ return ret;
+xts_aes_err:
+ crypto_unregister_skcipher(&ifxdeu_xts_aes_alg);
+ printk (KERN_ERR "IFX xts_aes initialization failed!\n");
+ return ret;
cbc_aes_err:
- crypto_unregister_alg(&ifxdeu_cbc_aes_alg);
+ crypto_unregister_skcipher(&ifxdeu_cbc_aes_alg);
printk (KERN_ERR "IFX cbc_aes initialization failed!\n");
return ret;
ecb_aes_err:
- crypto_unregister_alg(&ifxdeu_ecb_aes_alg);
+ crypto_unregister_skcipher(&ifxdeu_ecb_aes_alg);
printk (KERN_ERR "IFX aes initialization failed!\n");
return ret;
aes_err:
@@ -942,16 +1962,18 @@ aes_err:
/*! \fn void ifxdeu_fini_aes (void)
* \ingroup IFX_AES_FUNCTIONS
- * \brief unregister aes driver
-*/
+ * \brief unregister aes driver
+*/
void ifxdeu_fini_aes (void)
{
crypto_unregister_alg (&ifxdeu_aes_alg);
- crypto_unregister_alg (&ifxdeu_ecb_aes_alg);
- crypto_unregister_alg (&ifxdeu_cbc_aes_alg);
- crypto_unregister_alg (&ifxdeu_ctr_basic_aes_alg);
- crypto_unregister_alg (&ifxdeu_ctr_rfc3686_aes_alg);
-
+ crypto_unregister_skcipher (&ifxdeu_ecb_aes_alg);
+ crypto_unregister_skcipher (&ifxdeu_cbc_aes_alg);
+ crypto_unregister_skcipher (&ifxdeu_xts_aes_alg);
+ crypto_unregister_skcipher (&ifxdeu_ofb_aes_alg);
+ crypto_unregister_skcipher (&ifxdeu_cfb_aes_alg);
+ crypto_unregister_skcipher (&ifxdeu_ctr_basic_aes_alg);
+ crypto_unregister_skcipher (&ifxdeu_ctr_rfc3686_aes_alg);
+ crypto_unregister_shash (&ifxdeu_cbcmac_aes_alg);
+ crypto_unregister_aead (&ifxdeu_gcm_aes_alg);
}
-
-
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_arc4.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_arc4.c
index 9faad94016b..51f988fe43c 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_arc4.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_arc4.c
@@ -47,6 +47,7 @@
#include <linux/errno.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
@@ -88,7 +89,6 @@ struct arc4_ctx {
extern int disable_deudma;
extern int disable_multiblock;
-
/*! \fn static void _deu_arc4 (void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
\ingroup IFX_ARC4_FUNCTIONS
\brief main interface to ARC4 hardware
@@ -203,6 +203,19 @@ static int arc4_set_key(struct crypto_tfm *tfm, const u8 *inkey,
return 0;
}
+/*! \fn static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
+    \ingroup IFX_ARC4_FUNCTIONS
+    \brief sets ARC4 key via the skcipher interface (thin wrapper around arc4_set_key)
+    \param tfm linux crypto skcipher
+    \param inkey input key
+    \param key_len key length in bytes
+*/
+static int arc4_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *inkey,
+			unsigned int key_len)
+{
+	/* arc4_set_key() takes the base struct crypto_tfm, not the context pointer */
+	return arc4_set_key(crypto_skcipher_tfm(tfm), inkey, key_len);
+}
+
/*! \fn static void _deu_arc4_ecb(void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
\ingroup IFX_ARC4_FUNCTIONS
\brief sets ARC4 hardware to ECB mode
@@ -243,7 +256,7 @@ static struct crypto_alg ifxdeu_arc4_alg = {
.cra_name = "arc4",
.cra_driver_name = "ifxdeu-arc4",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = ARC4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct arc4_ctx),
.cra_module = THIS_MODULE,
@@ -259,61 +272,51 @@ static struct crypto_alg ifxdeu_arc4_alg = {
}
};
-/*! \fn static int ecb_arc4_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn static int ecb_arc4_encrypt(struct skcipher_request *req)
\ingroup IFX_ARC4_FUNCTIONS
- \brief ECB ARC4 encrypt using linux crypto blkcipher
- \param desc blkcipher descriptor
- \param dst output scatterlist
- \param src input scatterlist
- \param nbytes data size in bytes
-*/
-static int ecb_arc4_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+ \brief ECB ARC4 encrypt using linux crypto skcipher
+ \param req skcipher_request
+*/
+static int ecb_arc4_encrypt(struct skcipher_request *req)
{
- struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct arc4_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
DPRINTF(1, "\n");
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
_deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= ARC4_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-/*! \fn static int ecb_arc4_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
+/*! \fn static int ecb_arc4_decrypt(struct skcipher_request *req)
\ingroup IFX_ARC4_FUNCTIONS
- \brief ECB ARC4 decrypt using linux crypto blkcipher
- \param desc blkcipher descriptor
- \param dst output scatterlist
- \param src input scatterlist
- \param nbytes data size in bytes
-*/
-static int ecb_arc4_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+ \brief ECB ARC4 decrypt using linux crypto skcipher
+ \param desc skcipher_request
+*/
+static int ecb_arc4_decrypt(struct skcipher_request *req)
{
- struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct arc4_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
+ unsigned int nbytes;
int err;
DPRINTF(1, "\n");
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
_deu_arc4_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= ARC4_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -322,25 +325,20 @@ static int ecb_arc4_decrypt(struct blkcipher_desc *desc,
/*
* \brief ARC4 function mappings
*/
-static struct crypto_alg ifxdeu_ecb_arc4_alg = {
- .cra_name = "ecb(arc4)",
- .cra_driver_name = "ifxdeu-ecb(arc4)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = ARC4_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct arc4_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_ecb_arc4_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = ARC4_MIN_KEY_SIZE,
- .max_keysize = ARC4_MAX_KEY_SIZE,
- .setkey = arc4_set_key,
- .encrypt = ecb_arc4_encrypt,
- .decrypt = ecb_arc4_decrypt,
- }
- }
+static struct skcipher_alg ifxdeu_ecb_arc4_alg = {
+ .base.cra_name = "ecb(arc4)",
+ .base.cra_driver_name = "ifxdeu-ecb(arc4)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = ARC4_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct arc4_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ecb_arc4_alg.base.cra_list),
+ .min_keysize = ARC4_MIN_KEY_SIZE,
+ .max_keysize = ARC4_MAX_KEY_SIZE,
+ .setkey = arc4_set_key_skcipher,
+ .encrypt = ecb_arc4_encrypt,
+ .decrypt = ecb_arc4_decrypt,
};
/*! \fn int ifxdeu_init_arc4(void)
@@ -355,7 +353,7 @@ int ifxdeu_init_arc4(void)
if ((ret = crypto_register_alg(&ifxdeu_arc4_alg)))
goto arc4_err;
- if ((ret = crypto_register_alg(&ifxdeu_ecb_arc4_alg)))
+ if ((ret = crypto_register_skcipher(&ifxdeu_ecb_arc4_alg)))
goto ecb_arc4_err;
arc4_chip_init ();
@@ -370,7 +368,7 @@ arc4_err:
printk(KERN_ERR "IFX arc4 initialization failed!\n");
return ret;
ecb_arc4_err:
- crypto_unregister_alg(&ifxdeu_ecb_arc4_alg);
+ crypto_unregister_skcipher(&ifxdeu_ecb_arc4_alg);
printk (KERN_ERR "IFX ecb_arc4 initialization failed!\n");
return ret;
@@ -383,9 +381,7 @@ ecb_arc4_err:
void ifxdeu_fini_arc4(void)
{
crypto_unregister_alg (&ifxdeu_arc4_alg);
- crypto_unregister_alg (&ifxdeu_ecb_arc4_alg);
+ crypto_unregister_skcipher (&ifxdeu_ecb_arc4_alg);
}
-
-
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_async_aes.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_async_aes.c
index dcd059371fa..8184fed71fa 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_async_aes.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_async_aes.c
@@ -964,7 +964,7 @@ static struct lq_aes_alg aes_drivers_alg[] = {
.alg = {
.cra_name = "aes",
.cra_driver_name = "ifxdeu-aes",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -984,7 +984,7 @@ static struct lq_aes_alg aes_drivers_alg[] = {
.alg = {
.cra_name = "ecb(aes)",
.cra_driver_name = "ifxdeu-ecb(aes)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -1004,7 +1004,7 @@ static struct lq_aes_alg aes_drivers_alg[] = {
.alg = {
.cra_name = "cbc(aes)",
.cra_driver_name = "ifxdeu-cbc(aes)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -1024,7 +1024,7 @@ static struct lq_aes_alg aes_drivers_alg[] = {
.alg = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ifxdeu-ctr(aes)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -1044,7 +1044,7 @@ static struct lq_aes_alg aes_drivers_alg[] = {
.alg = {
.cra_name = "rfc3686(ctr(aes))",
.cra_driver_name = "ifxdeu-rfc3686(ctr(aes))",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aes_ctx),
.cra_type = &crypto_ablkcipher_type,
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_async_des.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_async_des.c
index 1523763ccd7..bd560bf6596 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_async_des.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_async_des.c
@@ -761,7 +761,7 @@ static struct lq_des_alg des_drivers_alg [] = {
.alg = {
.cra_name = "des",
.cra_driver_name = "lqdeu-des",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -782,7 +782,7 @@ static struct lq_des_alg des_drivers_alg [] = {
.alg = {
.cra_name = "ecb(des)",
.cra_driver_name = "lqdeu-ecb(des)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -802,7 +802,7 @@ static struct lq_des_alg des_drivers_alg [] = {
.alg = {
.cra_name = "cbc(des)",
.cra_driver_name = "lqdeu-cbc(des)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -822,7 +822,7 @@ static struct lq_des_alg des_drivers_alg [] = {
.alg = {
.cra_name = "des3_ede",
.cra_driver_name = "lqdeu-des3_ede",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -842,7 +842,7 @@ static struct lq_des_alg des_drivers_alg [] = {
.alg = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "lqdeu-ecb(des3_ede)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_type = &crypto_ablkcipher_type,
@@ -862,7 +862,7 @@ static struct lq_des_alg des_drivers_alg [] = {
.alg = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "lqdeu-cbc(des3_ede)",
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des_ctx),
.cra_type = &crypto_ablkcipher_type,
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c
index 6d7d82fcb92..953c3feddc7 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c
@@ -50,6 +50,8 @@
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <crypto/algapi.h>
+#include <crypto/des.h>
+#include <crypto/internal/skcipher.h>
#include "ifxmips_deu.h"
#if defined(CONFIG_DANUBE)
@@ -105,17 +107,18 @@ void des_dma_memory_copy(u32 *outcopy, u32 *out_dma, u8 *out_arg, int nbytes);
void ifx_deu_des (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
u8 *iv_arg, u32 nbytes, int encdec, int mode);
-struct des_ctx {
+struct ifx_deu_des_ctx {
int controlr_M;
int key_length;
u8 iv[DES_BLOCK_SIZE];
u32 expkey[DES3_EDE_EXPKEY_WORDS];
+ struct des_ctx des_context;
+ struct des3_ede_ctx des3_ede_context;
};
extern int disable_multiblock;
extern int disable_deudma;
-
/*! \fn int des_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
* \ingroup IFX_DES_FUNCTIONS
* \brief sets DES key
@@ -126,18 +129,42 @@ extern int disable_deudma;
int des_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
- struct des_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct ifx_deu_des_ctx *dctx = crypto_tfm_ctx(tfm);
+ int err;
//printk("setkey in %s\n", __FILE__);
+ err = des_expand_key(&dctx->des_context, key, keylen);
+ if (err == -ENOKEY) {
+ if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
+ err = -EINVAL;
+ else
+ err = 0;
+ }
+
dctx->controlr_M = 0; // des
dctx->key_length = keylen;
memcpy ((u8 *) (dctx->expkey), key, keylen);
- return 0;
+ if (err)
+ memset(dctx, 0, sizeof(*dctx));
+
+ return err;
}
+/*! \fn int des_setkey_skcipher (struct crypto_skcipher *tfm, const uint8_t *in_key, unsigned int key_len)
+ * \ingroup IFX_DES_FUNCTIONS
+ * \brief sets the DES key for the skcipher interface (thin wrapper around des_setkey)
+ * \param tfm linux crypto skcipher
+ * \param in_key input key
+ * \param key_len key length of 8 bytes (DES_KEY_SIZE)
+ * \return -EINVAL - bad key length or forbidden weak key, 0 - SUCCESS
+*/
+int des_setkey_skcipher (struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len)
+{
+	return des_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
/*! \fn void ifx_deu_des(void *ctx_arg, u8 *out_arg, const u8 *in_arg, u8 *iv_arg, u32 nbytes, int encdec, int mode)
* \ingroup IFX_DES_FUNCTIONS
@@ -155,7 +182,7 @@ void ifx_deu_des (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
u8 *iv_arg, u32 nbytes, int encdec, int mode)
{
volatile struct des_t *des = (struct des_t *) DES_3DES_START;
- struct des_ctx *dctx = ctx_arg;
+ struct ifx_deu_des_ctx *dctx = ctx_arg;
u32 *key = dctx->expkey;
unsigned long flag;
@@ -178,12 +205,13 @@ void ifx_deu_des (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
des->K3HR = DEU_ENDIAN_SWAP(*((u32 *) key + 4));
des->K3LR = DEU_ENDIAN_SWAP(*((u32 *) key + 5));
/* no break; */
-
+ fallthrough;
case 16:
des->K2HR = DEU_ENDIAN_SWAP(*((u32 *) key + 2));
des->K2LR = DEU_ENDIAN_SWAP(*((u32 *) key + 3));
/* no break; */
+ fallthrough;
case 8:
des->K1HR = DEU_ENDIAN_SWAP(*((u32 *) key + 0));
des->K1LR = DEU_ENDIAN_SWAP(*((u32 *) key + 1));
@@ -257,8 +285,6 @@ void ifx_deu_des (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
* \param mode operation mode such as ebc, cbc
*/
-
-
/*! \fn void ifx_deu_des_ecb (void *ctx, uint8_t *dst, const uint8_t *src, uint8_t *iv, size_t nbytes, int encdec, int inplace)
* \ingroup IFX_DES_FUNCTIONS
* \brief sets DES hardware to ECB mode
@@ -270,7 +296,6 @@ void ifx_deu_des (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
* \param encdec 1 for encrypt; 0 for decrypt
* \param inplace not used
*/
-
void ifx_deu_des_ecb (void *ctx, uint8_t *dst, const uint8_t *src,
uint8_t *iv, size_t nbytes, int encdec, int inplace)
{
@@ -345,31 +370,31 @@ void ifx_deu_des_ctr (void *ctx, uint8_t *dst, const uint8_t *src,
ifx_deu_des (ctx, dst, src, iv, nbytes, encdec, 4);
}
-/*! \fn void des_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
+/*! \fn void ifx_deu_des_encrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
* \ingroup IFX_DES_FUNCTIONS
* \brief encrypt DES_BLOCK_SIZE of data
* \param tfm linux crypto algo transform
* \param out output bytestream
* \param in input bytestream
*/
-void des_encrypt (struct crypto_tfm *tfm, uint8_t * out, const uint8_t * in)
+void ifx_deu_des_encrypt (struct crypto_tfm *tfm, uint8_t * out, const uint8_t * in)
{
- struct des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ifx_deu_des_ctx *ctx = crypto_tfm_ctx(tfm);
ifx_deu_des (ctx, out, in, NULL, DES_BLOCK_SIZE,
CRYPTO_DIR_ENCRYPT, 0);
}
-/*! \fn void des_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
+/*! \fn void ifx_deu_des_decrypt (struct crypto_tfm *tfm, uint8_t *out, const uint8_t *in)
* \ingroup IFX_DES_FUNCTIONS
* \brief encrypt DES_BLOCK_SIZE of data
* \param tfm linux crypto algo transform
* \param out output bytestream
* \param in input bytestream
*/
-void des_decrypt (struct crypto_tfm *tfm, uint8_t * out, const uint8_t * in)
+void ifx_deu_des_decrypt (struct crypto_tfm *tfm, uint8_t * out, const uint8_t * in)
{
- struct des_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct ifx_deu_des_ctx *ctx = crypto_tfm_ctx(tfm);
ifx_deu_des (ctx, out, in, NULL, DES_BLOCK_SIZE,
CRYPTO_DIR_DECRYPT, 0);
}
@@ -398,16 +423,41 @@ void des_decrypt (struct crypto_tfm *tfm, uint8_t * out, const uint8_t * in)
int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{
- struct des_ctx *dctx = crypto_tfm_ctx(tfm);
+ struct ifx_deu_des_ctx *dctx = crypto_tfm_ctx(tfm);
+ int err;
//printk("setkey in %s\n", __FILE__);
+ err = des3_ede_expand_key(&dctx->des3_ede_context, key, keylen);
+ if (err == -ENOKEY) {
+ if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)
+ err = -EINVAL;
+ else
+ err = 0;
+ }
+
dctx->controlr_M = keylen / 8 + 1; // 3DES EDE1 / EDE2 / EDE3 Mode
dctx->key_length = keylen;
memcpy ((u8 *) (dctx->expkey), key, keylen);
- return 0;
+ if (err)
+ memset(dctx, 0, sizeof(*dctx));
+
+ return err;
+}
+
+/*! \fn int des3_ede_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+ * \ingroup IFX_DES_FUNCTIONS
+ * \brief sets 3DES key
+ * \param tfm linux crypto skcipher transform
+ * \param key input key
+ * \param keylen key length
+*/
+int des3_ede_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return des3_ede_setkey(crypto_skcipher_tfm(tfm), key, keylen);
}
/*
@@ -417,9 +467,9 @@ struct crypto_alg ifxdeu_des_alg = {
.cra_name = "des",
.cra_driver_name = "ifxdeu-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_ctx),
+ .cra_ctxsize = sizeof(struct ifx_deu_des_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(ifxdeu_des_alg.cra_list),
@@ -427,8 +477,8 @@ struct crypto_alg ifxdeu_des_alg = {
.cia_min_keysize = DES_KEY_SIZE,
.cia_max_keysize = DES_KEY_SIZE,
.cia_setkey = des_setkey,
- .cia_encrypt = des_encrypt,
- .cia_decrypt = des_decrypt } }
+ .cia_encrypt = ifx_deu_des_encrypt,
+ .cia_decrypt = ifx_deu_des_decrypt } }
};
/*
@@ -438,79 +488,68 @@ struct crypto_alg ifxdeu_des3_ede_alg = {
.cra_name = "des3_ede",
.cra_driver_name = "ifxdeu-des3_ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_ctx),
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct ifx_deu_des_ctx),
.cra_module = THIS_MODULE,
.cra_alignmask = 3,
.cra_list = LIST_HEAD_INIT(ifxdeu_des3_ede_alg.cra_list),
.cra_u = { .cipher = {
- .cia_min_keysize = DES_KEY_SIZE,
- .cia_max_keysize = DES_KEY_SIZE,
+ .cia_min_keysize = DES3_EDE_KEY_SIZE,
+ .cia_max_keysize = DES3_EDE_KEY_SIZE,
.cia_setkey = des3_ede_setkey,
- .cia_encrypt = des_encrypt,
- .cia_decrypt = des_decrypt } }
+ .cia_encrypt = ifx_deu_des_encrypt,
+ .cia_decrypt = ifx_deu_des_decrypt } }
};
-/*! \fn int ecb_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
- * \ingroup IFX_DES_FUNCTIONS
- * \brief ECB DES encrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
-*/
-int ecb_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+/*! \fn int ecb_des_encrypt(struct skcipher_request *req)
+ * \ingroup IFX_DES_FUNCTIONS
+ * \brief ECB DES encrypt using linux crypto skcipher
+ * \param req skcipher request
+ * \return err
+*/
+int ecb_des_encrypt(struct skcipher_request *req)
{
- struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct ifx_deu_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int enc_bytes;
+ unsigned int enc_bytes, nbytes;
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = enc_bytes = walk.nbytes)) {
enc_bytes -= (nbytes % DES_BLOCK_SIZE);
ifx_deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
NULL, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-/*! \fn int ecb_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
- * \ingroup IFX_DES_FUNCTIONS
- * \brief ECB DES decrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+/*! \fn int ecb_des_decrypt(struct skcipher_request *req)
+ * \ingroup IFX_DES_FUNCTIONS
+ * \brief ECB DES decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int ecb_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int ecb_des_decrypt(struct skcipher_request *req)
{
- struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct ifx_deu_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int dec_bytes;
+ unsigned int dec_bytes, nbytes;
DPRINTF(1, "\n");
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = dec_bytes = walk.nbytes)) {
dec_bytes -= (nbytes % DES_BLOCK_SIZE);
ifx_deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
NULL, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -518,73 +557,57 @@ int ecb_des_decrypt(struct blkcipher_desc *desc,
/*
* \brief DES function mappings
-*/
-struct crypto_alg ifxdeu_ecb_des_alg = {
- .cra_name = "ecb(des)",
- .cra_driver_name = "ifxdeu-ecb(des)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_ecb_des_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = des_setkey,
- .encrypt = ecb_des_encrypt,
- .decrypt = ecb_des_decrypt,
- }
- }
+*/
+struct skcipher_alg ifxdeu_ecb_des_alg = {
+ .base.cra_name = "ecb(des)",
+ .base.cra_driver_name = "ifxdeu-ecb(des)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ifx_deu_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ecb_des_alg.base.cra_list),
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = ecb_des_encrypt,
+ .decrypt = ecb_des_decrypt,
};
/*
* \brief DES function mappings
-*/
-struct crypto_alg ifxdeu_ecb_des3_ede_alg = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "ifxdeu-ecb(des3_ede)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_ecb_des3_ede_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = des3_ede_setkey,
- .encrypt = ecb_des_encrypt,
- .decrypt = ecb_des_decrypt,
- }
- }
+*/
+struct skcipher_alg ifxdeu_ecb_des3_ede_alg = {
+ .base.cra_name = "ecb(des3_ede)",
+ .base.cra_driver_name = "ifxdeu-ecb(des3_ede)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ifx_deu_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_ecb_des3_ede_alg.base.cra_list),
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = des3_ede_setkey_skcipher,
+ .encrypt = ecb_des_encrypt,
+ .decrypt = ecb_des_decrypt,
};
-/*! \fn int cbc_des_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
- * \ingroup IFX_DES_FUNCTIONS
- * \brief CBC DES encrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+/*! \fn int cbc_des_encrypt(struct skcipher_request *req)
+ * \ingroup IFX_DES_FUNCTIONS
+ * \brief CBC DES encrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int cbc_des_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int cbc_des_encrypt(struct skcipher_request *req)
{
- struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct ifx_deu_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int enc_bytes;
+ unsigned int enc_bytes, nbytes;
DPRINTF(1, "\n");
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = enc_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
@@ -592,33 +615,27 @@ int cbc_des_encrypt(struct blkcipher_desc *desc,
ifx_deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
-/*! \fn int cbc_des_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes)
- * \ingroup IFX_DES_FUNCTIONS
- * \brief CBC DES decrypt using linux crypto blkcipher
- * \param desc blkcipher descriptor
- * \param dst output scatterlist
- * \param src input scatterlist
- * \param nbytes data size in bytes
+/*! \fn int cbc_des_decrypt(struct skcipher_request *req)
+ * \ingroup IFX_DES_FUNCTIONS
+ * \brief CBC DES decrypt using linux crypto skcipher
+ * \param req skcipher request
* \return err
-*/
-int cbc_des_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
+*/
+int cbc_des_decrypt(struct skcipher_request *req)
{
- struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
- struct blkcipher_walk walk;
+ struct ifx_deu_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct skcipher_walk walk;
int err;
- unsigned int dec_bytes;
+ unsigned int dec_bytes, nbytes;
DPRINTF(1, "\n");
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt(desc, &walk);
+ err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = dec_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
@@ -626,7 +643,7 @@ int cbc_des_decrypt(struct blkcipher_desc *desc,
ifx_deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
return err;
@@ -634,52 +651,42 @@ int cbc_des_decrypt(struct blkcipher_desc *desc,
/*
* \brief DES function mappings
-*/
-struct crypto_alg ifxdeu_cbc_des_alg = {
- .cra_name = "cbc(des)",
- .cra_driver_name = "ifxdeu-cbc(des)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_cbc_des_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des_setkey,
- .encrypt = cbc_des_encrypt,
- .decrypt = cbc_des_decrypt,
- }
- }
+*/
+struct skcipher_alg ifxdeu_cbc_des_alg = {
+ .base.cra_name = "cbc(des)",
+ .base.cra_driver_name = "ifxdeu-cbc(des)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ifx_deu_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_cbc_des_alg.base.cra_list),
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des_setkey_skcipher,
+ .encrypt = cbc_des_encrypt,
+ .decrypt = cbc_des_decrypt,
};
/*
* \brief DES function mappings
-*/
-struct crypto_alg ifxdeu_cbc_des3_ede_alg = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "ifxdeu-cbc(des3_ede)",
- .cra_priority = 400,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct des_ctx),
- .cra_type = &crypto_blkcipher_type,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(ifxdeu_cbc_des3_ede_alg.cra_list),
- .cra_u = {
- .blkcipher = {
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = des3_ede_setkey,
- .encrypt = cbc_des_encrypt,
- .decrypt = cbc_des_decrypt,
- }
- }
+*/
+struct skcipher_alg ifxdeu_cbc_des3_ede_alg = {
+ .base.cra_name = "cbc(des3_ede)",
+ .base.cra_driver_name = "ifxdeu-cbc(des3_ede)",
+ .base.cra_priority = 400,
+ .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ifx_deu_des_ctx),
+ .base.cra_module = THIS_MODULE,
+ .base.cra_list = LIST_HEAD_INIT(ifxdeu_cbc_des3_ede_alg.base.cra_list),
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = des3_ede_setkey_skcipher,
+ .encrypt = cbc_des_encrypt,
+ .decrypt = cbc_des_decrypt,
};
/*! \fn int ifxdeu_init_des (void)
@@ -690,16 +697,17 @@ int ifxdeu_init_des (void)
{
int ret = -ENOSYS;
+ des_chip_init();
ret = crypto_register_alg(&ifxdeu_des_alg);
if (ret < 0)
goto des_err;
- ret = crypto_register_alg(&ifxdeu_ecb_des_alg);
+ ret = crypto_register_skcipher(&ifxdeu_ecb_des_alg);
if (ret < 0)
goto ecb_des_err;
- ret = crypto_register_alg(&ifxdeu_cbc_des_alg);
+ ret = crypto_register_skcipher(&ifxdeu_cbc_des_alg);
if (ret < 0)
goto cbc_des_err;
@@ -707,15 +715,14 @@ int ifxdeu_init_des (void)
if (ret < 0)
goto des3_ede_err;
- ret = crypto_register_alg(&ifxdeu_ecb_des3_ede_alg);
+ ret = crypto_register_skcipher(&ifxdeu_ecb_des3_ede_alg);
if (ret < 0)
goto ecb_des3_ede_err;
- ret = crypto_register_alg(&ifxdeu_cbc_des3_ede_alg);
+ ret = crypto_register_skcipher(&ifxdeu_cbc_des3_ede_alg);
if (ret < 0)
goto cbc_des3_ede_err;
- des_chip_init();
CRTCL_SECT_INIT;
@@ -728,11 +735,11 @@ des_err:
printk(KERN_ERR "IFX des initialization failed!\n");
return ret;
ecb_des_err:
- crypto_unregister_alg(&ifxdeu_ecb_des_alg);
+ crypto_unregister_skcipher(&ifxdeu_ecb_des_alg);
printk (KERN_ERR "IFX ecb_des initialization failed!\n");
return ret;
cbc_des_err:
- crypto_unregister_alg(&ifxdeu_cbc_des_alg);
+ crypto_unregister_skcipher(&ifxdeu_cbc_des_alg);
printk (KERN_ERR "IFX cbc_des initialization failed!\n");
return ret;
des3_ede_err:
@@ -740,11 +747,11 @@ des3_ede_err:
printk(KERN_ERR "IFX des3_ede initialization failed!\n");
return ret;
ecb_des3_ede_err:
- crypto_unregister_alg(&ifxdeu_ecb_des3_ede_alg);
+ crypto_unregister_skcipher(&ifxdeu_ecb_des3_ede_alg);
printk (KERN_ERR "IFX ecb_des3_ede initialization failed!\n");
return ret;
cbc_des3_ede_err:
- crypto_unregister_alg(&ifxdeu_cbc_des3_ede_alg);
+ crypto_unregister_skcipher(&ifxdeu_cbc_des3_ede_alg);
printk (KERN_ERR "IFX cbc_des3_ede initialization failed!\n");
return ret;
@@ -757,11 +764,10 @@ cbc_des3_ede_err:
void ifxdeu_fini_des (void)
{
crypto_unregister_alg (&ifxdeu_des_alg);
- crypto_unregister_alg (&ifxdeu_ecb_des_alg);
- crypto_unregister_alg (&ifxdeu_cbc_des_alg);
+ crypto_unregister_skcipher (&ifxdeu_ecb_des_alg);
+ crypto_unregister_skcipher (&ifxdeu_cbc_des_alg);
crypto_unregister_alg (&ifxdeu_des3_ede_alg);
- crypto_unregister_alg (&ifxdeu_ecb_des3_ede_alg);
- crypto_unregister_alg (&ifxdeu_cbc_des3_ede_alg);
+ crypto_unregister_skcipher (&ifxdeu_ecb_des3_ede_alg);
+ crypto_unregister_skcipher (&ifxdeu_cbc_des3_ede_alg);
}
-
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.c
index 3947b31a40b..096b8b5bba8 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.c
@@ -46,6 +46,7 @@
#include <linux/modversions.h>
#endif
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -69,6 +70,8 @@
#endif /* CONFIG_xxxx */
int disable_deudma = 1;
+spinlock_t ltq_deu_hash_lock;
+EXPORT_SYMBOL_GPL(ltq_deu_hash_lock);
void chip_version(void);
@@ -84,6 +87,7 @@ static int ltq_deu_probe(struct platform_device *pdev)
START_DEU_POWER;
+ CRTCL_SECT_HASH_INIT;
#define IFX_DEU_DRV_VERSION "2.0.0"
printk(KERN_INFO "Infineon Technologies DEU driver version %s \n", IFX_DEU_DRV_VERSION);
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.h b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.h
index 8045c2081a6..3c994cb3465 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.h
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu.h
@@ -131,6 +131,10 @@ void __exit lqdeu_fini_async_des(void);
void __exit deu_fini (void);
int deu_dma_init (void);
+extern spinlock_t ltq_deu_hash_lock;
+#define CRTCL_SECT_HASH_INIT spin_lock_init(&ltq_deu_hash_lock)
+#define CRTCL_SECT_HASH_START spin_lock_irqsave(&ltq_deu_hash_lock, flag)
+#define CRTCL_SECT_HASH_END spin_unlock_irqrestore(&ltq_deu_hash_lock, flag)
#define DEU_WAKELIST_INIT(queue) \
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_ar9.h b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_ar9.h
index 69414553dea..2f373589a5c 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_ar9.h
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_ar9.h
@@ -117,6 +117,14 @@
hash->controlr.INIT = 1; \
} while(0)
+#define MD5_HASH_INIT \
+ do { \
+ volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START; \
+ hash->controlr.SM = 1; \
+ hash->controlr.ALGO = 1; \
+ hash->controlr.INIT = 1; \
+ } while(0)
+
/* DEU Common Structures for AR9*/
struct clc_controlr_t {
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_danube.h b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_danube.h
index 25efa04a696..25561cf6e08 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_danube.h
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_danube.h
@@ -104,6 +104,14 @@
hash->controlr.INIT = 1; \
} while(0)
+#define MD5_HASH_INIT \
+ do { \
+ volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START; \
+ hash->controlr.SM = 1; \
+ hash->controlr.ALGO = 1; \
+ hash->controlr.INIT = 1; \
+ } while(0)
+
/* DEU STRUCTURES */
struct clc_controlr_t {
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_vr9.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_vr9.c
index aaa7bce237b..8063672613a 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_vr9.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_deu_vr9.c
@@ -107,7 +107,7 @@ void aes_chip_init (void)
// start crypto engine with write to ILR
aes->controlr.SM = 1;
- aes->controlr.NDC = 0;
+ aes->controlr.NDC = 1;
asm("sync");
aes->controlr.ENDI = 1;
asm("sync");
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_md5.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_md5.c
index 11cb64799e9..ee7e486b565 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_md5.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_md5.c
@@ -64,11 +64,6 @@
#define MD5_HASH_WORDS 4
#define HASH_START IFX_HASH_CON
-static spinlock_t lock;
-#define CRTCL_SECT_INIT spin_lock_init(&lock)
-#define CRTCL_SECT_START spin_lock_irqsave(&lock, flag)
-#define CRTCL_SECT_END spin_unlock_irqrestore(&lock, flag)
-
//#define CRYPTO_DEBUG
#ifdef CRYPTO_DEBUG
extern char debug_level;
@@ -86,18 +81,6 @@ struct md5_ctx {
extern int disable_deudma;
-/*! \fn static u32 endian_swap(u32 input)
- * \ingroup IFX_MD5_FUNCTIONS
- * \brief perform dword level endian swap
- * \param input value of dword that requires to be swapped
-*/
-static u32 endian_swap(u32 input)
-{
- u8 *ptr = (u8 *)&input;
-
- return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
-}
-
/*! \fn static void md5_transform(u32 *hash, u32 const *in)
* \ingroup IFX_MD5_FUNCTIONS
* \brief main interface to md5 hardware
@@ -110,18 +93,20 @@ static void md5_transform(struct md5_ctx *mctx, u32 *hash, u32 const *in)
volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
unsigned long flag;
- CRTCL_SECT_START;
+ CRTCL_SECT_HASH_START;
+
+ MD5_HASH_INIT;
if (mctx->started) {
- hashs->D1R = endian_swap(*((u32 *) hash + 0));
- hashs->D2R = endian_swap(*((u32 *) hash + 1));
- hashs->D3R = endian_swap(*((u32 *) hash + 2));
- hashs->D4R = endian_swap(*((u32 *) hash + 3));
+ hashs->D1R = *((u32 *) hash + 0);
+ hashs->D2R = *((u32 *) hash + 1);
+ hashs->D3R = *((u32 *) hash + 2);
+ hashs->D4R = *((u32 *) hash + 3);
}
for (i = 0; i < 16; i++) {
- hashs->MR = endian_swap(in[i]);
-// printk("in[%d]: %08x\n", i, endian_swap(in[i]));
+ hashs->MR = in[i];
+// printk("in[%d]: %08x\n", i, in[i]);
};
//wait for processing
@@ -129,14 +114,14 @@ static void md5_transform(struct md5_ctx *mctx, u32 *hash, u32 const *in)
// this will not take long
}
- *((u32 *) hash + 0) = endian_swap (hashs->D1R);
- *((u32 *) hash + 1) = endian_swap (hashs->D2R);
- *((u32 *) hash + 2) = endian_swap (hashs->D3R);
- *((u32 *) hash + 3) = endian_swap (hashs->D4R);
+ *((u32 *) hash + 0) = hashs->D1R;
+ *((u32 *) hash + 1) = hashs->D2R;
+ *((u32 *) hash + 2) = hashs->D3R;
+ *((u32 *) hash + 3) = hashs->D4R;
- mctx->started = 1;
+ CRTCL_SECT_HASH_END;
- CRTCL_SECT_END;
+ mctx->started = 1;
}
/*! \fn static inline void md5_transform_helper(struct md5_ctx *ctx)
@@ -158,12 +143,7 @@ static inline void md5_transform_helper(struct md5_ctx *ctx)
static int md5_init(struct shash_desc *desc)
{
struct md5_ctx *mctx = shash_desc_ctx(desc);
- volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START;
-
- hash->controlr.ENDI = 0;
- hash->controlr.SM = 1;
- hash->controlr.ALGO = 1; // 1 = md5 0 = sha1
- hash->controlr.INIT = 1; // Initialize the hash operation by writing a '1' to the INIT bit.
+ //volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START;
mctx->byte_count = 0;
mctx->started = 0;
@@ -220,8 +200,8 @@ static int md5_final(struct shash_desc *desc, u8 *out)
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
- volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
- unsigned long flag;
+ //volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
+ //unsigned long flag;
*p++ = 0x80;
if (padding < 0) {
@@ -232,24 +212,12 @@ static int md5_final(struct shash_desc *desc, u8 *out)
}
memset(p, 0, padding);
- mctx->block[14] = endian_swap(mctx->byte_count << 3);
- mctx->block[15] = endian_swap(mctx->byte_count >> 29);
-
-#if 0
- le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
- sizeof(u64)) / sizeof(u32));
-#endif
+ mctx->block[14] = le32_to_cpu(mctx->byte_count << 3);
+ mctx->block[15] = le32_to_cpu(mctx->byte_count >> 29);
md5_transform(mctx, mctx->hash, mctx->block);
- CRTCL_SECT_START;
-
- *((u32 *) out + 0) = endian_swap (hashs->D1R);
- *((u32 *) out + 1) = endian_swap (hashs->D2R);
- *((u32 *) out + 2) = endian_swap (hashs->D3R);
- *((u32 *) out + 3) = endian_swap (hashs->D4R);
-
- CRTCL_SECT_END;
+ memcpy(out, mctx->hash, MD5_DIGEST_SIZE);
// Wipe context
memset(mctx, 0, sizeof(*mctx));
@@ -270,7 +238,7 @@ static struct shash_alg ifxdeu_md5_alg = {
.cra_name = "md5",
.cra_driver_name= "ifxdeu-md5",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_HASH,
+ .cra_flags = CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -288,8 +256,6 @@ int ifxdeu_init_md5 (void)
if ((ret = crypto_register_shash(&ifxdeu_md5_alg)))
goto md5_err;
- CRTCL_SECT_INIT;
-
printk (KERN_NOTICE "IFX DEU MD5 initialized%s.\n", disable_deudma ? "" : " (DMA)");
return ret;
@@ -308,4 +274,3 @@ void ifxdeu_fini_md5 (void)
crypto_unregister_shash(&ifxdeu_md5_alg);
}
-
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_md5_hmac.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_md5_hmac.c
index 6cb2e5a4174..109d27cbfbe 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_md5_hmac.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_md5_hmac.c
@@ -63,11 +63,6 @@
#define MD5_HMAC_DBN_TEMP_SIZE 1024 // size in dword, needed for dbn workaround
#define HASH_START IFX_HASH_CON
-static spinlock_t lock;
-#define CRTCL_SECT_INIT spin_lock_init(&lock)
-#define CRTCL_SECT_START spin_lock_irqsave(&lock, flag)
-#define CRTCL_SECT_END spin_unlock_irqrestore(&lock, flag)
-
//#define CRYPTO_DEBUG
#ifdef CRYPTO_DEBUG
extern char debug_level;
@@ -84,24 +79,15 @@ struct md5_hmac_ctx {
u32 block[MD5_BLOCK_WORDS];
u64 byte_count;
u32 dbn;
+ int started;
unsigned int keylen;
+ struct shash_desc *desc;
+ u32 (*temp)[MD5_BLOCK_WORDS];
};
-static u32 temp[MD5_HMAC_DBN_TEMP_SIZE];
-
extern int disable_deudma;
-/*! \fn static u32 endian_swap(u32 input)
- * \ingroup IFX_MD5_HMAC_FUNCTIONS
- * \brief perform dword level endian swap
- * \param input value of dword that requires to be swapped
-*/
-static u32 endian_swap(u32 input)
-{
- u8 *ptr = (u8 *)&input;
-
- return ((ptr[3] << 24) | (ptr[2] << 16) | (ptr[1] << 8) | ptr[0]);
-}
+static int md5_hmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final);
/*! \fn static void md5_hmac_transform(struct crypto_tfm *tfm, u32 const *in)
* \ingroup IFX_MD5_HMAC_FUNCTIONS
@@ -113,14 +99,14 @@ static void md5_hmac_transform(struct shash_desc *desc, u32 const *in)
{
struct md5_hmac_ctx *mctx = crypto_shash_ctx(desc->tfm);
- memcpy(&temp[mctx->dbn<<4], in, 64); //dbn workaround
- mctx->dbn += 1;
-
- if ( (mctx->dbn<<4) > MD5_HMAC_DBN_TEMP_SIZE )
+ if ( ((mctx->dbn<<4)+1) > MD5_HMAC_DBN_TEMP_SIZE )
{
- printk("MD5_HMAC_DBN_TEMP_SIZE exceeded\n");
+ //printk("MD5_HMAC_DBN_TEMP_SIZE exceeded\n");
+ md5_hmac_final_impl(desc, (u8 *)mctx->hash, false);
}
+ memcpy(&mctx->temp[mctx->dbn], in, 64); //dbn workaround
+ mctx->dbn += 1;
}
/*! \fn int md5_hmac_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
@@ -133,23 +119,30 @@ static void md5_hmac_transform(struct shash_desc *desc, u32 const *in)
static int md5_hmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
{
struct md5_hmac_ctx *mctx = crypto_shash_ctx(tfm);
- volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START;
+ int err;
//printk("copying keys to context with length %d\n", keylen);
if (keylen > MAX_HASH_KEYLEN) {
- printk("Key length more than what DEU hash can handle\n");
- return -EINVAL;
- }
-
+ char *hash_alg_name = "md5";
- hash->KIDX |= 0x80000000; // reset all 16 words of the key to '0'
- memcpy(&mctx->key, key, keylen);
- mctx->keylen = keylen;
+ mctx->desc->tfm = crypto_alloc_shash(hash_alg_name, 0, 0);
+ if (IS_ERR(mctx->desc->tfm)) return PTR_ERR(mctx->desc->tfm);
- return 0;
+ memset(mctx->key, 0, MAX_HASH_KEYLEN);
+ err = crypto_shash_digest(mctx->desc, key, keylen, mctx->key);
+ if (err) return err;
-}
+ mctx->keylen = MD5_DIGEST_SIZE;
+ crypto_free_shash(mctx->desc->tfm);
+ } else {
+ memcpy(mctx->key, key, keylen);
+ mctx->keylen = keylen;
+ }
+ memset(mctx->key + mctx->keylen, 0, MAX_HASH_KEYLEN - mctx->keylen);
+
+ return 0;
+}
/*! \fn int md5_hmac_setkey_hw(const u8 *key, unsigned int keylen)
* \ingroup IFX_MD5_HMAC_FUNCTIONS
@@ -157,17 +150,15 @@ static int md5_hmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int
* \param key input key
* \param keylen key length greater than 64 bytes IS NOT SUPPORTED
*/
-
static int md5_hmac_setkey_hw(const u8 *key, unsigned int keylen)
{
volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START;
- unsigned long flag;
int i, j;
u32 *in_key = (u32 *)key;
//printk("\nsetkey keylen: %d\n key: ", keylen);
- CRTCL_SECT_START;
+ hash->KIDX |= 0x80000000; // reset all 16 words of the key to '0'
j = 0;
for (i = 0; i < keylen; i+=4)
{
@@ -177,7 +168,6 @@ static int md5_hmac_setkey_hw(const u8 *key, unsigned int keylen)
asm("sync");
j++;
}
- CRTCL_SECT_END;
return 0;
}
@@ -194,11 +184,11 @@ static int md5_hmac_init(struct shash_desc *desc)
mctx->dbn = 0; //dbn workaround
- md5_hmac_setkey_hw(mctx->key, mctx->keylen);
+ mctx->started = 0;
+ mctx->byte_count = 0;
return 0;
}
-EXPORT_SYMBOL(md5_hmac_init);
/*! \fn void md5_hmac_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
* \ingroup IFX_MD5_HMAC_FUNCTIONS
@@ -237,16 +227,27 @@ static int md5_hmac_update(struct shash_desc *desc, const u8 *data, unsigned int
memcpy(mctx->block, data, len);
return 0;
}
-EXPORT_SYMBOL(md5_hmac_update);
-/*! \fn void md5_hmac_final(struct crypto_tfm *tfm, u8 *out)
+/*! \fn static int md5_hmac_final(struct shash_desc *desc, u8 *out)
* \ingroup IFX_MD5_HMAC_FUNCTIONS
- * \brief compute final md5 hmac value
+ * \brief call md5_hmac_final_impl with hash_final true
* \param tfm linux crypto algo transform
* \param out final md5 hmac output value
*/
static int md5_hmac_final(struct shash_desc *desc, u8 *out)
{
+ return md5_hmac_final_impl(desc, out, true);
+}
+
+/*! \fn static int md5_hmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
+ * \ingroup IFX_MD5_HMAC_FUNCTIONS
+ * \brief compute final or intermediate md5 hmac value
+ * \param desc linux crypto shash descriptor
+ * \param out final md5 hmac output value
+ * \param hash_final finalize (true) or intermediate (false) processing
+*/
+static int md5_hmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
+{
struct md5_hmac_ctx *mctx = crypto_shash_ctx(desc->tfm);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
@@ -255,27 +256,36 @@ static int md5_hmac_final(struct shash_desc *desc, u8 *out)
unsigned long flag;
int i = 0;
int dbn;
- u32 *in = &temp[0];
+ u32 *in = mctx->temp[0];
+
+ if (hash_final) {
+ *p++ = 0x80;
+ if (padding < 0) {
+ memset(p, 0x00, padding + sizeof (u64));
+ md5_hmac_transform(desc, mctx->block);
+ p = (char *)mctx->block;
+ padding = 56;
+ }
+ memset(p, 0, padding);
+ mctx->block[14] = le32_to_cpu((mctx->byte_count + 64) << 3); // need to add 512 bit of the IPAD operation
+ mctx->block[15] = 0x00000000;
- *p++ = 0x80;
- if (padding < 0) {
- memset(p, 0x00, padding + sizeof (u64));
md5_hmac_transform(desc, mctx->block);
- p = (char *)mctx->block;
- padding = 56;
}
- memset(p, 0, padding);
- mctx->block[14] = endian_swap((mctx->byte_count + 64) << 3); // need to add 512 bit of the IPAD operation
- mctx->block[15] = 0x00000000;
+ CRTCL_SECT_HASH_START;
- md5_hmac_transform(desc, mctx->block);
+ MD5_HASH_INIT;
- CRTCL_SECT_START;
+ md5_hmac_setkey_hw(mctx->key, mctx->keylen);
//printk("\ndbn = %d\n", mctx->dbn);
- hashs->DBN = mctx->dbn;
+ if (hash_final) {
+ hashs->DBN = mctx->dbn;
+ } else {
+ hashs->DBN = mctx->dbn + 5;
+ }
asm("sync");
*IFX_HASH_CON = 0x0703002D; //khs, go, init, ndc, endi, kyue, hmen, md5
@@ -285,6 +295,15 @@ static int md5_hmac_final(struct shash_desc *desc, u8 *out)
// this will not take long
}
+ if (mctx->started) {
+ hashs->D1R = *((u32 *) mctx->hash + 0);
+ hashs->D2R = *((u32 *) mctx->hash + 1);
+ hashs->D3R = *((u32 *) mctx->hash + 2);
+ hashs->D4R = *((u32 *) mctx->hash + 3);
+ } else {
+ mctx->started = 1;
+ }
+
for (dbn = 0; dbn < mctx->dbn; dbn++)
{
for (i = 0; i < 16; i++) {
@@ -302,11 +321,12 @@ static int md5_hmac_final(struct shash_desc *desc, u8 *out)
in += 16;
}
-
#if 1
- //wait for digest ready
- while (! hashs->controlr.DGRY) {
- // this will not take long
+ if (hash_final) {
+ //wait for digest ready
+ while (! hashs->controlr.DGRY) {
+ // this will not take long
+ }
}
#endif
@@ -314,26 +334,49 @@ static int md5_hmac_final(struct shash_desc *desc, u8 *out)
*((u32 *) out + 1) = hashs->D2R;
*((u32 *) out + 2) = hashs->D3R;
*((u32 *) out + 3) = hashs->D4R;
- *((u32 *) out + 4) = hashs->D5R;
- /* reset the context after we finish with the hash */
- mctx->byte_count = 0;
- memset(&mctx->hash[0], 0, sizeof(MD5_HASH_WORDS));
- memset(&mctx->block[0], 0, sizeof(MD5_BLOCK_WORDS));
- memset(&temp[0], 0, MD5_HMAC_DBN_TEMP_SIZE);
+ CRTCL_SECT_HASH_END;
- CRTCL_SECT_END;
+ if (hash_final) {
+ /* reset the context after we finish with the hash */
+ md5_hmac_init(desc);
+ } else {
+ mctx->dbn = 0;
+ }
+ return 0;
+}
+/*! \fn int md5_hmac_init_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_MD5_HMAC_FUNCTIONS
+ * \brief initialize pointers in md5_hmac_ctx
+ * \param tfm linux crypto algo transform
+*/
+static int md5_hmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct md5_hmac_ctx *mctx = crypto_tfm_ctx(tfm);
+ mctx->temp = kzalloc(4 * MD5_HMAC_DBN_TEMP_SIZE, GFP_KERNEL);
+ if (!mctx->temp) return -ENOMEM;
+ mctx->desc = kzalloc(sizeof(struct shash_desc), GFP_KERNEL);
+ if (!mctx->desc) { kfree(mctx->temp); return -ENOMEM; }
- return 0;
+ return 0;
}
-EXPORT_SYMBOL(md5_hmac_final);
+/*! \fn void md5_hmac_exit_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_MD5_HMAC_FUNCTIONS
+ * \brief free pointers in md5_hmac_ctx
+ * \param tfm linux crypto algo transform
+*/
+static void md5_hmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct md5_hmac_ctx *mctx = crypto_tfm_ctx(tfm);
+ kfree(mctx->temp);
+ kfree(mctx->desc);
+}
/*
* \brief MD5_HMAC function mappings
*/
-
static struct shash_alg ifxdeu_md5_hmac_alg = {
.digestsize = MD5_DIGEST_SIZE,
.init = md5_hmac_init,
@@ -345,10 +388,12 @@ static struct shash_alg ifxdeu_md5_hmac_alg = {
.cra_name = "hmac(md5)",
.cra_driver_name= "ifxdeu-md5_hmac",
.cra_priority = 400,
- .cra_ctxsize = sizeof(struct md5_hmac_ctx),
- .cra_flags = CRYPTO_ALG_TYPE_HASH,
+ .cra_ctxsize = sizeof(struct md5_hmac_ctx),
+ .cra_flags = CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
+ .cra_init = md5_hmac_init_tfm,
+ .cra_exit = md5_hmac_exit_tfm,
}
};
@@ -365,8 +410,6 @@ int ifxdeu_init_md5_hmac (void)
if ((ret = crypto_register_shash(&ifxdeu_md5_hmac_alg)))
goto md5_hmac_err;
- CRTCL_SECT_INIT;
-
printk (KERN_NOTICE "IFX DEU MD5_HMAC initialized%s.\n", disable_deudma ? "" : " (DMA)");
return ret;
@@ -383,5 +426,3 @@ void ifxdeu_fini_md5_hmac (void)
{
crypto_unregister_shash(&ifxdeu_md5_hmac_alg);
}
-
-
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1.c
index d711c4804db..76734917d1e 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1.c
@@ -38,14 +38,18 @@
\brief ifx deu sha1 functions
*/
-
/* Project header */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
#include <crypto/sha.h>
+#else
+#include <crypto/sha1.h>
+#endif
+#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
@@ -65,11 +69,6 @@
#define SHA1_HMAC_BLOCK_SIZE 64
#define HASH_START IFX_HASH_CON
-static spinlock_t lock;
-#define CRTCL_SECT_INIT spin_lock_init(&lock)
-#define CRTCL_SECT_START spin_lock_irqsave(&lock, flag)
-#define CRTCL_SECT_END spin_unlock_irqrestore(&lock, flag)
-
//#define CRYPTO_DEBUG
#ifdef CRYPTO_DEBUG
extern char debug_level;
@@ -91,20 +90,21 @@ struct sha1_ctx {
extern int disable_deudma;
-
-/*! \fn static void sha1_transform (u32 *state, const u32 *in)
+/*! \fn static void sha1_transform1 (u32 *state, const u32 *in)
* \ingroup IFX_SHA1_FUNCTIONS
* \brief main interface to sha1 hardware
* \param state current state
* \param in 64-byte block of input
*/
-static void sha1_transform (struct sha1_ctx *sctx, u32 *state, const u32 *in)
+static void sha1_transform1 (struct sha1_ctx *sctx, u32 *state, const u32 *in)
{
int i = 0;
volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
unsigned long flag;
- CRTCL_SECT_START;
+ CRTCL_SECT_HASH_START;
+
+ SHA_HASH_INIT;
/* For context switching purposes, the previous hash output
* is loaded back into the output register
@@ -137,20 +137,18 @@ static void sha1_transform (struct sha1_ctx *sctx, u32 *state, const u32 *in)
sctx->started = 1;
- CRTCL_SECT_END;
+ CRTCL_SECT_HASH_END;
}
-/*! \fn static void sha1_init(struct crypto_tfm *tfm)
+/*! \fn static void sha1_init1(struct crypto_tfm *tfm)
* \ingroup IFX_SHA1_FUNCTIONS
* \brief initialize sha1 hardware
* \param tfm linux crypto algo transform
*/
-static int sha1_init(struct shash_desc *desc)
+static int sha1_init1(struct shash_desc *desc)
{
struct sha1_ctx *sctx = shash_desc_ctx(desc);
- SHA_HASH_INIT;
-
sctx->started = 0;
sctx->count = 0;
return 0;
@@ -174,9 +172,9 @@ static int sha1_update(struct shash_desc * desc, const u8 *data,
if ((j + len) > 63) {
memcpy (&sctx->buffer[j], data, (i = 64 - j));
- sha1_transform (sctx, sctx->state, (const u32 *)sctx->buffer);
+ sha1_transform1 (sctx, sctx->state, (const u32 *)sctx->buffer);
for (; i + 63 < len; i += 64) {
- sha1_transform (sctx, sctx->state, (const u32 *)&data[i]);
+ sha1_transform1 (sctx, sctx->state, (const u32 *)&data[i]);
}
j = 0;
@@ -201,8 +199,8 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
u64 t;
u8 bits[8] = { 0, };
static const u8 padding[64] = { 0x80, };
- volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
- unsigned long flag;
+ //volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
+ //unsigned long flag;
t = sctx->count;
bits[7] = 0xff & t;
@@ -229,15 +227,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Append length */
sha1_update (desc, bits, sizeof bits);
- CRTCL_SECT_START;
-
- *((u32 *) out + 0) = hashs->D1R;
- *((u32 *) out + 1) = hashs->D2R;
- *((u32 *) out + 2) = hashs->D3R;
- *((u32 *) out + 3) = hashs->D4R;
- *((u32 *) out + 4) = hashs->D5R;
-
- CRTCL_SECT_END;
+ memcpy(out, sctx->hash, SHA1_DIGEST_SIZE);
// Wipe context
memset (sctx, 0, sizeof *sctx);
@@ -250,7 +240,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
*/
static struct shash_alg ifxdeu_sha1_alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
+ .init = sha1_init1,
.update = sha1_update,
.final = sha1_final,
.descsize = sizeof(struct sha1_ctx),
@@ -259,7 +249,7 @@ static struct shash_alg ifxdeu_sha1_alg = {
.cra_name = "sha1",
.cra_driver_name= "ifxdeu-sha1",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_HASH,
+ .cra_flags = CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
@@ -278,8 +268,6 @@ int ifxdeu_init_sha1 (void)
if ((ret = crypto_register_shash(&ifxdeu_sha1_alg)))
goto sha1_err;
- CRTCL_SECT_INIT;
-
printk (KERN_NOTICE "IFX DEU SHA1 initialized%s.\n", disable_deudma ? "" : " (DMA)");
return ret;
@@ -298,5 +286,3 @@ void ifxdeu_fini_sha1 (void)
}
-
-
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1_hmac.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1_hmac.c
index 7776c51686a..b58a91a5df7 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1_hmac.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_sha1_hmac.c
@@ -38,14 +38,18 @@
\brief ifx sha1 hmac functions
*/
-
/* Project header */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/crypto.h>
-#include <linux/cryptohash.h>
#include <crypto/internal/hash.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+#include <crypto/sha.h>
+#else
+#include <crypto/sha1.h>
+#endif
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
@@ -60,17 +64,14 @@
#endif
#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_WORDS 16
+#define SHA1_HASH_WORDS 5
#define SHA1_HMAC_BLOCK_SIZE 64
#define SHA1_HMAC_DBN_TEMP_SIZE 1024 // size in dword, needed for dbn workaround
#define HASH_START IFX_HASH_CON
#define SHA1_HMAC_MAX_KEYLEN 64
-static spinlock_t lock;
-#define CRTCL_SECT_INIT spin_lock_init(&lock)
-#define CRTCL_SECT_START spin_lock_irqsave(&lock, flag)
-#define CRTCL_SECT_END spin_unlock_irqrestore(&lock, flag)
-
#ifdef CRYPTO_DEBUG
extern char debug_level;
#define DPRINTF(level, format, args...) if (level < debug_level) printk(KERN_INFO "[%s %s %d]: " format, __FILE__, __func__, __LINE__, ##args);
@@ -83,16 +84,19 @@ struct sha1_hmac_ctx {
u8 buffer[SHA1_HMAC_BLOCK_SIZE];
u8 key[SHA1_HMAC_MAX_KEYLEN];
- u32 state[5];
+ u32 hash[SHA1_HASH_WORDS];
u32 dbn;
+ int started;
u64 count;
+ struct shash_desc *desc;
+ u32 (*temp)[SHA1_BLOCK_WORDS];
};
-static u32 temp[SHA1_HMAC_DBN_TEMP_SIZE];
-
extern int disable_deudma;
+static int sha1_hmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final);
+
/*! \fn static void sha1_hmac_transform(struct crypto_tfm *tfm, u32 const *in)
* \ingroup IFX_SHA1_HMAC_FUNCTIONS
* \brief save input block to context
@@ -103,14 +107,15 @@ static int sha1_hmac_transform(struct shash_desc *desc, u32 const *in)
{
struct sha1_hmac_ctx *sctx = crypto_shash_ctx(desc->tfm);
- memcpy(&temp[sctx->dbn<<4], in, 64); //dbn workaround
- sctx->dbn += 1;
-
- if ( (sctx->dbn<<4) > SHA1_HMAC_DBN_TEMP_SIZE )
+ if ( ((sctx->dbn<<4)+1) > SHA1_HMAC_DBN_TEMP_SIZE )
{
- printk("SHA1_HMAC_DBN_TEMP_SIZE exceeded\n");
+ //printk("SHA1_HMAC_DBN_TEMP_SIZE exceeded\n");
+ sha1_hmac_final_impl(desc, (u8 *)sctx->hash, false);
}
-
+
+ memcpy(&sctx->temp[sctx->dbn], in, 64); //dbn workaround
+ sctx->dbn += 1;
+
return 0;
}
@@ -124,24 +129,32 @@ static int sha1_hmac_transform(struct shash_desc *desc, u32 const *in)
static int sha1_hmac_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen)
{
struct sha1_hmac_ctx *sctx = crypto_shash_ctx(tfm);
- volatile struct deu_hash_t *hashs = (struct deu_hash_t *) HASH_START;
-
+ int err;
+
if (keylen > SHA1_HMAC_MAX_KEYLEN) {
- printk("Key length exceeds maximum key length\n");
- return -EINVAL;
+ char *hash_alg_name = "sha1";
+
+ sctx->desc->tfm = crypto_alloc_shash(hash_alg_name, 0, 0);
+ if (IS_ERR(sctx->desc->tfm)) return PTR_ERR(sctx->desc->tfm);
+
+ memset(sctx->key, 0, SHA1_HMAC_MAX_KEYLEN);
+ err = crypto_shash_digest(sctx->desc, key, keylen, sctx->key);
+ if (err) { crypto_free_shash(sctx->desc->tfm); return err; }
+
+ sctx->keylen = SHA1_DIGEST_SIZE;
+
+ crypto_free_shash(sctx->desc->tfm);
+ } else {
+ memcpy(sctx->key, key, keylen);
+ sctx->keylen = keylen;
}
+ memset(sctx->key + sctx->keylen, 0, SHA1_HMAC_MAX_KEYLEN - sctx->keylen);
//printk("Setting keys of len: %d\n", keylen);
-
- hashs->KIDX |= 0x80000000; //reset keys back to 0
- memcpy(&sctx->key, key, keylen);
- sctx->keylen = keylen;
return 0;
-
}
-
/*! \fn int sha1_hmac_setkey_hw(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
* \ingroup IFX_SHA1_HMAC_FUNCTIONS
* \brief sets sha1 hmac key into hw registers
@@ -153,12 +166,11 @@ static int sha1_hmac_setkey_hw(const u8 *key, unsigned int keylen)
{
volatile struct deu_hash_t *hash = (struct deu_hash_t *) HASH_START;
int i, j;
- unsigned long flag;
u32 *in_key = (u32 *)key;
j = 0;
- CRTCL_SECT_START;
+ hash->KIDX |= 0x80000000; //reset keys back to 0
for (i = 0; i < keylen; i+=4)
{
hash->KIDX = j;
@@ -167,7 +179,6 @@ static int sha1_hmac_setkey_hw(const u8 *key, unsigned int keylen)
j++;
}
- CRTCL_SECT_END;
return 0;
}
@@ -182,7 +193,8 @@ static int sha1_hmac_init(struct shash_desc *desc)
//printk("debug ln: %d, fn: %s\n", __LINE__, __func__);
sctx->dbn = 0; //dbn workaround
- sha1_hmac_setkey_hw(sctx->key, sctx->keylen);
+ sctx->started = 0;
+ sctx->count = 0;
return 0;
}
@@ -220,15 +232,26 @@ static int sha1_hmac_update(struct shash_desc *desc, const u8 *data,
return 0;
}
-/*! \fn static void sha1_hmac_final(struct crypto_tfm *tfm, u8 *out)
+/*! \fn static int sha1_hmac_final(struct crypto_tfm *tfm, u8 *out)
* \ingroup IFX_SHA1_HMAC_FUNCTIONS
- * \brief ompute final sha1 hmac value
+ * \brief call sha1_hmac_final_impl with hash_final true
* \param tfm linux crypto algo transform
* \param out final sha1 hmac output value
*/
static int sha1_hmac_final(struct shash_desc *desc, u8 *out)
{
- //struct sha1_hmac_ctx *sctx = shash_desc_ctx(desc);
+ return sha1_hmac_final_impl(desc, out, true);
+}
+
+/*! \fn static int sha1_hmac_final_impl(struct crypto_tfm *tfm, u8 *out, bool hash_final)
+ * \ingroup IFX_SHA1_HMAC_FUNCTIONS
+ * \brief ompute final or intermediate sha1 hmac value
+ * \param tfm linux crypto algo transform
+ * \param out final sha1 hmac output value
+ * \param in finalize or intermediate processing
+*/
+static int sha1_hmac_final_impl(struct shash_desc *desc, u8 *out, bool hash_final)
+{
struct sha1_hmac_ctx *sctx = crypto_shash_ctx(desc->tfm);
u32 index, padlen;
u64 t;
@@ -238,37 +261,48 @@ static int sha1_hmac_final(struct shash_desc *desc, u8 *out)
unsigned long flag;
int i = 0;
int dbn;
- u32 *in = &temp[0];
-
- t = sctx->count + 512; // need to add 512 bit of the IPAD operation
- bits[7] = 0xff & t;
- t >>= 8;
- bits[6] = 0xff & t;
- t >>= 8;
- bits[5] = 0xff & t;
- t >>= 8;
- bits[4] = 0xff & t;
- t >>= 8;
- bits[3] = 0xff & t;
- t >>= 8;
- bits[2] = 0xff & t;
- t >>= 8;
- bits[1] = 0xff & t;
- t >>= 8;
- bits[0] = 0xff & t;
-
- /* Pad out to 56 mod 64 */
- index = (sctx->count >> 3) & 0x3f;
- padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
- sha1_hmac_update (desc, padding, padlen);
-
- /* Append length */
- sha1_hmac_update (desc, bits, sizeof bits);
-
- CRTCL_SECT_START;
-
- hashs->DBN = sctx->dbn;
-
+ u32 *in = sctx->temp[0];
+
+ if (hash_final) {
+ t = sctx->count + 512; // need to add 512 bit of the IPAD operation
+ bits[7] = 0xff & t;
+ t >>= 8;
+ bits[6] = 0xff & t;
+ t >>= 8;
+ bits[5] = 0xff & t;
+ t >>= 8;
+ bits[4] = 0xff & t;
+ t >>= 8;
+ bits[3] = 0xff & t;
+ t >>= 8;
+ bits[2] = 0xff & t;
+ t >>= 8;
+ bits[1] = 0xff & t;
+ t >>= 8;
+ bits[0] = 0xff & t;
+
+ /* Pad out to 56 mod 64 */
+ index = (sctx->count >> 3) & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+ sha1_hmac_update (desc, padding, padlen);
+
+ /* Append length */
+ sha1_hmac_update (desc, bits, sizeof bits);
+ }
+
+ CRTCL_SECT_HASH_START;
+
+ SHA_HASH_INIT;
+
+ sha1_hmac_setkey_hw(sctx->key, sctx->keylen);
+
+ if (hash_final) {
+ hashs->DBN = sctx->dbn;
+ } else {
+ hashs->DBN = sctx->dbn + 5;
+ }
+ asm("sync");
+
//for vr9 change, ENDI = 1
*IFX_HASH_CON = HASH_CON_VALUE;
@@ -277,28 +311,40 @@ static int sha1_hmac_final(struct shash_desc *desc, u8 *out)
// this will not take long
}
+ if (sctx->started) {
+ hashs->D1R = *((u32 *) sctx->hash + 0);
+ hashs->D2R = *((u32 *) sctx->hash + 1);
+ hashs->D3R = *((u32 *) sctx->hash + 2);
+ hashs->D4R = *((u32 *) sctx->hash + 3);
+ hashs->D5R = *((u32 *) sctx->hash + 4);
+ } else {
+ sctx->started = 1;
+ }
+
for (dbn = 0; dbn < sctx->dbn; dbn++)
{
- for (i = 0; i < 16; i++) {
- hashs->MR = in[i];
- };
+ for (i = 0; i < 16; i++) {
+ hashs->MR = in[i];
+ };
- hashs->controlr.GO = 1;
- asm("sync");
+ hashs->controlr.GO = 1;
+ asm("sync");
- //wait for processing
- while (hashs->controlr.BSY) {
+ //wait for processing
+ while (hashs->controlr.BSY) {
// this will not take long
- }
+ }
- in += 16;
-}
+ in += 16;
+ }
#if 1
- //wait for digest ready
- while (! hashs->controlr.DGRY) {
- // this will not take long
+ if (hash_final) {
+ //wait for digest ready
+ while (! hashs->controlr.DGRY) {
+ // this will not take long
+ }
}
#endif
@@ -308,40 +354,71 @@ static int sha1_hmac_final(struct shash_desc *desc, u8 *out)
*((u32 *) out + 3) = hashs->D4R;
*((u32 *) out + 4) = hashs->D5R;
- memset(&sctx->buffer[0], 0, SHA1_HMAC_BLOCK_SIZE);
- sctx->count = 0;
-
+ CRTCL_SECT_HASH_END;
+
+ if (hash_final) {
+ sha1_hmac_init(desc);
+ } else {
+ sctx->dbn = 0;
+ }
//printk("debug ln: %d, fn: %s\n", __LINE__, __func__);
- CRTCL_SECT_END;
+ return 0;
+
+}
+
+/*! \fn void sha1_hmac_init_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_SHA1_HMAC_FUNCTIONS
+ * \brief initialize pointers in sha1_hmac_ctx
+ * \param tfm linux crypto algo transform
+*/
+static int sha1_hmac_init_tfm(struct crypto_tfm *tfm)
+{
+ struct sha1_hmac_ctx *sctx = crypto_tfm_ctx(tfm);
+ sctx->temp = kzalloc(4 * SHA1_HMAC_DBN_TEMP_SIZE, GFP_KERNEL);
+ if (IS_ERR(sctx->temp)) return PTR_ERR(sctx->temp);
+ sctx->desc = kzalloc(sizeof(struct shash_desc), GFP_KERNEL);
+ if (IS_ERR(sctx->desc)) return PTR_ERR(sctx->desc);
return 0;
+}
+/*! \fn void sha1_hmac_exit_tfm(struct crypto_tfm *tfm)
+ * \ingroup IFX_SHA1_HMAC_FUNCTIONS
+ * \brief free pointers in sha1_hmac_ctx
+ * \param tfm linux crypto algo transform
+*/
+static void sha1_hmac_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct sha1_hmac_ctx *sctx = crypto_tfm_ctx(tfm);
+ kfree(sctx->temp);
+ kfree(sctx->desc);
}
-/*
- * \brief SHA1-HMAC function mappings
+/*
+ * \brief SHA1_HMAC function mappings
*/
+
static struct shash_alg ifxdeu_sha1_hmac_alg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_hmac_init,
- .update = sha1_hmac_update,
- .final = sha1_hmac_final,
- .setkey = sha1_hmac_setkey,
- .descsize = sizeof(struct sha1_hmac_ctx),
- .base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name= "ifxdeu-sha1_hmac",
- .cra_priority = 400,
- .cra_ctxsize = sizeof(struct sha1_hmac_ctx),
- .cra_flags = CRYPTO_ALG_TYPE_HASH,
- .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
+ .digestsize = SHA1_DIGEST_SIZE,
+ .init = sha1_hmac_init,
+ .update = sha1_hmac_update,
+ .final = sha1_hmac_final,
+ .setkey = sha1_hmac_setkey,
+ .descsize = sizeof(struct sha1_hmac_ctx),
+ .base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name= "ifxdeu-sha1_hmac",
+ .cra_priority = 400,
+ .cra_ctxsize = sizeof(struct sha1_hmac_ctx),
+ .cra_flags = CRYPTO_ALG_TYPE_HASH | CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = SHA1_HMAC_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_init = sha1_hmac_init_tfm,
+ .cra_exit = sha1_hmac_exit_tfm,
}
-
};
-
/*! \fn int ifxdeu_init_sha1_hmac (void)
* \ingroup IFX_SHA1_HMAC_FUNCTIONS
* \brief initialize sha1 hmac driver
@@ -355,8 +432,6 @@ int ifxdeu_init_sha1_hmac (void)
if ((ret = crypto_register_shash(&ifxdeu_sha1_hmac_alg)))
goto sha1_err;
- CRTCL_SECT_INIT;
-
printk (KERN_NOTICE "IFX DEU SHA1_HMAC initialized%s.\n", disable_deudma ? "" : " (DMA)");
return ret;
@@ -376,4 +451,3 @@ void ifxdeu_fini_sha1_hmac (void)
}
-