[source] ltq-deu: fix handling of data blocks with sizes != AES/DES block size
LEDE Commits
lede-commits at lists.infradead.org
Fri Aug 19 20:47:31 PDT 2016
blogic pushed a commit to source.git, branch master:
https://git.lede-project.org/?p=source.git;a=commitdiff;h=93916613943ae095027f01acc521b4fcf174dea2
commit 93916613943ae095027f01acc521b4fcf174dea2
Author: Martin Schiller <mschiller at tdt.de>
AuthorDate: Thu Aug 18 12:03:16 2016 +0200
ltq-deu: fix handling of data blocks with sizes != AES/DES block size
This fix is a backport from the lantiq UGW-6.1.1-MR1
Signed-off-by: Martin Schiller <mschiller at tdt.de>
---
package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c | 124 ++++++++++++++++++------
package/kernel/lantiq/ltq-deu/src/ifxmips_des.c | 28 +++---
2 files changed, 108 insertions(+), 44 deletions(-)
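The pattern repeated through the ECB/CBC/CTR handlers below is to round the byte count handed to the DEU down to a whole number of blocks (enc_bytes/dec_bytes) while the remainder stays in nbytes for blkcipher_walk_done(), and, in the low-level ifx_deu_aes() routine, to push any trailing fragment through the engine as one extra block and zero the padding area of the output. A minimal, self-contained sketch of that split follows; process_block() and process_request() are illustrative stand-ins for the real DEU register I/O, not driver code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* stand-in for feeding one 16-byte block through the hardware engine */
static void process_block(uint8_t *out, const uint8_t *in)
{
    memcpy(out, in, AES_BLOCK_SIZE);    /* placeholder "encryption" */
}

static void process_request(uint8_t *out, const uint8_t *in, size_t nbytes)
{
    size_t full = nbytes & ~(size_t)(AES_BLOCK_SIZE - 1);  /* whole blocks */
    size_t tail = nbytes & (AES_BLOCK_SIZE - 1);           /* leftover < 16 */
    size_t i;

    for (i = 0; i < full; i += AES_BLOCK_SIZE)
        process_block(out + i, in + i);

    if (tail) {
        uint8_t last[AES_BLOCK_SIZE] = { 0 };   /* zero-padded final block */

        memcpy(last, in + full, tail);
        process_block(out + full, last);
        /* like the patch, make sure the padding area of the output is clean */
        memset(out + full + tail, 0, AES_BLOCK_SIZE - tail);
    }
}

int main(void)
{
    uint8_t in[23] = "incomplete last block!";
    uint8_t out[32] = { 0 };

    process_request(out, in, sizeof(in));
    printf("processed %zu bytes as %zu block(s)\n", sizeof(in),
           (sizeof(in) + AES_BLOCK_SIZE - 1) / (size_t)AES_BLOCK_SIZE);
    return 0;
}

Built as an ordinary userspace program this only demonstrates the arithmetic; the driver performs the same split but writes the ID3R..ID0R/OD3R..OD0R registers per block instead of calling memcpy().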
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
index fe888b8..9ef42d1 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_aes.c
@@ -249,6 +249,26 @@ void ifx_deu_aes (void *ctx_arg, u8 *out_arg, const u8 *in_arg,
byte_cnt -= 16;
}
+ /* To handle all non-aligned bytes (not aligned to 16B size) */
+ if (byte_cnt) {
+ aes->ID3R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 0));
+ aes->ID2R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 1));
+ aes->ID1R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 2));
+ aes->ID0R = INPUT_ENDIAN_SWAP(*((u32 *) in_arg + (i * 4) + 3)); /* start crypto */
+
+ while (aes->controlr.BUS) {
+ }
+
+ *((volatile u32 *) out_arg + (i * 4) + 0) = aes->OD3R;
+ *((volatile u32 *) out_arg + (i * 4) + 1) = aes->OD2R;
+ *((volatile u32 *) out_arg + (i * 4) + 2) = aes->OD1R;
+ *((volatile u32 *) out_arg + (i * 4) + 3) = aes->OD0R;
+
+ /* to ensure that the extended pages are clean */
+ memset (out_arg + (i * 16) + (nbytes % AES_BLOCK_SIZE), 0,
+ (AES_BLOCK_SIZE - (nbytes % AES_BLOCK_SIZE)));
+
+ }
//tc.chen : copy iv_arg back
if (mode > 0) {
@@ -467,14 +487,15 @@ int ecb_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int enc_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- nbytes -= (nbytes % AES_BLOCK_SIZE);
+ while ((nbytes = enc_bytes = walk.nbytes)) {
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ NULL, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -498,14 +519,15 @@ int ecb_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int dec_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- nbytes -= (nbytes % AES_BLOCK_SIZE);
+ while ((nbytes = dec_bytes = walk.nbytes)) {
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
+ NULL, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -553,15 +575,16 @@ int cbc_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int enc_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = enc_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
- nbytes -= (nbytes % AES_BLOCK_SIZE);
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -585,15 +608,16 @@ int cbc_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int dec_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = dec_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
- nbytes -= (nbytes % AES_BLOCK_SIZE);
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
+ iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -642,15 +666,16 @@ int ctr_basic_aes_encrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int enc_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = enc_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
- nbytes -= (nbytes % AES_BLOCK_SIZE);
+ enc_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -674,15 +699,16 @@ int ctr_basic_aes_decrypt(struct blkcipher_desc *desc,
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int dec_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = dec_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
- nbytes -= (nbytes % AES_BLOCK_SIZE);
+ dec_bytes -= (nbytes % AES_BLOCK_SIZE);
ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
+ iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= AES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -730,7 +756,7 @@ int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc,
{
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- int err;
+ int err, bsize = nbytes;
u8 rfc3686_iv[16];
blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -744,12 +770,29 @@ int ctr_rfc3686_aes_encrypt(struct blkcipher_desc *desc,
*(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- while ((nbytes = walk.nbytes)) {
- nbytes -= (nbytes % AES_BLOCK_SIZE);
- ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ /* scatterlist source is the same size as request size, just process once */
+ if (nbytes == walk.nbytes) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ nbytes -= walk.nbytes;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ return err;
+ }
+
+ while ((nbytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+
+ nbytes -= walk.nbytes;
+ bsize -= walk.nbytes;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
@@ -770,7 +813,7 @@ int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc,
{
struct aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
- int err;
+ int err, bsize = nbytes;
u8 rfc3686_iv[16];
blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -784,12 +827,29 @@ int ctr_rfc3686_aes_decrypt(struct blkcipher_desc *desc,
*(__be32 *)(rfc3686_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
cpu_to_be32(1);
- while ((nbytes = walk.nbytes)) {
- nbytes -= (nbytes % AES_BLOCK_SIZE);
- ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
- nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ /* scatterlist source is the same size as request size, just process once */
+ if (nbytes == walk.nbytes) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ nbytes -= walk.nbytes;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ return err;
+ }
+
+ while ((nbytes = walk.nbytes) && (walk.nbytes >= AES_BLOCK_SIZE)) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
+
+ nbytes -= walk.nbytes;
+ bsize -= walk.nbytes;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ /* to handle remaining bytes < AES_BLOCK_SIZE */
+ if (walk.nbytes) {
+ ifx_deu_aes_ctr(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ rfc3686_iv, walk.nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
diff --git a/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c b/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c
index dcf6c18..513c916 100644
--- a/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c
+++ b/package/kernel/lantiq/ltq-deu/src/ifxmips_des.c
@@ -465,14 +465,15 @@ int ecb_des_encrypt(struct blkcipher_desc *desc,
struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int enc_bytes;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- nbytes -= (nbytes % DES_BLOCK_SIZE);
+ while ((nbytes = enc_bytes = walk.nbytes)) {
+ enc_bytes -= (nbytes % DES_BLOCK_SIZE);
ifx_deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- NULL, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ NULL, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -496,15 +497,16 @@ int ecb_des_decrypt(struct blkcipher_desc *desc,
struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int dec_bytes;
DPRINTF(1, "\n");
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
- nbytes -= (nbytes % DES_BLOCK_SIZE);
+ while ((nbytes = dec_bytes = walk.nbytes)) {
+ dec_bytes -= (nbytes % DES_BLOCK_SIZE);
ifx_deu_des_ecb(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- NULL, nbytes, CRYPTO_DIR_DECRYPT, 0);
+ NULL, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -574,16 +576,17 @@ int cbc_des_encrypt(struct blkcipher_desc *desc,
struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int enc_bytes;
DPRINTF(1, "\n");
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = enc_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
- nbytes -= (nbytes % DES_BLOCK_SIZE);
+ enc_bytes -= (nbytes % DES_BLOCK_SIZE);
ifx_deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, nbytes, CRYPTO_DIR_ENCRYPT, 0);
+ iv, enc_bytes, CRYPTO_DIR_ENCRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
@@ -607,16 +610,17 @@ int cbc_des_decrypt(struct blkcipher_desc *desc,
struct des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
+ unsigned int dec_bytes;
DPRINTF(1, "\n");
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
- while ((nbytes = walk.nbytes)) {
+ while ((nbytes = dec_bytes = walk.nbytes)) {
u8 *iv = walk.iv;
- nbytes -= (nbytes % DES_BLOCK_SIZE);
+ dec_bytes -= (nbytes % DES_BLOCK_SIZE);
ifx_deu_des_cbc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
- iv, nbytes, CRYPTO_DIR_DECRYPT, 0);
+ iv, dec_bytes, CRYPTO_DIR_DECRYPT, 0);
nbytes &= DES_BLOCK_SIZE - 1;
err = blkcipher_walk_done(desc, &walk, nbytes);
}
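The rfc3686 CTR handlers take a slightly different route: full blocks are processed in the main loop and a trailing fragment shorter than AES_BLOCK_SIZE is handed to the engine as-is in a final call. That is legitimate because CTR is a keystream construction, so only the first bytes of the last keystream block are consumed. A short sketch of that idea; keystream_block() and ctr_xor() are hypothetical placeholders (the real driver lets the DEU produce the keystream per 16-byte block):

#include <stdint.h>
#include <string.h>

#define AES_BLOCK_SIZE 16

/* hypothetical placeholder: derive one keystream block from the counter */
static void keystream_block(uint8_t ks[AES_BLOCK_SIZE], uint32_t counter)
{
    memset(ks, (uint8_t)counter, AES_BLOCK_SIZE);   /* not real AES */
}

/* XOR the keystream over the data; the final chunk may be < one block */
static void ctr_xor(uint8_t *out, const uint8_t *in, size_t nbytes)
{
    uint32_t counter = 1;       /* RFC 3686 starts the block counter at 1 */
    uint8_t ks[AES_BLOCK_SIZE];
    size_t off = 0, i;

    while (off < nbytes) {
        size_t chunk = nbytes - off < AES_BLOCK_SIZE ?
                       nbytes - off : AES_BLOCK_SIZE;

        keystream_block(ks, counter++);
        for (i = 0; i < chunk; i++)
            out[off + i] = in[off + i] ^ ks[i];
        off += chunk;
    }
}

int main(void)
{
    uint8_t in[23] = { 0 }, out[23];

    ctr_xor(out, in, sizeof(in));   /* one full block plus a 7-byte tail */
    return 0;
}

Nothing in the loop requires nbytes to be a multiple of the block size; the counter simply advances once per (possibly short) chunk, which is why the final partial block can be processed directly.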