[LEDE-DEV] [PATCH 3/5] apm821xx: backport crypto4xx patches from 4.15

Christian Lamparter chunkeey at gmail.com
Sun Jan 7 12:27:50 PST 2018


This patch backports crypto4xx driver changes from Linux 4.15 in order
to get the crypto4xx crypto engine operational.

Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
---
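Notes (not part of the commit messages): after this series is applied,
the offloaded ciphers are registered with driver names such as
"cbc-aes-ppc4xx" and show up in /proc/crypto. The snippet below is only
an illustrative sketch of how one of them can be exercised from
userspace through the kernel's AF_ALG interface; it assumes
CONFIG_CRYPTO_USER_API_SKCIPHER is enabled and that "cbc(aes)" resolves
to the ppc4xx driver. The key/IV values are placeholders and error
handling is omitted for brevity.

/* Illustrative only: encrypt one AES block via AF_ALG. */
#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",	/* served by cbc-aes-ppc4xx */
	};
	unsigned char key[16] = { 0 };		/* placeholder key */
	unsigned char iv[16]  = { 0 };		/* placeholder IV */
	unsigned char buf[16] = "0123456789abcde";
	union {					/* aligned cmsg buffer */
		char buf[CMSG_SPACE(sizeof(__u32)) +
			 CMSG_SPACE(offsetof(struct af_alg_iv, iv) + 16)];
		struct cmsghdr align;
	} cbuf = { 0 };
	struct msghdr msg = { .msg_control = cbuf.buf,
			      .msg_controllen = sizeof(cbuf.buf) };
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct cmsghdr *cmsg;
	struct af_alg_iv *alg_iv;
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	/* first cmsg: the operation (encrypt) */
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	/* second cmsg: the IV */
	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(offsetof(struct af_alg_iv, iv) + 16);
	alg_iv = (void *)CMSG_DATA(cmsg);
	alg_iv->ivlen = 16;
	memcpy(alg_iv->iv, iv, 16);

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	sendmsg(opfd, &msg, 0);
	read(opfd, buf, sizeof(buf));	/* buf now holds the ciphertext */

	for (i = 0; i < 16; i++)
		printf("%02x", buf[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
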
 .../010-crypto-gcm-add-GCM-IV-size-constant.patch  |  27 +
 ...0001-crypto-crypto4xx-remove-bad-list_del.patch |  32 ++
 ...to4xx-remove-unused-definitions-and-write.patch | 120 ++++
 ...to4xx-set-CRYPTO_ALG_KERN_DRIVER_ONLY-fla.patch |  31 ++
 ...to4xx-remove-extern-statement-before-func.patch |  76 +++
 ...to4xx-remove-double-assignment-of-pd_uinf.patch |  24 +
 ...to4xx-fix-dynamic_sa_ctl-s-sa_contents-de.patch |  87 +++
 ...to4xx-move-and-refactor-dynamic_contents-.patch | 234 ++++++++
 ...to4xx-enable-AES-RFC3686-ECB-CFB-and-OFB-.patch | 248 +++++++++
 ...pto4xx-refactor-crypto4xx_copy_pkt_to_dst.patch | 171 ++++++
 ...to4xx-replace-crypto4xx_dev-s-scatter_buf.patch |  59 ++
 ...to4xx-fix-crypto4xx_build_pdr-crypto4xx_b.patch |  84 +++
 ...pto-crypto4xx-pointer-arithmetic-overhaul.patch | 373 +++++++++++++
 ...-crypto4xx-wire-up-hmac_mc-to-hmac_muting.patch |  25 +
 ...4-crypto-crypto4xx-fix-off-by-one-AES-OFB.patch |  49 ++
 ...rypto4xx-fix-type-mismatch-compiler-error.patch |  29 +
 ...rypto-crypto4xx-add-backlog-queue-support.patch | 161 ++++++
 ...to4xx-use-the-correct-LE32-format-for-IV-.patch | 236 ++++++++
 ...pto-crypto4xx-overhaul-crypto4xx_build_pd.patch | 535 ++++++++++++++++++
 ...020-crypto-crypto4xx-fix-various-warnings.patch |  62 +++
 ...pto-crypto4xx-fix-stalls-under-heavy-load.patch | 112 ++++
 ...to4xx-simplify-sa-and-state-context-acqui.patch | 209 +++++++
 ...crypto-crypto4xx-prepare-for-AEAD-support.patch | 617 +++++++++++++++++++++
 ...0024-crypto-crypto4xx-add-aes-ccm-support.patch | 256 +++++++++
 ...0025-crypto-crypto4xx-add-aes-gcm-support.patch | 220 ++++++++
 ...to4xx-shuffle-iomap-in-front-of-request_i.patch |  71 +++
 ...crypto-crypto4xx-support-Revision-B-parts.patch | 150 +++++
 ...-crypto-crypto4xx-fix-missing-irq-devname.patch |  37 ++
 ...20-0004-crypto-crypto4xx-kill-MODULE_NAME.patch |  47 ++
 ...to4xx-perform-aead-icv-check-in-the-drive.patch | 146 +++++
 ...rypto-crypto4xx-performance-optimizations.patch | 158 ++++++
 31 files changed, 4686 insertions(+)
 create mode 100644 target/linux/apm821xx/patches-4.14/010-crypto-gcm-add-GCM-IV-size-constant.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0001-crypto-crypto4xx-remove-bad-list_del.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0002-crypto-crypto4xx-remove-unused-definitions-and-write.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0003-crypto-crypto4xx-set-CRYPTO_ALG_KERN_DRIVER_ONLY-fla.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0004-crypto-crypto4xx-remove-extern-statement-before-func.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0005-crypto-crypto4xx-remove-double-assignment-of-pd_uinf.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0006-crypto-crypto4xx-fix-dynamic_sa_ctl-s-sa_contents-de.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0007-crypto-crypto4xx-move-and-refactor-dynamic_contents-.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0008-crypto-crypto4xx-enable-AES-RFC3686-ECB-CFB-and-OFB-.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0009-crypto-crypto4xx-refactor-crypto4xx_copy_pkt_to_dst.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0010-crypto-crypto4xx-replace-crypto4xx_dev-s-scatter_buf.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0011-crypto-crypto4xx-fix-crypto4xx_build_pdr-crypto4xx_b.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0012-crypto-crypto4xx-pointer-arithmetic-overhaul.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0013-crypto-crypto4xx-wire-up-hmac_mc-to-hmac_muting.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0014-crypto-crypto4xx-fix-off-by-one-AES-OFB.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0015-crypto-crypto4xx-fix-type-mismatch-compiler-error.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0017-crypto-crypto4xx-add-backlog-queue-support.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0018-crypto-crypto4xx-use-the-correct-LE32-format-for-IV-.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0019-crypto-crypto4xx-overhaul-crypto4xx_build_pd.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0020-crypto-crypto4xx-fix-various-warnings.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0021-crypto-crypto4xx-fix-stalls-under-heavy-load.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0022-crypto-crypto4xx-simplify-sa-and-state-context-acqui.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0023-crypto-crypto4xx-prepare-for-AEAD-support.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0024-crypto-crypto4xx-add-aes-ccm-support.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/020-0025-crypto-crypto4xx-add-aes-gcm-support.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/120-0001-crypto-crypto4xx-shuffle-iomap-in-front-of-request_i.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/120-0002-crypto-crypto4xx-support-Revision-B-parts.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/120-0003-crypto-crypto4xx-fix-missing-irq-devname.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/120-0004-crypto-crypto4xx-kill-MODULE_NAME.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/120-0005-crypto-crypto4xx-perform-aead-icv-check-in-the-drive.patch
 create mode 100644 target/linux/apm821xx/patches-4.14/120-0006-crypto-crypto4xx-performance-optimizations.patch

diff --git a/target/linux/apm821xx/patches-4.14/010-crypto-gcm-add-GCM-IV-size-constant.patch b/target/linux/apm821xx/patches-4.14/010-crypto-gcm-add-GCM-IV-size-constant.patch
new file mode 100644
index 0000000000..6f0cc6015c
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/010-crypto-gcm-add-GCM-IV-size-constant.patch
@@ -0,0 +1,27 @@
+From ef780324592dd639e4bfbc5b9bf8934b234b7c99 Mon Sep 17 00:00:00 2001
+From: Corentin LABBE <clabbe.montjoie at gmail.com>
+Date: Tue, 22 Aug 2017 10:08:08 +0200
+Subject: [PATCH] crypto: gcm - add GCM IV size constant
+
+Many GCM users use the GCM IV size directly instead of using a constant.
+
+This patch adds all the IV size constants used by GCM.
+
+Signed-off-by: Corentin Labbe <clabbe.montjoie at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ include/crypto/gcm.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+ create mode 100644 include/crypto/gcm.h
+
+--- /dev/null
++++ b/include/crypto/gcm.h
+@@ -0,0 +1,8 @@
++#ifndef _CRYPTO_GCM_H
++#define _CRYPTO_GCM_H
++
++#define GCM_AES_IV_SIZE 12
++#define GCM_RFC4106_IV_SIZE 8
++#define GCM_RFC4543_IV_SIZE 8
++
++#endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0001-crypto-crypto4xx-remove-bad-list_del.patch b/target/linux/apm821xx/patches-4.14/020-0001-crypto-crypto4xx-remove-bad-list_del.patch
new file mode 100644
index 0000000000..8df0937e4d
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0001-crypto-crypto4xx-remove-bad-list_del.patch
@@ -0,0 +1,32 @@
+From a728a196d253530f17da5c86dc7dfbe58c5f7094 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:14 +0200
+Subject: [PATCH 01/25] crypto: crypto4xx - remove bad list_del
+
+alg entries are only added to the list, after the registration
+was successful. If the registration failed, it was never added
+to the list in the first place.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1033,12 +1033,10 @@ int crypto4xx_register_alg(struct crypto
+ 			break;
+ 		}
+ 
+-		if (rc) {
+-			list_del(&alg->entry);
++		if (rc)
+ 			kfree(alg);
+-		} else {
++		else
+ 			list_add_tail(&alg->entry, &sec_dev->alg_list);
+-		}
+ 	}
+ 
+ 	return 0;
diff --git a/target/linux/apm821xx/patches-4.14/020-0002-crypto-crypto4xx-remove-unused-definitions-and-write.patch b/target/linux/apm821xx/patches-4.14/020-0002-crypto-crypto4xx-remove-unused-definitions-and-write.patch
new file mode 100644
index 0000000000..5983246344
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0002-crypto-crypto4xx-remove-unused-definitions-and-write.patch
@@ -0,0 +1,120 @@
+From 81065f66dd99b3af58626a914b8c0fcff6b8b0ba Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:15 +0200
+Subject: [PATCH 02/25] crypto: crypto4xx - remove unused definitions and
+ write-only variables
+
+This patch removes several pieces of unused code and definitions
+(structs, variables, ...).
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  |  6 ------
+ drivers/crypto/amcc/crypto4xx_core.c |  2 +-
+ drivers/crypto/amcc/crypto4xx_core.h | 16 ----------------
+ 3 files changed, 1 insertion(+), 23 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -75,7 +75,6 @@ int crypto4xx_encrypt(struct ablkcipher_
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 
+ 	ctx->direction = DIR_OUTBOUND;
+-	ctx->hash_final = 0;
+ 	ctx->is_hash = 0;
+ 	ctx->pd_ctl = 0x1;
+ 
+@@ -89,7 +88,6 @@ int crypto4xx_decrypt(struct ablkcipher_
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 
+ 	ctx->direction = DIR_INBOUND;
+-	ctx->hash_final = 0;
+ 	ctx->is_hash = 0;
+ 	ctx->pd_ctl = 1;
+ 
+@@ -136,7 +134,6 @@ static int crypto4xx_setkey_aes(struct c
+ 	}
+ 	/* Setup SA */
+ 	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+-	ctx->hash_final = 0;
+ 
+ 	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ 				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+@@ -191,7 +188,6 @@ static int crypto4xx_hash_alg_init(struc
+ 
+ 	ctx->dev   = my_alg->dev;
+ 	ctx->is_hash = 1;
+-	ctx->hash_final = 0;
+ 
+ 	/* Create SA */
+ 	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+@@ -256,7 +252,6 @@ int crypto4xx_hash_update(struct ahash_r
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 
+ 	ctx->is_hash = 1;
+-	ctx->hash_final = 0;
+ 	ctx->pd_ctl = 0x11;
+ 	ctx->direction = DIR_INBOUND;
+ 
+@@ -274,7 +269,6 @@ int crypto4xx_hash_digest(struct ahash_r
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 
+-	ctx->hash_final = 1;
+ 	ctx->pd_ctl = 0x11;
+ 	ctx->direction = DIR_INBOUND;
+ 
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -962,7 +962,7 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 
+ 	sa->sa_command_1.bf.hash_crypto_offset = 0;
+ 	pd->pd_ctl.w = ctx->pd_ctl;
+-	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
++	pd->pd_ctl_len.w = 0x00400000 | datalen;
+ 	pd_uinfo->state = PD_ENTRY_INUSE;
+ 	wmb();
+ 	/* write any value to push engine to read a pd */
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -72,7 +72,6 @@ struct pd_uinfo {
+ struct crypto4xx_device {
+ 	struct crypto4xx_core_device *core_dev;
+ 	char *name;
+-	u64  ce_phy_address;
+ 	void __iomem *ce_base;
+ 	void __iomem *trng_base;
+ 
+@@ -127,21 +126,9 @@ struct crypto4xx_ctx {
+ 	u32 sa_len;
+ 	u32 offset_to_sr_ptr;           /* offset to state ptr, in dynamic sa */
+ 	u32 direction;
+-	u32 next_hdr;
+ 	u32 save_iv;
+-	u32 pd_ctl_len;
+ 	u32 pd_ctl;
+-	u32 bypass;
+ 	u32 is_hash;
+-	u32 hash_final;
+-};
+-
+-struct crypto4xx_req_ctx {
+-	struct crypto4xx_device *dev;	/* Device in which
+-					operation to send to */
+-	void *sa;
+-	u32 sa_dma_addr;
+-	u16 sa_len;
+ };
+ 
+ struct crypto4xx_alg_common {
+@@ -172,9 +159,6 @@ static inline struct crypto4xx_alg *cryp
+ 
+ extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+ extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+-extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
+-				   struct crypto4xx_ctx *rctx);
+-extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
+ extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+ extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+ extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
diff --git a/target/linux/apm821xx/patches-4.14/020-0003-crypto-crypto4xx-set-CRYPTO_ALG_KERN_DRIVER_ONLY-fla.patch b/target/linux/apm821xx/patches-4.14/020-0003-crypto-crypto4xx-set-CRYPTO_ALG_KERN_DRIVER_ONLY-fla.patch
new file mode 100644
index 0000000000..e50a418284
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0003-crypto-crypto4xx-set-CRYPTO_ALG_KERN_DRIVER_ONLY-fla.patch
@@ -0,0 +1,31 @@
+From 1ef52a95ea53c3c54b061e3f1af85976356c7132 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:16 +0200
+Subject: [PATCH 03/25] crypto: crypto4xx - set CRYPTO_ALG_KERN_DRIVER_ONLY
+ flag
+
+The security offload function is performed by a cryptographic
+engine core attached to the 128-bit PLB (processor local bus)
+with builtin DMA and interrupt controllers. This, I think,
+satisfies the requirement for the CRYPTO_ALG_KERN_DRIVER_ONLY
+flag.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1114,7 +1114,9 @@ struct crypto4xx_alg_common crypto4xx_al
+ 		.cra_name 	= "cbc(aes)",
+ 		.cra_driver_name = "cbc-aes-ppc4xx",
+ 		.cra_priority 	= CRYPTO4XX_CRYPTO_PRIORITY,
+-		.cra_flags 	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
++		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
++				  CRYPTO_ALG_ASYNC |
++				  CRYPTO_ALG_KERN_DRIVER_ONLY,
+ 		.cra_blocksize 	= AES_BLOCK_SIZE,
+ 		.cra_ctxsize 	= sizeof(struct crypto4xx_ctx),
+ 		.cra_type 	= &crypto_ablkcipher_type,
diff --git a/target/linux/apm821xx/patches-4.14/020-0004-crypto-crypto4xx-remove-extern-statement-before-func.patch b/target/linux/apm821xx/patches-4.14/020-0004-crypto-crypto4xx-remove-extern-statement-before-func.patch
new file mode 100644
index 0000000000..cd61ac719b
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0004-crypto-crypto4xx-remove-extern-statement-before-func.patch
@@ -0,0 +1,76 @@
+From 886c251fd4ca40a27697afec7bc44c115e803d78 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:17 +0200
+Subject: [PATCH 04/25] crypto: crypto4xx - remove extern statement before
+ function declaration
+
+All function declarations are "extern" by default, there is no need to
+specify it explicitly.
+
+C99 states in 6.2.2.5:
+"If the declaration of an identifier for a function has no
+storage-class specifier, its linkage is determined exactly
+as if it were declared with the storage-class specifier
+extern."
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.h | 48 ++++++++++++++++++------------------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -157,28 +157,28 @@ static inline struct crypto4xx_alg *cryp
+ 	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+ }
+ 
+-extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+-extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+-extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+-extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+-extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
+-extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
+-extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+-extern void crypto4xx_memcpy_le(unsigned int *dst,
+-				const unsigned char *buf, int len);
+-extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
+-			      struct crypto4xx_ctx *ctx,
+-			      struct scatterlist *src,
+-			      struct scatterlist *dst,
+-			      unsigned int datalen,
+-			      void *iv, u32 iv_len);
+-extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+-				    const u8 *key, unsigned int keylen);
+-extern int crypto4xx_encrypt(struct ablkcipher_request *req);
+-extern int crypto4xx_decrypt(struct ablkcipher_request *req);
+-extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+-extern int crypto4xx_hash_digest(struct ahash_request *req);
+-extern int crypto4xx_hash_final(struct ahash_request *req);
+-extern int crypto4xx_hash_update(struct ahash_request *req);
+-extern int crypto4xx_hash_init(struct ahash_request *req);
++int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
++void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
++void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
++u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
++u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
++u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
++u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
++void crypto4xx_memcpy_le(unsigned int *dst,
++			 const unsigned char *buf, int len);
++u32 crypto4xx_build_pd(struct crypto_async_request *req,
++		       struct crypto4xx_ctx *ctx,
++		       struct scatterlist *src,
++		       struct scatterlist *dst,
++		       unsigned int datalen,
++		       void *iv, u32 iv_len);
++int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen);
++int crypto4xx_encrypt(struct ablkcipher_request *req);
++int crypto4xx_decrypt(struct ablkcipher_request *req);
++int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
++int crypto4xx_hash_digest(struct ahash_request *req);
++int crypto4xx_hash_final(struct ahash_request *req);
++int crypto4xx_hash_update(struct ahash_request *req);
++int crypto4xx_hash_init(struct ahash_request *req);
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0005-crypto-crypto4xx-remove-double-assignment-of-pd_uinf.patch b/target/linux/apm821xx/patches-4.14/020-0005-crypto-crypto4xx-remove-double-assignment-of-pd_uinf.patch
new file mode 100644
index 0000000000..4fc3617576
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0005-crypto-crypto4xx-remove-double-assignment-of-pd_uinf.patch
@@ -0,0 +1,24 @@
+From c587e65deacf8c86de2d7c51f1e81d0a4a9147a8 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:18 +0200
+Subject: [PATCH 05/25] crypto: crypto4xx - remove double assignment of
+ pd_uinfo->state
+
+crypto4xx_put_pd_to_pdr() already clears the flag.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1079,7 +1079,6 @@ static void crypto4xx_bh_tasklet_cb(unsi
+ 			pd->pd_ctl.bf.pe_done = 0;
+ 			crypto4xx_pd_done(core_dev->dev, tail);
+ 			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+-			pd_uinfo->state = PD_ENTRY_FREE;
+ 		} else {
+ 			/* if tail not done, break */
+ 			break;
diff --git a/target/linux/apm821xx/patches-4.14/020-0006-crypto-crypto4xx-fix-dynamic_sa_ctl-s-sa_contents-de.patch b/target/linux/apm821xx/patches-4.14/020-0006-crypto-crypto4xx-fix-dynamic_sa_ctl-s-sa_contents-de.patch
new file mode 100644
index 0000000000..84063d3036
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0006-crypto-crypto4xx-fix-dynamic_sa_ctl-s-sa_contents-de.patch
@@ -0,0 +1,87 @@
+From 453e3090b9c3f5da70b21648c2244e9821f0916d Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:19 +0200
+Subject: [PATCH 06/25] crypto: crypto4xx - fix dynamic_sa_ctl's sa_contents
+ declaration
+
+The driver had a union dynamic_sa_contents in place that
+described the meaning of the bits in the sa_contents
+variable.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c |  4 ++--
+ drivers/crypto/amcc/crypto4xx_sa.c  | 12 ++++++------
+ drivers/crypto/amcc/crypto4xx_sa.h  |  2 +-
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -149,7 +149,7 @@ static int crypto4xx_setkey_aes(struct c
+ 				 SA_NOT_COPY_HDR);
+ 	crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ 			    key, keylen);
+-	sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
++	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
+ 	sa->sa_command_1.bf.key_len = keylen >> 3;
+ 	ctx->is_hash = 0;
+ 	ctx->direction = DIR_INBOUND;
+@@ -219,7 +219,7 @@ static int crypto4xx_hash_alg_init(struc
+ 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ 				 SA_NOT_COPY_HDR);
+ 	ctx->direction = DIR_INBOUND;
+-	sa->sa_contents = SA_HASH160_CONTENTS;
++	sa->sa_contents.w = SA_HASH160_CONTENTS;
+ 	sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
+ 	/* Need to zero hash digest in SA */
+ 	memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
+--- a/drivers/crypto/amcc/crypto4xx_sa.c
++++ b/drivers/crypto/amcc/crypto4xx_sa.c
+@@ -40,9 +40,9 @@ u32 get_dynamic_sa_offset_state_ptr_fiel
+ 	union dynamic_sa_contents cts;
+ 
+ 	if (ctx->direction == DIR_INBOUND)
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
++		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
+ 	else
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
++		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
+ 	offset = cts.bf.key_size
+ 		+ cts.bf.inner_size
+ 		+ cts.bf.outer_size
+@@ -66,9 +66,9 @@ u32 get_dynamic_sa_iv_size(struct crypto
+ 	union dynamic_sa_contents cts;
+ 
+ 	if (ctx->direction == DIR_INBOUND)
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
++		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
+ 	else
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
++		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
+ 	return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
+ }
+ 
+@@ -77,9 +77,9 @@ u32 get_dynamic_sa_offset_key_field(stru
+ 	union dynamic_sa_contents cts;
+ 
+ 	if (ctx->direction == DIR_INBOUND)
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
++		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
+ 	else
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
++		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
+ 
+ 	return sizeof(struct dynamic_sa_ctl);
+ }
+--- a/drivers/crypto/amcc/crypto4xx_sa.h
++++ b/drivers/crypto/amcc/crypto4xx_sa.h
+@@ -169,7 +169,7 @@ union sa_command_1 {
+ } __attribute__((packed));
+ 
+ struct dynamic_sa_ctl {
+-	u32 sa_contents;
++	union dynamic_sa_contents sa_contents;
+ 	union sa_command_0 sa_command_0;
+ 	union sa_command_1 sa_command_1;
+ } __attribute__((packed));
diff --git a/target/linux/apm821xx/patches-4.14/020-0007-crypto-crypto4xx-move-and-refactor-dynamic_contents-.patch b/target/linux/apm821xx/patches-4.14/020-0007-crypto-crypto4xx-move-and-refactor-dynamic_contents-.patch
new file mode 100644
index 0000000000..c7c32e5d36
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0007-crypto-crypto4xx-move-and-refactor-dynamic_contents-.patch
@@ -0,0 +1,234 @@
+From 249c8d98ea339325dca481d5dae93686cd494059 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:20 +0200
+Subject: [PATCH 07/25] crypto: crypto4xx - move and refactor dynamic_contents
+ helpers
+
+This patch refactors and moves the dynamic_contents helper
+functions into the crypto4xx_sa.h header file.
+
+ * get_dynamic_sa_iv_size is no longer needed, as the cryptoapi
+   provides the required IV size information as well.
+
+ * refactor the function declarations to use a pointer to the
+   dynamic_sa_contents union, instead of the crypto4xx_ctx.
+
+ * rename get_dynamic_sa_offset_key_field to get_dynamic_sa_key_field.
+   It returns the pointer to the key directly.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/Makefile         |  2 +-
+ drivers/crypto/amcc/crypto4xx_alg.c  | 20 ++++-----
+ drivers/crypto/amcc/crypto4xx_core.h |  3 --
+ drivers/crypto/amcc/crypto4xx_sa.c   | 85 ------------------------------------
+ drivers/crypto/amcc/crypto4xx_sa.h   | 28 ++++++++++++
+ 5 files changed, 39 insertions(+), 99 deletions(-)
+ delete mode 100644 drivers/crypto/amcc/crypto4xx_sa.c
+
+--- a/drivers/crypto/amcc/Makefile
++++ b/drivers/crypto/amcc/Makefile
+@@ -1,3 +1,3 @@
+ obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += crypto4xx.o
+-crypto4xx-y :=  crypto4xx_core.o crypto4xx_alg.o crypto4xx_sa.o
++crypto4xx-y :=  crypto4xx_core.o crypto4xx_alg.o
+ crypto4xx-$(CONFIG_HW_RANDOM_PPC4XX) += crypto4xx_trng.o
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -29,8 +29,8 @@
+ #include <crypto/aes.h>
+ #include <crypto/sha.h>
+ #include "crypto4xx_reg_def.h"
+-#include "crypto4xx_sa.h"
+ #include "crypto4xx_core.h"
++#include "crypto4xx_sa.h"
+ 
+ static void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
+ 				     u32 save_iv, u32 ld_h, u32 ld_iv,
+@@ -79,8 +79,8 @@ int crypto4xx_encrypt(struct ablkcipher_
+ 	ctx->pd_ctl = 0x1;
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-				  req->nbytes, req->info,
+-				  get_dynamic_sa_iv_size(ctx));
++		req->nbytes, req->info,
++		crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
+ }
+ 
+ int crypto4xx_decrypt(struct ablkcipher_request *req)
+@@ -92,8 +92,8 @@ int crypto4xx_decrypt(struct ablkcipher_
+ 	ctx->pd_ctl = 1;
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-				  req->nbytes, req->info,
+-				  get_dynamic_sa_iv_size(ctx));
++		req->nbytes, req->info,
++		crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
+ }
+ 
+ /**
+@@ -147,15 +147,15 @@ static int crypto4xx_setkey_aes(struct c
+ 				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ 				 SA_NOT_COPY_HDR);
+-	crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
++	crypto4xx_memcpy_le(get_dynamic_sa_key_field(sa),
+ 			    key, keylen);
+ 	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
+ 	sa->sa_command_1.bf.key_len = keylen >> 3;
+ 	ctx->is_hash = 0;
+ 	ctx->direction = DIR_INBOUND;
+-	memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+-			(void *)&ctx->state_record_dma_addr, 4);
+-	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
++	memcpy(sa + get_dynamic_sa_offset_state_ptr_field(sa),
++	       (void *)&ctx->state_record_dma_addr, 4);
++	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ 
+ 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ 	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+@@ -225,7 +225,7 @@ static int crypto4xx_hash_alg_init(struc
+ 	memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
+ 	memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
+ 	sa_in->state_ptr = ctx->state_record_dma_addr;
+-	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
++	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ 
+ 	return 0;
+ }
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -161,9 +161,6 @@ int crypto4xx_alloc_sa(struct crypto4xx_
+ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+ void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+ u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
+-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
+-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+ void crypto4xx_memcpy_le(unsigned int *dst,
+ 			 const unsigned char *buf, int len);
+ u32 crypto4xx_build_pd(struct crypto_async_request *req,
+--- a/drivers/crypto/amcc/crypto4xx_sa.c
++++ /dev/null
+@@ -1,85 +0,0 @@
+-/**
+- * AMCC SoC PPC4xx Crypto Driver
+- *
+- * Copyright (c) 2008 Applied Micro Circuits Corporation.
+- * All rights reserved. James Hsiao <jhsiao at amcc.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- *
+- * @file crypto4xx_sa.c
+- *
+- * This file implements the security context
+- * associate format.
+- */
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/moduleparam.h>
+-#include <linux/mod_devicetable.h>
+-#include <linux/interrupt.h>
+-#include <linux/spinlock_types.h>
+-#include <linux/highmem.h>
+-#include <linux/scatterlist.h>
+-#include <linux/crypto.h>
+-#include <crypto/algapi.h>
+-#include <crypto/des.h>
+-#include "crypto4xx_reg_def.h"
+-#include "crypto4xx_sa.h"
+-#include "crypto4xx_core.h"
+-
+-u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
+-{
+-	u32 offset;
+-	union dynamic_sa_contents cts;
+-
+-	if (ctx->direction == DIR_INBOUND)
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
+-	else
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
+-	offset = cts.bf.key_size
+-		+ cts.bf.inner_size
+-		+ cts.bf.outer_size
+-		+ cts.bf.spi
+-		+ cts.bf.seq_num0
+-		+ cts.bf.seq_num1
+-		+ cts.bf.seq_num_mask0
+-		+ cts.bf.seq_num_mask1
+-		+ cts.bf.seq_num_mask2
+-		+ cts.bf.seq_num_mask3
+-		+ cts.bf.iv0
+-		+ cts.bf.iv1
+-		+ cts.bf.iv2
+-		+ cts.bf.iv3;
+-
+-	return sizeof(struct dynamic_sa_ctl) + offset * 4;
+-}
+-
+-u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
+-{
+-	union dynamic_sa_contents cts;
+-
+-	if (ctx->direction == DIR_INBOUND)
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
+-	else
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
+-	return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
+-}
+-
+-u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx)
+-{
+-	union dynamic_sa_contents cts;
+-
+-	if (ctx->direction == DIR_INBOUND)
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents.w;
+-	else
+-		cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents.w;
+-
+-	return sizeof(struct dynamic_sa_ctl);
+-}
+--- a/drivers/crypto/amcc/crypto4xx_sa.h
++++ b/drivers/crypto/amcc/crypto4xx_sa.h
+@@ -240,4 +240,32 @@ struct dynamic_sa_hash160 {
+ #define SA_HASH160_LEN		(sizeof(struct dynamic_sa_hash160)/4)
+ #define SA_HASH160_CONTENTS     0x2000a502
+ 
++static inline u32
++get_dynamic_sa_offset_state_ptr_field(struct dynamic_sa_ctl *cts)
++{
++	u32 offset;
++
++	offset = cts->sa_contents.bf.key_size
++		+ cts->sa_contents.bf.inner_size
++		+ cts->sa_contents.bf.outer_size
++		+ cts->sa_contents.bf.spi
++		+ cts->sa_contents.bf.seq_num0
++		+ cts->sa_contents.bf.seq_num1
++		+ cts->sa_contents.bf.seq_num_mask0
++		+ cts->sa_contents.bf.seq_num_mask1
++		+ cts->sa_contents.bf.seq_num_mask2
++		+ cts->sa_contents.bf.seq_num_mask3
++		+ cts->sa_contents.bf.iv0
++		+ cts->sa_contents.bf.iv1
++		+ cts->sa_contents.bf.iv2
++		+ cts->sa_contents.bf.iv3;
++
++	return sizeof(struct dynamic_sa_ctl) + offset * 4;
++}
++
++static inline u8 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
++{
++	return (u8 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
++}
++
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0008-crypto-crypto4xx-enable-AES-RFC3686-ECB-CFB-and-OFB-.patch b/target/linux/apm821xx/patches-4.14/020-0008-crypto-crypto4xx-enable-AES-RFC3686-ECB-CFB-and-OFB-.patch
new file mode 100644
index 0000000000..9977c60d26
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0008-crypto-crypto4xx-enable-AES-RFC3686-ECB-CFB-and-OFB-.patch
@@ -0,0 +1,248 @@
+From f2a13e7cba9e2b16f4888fbd9cf2bc25b95945be Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:21 +0200
+Subject: [PATCH 08/25] crypto: crypto4xx - enable AES RFC3686, ECB, CFB and
+ OFB offloads
+
+The crypto engine supports more than just aes-cbc. This patch
+enables the remaining AES block cipher modes that pass the
+testmanager's test vectors.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  | 66 ++++++++++++++++++++++++
+ drivers/crypto/amcc/crypto4xx_core.c | 98 ++++++++++++++++++++++++++++++++++++
+ drivers/crypto/amcc/crypto4xx_core.h | 10 ++++
+ drivers/crypto/amcc/crypto4xx_sa.h   |  3 ++
+ 4 files changed, 177 insertions(+)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -28,6 +28,7 @@
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
+ #include <crypto/sha.h>
++#include <crypto/ctr.h>
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_core.h"
+ #include "crypto4xx_sa.h"
+@@ -171,6 +172,71 @@ int crypto4xx_setkey_aes_cbc(struct cryp
+ 				    CRYPTO_FEEDBACK_MODE_NO_FB);
+ }
+ 
++int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen)
++{
++	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
++				    CRYPTO_FEEDBACK_MODE_128BIT_CFB);
++}
++
++int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen)
++{
++	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
++				    CRYPTO_FEEDBACK_MODE_NO_FB);
++}
++
++int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen)
++{
++	return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
++				    CRYPTO_FEEDBACK_MODE_64BIT_OFB);
++}
++
++int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen)
++{
++	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
++	int rc;
++
++	rc = crypto4xx_setkey_aes(cipher, key, keylen - CTR_RFC3686_NONCE_SIZE,
++		CRYPTO_MODE_CTR, CRYPTO_FEEDBACK_MODE_NO_FB);
++	if (rc)
++		return rc;
++
++	memcpy(ctx->state_record,
++		key + keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
++
++	return 0;
++}
++
++int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
++{
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	__be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
++		*(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
++
++	ctx->direction = DIR_OUTBOUND;
++	ctx->pd_ctl = 1;
++
++	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
++				  req->nbytes, iv, AES_IV_SIZE);
++}
++
++int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
++{
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	__be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
++		*(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
++
++	ctx->direction = DIR_INBOUND;
++	ctx->pd_ctl = 1;
++
++	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
++				  req->nbytes, iv, AES_IV_SIZE);
++}
++
+ /**
+  * HASH SHA1 Functions
+  */
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -36,6 +36,7 @@
+ #include <asm/dcr-regs.h>
+ #include <asm/cacheflush.h>
+ #include <crypto/aes.h>
++#include <crypto/ctr.h>
+ #include <crypto/sha.h>
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_core.h"
+@@ -1133,6 +1134,103 @@ struct crypto4xx_alg_common crypto4xx_al
+ 			}
+ 		}
+ 	}},
++	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
++		.cra_name	= "cfb(aes)",
++		.cra_driver_name = "cfb-aes-ppc4xx",
++		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
++		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
++				  CRYPTO_ALG_ASYNC |
++				  CRYPTO_ALG_KERN_DRIVER_ONLY,
++		.cra_blocksize	= AES_BLOCK_SIZE,
++		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
++		.cra_type	= &crypto_ablkcipher_type,
++		.cra_init	= crypto4xx_alg_init,
++		.cra_exit	= crypto4xx_alg_exit,
++		.cra_module	= THIS_MODULE,
++		.cra_u		= {
++			.ablkcipher = {
++				.min_keysize	= AES_MIN_KEY_SIZE,
++				.max_keysize	= AES_MAX_KEY_SIZE,
++				.ivsize		= AES_IV_SIZE,
++				.setkey		= crypto4xx_setkey_aes_cfb,
++				.encrypt	= crypto4xx_encrypt,
++				.decrypt	= crypto4xx_decrypt,
++			}
++		}
++	} },
++	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
++		.cra_name	= "rfc3686(ctr(aes))",
++		.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
++		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
++		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
++				  CRYPTO_ALG_ASYNC |
++				  CRYPTO_ALG_KERN_DRIVER_ONLY,
++		.cra_blocksize	= AES_BLOCK_SIZE,
++		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
++		.cra_type	= &crypto_ablkcipher_type,
++		.cra_init	= crypto4xx_alg_init,
++		.cra_exit	= crypto4xx_alg_exit,
++		.cra_module	= THIS_MODULE,
++		.cra_u		= {
++			.ablkcipher = {
++				.min_keysize	= AES_MIN_KEY_SIZE +
++						  CTR_RFC3686_NONCE_SIZE,
++				.max_keysize	= AES_MAX_KEY_SIZE +
++						  CTR_RFC3686_NONCE_SIZE,
++				.ivsize		= CTR_RFC3686_IV_SIZE,
++				.setkey		= crypto4xx_setkey_rfc3686,
++				.encrypt	= crypto4xx_rfc3686_encrypt,
++				.decrypt	= crypto4xx_rfc3686_decrypt,
++			}
++		}
++	} },
++	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
++		.cra_name	= "ecb(aes)",
++		.cra_driver_name = "ecb-aes-ppc4xx",
++		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
++		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
++				  CRYPTO_ALG_ASYNC |
++				  CRYPTO_ALG_KERN_DRIVER_ONLY,
++		.cra_blocksize	= AES_BLOCK_SIZE,
++		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
++		.cra_type	= &crypto_ablkcipher_type,
++		.cra_init	= crypto4xx_alg_init,
++		.cra_exit	= crypto4xx_alg_exit,
++		.cra_module	= THIS_MODULE,
++		.cra_u		= {
++			.ablkcipher = {
++				.min_keysize	= AES_MIN_KEY_SIZE,
++				.max_keysize	= AES_MAX_KEY_SIZE,
++				.setkey		= crypto4xx_setkey_aes_ecb,
++				.encrypt	= crypto4xx_encrypt,
++				.decrypt	= crypto4xx_decrypt,
++			}
++		}
++	} },
++	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
++		.cra_name	= "ofb(aes)",
++		.cra_driver_name = "ofb-aes-ppc4xx",
++		.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
++		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
++				  CRYPTO_ALG_ASYNC |
++				  CRYPTO_ALG_KERN_DRIVER_ONLY,
++		.cra_blocksize	= AES_BLOCK_SIZE,
++		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
++		.cra_type	= &crypto_ablkcipher_type,
++		.cra_init	= crypto4xx_alg_init,
++		.cra_exit	= crypto4xx_alg_exit,
++		.cra_module	= THIS_MODULE,
++		.cra_u		= {
++			.ablkcipher = {
++				.min_keysize	= AES_MIN_KEY_SIZE,
++				.max_keysize	= AES_MAX_KEY_SIZE,
++				.ivsize		= AES_IV_SIZE,
++				.setkey		= crypto4xx_setkey_aes_cbc,
++				.encrypt	= crypto4xx_encrypt,
++				.decrypt	= crypto4xx_decrypt,
++			}
++		}
++	} },
+ };
+ 
+ /**
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -171,8 +171,18 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 		       void *iv, u32 iv_len);
+ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ 			     const u8 *key, unsigned int keylen);
++int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen);
++int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen);
++int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen);
++int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
++			     const u8 *key, unsigned int keylen);
+ int crypto4xx_encrypt(struct ablkcipher_request *req);
+ int crypto4xx_decrypt(struct ablkcipher_request *req);
++int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
++int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+ int crypto4xx_hash_digest(struct ahash_request *req);
+ int crypto4xx_hash_final(struct ahash_request *req);
+--- a/drivers/crypto/amcc/crypto4xx_sa.h
++++ b/drivers/crypto/amcc/crypto4xx_sa.h
+@@ -112,6 +112,9 @@ union sa_command_0 {
+ 
+ #define CRYPTO_MODE_ECB				0
+ #define CRYPTO_MODE_CBC				1
++#define CRYPTO_MODE_OFB				2
++#define CRYPTO_MODE_CFB				3
++#define CRYPTO_MODE_CTR				4
+ 
+ #define CRYPTO_FEEDBACK_MODE_NO_FB		0
+ #define CRYPTO_FEEDBACK_MODE_64BIT_OFB		0
diff --git a/target/linux/apm821xx/patches-4.14/020-0009-crypto-crypto4xx-refactor-crypto4xx_copy_pkt_to_dst.patch b/target/linux/apm821xx/patches-4.14/020-0009-crypto-crypto4xx-refactor-crypto4xx_copy_pkt_to_dst.patch
new file mode 100644
index 0000000000..c6ccade8c1
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0009-crypto-crypto4xx-refactor-crypto4xx_copy_pkt_to_dst.patch
@@ -0,0 +1,171 @@
+From 5c727f92ea5e019fd216f73009eee2b6e0867726 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:22 +0200
+Subject: [PATCH 09/25] crypto: crypto4xx - refactor
+ crypto4xx_copy_pkt_to_dst()
+
+This patch refactors the crypto4xx_copy_pkt_to_dst() to use
+scatterwalk_map_and_copy() to copy the processed data between
+the crypto engine's scatter ring buffer and the destination
+specified by the ablkcipher_request.
+
+This also makes the crypto4xx_fill_one_page() function redundant.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 126 +++++++++--------------------------
+ 1 file changed, 30 insertions(+), 96 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -38,6 +38,7 @@
+ #include <crypto/aes.h>
+ #include <crypto/ctr.h>
+ #include <crypto/sha.h>
++#include <crypto/scatterwalk.h>
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_core.h"
+ #include "crypto4xx_sa.h"
+@@ -481,111 +482,44 @@ static inline struct ce_sd *crypto4xx_ge
+ 	return  (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
+ }
+ 
+-static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
+-				   dma_addr_t *addr, u32 *length,
+-				   u32 *idx, u32 *offset, u32 *nbytes)
+-{
+-	u32 len;
+-
+-	if (*length > dev->scatter_buffer_size) {
+-		memcpy(phys_to_virt(*addr),
+-			dev->scatter_buffer_va +
+-			*idx * dev->scatter_buffer_size + *offset,
+-			dev->scatter_buffer_size);
+-		*offset = 0;
+-		*length -= dev->scatter_buffer_size;
+-		*nbytes -= dev->scatter_buffer_size;
+-		if (*idx == PPC4XX_LAST_SD)
+-			*idx = 0;
+-		else
+-			(*idx)++;
+-		*addr = *addr +  dev->scatter_buffer_size;
+-		return 1;
+-	} else if (*length < dev->scatter_buffer_size) {
+-		memcpy(phys_to_virt(*addr),
+-			dev->scatter_buffer_va +
+-			*idx * dev->scatter_buffer_size + *offset, *length);
+-		if ((*offset + *length) == dev->scatter_buffer_size) {
+-			if (*idx == PPC4XX_LAST_SD)
+-				*idx = 0;
+-			else
+-				(*idx)++;
+-			*nbytes -= *length;
+-			*offset = 0;
+-		} else {
+-			*nbytes -= *length;
+-			*offset += *length;
+-		}
+-
+-		return 0;
+-	} else {
+-		len = (*nbytes <= dev->scatter_buffer_size) ?
+-				(*nbytes) : dev->scatter_buffer_size;
+-		memcpy(phys_to_virt(*addr),
+-			dev->scatter_buffer_va +
+-			*idx * dev->scatter_buffer_size + *offset,
+-			len);
+-		*offset = 0;
+-		*nbytes -= len;
+-
+-		if (*idx == PPC4XX_LAST_SD)
+-			*idx = 0;
+-		else
+-			(*idx)++;
+-
+-		return 0;
+-    }
+-}
+-
+ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
+ 				      struct ce_pd *pd,
+ 				      struct pd_uinfo *pd_uinfo,
+ 				      u32 nbytes,
+ 				      struct scatterlist *dst)
+ {
+-	dma_addr_t addr;
+-	u32 this_sd;
+-	u32 offset;
+-	u32 len;
+-	u32 i;
+-	u32 sg_len;
+-	struct scatterlist *sg;
+-
+-	this_sd = pd_uinfo->first_sd;
+-	offset = 0;
+-	i = 0;
++	unsigned int first_sd = pd_uinfo->first_sd;
++	unsigned int last_sd;
++	unsigned int overflow = 0;
++	unsigned int to_copy;
++	unsigned int dst_start = 0;
++
++	/*
++	 * Because the scatter buffers are all neatly organized in one
++	 * big continuous ringbuffer; scatterwalk_map_and_copy() can
++	 * be instructed to copy a range of buffers in one go.
++	 */
++
++	last_sd = (first_sd + pd_uinfo->num_sd);
++	if (last_sd > PPC4XX_LAST_SD) {
++		last_sd = PPC4XX_LAST_SD;
++		overflow = last_sd % PPC4XX_NUM_SD;
++	}
+ 
+ 	while (nbytes) {
+-		sg = &dst[i];
+-		sg_len = sg->length;
+-		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+-				sg->offset, sg->length, DMA_TO_DEVICE);
+-
+-		if (offset == 0) {
+-			len = (nbytes <= sg->length) ? nbytes : sg->length;
+-			while (crypto4xx_fill_one_page(dev, &addr, &len,
+-				&this_sd, &offset, &nbytes))
+-				;
+-			if (!nbytes)
+-				return;
+-			i++;
+-		} else {
+-			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
+-				nbytes : (dev->scatter_buffer_size - offset);
+-			len = (sg->length < len) ? sg->length : len;
+-			while (crypto4xx_fill_one_page(dev, &addr, &len,
+-					       &this_sd, &offset, &nbytes))
+-				;
+-			if (!nbytes)
+-				return;
+-			sg_len -= len;
+-			if (sg_len) {
+-				addr += len;
+-				while (crypto4xx_fill_one_page(dev, &addr,
+-					&sg_len, &this_sd, &offset, &nbytes))
+-					;
+-			}
+-			i++;
++		void *buf = dev->scatter_buffer_va +
++			first_sd * PPC4XX_SD_BUFFER_SIZE;
++
++		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
++				      (1 + last_sd - first_sd));
++		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
++		nbytes -= to_copy;
++
++		if (overflow) {
++			first_sd = 0;
++			last_sd = overflow;
++			dst_start += to_copy;
++			overflow = 0;
+ 		}
+ 	}
+ }
diff --git a/target/linux/apm821xx/patches-4.14/020-0010-crypto-crypto4xx-replace-crypto4xx_dev-s-scatter_buf.patch b/target/linux/apm821xx/patches-4.14/020-0010-crypto-crypto4xx-replace-crypto4xx_dev-s-scatter_buf.patch
new file mode 100644
index 0000000000..90a6dab2d7
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0010-crypto-crypto4xx-replace-crypto4xx_dev-s-scatter_buf.patch
@@ -0,0 +1,59 @@
+From 40e3b847bff70edc28c5290d209e531da6f9e534 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:23 +0200
+Subject: [PATCH 10/25] crypto: crypto4xx - replace crypto4xx_dev's
+ scatter_buffer_size with constant
+
+scatter_buffer_size is always set to PPC4XX_SD_BUFFER_SIZE.
+I don't think there's any point in keeping the variable
+around.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 7 +++----
+ drivers/crypto/amcc/crypto4xx_core.h | 1 -
+ 2 files changed, 3 insertions(+), 5 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -394,10 +394,9 @@ static u32 crypto4xx_build_sdr(struct cr
+ 	if (!dev->sdr)
+ 		return -ENOMEM;
+ 
+-	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
+ 	dev->scatter_buffer_va =
+ 		dma_alloc_coherent(dev->core_dev->device,
+-			dev->scatter_buffer_size * PPC4XX_NUM_SD,
++			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+ 			&dev->scatter_buffer_pa, GFP_ATOMIC);
+ 	if (!dev->scatter_buffer_va) {
+ 		dma_free_coherent(dev->core_dev->device,
+@@ -410,7 +409,7 @@ static u32 crypto4xx_build_sdr(struct cr
+ 
+ 	for (i = 0; i < PPC4XX_NUM_SD; i++) {
+ 		sd_array[i].ptr = dev->scatter_buffer_pa +
+-				  dev->scatter_buffer_size * i;
++				  PPC4XX_SD_BUFFER_SIZE * i;
+ 	}
+ 
+ 	return 0;
+@@ -425,7 +424,7 @@ static void crypto4xx_destroy_sdr(struct
+ 
+ 	if (dev->scatter_buffer_va != NULL)
+ 		dma_free_coherent(dev->core_dev->device,
+-				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
++				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+ 				  dev->scatter_buffer_va,
+ 				  dev->scatter_buffer_pa);
+ }
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -87,7 +87,6 @@ struct crypto4xx_device {
+ 					program ce sdr_base_register */
+ 	void *scatter_buffer_va;
+ 	dma_addr_t scatter_buffer_pa;
+-	u32 scatter_buffer_size;
+ 
+ 	void *shadow_sa_pool;		/* pool of memory for sa in pd_uinfo */
+ 	dma_addr_t shadow_sa_pool_pa;
diff --git a/target/linux/apm821xx/patches-4.14/020-0011-crypto-crypto4xx-fix-crypto4xx_build_pdr-crypto4xx_b.patch b/target/linux/apm821xx/patches-4.14/020-0011-crypto-crypto4xx-fix-crypto4xx_build_pdr-crypto4xx_b.patch
new file mode 100644
index 0000000000..837fbdc547
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0011-crypto-crypto4xx-fix-crypto4xx_build_pdr-crypto4xx_b.patch
@@ -0,0 +1,84 @@
+From 5d59ad6eea82ef8df92b4109615a0dde9d8093e9 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:24 +0200
+Subject: [PATCH 11/25] crypto: crypto4xx - fix crypto4xx_build_pdr,
+ crypto4xx_build_sdr leak
+
+If one of the later memory allocations in crypto4xx_build_pdr()
+fails: dev->pdr (and/or) dev->pdr_uinfo wouldn't be freed.
+
+crypto4xx_build_sdr() has the same issue with dev->sdr.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -209,7 +209,7 @@ static u32 crypto4xx_build_pdr(struct cr
+ 				  dev->pdr_pa);
+ 		return -ENOMEM;
+ 	}
+-	memset(dev->pdr, 0,  sizeof(struct ce_pd) * PPC4XX_NUM_PD);
++	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+ 				   256 * PPC4XX_NUM_PD,
+ 				   &dev->shadow_sa_pool_pa,
+@@ -242,13 +242,15 @@ static u32 crypto4xx_build_pdr(struct cr
+ 
+ static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
+ {
+-	if (dev->pdr != NULL)
++	if (dev->pdr)
+ 		dma_free_coherent(dev->core_dev->device,
+ 				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ 				  dev->pdr, dev->pdr_pa);
++
+ 	if (dev->shadow_sa_pool)
+ 		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
+ 				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
++
+ 	if (dev->shadow_sr_pool)
+ 		dma_free_coherent(dev->core_dev->device,
+ 			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
+@@ -417,12 +419,12 @@ static u32 crypto4xx_build_sdr(struct cr
+ 
+ static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
+ {
+-	if (dev->sdr != NULL)
++	if (dev->sdr)
+ 		dma_free_coherent(dev->core_dev->device,
+ 				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+ 				  dev->sdr, dev->sdr_pa);
+ 
+-	if (dev->scatter_buffer_va != NULL)
++	if (dev->scatter_buffer_va)
+ 		dma_free_coherent(dev->core_dev->device,
+ 				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+ 				  dev->scatter_buffer_va,
+@@ -1223,7 +1225,7 @@ static int crypto4xx_probe(struct platfo
+ 
+ 	rc = crypto4xx_build_gdr(core_dev->dev);
+ 	if (rc)
+-		goto err_build_gdr;
++		goto err_build_pdr;
+ 
+ 	rc = crypto4xx_build_sdr(core_dev->dev);
+ 	if (rc)
+@@ -1266,12 +1268,11 @@ err_iomap:
+ err_request_irq:
+ 	irq_dispose_mapping(core_dev->irq);
+ 	tasklet_kill(&core_dev->tasklet);
+-	crypto4xx_destroy_sdr(core_dev->dev);
+ err_build_sdr:
++	crypto4xx_destroy_sdr(core_dev->dev);
+ 	crypto4xx_destroy_gdr(core_dev->dev);
+-err_build_gdr:
+-	crypto4xx_destroy_pdr(core_dev->dev);
+ err_build_pdr:
++	crypto4xx_destroy_pdr(core_dev->dev);
+ 	kfree(core_dev->dev);
+ err_alloc_dev:
+ 	kfree(core_dev);
diff --git a/target/linux/apm821xx/patches-4.14/020-0012-crypto-crypto4xx-pointer-arithmetic-overhaul.patch b/target/linux/apm821xx/patches-4.14/020-0012-crypto-crypto4xx-pointer-arithmetic-overhaul.patch
new file mode 100644
index 0000000000..ee7ee1191e
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0012-crypto-crypto4xx-pointer-arithmetic-overhaul.patch
@@ -0,0 +1,373 @@
+From 9e0a0b3a192af20193f074ed2ad9dd85a2e48d00 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at googlemail.com>
+Date: Fri, 25 Aug 2017 15:47:25 +0200
+Subject: [PATCH 12/25] crypto: crypto4xx - pointer arithmetic overhaul
+
+This patch improves the readability of various functions
+by replacing various void * pointer declarations with
+their respective struct * types. This makes it possible
+to use the more eye-friendly array-indexing syntax.
+
+Signed-off-by: Christian Lamparter <chunkeey at googlemail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  | 26 ++++++++--------
+ drivers/crypto/amcc/crypto4xx_core.c | 60 +++++++++++++++---------------------
+ drivers/crypto/amcc/crypto4xx_core.h | 41 +++++++++++++-----------
+ 3 files changed, 59 insertions(+), 68 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -134,7 +134,7 @@ static int crypto4xx_setkey_aes(struct c
+ 		}
+ 	}
+ 	/* Setup SA */
+-	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
++	sa = ctx->sa_in;
+ 
+ 	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ 				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+@@ -159,7 +159,7 @@ static int crypto4xx_setkey_aes(struct c
+ 	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ 
+ 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+-	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
++	sa = ctx->sa_out;
+ 	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ 
+ 	return 0;
+@@ -248,8 +248,7 @@ static int crypto4xx_hash_alg_init(struc
+ 	struct crypto_alg *alg = tfm->__crt_alg;
+ 	struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+-	struct dynamic_sa_ctl *sa;
+-	struct dynamic_sa_hash160 *sa_in;
++	struct dynamic_sa_hash160 *sa;
+ 	int rc;
+ 
+ 	ctx->dev   = my_alg->dev;
+@@ -273,25 +272,24 @@ static int crypto4xx_hash_alg_init(struc
+ 
+ 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ 				 sizeof(struct crypto4xx_ctx));
+-	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+-	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
++	sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
++	set_dynamic_sa_command_0(&sa->ctrl, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ 				 SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ 				 SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
+ 				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ 				 SA_OPCODE_HASH, DIR_INBOUND);
+-	set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
++	set_dynamic_sa_command_1(&sa->ctrl, 0, SA_HASH_MODE_HASH,
+ 				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ 				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ 				 SA_NOT_COPY_HDR);
+ 	ctx->direction = DIR_INBOUND;
+-	sa->sa_contents.w = SA_HASH160_CONTENTS;
+-	sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
+ 	/* Need to zero hash digest in SA */
+-	memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
+-	memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
+-	sa_in->state_ptr = ctx->state_record_dma_addr;
+-	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
++	memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
++	memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
++	sa->state_ptr = ctx->state_record_dma_addr;
++	ctx->offset_to_sr_ptr =
++		get_dynamic_sa_offset_state_ptr_field(&sa->ctrl);
+ 
+ 	return 0;
+ }
+@@ -302,7 +300,7 @@ int crypto4xx_hash_init(struct ahash_req
+ 	int ds;
+ 	struct dynamic_sa_ctl *sa;
+ 
+-	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
++	sa = ctx->sa_in;
+ 	ds = crypto_ahash_digestsize(
+ 			__crypto_ahash_cast(req->base.tfm));
+ 	sa->sa_command_0.bf.digest_len = ds >> 2;
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -211,7 +211,7 @@ static u32 crypto4xx_build_pdr(struct cr
+ 	}
+ 	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ 	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
+-				   256 * PPC4XX_NUM_PD,
++				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
+ 				   &dev->shadow_sa_pool_pa,
+ 				   GFP_ATOMIC);
+ 	if (!dev->shadow_sa_pool)
+@@ -223,16 +223,14 @@ static u32 crypto4xx_build_pdr(struct cr
+ 	if (!dev->shadow_sr_pool)
+ 		return -ENOMEM;
+ 	for (i = 0; i < PPC4XX_NUM_PD; i++) {
+-		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
+-						sizeof(struct pd_uinfo) * i);
++		pd_uinfo = &dev->pdr_uinfo[i];
+ 
+ 		/* alloc 256 bytes which is enough for any kind of dynamic sa */
+-		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
++		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
+ 		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+ 
+ 		/* alloc state record */
+-		pd_uinfo->sr_va = dev->shadow_sr_pool +
+-		    sizeof(struct sa_state_record) * i;
++		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
+ 		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
+ 		    sizeof(struct sa_state_record) * i;
+ 	}
+@@ -248,8 +246,9 @@ static void crypto4xx_destroy_pdr(struct
+ 				  dev->pdr, dev->pdr_pa);
+ 
+ 	if (dev->shadow_sa_pool)
+-		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
+-				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
++		dma_free_coherent(dev->core_dev->device,
++			sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
++			dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+ 
+ 	if (dev->shadow_sr_pool)
+ 		dma_free_coherent(dev->core_dev->device,
+@@ -277,11 +276,9 @@ static u32 crypto4xx_get_pd_from_pdr_nol
+ 
+ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
+ {
+-	struct pd_uinfo *pd_uinfo;
++	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ 	unsigned long flags;
+ 
+-	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
+-				       sizeof(struct pd_uinfo) * idx);
+ 	spin_lock_irqsave(&dev->core_dev->lock, flags);
+ 	if (dev->pdr_tail != PPC4XX_LAST_PD)
+ 		dev->pdr_tail++;
+@@ -298,7 +295,7 @@ static struct ce_pd *crypto4xx_get_pdp(s
+ {
+ 	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
+ 
+-	return dev->pdr + sizeof(struct ce_pd) * idx;
++	return &dev->pdr[idx];
+ }
+ 
+ /**
+@@ -376,7 +373,7 @@ static inline struct ce_gd *crypto4xx_ge
+ {
+ 	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
+ 
+-	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
++	return &dev->gdr[idx];
+ }
+ 
+ /**
+@@ -387,7 +384,6 @@ static inline struct ce_gd *crypto4xx_ge
+ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+ {
+ 	int i;
+-	struct ce_sd *sd_array;
+ 
+ 	/* alloc memory for scatter descriptor ring */
+ 	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
+@@ -407,10 +403,8 @@ static u32 crypto4xx_build_sdr(struct cr
+ 		return -ENOMEM;
+ 	}
+ 
+-	sd_array = dev->sdr;
+-
+ 	for (i = 0; i < PPC4XX_NUM_SD; i++) {
+-		sd_array[i].ptr = dev->scatter_buffer_pa +
++		dev->sdr[i].ptr = dev->scatter_buffer_pa +
+ 				  PPC4XX_SD_BUFFER_SIZE * i;
+ 	}
+ 
+@@ -480,7 +474,7 @@ static inline struct ce_sd *crypto4xx_ge
+ {
+ 	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
+ 
+-	return  (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
++	return &dev->sdr[idx];
+ }
+ 
+ static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
+@@ -529,11 +523,10 @@ static u32 crypto4xx_copy_digest_to_dst(
+ 					struct crypto4xx_ctx *ctx)
+ {
+ 	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+-	struct sa_state_record *state_record =
+-				(struct sa_state_record *) pd_uinfo->sr_va;
+ 
+ 	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
+-		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
++		memcpy((void *) pd_uinfo->dest_va,
++		       pd_uinfo->sr_va->save_digest,
+ 		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
+ 	}
+ 
+@@ -607,11 +600,9 @@ static u32 crypto4xx_ahash_done(struct c
+ 
+ static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+ {
+-	struct ce_pd *pd;
+-	struct pd_uinfo *pd_uinfo;
++	struct ce_pd *pd = &dev->pdr[idx];
++	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ 
+-	pd =  dev->pdr + sizeof(struct ce_pd)*idx;
+-	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
+ 	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+ 			CRYPTO_ALG_TYPE_ABLKCIPHER)
+ 		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+@@ -712,7 +703,6 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	unsigned long flags;
+ 	struct pd_uinfo *pd_uinfo = NULL;
+ 	unsigned int nbytes = datalen, idx;
+-	unsigned int ivlen = 0;
+ 	u32 gd_idx = 0;
+ 
+ 	/* figure how many gd is needed */
+@@ -771,17 +761,15 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	}
+ 	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ 
+-	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
+-				       sizeof(struct pd_uinfo) * pd_entry);
++	pd_uinfo = &dev->pdr_uinfo[pd_entry];
+ 	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+ 	pd_uinfo->async_req = req;
+ 	pd_uinfo->num_gd = num_gd;
+ 	pd_uinfo->num_sd = num_sd;
+ 
+ 	if (iv_len || ctx->is_hash) {
+-		ivlen = iv_len;
+ 		pd->sa = pd_uinfo->sa_pa;
+-		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
++		sa = pd_uinfo->sa_va;
+ 		if (ctx->direction == DIR_INBOUND)
+ 			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
+ 		else
+@@ -791,14 +779,15 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 			&pd_uinfo->sr_pa, 4);
+ 
+ 		if (iv_len)
+-			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
++			crypto4xx_memcpy_le(pd_uinfo->sr_va->save_iv,
++					    iv, iv_len);
+ 	} else {
+ 		if (ctx->direction == DIR_INBOUND) {
+ 			pd->sa = ctx->sa_in_dma_addr;
+-			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
++			sa = ctx->sa_in;
+ 		} else {
+ 			pd->sa = ctx->sa_out_dma_addr;
+-			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
++			sa = ctx->sa_out;
+ 		}
+ 	}
+ 	pd->sa_len = ctx->sa_len;
+@@ -1006,9 +995,8 @@ static void crypto4xx_bh_tasklet_cb(unsi
+ 
+ 	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
+ 		tail = core_dev->dev->pdr_tail;
+-		pd_uinfo = core_dev->dev->pdr_uinfo +
+-			sizeof(struct pd_uinfo)*tail;
+-		pd =  core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
++		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
++		pd = &core_dev->dev->pdr[tail];
+ 		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
+ 				   pd->pd_ctl.bf.pe_done &&
+ 				   !pd->pd_ctl.bf.host_ready) {
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -23,6 +23,8 @@
+ #define __CRYPTO4XX_CORE_H__
+ 
+ #include <crypto/internal/hash.h>
++#include "crypto4xx_reg_def.h"
++#include "crypto4xx_sa.h"
+ 
+ #define MODULE_NAME "crypto4xx"
+ 
+@@ -48,6 +50,13 @@
+ 
+ struct crypto4xx_device;
+ 
++union shadow_sa_buf {
++	struct dynamic_sa_ctl sa;
++
++	/* alloc 256 bytes which is enough for any kind of dynamic sa */
++	u8 buf[256];
++} __packed;
++
+ struct pd_uinfo {
+ 	struct crypto4xx_device *dev;
+ 	u32   state;
+@@ -60,9 +69,9 @@ struct pd_uinfo {
+ 				used by this packet */
+ 	u32 num_sd;		/* number of scatter discriptors
+ 				used by this packet */
+-	void *sa_va;		/* shadow sa, when using cp from ctx->sa */
++	struct dynamic_sa_ctl *sa_va;	/* shadow sa */
+ 	u32 sa_pa;
+-	void *sr_va;		/* state record for shadow sa */
++	struct sa_state_record *sr_va;	/* state record for shadow sa */
+ 	u32 sr_pa;
+ 	struct scatterlist *dest_va;
+ 	struct crypto_async_request *async_req; 	/* base crypto request
+@@ -75,22 +84,18 @@ struct crypto4xx_device {
+ 	void __iomem *ce_base;
+ 	void __iomem *trng_base;
+ 
+-	void *pdr;			/* base address of packet
+-					descriptor ring */
+-	dma_addr_t pdr_pa;		/* physical address used to
+-					program ce pdr_base_register */
+-	void *gdr;                      /* gather descriptor ring */
+-	dma_addr_t gdr_pa;		/* physical address used to
+-					program ce gdr_base_register */
+-	void *sdr;			/* scatter descriptor ring */
+-	dma_addr_t sdr_pa;		/* physical address used to
+-					program ce sdr_base_register */
++	struct ce_pd *pdr;	/* base address of packet descriptor ring */
++	dma_addr_t pdr_pa;	/* physical address of pdr_base_register */
++	struct ce_gd *gdr;	/* gather descriptor ring */
++	dma_addr_t gdr_pa;	/* physical address of gdr_base_register */
++	struct ce_sd *sdr;	/* scatter descriptor ring */
++	dma_addr_t sdr_pa;	/* physical address of sdr_base_register */
+ 	void *scatter_buffer_va;
+ 	dma_addr_t scatter_buffer_pa;
+ 
+-	void *shadow_sa_pool;		/* pool of memory for sa in pd_uinfo */
++	union shadow_sa_buf *shadow_sa_pool;
+ 	dma_addr_t shadow_sa_pool_pa;
+-	void *shadow_sr_pool;		/* pool of memory for sr in pd_uinfo */
++	struct sa_state_record *shadow_sr_pool;
+ 	dma_addr_t shadow_sr_pool_pa;
+ 	u32 pdr_tail;
+ 	u32 pdr_head;
+@@ -98,7 +103,7 @@ struct crypto4xx_device {
+ 	u32 gdr_head;
+ 	u32 sdr_tail;
+ 	u32 sdr_head;
+-	void *pdr_uinfo;
++	struct pd_uinfo *pdr_uinfo;
+ 	struct list_head alg_list;	/* List of algorithm supported
+ 					by this device */
+ };
+@@ -116,11 +121,11 @@ struct crypto4xx_core_device {
+ 
+ struct crypto4xx_ctx {
+ 	struct crypto4xx_device *dev;
+-	void *sa_in;
++	struct dynamic_sa_ctl *sa_in;
+ 	dma_addr_t sa_in_dma_addr;
+-	void *sa_out;
++	struct dynamic_sa_ctl *sa_out;
+ 	dma_addr_t sa_out_dma_addr;
+-	void *state_record;
++	struct sa_state_record *state_record;
+ 	dma_addr_t state_record_dma_addr;
+ 	u32 sa_len;
+ 	u32 offset_to_sr_ptr;           /* offset to state ptr, in dynamic sa */
diff --git a/target/linux/apm821xx/patches-4.14/020-0013-crypto-crypto4xx-wire-up-hmac_mc-to-hmac_muting.patch b/target/linux/apm821xx/patches-4.14/020-0013-crypto-crypto4xx-wire-up-hmac_mc-to-hmac_muting.patch
new file mode 100644
index 0000000000..9ddfcb95f1
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0013-crypto-crypto4xx-wire-up-hmac_mc-to-hmac_muting.patch
@@ -0,0 +1,25 @@
+From 5a4326d3a03f03c2518a2c255be33a7114af3230 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:05 +0200
+Subject: [PATCH 13/25] crypto: crypto4xx - wire up hmac_mc to hmac_muting
+
+The hmac_mc parameter of set_dynamic_sa_command_1()
+was defined but not used. On closer inspection, it
+turns out it was never wired up.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -63,6 +63,7 @@ static void set_dynamic_sa_command_1(str
+ 	sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+ 	sa->sa_command_1.bf.feedback_mode = cfb,
+ 	sa->sa_command_1.bf.sa_rev = 1;
++	sa->sa_command_1.bf.hmac_muting = hmac_mc;
+ 	sa->sa_command_1.bf.extended_seq_num = esn;
+ 	sa->sa_command_1.bf.seq_num_mask = sn_mask;
+ 	sa->sa_command_1.bf.mutable_bit_proc = mute;
diff --git a/target/linux/apm821xx/patches-4.14/020-0014-crypto-crypto4xx-fix-off-by-one-AES-OFB.patch b/target/linux/apm821xx/patches-4.14/020-0014-crypto-crypto4xx-fix-off-by-one-AES-OFB.patch
new file mode 100644
index 0000000000..acded24bbb
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0014-crypto-crypto4xx-fix-off-by-one-AES-OFB.patch
@@ -0,0 +1,49 @@
+From e9b8e4e1129d0886094cfe013cdbaafc4ce0de76 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:06 +0200
+Subject: [PATCH 14/25] crypto: crypto4xx - fix off-by-one AES-OFB
+
+I used aes-cbc as a template for ofb, but sadly I forgot
+to update the set_key method to crypto4xx_setkey_aes_ofb().
+
+This was caught by the testmgr:
+alg: skcipher: Test 1 failed (invalid result) on encr. for ofb-aes-ppc4xx
+00000000: 76 49 ab ac 81 19 b2 46 ce e9 8e 9b 12 e9 19 7d
+00000010: 50 86 cb 9b 50 72 19 ee 95 db 11 3a 91 76 78 b2
+00000020: 73 be d6 b8 e3 c1 74 3b 71 16 e6 9e 22 22 95 16
+00000030: 3f f1 ca a1 68 1f ac 09 12 0e ca 30 75 86 e1 a7
+
+With the correct set_key method, the aes-ofb cipher passes the test.
+
+name         : ofb(aes)
+driver       : ofb-aes-ppc4xx
+module       : crypto4xx
+priority     : 300
+refcnt       : 1
+selftest     : passed
+internal     : no
+type         : ablkcipher
+async        : yes
+blocksize    : 16
+min keysize  : 16
+max keysize  : 32
+ivsize       : 16
+geniv        : <default>
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1148,7 +1148,7 @@ struct crypto4xx_alg_common crypto4xx_al
+ 				.min_keysize	= AES_MIN_KEY_SIZE,
+ 				.max_keysize	= AES_MAX_KEY_SIZE,
+ 				.ivsize		= AES_IV_SIZE,
+-				.setkey		= crypto4xx_setkey_aes_cbc,
++				.setkey		= crypto4xx_setkey_aes_ofb,
+ 				.encrypt	= crypto4xx_encrypt,
+ 				.decrypt	= crypto4xx_decrypt,
+ 			}
diff --git a/target/linux/apm821xx/patches-4.14/020-0015-crypto-crypto4xx-fix-type-mismatch-compiler-error.patch b/target/linux/apm821xx/patches-4.14/020-0015-crypto-crypto4xx-fix-type-mismatch-compiler-error.patch
new file mode 100644
index 0000000000..f0f1d0ea5a
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0015-crypto-crypto4xx-fix-type-mismatch-compiler-error.patch
@@ -0,0 +1,29 @@
+From 333eb3edda3842f3e5dbd723cb18bbe47eb0508b Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:07 +0200
+Subject: [PATCH 15/25] crypto: crypto4xx - fix type mismatch compiler error
+
+This patch fixes a type mismatch error that I accidentally
+introduced when I moved and refactored the dynamic_contents
+helpers.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_sa.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_sa.h
++++ b/drivers/crypto/amcc/crypto4xx_sa.h
+@@ -266,9 +266,9 @@ get_dynamic_sa_offset_state_ptr_field(st
+ 	return sizeof(struct dynamic_sa_ctl) + offset * 4;
+ }
+ 
+-static inline u8 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
++static inline u32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+ {
+-	return (u8 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
++	return (u32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+ }
+ 
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0017-crypto-crypto4xx-add-backlog-queue-support.patch b/target/linux/apm821xx/patches-4.14/020-0017-crypto-crypto4xx-add-backlog-queue-support.patch
new file mode 100644
index 0000000000..046dd9c257
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0017-crypto-crypto4xx-add-backlog-queue-support.patch
@@ -0,0 +1,161 @@
+From 8ef8d195430ca3542d0434cf25e5115484b9fa32 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:09 +0200
+Subject: [PATCH 17/25] crypto: crypto4xx - add backlog queue support
+
+Previously, if the crypto4xx driver used all available
+security contexts, it would simply refuse new requests
+with -EAGAIN. CRYPTO_TFM_REQ_MAY_BACKLOG was ignored.
+
+In the case of dm-crypt.c's crypt_convert() function, this
+caused the following errors to manifest if the system was
+pushed hard enough:
+
+| EXT4-fs warning (dm-1): ext4_end_bio:314: I/O error -5 writing to ino ..
+| EXT4-fs warning (dm-1): ext4_end_bio:314: I/O error -5 writing to ino ..
+| EXT4-fs warning (dm-1): ext4_end_bio:314: I/O error -5 writing to ino ..
+| JBD2: Detected IO errors while flushing file data on dm-1-8
+| Aborting journal on device dm-1-8.
+| EXT4-fs error : ext4_journal_check_start:56: Detected aborted journal
+| EXT4-fs (dm-1): Remounting filesystem read-only
+| EXT4-fs : ext4_writepages: jbd2_start: 2048 pages, inode 498...; err -30
+
+(This did cause corruption due to failed writes.)
+
+To fix this mess, the crypto4xx driver needs to notify the
+user to slow down. This can be achieved by returning -EBUSY
+on requests once the crypto hardware falls behind.
+
+Note: -EBUSY has two different meanings. Setting the flag
+CRYPTO_TFM_REQ_MAY_BACKLOG implies that the request was
+successfully queued by the crypto driver. To meet this
+requirement, the implementation introduces a threshold check and
+adds logic to the completion routines, in much the same way as
+AMD's Cryptographic Coprocessor (CCP) driver does.
+
+Note2: Tests showed that dm-crypt starved ipsec traffic.
+Under load, ipsec links dropped to 0 Kbits/s. This is because
+dm-crypt's callback would instantly queue the next request.
+In order to not starve ipsec, the driver reserves a small
+portion of the available crypto contexts for this purpose.
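+
+A simplified sketch of the threshold check this adds (condensed from
+the crypto4xx_build_pd() hunk below; locking omitted):
+
+	used = (dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD;
+	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
+		/* mark busy at 13/16 = ~81%, the request is still queued */
+		is_busy = used >= (PPC4XX_NUM_PD * 13) / 16;
+	} else if (used >= (PPC4XX_NUM_PD * 15) / 16) {
+		/* no-backlog requests are rejected above 15/16 */
+		return -EBUSY;
+	}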
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 47 ++++++++++++++++++++++++++++++------
+ drivers/crypto/amcc/crypto4xx_core.h |  3 ++-
+ 2 files changed, 41 insertions(+), 9 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -39,6 +39,7 @@
+ #include <crypto/ctr.h>
+ #include <crypto/sha.h>
+ #include <crypto/scatterwalk.h>
++#include <crypto/internal/skcipher.h>
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_core.h"
+ #include "crypto4xx_sa.h"
+@@ -573,8 +574,10 @@ static u32 crypto4xx_ablkcipher_done(str
+ 				    dst->offset, dst->length, DMA_FROM_DEVICE);
+ 	}
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+-	if (ablk_req->base.complete != NULL)
+-		ablk_req->base.complete(&ablk_req->base, 0);
++
++	if (pd_uinfo->state & PD_ENTRY_BUSY)
++		ablkcipher_request_complete(ablk_req, -EINPROGRESS);
++	ablkcipher_request_complete(ablk_req, 0);
+ 
+ 	return 0;
+ }
+@@ -591,9 +594,10 @@ static u32 crypto4xx_ahash_done(struct c
+ 	crypto4xx_copy_digest_to_dst(pd_uinfo,
+ 				     crypto_tfm_ctx(ahash_req->base.tfm));
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+-	/* call user provided callback function x */
+-	if (ahash_req->base.complete != NULL)
+-		ahash_req->base.complete(&ahash_req->base, 0);
++
++	if (pd_uinfo->state & PD_ENTRY_BUSY)
++		ahash_request_complete(ahash_req, -EINPROGRESS);
++	ahash_request_complete(ahash_req, 0);
+ 
+ 	return 0;
+ }
+@@ -704,6 +708,7 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	struct pd_uinfo *pd_uinfo = NULL;
+ 	unsigned int nbytes = datalen, idx;
+ 	u32 gd_idx = 0;
++	bool is_busy;
+ 
+ 	/* figure how many gd is needed */
+ 	num_gd = sg_nents_for_len(src, datalen);
+@@ -734,6 +739,31 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	 * already got must be return the original place.
+ 	 */
+ 	spin_lock_irqsave(&dev->core_dev->lock, flags);
++	/*
++	 * Let the caller know to slow down, once more than 13/16ths = 81%
++	 * of the available data contexts are being used simultaneously.
++	 *
++	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" for
++	 * 31 more contexts. Before new requests have to be rejected.
++	 */
++	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
++		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
++			((PPC4XX_NUM_PD * 13) / 16);
++	} else {
++		/*
++		 * To fix contention issues between ipsec (no blacklog) and
++		 * dm-crypto (backlog) reserve 32 entries for "no backlog"
++		 * data contexts.
++		 */
++		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
++			((PPC4XX_NUM_PD * 15) / 16);
++
++		if (is_busy) {
++			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
++			return -EBUSY;
++		}
++	}
++
+ 	if (num_gd) {
+ 		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
+ 		if (fst_gd == ERING_WAS_FULL) {
+@@ -888,11 +918,12 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	sa->sa_command_1.bf.hash_crypto_offset = 0;
+ 	pd->pd_ctl.w = ctx->pd_ctl;
+ 	pd->pd_ctl_len.w = 0x00400000 | datalen;
+-	pd_uinfo->state = PD_ENTRY_INUSE;
++	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
++
+ 	wmb();
+ 	/* write any value to push engine to read a pd */
+ 	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+-	return -EINPROGRESS;
++	return is_busy ? -EBUSY : -EINPROGRESS;
+ }
+ 
+ /**
+@@ -997,7 +1028,7 @@ static void crypto4xx_bh_tasklet_cb(unsi
+ 		tail = core_dev->dev->pdr_tail;
+ 		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ 		pd = &core_dev->dev->pdr[tail];
+-		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
++		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+ 				   pd->pd_ctl.bf.pe_done &&
+ 				   !pd->pd_ctl.bf.host_ready) {
+ 			pd->pd_ctl.bf.pe_done = 0;
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -44,7 +44,8 @@
+ #define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
+ #define PPC4XX_SD_BUFFER_SIZE			2048
+ 
+-#define PD_ENTRY_INUSE				1
++#define PD_ENTRY_BUSY				BIT(1)
++#define PD_ENTRY_INUSE				BIT(0)
+ #define PD_ENTRY_FREE				0
+ #define ERING_WAS_FULL				0xffffffff
+ 
diff --git a/target/linux/apm821xx/patches-4.14/020-0018-crypto-crypto4xx-use-the-correct-LE32-format-for-IV-.patch b/target/linux/apm821xx/patches-4.14/020-0018-crypto-crypto4xx-use-the-correct-LE32-format-for-IV-.patch
new file mode 100644
index 0000000000..22d2a317f5
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0018-crypto-crypto4xx-use-the-correct-LE32-format-for-IV-.patch
@@ -0,0 +1,236 @@
+From 4865b122d4aff5151c88d2f7442d5a87f7e795ae Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:10 +0200
+Subject: [PATCH 18/25] crypto: crypto4xx - use the correct LE32 format for IV
+ and key defs
+
+The hardware expects that the keys, IVs (and inner/outer hashes)
+are in the le32 format.
+
+This patch changes all hardware interface declarations to use
+the correct LE32 data format for each field.
+
+In order to pass __CHECK_ENDIAN__ checks, crypto4xx_memcpy_le
+has to be honest about the endianness of its parameters.
+The function was split and moved to the common crypto4xx_core.h
+header. This allows the compiler to generate better code if the
+size/len is a constant (various *_IV_LEN).
+
+Please note that the hardware isn't consistent with the endianness
+of the save_digest field in the state record struct, though.
+The hashes produced by GHASH and CBC (for CCM) will be in LE32,
+whereas md5 and sha{1,256,...} do not need any conversion.
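+
+A typical caller then looks like this (taken from the crypto4xx_alg.c
+hunk below), with the SA key field now declared as __le32:
+
+	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);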
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  |  4 +--
+ drivers/crypto/amcc/crypto4xx_core.c | 40 ++----------------------------
+ drivers/crypto/amcc/crypto4xx_core.h | 47 +++++++++++++++++++++++++++++++++---
+ drivers/crypto/amcc/crypto4xx_sa.h   | 29 ++++++++++++----------
+ 4 files changed, 64 insertions(+), 56 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -149,8 +149,8 @@ static int crypto4xx_setkey_aes(struct c
+ 				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ 				 SA_NOT_COPY_HDR);
+-	crypto4xx_memcpy_le(get_dynamic_sa_key_field(sa),
+-			    key, keylen);
++	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
++				 key, keylen);
+ 	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
+ 	sa->sa_command_1.bf.key_len = keylen >> 3;
+ 	ctx->is_hash = 0;
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -614,42 +614,6 @@ static u32 crypto4xx_pd_done(struct cryp
+ 		return crypto4xx_ahash_done(dev, pd_uinfo);
+ }
+ 
+-/**
+- * Note: Only use this function to copy items that is word aligned.
+- */
+-void crypto4xx_memcpy_le(unsigned int *dst,
+-			 const unsigned char *buf,
+-			 int len)
+-{
+-	u8 *tmp;
+-	for (; len >= 4; buf += 4, len -= 4)
+-		*dst++ = cpu_to_le32(*(unsigned int *) buf);
+-
+-	tmp = (u8 *)dst;
+-	switch (len) {
+-	case 3:
+-		*tmp++ = 0;
+-		*tmp++ = *(buf+2);
+-		*tmp++ = *(buf+1);
+-		*tmp++ = *buf;
+-		break;
+-	case 2:
+-		*tmp++ = 0;
+-		*tmp++ = 0;
+-		*tmp++ = *(buf+1);
+-		*tmp++ = *buf;
+-		break;
+-	case 1:
+-		*tmp++ = 0;
+-		*tmp++ = 0;
+-		*tmp++ = 0;
+-		*tmp++ = *buf;
+-		break;
+-	default:
+-		break;
+-	}
+-}
+-
+ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
+ {
+ 	crypto4xx_destroy_pdr(core_dev->dev);
+@@ -809,8 +773,8 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 			&pd_uinfo->sr_pa, 4);
+ 
+ 		if (iv_len)
+-			crypto4xx_memcpy_le(pd_uinfo->sr_va->save_iv,
+-					    iv, iv_len);
++			crypto4xx_memcpy_to_le32(pd_uinfo->sr_va->save_iv,
++						 iv, iv_len);
+ 	} else {
+ 		if (ctx->direction == DIR_INBOUND) {
+ 			pd->sa = ctx->sa_in_dma_addr;
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -166,9 +166,7 @@ int crypto4xx_alloc_sa(struct crypto4xx_
+ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+ void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+ u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+-void crypto4xx_memcpy_le(unsigned int *dst,
+-			 const unsigned char *buf, int len);
+-u32 crypto4xx_build_pd(struct crypto_async_request *req,
++int crypto4xx_build_pd(struct crypto_async_request *req,
+ 		       struct crypto4xx_ctx *ctx,
+ 		       struct scatterlist *src,
+ 		       struct scatterlist *dst,
+@@ -193,4 +191,47 @@ int crypto4xx_hash_digest(struct ahash_r
+ int crypto4xx_hash_final(struct ahash_request *req);
+ int crypto4xx_hash_update(struct ahash_request *req);
+ int crypto4xx_hash_init(struct ahash_request *req);
++
++/**
++ * Note: Only use this function to copy items that is word aligned.
++ */
++static inline void crypto4xx_memcpy_swab32(u32 *dst, const void *buf,
++					   size_t len)
++{
++	for (; len >= 4; buf += 4, len -= 4)
++		*dst++ = __swab32p((u32 *) buf);
++
++	if (len) {
++		const u8 *tmp = (u8 *)buf;
++
++		switch (len) {
++		case 3:
++			*dst = (tmp[2] << 16) |
++			       (tmp[1] << 8) |
++			       tmp[0];
++			break;
++		case 2:
++			*dst = (tmp[1] << 8) |
++			       tmp[0];
++			break;
++		case 1:
++			*dst = tmp[0];
++			break;
++		default:
++			break;
++		}
++	}
++}
++
++static inline void crypto4xx_memcpy_from_le32(u32 *dst, const void *buf,
++					      size_t len)
++{
++	crypto4xx_memcpy_swab32(dst, buf, len);
++}
++
++static inline void crypto4xx_memcpy_to_le32(__le32 *dst, const void *buf,
++					    size_t len)
++{
++	crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
++}
+ #endif
+--- a/drivers/crypto/amcc/crypto4xx_sa.h
++++ b/drivers/crypto/amcc/crypto4xx_sa.h
+@@ -181,9 +181,12 @@ struct dynamic_sa_ctl {
+  * State Record for Security Association (SA)
+  */
+ struct  sa_state_record {
+-	u32 save_iv[4];
+-	u32 save_hash_byte_cnt[2];
+-	u32 save_digest[16];
++	__le32 save_iv[4];
++	__le32 save_hash_byte_cnt[2];
++	union {
++		u32 save_digest[16]; /* for MD5/SHA */
++		__le32 save_digest_le32[16]; /* GHASH / CBC */
++	};
+ } __attribute__((packed));
+ 
+ /**
+@@ -192,8 +195,8 @@ struct  sa_state_record {
+  */
+ struct dynamic_sa_aes128 {
+ 	struct dynamic_sa_ctl	ctrl;
+-	u32 key[4];
+-	u32 iv[4]; /* for CBC, OFC, and CFB mode */
++	__le32 key[4];
++	__le32 iv[4]; /* for CBC, OFC, and CFB mode */
+ 	u32 state_ptr;
+ 	u32 reserved;
+ } __attribute__((packed));
+@@ -206,8 +209,8 @@ struct dynamic_sa_aes128 {
+  */
+ struct dynamic_sa_aes192 {
+ 	struct dynamic_sa_ctl ctrl;
+-	u32 key[6];
+-	u32 iv[4]; /* for CBC, OFC, and CFB mode */
++	__le32 key[6];
++	__le32 iv[4]; /* for CBC, OFC, and CFB mode */
+ 	u32 state_ptr;
+ 	u32 reserved;
+ } __attribute__((packed));
+@@ -220,8 +223,8 @@ struct dynamic_sa_aes192 {
+  */
+ struct dynamic_sa_aes256 {
+ 	struct dynamic_sa_ctl ctrl;
+-	u32 key[8];
+-	u32 iv[4]; /* for CBC, OFC, and CFB mode */
++	__le32 key[8];
++	__le32 iv[4]; /* for CBC, OFC, and CFB mode */
+ 	u32 state_ptr;
+ 	u32 reserved;
+ } __attribute__((packed));
+@@ -235,8 +238,8 @@ struct dynamic_sa_aes256 {
+  */
+ struct dynamic_sa_hash160 {
+ 	struct dynamic_sa_ctl ctrl;
+-	u32 inner_digest[5];
+-	u32 outer_digest[5];
++	__le32 inner_digest[5];
++	__le32 outer_digest[5];
+ 	u32 state_ptr;
+ 	u32 reserved;
+ } __attribute__((packed));
+@@ -266,9 +269,9 @@ get_dynamic_sa_offset_state_ptr_field(st
+ 	return sizeof(struct dynamic_sa_ctl) + offset * 4;
+ }
+ 
+-static inline u32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
++static inline __le32 *get_dynamic_sa_key_field(struct dynamic_sa_ctl *cts)
+ {
+-	return (u32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
++	return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+ }
+ 
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0019-crypto-crypto4xx-overhaul-crypto4xx_build_pd.patch b/target/linux/apm821xx/patches-4.14/020-0019-crypto-crypto4xx-overhaul-crypto4xx_build_pd.patch
new file mode 100644
index 0000000000..7b739c68cd
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0019-crypto-crypto4xx-overhaul-crypto4xx_build_pd.patch
@@ -0,0 +1,535 @@
+From cd4dcd6da7a2610e0562a6e130bb68cc544a8fb1 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:11 +0200
+Subject: [PATCH 19/25] crypto: crypto4xx - overhaul crypto4xx_build_pd()
+
+This patch overhauls and fixes code related to crypto4xx_build_pd()
+
+ * crypto4xx_build_pd() did not handle chained source scatterlists.
+   This is fixed by replacing the buggy indexed access of &src[idx]
+   with sg_next() in the gather array setup loop (see the short
+   sketch after this list).
+
+ * The redundant is_hash, direction, save_iv and pd_ctl members
+   in the crypto4xx_ctx struct have been removed.
+    - is_hash can be derived from the crypto_async_request parameter.
+    - direction is already part of the security association's
+      bf.dir bitfield.
+    - save_iv is unused.
+    - pd_ctl always had the host_ready bit enabled anyway
+      (the hash_final case is rather pointless, since the ahash
+       code has been deactivated).
+
+ * make crypto4xx_build_pd()'s caller responsible for converting
+   the IV to the LE32 format.
+
+ * change crypto4xx_ahash_update() and crypto4xx_ahash_digest() to
+   initialize a temporary destination scatterlist. This allows the
+   removal of an ugly cast of req->result (which is a pointer to a
+   u8 array) to a scatterlist pointer.
+
+ * change crypto4xx_build_pd() return type to int. After all
+   it returns -EINPROGRESS/-EBUSY.
+
+ * fix crypto4xx_build_pd() thread-unsafe sa handling.
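+
+A short sketch of the gather-loop change mentioned in the first point
+(condensed from the crypto4xx_core.c hunk below):
+
+	/* before: indexed access, breaks on chained scatterlists */
+	sg = &src[idx++];
+	/* after: follow the chain, however it was assembled */
+	sg = sg_next(sg);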
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  |  87 +++++++++++-------------
+ drivers/crypto/amcc/crypto4xx_core.c | 128 ++++++++++++++++-------------------
+ drivers/crypto/amcc/crypto4xx_core.h |  12 ++--
+ 3 files changed, 103 insertions(+), 124 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -75,27 +75,29 @@ static void set_dynamic_sa_command_1(str
+ int crypto4xx_encrypt(struct ablkcipher_request *req)
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	unsigned int ivlen = crypto_ablkcipher_ivsize(
++		crypto_ablkcipher_reqtfm(req));
++	__le32 iv[ivlen];
+ 
+-	ctx->direction = DIR_OUTBOUND;
+-	ctx->is_hash = 0;
+-	ctx->pd_ctl = 0x1;
++	if (ivlen)
++		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-		req->nbytes, req->info,
+-		crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
++		req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len);
+ }
+ 
+ int crypto4xx_decrypt(struct ablkcipher_request *req)
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	unsigned int ivlen = crypto_ablkcipher_ivsize(
++		crypto_ablkcipher_reqtfm(req));
++	__le32 iv[ivlen];
+ 
+-	ctx->direction = DIR_INBOUND;
+-	ctx->is_hash = 0;
+-	ctx->pd_ctl = 1;
++	if (ivlen)
++		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-		req->nbytes, req->info,
+-		crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)));
++		req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len);
+ }
+ 
+ /**
+@@ -153,11 +155,6 @@ static int crypto4xx_setkey_aes(struct c
+ 				 key, keylen);
+ 	sa->sa_contents.w = SA_AES_CONTENTS | (keylen << 2);
+ 	sa->sa_command_1.bf.key_len = keylen >> 3;
+-	ctx->is_hash = 0;
+-	ctx->direction = DIR_INBOUND;
+-	memcpy(sa + get_dynamic_sa_offset_state_ptr_field(sa),
+-	       (void *)&ctx->state_record_dma_addr, 4);
+-	ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ 
+ 	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ 	sa = ctx->sa_out;
+@@ -206,7 +203,7 @@ int crypto4xx_setkey_rfc3686(struct cryp
+ 	if (rc)
+ 		return rc;
+ 
+-	memcpy(ctx->state_record,
++	crypto4xx_memcpy_to_le32(ctx->state_record->save_iv,
+ 		key + keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
+ 
+ 	return 0;
+@@ -215,27 +212,29 @@ int crypto4xx_setkey_rfc3686(struct cryp
+ int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req)
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+-	__be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
+-		*(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
+-
+-	ctx->direction = DIR_OUTBOUND;
+-	ctx->pd_ctl = 1;
++	__le32 iv[AES_IV_SIZE / 4] = {
++		ctx->state_record->save_iv[0],
++		cpu_to_le32p((u32 *) req->info),
++		cpu_to_le32p((u32 *) (req->info + 4)),
++		cpu_to_le32(1) };
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-				  req->nbytes, iv, AES_IV_SIZE);
++				  req->nbytes, iv, AES_IV_SIZE,
++				  ctx->sa_out, ctx->sa_len);
+ }
+ 
+ int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+-	__be32 iv[AES_IV_SIZE / 4] = { *(u32 *)ctx->state_record,
+-		*(u32 *) req->info, *(u32 *) (req->info + 4), cpu_to_be32(1) };
+-
+-	ctx->direction = DIR_INBOUND;
+-	ctx->pd_ctl = 1;
++	__le32 iv[AES_IV_SIZE / 4] = {
++		ctx->state_record->save_iv[0],
++		cpu_to_le32p((u32 *) req->info),
++		cpu_to_le32p((u32 *) (req->info + 4)),
++		cpu_to_le32(1) };
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-				  req->nbytes, iv, AES_IV_SIZE);
++				  req->nbytes, iv, AES_IV_SIZE,
++				  ctx->sa_out, ctx->sa_len);
+ }
+ 
+ /**
+@@ -253,7 +252,6 @@ static int crypto4xx_hash_alg_init(struc
+ 	int rc;
+ 
+ 	ctx->dev   = my_alg->dev;
+-	ctx->is_hash = 1;
+ 
+ 	/* Create SA */
+ 	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+@@ -284,13 +282,9 @@ static int crypto4xx_hash_alg_init(struc
+ 				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ 				 SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ 				 SA_NOT_COPY_HDR);
+-	ctx->direction = DIR_INBOUND;
+ 	/* Need to zero hash digest in SA */
+ 	memset(sa->inner_digest, 0, sizeof(sa->inner_digest));
+ 	memset(sa->outer_digest, 0, sizeof(sa->outer_digest));
+-	sa->state_ptr = ctx->state_record_dma_addr;
+-	ctx->offset_to_sr_ptr =
+-		get_dynamic_sa_offset_state_ptr_field(&sa->ctrl);
+ 
+ 	return 0;
+ }
+@@ -306,23 +300,22 @@ int crypto4xx_hash_init(struct ahash_req
+ 			__crypto_ahash_cast(req->base.tfm));
+ 	sa->sa_command_0.bf.digest_len = ds >> 2;
+ 	sa->sa_command_0.bf.load_hash_state = SA_LOAD_HASH_FROM_SA;
+-	ctx->is_hash = 1;
+-	ctx->direction = DIR_INBOUND;
+ 
+ 	return 0;
+ }
+ 
+ int crypto4xx_hash_update(struct ahash_request *req)
+ {
++	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	struct scatterlist dst;
++	unsigned int ds = crypto_ahash_digestsize(ahash);
++
++	sg_init_one(&dst, req->result, ds);
+ 
+-	ctx->is_hash = 1;
+-	ctx->pd_ctl = 0x11;
+-	ctx->direction = DIR_INBOUND;
+-
+-	return crypto4xx_build_pd(&req->base, ctx, req->src,
+-				  (struct scatterlist *) req->result,
+-				  req->nbytes, NULL, 0);
++	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
++				  req->nbytes, NULL, 0, ctx->sa_in,
++				  ctx->sa_len);
+ }
+ 
+ int crypto4xx_hash_final(struct ahash_request *req)
+@@ -332,14 +325,16 @@ int crypto4xx_hash_final(struct ahash_re
+ 
+ int crypto4xx_hash_digest(struct ahash_request *req)
+ {
++	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	struct scatterlist dst;
++	unsigned int ds = crypto_ahash_digestsize(ahash);
+ 
+-	ctx->pd_ctl = 0x11;
+-	ctx->direction = DIR_INBOUND;
++	sg_init_one(&dst, req->result, ds);
+ 
+-	return crypto4xx_build_pd(&req->base, ctx, req->src,
+-				  (struct scatterlist *) req->result,
+-				  req->nbytes, NULL, 0);
++	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
++				  req->nbytes, NULL, 0, ctx->sa_in,
++				  ctx->sa_len);
+ }
+ 
+ /**
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -194,7 +194,6 @@ void crypto4xx_free_state_record(struct
+ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
+ {
+ 	int i;
+-	struct pd_uinfo *pd_uinfo;
+ 	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
+ 				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
+ 				      &dev->pdr_pa, GFP_ATOMIC);
+@@ -224,11 +223,14 @@ static u32 crypto4xx_build_pdr(struct cr
+ 	if (!dev->shadow_sr_pool)
+ 		return -ENOMEM;
+ 	for (i = 0; i < PPC4XX_NUM_PD; i++) {
+-		pd_uinfo = &dev->pdr_uinfo[i];
++		struct ce_pd *pd = &dev->pdr[i];
++		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];
++
++		pd->sa = dev->shadow_sa_pool_pa +
++			sizeof(union shadow_sa_buf) * i;
+ 
+ 		/* alloc 256 bytes which is enough for any kind of dynamic sa */
+ 		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;
+-		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
+ 
+ 		/* alloc state record */
+ 		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
+@@ -291,14 +293,6 @@ static u32 crypto4xx_put_pd_to_pdr(struc
+ 	return 0;
+ }
+ 
+-static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
+-				       dma_addr_t *pd_dma, u32 idx)
+-{
+-	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
+-
+-	return &dev->pdr[idx];
+-}
+-
+ /**
+  * alloc memory for the gather ring
+  * no need to alloc buf for the ring
+@@ -520,18 +514,16 @@ static void crypto4xx_copy_pkt_to_dst(st
+ 	}
+ }
+ 
+-static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
++static void crypto4xx_copy_digest_to_dst(void *dst,
++					struct pd_uinfo *pd_uinfo,
+ 					struct crypto4xx_ctx *ctx)
+ {
+ 	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ 
+ 	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
+-		memcpy((void *) pd_uinfo->dest_va,
+-		       pd_uinfo->sr_va->save_digest,
++		memcpy(dst, pd_uinfo->sr_va->save_digest,
+ 		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
+ 	}
+-
+-	return 0;
+ }
+ 
+ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
+@@ -591,7 +583,7 @@ static u32 crypto4xx_ahash_done(struct c
+ 	ahash_req = ahash_request_cast(pd_uinfo->async_req);
+ 	ctx  = crypto_tfm_ctx(ahash_req->base.tfm);
+ 
+-	crypto4xx_copy_digest_to_dst(pd_uinfo,
++	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
+ 				     crypto_tfm_ctx(ahash_req->base.tfm));
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ 
+@@ -651,17 +643,17 @@ static u32 get_next_sd(u32 current)
+ 		return 0;
+ }
+ 
+-u32 crypto4xx_build_pd(struct crypto_async_request *req,
++int crypto4xx_build_pd(struct crypto_async_request *req,
+ 		       struct crypto4xx_ctx *ctx,
+ 		       struct scatterlist *src,
+ 		       struct scatterlist *dst,
+-		       unsigned int datalen,
+-		       void *iv, u32 iv_len)
++		       const unsigned int datalen,
++		       const __le32 *iv, const u32 iv_len,
++		       const struct dynamic_sa_ctl *req_sa,
++		       const unsigned int sa_len)
+ {
+ 	struct crypto4xx_device *dev = ctx->dev;
+-	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
+ 	struct dynamic_sa_ctl *sa;
+-	struct scatterlist *sg;
+ 	struct ce_gd *gd;
+ 	struct ce_pd *pd;
+ 	u32 num_gd, num_sd;
+@@ -669,8 +661,9 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	u32 fst_sd = 0xffffffff;
+ 	u32 pd_entry;
+ 	unsigned long flags;
+-	struct pd_uinfo *pd_uinfo = NULL;
+-	unsigned int nbytes = datalen, idx;
++	struct pd_uinfo *pd_uinfo;
++	unsigned int nbytes = datalen;
++	size_t offset_to_sr_ptr;
+ 	u32 gd_idx = 0;
+ 	bool is_busy;
+ 
+@@ -684,7 +677,7 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 		num_gd = 0;
+ 
+ 	/* figure how many sd is needed */
+-	if (sg_is_last(dst) || ctx->is_hash) {
++	if (sg_is_last(dst)) {
+ 		num_sd = 0;
+ 	} else {
+ 		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
+@@ -755,37 +748,27 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 	}
+ 	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ 
++	pd = &dev->pdr[pd_entry];
++	pd->sa_len = sa_len;
++
+ 	pd_uinfo = &dev->pdr_uinfo[pd_entry];
+-	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
+ 	pd_uinfo->async_req = req;
+ 	pd_uinfo->num_gd = num_gd;
+ 	pd_uinfo->num_sd = num_sd;
+ 
+-	if (iv_len || ctx->is_hash) {
+-		pd->sa = pd_uinfo->sa_pa;
+-		sa = pd_uinfo->sa_va;
+-		if (ctx->direction == DIR_INBOUND)
+-			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
+-		else
+-			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
++	if (iv_len)
++		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);
+ 
+-		memcpy((void *) sa + ctx->offset_to_sr_ptr,
+-			&pd_uinfo->sr_pa, 4);
++	sa = pd_uinfo->sa_va;
++	memcpy(sa, req_sa, sa_len * 4);
++
++	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
++	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
+ 
+-		if (iv_len)
+-			crypto4xx_memcpy_to_le32(pd_uinfo->sr_va->save_iv,
+-						 iv, iv_len);
+-	} else {
+-		if (ctx->direction == DIR_INBOUND) {
+-			pd->sa = ctx->sa_in_dma_addr;
+-			sa = ctx->sa_in;
+-		} else {
+-			pd->sa = ctx->sa_out_dma_addr;
+-			sa = ctx->sa_out;
+-		}
+-	}
+-	pd->sa_len = ctx->sa_len;
+ 	if (num_gd) {
++		dma_addr_t gd_dma;
++		struct scatterlist *sg;
++
+ 		/* get first gd we are going to use */
+ 		gd_idx = fst_gd;
+ 		pd_uinfo->first_gd = fst_gd;
+@@ -794,27 +777,30 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 		pd->src = gd_dma;
+ 		/* enable gather */
+ 		sa->sa_command_0.bf.gather = 1;
+-		idx = 0;
+-		src = &src[0];
+ 		/* walk the sg, and setup gather array */
++
++		sg = src;
+ 		while (nbytes) {
+-			sg = &src[idx];
+-			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+-				    sg->offset, sg->length, DMA_TO_DEVICE);
+-			gd->ptr = addr;
+-			gd->ctl_len.len = sg->length;
++			size_t len;
++
++			len = min(sg->length, nbytes);
++			gd->ptr = dma_map_page(dev->core_dev->device,
++				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
++			gd->ctl_len.len = len;
+ 			gd->ctl_len.done = 0;
+ 			gd->ctl_len.ready = 1;
+-			if (sg->length >= nbytes)
++			if (len >= nbytes)
+ 				break;
++
+ 			nbytes -= sg->length;
+ 			gd_idx = get_next_gd(gd_idx);
+ 			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+-			idx++;
++			sg = sg_next(sg);
+ 		}
+ 	} else {
+ 		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
+-				src->offset, src->length, DMA_TO_DEVICE);
++				src->offset, min(nbytes, src->length),
++				DMA_TO_DEVICE);
+ 		/*
+ 		 * Disable gather in sa command
+ 		 */
+@@ -825,25 +811,24 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 		pd_uinfo->first_gd = 0xffffffff;
+ 		pd_uinfo->num_gd = 0;
+ 	}
+-	if (ctx->is_hash || sg_is_last(dst)) {
++	if (sg_is_last(dst)) {
+ 		/*
+ 		 * we know application give us dst a whole piece of memory
+ 		 * no need to use scatter ring.
+-		 * In case of is_hash, the icv is always at end of src data.
+ 		 */
+ 		pd_uinfo->using_sd = 0;
+ 		pd_uinfo->first_sd = 0xffffffff;
+ 		pd_uinfo->num_sd = 0;
+ 		pd_uinfo->dest_va = dst;
+ 		sa->sa_command_0.bf.scatter = 0;
+-		if (ctx->is_hash)
+-			pd->dest = virt_to_phys((void *)dst);
+-		else
+-			pd->dest = (u32)dma_map_page(dev->core_dev->device,
+-					sg_page(dst), dst->offset,
+-					dst->length, DMA_TO_DEVICE);
++		pd->dest = (u32)dma_map_page(dev->core_dev->device,
++					     sg_page(dst), dst->offset,
++					     min(datalen, dst->length),
++					     DMA_TO_DEVICE);
+ 	} else {
++		dma_addr_t sd_dma;
+ 		struct ce_sd *sd = NULL;
++
+ 		u32 sd_idx = fst_sd;
+ 		nbytes = datalen;
+ 		sa->sa_command_0.bf.scatter = 1;
+@@ -857,7 +842,6 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 		sd->ctl.done = 0;
+ 		sd->ctl.rdy = 1;
+ 		/* sd->ptr should be setup by sd_init routine*/
+-		idx = 0;
+ 		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
+ 			nbytes -= PPC4XX_SD_BUFFER_SIZE;
+ 		else
+@@ -868,19 +852,23 @@ u32 crypto4xx_build_pd(struct crypto_asy
+ 			/* setup scatter descriptor */
+ 			sd->ctl.done = 0;
+ 			sd->ctl.rdy = 1;
+-			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
++			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
+ 				nbytes -= PPC4XX_SD_BUFFER_SIZE;
+-			else
++			} else {
+ 				/*
+ 				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
+ 				 * which is more than nbytes, so done.
+ 				 */
+ 				nbytes = 0;
++			}
+ 		}
+ 	}
+ 
+ 	sa->sa_command_1.bf.hash_crypto_offset = 0;
+-	pd->pd_ctl.w = ctx->pd_ctl;
++	pd->pd_ctl.w = 0;
++	pd->pd_ctl.bf.hash_final =
++		(crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH);
++	pd->pd_ctl.bf.host_ready = 1;
+ 	pd->pd_ctl_len.w = 0x00400000 | datalen;
+ 	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+ 
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -71,7 +71,6 @@ struct pd_uinfo {
+ 	u32 num_sd;		/* number of scatter discriptors
+ 				used by this packet */
+ 	struct dynamic_sa_ctl *sa_va;	/* shadow sa */
+-	u32 sa_pa;
+ 	struct sa_state_record *sr_va;	/* state record for shadow sa */
+ 	u32 sr_pa;
+ 	struct scatterlist *dest_va;
+@@ -129,11 +128,6 @@ struct crypto4xx_ctx {
+ 	struct sa_state_record *state_record;
+ 	dma_addr_t state_record_dma_addr;
+ 	u32 sa_len;
+-	u32 offset_to_sr_ptr;           /* offset to state ptr, in dynamic sa */
+-	u32 direction;
+-	u32 save_iv;
+-	u32 pd_ctl;
+-	u32 is_hash;
+ };
+ 
+ struct crypto4xx_alg_common {
+@@ -170,8 +164,10 @@ int crypto4xx_build_pd(struct crypto_asy
+ 		       struct crypto4xx_ctx *ctx,
+ 		       struct scatterlist *src,
+ 		       struct scatterlist *dst,
+-		       unsigned int datalen,
+-		       void *iv, u32 iv_len);
++		       const unsigned int datalen,
++		       const __le32 *iv, const u32 iv_len,
++		       const struct dynamic_sa_ctl *sa,
++		       const unsigned int sa_len);
+ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ 			     const u8 *key, unsigned int keylen);
+ int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
diff --git a/target/linux/apm821xx/patches-4.14/020-0020-crypto-crypto4xx-fix-various-warnings.patch b/target/linux/apm821xx/patches-4.14/020-0020-crypto-crypto4xx-fix-various-warnings.patch
new file mode 100644
index 0000000000..8847d1fcc1
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0020-crypto-crypto4xx-fix-various-warnings.patch
@@ -0,0 +1,62 @@
+From 64e1062b2371cb8d6126d4e970832365a1a84562 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:12 +0200
+Subject: [PATCH 20/25] crypto: crypto4xx - fix various warnings
+
+crypto4xx_core.c:179:6: warning: symbol 'crypto4xx_free_state_record'
+	was not declared. Should it be static?
+crypto4xx_core.c:331:5: warning: symbol 'crypto4xx_get_n_gd'
+	was not declared. Should it be static?
+crypto4xx_core.c:652:6: warning: symbol 'crypto4xx_return_pd'
+	was not declared. Should it be static?
+
+crypto4xx_return_pd() is not used by anything. Therefore it is removed.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 16 +++-------------
+ 1 file changed, 3 insertions(+), 13 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -176,7 +176,7 @@ u32 crypto4xx_alloc_state_record(struct
+ 	return 0;
+ }
+ 
+-void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
++static void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
+ {
+ 	if (ctx->state_record != NULL)
+ 		dma_free_coherent(ctx->dev->core_dev->device,
+@@ -322,10 +322,11 @@ static inline void crypto4xx_destroy_gdr
+  * when this function is called.
+  * preemption or interrupt must be disabled
+  */
+-u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
++static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
+ {
+ 	u32 retval;
+ 	u32 tmp;
++
+ 	if (n >= PPC4XX_NUM_GD)
+ 		return ERING_WAS_FULL;
+ 
+@@ -616,17 +617,6 @@ static void crypto4xx_stop_all(struct cr
+ 	kfree(core_dev);
+ }
+ 
+-void crypto4xx_return_pd(struct crypto4xx_device *dev,
+-			 u32 pd_entry, struct ce_pd *pd,
+-			 struct pd_uinfo *pd_uinfo)
+-{
+-	/* irq should be already disabled */
+-	dev->pdr_head = pd_entry;
+-	pd->pd_ctl.w = 0;
+-	pd->pd_ctl_len.w = 0;
+-	pd_uinfo->state = PD_ENTRY_FREE;
+-}
+-
+ static u32 get_next_gd(u32 current)
+ {
+ 	if (current != PPC4XX_LAST_GD)
diff --git a/target/linux/apm821xx/patches-4.14/020-0021-crypto-crypto4xx-fix-stalls-under-heavy-load.patch b/target/linux/apm821xx/patches-4.14/020-0021-crypto-crypto4xx-fix-stalls-under-heavy-load.patch
new file mode 100644
index 0000000000..aa621c52ce
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0021-crypto-crypto4xx-fix-stalls-under-heavy-load.patch
@@ -0,0 +1,112 @@
+From 4b5b79998af61db8b0506fba6c0f33b57ea457bd Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:13 +0200
+Subject: [PATCH 21/25] crypto: crypto4xx - fix stalls under heavy load
+
+If the crypto4xx device is continuously loaded by dm-crypt
+and ipsec work, it will start to work intermittently after a
+few (between 20-30) seconds, hurting throughput and latency.
+
+This patch contains various stability improvements in order
+to fix this issue. So far, the hardware has survived more
+than a day without suffering any stalls under the continuous
+load.
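+
+One of the changes, sketched for illustration (condensed from the
+tasklet hunk below): the descriptor status word is now read once and
+both bits are checked together before a packet descriptor is completed:
+
+	if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+	    ((READ_ONCE(pd->pd_ctl.w) &
+	      (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) == PD_CTL_PE_DONE))
+		crypto4xx_pd_done(core_dev->dev, tail);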
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_core.c    | 33 ++++++++++++++++++---------------
+ drivers/crypto/amcc/crypto4xx_reg_def.h |  3 +++
+ 2 files changed, 21 insertions(+), 15 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -280,17 +280,20 @@ static u32 crypto4xx_get_pd_from_pdr_nol
+ static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
+ {
+ 	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
++	u32 tail;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&dev->core_dev->lock, flags);
++	pd_uinfo->state = PD_ENTRY_FREE;
++
+ 	if (dev->pdr_tail != PPC4XX_LAST_PD)
+ 		dev->pdr_tail++;
+ 	else
+ 		dev->pdr_tail = 0;
+-	pd_uinfo->state = PD_ENTRY_FREE;
++	tail = dev->pdr_tail;
+ 	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
+ 
+-	return 0;
++	return tail;
+ }
+ 
+ /**
+@@ -854,16 +857,16 @@ int crypto4xx_build_pd(struct crypto_asy
+ 		}
+ 	}
+ 
+-	sa->sa_command_1.bf.hash_crypto_offset = 0;
+-	pd->pd_ctl.w = 0;
+-	pd->pd_ctl.bf.hash_final =
+-		(crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH);
+-	pd->pd_ctl.bf.host_ready = 1;
++	pd->pd_ctl.w = PD_CTL_HOST_READY |
++		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
++		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
++			PD_CTL_HASH_FINAL : 0);
+ 	pd->pd_ctl_len.w = 0x00400000 | datalen;
+ 	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+ 
+ 	wmb();
+ 	/* write any value to push engine to read a pd */
++	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ 	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ 	return is_busy ? -EBUSY : -EINPROGRESS;
+ }
+@@ -964,23 +967,23 @@ static void crypto4xx_bh_tasklet_cb(unsi
+ 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ 	struct pd_uinfo *pd_uinfo;
+ 	struct ce_pd *pd;
+-	u32 tail;
++	u32 tail = core_dev->dev->pdr_tail;
++	u32 head = core_dev->dev->pdr_head;
+ 
+-	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
+-		tail = core_dev->dev->pdr_tail;
++	do {
+ 		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
+ 		pd = &core_dev->dev->pdr[tail];
+ 		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
+-				   pd->pd_ctl.bf.pe_done &&
+-				   !pd->pd_ctl.bf.host_ready) {
+-			pd->pd_ctl.bf.pe_done = 0;
++		     ((READ_ONCE(pd->pd_ctl.w) &
++		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
++		       PD_CTL_PE_DONE)) {
+ 			crypto4xx_pd_done(core_dev->dev, tail);
+-			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
++			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
+ 		} else {
+ 			/* if tail not done, break */
+ 			break;
+ 		}
+-	}
++	} while (head != tail);
+ }
+ 
+ /**
+--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
++++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
+@@ -261,6 +261,9 @@ union ce_pd_ctl {
+ 	} bf;
+ 	u32 w;
+ } __attribute__((packed));
++#define PD_CTL_HASH_FINAL	BIT(4)
++#define PD_CTL_PE_DONE		BIT(1)
++#define PD_CTL_HOST_READY	BIT(0)
+ 
+ union ce_pd_ctl_len {
+ 	struct {
diff --git a/target/linux/apm821xx/patches-4.14/020-0022-crypto-crypto4xx-simplify-sa-and-state-context-acqui.patch b/target/linux/apm821xx/patches-4.14/020-0022-crypto-crypto4xx-simplify-sa-and-state-context-acqui.patch
new file mode 100644
index 0000000000..06ab798f5b
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0022-crypto-crypto4xx-simplify-sa-and-state-context-acqui.patch
@@ -0,0 +1,209 @@
+From 2f77690dcb96e525bc6b57bce4a0eaecaa2878d1 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:14 +0200
+Subject: [PATCH 22/25] crypto: crypto4xx - simplify sa and state context
+ acquisition
+
+Thanks to the big overhaul of crypto4xx_build_pd(), the request-local
+sa_in, sa_out and state_record allocation can be simplified.
+
+There's no need to set up any DMA-coherent memory anymore, and
+much of the support code can be removed.
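+
+In short (condensed from the crypto4xx_core.c hunk below), the per-tfm
+SA buffers become plain kernel allocations:
+
+	/* before */
+	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device,
+					size * 4, &ctx->sa_in_dma_addr,
+					GFP_ATOMIC);
+	/* after */
+	ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);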
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  | 27 +++++--------------
+ drivers/crypto/amcc/crypto4xx_core.c | 50 ++++++------------------------------
+ drivers/crypto/amcc/crypto4xx_core.h |  6 +----
+ 3 files changed, 15 insertions(+), 68 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -122,20 +122,13 @@ static int crypto4xx_setkey_aes(struct c
+ 	}
+ 
+ 	/* Create SA */
+-	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
++	if (ctx->sa_in || ctx->sa_out)
+ 		crypto4xx_free_sa(ctx);
+ 
+ 	rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+ 	if (rc)
+ 		return rc;
+ 
+-	if (ctx->state_record_dma_addr == 0) {
+-		rc = crypto4xx_alloc_state_record(ctx);
+-		if (rc) {
+-			crypto4xx_free_sa(ctx);
+-			return rc;
+-		}
+-	}
+ 	/* Setup SA */
+ 	sa = ctx->sa_in;
+ 
+@@ -203,8 +196,8 @@ int crypto4xx_setkey_rfc3686(struct cryp
+ 	if (rc)
+ 		return rc;
+ 
+-	crypto4xx_memcpy_to_le32(ctx->state_record->save_iv,
+-		key + keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
++	ctx->iv_nonce = cpu_to_le32p((u32 *)&key[keylen -
++						 CTR_RFC3686_NONCE_SIZE]);
+ 
+ 	return 0;
+ }
+@@ -213,7 +206,7 @@ int crypto4xx_rfc3686_encrypt(struct abl
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 	__le32 iv[AES_IV_SIZE / 4] = {
+-		ctx->state_record->save_iv[0],
++		ctx->iv_nonce,
+ 		cpu_to_le32p((u32 *) req->info),
+ 		cpu_to_le32p((u32 *) (req->info + 4)),
+ 		cpu_to_le32(1) };
+@@ -227,7 +220,7 @@ int crypto4xx_rfc3686_decrypt(struct abl
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 	__le32 iv[AES_IV_SIZE / 4] = {
+-		ctx->state_record->save_iv[0],
++		ctx->iv_nonce,
+ 		cpu_to_le32p((u32 *) req->info),
+ 		cpu_to_le32p((u32 *) (req->info + 4)),
+ 		cpu_to_le32(1) };
+@@ -254,21 +247,13 @@ static int crypto4xx_hash_alg_init(struc
+ 	ctx->dev   = my_alg->dev;
+ 
+ 	/* Create SA */
+-	if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
++	if (ctx->sa_in || ctx->sa_out)
+ 		crypto4xx_free_sa(ctx);
+ 
+ 	rc = crypto4xx_alloc_sa(ctx, sa_len);
+ 	if (rc)
+ 		return rc;
+ 
+-	if (ctx->state_record_dma_addr == 0) {
+-		crypto4xx_alloc_state_record(ctx);
+-		if (!ctx->state_record_dma_addr) {
+-			crypto4xx_free_sa(ctx);
+-			return -ENOMEM;
+-		}
+-	}
+-
+ 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ 				 sizeof(struct crypto4xx_ctx));
+ 	sa = (struct dynamic_sa_hash160 *)ctx->sa_in;
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -130,21 +130,17 @@ static void crypto4xx_hw_init(struct cry
+ 
+ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
+ {
+-	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
+-					&ctx->sa_in_dma_addr, GFP_ATOMIC);
++	ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
+ 	if (ctx->sa_in == NULL)
+ 		return -ENOMEM;
+ 
+-	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
+-					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
++	ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
+ 	if (ctx->sa_out == NULL) {
+-		dma_free_coherent(ctx->dev->core_dev->device, size * 4,
+-				  ctx->sa_in, ctx->sa_in_dma_addr);
++		kfree(ctx->sa_in);
++		ctx->sa_in = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+-	memset(ctx->sa_in, 0, size * 4);
+-	memset(ctx->sa_out, 0, size * 4);
+ 	ctx->sa_len = size;
+ 
+ 	return 0;
+@@ -152,40 +148,13 @@ int crypto4xx_alloc_sa(struct crypto4xx_
+ 
+ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
+ {
+-	if (ctx->sa_in != NULL)
+-		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
+-				  ctx->sa_in, ctx->sa_in_dma_addr);
+-	if (ctx->sa_out != NULL)
+-		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
+-				  ctx->sa_out, ctx->sa_out_dma_addr);
+-
+-	ctx->sa_in_dma_addr = 0;
+-	ctx->sa_out_dma_addr = 0;
++	kfree(ctx->sa_in);
++	ctx->sa_in = NULL;
++	kfree(ctx->sa_out);
++	ctx->sa_out = NULL;
+ 	ctx->sa_len = 0;
+ }
+ 
+-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
+-{
+-	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
+-				sizeof(struct sa_state_record),
+-				&ctx->state_record_dma_addr, GFP_ATOMIC);
+-	if (!ctx->state_record_dma_addr)
+-		return -ENOMEM;
+-	memset(ctx->state_record, 0, sizeof(struct sa_state_record));
+-
+-	return 0;
+-}
+-
+-static void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
+-{
+-	if (ctx->state_record != NULL)
+-		dma_free_coherent(ctx->dev->core_dev->device,
+-				  sizeof(struct sa_state_record),
+-				  ctx->state_record,
+-				  ctx->state_record_dma_addr);
+-	ctx->state_record_dma_addr = 0;
+-}
+-
+ /**
+  * alloc memory for the gather ring
+  * no need to alloc buf for the ring
+@@ -883,8 +852,6 @@ static int crypto4xx_alg_init(struct cry
+ 	ctx->dev = amcc_alg->dev;
+ 	ctx->sa_in = NULL;
+ 	ctx->sa_out = NULL;
+-	ctx->sa_in_dma_addr = 0;
+-	ctx->sa_out_dma_addr = 0;
+ 	ctx->sa_len = 0;
+ 
+ 	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+@@ -905,7 +872,6 @@ static void crypto4xx_alg_exit(struct cr
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ 
+ 	crypto4xx_free_sa(ctx);
+-	crypto4xx_free_state_record(ctx);
+ }
+ 
+ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -122,11 +122,8 @@ struct crypto4xx_core_device {
+ struct crypto4xx_ctx {
+ 	struct crypto4xx_device *dev;
+ 	struct dynamic_sa_ctl *sa_in;
+-	dma_addr_t sa_in_dma_addr;
+ 	struct dynamic_sa_ctl *sa_out;
+-	dma_addr_t sa_out_dma_addr;
+-	struct sa_state_record *state_record;
+-	dma_addr_t state_record_dma_addr;
++	__le32 iv_nonce;
+ 	u32 sa_len;
+ };
+ 
+@@ -159,7 +156,6 @@ static inline struct crypto4xx_alg *cryp
+ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+ void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+-u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+ int crypto4xx_build_pd(struct crypto_async_request *req,
+ 		       struct crypto4xx_ctx *ctx,
+ 		       struct scatterlist *src,
diff --git a/target/linux/apm821xx/patches-4.14/020-0023-crypto-crypto4xx-prepare-for-AEAD-support.patch b/target/linux/apm821xx/patches-4.14/020-0023-crypto-crypto4xx-prepare-for-AEAD-support.patch
new file mode 100644
index 0000000000..ff19c4f54e
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0023-crypto-crypto4xx-prepare-for-AEAD-support.patch
@@ -0,0 +1,617 @@
+From a0aae821ba3d35a49d4d0143dfb0c07eee22130e Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:15 +0200
+Subject: [PATCH 23/25] crypto: crypto4xx - prepare for AEAD support
+
+This patch enhances existing interfaces and
+functions to support AEAD ciphers in the next
+patches.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  |  19 +--
+ drivers/crypto/amcc/crypto4xx_core.c | 217 +++++++++++++++++++++++++++--------
+ drivers/crypto/amcc/crypto4xx_core.h |  22 ++--
+ drivers/crypto/amcc/crypto4xx_sa.h   |  41 +++++++
+ 4 files changed, 226 insertions(+), 73 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -26,6 +26,7 @@
+ #include <crypto/internal/hash.h>
+ #include <linux/dma-mapping.h>
+ #include <crypto/algapi.h>
++#include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/sha.h>
+ #include <crypto/ctr.h>
+@@ -83,7 +84,7 @@ int crypto4xx_encrypt(struct ablkcipher_
+ 		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-		req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len);
++		req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
+ }
+ 
+ int crypto4xx_decrypt(struct ablkcipher_request *req)
+@@ -97,7 +98,7 @@ int crypto4xx_decrypt(struct ablkcipher_
+ 		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-		req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len);
++		req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
+ }
+ 
+ /**
+@@ -213,7 +214,7 @@ int crypto4xx_rfc3686_encrypt(struct abl
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ 				  req->nbytes, iv, AES_IV_SIZE,
+-				  ctx->sa_out, ctx->sa_len);
++				  ctx->sa_out, ctx->sa_len, 0);
+ }
+ 
+ int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req)
+@@ -227,7 +228,7 @@ int crypto4xx_rfc3686_decrypt(struct abl
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ 				  req->nbytes, iv, AES_IV_SIZE,
+-				  ctx->sa_out, ctx->sa_len);
++				  ctx->sa_out, ctx->sa_len, 0);
+ }
+ 
+ /**
+@@ -239,11 +240,13 @@ static int crypto4xx_hash_alg_init(struc
+ 				   unsigned char hm)
+ {
+ 	struct crypto_alg *alg = tfm->__crt_alg;
+-	struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
++	struct crypto4xx_alg *my_alg;
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ 	struct dynamic_sa_hash160 *sa;
+ 	int rc;
+ 
++	my_alg = container_of(__crypto_ahash_alg(alg), struct crypto4xx_alg,
++			      alg.u.hash);
+ 	ctx->dev   = my_alg->dev;
+ 
+ 	/* Create SA */
+@@ -300,7 +303,7 @@ int crypto4xx_hash_update(struct ahash_r
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ 				  req->nbytes, NULL, 0, ctx->sa_in,
+-				  ctx->sa_len);
++				  ctx->sa_len, 0);
+ }
+ 
+ int crypto4xx_hash_final(struct ahash_request *req)
+@@ -319,7 +322,7 @@ int crypto4xx_hash_digest(struct ahash_r
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
+ 				  req->nbytes, NULL, 0, ctx->sa_in,
+-				  ctx->sa_len);
++				  ctx->sa_len, 0);
+ }
+ 
+ /**
+@@ -330,5 +333,3 @@ int crypto4xx_sha1_alg_init(struct crypt
+ 	return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
+ 				       SA_HASH_MODE_HASH);
+ }
+-
+-
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -35,10 +35,12 @@
+ #include <asm/dcr.h>
+ #include <asm/dcr-regs.h>
+ #include <asm/cacheflush.h>
++#include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/ctr.h>
+ #include <crypto/sha.h>
+ #include <crypto/scatterwalk.h>
++#include <crypto/internal/aead.h>
+ #include <crypto/internal/skcipher.h>
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_core.h"
+@@ -518,7 +520,7 @@ static void crypto4xx_ret_sg_desc(struct
+ 	}
+ }
+ 
+-static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
++static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
+ 				     struct pd_uinfo *pd_uinfo,
+ 				     struct ce_pd *pd)
+ {
+@@ -543,11 +545,9 @@ static u32 crypto4xx_ablkcipher_done(str
+ 	if (pd_uinfo->state & PD_ENTRY_BUSY)
+ 		ablkcipher_request_complete(ablk_req, -EINPROGRESS);
+ 	ablkcipher_request_complete(ablk_req, 0);
+-
+-	return 0;
+ }
+ 
+-static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
++static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
+ 				struct pd_uinfo *pd_uinfo)
+ {
+ 	struct crypto4xx_ctx *ctx;
+@@ -563,20 +563,88 @@ static u32 crypto4xx_ahash_done(struct c
+ 	if (pd_uinfo->state & PD_ENTRY_BUSY)
+ 		ahash_request_complete(ahash_req, -EINPROGRESS);
+ 	ahash_request_complete(ahash_req, 0);
++}
+ 
+-	return 0;
++static void crypto4xx_aead_done(struct crypto4xx_device *dev,
++				struct pd_uinfo *pd_uinfo,
++				struct ce_pd *pd)
++{
++	struct aead_request *aead_req;
++	struct crypto4xx_ctx *ctx;
++	struct scatterlist *dst = pd_uinfo->dest_va;
++	int err = 0;
++
++	aead_req = container_of(pd_uinfo->async_req, struct aead_request,
++				base);
++	ctx  = crypto_tfm_ctx(aead_req->base.tfm);
++
++	if (pd_uinfo->using_sd) {
++		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
++					  pd->pd_ctl_len.bf.pkt_len,
++					  dst);
++	} else {
++		__dma_sync_page(sg_page(dst), dst->offset, dst->length,
++				DMA_FROM_DEVICE);
++	}
++
++	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
++		/* append icv at the end */
++		size_t cp_len = crypto_aead_authsize(
++			crypto_aead_reqtfm(aead_req));
++		u32 icv[cp_len];
++
++		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
++					   cp_len);
++
++		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
++					 cp_len, 1);
++	}
++
++	crypto4xx_ret_sg_desc(dev, pd_uinfo);
++
++	if (pd->pd_ctl.bf.status & 0xff) {
++		if (pd->pd_ctl.bf.status & 0x1) {
++			/* authentication error */
++			err = -EBADMSG;
++		} else {
++			if (!__ratelimit(&dev->aead_ratelimit)) {
++				if (pd->pd_ctl.bf.status & 2)
++					pr_err("pad fail error\n");
++				if (pd->pd_ctl.bf.status & 4)
++					pr_err("seqnum fail\n");
++				if (pd->pd_ctl.bf.status & 8)
++					pr_err("error _notify\n");
++				pr_err("aead return err status = 0x%02x\n",
++					pd->pd_ctl.bf.status & 0xff);
++				pr_err("pd pad_ctl = 0x%08x\n",
++					pd->pd_ctl.bf.pd_pad_ctl);
++			}
++			err = -EINVAL;
++		}
++	}
++
++	if (pd_uinfo->state & PD_ENTRY_BUSY)
++		aead_request_complete(aead_req, -EINPROGRESS);
++
++	aead_request_complete(aead_req, err);
+ }
+ 
+-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
++static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+ {
+ 	struct ce_pd *pd = &dev->pdr[idx];
+ 	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
+ 
+-	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+-			CRYPTO_ALG_TYPE_ABLKCIPHER)
+-		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
+-	else
+-		return crypto4xx_ahash_done(dev, pd_uinfo);
++	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
++	case CRYPTO_ALG_TYPE_ABLKCIPHER:
++		crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
++		break;
++	case CRYPTO_ALG_TYPE_AEAD:
++		crypto4xx_aead_done(dev, pd_uinfo, pd);
++		break;
++	case CRYPTO_ALG_TYPE_AHASH:
++		crypto4xx_ahash_done(dev, pd_uinfo);
++		break;
++	}
+ }
+ 
+ static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
+@@ -612,8 +680,10 @@ int crypto4xx_build_pd(struct crypto_asy
+ 		       const unsigned int datalen,
+ 		       const __le32 *iv, const u32 iv_len,
+ 		       const struct dynamic_sa_ctl *req_sa,
+-		       const unsigned int sa_len)
++		       const unsigned int sa_len,
++		       const unsigned int assoclen)
+ {
++	struct scatterlist _dst[2];
+ 	struct crypto4xx_device *dev = ctx->dev;
+ 	struct dynamic_sa_ctl *sa;
+ 	struct ce_gd *gd;
+@@ -627,18 +697,25 @@ int crypto4xx_build_pd(struct crypto_asy
+ 	unsigned int nbytes = datalen;
+ 	size_t offset_to_sr_ptr;
+ 	u32 gd_idx = 0;
++	int tmp;
+ 	bool is_busy;
+ 
+-	/* figure how many gd is needed */
+-	num_gd = sg_nents_for_len(src, datalen);
+-	if ((int)num_gd < 0) {
++	/* figure how many gd are needed */
++	tmp = sg_nents_for_len(src, assoclen + datalen);
++	if (tmp < 0) {
+ 		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
+-		return -EINVAL;
++		return tmp;
+ 	}
+-	if (num_gd == 1)
+-		num_gd = 0;
++	if (tmp == 1)
++		tmp = 0;
++	num_gd = tmp;
+ 
+-	/* figure how many sd is needed */
++	if (assoclen) {
++		nbytes += assoclen;
++		dst = scatterwalk_ffwd(_dst, dst, assoclen);
++	}
++
++	/* figure how many sd are needed */
+ 	if (sg_is_last(dst)) {
+ 		num_sd = 0;
+ 	} else {
+@@ -724,6 +801,7 @@ int crypto4xx_build_pd(struct crypto_asy
+ 	sa = pd_uinfo->sa_va;
+ 	memcpy(sa, req_sa, sa_len * 4);
+ 
++	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
+ 	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
+ 	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;
+ 
+@@ -830,7 +908,7 @@ int crypto4xx_build_pd(struct crypto_asy
+ 		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
+ 		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
+ 			PD_CTL_HASH_FINAL : 0);
+-	pd->pd_ctl_len.w = 0x00400000 | datalen;
++	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
+ 	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);
+ 
+ 	wmb();
+@@ -843,40 +921,68 @@ int crypto4xx_build_pd(struct crypto_asy
+ /**
+  * Algorithm Registration Functions
+  */
+-static int crypto4xx_alg_init(struct crypto_tfm *tfm)
++static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
++			       struct crypto4xx_ctx *ctx)
+ {
+-	struct crypto_alg *alg = tfm->__crt_alg;
+-	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
+-	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+ 	ctx->dev = amcc_alg->dev;
+ 	ctx->sa_in = NULL;
+ 	ctx->sa_out = NULL;
+ 	ctx->sa_len = 0;
++}
+ 
+-	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+-	default:
+-		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
+-		break;
+-	case CRYPTO_ALG_TYPE_AHASH:
+-		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+-					 sizeof(struct crypto4xx_ctx));
+-		break;
+-	}
++static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
++{
++	struct crypto_alg *alg = tfm->__crt_alg;
++	struct crypto4xx_alg *amcc_alg;
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ 
++	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
++	crypto4xx_ctx_init(amcc_alg, ctx);
++	tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
+ 	return 0;
+ }
+ 
+-static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
++static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
+ {
+-	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+ 	crypto4xx_free_sa(ctx);
+ }
+ 
+-int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
+-			   struct crypto4xx_alg_common *crypto_alg,
+-			   int array_size)
++static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
++{
++	crypto4xx_common_exit(crypto_tfm_ctx(tfm));
++}
++
++static int crypto4xx_aead_init(struct crypto_aead *tfm)
++{
++	struct aead_alg *alg = crypto_aead_alg(tfm);
++	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
++	struct crypto4xx_alg *amcc_alg;
++
++	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
++						CRYPTO_ALG_NEED_FALLBACK |
++						CRYPTO_ALG_ASYNC);
++	if (IS_ERR(ctx->sw_cipher.aead))
++		return PTR_ERR(ctx->sw_cipher.aead);
++
++	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
++	crypto4xx_ctx_init(amcc_alg, ctx);
++	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
++				max(sizeof(struct crypto4xx_ctx), 32 +
++				crypto_aead_reqsize(ctx->sw_cipher.aead)));
++	return 0;
++}
++
++static void crypto4xx_aead_exit(struct crypto_aead *tfm)
++{
++	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
++
++	crypto4xx_common_exit(ctx);
++	crypto_free_aead(ctx->sw_cipher.aead);
++}
++
++static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
++				  struct crypto4xx_alg_common *crypto_alg,
++				  int array_size)
+ {
+ 	struct crypto4xx_alg *alg;
+ 	int i;
+@@ -891,6 +997,10 @@ int crypto4xx_register_alg(struct crypto
+ 		alg->dev = sec_dev;
+ 
+ 		switch (alg->alg.type) {
++		case CRYPTO_ALG_TYPE_AEAD:
++			rc = crypto_register_aead(&alg->alg.u.aead);
++			break;
++
+ 		case CRYPTO_ALG_TYPE_AHASH:
+ 			rc = crypto_register_ahash(&alg->alg.u.hash);
+ 			break;
+@@ -920,6 +1030,10 @@ static void crypto4xx_unregister_alg(str
+ 			crypto_unregister_ahash(&alg->alg.u.hash);
+ 			break;
+ 
++		case CRYPTO_ALG_TYPE_AEAD:
++			crypto_unregister_aead(&alg->alg.u.aead);
++			break;
++
+ 		default:
+ 			crypto_unregister_alg(&alg->alg.u.cipher);
+ 		}
+@@ -973,7 +1087,7 @@ static irqreturn_t crypto4xx_ce_interrup
+ /**
+  * Supported Crypto Algorithms
+  */
+-struct crypto4xx_alg_common crypto4xx_alg[] = {
++static struct crypto4xx_alg_common crypto4xx_alg[] = {
+ 	/* Crypto AES modes */
+ 	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ 		.cra_name 	= "cbc(aes)",
+@@ -985,8 +1099,8 @@ struct crypto4xx_alg_common crypto4xx_al
+ 		.cra_blocksize 	= AES_BLOCK_SIZE,
+ 		.cra_ctxsize 	= sizeof(struct crypto4xx_ctx),
+ 		.cra_type 	= &crypto_ablkcipher_type,
+-		.cra_init	= crypto4xx_alg_init,
+-		.cra_exit	= crypto4xx_alg_exit,
++		.cra_init	= crypto4xx_ablk_init,
++		.cra_exit	= crypto4xx_ablk_exit,
+ 		.cra_module 	= THIS_MODULE,
+ 		.cra_u 		= {
+ 			.ablkcipher = {
+@@ -1009,8 +1123,8 @@ struct crypto4xx_alg_common crypto4xx_al
+ 		.cra_blocksize	= AES_BLOCK_SIZE,
+ 		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+ 		.cra_type	= &crypto_ablkcipher_type,
+-		.cra_init	= crypto4xx_alg_init,
+-		.cra_exit	= crypto4xx_alg_exit,
++		.cra_init	= crypto4xx_ablk_init,
++		.cra_exit	= crypto4xx_ablk_exit,
+ 		.cra_module	= THIS_MODULE,
+ 		.cra_u		= {
+ 			.ablkcipher = {
+@@ -1033,8 +1147,8 @@ struct crypto4xx_alg_common crypto4xx_al
+ 		.cra_blocksize	= AES_BLOCK_SIZE,
+ 		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+ 		.cra_type	= &crypto_ablkcipher_type,
+-		.cra_init	= crypto4xx_alg_init,
+-		.cra_exit	= crypto4xx_alg_exit,
++		.cra_init	= crypto4xx_ablk_init,
++		.cra_exit	= crypto4xx_ablk_exit,
+ 		.cra_module	= THIS_MODULE,
+ 		.cra_u		= {
+ 			.ablkcipher = {
+@@ -1059,8 +1173,8 @@ struct crypto4xx_alg_common crypto4xx_al
+ 		.cra_blocksize	= AES_BLOCK_SIZE,
+ 		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+ 		.cra_type	= &crypto_ablkcipher_type,
+-		.cra_init	= crypto4xx_alg_init,
+-		.cra_exit	= crypto4xx_alg_exit,
++		.cra_init	= crypto4xx_ablk_init,
++		.cra_exit	= crypto4xx_ablk_exit,
+ 		.cra_module	= THIS_MODULE,
+ 		.cra_u		= {
+ 			.ablkcipher = {
+@@ -1082,8 +1196,8 @@ struct crypto4xx_alg_common crypto4xx_al
+ 		.cra_blocksize	= AES_BLOCK_SIZE,
+ 		.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+ 		.cra_type	= &crypto_ablkcipher_type,
+-		.cra_init	= crypto4xx_alg_init,
+-		.cra_exit	= crypto4xx_alg_exit,
++		.cra_init	= crypto4xx_ablk_init,
++		.cra_exit	= crypto4xx_ablk_exit,
+ 		.cra_module	= THIS_MODULE,
+ 		.cra_u		= {
+ 			.ablkcipher = {
+@@ -1149,6 +1263,7 @@ static int crypto4xx_probe(struct platfo
+ 	core_dev->device = dev;
+ 	spin_lock_init(&core_dev->lock);
+ 	INIT_LIST_HEAD(&core_dev->dev->alg_list);
++	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
+ 	rc = crypto4xx_build_pdr(core_dev->dev);
+ 	if (rc)
+ 		goto err_build_pdr;
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -22,7 +22,9 @@
+ #ifndef __CRYPTO4XX_CORE_H__
+ #define __CRYPTO4XX_CORE_H__
+ 
++#include <linux/ratelimit.h>
+ #include <crypto/internal/hash.h>
++#include <crypto/internal/aead.h>
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_sa.h"
+ 
+@@ -106,6 +108,7 @@ struct crypto4xx_device {
+ 	struct pd_uinfo *pdr_uinfo;
+ 	struct list_head alg_list;	/* List of algorithm supported
+ 					by this device */
++	struct ratelimit_state aead_ratelimit;
+ };
+ 
+ struct crypto4xx_core_device {
+@@ -125,6 +128,9 @@ struct crypto4xx_ctx {
+ 	struct dynamic_sa_ctl *sa_out;
+ 	__le32 iv_nonce;
+ 	u32 sa_len;
++	union {
++		struct crypto_aead *aead;
++	} sw_cipher;
+ };
+ 
+ struct crypto4xx_alg_common {
+@@ -132,6 +138,7 @@ struct crypto4xx_alg_common {
+ 	union {
+ 		struct crypto_alg cipher;
+ 		struct ahash_alg hash;
++		struct aead_alg aead;
+ 	} u;
+ };
+ 
+@@ -141,18 +148,6 @@ struct crypto4xx_alg {
+ 	struct crypto4xx_device *dev;
+ };
+ 
+-static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
+-	struct crypto_alg *x)
+-{
+-	switch (x->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+-	case CRYPTO_ALG_TYPE_AHASH:
+-		return container_of(__crypto_ahash_alg(x),
+-				    struct crypto4xx_alg, alg.u.hash);
+-	}
+-
+-	return container_of(x, struct crypto4xx_alg, alg.u.cipher);
+-}
+-
+ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
+ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
+ void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
+@@ -163,7 +158,8 @@ int crypto4xx_build_pd(struct crypto_asy
+ 		       const unsigned int datalen,
+ 		       const __le32 *iv, const u32 iv_len,
+ 		       const struct dynamic_sa_ctl *sa,
+-		       const unsigned int sa_len);
++		       const unsigned int sa_len,
++		       const unsigned int assoclen);
+ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
+ 			     const u8 *key, unsigned int keylen);
+ int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+--- a/drivers/crypto/amcc/crypto4xx_sa.h
++++ b/drivers/crypto/amcc/crypto4xx_sa.h
+@@ -55,6 +55,8 @@ union dynamic_sa_contents {
+ #define SA_OP_GROUP_BASIC			0
+ #define SA_OPCODE_ENCRYPT			0
+ #define SA_OPCODE_DECRYPT			0
++#define SA_OPCODE_ENCRYPT_HASH			1
++#define SA_OPCODE_HASH_DECRYPT			1
+ #define SA_OPCODE_HASH				3
+ #define SA_CIPHER_ALG_DES			0
+ #define SA_CIPHER_ALG_3DES			1
+@@ -65,6 +67,8 @@ union dynamic_sa_contents {
+ 
+ #define SA_HASH_ALG_MD5				0
+ #define SA_HASH_ALG_SHA1			1
++#define SA_HASH_ALG_GHASH			12
++#define SA_HASH_ALG_CBC_MAC			14
+ #define SA_HASH_ALG_NULL			15
+ #define SA_HASH_ALG_SHA1_DIGEST_SIZE		20
+ 
+@@ -234,6 +238,36 @@ struct dynamic_sa_aes256 {
+ #define SA_AES_CONTENTS		0x3e000002
+ 
+ /**
++ * Security Association (SA) for AES128 CCM
++ */
++struct dynamic_sa_aes128_ccm {
++	struct dynamic_sa_ctl ctrl;
++	__le32 key[4];
++	__le32 iv[4];
++	u32 state_ptr;
++	u32 reserved;
++} __packed;
++#define SA_AES128_CCM_LEN	(sizeof(struct dynamic_sa_aes128_ccm)/4)
++#define SA_AES128_CCM_CONTENTS	0x3e000042
++#define SA_AES_CCM_CONTENTS	0x3e000002
++
++/**
++ * Security Association (SA) for AES128_GCM
++ */
++struct dynamic_sa_aes128_gcm {
++	struct dynamic_sa_ctl ctrl;
++	__le32 key[4];
++	__le32 inner_digest[4];
++	__le32 iv[4];
++	u32 state_ptr;
++	u32 reserved;
++} __packed;
++
++#define SA_AES128_GCM_LEN	(sizeof(struct dynamic_sa_aes128_gcm)/4)
++#define SA_AES128_GCM_CONTENTS	0x3e000442
++#define SA_AES_GCM_CONTENTS	0x3e000402
++
++/**
+  * Security Association (SA) for HASH160: HMAC-SHA1
+  */
+ struct dynamic_sa_hash160 {
+@@ -274,4 +308,11 @@ static inline __le32 *get_dynamic_sa_key
+ 	return (__le32 *) ((unsigned long)cts + sizeof(struct dynamic_sa_ctl));
+ }
+ 
++static inline __le32 *get_dynamic_sa_inner_digest(struct dynamic_sa_ctl *cts)
++{
++	return (__le32 *) ((unsigned long)cts +
++		sizeof(struct dynamic_sa_ctl) +
++		cts->sa_contents.bf.key_size * 4);
++}
++
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0024-crypto-crypto4xx-add-aes-ccm-support.patch b/target/linux/apm821xx/patches-4.14/020-0024-crypto-crypto4xx-add-aes-ccm-support.patch
new file mode 100644
index 0000000000..73283f650c
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0024-crypto-crypto4xx-add-aes-ccm-support.patch
@@ -0,0 +1,256 @@
+From 65ea8b678fcf385ac18864743bae66c0643e6842 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:16 +0200
+Subject: [PATCH 24/25] crypto: crypto4xx - add aes-ccm support
+
+This patch adds aes-ccm support.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  | 185 +++++++++++++++++++++++++++++++++++
+ drivers/crypto/amcc/crypto4xx_core.c |  23 +++++
+ drivers/crypto/amcc/crypto4xx_core.h |   8 ++
+ 3 files changed, 216 insertions(+)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -231,6 +231,191 @@ int crypto4xx_rfc3686_decrypt(struct abl
+ 				  ctx->sa_out, ctx->sa_len, 0);
+ }
+ 
++static inline bool crypto4xx_aead_need_fallback(struct aead_request *req,
++						bool is_ccm, bool decrypt)
++{
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++
++	/* authsize has to be a multiple of 4 */
++	if (aead->authsize & 3)
++		return true;
++
++	/*
++	 * hardware does not handle cases where cryptlen
++	 * is less than a block
++	 */
++	if (req->cryptlen < AES_BLOCK_SIZE)
++		return true;
++
++	/* assoc len needs to be a multiple of 4 */
++	if (req->assoclen & 0x3)
++		return true;
++
++	/* CCM supports only counter field length of 2 and 4 bytes */
++	if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
++		return true;
++
++	/* CCM - fix CBC MAC mismatch in special case */
++	if (is_ccm && decrypt && !req->assoclen)
++		return true;
++
++	return false;
++}
++
++static int crypto4xx_aead_fallback(struct aead_request *req,
++	struct crypto4xx_ctx *ctx, bool do_decrypt)
++{
++	char aead_req_data[sizeof(struct aead_request) +
++			   crypto_aead_reqsize(ctx->sw_cipher.aead)]
++		__aligned(__alignof__(struct aead_request));
++
++	struct aead_request *subreq = (void *) aead_req_data;
++
++	memset(subreq, 0, sizeof(aead_req_data));
++
++	aead_request_set_tfm(subreq, ctx->sw_cipher.aead);
++	aead_request_set_callback(subreq, req->base.flags,
++				  req->base.complete, req->base.data);
++	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
++			       req->iv);
++	aead_request_set_ad(subreq, req->assoclen);
++	return do_decrypt ? crypto_aead_decrypt(subreq) :
++			    crypto_aead_encrypt(subreq);
++}
++
++static int crypto4xx_setup_fallback(struct crypto4xx_ctx *ctx,
++				    struct crypto_aead *cipher,
++				    const u8 *key,
++				    unsigned int keylen)
++{
++	int rc;
++
++	crypto_aead_clear_flags(ctx->sw_cipher.aead, CRYPTO_TFM_REQ_MASK);
++	crypto_aead_set_flags(ctx->sw_cipher.aead,
++		crypto_aead_get_flags(cipher) & CRYPTO_TFM_REQ_MASK);
++	rc = crypto_aead_setkey(ctx->sw_cipher.aead, key, keylen);
++	crypto_aead_clear_flags(cipher, CRYPTO_TFM_RES_MASK);
++	crypto_aead_set_flags(cipher,
++		crypto_aead_get_flags(ctx->sw_cipher.aead) &
++			CRYPTO_TFM_RES_MASK);
++
++	return rc;
++}
++
++/**
++ * AES-CCM Functions
++ */
++
++int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
++			     unsigned int keylen)
++{
++	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct dynamic_sa_ctl *sa;
++	int rc = 0;
++
++	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
++	if (rc)
++		return rc;
++
++	if (ctx->sa_in || ctx->sa_out)
++		crypto4xx_free_sa(ctx);
++
++	rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen - 16) / 4);
++	if (rc)
++		return rc;
++
++	/* Setup SA */
++	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
++	sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
++
++	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
++				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
++				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
++				 SA_CIPHER_ALG_AES,
++				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
++				 SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
++
++	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
++				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
++				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
++				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
++				 SA_NOT_COPY_HDR);
++
++	sa->sa_command_1.bf.key_len = keylen >> 3;
++
++	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa), key, keylen);
++
++	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
++	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
++
++	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
++				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
++				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
++				 SA_CIPHER_ALG_AES,
++				 SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
++				 SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
++
++	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
++				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
++				 SA_SEQ_MASK_OFF, SA_MC_ENABLE,
++				 SA_COPY_PAD, SA_COPY_PAYLOAD,
++				 SA_NOT_COPY_HDR);
++
++	sa->sa_command_1.bf.key_len = keylen >> 3;
++	return 0;
++}
++
++static int crypto4xx_crypt_aes_ccm(struct aead_request *req, bool decrypt)
++{
++	struct crypto4xx_ctx *ctx  = crypto_tfm_ctx(req->base.tfm);
++	struct crypto_aead *aead = crypto_aead_reqtfm(req);
++	unsigned int len = req->cryptlen;
++	__le32 iv[16];
++	u32 tmp_sa[ctx->sa_len * 4];
++	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *)tmp_sa;
++
++	if (crypto4xx_aead_need_fallback(req, true, decrypt))
++		return crypto4xx_aead_fallback(req, ctx, decrypt);
++
++	if (decrypt)
++		len -= crypto_aead_authsize(aead);
++
++	memcpy(tmp_sa, decrypt ? ctx->sa_in : ctx->sa_out, sizeof(tmp_sa));
++	sa->sa_command_0.bf.digest_len = crypto_aead_authsize(aead) >> 2;
++
++	if (req->iv[0] == 1) {
++		/* CRYPTO_MODE_AES_ICM */
++		sa->sa_command_1.bf.crypto_mode9_8 = 1;
++	}
++
++	iv[3] = cpu_to_le32(0);
++	crypto4xx_memcpy_to_le32(iv, req->iv, 16 - (req->iv[0] + 1));
++
++	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
++				  len, iv, sizeof(iv),
++				  sa, ctx->sa_len, req->assoclen);
++}
++
++int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
++{
++	return crypto4xx_crypt_aes_ccm(req, false);
++}
++
++int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
++{
++	return crypto4xx_crypt_aes_ccm(req, true);
++}
++
++int crypto4xx_setauthsize_aead(struct crypto_aead *cipher,
++			       unsigned int authsize)
++{
++	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
++
++	return crypto_aead_setauthsize(ctx->sw_cipher.aead, authsize);
++}
++
+ /**
+  * HASH SHA1 Functions
+  */
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1210,6 +1210,29 @@ static struct crypto4xx_alg_common crypt
+ 			}
+ 		}
+ 	} },
++
++	/* AEAD */
++	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
++		.setkey		= crypto4xx_setkey_aes_ccm,
++		.setauthsize	= crypto4xx_setauthsize_aead,
++		.encrypt	= crypto4xx_encrypt_aes_ccm,
++		.decrypt	= crypto4xx_decrypt_aes_ccm,
++		.init		= crypto4xx_aead_init,
++		.exit		= crypto4xx_aead_exit,
++		.ivsize		= AES_BLOCK_SIZE,
++		.maxauthsize    = 16,
++		.base = {
++			.cra_name	= "ccm(aes)",
++			.cra_driver_name = "ccm-aes-ppc4xx",
++			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
++			.cra_flags	= CRYPTO_ALG_ASYNC |
++					  CRYPTO_ALG_NEED_FALLBACK |
++					  CRYPTO_ALG_KERN_DRIVER_ONLY,
++			.cra_blocksize	= 1,
++			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
++			.cra_module	= THIS_MODULE,
++		},
++	} },
+ };
+ 
+ /**
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -222,4 +222,12 @@ static inline void crypto4xx_memcpy_to_l
+ {
+ 	crypto4xx_memcpy_swab32((u32 *)dst, buf, len);
+ }
++
++int crypto4xx_setauthsize_aead(struct crypto_aead *ciper,
++			       unsigned int authsize);
++int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
++			     const u8 *key, unsigned int keylen);
++int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
++int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
++
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/020-0025-crypto-crypto4xx-add-aes-gcm-support.patch b/target/linux/apm821xx/patches-4.14/020-0025-crypto-crypto4xx-add-aes-gcm-support.patch
new file mode 100644
index 0000000000..3a2ed310b3
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/020-0025-crypto-crypto4xx-add-aes-gcm-support.patch
@@ -0,0 +1,220 @@
+From 59231368d3a959fc30c5142c406a045f49130daa Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Wed, 4 Oct 2017 01:00:17 +0200
+Subject: [PATCH 25/25] crypto: crypto4xx - add aes-gcm support
+
+This patch adds aes-gcm support to crypto4xx.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+Signed-off-by: Herbert Xu <herbert at gondor.apana.org.au>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  | 139 +++++++++++++++++++++++++++++++++++
+ drivers/crypto/amcc/crypto4xx_core.c |  22 ++++++
+ drivers/crypto/amcc/crypto4xx_core.h |   4 +
+ 3 files changed, 165 insertions(+)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -28,6 +28,7 @@
+ #include <crypto/algapi.h>
+ #include <crypto/aead.h>
+ #include <crypto/aes.h>
++#include <crypto/gcm.h>
+ #include <crypto/sha.h>
+ #include <crypto/ctr.h>
+ #include "crypto4xx_reg_def.h"
+@@ -417,6 +418,144 @@ int crypto4xx_setauthsize_aead(struct cr
+ }
+ 
+ /**
++ * AES-GCM Functions
++ */
++
++static int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
++{
++	switch (keylen) {
++	case 16:
++	case 24:
++	case 32:
++		return 0;
++	default:
++		return -EINVAL;
++	}
++}
++
++static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
++					     unsigned int keylen)
++{
++	struct crypto_cipher *aes_tfm = NULL;
++	uint8_t src[16] = { 0 };
++	int rc = 0;
++
++	aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
++				      CRYPTO_ALG_NEED_FALLBACK);
++	if (IS_ERR(aes_tfm)) {
++		rc = PTR_ERR(aes_tfm);
++		pr_warn("could not load aes cipher driver: %d\n", rc);
++		return rc;
++	}
++
++	rc = crypto_cipher_setkey(aes_tfm, key, keylen);
++	if (rc) {
++		pr_err("setkey() failed: %d\n", rc);
++		goto out;
++	}
++
++	crypto_cipher_encrypt_one(aes_tfm, src, src);
++	crypto4xx_memcpy_to_le32(hash_start, src, 16);
++out:
++	crypto_free_cipher(aes_tfm);
++	return rc;
++}
++
++int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
++			     const u8 *key, unsigned int keylen)
++{
++	struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
++	struct dynamic_sa_ctl *sa;
++	int    rc = 0;
++
++	if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
++		crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++		return -EINVAL;
++	}
++
++	rc = crypto4xx_setup_fallback(ctx, cipher, key, keylen);
++	if (rc)
++		return rc;
++
++	if (ctx->sa_in || ctx->sa_out)
++		crypto4xx_free_sa(ctx);
++
++	rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen - 16) / 4);
++	if (rc)
++		return rc;
++
++	sa  = (struct dynamic_sa_ctl *) ctx->sa_in;
++
++	sa->sa_contents.w = SA_AES_GCM_CONTENTS | (keylen << 2);
++	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
++				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
++				 SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
++				 SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
++				 SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
++				 DIR_INBOUND);
++	set_dynamic_sa_command_1(sa, CRYPTO_MODE_CTR, SA_HASH_MODE_HASH,
++				 CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
++				 SA_SEQ_MASK_ON, SA_MC_DISABLE,
++				 SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
++				 SA_NOT_COPY_HDR);
++
++	sa->sa_command_1.bf.key_len = keylen >> 3;
++
++	crypto4xx_memcpy_to_le32(get_dynamic_sa_key_field(sa),
++				 key, keylen);
++
++	rc = crypto4xx_compute_gcm_hash_key_sw(get_dynamic_sa_inner_digest(sa),
++		key, keylen);
++	if (rc) {
++		pr_err("GCM hash key setting failed = %d\n", rc);
++		goto err;
++	}
++
++	memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
++	sa = (struct dynamic_sa_ctl *) ctx->sa_out;
++	sa->sa_command_0.bf.dir = DIR_OUTBOUND;
++	sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
++
++	return 0;
++err:
++	crypto4xx_free_sa(ctx);
++	return rc;
++}
++
++static inline int crypto4xx_crypt_aes_gcm(struct aead_request *req,
++					  bool decrypt)
++{
++	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
++	unsigned int len = req->cryptlen;
++	__le32 iv[4];
++
++	if (crypto4xx_aead_need_fallback(req, false, decrypt))
++		return crypto4xx_aead_fallback(req, ctx, decrypt);
++
++	crypto4xx_memcpy_to_le32(iv, req->iv, GCM_AES_IV_SIZE);
++	iv[3] = cpu_to_le32(1);
++
++	if (decrypt)
++		len -= crypto_aead_authsize(crypto_aead_reqtfm(req));
++
++	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
++				  len, iv, sizeof(iv),
++				  decrypt ? ctx->sa_in : ctx->sa_out,
++				  ctx->sa_len, req->assoclen);
++}
++
++int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
++{
++	return crypto4xx_crypt_aes_gcm(req, false);
++}
++
++int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
++{
++	return crypto4xx_crypt_aes_gcm(req, true);
++}
++
++/**
+  * HASH SHA1 Functions
+  */
+ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -38,6 +38,7 @@
+ #include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/ctr.h>
++#include <crypto/gcm.h>
+ #include <crypto/sha.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/internal/aead.h>
+@@ -1227,6 +1228,27 @@ static struct crypto4xx_alg_common crypt
+ 			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
+ 			.cra_flags	= CRYPTO_ALG_ASYNC |
+ 					  CRYPTO_ALG_NEED_FALLBACK |
++					  CRYPTO_ALG_KERN_DRIVER_ONLY,
++			.cra_blocksize	= 1,
++			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
++			.cra_module	= THIS_MODULE,
++		},
++	} },
++	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
++		.setkey		= crypto4xx_setkey_aes_gcm,
++		.setauthsize	= crypto4xx_setauthsize_aead,
++		.encrypt	= crypto4xx_encrypt_aes_gcm,
++		.decrypt	= crypto4xx_decrypt_aes_gcm,
++		.init		= crypto4xx_aead_init,
++		.exit		= crypto4xx_aead_exit,
++		.ivsize		= GCM_AES_IV_SIZE,
++		.maxauthsize	= 16,
++		.base = {
++			.cra_name	= "gcm(aes)",
++			.cra_driver_name = "gcm-aes-ppc4xx",
++			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
++			.cra_flags	= CRYPTO_ALG_ASYNC |
++					  CRYPTO_ALG_NEED_FALLBACK |
+ 					  CRYPTO_ALG_KERN_DRIVER_ONLY,
+ 			.cra_blocksize	= 1,
+ 			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -229,5 +229,9 @@ int crypto4xx_setkey_aes_ccm(struct cryp
+ 			     const u8 *key, unsigned int keylen);
+ int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+ int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
++int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
++			     const u8 *key, unsigned int keylen);
++int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
++int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+ 
+ #endif
diff --git a/target/linux/apm821xx/patches-4.14/120-0001-crypto-crypto4xx-shuffle-iomap-in-front-of-request_i.patch b/target/linux/apm821xx/patches-4.14/120-0001-crypto-crypto4xx-shuffle-iomap-in-front-of-request_i.patch
new file mode 100644
index 0000000000..fc8df38867
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/120-0001-crypto-crypto4xx-shuffle-iomap-in-front-of-request_i.patch
@@ -0,0 +1,71 @@
+From 4baa099377d73ea99c7802a9685815b32e8bf119 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Thu, 21 Dec 2017 15:08:18 +0100
+Subject: [PATCH 1/6] crypto: crypto4xx - shuffle iomap in front of request_irq
+
+It is possible to avoid the ce_base null pointer check in the
+driver's interrupt handler routine "crypto4xx_ce_interrupt_handler()"
+by simply doing the iomap in front of the IRQ registration.
+
+This way, the ce_base will always be valid in the handler and
+a branch in a critical path can be avoided.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 21 +++++++++------------
+ 1 file changed, 9 insertions(+), 12 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1075,9 +1075,6 @@ static irqreturn_t crypto4xx_ce_interrup
+ 	struct device *dev = (struct device *)data;
+ 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ 
+-	if (!core_dev->dev->ce_base)
+-		return 0;
+-
+ 	writel(PPC4XX_INTERRUPT_CLR,
+ 	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ 	tasklet_schedule(&core_dev->tasklet);
+@@ -1325,13 +1322,6 @@ static int crypto4xx_probe(struct platfo
+ 	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
+ 		     (unsigned long) dev);
+ 
+-	/* Register for Crypto isr, Crypto Engine IRQ */
+-	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+-	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
+-			 core_dev->dev->name, dev);
+-	if (rc)
+-		goto err_request_irq;
+-
+ 	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
+ 	if (!core_dev->dev->ce_base) {
+ 		dev_err(dev, "failed to of_iomap\n");
+@@ -1339,6 +1329,13 @@ static int crypto4xx_probe(struct platfo
+ 		goto err_iomap;
+ 	}
+ 
++	/* Register for Crypto isr, Crypto Engine IRQ */
++	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
++	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
++			 core_dev->dev->name, dev);
++	if (rc)
++		goto err_request_irq;
++
+ 	/* need to setup pdr, rdr, gdr and sdr before this */
+ 	crypto4xx_hw_init(core_dev->dev);
+ 
+@@ -1352,11 +1349,11 @@ static int crypto4xx_probe(struct platfo
+ 	return 0;
+ 
+ err_start_dev:
+-	iounmap(core_dev->dev->ce_base);
+-err_iomap:
+ 	free_irq(core_dev->irq, dev);
+ err_request_irq:
+ 	irq_dispose_mapping(core_dev->irq);
++	iounmap(core_dev->dev->ce_base);
++err_iomap:
+ 	tasklet_kill(&core_dev->tasklet);
+ err_build_sdr:
+ 	crypto4xx_destroy_sdr(core_dev->dev);
diff --git a/target/linux/apm821xx/patches-4.14/120-0002-crypto-crypto4xx-support-Revision-B-parts.patch b/target/linux/apm821xx/patches-4.14/120-0002-crypto-crypto4xx-support-Revision-B-parts.patch
new file mode 100644
index 0000000000..1adad96fe1
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/120-0002-crypto-crypto4xx-support-Revision-B-parts.patch
@@ -0,0 +1,150 @@
+From 1e932b627e79aa2c70e2c7278e4ac930303faa3f Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Thu, 21 Dec 2017 15:09:18 +0100
+Subject: [PATCH 2/6] crypto: crypto4xx - support Revision B parts
+
+This patch adds support for the crypto4xx RevB cores
+found in the 460EX, 460SX and later SoCs (like the APM821xx).
+
+Without this patch, the crypto4xx driver will not be
+able to process any offloaded requests and will simply hang
+indefinitely.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+---
+ drivers/crypto/amcc/crypto4xx_core.c    | 48 +++++++++++++++++++++++++++++----
+ drivers/crypto/amcc/crypto4xx_core.h    |  1 +
+ drivers/crypto/amcc/crypto4xx_reg_def.h |  4 ++-
+ 3 files changed, 47 insertions(+), 6 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -128,7 +128,14 @@ static void crypto4xx_hw_init(struct cry
+ 	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+ 	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
+ 	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
+-	writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
++	if (dev->is_revb) {
++		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
++		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
++		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
++		       dev->ce_base + CRYPTO4XX_INT_EN);
++	} else {
++		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
++	}
+ }
+ 
+ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
+@@ -1070,18 +1077,29 @@ static void crypto4xx_bh_tasklet_cb(unsi
+ /**
+  * Top Half of isr.
+  */
+-static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
++static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
++						      u32 clr_val)
+ {
+ 	struct device *dev = (struct device *)data;
+ 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ 
+-	writel(PPC4XX_INTERRUPT_CLR,
+-	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
++	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ 	tasklet_schedule(&core_dev->tasklet);
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
++static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
++{
++	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
++}
++
++static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
++{
++	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
++		PPC4XX_TMO_ERR_INT);
++}
++
+ /**
+  * Supported Crypto Algorithms
+  */
+@@ -1263,6 +1281,8 @@ static int crypto4xx_probe(struct platfo
+ 	struct resource res;
+ 	struct device *dev = &ofdev->dev;
+ 	struct crypto4xx_core_device *core_dev;
++	u32 pvr;
++	bool is_revb = true;
+ 
+ 	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
+ 	if (rc)
+@@ -1279,6 +1299,7 @@ static int crypto4xx_probe(struct platfo
+ 		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
+ 		mtdcri(SDR0, PPC405EX_SDR0_SRST,
+ 		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
++		is_revb = false;
+ 	} else if (of_find_compatible_node(NULL, NULL,
+ 			"amcc,ppc460sx-crypto")) {
+ 		mtdcri(SDR0, PPC460SX_SDR0_SRST,
+@@ -1301,7 +1322,22 @@ static int crypto4xx_probe(struct platfo
+ 	if (!core_dev->dev)
+ 		goto err_alloc_dev;
+ 
++	/*
++	 * Older version of 460EX/GT have a hardware bug.
++	 * Hence they do not support H/W based security intr coalescing
++	 */
++	pvr = mfspr(SPRN_PVR);
++	if (is_revb && ((pvr >> 4) == 0x130218A)) {
++		u32 min = PVR_MIN(pvr);
++
++		if (min < 4) {
++			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
++			is_revb = false;
++		}
++	}
++
+ 	core_dev->dev->core_dev = core_dev;
++	core_dev->dev->is_revb = is_revb;
+ 	core_dev->device = dev;
+ 	spin_lock_init(&core_dev->lock);
+ 	INIT_LIST_HEAD(&core_dev->dev->alg_list);
+@@ -1331,7 +1367,9 @@ static int crypto4xx_probe(struct platfo
+ 
+ 	/* Register for Crypto isr, Crypto Engine IRQ */
+ 	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
+-	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
++	rc = request_irq(core_dev->irq, is_revb ?
++			 crypto4xx_ce_interrupt_handler_revb :
++			 crypto4xx_ce_interrupt_handler, 0,
+ 			 core_dev->dev->name, dev);
+ 	if (rc)
+ 		goto err_request_irq;
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -109,6 +109,7 @@ struct crypto4xx_device {
+ 	struct list_head alg_list;	/* List of algorithm supported
+ 					by this device */
+ 	struct ratelimit_state aead_ratelimit;
++	bool is_revb;
+ };
+ 
+ struct crypto4xx_core_device {
+--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
++++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
+@@ -121,13 +121,15 @@
+ #define PPC4XX_PD_SIZE				6
+ #define PPC4XX_CTX_DONE_INT			0x2000
+ #define PPC4XX_PD_DONE_INT			0x8000
++#define PPC4XX_TMO_ERR_INT			0x40000
+ #define PPC4XX_BYTE_ORDER			0x22222
+ #define PPC4XX_INTERRUPT_CLR			0x3ffff
+ #define PPC4XX_PRNG_CTRL_AUTO_EN		0x3
+ #define PPC4XX_DC_3DES_EN			1
+ #define PPC4XX_TRNG_EN				0x00020000
+-#define PPC4XX_INT_DESCR_CNT			4
++#define PPC4XX_INT_DESCR_CNT			7
+ #define PPC4XX_INT_TIMEOUT_CNT			0
++#define PPC4XX_INT_TIMEOUT_CNT_REVB		0x3FF
+ #define PPC4XX_INT_CFG				1
+ /**
+  * all follow define are ad hoc
diff --git a/target/linux/apm821xx/patches-4.14/120-0003-crypto-crypto4xx-fix-missing-irq-devname.patch b/target/linux/apm821xx/patches-4.14/120-0003-crypto-crypto4xx-fix-missing-irq-devname.patch
new file mode 100644
index 0000000000..a295fa4616
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/120-0003-crypto-crypto4xx-fix-missing-irq-devname.patch
@@ -0,0 +1,37 @@
+From 00179ef6e3c4e5db6258cd6e273e4063b8437d18 Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Thu, 21 Dec 2017 15:10:18 +0100
+Subject: [PATCH 3/6] crypto: crypto4xx - fix missing irq devname
+
+crypto4xx_device's name variable is not set to anything.
+The common devname for request_irq seems to be the module
+name. This will fix the seemingly anonymous interrupt
+entry in /proc/interrupts for crypto4xx.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 2 +-
+ drivers/crypto/amcc/crypto4xx_core.h | 1 -
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1370,7 +1370,7 @@ static int crypto4xx_probe(struct platfo
+ 	rc = request_irq(core_dev->irq, is_revb ?
+ 			 crypto4xx_ce_interrupt_handler_revb :
+ 			 crypto4xx_ce_interrupt_handler, 0,
+-			 core_dev->dev->name, dev);
++			 KBUILD_MODNAME, dev);
+ 	if (rc)
+ 		goto err_request_irq;
+ 
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -82,7 +82,6 @@ struct pd_uinfo {
+ 
+ struct crypto4xx_device {
+ 	struct crypto4xx_core_device *core_dev;
+-	char *name;
+ 	void __iomem *ce_base;
+ 	void __iomem *trng_base;
+ 
diff --git a/target/linux/apm821xx/patches-4.14/120-0004-crypto-crypto4xx-kill-MODULE_NAME.patch b/target/linux/apm821xx/patches-4.14/120-0004-crypto-crypto4xx-kill-MODULE_NAME.patch
new file mode 100644
index 0000000000..4ee99f44cf
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/120-0004-crypto-crypto4xx-kill-MODULE_NAME.patch
@@ -0,0 +1,47 @@
+From c3621f23fed7d6fff33083ae538004ea59c01d8f Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Thu, 21 Dec 2017 15:11:18 +0100
+Subject: [PATCH 4/6] crypto: crypto4xx - kill MODULE_NAME
+
+KBUILD_MODNAME provides the same value.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+---
+ drivers/crypto/amcc/crypto4xx_core.c | 2 +-
+ drivers/crypto/amcc/crypto4xx_core.h | 2 --
+ drivers/crypto/amcc/crypto4xx_trng.c | 2 +-
+ 3 files changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -1432,7 +1432,7 @@ MODULE_DEVICE_TABLE(of, crypto4xx_match)
+ 
+ static struct platform_driver crypto4xx_driver = {
+ 	.driver = {
+-		.name = MODULE_NAME,
++		.name = KBUILD_MODNAME,
+ 		.of_match_table = crypto4xx_match,
+ 	},
+ 	.probe		= crypto4xx_probe,
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -28,8 +28,6 @@
+ #include "crypto4xx_reg_def.h"
+ #include "crypto4xx_sa.h"
+ 
+-#define MODULE_NAME "crypto4xx"
+-
+ #define PPC460SX_SDR0_SRST                      0x201
+ #define PPC405EX_SDR0_SRST                      0x200
+ #define PPC460EX_SDR0_SRST                      0x201
+--- a/drivers/crypto/amcc/crypto4xx_trng.c
++++ b/drivers/crypto/amcc/crypto4xx_trng.c
+@@ -92,7 +92,7 @@ void ppc4xx_trng_probe(struct crypto4xx_
+ 	if (!rng)
+ 		goto err_out;
+ 
+-	rng->name = MODULE_NAME;
++	rng->name = KBUILD_MODNAME;
+ 	rng->data_present = ppc4xx_trng_data_present;
+ 	rng->data_read = ppc4xx_trng_data_read;
+ 	rng->priv = (unsigned long) dev;
diff --git a/target/linux/apm821xx/patches-4.14/120-0005-crypto-crypto4xx-perform-aead-icv-check-in-the-drive.patch b/target/linux/apm821xx/patches-4.14/120-0005-crypto-crypto4xx-perform-aead-icv-check-in-the-drive.patch
new file mode 100644
index 0000000000..0dbd924e3c
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/120-0005-crypto-crypto4xx-perform-aead-icv-check-in-the-drive.patch
@@ -0,0 +1,146 @@
+From 5b3856d1d98e6f6a58b70c1c0d7da3fb5f042e9c Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Thu, 21 Dec 2017 16:00:01 +0100
+Subject: [PATCH 5/6] crypto: crypto4xx - perform aead icv check in the driver
+
+The ccm-aes-ppc4xx now fails one of testmgr's expected
+failure test cases as such:
+
+alg: aead: decryption failed on test 10 for ccm-aes-ppc4xx: ret was 0, expected -EBADMSG
+
+Upon closer inspection, it turned out that the hardware's
+crypto flags that would indicate an authentication failure
+are not set by the hardware. The original vendor source from
+which this was ported does not have any special code or notes
+about why this would happen or whether there are any workarounds.
+
+Hence, this patch converts the aead_done callback handler to
+perform the icv check in the driver. This fixes the false
+negative, and ccm-aes-ppc4xx passes the selftests once again.
+
+|name         : ccm(aes)
+|driver       : ccm-aes-ppc4xx
+|module       : crypto4xx
+|priority     : 300
+|refcnt       : 1
+|selftest     : passed
+|internal     : no
+|type         : aead
+|async        : yes
+|blocksize    : 1
+|ivsize       : 16
+|maxauthsize  : 16
+|geniv        : <none>
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  |  6 +---
+ drivers/crypto/amcc/crypto4xx_core.c | 54 ++++++++++++++++++------------------
+ 2 files changed, 28 insertions(+), 32 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -256,10 +256,6 @@ static inline bool crypto4xx_aead_need_f
+ 	if (is_ccm && !(req->iv[0] == 1 || req->iv[0] == 3))
+ 		return true;
+ 
+-	/* CCM - fix CBC MAC mismatch in special case */
+-	if (is_ccm && decrypt && !req->assoclen)
+-		return true;
+-
+ 	return false;
+ }
+ 
+@@ -330,7 +326,7 @@ int crypto4xx_setkey_aes_ccm(struct cryp
+ 	sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ 	sa->sa_contents.w = SA_AES_CCM_CONTENTS | (keylen << 2);
+ 
+-	set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
++	set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ 				 SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ 				 SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ 				 SA_CIPHER_ALG_AES,
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -577,15 +577,14 @@ static void crypto4xx_aead_done(struct c
+ 				struct pd_uinfo *pd_uinfo,
+ 				struct ce_pd *pd)
+ {
+-	struct aead_request *aead_req;
+-	struct crypto4xx_ctx *ctx;
++	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
++		struct aead_request, base);
+ 	struct scatterlist *dst = pd_uinfo->dest_va;
++	size_t cp_len = crypto_aead_authsize(
++		crypto_aead_reqtfm(aead_req));
++	u32 icv[cp_len];
+ 	int err = 0;
+ 
+-	aead_req = container_of(pd_uinfo->async_req, struct aead_request,
+-				base);
+-	ctx  = crypto_tfm_ctx(aead_req->base.tfm);
+-
+ 	if (pd_uinfo->using_sd) {
+ 		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ 					  pd->pd_ctl_len.bf.pkt_len,
+@@ -597,38 +596,39 @@ static void crypto4xx_aead_done(struct c
+ 
+ 	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ 		/* append icv at the end */
+-		size_t cp_len = crypto_aead_authsize(
+-			crypto_aead_reqtfm(aead_req));
+-		u32 icv[cp_len];
+-
+ 		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+ 					   cp_len);
+ 
+ 		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ 					 cp_len, 1);
++	} else {
++		/* check icv at the end */
++		scatterwalk_map_and_copy(icv, aead_req->src,
++			aead_req->assoclen + aead_req->cryptlen -
++			cp_len, cp_len, 0);
++
++		crypto4xx_memcpy_from_le32(icv, icv, cp_len);
++
++		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
++			err = -EBADMSG;
+ 	}
+ 
+ 	crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ 
+ 	if (pd->pd_ctl.bf.status & 0xff) {
+-		if (pd->pd_ctl.bf.status & 0x1) {
+-			/* authentication error */
+-			err = -EBADMSG;
+-		} else {
+-			if (!__ratelimit(&dev->aead_ratelimit)) {
+-				if (pd->pd_ctl.bf.status & 2)
+-					pr_err("pad fail error\n");
+-				if (pd->pd_ctl.bf.status & 4)
+-					pr_err("seqnum fail\n");
+-				if (pd->pd_ctl.bf.status & 8)
+-					pr_err("error _notify\n");
+-				pr_err("aead return err status = 0x%02x\n",
+-					pd->pd_ctl.bf.status & 0xff);
+-				pr_err("pd pad_ctl = 0x%08x\n",
+-					pd->pd_ctl.bf.pd_pad_ctl);
+-			}
+-			err = -EINVAL;
++		if (!__ratelimit(&dev->aead_ratelimit)) {
++			if (pd->pd_ctl.bf.status & 2)
++				pr_err("pad fail error\n");
++			if (pd->pd_ctl.bf.status & 4)
++				pr_err("seqnum fail\n");
++			if (pd->pd_ctl.bf.status & 8)
++				pr_err("error _notify\n");
++			pr_err("aead return err status = 0x%02x\n",
++				pd->pd_ctl.bf.status & 0xff);
++			pr_err("pd pad_ctl = 0x%08x\n",
++				pd->pd_ctl.bf.pd_pad_ctl);
+ 		}
++		err = -EINVAL;
+ 	}
+ 
+ 	if (pd_uinfo->state & PD_ENTRY_BUSY)
diff --git a/target/linux/apm821xx/patches-4.14/120-0006-crypto-crypto4xx-performance-optimizations.patch b/target/linux/apm821xx/patches-4.14/120-0006-crypto-crypto4xx-performance-optimizations.patch
new file mode 100644
index 0000000000..0475fdaf8f
--- /dev/null
+++ b/target/linux/apm821xx/patches-4.14/120-0006-crypto-crypto4xx-performance-optimizations.patch
@@ -0,0 +1,158 @@
+From 30afcbb01a750a1ef0cee8a0861a347912c2e4fb Mon Sep 17 00:00:00 2001
+From: Christian Lamparter <chunkeey at gmail.com>
+Date: Thu, 21 Dec 2017 16:00:01 +0100
+Subject: [PATCH 6/6] crypto: crypto4xx - performance optimizations
+
+This patch provides a cheap 2 MiB/s+ (~6%) performance
+improvement over the current code, because the compiler
+can now optimize several of the endian-swap memcpy calls.
+
+Signed-off-by: Christian Lamparter <chunkeey at gmail.com>
+---
+ drivers/crypto/amcc/crypto4xx_alg.c  | 32 +++++++++++++++++++-------------
+ drivers/crypto/amcc/crypto4xx_core.c | 22 +++++++++++-----------
+ drivers/crypto/amcc/crypto4xx_core.h |  6 ++++--
+ 3 files changed, 34 insertions(+), 26 deletions(-)
+
+--- a/drivers/crypto/amcc/crypto4xx_alg.c
++++ b/drivers/crypto/amcc/crypto4xx_alg.c
+@@ -74,32 +74,38 @@ static void set_dynamic_sa_command_1(str
+ 	sa->sa_command_1.bf.copy_hdr = cp_hdr;
+ }
+ 
+-int crypto4xx_encrypt(struct ablkcipher_request *req)
++static inline int crypto4xx_crypt(struct ablkcipher_request *req,
++				  const unsigned int ivlen, bool decrypt)
+ {
+ 	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+-	unsigned int ivlen = crypto_ablkcipher_ivsize(
+-		crypto_ablkcipher_reqtfm(req));
+ 	__le32 iv[ivlen];
+ 
+ 	if (ivlen)
+ 		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
+ 
+ 	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-		req->nbytes, iv, ivlen, ctx->sa_out, ctx->sa_len, 0);
++		req->nbytes, iv, ivlen, decrypt ? ctx->sa_in : ctx->sa_out,
++		ctx->sa_len, 0);
+ }
+ 
+-int crypto4xx_decrypt(struct ablkcipher_request *req)
++int crypto4xx_encrypt_noiv(struct ablkcipher_request *req)
+ {
+-	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+-	unsigned int ivlen = crypto_ablkcipher_ivsize(
+-		crypto_ablkcipher_reqtfm(req));
+-	__le32 iv[ivlen];
++	return crypto4xx_crypt(req, 0, false);
++}
+ 
+-	if (ivlen)
+-		crypto4xx_memcpy_to_le32(iv, req->info, ivlen);
++int crypto4xx_encrypt_iv(struct ablkcipher_request *req)
++{
++	return crypto4xx_crypt(req, AES_IV_SIZE, false);
++}
+ 
+-	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+-		req->nbytes, iv, ivlen, ctx->sa_in, ctx->sa_len, 0);
++int crypto4xx_decrypt_noiv(struct ablkcipher_request *req)
++{
++	return crypto4xx_crypt(req, 0, true);
++}
++
++int crypto4xx_decrypt_iv(struct ablkcipher_request *req)
++{
++	return crypto4xx_crypt(req, AES_IV_SIZE, true);
+ }
+ 
+ /**
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -582,7 +582,7 @@ static void crypto4xx_aead_done(struct c
+ 	struct scatterlist *dst = pd_uinfo->dest_va;
+ 	size_t cp_len = crypto_aead_authsize(
+ 		crypto_aead_reqtfm(aead_req));
+-	u32 icv[cp_len];
++	u32 icv[AES_BLOCK_SIZE];
+ 	int err = 0;
+ 
+ 	if (pd_uinfo->using_sd) {
+@@ -597,7 +597,7 @@ static void crypto4xx_aead_done(struct c
+ 	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
+ 		/* append icv at the end */
+ 		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
+-					   cp_len);
++					   sizeof(icv));
+ 
+ 		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
+ 					 cp_len, 1);
+@@ -607,7 +607,7 @@ static void crypto4xx_aead_done(struct c
+ 			aead_req->assoclen + aead_req->cryptlen -
+ 			cp_len, cp_len, 0);
+ 
+-		crypto4xx_memcpy_from_le32(icv, icv, cp_len);
++		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));
+ 
+ 		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
+ 			err = -EBADMSG;
+@@ -1124,8 +1124,8 @@ static struct crypto4xx_alg_common crypt
+ 				.max_keysize 	= AES_MAX_KEY_SIZE,
+ 				.ivsize		= AES_IV_SIZE,
+ 				.setkey 	= crypto4xx_setkey_aes_cbc,
+-				.encrypt 	= crypto4xx_encrypt,
+-				.decrypt 	= crypto4xx_decrypt,
++				.encrypt 	= crypto4xx_encrypt_iv,
++				.decrypt 	= crypto4xx_decrypt_iv,
+ 			}
+ 		}
+ 	}},
+@@ -1148,8 +1148,8 @@ static struct crypto4xx_alg_common crypt
+ 				.max_keysize	= AES_MAX_KEY_SIZE,
+ 				.ivsize		= AES_IV_SIZE,
+ 				.setkey		= crypto4xx_setkey_aes_cfb,
+-				.encrypt	= crypto4xx_encrypt,
+-				.decrypt	= crypto4xx_decrypt,
++				.encrypt	= crypto4xx_encrypt_iv,
++				.decrypt	= crypto4xx_decrypt_iv,
+ 			}
+ 		}
+ 	} },
+@@ -1197,8 +1197,8 @@ static struct crypto4xx_alg_common crypt
+ 				.min_keysize	= AES_MIN_KEY_SIZE,
+ 				.max_keysize	= AES_MAX_KEY_SIZE,
+ 				.setkey		= crypto4xx_setkey_aes_ecb,
+-				.encrypt	= crypto4xx_encrypt,
+-				.decrypt	= crypto4xx_decrypt,
++				.encrypt	= crypto4xx_encrypt_noiv,
++				.decrypt	= crypto4xx_decrypt_noiv,
+ 			}
+ 		}
+ 	} },
+@@ -1221,8 +1221,8 @@ static struct crypto4xx_alg_common crypt
+ 				.max_keysize	= AES_MAX_KEY_SIZE,
+ 				.ivsize		= AES_IV_SIZE,
+ 				.setkey		= crypto4xx_setkey_aes_ofb,
+-				.encrypt	= crypto4xx_encrypt,
+-				.decrypt	= crypto4xx_decrypt,
++				.encrypt	= crypto4xx_encrypt_iv,
++				.decrypt	= crypto4xx_decrypt_iv,
+ 			}
+ 		}
+ 	} },
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -168,8 +168,10 @@ int crypto4xx_setkey_aes_ofb(struct cryp
+ 			     const u8 *key, unsigned int keylen);
+ int crypto4xx_setkey_rfc3686(struct crypto_ablkcipher *cipher,
+ 			     const u8 *key, unsigned int keylen);
+-int crypto4xx_encrypt(struct ablkcipher_request *req);
+-int crypto4xx_decrypt(struct ablkcipher_request *req);
++int crypto4xx_encrypt_iv(struct ablkcipher_request *req);
++int crypto4xx_decrypt_iv(struct ablkcipher_request *req);
++int crypto4xx_encrypt_noiv(struct ablkcipher_request *req);
++int crypto4xx_decrypt_noiv(struct ablkcipher_request *req);
+ int crypto4xx_rfc3686_encrypt(struct ablkcipher_request *req);
+ int crypto4xx_rfc3686_decrypt(struct ablkcipher_request *req);
+ int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
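
A note on where the ~6% in this last patch comes from: the VLA u32 icv[cp_len]
and the per-request crypto_ablkcipher_ivsize() lookup forced variable-length
copies, so the endian-swap helpers had to stay generic loops. With the ICV
buffer fixed at AES_BLOCK_SIZE and the IV length passed as a compile-time
constant (0 or AES_IV_SIZE via the new *_iv/*_noiv entry points), those copies
become fixed-size and the compiler can unroll and inline them. A standalone
sketch of the effect (memcpy_swab32 below is a made-up stand-in for the
driver's own endian-swap helper, not its real name):

	#include <linux/types.h>
	#include <linux/swab.h>		/* swab32() */

	#define AES_IV_SIZE 16		/* as used by the driver; equal to AES_BLOCK_SIZE */

	/* Stand-in for the driver's word-wise endian-swap copy helper. */
	static inline void memcpy_swab32(u32 *dst, const u32 *src, unsigned int words)
	{
		unsigned int i;

		for (i = 0; i < words; i++)
			dst[i] = swab32(src[i]);
	}

	/* Length only known at run time: the loop stays a loop. */
	void copy_iv_runtime(u32 *dst, const u32 *src, unsigned int ivlen)
	{
		memcpy_swab32(dst, src, ivlen / 4);
	}

	/* Length fixed at compile time: the four swab32()s get unrolled and inlined. */
	void copy_iv_const(u32 *dst, const u32 *src)
	{
		memcpy_swab32(dst, src, AES_IV_SIZE / 4);
	}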
-- 
2.15.1