[PATCH 1/1] crypto: arm64/aes-neonbs - Move key expansion off the stack

Eric Biggers ebiggers at kernel.org
Thu Mar 5 11:38:47 PST 2026


On Fri, Mar 06, 2026 at 02:32:24AM +0800, Cheng-Yang Chou wrote:
> aesbs_setkey() and aesbs_cbc_ctr_setkey() trigger -Wframe-larger-than=
> warnings due to struct crypto_aes_ctx being allocated on the stack,
> causing the frame size to exceed 1024 bytes.
> 
> Allocate struct crypto_aes_ctx on the heap instead to reduce stack
> usage. Use a goto-based cleanup path to ensure memzero_explicit() and
> kfree() are always called.
> 
> Signed-off-by: Cheng-Yang Chou <yphbchou0911 at gmail.com>
> ---
>  arch/arm64/crypto/aes-neonbs-glue.c | 39 ++++++++++++++++++-----------
>  1 file changed, 25 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
> index cb87c8fc66b3..a24b66fd5cad 100644
> --- a/arch/arm64/crypto/aes-neonbs-glue.c
> +++ b/arch/arm64/crypto/aes-neonbs-glue.c
> @@ -76,19 +76,25 @@ static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
>  			unsigned int key_len)
>  {
>  	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
> -	struct crypto_aes_ctx rk;
> +	struct crypto_aes_ctx *rk;
>  	int err;
>  
> -	err = aes_expandkey(&rk, in_key, key_len);
> +	rk = kmalloc(sizeof(*rk), GFP_KERNEL);
> +	if (!rk)
> +		return -ENOMEM;
> +
> +	err = aes_expandkey(rk, in_key, key_len);
>  	if (err)
> -		return err;
> +		goto out;
>  
>  	ctx->rounds = 6 + key_len / 4;
>  
>  	scoped_ksimd()
> -		aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
> -
> -	return 0;
> +		aesbs_convert_key(ctx->rk, rk->key_enc, ctx->rounds);
> +out:
> +	memzero_explicit(rk, sizeof(*rk));
> +	kfree(rk);
> +	return err;
>  }
>  
>  static int __ecb_crypt(struct skcipher_request *req,
> @@ -133,22 +139,27 @@ static int aesbs_cbc_ctr_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
>  			    unsigned int key_len)
>  {
>  	struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
> -	struct crypto_aes_ctx rk;
> +	struct crypto_aes_ctx *rk;
>  	int err;
>  
> -	err = aes_expandkey(&rk, in_key, key_len);
> +	rk = kmalloc(sizeof(*rk), GFP_KERNEL);
> +	if (!rk)
> +		return -ENOMEM;
> +
> +	err = aes_expandkey(rk, in_key, key_len);
>  	if (err)
> -		return err;
> +		goto out;
>  
>  	ctx->key.rounds = 6 + key_len / 4;
>  
> -	memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));
> +	memcpy(ctx->enc, rk->key_enc, sizeof(ctx->enc));
>  
>  	scoped_ksimd()
> -		aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
> -	memzero_explicit(&rk, sizeof(rk));
> -
> -	return 0;
> +		aesbs_convert_key(ctx->key.rk, rk->key_enc, ctx->key.rounds);
> +out:
> +	memzero_explicit(rk, sizeof(*rk));
> +	kfree(rk);
> +	return err;
>  }

Instead of memzero_explicit() followed by kfree(), just use
kfree_sensitive(), which zeroes the memory before freeing it, so the
explicit memzero_explicit() call becomes unnecessary.

Also, single patches should not have a cover letter.  Just send a single
patch email with all the details in the patch itself.

As for the actual change, I guess it's okay for now.  Ideally we'd
refactor the aes-bs key preparation to not need temporary space.

- Eric



More information about the linux-arm-kernel mailing list