[PATCH 15/15] crypto: xilinx: Add gcm(aes) support for AMD/Xilinx Versal devices

Harsh Jain h.jain at amd.com
Wed Oct 29 03:21:58 PDT 2025


Add gcm(aes) algorithm support for AMD/Xilinx Versal devices. The Versal
AES engine is driven through platform firmware calls; both raw 128/256-bit
keys (loaded into volatile user-key slots) and device-resident key sources
selected via struct xilinx_hwkey_info are supported. Requests the hardware
cannot handle are diverted to a software fallback.

Signed-off-by: Harsh Jain <h.jain at amd.com>
---
 drivers/crypto/xilinx/zynqmp-aes-gcm.c | 380 ++++++++++++++++++++++++-
 1 file changed, 377 insertions(+), 3 deletions(-)
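
For context, below is a minimal sketch (not part of the patch) of how an
in-kernel user could drive the new transform through the generic AEAD
API. The helper name and buffer layout are illustrative only; note that
a struct xilinx_hwkey_info blob can be passed to setkey in place of a
raw key to select a device-resident key source.

#include <crypto/aead.h>
#include <crypto/gcm.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative one-shot GCM encryption via the kernel AEAD API. */
static int example_versal_gcm_encrypt(const u8 *key, unsigned int keylen,
				      u8 iv[GCM_AES_IV_SIZE],
				      struct scatterlist *src,
				      struct scatterlist *dst,
				      unsigned int assoclen,
				      unsigned int cryptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto free_tfm;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}
	/* src covers AAD || plaintext; dst needs room for the 16-byte tag. */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return err;
}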

diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 834852a042dd..da2c59cb74e2 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Xilinx ZynqMP AES Driver.
- * Copyright (c) 2020 Xilinx Inc.
+ * Copyright (C) 2020-2022 Xilinx Inc.
+ * Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
  */
 
 #include <crypto/aes.h>
@@ -19,11 +20,13 @@
 #include <linux/string.h>
 
 #define ZYNQMP_DMA_BIT_MASK	32U
+#define VERSAL_DMA_BIT_MASK		64U
 #define XILINX_AES_AUTH_SIZE		16U
 #define XILINX_AES_BLK_SIZE		1U
 #define ZYNQMP_AES_MIN_INPUT_BLK_SIZE	4U
 #define ZYNQMP_AES_WORD_LEN		4U
 
+#define VERSAL_AES_QWORD_LEN		16U
 #define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR	0x01
 #define ZYNQMP_AES_WRONG_KEY_SRC_ERR	0x13
 #define ZYNQMP_AES_PUF_NOT_PROGRAMMED	0xE300
@@ -44,7 +47,9 @@ struct xilinx_aead_drv_ctx {
 	struct aead_engine_alg aead;
 	struct device *dev;
 	struct crypto_engine *engine;
+	u8 keysrc;
 	u8 dma_bit_mask;
+	int (*aes_aead_cipher)(struct aead_request *areq);
 };
 
 struct xilinx_hwkey_info {
@@ -76,6 +81,54 @@ struct zynqmp_aead_req_ctx {
 	enum xilinx_aead_op op;
 };
 
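+/*
+ * Key sources selectable on Versal; the values are passed through to the
+ * platform firmware, which owns the key material for the device-resident
+ * sources.
+ */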
+enum versal_aead_keysrc {
+	VERSAL_AES_BBRAM_KEY = 0,
+	VERSAL_AES_BBRAM_RED_KEY,
+	VERSAL_AES_BH_KEY,
+	VERSAL_AES_BH_RED_KEY,
+	VERSAL_AES_EFUSE_KEY,
+	VERSAL_AES_EFUSE_RED_KEY,
+	VERSAL_AES_EFUSE_USER_KEY_0,
+	VERSAL_AES_EFUSE_USER_KEY_1,
+	VERSAL_AES_EFUSE_USER_RED_KEY_0,
+	VERSAL_AES_EFUSE_USER_RED_KEY_1,
+	VERSAL_AES_KUP_KEY,
+	VERSAL_AES_PUF_KEY,
+	VERSAL_AES_USER_KEY_0,
+	VERSAL_AES_USER_KEY_1,
+	VERSAL_AES_USER_KEY_2,
+	VERSAL_AES_USER_KEY_3,
+	VERSAL_AES_USER_KEY_4,
+	VERSAL_AES_USER_KEY_5,
+	VERSAL_AES_USER_KEY_6,
+	VERSAL_AES_USER_KEY_7,
+	VERSAL_AES_EXPANDED_KEYS,
+	VERSAL_AES_ALL_KEYS,
+};
+
+enum versal_aead_op {
+	VERSAL_AES_ENCRYPT = 0,
+	VERSAL_AES_DECRYPT
+};
+
+enum versal_aes_keysize {
+	HW_AES_KEY_SIZE_128 = 0,
+	HW_AES_KEY_SIZE_256 = 2,
+};
+
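+/*
+ * Parameter block handed to the firmware (by DMA address) when starting
+ * an AES operation: DMA address of the IV, direction, key source and
+ * key size.
+ */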
+struct versal_init_ops {
+	u64 iv;
+	u32 op;
+	u32 keysrc;
+	u32 size;
+};
+
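+/* One input chunk for the firmware: DMA address, byte length, last-chunk flag. */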
+struct versal_in_params {
+	u64 in_data_addr;
+	u32 size;
+	u32 is_last;
+};
+
 static int zynqmp_aes_aead_cipher(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -180,6 +233,141 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
 	return ret;
 }
 
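+/*
+ * Process one GCM request on the Versal hardware: linearize the
+ * scatter-gather input into a bounce buffer, build the DMA-mapped
+ * parameter block (init ops, input descriptor, IV) and drive the
+ * firmware through init -> optional AAD update -> payload update ->
+ * final, before copying the result back to the destination.
+ */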
+static int versal_aes_aead_cipher(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+	struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+	dma_addr_t dma_addr_data, dma_addr_hw_req, dma_addr_in;
+	u32 total_len = req->assoclen + req->cryptlen;
+	struct device *dev = tfm_ctx->dev;
+	struct versal_init_ops *hwreq;
+	struct versal_in_params *in;
+	u32 gcm_offset, out_len;
+	size_t dmabuf_size;
+	size_t kbuf_size;
+	void *dmabuf;
+	char *kbuf;
+	int ret;
+
+	kbuf_size = total_len + XILINX_AES_AUTH_SIZE;
+	kbuf = kmalloc(kbuf_size, GFP_KERNEL);
+	if (unlikely(!kbuf)) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	dmabuf_size = sizeof(struct versal_init_ops) +
+		      sizeof(struct versal_in_params) +
+		      GCM_AES_IV_SIZE;
+	dmabuf = kmalloc(dmabuf_size, GFP_KERNEL);
+	if (unlikely(!dmabuf)) {
+		ret = -ENOMEM;
+		goto buf1_free;
+	}
+
+	dma_addr_hw_req = dma_map_single(dev, dmabuf, dmabuf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, dma_addr_hw_req))) {
+		ret = -ENOMEM;
+		goto buf2_free;
+	}
+	scatterwalk_map_and_copy(kbuf, req->src, 0, total_len, 0);
+	dma_addr_data = dma_map_single(dev, kbuf, kbuf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, dma_addr_data))) {
+		dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+		ret = -ENOMEM;
+		goto buf2_free;
+	}
+	hwreq = dmabuf;
+	in = dmabuf + sizeof(struct versal_init_ops);
+	memcpy(dmabuf + sizeof(struct versal_init_ops) +
+	       sizeof(struct versal_in_params), req->iv, GCM_AES_IV_SIZE);
+	hwreq->iv = dma_addr_hw_req + sizeof(struct versal_init_ops) +
+		    sizeof(struct versal_in_params);
+	hwreq->keysrc = tfm_ctx->keysrc;
+	dma_addr_in = dma_addr_hw_req + sizeof(struct versal_init_ops);
+	if (rq_ctx->op == XILINX_AES_ENCRYPT) {
+		hwreq->op = VERSAL_AES_ENCRYPT;
+		out_len = total_len + crypto_aead_authsize(aead);
+		in->size = req->cryptlen;
+	} else {
+		hwreq->op = VERSAL_AES_DECRYPT;
+		out_len = total_len - XILINX_AES_AUTH_SIZE;
+		in->size = req->cryptlen - XILINX_AES_AUTH_SIZE;
+	}
+
+	if (tfm_ctx->keylen == AES_KEYSIZE_128)
+		hwreq->size = HW_AES_KEY_SIZE_128;
+	else
+		hwreq->size = HW_AES_KEY_SIZE_256;
+
+	/* Request aes key write for volatile user keys */
+	if (hwreq->keysrc >= VERSAL_AES_USER_KEY_0 && hwreq->keysrc <= VERSAL_AES_USER_KEY_7) {
+		ret = versal_pm_aes_key_write(hwreq->size, hwreq->keysrc,
+					      tfm_ctx->key_dma_addr);
+		if (ret)
+			goto unmap;
+	}
+
+	in->in_data_addr = dma_addr_data + req->assoclen;
+	in->is_last = 1;
+	gcm_offset = req->assoclen + in->size;
+	dma_sync_single_for_device(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+	ret = versal_pm_aes_op_init(dma_addr_hw_req);
+	if (ret)
+		goto clearkey;
+
+	if (req->assoclen > 0) {
+		/* GMAC-only mode is off by default; AAD is fed through the regular GCM AAD update */
+		ret = versal_pm_aes_update_aad(dma_addr_data, req->assoclen);
+		if (ret)
+			goto clearkey;
+	}
+	if (rq_ctx->op == XILINX_AES_ENCRYPT) {
+		ret = versal_pm_aes_enc_update(dma_addr_in,
+					       dma_addr_data + req->assoclen);
+		if (ret)
+			goto clearkey;
+
+		ret = versal_pm_aes_enc_final(dma_addr_data + gcm_offset);
+		if (ret)
+			goto clearkey;
+	} else {
+		ret = versal_pm_aes_dec_update(dma_addr_in,
+					       dma_addr_data + req->assoclen);
+		if (ret)
+			goto clearkey;
+
+		ret = versal_pm_aes_dec_final(dma_addr_data + gcm_offset);
+		if (ret) {
+			ret = -EBADMSG;
+			goto clearkey;
+		}
+	}
+	dma_unmap_single(dev, dma_addr_data, kbuf_size, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+	sg_copy_from_buffer(req->dst, sg_nents(req->dst),
+			    kbuf, out_len);
+	dma_addr_data = 0;
+	dma_addr_hw_req = 0;
+
+clearkey:
+	if (hwreq->keysrc >= VERSAL_AES_USER_KEY_0 && hwreq->keysrc <= VERSAL_AES_USER_KEY_7)
+		versal_pm_aes_key_zero(hwreq->keysrc);
+unmap:
+	if (unlikely(dma_addr_data))
+		dma_unmap_single(dev, dma_addr_data, kbuf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_addr_hw_req))
+		dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+buf2_free:
+	memzero_explicit(dmabuf, dmabuf_size);
+	kfree(dmabuf);
+buf1_free:
+	memzero_explicit(kbuf, kbuf_size);
+	kfree(kbuf);
+err:
+	return ret;
+}
+
 static int zynqmp_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
 				 struct aead_request *req)
 {
@@ -205,13 +393,40 @@ static int zynqmp_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
 	return 0;
 }
 
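+/*
+ * Requests the Versal engine cannot take (192-bit keys, payloads that
+ * are not word-aligned or too short, AAD not 16-byte aligned, truncated
+ * tags on decrypt) are diverted to the software fallback.
+ */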
+static int versal_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
+				 struct aead_request *req)
+{
+	struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+	if (tfm_ctx->authsize != XILINX_AES_AUTH_SIZE && rq_ctx->op == XILINX_AES_DECRYPT)
+		return 1;
+
+	if (tfm_ctx->keylen == AES_KEYSIZE_192)
+		return 1;
+
+	if (req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE ||
+	    req->cryptlen % ZYNQMP_AES_WORD_LEN ||
+	    req->assoclen % VERSAL_AES_QWORD_LEN)
+		return 1;
+
+	if (rq_ctx->op == XILINX_AES_DECRYPT &&
+	    req->cryptlen <= XILINX_AES_AUTH_SIZE)
+		return 1;
+
+	return 0;
+}
+
 static int xilinx_handle_aes_req(struct crypto_engine *engine, void *req)
 {
 	struct aead_request *areq =
 				container_of(req, struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct xilinx_aead_drv_ctx *drv_ctx;
 	int err;
 
-	err = zynqmp_aes_aead_cipher(areq);
+	drv_ctx = container_of(alg, struct xilinx_aead_drv_ctx, aead.base);
+	err = drv_ctx->aes_aead_cipher(areq);
 	local_bh_disable();
 	crypto_finalize_aead_request(engine, areq, err);
 	local_bh_enable();
@@ -264,6 +479,50 @@ static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
 	return err;
 }
 
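+/*
+ * Two key-programming models: a raw 128/256-bit key is copied into the
+ * selected volatile user-key slot (and mirrored to the fallback cipher),
+ * while a struct xilinx_hwkey_info blob selects a device-resident key
+ * source without exposing key material to the kernel.
+ */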
+static int versal_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+				  unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+	struct xilinx_hwkey_info hwkey;
+	unsigned char keysrc;
+	int err;
+
+	if (keylen == sizeof(struct xilinx_hwkey_info)) {
+		memcpy(&hwkey, key, sizeof(struct xilinx_hwkey_info));
+		if (hwkey.magic != XILINX_KEY_MAGIC)
+			return -EINVAL;
+
+		keysrc = hwkey.type;
+		if (keysrc >= VERSAL_AES_EFUSE_USER_KEY_0 &&
+		    keysrc <= VERSAL_AES_USER_KEY_7 &&
+		    keysrc != VERSAL_AES_KUP_KEY) {
+			tfm_ctx->keysrc = keysrc;
+			tfm_ctx->keylen = sizeof(struct xilinx_hwkey_info);
+			return 0;
+		}
+		return -EINVAL;
+	}
+	if (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 || tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7)
+		return -EINVAL;
+	if (keylen == AES_KEYSIZE_256 || keylen == AES_KEYSIZE_128) {
+		tfm_ctx->keylen = keylen;
+		memcpy(tfm_ctx->key, key, keylen);
+		dma_sync_single_for_device(tfm_ctx->dev, tfm_ctx->key_dma_addr,
+					   AES_KEYSIZE_256,
+					   DMA_TO_DEVICE);
+	}
+
+	tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
+						CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+	if (!err)
+		tfm_ctx->keylen = keylen;
+
+	return err;
+}
+
 static int xilinx_aes_aead_setauthsize(struct crypto_aead *aead,
 				       unsigned int authsize)
 {
@@ -309,6 +568,43 @@ static int zynqmp_aes_aead_encrypt(struct aead_request *req)
 	return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
 }
 
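+/*
+ * Encrypt entry point: a user-key slot without a programmed key is an
+ * error; requests the engine cannot handle fall back to software when a
+ * raw key is available, and everything else is queued on the crypto
+ * engine.
+ */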
+static int versal_aes_aead_encrypt(struct aead_request *req)
+{
+	struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct xilinx_aead_drv_ctx *drv_ctx;
+	int err;
+
+	drv_ctx = container_of(alg, struct xilinx_aead_drv_ctx, aead.base);
+	rq_ctx->op = XILINX_AES_ENCRYPT;
+	if (tfm_ctx->keysrc >= VERSAL_AES_USER_KEY_0 &&
+	    tfm_ctx->keysrc <= VERSAL_AES_USER_KEY_7 &&
+	    tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+		return -EINVAL;
+	err = versal_fallback_check(tfm_ctx, req);
+	if (err && (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 ||
+		    tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7))
+		return -EOPNOTSUPP;
+	if (err) {
+		aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
+		aead_request_set_callback(subreq, req->base.flags,
+					  NULL, NULL);
+		aead_request_set_crypt(subreq, req->src, req->dst,
+				       req->cryptlen, req->iv);
+		aead_request_set_ad(subreq, req->assoclen);
+		return crypto_aead_encrypt(subreq);
+	}
+
+	return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
 static int zynqmp_aes_aead_decrypt(struct aead_request *req)
 {
 	struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
@@ -342,6 +638,45 @@ static int zynqmp_aes_aead_decrypt(struct aead_request *req)
 	return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
 }
 
+static int versal_aes_aead_decrypt(struct aead_request *req)
+{
+	struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+	struct aead_request *subreq = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct xilinx_aead_drv_ctx *drv_ctx;
+	int err;
+
+	drv_ctx = container_of(alg, struct xilinx_aead_drv_ctx, aead.base);
+	rq_ctx->op = XILINX_AES_DECRYPT;
+	if (tfm_ctx->keysrc >= VERSAL_AES_USER_KEY_0 &&
+	    tfm_ctx->keysrc <= VERSAL_AES_USER_KEY_7 &&
+	    tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+		return -EINVAL;
+
+	err = versal_fallback_check(tfm_ctx, req);
+	if (err &&
+	    (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 ||
+	    tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7))
+		return -EOPNOTSUPP;
+	if (err) {
+		aead_request_set_tfm(subreq, tfm_ctx->fbk_cipher);
+		aead_request_set_callback(subreq, req->base.flags,
+					  NULL, NULL);
+		aead_request_set_crypt(subreq, req->src, req->dst,
+				       req->cryptlen, req->iv);
+		aead_request_set_ad(subreq, req->assoclen);
+		return crypto_aead_decrypt(subreq);
+	}
+
+	return crypto_transfer_aead_request_to_engine(drv_ctx->engine, req);
+}
+
 static int xilinx_aes_aead_init(struct crypto_aead *aead)
 {
 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
@@ -352,7 +687,7 @@ static int xilinx_aes_aead_init(struct crypto_aead *aead)
 	drv_ctx = container_of(alg, struct xilinx_aead_drv_ctx, aead.base);
 	tfm_ctx->dev = drv_ctx->dev;
 	tfm_ctx->keylen = 0;
-	tfm_ctx->keysrc = ZYNQMP_AES_KUP_KEY;
+	tfm_ctx->keysrc = drv_ctx->keysrc;
 
 	tfm_ctx->fbk_cipher = crypto_alloc_aead(drv_ctx->aead.base.base.cra_name,
 						0,
@@ -399,6 +734,8 @@ static void xilinx_aes_aead_exit(struct crypto_aead *aead)
 }
 
 static struct xilinx_aead_drv_ctx zynqmp_aes_drv_ctx = {
+	.aes_aead_cipher = zynqmp_aes_aead_cipher,
+	.keysrc = ZYNQMP_AES_KUP_KEY,
 	.aead.base = {
 		.setkey		= zynqmp_aes_aead_setkey,
 		.setauthsize	= xilinx_aes_aead_setauthsize,
@@ -428,12 +765,49 @@ static struct xilinx_aead_drv_ctx zynqmp_aes_drv_ctx = {
 	.dma_bit_mask = ZYNQMP_DMA_BIT_MASK,
 };
 
+static struct xilinx_aead_drv_ctx versal_aes_drv_ctx = {
+	.aes_aead_cipher	= versal_aes_aead_cipher,
+	.keysrc = VERSAL_AES_USER_KEY_0,
+	.aead.base = {
+		.setkey		= versal_aes_aead_setkey,
+		.setauthsize	= xilinx_aes_aead_setauthsize,
+		.encrypt	= versal_aes_aead_encrypt,
+		.decrypt	= versal_aes_aead_decrypt,
+		.init		= xilinx_aes_aead_init,
+		.exit		= xilinx_aes_aead_exit,
+		.ivsize		= GCM_AES_IV_SIZE,
+		.maxauthsize	= XILINX_AES_AUTH_SIZE,
+		.base = {
+		.cra_name		= "gcm(aes)",
+		.cra_driver_name	= "versal-aes-gcm",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_AEAD |
+					  CRYPTO_ALG_ASYNC |
+					  CRYPTO_ALG_ALLOCATES_MEMORY |
+					  CRYPTO_ALG_KERN_DRIVER_ONLY |
+					  CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= XILINX_AES_BLK_SIZE,
+		.cra_ctxsize		= sizeof(struct xilinx_aead_tfm_ctx),
+		.cra_module		= THIS_MODULE,
+		}
+	},
+	.aead.op = {
+		.do_one_request = xilinx_handle_aes_req,
+	},
+	.dma_bit_mask = VERSAL_DMA_BIT_MASK,
+};
+
 static struct xlnx_feature aes_feature_map[] = {
 	{
 		.family = PM_ZYNQMP_FAMILY_CODE,
 		.feature_id = PM_SECURE_AES,
 		.data = &zynqmp_aes_drv_ctx,
 	},
+	{
+		.family = PM_VERSAL_FAMILY_CODE,
+		.feature_id = XSECURE_API_AES_OP_INIT,
+		.data = &versal_aes_drv_ctx,
+	},
 	{ /* sentinel */ }
 };
 
-- 
2.49.1