[PATCH 10/15] crypto: zynqmp-aes-gcm: Switch from coherent DMA to streaming DMA API
Harsh Jain
h.jain at amd.com
Wed Oct 29 03:21:53 PDT 2025
Switch the driver from coherent DMA buffers to the streaming DMA API.
Request data and the hardware request descriptor are now allocated with
kmalloc() and mapped with dma_map_single()/dma_unmap_single() for the
duration of each operation, and the KUP key is kept in a long-lived
streaming mapping that is synced towards the device on every setkey().
Signed-off-by: Harsh Jain <h.jain at amd.com>
---
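Reviewer note (not part of the commit message): the per-request pattern the
patch moves to is the usual streaming DMA sequence. The sketch below is
illustrative only, not driver code; demo_send() and its parameters are
invented names, and the "late CPU update" stands in for the driver's write
of hwreq->iv after mapping.

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/*
	 * Minimal sketch of the per-request streaming DMA pattern:
	 * allocate an ordinary cacheable buffer with kmalloc(), map it
	 * for the duration of one request, bracket any CPU write made
	 * after mapping with sync calls, and unmap before freeing.
	 */
	static int demo_send(struct device *dev, const void *payload, size_t len)
	{
		dma_addr_t dma_addr;
		void *buf;
		int ret = 0;

		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, payload, len);	/* CPU owns the buffer until it is mapped */

		dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * If the CPU must touch the buffer again after mapping,
		 * hand ownership back and forth explicitly:
		 */
		dma_sync_single_for_cpu(dev, dma_addr, len, DMA_TO_DEVICE);
		/* ... late CPU update here ... */
		dma_sync_single_for_device(dev, dma_addr, len, DMA_TO_DEVICE);

		/* ... pass dma_addr to the hardware and wait for completion ... */

		dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
	out:
		kfree(buf);
		return ret;
	}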
drivers/crypto/xilinx/zynqmp-aes-gcm.c | 92 ++++++++++++++++----------
1 file changed, 57 insertions(+), 35 deletions(-)
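The KUP key handling below uses a second, longer-lived variant of the same
API: the key buffer is mapped once when the transform is initialised,
re-synced towards the device after every setkey(), and unmapped on
teardown. A hedged sketch of that lifecycle (demo_key_ctx and the
demo_key_* helpers are invented for illustration):

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct demo_key_ctx {
		struct device *dev;
		dma_addr_t key_dma;
		u8 *key;
	};

	/* Map the key buffer once, for the lifetime of the transform. */
	static int demo_key_init(struct demo_key_ctx *ctx, size_t keylen)
	{
		ctx->key = kmalloc(keylen, GFP_KERNEL);
		if (!ctx->key)
			return -ENOMEM;

		ctx->key_dma = dma_map_single(ctx->dev, ctx->key, keylen,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, ctx->key_dma)) {
			kfree(ctx->key);
			return -ENOMEM;
		}
		return 0;
	}

	/* Re-sync towards the device after every CPU update of the key. */
	static void demo_key_update(struct demo_key_ctx *ctx, const u8 *key,
				    size_t keylen)
	{
		dma_sync_single_for_cpu(ctx->dev, ctx->key_dma, keylen,
					DMA_TO_DEVICE);
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(ctx->dev, ctx->key_dma, keylen,
					   DMA_TO_DEVICE);
	}

	/* Unmap before freeing, when the transform is torn down. */
	static void demo_key_exit(struct demo_key_ctx *ctx, size_t keylen)
	{
		dma_unmap_single(ctx->dev, ctx->key_dma, keylen, DMA_TO_DEVICE);
		kfree(ctx->key);
	}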
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index bc12340487be..e3e7aef87571 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -65,8 +65,8 @@ struct zynqmp_aead_hw_req {
struct zynqmp_aead_tfm_ctx {
struct device *dev;
- u8 key[ZYNQMP_AES_KEY_SIZE];
- u8 *iv;
+ dma_addr_t key_dma_addr;
+ u8 *key;
u32 keylen;
u32 authsize;
u8 keysrc;
@@ -82,39 +82,38 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct zynqmp_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
struct zynqmp_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+ dma_addr_t dma_addr_data, dma_addr_hw_req;
struct device *dev = tfm_ctx->dev;
struct zynqmp_aead_hw_req *hwreq;
- dma_addr_t dma_addr_data, dma_addr_hw_req;
unsigned int data_size;
unsigned int status;
int ret;
size_t dma_size;
+ void *dmabuf;
char *kbuf;
- if (tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY)
- dma_size = req->cryptlen + ZYNQMP_AES_KEY_SIZE
- + GCM_AES_IV_SIZE;
- else
- dma_size = req->cryptlen + GCM_AES_IV_SIZE;
-
- kbuf = dma_alloc_coherent(dev, dma_size, &dma_addr_data, GFP_KERNEL);
+ dma_size = req->cryptlen + ZYNQMP_AES_AUTH_SIZE;
+ kbuf = kmalloc(dma_size, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
- hwreq = dma_alloc_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
- &dma_addr_hw_req, GFP_KERNEL);
- if (!hwreq) {
- dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
+ dmabuf = kmalloc(sizeof(*hwreq) + GCM_AES_IV_SIZE, GFP_KERNEL);
+ if (!dmabuf) {
+ kfree(kbuf);
return -ENOMEM;
}
-
+ hwreq = dmabuf;
data_size = req->cryptlen;
scatterwalk_map_and_copy(kbuf, req->src, 0, req->cryptlen, 0);
- memcpy(kbuf + data_size, req->iv, GCM_AES_IV_SIZE);
+ memcpy(dmabuf + sizeof(struct zynqmp_aead_hw_req), req->iv, GCM_AES_IV_SIZE);
+ dma_addr_data = dma_map_single(dev, kbuf, dma_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(dev, dma_addr_data))) {
+ ret = -ENOMEM;
+ goto freemem;
+ }
hwreq->src = dma_addr_data;
hwreq->dst = dma_addr_data;
- hwreq->iv = hwreq->src + data_size;
hwreq->keysrc = tfm_ctx->keysrc;
hwreq->op = rq_ctx->op;
@@ -123,17 +122,26 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
else
hwreq->size = data_size - ZYNQMP_AES_AUTH_SIZE;
- if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY) {
- memcpy(kbuf + data_size + GCM_AES_IV_SIZE,
- tfm_ctx->key, ZYNQMP_AES_KEY_SIZE);
-
- hwreq->key = hwreq->src + data_size + GCM_AES_IV_SIZE;
- } else {
+ if (hwreq->keysrc == ZYNQMP_AES_KUP_KEY)
+ hwreq->key = tfm_ctx->key_dma_addr;
+ else
hwreq->key = 0;
- }
+ dma_addr_hw_req = dma_map_single(dev, dmabuf, sizeof(struct zynqmp_aead_hw_req) +
+ GCM_AES_IV_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr_hw_req))) {
+ ret = -ENOMEM;
+ dma_unmap_single(dev, dma_addr_data, dma_size, DMA_BIDIRECTIONAL);
+ goto freemem;
+ }
+ hwreq->iv = dma_addr_hw_req + sizeof(struct zynqmp_aead_hw_req);
+ dma_sync_single_for_device(dev, dma_addr_hw_req, sizeof(struct zynqmp_aead_hw_req) +
+ GCM_AES_IV_SIZE, DMA_TO_DEVICE);
ret = zynqmp_pm_aes_engine(dma_addr_hw_req, &status);
-
+ dma_unmap_single(dev, dma_addr_hw_req, sizeof(struct zynqmp_aead_hw_req) + GCM_AES_IV_SIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_addr_data, dma_size, DMA_BIDIRECTIONAL);
if (ret) {
dev_err(dev, "ERROR: AES PM API failed\n");
} else if (status) {
@@ -164,15 +172,11 @@ static int zynqmp_aes_aead_cipher(struct aead_request *req)
ret = 0;
}
- if (kbuf) {
- memzero_explicit(kbuf, dma_size);
- dma_free_coherent(dev, dma_size, kbuf, dma_addr_data);
- }
- if (hwreq) {
- memzero_explicit(hwreq, sizeof(struct zynqmp_aead_hw_req));
- dma_free_coherent(dev, sizeof(struct zynqmp_aead_hw_req),
- hwreq, dma_addr_hw_req);
- }
+freemem:
+ memzero_explicit(kbuf, dma_size);
+ kfree(kbuf);
+ memzero_explicit(dmabuf, sizeof(struct zynqmp_aead_hw_req) + GCM_AES_IV_SIZE);
+ kfree(dmabuf);
return ret;
}
@@ -243,6 +247,9 @@ static int zynqmp_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
if (keylen == ZYNQMP_AES_KEY_SIZE && tfm_ctx->keysrc == ZYNQMP_AES_KUP_KEY) {
tfm_ctx->keylen = keylen;
memcpy(tfm_ctx->key, key, keylen);
+ dma_sync_single_for_device(tfm_ctx->dev, tfm_ctx->key_dma_addr,
+ ZYNQMP_AES_KEY_SIZE,
+ DMA_TO_DEVICE);
} else if (tfm_ctx->keysrc != ZYNQMP_AES_KUP_KEY) {
return -EINVAL;
}
@@ -359,7 +366,20 @@ static int zynqmp_aes_aead_init(struct crypto_aead *aead)
__func__, drv_ctx->aead.base.base.cra_name);
return PTR_ERR(tfm_ctx->fbk_cipher);
}
-
+ tfm_ctx->key = kmalloc(ZYNQMP_AES_KEY_SIZE, GFP_KERNEL);
+ if (!tfm_ctx->key) {
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ return -ENOMEM;
+ }
+ tfm_ctx->key_dma_addr = dma_map_single(tfm_ctx->dev, tfm_ctx->key,
+ ZYNQMP_AES_KEY_SIZE,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tfm_ctx->dev, tfm_ctx->key_dma_addr))) {
+ kfree(tfm_ctx->key);
+ crypto_free_aead(tfm_ctx->fbk_cipher);
+ tfm_ctx->fbk_cipher = NULL;
+ return -ENOMEM;
+ }
crypto_aead_set_reqsize(aead,
max(sizeof(struct zynqmp_aead_req_ctx),
sizeof(struct aead_request) +
@@ -373,6 +393,8 @@ static void zynqmp_aes_aead_exit(struct crypto_aead *aead)
struct zynqmp_aead_tfm_ctx *tfm_ctx =
(struct zynqmp_aead_tfm_ctx *)crypto_tfm_ctx(tfm);
+ dma_unmap_single(tfm_ctx->dev, tfm_ctx->key_dma_addr, ZYNQMP_AES_KEY_SIZE, DMA_TO_DEVICE);
+ kfree(tfm_ctx->key);
if (tfm_ctx->fbk_cipher) {
crypto_free_aead(tfm_ctx->fbk_cipher);
tfm_ctx->fbk_cipher = NULL;
--
2.49.1