[PATCH v4 2/9] dmaengine: Add safe API to combine configuration and preparation
Frank Li
Frank.Li at nxp.com
Wed May 6 13:44:14 PDT 2026
Introduce dmaengine_prep_config_single_safe() and
dmaengine_prep_config_sg_safe() to provide a reentrant-safe way to
combine slave configuration and transfer preparation.
Drivers may implement the new device_prep_config_sg() callback to perform
both steps atomically. If the callback is not provided, the helpers fall
back to calling dmaengine_slave_config() followed by
dmaengine_prep_slave_sg() under per-channel spinlock protection.
Tested-by: Niklas Cassel <cassel at kernel.org>
Signed-off-by: Frank Li <Frank.Li at nxp.com>
---
change in v4
- use a per-channel spinlock to protect config() and prep()
change in v3
- new patch
---
drivers/dma/dmaengine.c | 2 ++
include/linux/dmaengine.h | 58 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 60 insertions(+)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 405bd2fbb4a3b94fd0bf44526f656f6a19feaad0..ba29e60160c1a0148793bb299849bccfebb6d32b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1099,6 +1099,8 @@ static int __dma_async_device_channel_register(struct dma_device *device,
chan->dev->device.parent = device->dev;
chan->dev->chan = chan;
chan->dev->dev_id = device->dev_id;
+ spin_lock_init(&chan->lock);
+
if (!name)
dev_set_name(&chan->dev->device, "dma%dchan%d", device->dev_id, chan->chan_id);
else
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index defa377d2ef54d94e6337cdfa7826a091295535e..23728f3d60804e49cd4cbbd3a513c4936eed5836 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -322,6 +322,8 @@ struct dma_router {
* @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
+ * @lock: protects between config and prepare transfer when the driver has
+ * not implemented the device_prep_config_sg() callback.
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @name: backlink name for sysfs
@@ -341,6 +343,12 @@ struct dma_chan {
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
+ /*
+ * Protects between config and prepare transfer because *_prep() may be
+ * called from a completion callback, which runs in atomic (non-sleeping)
+ * context.
+ */
+ spinlock_t lock; /* protects config vs. prepare transfer */
+
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
@@ -1068,6 +1076,56 @@ dmaengine_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return dmaengine_prep_config_sg(chan, sgl, sg_len, dir, flags, NULL);
}
+/*
+ * dmaengine_prep_config_single(sg)_safe() are the re-entrant-safe variants.
+ *
+ * The unsafe variants (without the _safe suffix) fall back to calling
+ * dmaengine_slave_config() and dmaengine_prep_slave_sg() separately.
+ * In that case, additional locking may be required, depending on the
+ * DMA consumer's usage.
+ *
+ * If the dmaengine driver has not implemented the device_prep_config_sg()
+ * callback, the safe versions use a per-channel spinlock to protect the
+ * calls to dmaengine_slave_config() and dmaengine_prep_slave_sg().
+ */
+static inline struct dma_async_tx_descriptor *
+dmaengine_prep_config_sg_safe(struct dma_chan *chan, struct scatterlist *sgl,
+			      unsigned int sg_len,
+			      enum dma_transfer_direction dir,
+			      unsigned long flags,
+			      struct dma_slave_config *config)
+{
+	struct dma_async_tx_descriptor *tx;
+	unsigned long irqflags;
+	bool need_lock;
+
+	if (!chan || !chan->device)
+		return NULL;
+
+	/* Evaluate once so the lock and unlock below always pair up. */
+	need_lock = !chan->device->device_prep_config_sg;
+
+	/*
+	 * *_prep() may be called from a completion callback running in
+	 * atomic (interrupt/tasklet) context, so use the irqsave variant:
+	 * a plain spin_lock() held in process context could deadlock
+	 * against such a caller on the same CPU.
+	 */
+	if (need_lock)
+		spin_lock_irqsave(&chan->lock, irqflags);
+
+	tx = dmaengine_prep_config_sg(chan, sgl, sg_len, dir, flags, config);
+
+	if (need_lock)
+		spin_unlock_irqrestore(&chan->lock, irqflags);
+
+	return tx;
+}
+
+/*
+ * Single-buffer convenience wrapper around dmaengine_prep_config_sg_safe():
+ * builds a one-entry scatterlist on the stack for @buf/@len and forwards the
+ * remaining arguments unchanged. A NULL @config behaves like a plain prepare
+ * with no reconfiguration, as with dmaengine_prep_slave_sg().
+ */
+static inline struct dma_async_tx_descriptor *
+dmaengine_prep_config_single_safe(struct dma_chan *chan, dma_addr_t buf,
+ size_t len, enum dma_transfer_direction dir,
+ unsigned long flags,
+ struct dma_slave_config *config)
+{
+ struct scatterlist sg;
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = buf;
+ sg_dma_len(&sg) = len;
+
+ return dmaengine_prep_config_sg_safe(chan, &sg, 1, dir, flags, config);
+}
+
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct rio_dma_ext;
static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
--
2.43.0
More information about the Linux-nvme
mailing list