[PATCHv3 5/7] blk-mq-dma: move common dma start code to a helper
Keith Busch
kbusch at meta.com
Tue Jul 29 07:34:40 PDT 2025
From: Keith Busch <kbusch at kernel.org>
In preparation for dma mapping integrity metadata, move the common dma
setup to a helper.
Signed-off-by: Keith Busch <kbusch at kernel.org>
---
block/blk-mq-dma.c | 69 +++++++++++++++++++++++++---------------------
1 file changed, 38 insertions(+), 31 deletions(-)
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 87c9a7bfa090d..646caa00a0485 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -113,44 +113,16 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
return true;
}
-/**
- * blk_rq_dma_map_iter_start - map the first DMA segment for a request
- * @req: request to map
- * @dma_dev: device to map to
- * @state: DMA IOVA state
- * @iter: block layer DMA iterator
- *
- * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
- * caller and don't need to be initialized. @state needs to be stored for use
- * at unmap time, @iter is only needed at map time.
- *
- * Returns %false if there is no segment to map, including due to an error, or
- * %true if it did map a segment.
- *
- * If a segment was mapped, the DMA address for it is returned in @iter.addr and
- * the length in @iter.len. If no segment was mapped the status code is
- * returned in @iter.status.
- *
- * The caller can call blk_rq_dma_map_coalesce() to check if further segments
- * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
- * to try to map the following segments.
- */
-bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
- struct dma_iova_state *state, struct blk_dma_iter *iter)
+static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter,
+ unsigned int total_len)
{
- unsigned int total_len = blk_rq_payload_bytes(req);
struct blk_map_iter *map_iter = &iter->iter;
map_iter->bio = req->bio;
- map_iter->iter = req->bio->bi_iter;
memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
iter->status = BLK_STS_OK;
- if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
- iter->iter.bvec = &req->special_vec;
- else
- iter->iter.bvec = req->bio->bi_io_vec;
-
/*
* Grab the first segment ASAP because we'll need it to check for P2P
* transfers.
@@ -179,6 +151,41 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
return blk_rq_dma_map_iova(req, dma_dev, state, iter);
return blk_dma_map_direct(req, dma_dev, iter);
}
+
+/**
+ * blk_rq_dma_map_iter_start - map the first DMA segment for a request
+ * @req: request to map
+ * @dma_dev: device to map to
+ * @state: DMA IOVA state
+ * @iter: block layer DMA iterator
+ *
+ * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
+ * caller and don't need to be initialized. @state needs to be stored for use
+ * at unmap time, @iter is only needed at map time.
+ *
+ * Returns %false if there is no segment to map, including due to an error, or
+ * %true if it did map a segment.
+ *
+ * If a segment was mapped, the DMA address for it is returned in @iter.addr and
+ * the length in @iter.len. If no segment was mapped the status code is
+ * returned in @iter.status.
+ *
+ * The caller can call blk_rq_dma_map_coalesce() to check if further segments
+ * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
+ * to try to map the following segments.
+ */
+bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
+ struct dma_iova_state *state, struct blk_dma_iter *iter)
+{
+ iter->iter.iter = req->bio->bi_iter;
+ if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
+ iter->iter.bvec = &req->special_vec;
+ else
+ iter->iter.bvec = req->bio->bi_io_vec;
+
+ return blk_dma_map_iter_start(req, dma_dev, state, iter,
+ blk_rq_payload_bytes(req));
+}
EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
/**
--
2.47.3
More information about the Linux-nvme
mailing list