[PATCH 1/2] blk-integrity: add scatter-less DMA mapping helpers

Keith Busch kbusch at meta.com
Wed Jun 25 13:44:44 PDT 2025


From: Keith Busch <kbusch at kernel.org>

This is much like the scatter-less DMA mapping helpers for request data,
but for integrity metadata instead. These helpers only use the direct
mapping path, as the virt_boundary queue limit used to check whether IOVA
coalescing is possible does not apply to metadata.
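
For reference, the expected calling pattern mirrors the existing
blk_rq_dma_map_iter_start/next helpers for request data. A minimal usage
sketch follows; foo_map_rq_metadata() and foo_add_meta_segment() are
hypothetical driver-side helpers, not part of this patch:

  /*
   * Example only: map all integrity metadata of @req for @dma_dev and
   * hand each mapped range to the made-up foo_add_meta_segment().
   * Error unwinding and unmapping are omitted for brevity.
   */
  static blk_status_t foo_map_rq_metadata(struct request *req,
                                          struct device *dma_dev)
  {
          struct blk_dma_iter iter;

          if (!blk_rq_integrity_dma_map_iter_start(req, dma_dev, &iter))
                  return iter.status;

          do {
                  /* iter.addr/iter.len describe one DMA-mapped range */
                  foo_add_meta_segment(req, iter.addr, iter.len);
          } while (blk_rq_integrity_dma_map_iter_next(req, dma_dev, &iter));

          /* iter.status is BLK_STS_OK once everything has been mapped */
          return iter.status;
  }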

Signed-off-by: Keith Busch <kbusch at kernel.org>
---
 block/blk-integrity.c         | 94 +++++++++++++++++++++++++++++++++++
 block/blk-mq-dma.c            |  9 +---
 block/blk.h                   | 10 ++++
 include/linux/blk-integrity.h |  6 +++
 4 files changed, 112 insertions(+), 7 deletions(-)

diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index e4e2567061f9d..e79df07d1151a 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -112,6 +112,100 @@ int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
 }
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);
 
+static void bio_integrity_advance_iter_single(struct bio *bio,
+						struct bvec_iter *iter,
+						struct bio_vec *bvec,
+						unsigned int bytes)
+{
+	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
+
+	iter->bi_sector += bytes / bi->tuple_size;
+	bvec_iter_advance(bvec, iter, bytes);
+}
+
+static bool blk_rq_integrity_map_iter_next(struct request *req,
+		struct req_iterator *iter, struct phys_vec *vec)
+{
+	struct bio_integrity_payload *bip = bio_integrity(iter->bio);
+	unsigned int max_size;
+	struct bio_vec bv;
+
+	if (!iter->iter.bi_size)
+		return false;
+
+	bv = mp_bvec_iter_bvec(bip->bip_vec, iter->iter);
+	vec->paddr = bvec_phys(&bv);
+	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
+	bv.bv_len = min(bv.bv_len, max_size);
+
+	bio_integrity_advance_iter_single(iter->bio, &iter->iter, bip->bip_vec, bv.bv_len);
+	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
+		struct bio_vec next;
+
+		if (!iter->iter.bi_size) {
+			if (!iter->bio->bi_next)
+				break;
+			iter->bio = iter->bio->bi_next;
+			iter->iter = bio_integrity(iter->bio)->bip_iter;
+		}
+
+		next = mp_bvec_iter_bvec(bio_integrity(iter->bio)->bip_vec, iter->iter);
+		if (bv.bv_len + next.bv_len > max_size ||
+		    !biovec_phys_mergeable(req->q, &bv, &next))
+			break;
+
+		bv.bv_len += next.bv_len;
+		bio_integrity_advance_iter_single(iter->bio, &iter->iter,
+				bio_integrity(iter->bio)->bip_vec, next.bv_len);
+	}
+
+	vec->len = bv.bv_len;
+	return true;
+}
+
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+		struct device *dma_dev, struct blk_dma_iter *iter)
+{
+	struct bio_integrity_payload *bip = bio_integrity(req->bio);
+	struct phys_vec vec;
+
+	iter->iter.bio = req->bio;
+	iter->iter.iter = bip->bip_iter;
+	memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
+	iter->status = BLK_STS_OK;
+
+	if (!blk_rq_integrity_map_iter_next(req, &iter->iter, &vec))
+		return false;
+
+	switch (pci_p2pdma_state(&iter->p2pdma, dma_dev,
+				phys_to_page(vec.paddr))) {
+	case PCI_P2PDMA_MAP_BUS_ADDR:
+		return blk_dma_map_bus(iter, &vec);
+	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+	case PCI_P2PDMA_MAP_NONE:
+		break;
+	default:
+		iter->status = BLK_STS_INVAL;
+		return false;
+	}
+
+	return blk_dma_map_direct(req, dma_dev, iter, &vec);
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_dma_map_iter_start);
+
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+		struct device *dma_dev, struct blk_dma_iter *iter)
+{
+	struct phys_vec vec;
+
+	if (!blk_rq_integrity_map_iter_next(req, &iter->iter, &vec))
+		return false;
+	if (iter->p2pdma.map == PCI_P2PDMA_MAP_BUS_ADDR)
+		return blk_dma_map_bus(iter, &vec);
+	return blk_dma_map_direct(req, dma_dev, iter, &vec);
+}
+EXPORT_SYMBOL_GPL(blk_rq_integrity_dma_map_iter_next);
+
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
 			      ssize_t bytes)
 {
diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index ad283017caef2..54c25e5e60d78 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -5,11 +5,6 @@
 #include <linux/blk-mq-dma.h>
 #include "blk.h"
 
-struct phys_vec {
-	phys_addr_t	paddr;
-	u32		len;
-};
-
 static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
 			      struct phys_vec *vec)
 {
@@ -77,14 +72,14 @@ static inline bool blk_can_dma_map_iova(struct request *req,
 		dma_get_merge_boundary(dma_dev));
 }
 
-static bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
+bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec)
 {
 	iter->addr = pci_p2pdma_bus_addr_map(&iter->p2pdma, vec->paddr);
 	iter->len = vec->len;
 	return true;
 }
 
-static bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
+bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
 		struct blk_dma_iter *iter, struct phys_vec *vec)
 {
 	iter->addr = dma_map_page(dma_dev, phys_to_page(vec->paddr),
diff --git a/block/blk.h b/block/blk.h
index 1141b343d0b5c..755975ddc3046 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -4,6 +4,7 @@
 
 #include <linux/bio-integrity.h>
 #include <linux/blk-crypto.h>
+#include <linux/blk-mq-dma.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
 #include <linux/sched/sysctl.h>
@@ -727,6 +728,15 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
 	      const struct blk_holder_ops *hops, struct file *bdev_file);
 int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);
 
+struct phys_vec {
+	phys_addr_t	paddr;
+	u32		len;
+};
+
+bool blk_dma_map_bus(struct blk_dma_iter *iter, struct phys_vec *vec);
+bool blk_dma_map_direct(struct request *req, struct device *dma_dev,
+		struct blk_dma_iter *iter, struct phys_vec *vec);
+
 void blk_integrity_generate(struct bio *bio);
 void blk_integrity_verify_iter(struct bio *bio, struct bvec_iter *saved_iter);
 void blk_integrity_prepare(struct request *rq);
diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h
index c7eae0bfb013f..8e2aeb5c13864 100644
--- a/include/linux/blk-integrity.h
+++ b/include/linux/blk-integrity.h
@@ -4,6 +4,7 @@
 
 #include <linux/blk-mq.h>
 #include <linux/bio-integrity.h>
+#include <linux/blk-mq-dma.h>
 
 struct request;
 
@@ -30,6 +31,11 @@ int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
 			      ssize_t bytes);
 
+bool blk_rq_integrity_dma_map_iter_start(struct request *req,
+		struct device *dma_dev, struct blk_dma_iter *iter);
+bool blk_rq_integrity_dma_map_iter_next(struct request *req,
+		struct device *dma_dev, struct blk_dma_iter *iter);
+
 static inline bool
 blk_integrity_queue_supports_integrity(struct request_queue *q)
 {
-- 
2.47.1