[PATCH 1/2] blk-integrity: add scatter-less DMA mapping helpers

Christoph Hellwig hch at lst.de
Wed Jun 25 22:02:02 PDT 2025


On Wed, Jun 25, 2025 at 03:17:32PM -0600, Keith Busch wrote:
> > +		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
> 
> And then this should be:
> 
> 		next = mp_bvec_iter_bvec(bip->bip_vec, iter->iter);
> 
> Obviously I didn't test merging bio's into plugged requests...

So, testing was the main reason I skipped this in the initial
conversion, as I could not come up with good coverage of multi-segment
integrity metadata.  Anuj promised we'd get good test coverage once the
PI query ioctl lands, so the plan was to do it just after that.

I had written some code before I realized that, and it never really got
finished, but one thing I did was to try to figure out how to implement
the iterator without too much code duplication.  The below is what I came
up with - it adds two branches to the fast path, but otherwise shares the
entire dma iterator and I think also makes it very clear which bio_vec
table to use.  Maybe this is useful for the next version?  My next step
would have been to convert the scatterlist mapping to use the new common
helper and unify the code with the data mapping, and then implement the
new API (hopefully also sharing most of the code from the data mapping).

diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c
index 82bae475dfa4..24667d199525 100644
--- a/block/blk-mq-dma.c
+++ b/block/blk-mq-dma.c
@@ -10,28 +10,34 @@ struct phys_vec {
 };
 
 static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
-			      struct phys_vec *vec)
+			      struct phys_vec *vec, bool integrity)
 {
+	struct bio_vec *base;
 	unsigned int max_size;
 	struct bio_vec bv;
 
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		if (!iter->bio)
-			return false;
-		vec->paddr = bvec_phys(&req->special_vec);
-		vec->len = req->special_vec.bv_len;
-		iter->bio = NULL;
-		return true;
+	if (integrity) {
+		base = iter->bio->bi_integrity->bip_vec;
+	} else {
+		if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
+			if (!iter->bio)
+				return false;
+			vec->paddr = bvec_phys(&req->special_vec);
+			vec->len = req->special_vec.bv_len;
+			iter->bio = NULL;
+			return true;
+		}
+		base = iter->bio->bi_io_vec;
 	}
 
 	if (!iter->iter.bi_size)
 		return false;
 
-	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	bv = mp_bvec_iter_bvec(base, iter->iter);
 	vec->paddr = bvec_phys(&bv);
 	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
 	bv.bv_len = min(bv.bv_len, max_size);
-	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
+	bvec_iter_advance_single(base, &iter->iter, bv.bv_len);
 
 	/*
 	 * If we are entirely done with this bi_io_vec entry, check if the next
@@ -46,15 +52,19 @@ static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
 				break;
 			iter->bio = iter->bio->bi_next;
 			iter->iter = iter->bio->bi_iter;
+			if (integrity)
+				base = iter->bio->bi_integrity->bip_vec;
+			else
+				base = iter->bio->bi_io_vec;
 		}
 
-		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+		next = mp_bvec_iter_bvec(base, iter->iter);
 		if (bv.bv_len + next.bv_len > max_size ||
 		    !biovec_phys_mergeable(req->q, &bv, &next))
 			break;
 
 		bv.bv_len += next.bv_len;
-		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
+		bvec_iter_advance_single(base, &iter->iter, next.bv_len);
 	}
 
 	vec->len = bv.bv_len;
@@ -95,7 +105,7 @@ int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
 	if (iter.bio)
 		iter.iter = iter.bio->bi_iter;
 
-	while (blk_map_iter_next(rq, &iter, &vec)) {
+	while (blk_map_iter_next(rq, &iter, &vec, false)) {
 		*last_sg = blk_next_sg(last_sg, sglist);
 		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
 				offset_in_page(vec.paddr));



More information about the Linux-nvme mailing list