[RFC 06/12] nvme-pci: add support for user passed dma vectors

Pavel Begunkov asml.silence at gmail.com
Fri Jun 27 08:10:33 PDT 2025


Implement the ->get_dma_device blk-mq callback and add BIO_DMAVEC handling.
If the driver sees BIO_DMAVEC, instead of mapping pages it directly
populates the PRP list with the dma addresses provided by the caller.

Suggested-by: Keith Busch <kbusch at kernel.org>
Signed-off-by: Pavel Begunkov <asml.silence at gmail.com>
---
 drivers/nvme/host/pci.c | 173 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 173 insertions(+)
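
For reference, a rough sketch of the caller side ("pages", "nr", "dir"
and "q" are placeholders here; the actual premapping helpers are wired
up by other patches in the series, this only shows the shape of the
interface):

	struct device *dma_dev = q->mq_ops->get_dma_device(q);
	struct dmavec *dmav = kmalloc_array(nr, sizeof(*dmav), GFP_KERNEL);
	int i;

	/* premap once, reuse the addresses for any number of bios */
	for (i = 0; i < nr; i++) {
		dmav[i].addr = dma_map_page(dma_dev, pages[i], 0,
					    PAGE_SIZE, dir);
		if (dma_mapping_error(dma_dev, dmav[i].addr))
			goto err_unmap;
		dmav[i].len = PAGE_SIZE;
	}
	bio->bi_dmavec = dmav;
	bio_set_flag(bio, BIO_DMAVEC);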

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8ff12e415cb5..44a6366f2d9a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -637,11 +637,66 @@ static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req)
 	}
 }
 
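+/*
+ * Sync the premapped dma ranges covered by the request. The caller owns
+ * the mapping, only cache maintenance may be needed here.
+ */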
+static void nvme_sync_dma(struct nvme_dev *nvme_dev, struct request *req,
+			  enum dma_data_direction dir)
+{
+	bool for_cpu = dir == DMA_FROM_DEVICE;
+	struct device *dev = nvme_dev->dev;
+	struct bio *bio = req->bio;
+	int offset, length;
+	struct dmavec *dmav;
+
+	if (!dma_dev_need_sync(dev))
+		return;
+
+	offset = bio->bi_iter.bi_bvec_done;
+	length = blk_rq_payload_bytes(req);
+	dmav = &bio->bi_dmavec[bio->bi_iter.bi_idx];
+
+	while (length) {
+		u64 dma_addr = dmav->addr + offset;
+		int dma_len = min(dmav->len - offset, length);
+
+		if (for_cpu)
+			__dma_sync_single_for_cpu(dev, dma_addr, dma_len, dir);
+		else
+			__dma_sync_single_for_device(dev, dma_addr,
+						     dma_len, dir);
+
+		length -= dma_len;
+		offset = 0;
+		dmav++;
+	}
+}
+
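+/* Sync for the CPU and free PRP descriptors; the mapping itself stays */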
+static void nvme_unmap_premapped_data(struct nvme_dev *dev,
+				      struct nvme_queue *nvmeq,
+				      struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	if (rq_data_dir(req) == READ)
+		nvme_sync_dma(dev, req, DMA_FROM_DEVICE);
+
+	if (!iod->dma_len)
+		nvme_free_descriptors(nvmeq, req);
+}
+
 static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 			    struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
+	if (req->bio && bio_flagged(req->bio, BIO_DMAVEC)) {
+		nvme_unmap_premapped_data(dev, nvmeq, req);
+		return;
+	}
+
 	if (iod->dma_len) {
 		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
 			       rq_dma_dir(req));
@@ -846,6 +901,110 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
 	return BLK_STS_OK;
 }
 
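+/*
+ * Set up PRPs straight from the caller provided dma addresses instead of
+ * dma mapping the bio pages. Mirrors nvme_pci_setup_prps().
+ */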
+static blk_status_t nvme_dma_premapped(struct nvme_dev *dev, struct request *req,
+				   struct nvme_queue *nvmeq,
+				   struct nvme_rw_command *cmnd)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	int length = blk_rq_payload_bytes(req);
+	u64 dma_addr, first_dma_addr;
+	struct bio *bio = req->bio;
+	int dma_len, offset;
+	struct dmavec *dmav;
+	dma_addr_t prp_dma;
+	__le64 *prp_list;
+	int i;
+
+	if (rq_data_dir(req) == WRITE)
+		nvme_sync_dma(dev, req, DMA_TO_DEVICE);
+
+	offset = bio->bi_iter.bi_bvec_done;
+	dmav = &bio->bi_dmavec[bio->bi_iter.bi_idx];
+	dma_addr = dmav->addr + offset;
+	dma_len = dmav->len - offset;
+	first_dma_addr = dma_addr;
+	offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+
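+	/* PRP1 covers the range up to the first NVME_CTRL_PAGE_SIZE boundary */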
+	length -= (NVME_CTRL_PAGE_SIZE - offset);
+	if (length <= 0) {
+		iod->first_dma = 0;
+		goto done;
+	}
+
+	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+	if (dma_len) {
+		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+	} else {
+		dmav++;
+		dma_addr = dmav->addr;
+		dma_len = dmav->len;
+	}
+
+	if (length <= NVME_CTRL_PAGE_SIZE) {
+		iod->first_dma = dma_addr;
+		goto done;
+	}
+
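+	/* Use the small pool when all remaining PRPs fit in one small descriptor */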
+	if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <=
+	    NVME_SMALL_POOL_SIZE / sizeof(__le64))
+		iod->flags |= IOD_SMALL_DESCRIPTOR;
+
+	prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
+			&prp_dma);
+	if (!prp_list)
+		return BLK_STS_RESOURCE;
+
+	iod->descriptors[iod->nr_descriptors++] = prp_list;
+	iod->first_dma = prp_dma;
+	i = 0;
+	for (;;) {
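+		/* The current PRP list page is full, chain a new one */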
+		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+			__le64 *old_prp_list = prp_list;
+
+			prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
+					GFP_ATOMIC, &prp_dma);
+			if (!prp_list)
+				goto free_prps;
+			iod->descriptors[iod->nr_descriptors++] = prp_list;
+			prp_list[0] = old_prp_list[i - 1];
+			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+			i = 1;
+		}
+
+		prp_list[i++] = cpu_to_le64(dma_addr);
+		dma_len -= NVME_CTRL_PAGE_SIZE;
+		dma_addr += NVME_CTRL_PAGE_SIZE;
+		length -= NVME_CTRL_PAGE_SIZE;
+		if (length <= 0)
+			break;
+		if (dma_len > 0)
+			continue;
+		if (unlikely(dma_len < 0))
+			goto bad_sgl;
+		dmav++;
+		dma_addr = dmav->addr;
+		dma_len = dmav->len;
+	}
+done:
+	cmnd->dptr.prp1 = cpu_to_le64(first_dma_addr);
+	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+	return BLK_STS_OK;
+free_prps:
+	nvme_free_descriptors(nvmeq, req);
+	return BLK_STS_RESOURCE;
+bad_sgl:
+	WARN_ONCE(1, "Invalid dmavec setup for payload:%d\n",
+			blk_rq_payload_bytes(req));
+	return BLK_STS_IOERR;
+}
+
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
@@ -854,6 +1013,10 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	blk_status_t ret = BLK_STS_RESOURCE;
 	int rc;
 
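+	/* The bio is already dma mapped by the caller, only set up the PRPs */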
+	if (req->bio && bio_flagged(req->bio, BIO_DMAVEC))
+		return nvme_dma_premapped(dev, req, nvmeq, &cmnd->rw);
+
 	if (blk_rq_nr_phys_segments(req) == 1) {
 		struct bio_vec bv = req_bvec(req);
 
@@ -1874,6 +2037,15 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
 	return result;
 }
 
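+/* Expose the device that callers should use to premap dma vectors */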
+static struct device *nvme_pci_get_dma_device(struct request_queue *q)
+{
+	struct nvme_ns *ns = q->queuedata;
+	struct nvme_dev *dev = to_nvme_dev(ns->ctrl);
+
+	return dev->dev;
+}
+
 static const struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.complete	= nvme_pci_complete_rq,
@@ -1892,6 +2064,7 @@ static const struct blk_mq_ops nvme_mq_ops = {
 	.map_queues	= nvme_pci_map_queues,
 	.timeout	= nvme_timeout,
 	.poll		= nvme_poll,
+	.get_dma_device = nvme_pci_get_dma_device,
 };
 
 static void nvme_dev_remove_admin(struct nvme_dev *dev)
-- 
2.49.0