[PATCH v2 2/3] nvme-pci: data/meta transfer via SGL
Kanchan Joshi
joshi.k at samsung.com
Fri Oct 20 06:28:36 PDT 2023
If the NVME_REQ_FORCE_SGL flag is set, map both the data and the
metadata transfer via SGL. A new DMA pool is used to allocate the
per-I/O metadata SGL descriptor.
This is a preparatory patch to enable unprivileged passthrough via SGL.
Signed-off-by: Kanchan Joshi <joshi.k at samsung.com>
---
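Note for reviewers: a minimal sketch of how a submission path could opt
into the SGL-only mapping added here. The wrapper below is illustrative
only and not part of this series; NVME_REQ_FORCE_SGL itself comes from
patch 1/3.

  /*
   * Illustrative helper (drivers/nvme/host context, not in this series):
   * request SGL-only data/metadata mapping, provided the controller
   * advertises SGL support. nvme_map_data()/nvme_map_metadata() then
   * take the SGL path regardless of sgl_threshold.
   */
  static bool nvme_req_force_sgl(struct nvme_ctrl *ctrl, struct request *req)
  {
  	if (!nvme_ctrl_sgl_supported(ctrl))
  		return false;
  	nvme_req(req)->flags |= NVME_REQ_FORCE_SGL;
  	return true;
  }
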
drivers/nvme/host/nvme.h | 5 ++++
drivers/nvme/host/pci.c | 54 ++++++++++++++++++++++++++++++++++++----
2 files changed, 54 insertions(+), 5 deletions(-)
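For reference, each allocation from the new meta_sgl_pool holds a single
SGL data block descriptor (struct nvme_sgl_desc from
include/linux/nvme.h), hence the 16-byte pool size and alignment:

  struct nvme_sgl_desc {
  	__le64	addr;
  	__le32	length;
  	__u8	rsvd[3];
  	__u8	type;
  };

Its DMA address is placed in MPTR and NVME_CMD_SGL_METASEG is set so the
controller interprets MPTR as pointing to an SGL descriptor rather than
to a contiguous metadata buffer.
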
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 141e1b1a897a..b0e234509049 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1045,6 +1045,11 @@ static inline void nvme_start_request(struct request *rq)
blk_mq_start_request(rq);
}
+static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
+{
+ return ctrl->sgls & (1 << 19);
+}
+
static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
return ctrl->sgls & ((1 << 0) | (1 << 1));
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 60a08dfe8d75..5122af327952 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -123,6 +123,7 @@ struct nvme_dev {
struct device *dev;
struct dma_pool *prp_page_pool;
struct dma_pool *prp_small_pool;
+ struct dma_pool *meta_sgl_pool;
unsigned online_queues;
unsigned max_qid;
unsigned io_queues[HCTX_MAX_TYPES];
@@ -236,6 +237,8 @@ struct nvme_iod {
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
dma_addr_t meta_dma;
+ dma_addr_t meta_dma_sg;
+ struct nvme_sgl_desc *meta_sgl;
struct sg_table sgt;
union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
};
@@ -772,18 +775,20 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
int rc;
+ bool force_sgl = nvme_req(req)->flags & NVME_REQ_FORCE_SGL;
if (blk_rq_nr_phys_segments(req) == 1) {
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+ if (!force_sgl &&
+ bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
return nvme_setup_prp_simple(dev, req,
&cmnd->rw, &bv);
- if (nvmeq->qid && sgl_threshold &&
- nvme_ctrl_sgl_supported(&dev->ctrl))
+ if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)
+ && (sgl_threshold || force_sgl))
return nvme_setup_sgl_simple(dev, req,
&cmnd->rw, &bv);
}
@@ -806,7 +811,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_free_sg;
}
- if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
+ if (force_sgl || nvme_pci_use_sgls(dev, req, iod->sgt.nents))
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
@@ -825,13 +830,38 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ bool force_sgl = nvme_req(req)->flags & NVME_REQ_FORCE_SGL;
+ blk_status_t ret;
iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
rq_dma_dir(req), 0);
if (dma_mapping_error(dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
- cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+
+ if (!force_sgl) {
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ return BLK_STS_OK;
+ }
+
+ iod->meta_sgl = dma_pool_alloc(dev->meta_sgl_pool, GFP_KERNEL,
+ &iod->meta_dma_sg);
+ if (!iod->meta_sgl) {
+ ret = BLK_STS_IOERR;
+ goto out_unmap;
+ }
+
+ iod->meta_sgl->addr = cpu_to_le64(iod->meta_dma);
+ iod->meta_sgl->length = cpu_to_le32(rq_integrity_vec(req)->bv_len);
+ iod->meta_sgl->type = NVME_SGL_FMT_DATA_DESC << 4;
+ cmnd->rw.metadata = cpu_to_le64(iod->meta_dma_sg);
+ cmnd->rw.flags = NVME_CMD_SGL_METASEG;
+
return BLK_STS_OK;
+
+out_unmap:
+ dma_unmap_page(dev->dev, iod->meta_dma,
+ rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+ return ret;
}
static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
@@ -967,6 +997,11 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
dma_unmap_page(dev->dev, iod->meta_dma,
rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
+
+ if (nvme_req(req)->flags & NVME_REQ_FORCE_SGL)
+ dma_pool_free(dev->meta_sgl_pool,
+ (void *)iod->meta_sgl,
+ iod->meta_dma_sg);
}
if (blk_rq_nr_phys_segments(req))
@@ -2643,6 +2678,14 @@ static int nvme_setup_prp_pools(struct nvme_dev *dev)
dma_pool_destroy(dev->prp_page_pool);
return -ENOMEM;
}
+ /* for metadata sgl */
+ dev->meta_sgl_pool = dma_pool_create("meta sg 16", dev->dev, 16, 16, 0);
+ if (!dev->meta_sgl_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -2650,6 +2693,7 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
{
dma_pool_destroy(dev->prp_page_pool);
dma_pool_destroy(dev->prp_small_pool);
+ dma_pool_destroy(dev->meta_sgl_pool);
}
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
--
2.25.1