[RFC PATCH 1/5] nvme-pci: reduce blk_rq_nr_phys_segments calls
Chaitanya Kulkarni
chaitanya.kulkarni at wdc.com
Mon Jul 6 19:15:20 EDT 2020
In the fast path, blk_rq_nr_phys_segments() is called multiple times:
nvme_queue_rq() (1), nvme_map_data() (2) and nvme_pci_use_sgls() (1).
blk_rq_nr_phys_segments() contains an if check for the special payload,
so that check is repeated every time we call the function in the fast
path.
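For reference, the helper being called is roughly the following (from
include/linux/blkdev.h); the special-payload branch is the check
referred to above:

	static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
	{
		/* Data-less commands (e.g. discard) may carry a driver-added
		 * special payload; report one segment so it still gets mapped.
		 */
		if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
			return 1;
		return rq->nr_phys_segments;
	}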
In order to minimize these repetitive checks in the fast path, this
patch reduces the number of calls to one, in the parent function
nvme_queue_rq(), after nvme_setup_cmd() is called so that we have the
right nseg count (with write-zeroes using the discard quirk), and
adjusts the submission path code accordingly.
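A condensed sketch of the resulting submission path (details elided;
the authoritative change is the diff below):

	nseg = blk_rq_nr_phys_segments(req);	/* read once */
	if (nseg) {
		ret = nvme_map_data(dev, req, &cmnd, nseg);
		if (ret)
			goto out_free_cmd;
	}

nvme_map_data() then reuses the passed-in nseg for the single-segment
bvec fast path, sg_init_table() and nvme_pci_use_sgls() instead of
re-reading it.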
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
---
drivers/nvme/host/pci.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c283e8dbfb86..07ac28d7d66c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -495,10 +495,10 @@ static void **nvme_pci_iod_list(struct request *req)
return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+ unsigned short nseg)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
if (nseg == 0)
@@ -787,13 +787,13 @@ static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
}
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
- struct nvme_command *cmnd)
+ struct nvme_command *cmnd, unsigned short nseg)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret = BLK_STS_RESOURCE;
int nr_mapped;
- if (blk_rq_nr_phys_segments(req) == 1) {
+ if (nseg == 1) {
struct bio_vec bv = req_bvec(req);
if (!is_pci_p2pdma_page(bv.bv_page)) {
@@ -812,7 +812,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
if (!iod->sg)
return BLK_STS_RESOURCE;
- sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+ sg_init_table(iod->sg, nseg);
iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
if (!iod->nents)
goto out;
@@ -826,7 +826,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!nr_mapped)
goto out;
- iod->use_sgl = nvme_pci_use_sgls(dev, req);
+ iod->use_sgl = nvme_pci_use_sgls(dev, req, nseg);
if (iod->use_sgl)
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
else
@@ -860,6 +860,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_queue *nvmeq = hctx->driver_data;
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
+ unsigned short nseg;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_command cmnd;
blk_status_t ret;
@@ -879,8 +880,9 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, &cmnd);
+ nseg = blk_rq_nr_phys_segments(req);
+ if (nseg) {
+ ret = nvme_map_data(dev, req, &cmnd, nseg);
if (ret)
goto out_free_cmd;
}
--
2.22.0