[PATCH V3 08/10] nvme: rdma: replace blk_mq_rdma_map_queues with blk_mq_dev_map_queues
Ming Lei
ming.lei at redhat.com
Fri Jul 9 01:10:03 PDT 2021
Replace blk_mq_rdma_map_queues with blk_mq_dev_map_queues, which is more
generic from the blk-mq viewpoint, so we can unify all map-queue
implementations.
Meanwhile we can pass the 'use_manage_irq' info to blk-mq via
blk_mq_dev_map_queues(). This info need not be 100% accurate; all that
is required is that true is passed in if the HBA really uses managed
irqs.
Signed-off-by: Ming Lei <ming.lei at redhat.com>
---
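For reference, blk_mq_dev_map_queues() is introduced earlier in this
series. Inferred from the call sites in this patch, its prototype is
roughly the following; the parameter names here are illustrative, and
only the 'use_manage_irq' flag is described in the commit log:

	/* return the irq affinity mask for a given queue, or NULL */
	typedef const struct cpumask *(*get_queue_affinity_fn)(void *dev_data,
			int dev_off, int queue);

	int blk_mq_dev_map_queues(struct blk_mq_queue_map *qmap,
			void *dev_data, int dev_off,
			get_queue_affinity_fn get_affinity,
			bool use_manage_irq, bool is_poll);

The callback returns the irq affinity mask of the vector backing the
given queue, or NULL if there is none, so blk-mq itself no longer needs
bus-specific helpers such as blk_mq_rdma_map_queues().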
drivers/nvme/host/rdma.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index a9e70cefd7ed..dc47df03a39a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2169,6 +2169,14 @@ static void nvme_rdma_complete_rq(struct request *rq)
 	nvme_complete_rq(rq);
 }
 
+static const struct cpumask *nvme_rdma_get_queue_affinity(
+		void *dev_data, int offset, int queue)
+{
+	struct ib_device *dev = dev_data;
+
+	return ib_get_vector_affinity(dev, offset + queue);
+}
+
 static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
@@ -2192,10 +2200,12 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 			ctrl->io_queues[HCTX_TYPE_DEFAULT];
 		set->map[HCTX_TYPE_READ].queue_offset = 0;
 	}
-	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
-			ctrl->device->dev, 0);
-	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
-			ctrl->device->dev, 0);
+	blk_mq_dev_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+			ctrl->device->dev, 0, nvme_rdma_get_queue_affinity,
+			true, false);
+	blk_mq_dev_map_queues(&set->map[HCTX_TYPE_READ],
+			ctrl->device->dev, 0, nvme_rdma_get_queue_affinity,
+			true, false);
 
 	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
 		/* map dedicated poll queues only if we have queues left */
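(For context: the mapping such a generic helper has to perform with the
callback is essentially what blk_mq_rdma_map_queues() used to do. A
minimal sketch, assuming the illustrative prototype shown earlier and
not the actual implementation from this series:

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		const struct cpumask *mask;
		unsigned int cpu;

		/* irq affinity mask of the vector backing this hw queue */
		mask = get_affinity(dev_data, dev_off, queue);
		/* no affinity info (e.g. no managed irqs): fall back to
		 * the generic CPU-to-queue spreading */
		if (!mask)
			return blk_mq_map_queues(qmap);

		/* map every CPU in the vector's mask to this hw queue */
		for_each_cpu(cpu, mask)
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	return 0;

Every CPU in a vector's affinity mask is mapped to the hw queue backed
by that vector, with a fallback to the default spreading when no
affinity information is available.)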
--
2.31.1