[RFC 3/3] nvme: create special request queue for cdev

Keith Busch kbusch at meta.com
Mon May 1 08:33:06 PDT 2023


From: Keith Busch <kbusch at kernel.org>

The cdev only services passthrough commands, which don't merge, track
stats, or need accounting. Give it a special request queue with all of
these features disabled so we're not adding overhead for them.

Signed-off-by: Keith Busch <kbusch at kernel.org>
---
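Not part of the change itself, just a sketch for reviewers of the
lifecycle the new cdev_queue follows: a queue created on top of the
controller's existing tagset (so no extra tags are allocated), with
merging, stats and accounting turned off, and torn down with the usual
destroy + put pair. The helper names below are made up purely for
illustration and do not exist in the driver.

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	static struct request_queue *
	nvme_alloc_cdev_queue(struct blk_mq_tag_set *set, void *queuedata)
	{
		struct request_queue *q;

		/* Reuse the controller's tagset; no new tags needed. */
		q = blk_mq_init_queue(set);
		if (IS_ERR(q))
			return q;

		/* Passthrough only: no merging, no stats, no accounting. */
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
		blk_queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);

		q->queuedata = queuedata;
		return q;
	}

	static void nvme_free_cdev_queue(struct request_queue *q)
	{
		/* Mark the queue dying and drain it, then drop the
		 * reference taken by blk_mq_init_queue().
		 */
		blk_mq_destroy_queue(q);
		blk_put_queue(q);
	}
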
 drivers/nvme/host/core.c  | 29 +++++++++++++++++++++++++----
 drivers/nvme/host/ioctl.c |  4 ++--
 drivers/nvme/host/nvme.h  |  1 +
 3 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0f1cb6f418182..5bb05c91d866d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4071,21 +4071,39 @@ static const struct file_operations nvme_ns_chr_fops = {
 
 static int nvme_add_ns_cdev(struct nvme_ns *ns)
 {
+	struct nvme_ctrl *ctrl = ns->ctrl;
+	struct request_queue *q;
 	int ret;
 
-	ns->cdev_device.parent = ns->ctrl->device;
+	ns->cdev_device.parent = ctrl->device;
 	ret = dev_set_name(&ns->cdev_device, "ng%dn%d",
-			   ns->ctrl->instance, ns->head->instance);
+			   ctrl->instance, ns->head->instance);
 	if (ret)
 		return ret;
 
 	ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
-			     ns->ctrl->ops->module);
+			     ctrl->ops->module);
 	if (ret)
 		goto out_free_name;
 
+	q = blk_mq_init_queue(ctrl->tagset);
+	if (IS_ERR(q)) {
+		ret = PTR_ERR(q);
+		goto out_free_cdev;
+	}
+
+	blk_queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+	blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+	q->queuedata = ns;
+	ns->cdev_queue = q;
+
 	return 0;
 
+out_free_cdev:
+	nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+	return ret;
+
 out_free_name:
 	kfree_const(ns->cdev_device.kobj.name);
 	return ret;
@@ -4399,8 +4417,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	/* guarantee not available in head->list */
 	synchronize_srcu(&ns->head->srcu);
 
-	if (!nvme_ns_head_multipath(ns->head))
+	if (!nvme_ns_head_multipath(ns->head)) {
+		blk_mq_destroy_queue(ns->cdev_queue);
+		blk_put_queue(ns->cdev_queue);
 		nvme_cdev_del(&ns->cdev, &ns->cdev_device);
+	}
 	del_gendisk(ns->disk);
 
 	down_write(&ns->ctrl->namespaces_rwsem);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 3804e5205b42b..bf4fcb5d270e9 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -553,7 +553,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 {
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 	const struct nvme_uring_cmd *cmd = ioucmd->cmd;
-	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
+	struct request_queue *q = ns ? ns->cdev_queue : ctrl->admin_q;
 	struct nvme_uring_data d;
 	struct nvme_command c;
 	struct request *req;
@@ -791,7 +791,7 @@ int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
 	bio = READ_ONCE(ioucmd->cookie);
 	ns = container_of(file_inode(ioucmd->file)->i_cdev,
 			struct nvme_ns, cdev);
-	q = ns->queue;
+	q = ns->cdev_queue;
 	if (test_bit(QUEUE_FLAG_POLL, &q->queue_flags) && bio && bio->bi_bdev)
 		ret = bio_poll(bio, iob, poll_flags);
 	rcu_read_unlock();
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bf46f122e9e1e..d837c118f4f18 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -495,6 +495,7 @@ struct nvme_ns {
 
 	struct cdev		cdev;
 	struct device		cdev_device;
+	struct request_queue	*cdev_queue;
 
 	struct nvme_fault_inject fault_inject;
 
-- 
2.34.1