[RFC v1 3/3] nvme-fabrics: move configure admin queue code to fabrics.c

Daniel Wagner dwagner at suse.de
Wed Mar 1 00:27:37 PST 2023


The tcp and rdma transports share a lot of common code which handles
the controller states and is not transport specific.

The fabrics.c file contains helper functions to be used by the
transports. This limits what the common code in fabrics.c can do: any
transport specific allocation/initialization (e.g. the command size for
the admin_tag_set) has either to be parameterized via the function call
or handled through callbacks.

The third option is to introduce an intermediate type, e.g. struct
nvme_fabrics_ctrl, but that would mean a lot of refactoring and
restructuring just to avoid passing arguments to functions.

The callback approach has another benefit: it allows adding hooks to
the generic code where the driver can allocate additional transport
specific resources.
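
For illustration, a transport is expected to wire up the new ops
roughly as sketched below. This is not part of the patch; the
nvme_foo_* names are hypothetical placeholders, and only struct
nvme_fabrics_ops, the fabrics_ops pointer in struct nvme_ctrl and
nvmf_configure_admin_queue() are introduced here:

/*
 * Sketch only: "foo" is a hypothetical transport. The ops table and
 * nvmf_configure_admin_queue() come from this patch, everything else
 * is a placeholder.
 */
static struct nvme_fabrics_ops nvme_foo_fabrics_ops = {
	.alloc_queue		= nvme_foo_alloc_queue,
	.free_queue		= nvme_foo_free_queue,
	.init_queue		= nvme_foo_init_queue,
	.deinit_queue		= nvme_foo_deinit_queue,
	.start_queue		= nvme_foo_start_queue,
	.stop_queue		= nvme_foo_stop_queue,
	.alloc_admin_tag_set	= nvme_foo_alloc_admin_tag_set,
};

static int nvme_foo_setup_ctrl(struct nvme_foo_ctrl *ctrl, bool new)
{
	/*
	 * The real transports assign this at controller creation time,
	 * next to ctrl->ctrl.opts.
	 */
	ctrl->ctrl.fabrics_ops = &nvme_foo_fabrics_ops;

	/*
	 * The generic helper drives the admin queue bring-up and the
	 * error unwinding; the transport only supplies the callbacks
	 * above.
	 */
	return nvmf_configure_admin_queue(&ctrl->ctrl, new);
}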

Signed-off-by: Daniel Wagner <dwagner at suse.de>
---
 drivers/nvme/host/fabrics.c | 52 ++++++++++++++++++++
 drivers/nvme/host/fabrics.h | 12 +++++
 drivers/nvme/host/nvme.h    |  3 ++
 drivers/nvme/host/rdma.c    | 77 +++++++++---------------------
 drivers/nvme/host/tcp.c     | 94 +++++++++++--------------------------
 5 files changed, 115 insertions(+), 123 deletions(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index bbaa04a0c502..b09195c4a366 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -1134,6 +1134,58 @@ nvmf_create_ctrl(struct device *dev, const char *buf)
 	return ERR_PTR(ret);
 }
 
+int nvmf_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+{
+	int error;
+
+	error = ctrl->fabrics_ops->alloc_queue(ctrl, 0);
+	if (error)
+		return error;
+
+	error = ctrl->fabrics_ops->init_queue(ctrl, 0);
+	if (error)
+		goto out_free_queue;
+
+	if (new) {
+		error = ctrl->fabrics_ops->alloc_admin_tag_set(ctrl);
+		if (error)
+			goto out_deinit_admin_queue;
+
+	}
+
+	error = ctrl->fabrics_ops->start_queue(ctrl, 0);
+	if (error)
+		goto out_remove_admin_tag_set;
+
+	error = nvme_enable_ctrl(ctrl);
+	if (error)
+		goto out_stop_queue;
+
+	nvme_unquiesce_admin_queue(ctrl);
+
+	error = nvme_init_ctrl_finish(ctrl, false);
+	if (error)
+		goto out_quiesce_queue;
+
+	return 0;
+
+out_quiesce_queue:
+	nvme_quiesce_admin_queue(ctrl);
+	blk_sync_queue(ctrl->admin_q);
+out_stop_queue:
+	ctrl->fabrics_ops->stop_queue(ctrl, 0);
+	nvme_cancel_admin_tagset(ctrl);
+out_remove_admin_tag_set:
+	if (new)
+		nvme_remove_admin_tag_set(ctrl);
+out_deinit_admin_queue:
+	ctrl->fabrics_ops->deinit_queue(ctrl, 0);
+out_free_queue:
+	ctrl->fabrics_ops->free_queue(ctrl, 0);
+	return error;
+}
+EXPORT_SYMBOL_GPL(nvmf_configure_admin_queue);
+
 static struct class *nvmf_class;
 static struct device *nvmf_device;
 static DEFINE_MUTEX(nvmf_dev_mutex);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a6e22116e139..30b7e388c8e8 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -172,6 +172,16 @@ struct nvmf_transport_ops {
 					struct nvmf_ctrl_options *opts);
 };
 
+struct nvme_fabrics_ops {
+	int 	(*alloc_queue)(struct nvme_ctrl *ctrl, int qid);
+	void	(*free_queue)(struct nvme_ctrl *ctrl, int qid);
+	int	(*init_queue)(struct nvme_ctrl *ctrl, int qid);
+	void	(*deinit_queue)(struct nvme_ctrl *ctrl, int qid);
+	int	(*start_queue)(struct nvme_ctrl *ctrl, int qid);
+	void	(*stop_queue)(struct nvme_ctrl *ctrl, int qid);
+	int	(*alloc_admin_tag_set)(struct nvme_ctrl *ctrl);
+};
+
 static inline bool
 nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 			struct nvmf_ctrl_options *opts)
@@ -214,5 +224,7 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
+int nvmf_configure_admin_queue(struct nvme_ctrl *ctrl, bool new);
+
 
 #endif /* _NVME_FABRICS_H */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bf46f122e9e1..e58211802031 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -244,6 +244,8 @@ enum nvme_ctrl_flags {
 	NVME_CTRL_STOPPED		= 3,
 };
 
+struct nvme_fabrics_ops;
+
 struct nvme_ctrl {
 	bool comp_seen;
 	enum nvme_ctrl_state state;
@@ -251,6 +253,7 @@ struct nvme_ctrl {
 	spinlock_t lock;
 	struct mutex scan_lock;
 	const struct nvme_ctrl_ops *ops;
+	const struct nvme_fabrics_ops *fabrics_ops;
 	struct request_queue *admin_q;
 	struct request_queue *connect_q;
 	struct request_queue *fabrics_q;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d070d84b204e..0347d7909b1d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -887,61 +887,6 @@ static void nvme_rdma_deinit_queue(struct nvme_ctrl *nctrl, int qid)
 	}
 }
 
-static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
-		bool new)
-{
-	int error;
-
-	error = nvme_rdma_alloc_queue(&ctrl->ctrl, 0);
-	if (error)
-		return error;
-
-	error = nvme_rdma_init_queue(&ctrl->ctrl, 0);
-	if (error)
-		goto out_free_queue;
-
-	if (new) {
-		error = nvme_alloc_admin_tag_set(&ctrl->ctrl,
-				&ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops,
-				sizeof(struct nvme_rdma_request) +
-				NVME_RDMA_DATA_SGL_SIZE);
-		if (error)
-			goto out_deinit_admin_queue;
-
-	}
-
-	error = nvme_rdma_start_queue(&ctrl->ctrl, 0);
-	if (error)
-		goto out_remove_admin_tag_set;
-
-	error = nvme_enable_ctrl(&ctrl->ctrl);
-	if (error)
-		goto out_stop_queue;
-
-	nvme_unquiesce_admin_queue(&ctrl->ctrl);
-
-	error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
-	if (error)
-		goto out_quiesce_queue;
-
-	return 0;
-
-out_quiesce_queue:
-	nvme_quiesce_admin_queue(&ctrl->ctrl);
-	blk_sync_queue(ctrl->ctrl.admin_q);
-out_stop_queue:
-	nvme_rdma_stop_queue(&ctrl->ctrl, 0);
-	nvme_cancel_admin_tagset(&ctrl->ctrl);
-out_remove_admin_tag_set:
-	if (new)
-		nvme_remove_admin_tag_set(&ctrl->ctrl);
-out_deinit_admin_queue:
-	nvme_rdma_deinit_queue(&ctrl->ctrl, 0);
-out_free_queue:
-	nvme_rdma_free_queue(&ctrl->ctrl, 0);
-	return error;
-}
-
 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret, nr_queues;
@@ -1081,12 +1026,21 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
 	}
 }
 
+static int nvme_rdma_alloc_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+
+	return nvme_alloc_admin_tag_set(ctrl, &to_rdma_ctrl(ctrl)->admin_tag_set,
+					&nvme_rdma_admin_mq_ops,
+					sizeof(struct nvme_rdma_request) +
+					   NVME_RDMA_DATA_SGL_SIZE);
+}
+
 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 {
 	int ret;
 	bool changed;
 
-	ret = nvme_rdma_configure_admin_queue(ctrl, new);
+	ret = nvmf_configure_admin_queue(&ctrl->ctrl, new);
 	if (ret)
 		return ret;
 
@@ -2330,6 +2284,16 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
 	return found;
 }
 
+static struct nvme_fabrics_ops nvme_rdma_fabrics_ops = {
+	.alloc_queue		= nvme_rdma_alloc_queue,
+	.free_queue		= nvme_rdma_free_queue,
+	.init_queue		= nvme_rdma_init_queue,
+	.deinit_queue		= nvme_rdma_deinit_queue,
+	.start_queue		= nvme_rdma_start_queue,
+	.stop_queue		= nvme_rdma_stop_queue,
+	.alloc_admin_tag_set	= nvme_rdma_alloc_admin_tag_set,
+};
+
 static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		struct nvmf_ctrl_options *opts)
 {
@@ -2341,6 +2305,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	if (!ctrl)
 		return ERR_PTR(-ENOMEM);
 	ctrl->ctrl.opts = opts;
+	ctrl->ctrl.fabrics_ops = &nvme_rdma_fabrics_ops;
 	INIT_LIST_HEAD(&ctrl->list);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d6100a787d39..fdb8b6d0c3e1 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1740,25 +1740,6 @@ static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
 	return ret;
 }
 
-static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
-{
-	int ret;
-
-	ret = nvme_tcp_alloc_queue(ctrl, 0);
-	if (ret)
-		return ret;
-
-	ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
-	if (ret)
-		goto out_free_queue;
-
-	return 0;
-
-out_free_queue:
-	nvme_tcp_free_queue(ctrl, 0);
-	return ret;
-}
-
 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
 {
 	int i, ret;
@@ -1932,53 +1913,6 @@ static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
 	nvme_tcp_free_admin_queue(ctrl);
 }
 
-static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
-{
-	int error;
-
-	error = nvme_tcp_alloc_admin_queue(ctrl);
-	if (error)
-		return error;
-
-	if (new) {
-		error = nvme_alloc_admin_tag_set(ctrl,
-				&to_tcp_ctrl(ctrl)->admin_tag_set,
-				&nvme_tcp_admin_mq_ops,
-				sizeof(struct nvme_tcp_request));
-		if (error)
-			goto out_free_queue;
-	}
-
-	error = nvme_tcp_start_queue(ctrl, 0);
-	if (error)
-		goto out_cleanup_tagset;
-
-	error = nvme_enable_ctrl(ctrl);
-	if (error)
-		goto out_stop_queue;
-
-	nvme_unquiesce_admin_queue(ctrl);
-
-	error = nvme_init_ctrl_finish(ctrl, false);
-	if (error)
-		goto out_quiesce_queue;
-
-	return 0;
-
-out_quiesce_queue:
-	nvme_quiesce_admin_queue(ctrl);
-	blk_sync_queue(ctrl->admin_q);
-out_stop_queue:
-	nvme_tcp_stop_queue(ctrl, 0);
-	nvme_cancel_admin_tagset(ctrl);
-out_cleanup_tagset:
-	if (new)
-		nvme_remove_admin_tag_set(ctrl);
-out_free_queue:
-	nvme_tcp_free_admin_queue(ctrl);
-	return error;
-}
-
 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
 		bool remove)
 {
@@ -2027,12 +1961,28 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
 	}
 }
 
+static int nvme_tcp_init_queue(struct nvme_ctrl *ctrl, int qid)
+{
+	if (qid == 0) {
+		return nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
+	}
+
+	return 0;
+}
+
+static int nvme_tcp_alloc_admin_tag_set(struct nvme_ctrl *ctrl)
+{
+	return nvme_alloc_admin_tag_set(ctrl, &to_tcp_ctrl(ctrl)->admin_tag_set,
+					&nvme_tcp_admin_mq_ops,
+					sizeof(struct nvme_tcp_request));
+}
+
 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 {
 	struct nvmf_ctrl_options *opts = ctrl->opts;
 	int ret;
 
-	ret = nvme_tcp_configure_admin_queue(ctrl, new);
+	ret = nvmf_configure_admin_queue(ctrl, new);
 	if (ret)
 		return ret;
 
@@ -2552,6 +2502,15 @@ nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
 	return found;
 }
 
+static struct nvme_fabrics_ops nvme_tcp_fabrics_ops = {
+	.alloc_queue		= nvme_tcp_alloc_queue,
+	.free_queue		= nvme_tcp_free_queue,
+	.start_queue		= nvme_tcp_start_queue,
+	.stop_queue		= nvme_tcp_stop_queue,
+	.init_queue		= nvme_tcp_init_queue,
+	.alloc_admin_tag_set	= nvme_tcp_alloc_admin_tag_set,
+};
+
 static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 		struct nvmf_ctrl_options *opts)
 {
@@ -2564,6 +2523,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 
 	INIT_LIST_HEAD(&ctrl->list);
 	ctrl->ctrl.opts = opts;
+	ctrl->ctrl.fabrics_ops = &nvme_tcp_fabrics_ops;
 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
 				opts->nr_poll_queues + 1;
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
-- 
2.39.2