[PATCH 07/10] nvme-fabrics: add nvmf_init_ctrl/nvmf_uninit_ctrl API

Max Gurtovoy mgurtovoy at nvidia.com
Wed Oct 20 03:38:41 PDT 2021


Centralize the initialization and un-initialization of fabrics-specific
settings. For now, these helpers are used only by the RDMA and TCP
fabric transports.

Also, make nvmf_reconnect_ctrl_work and nvmf_error_recovery_work static,
since they are no longer used outside the fabrics module.

Reviewed-by: Israel Rukshin <israelr at nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy at nvidia.com>
---
 drivers/nvme/host/fabrics.c | 20 ++++++++++++++++----
 drivers/nvme/host/fabrics.h |  4 ++--
 drivers/nvme/host/rdma.c    |  7 ++-----
 drivers/nvme/host/tcp.c     |  7 ++-----
 4 files changed, 22 insertions(+), 16 deletions(-)
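
Note (not part of the commit message): a rough sketch of the intended
usage for a fabric transport, mirroring the rdma/tcp hunks below. The
nvme_foo_reset_ctrl_work name is purely illustrative:

	/* at controller creation, after allocating the ctrl: */
	nvmf_init_ctrl(&ctrl->ctrl);	/* sets up connect_work + err_work */
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_foo_reset_ctrl_work);

	/* at controller teardown, before tearing down the I/O queues: */
	nvmf_uninit_ctrl(&ctrl->ctrl);	/* cancels err_work + connect_work */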

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 6a2283e09164..e50f6b32a286 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvmf_reconnect_or_remove);
 
-void nvmf_error_recovery_work(struct work_struct *work)
+static void nvmf_error_recovery_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(work,
 				struct nvme_ctrl, err_work);
@@ -514,7 +514,6 @@ void nvmf_error_recovery_work(struct work_struct *work)
 
 	nvmf_reconnect_or_remove(ctrl);
 }
-EXPORT_SYMBOL_GPL(nvmf_error_recovery_work);
 
 void nvmf_error_recovery(struct nvme_ctrl *ctrl)
 {
@@ -526,7 +525,7 @@ void nvmf_error_recovery(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvmf_error_recovery);
 
-void nvmf_reconnect_ctrl_work(struct work_struct *work)
+static void nvmf_reconnect_ctrl_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvme_ctrl, connect_work);
@@ -548,7 +547,20 @@ void nvmf_reconnect_ctrl_work(struct work_struct *work)
 		 ctrl->nr_reconnects);
 	nvmf_reconnect_or_remove(ctrl);
 }
-EXPORT_SYMBOL_GPL(nvmf_reconnect_ctrl_work);
+
+void nvmf_init_ctrl(struct nvme_ctrl *ctrl)
+{
+	INIT_DELAYED_WORK(&ctrl->connect_work, nvmf_reconnect_ctrl_work);
+	INIT_WORK(&ctrl->err_work, nvmf_error_recovery_work);
+}
+EXPORT_SYMBOL_GPL(nvmf_init_ctrl);
+
+void nvmf_uninit_ctrl(struct nvme_ctrl *ctrl)
+{
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->connect_work);
+}
+EXPORT_SYMBOL_GPL(nvmf_uninit_ctrl);
 
 /**
  * nvmf_register_transport() - NVMe Fabrics Library registration function.
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 49c98b69647f..06933e7a4ff4 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -190,8 +190,8 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl);
 void nvmf_error_recovery(struct nvme_ctrl *ctrl);
-void nvmf_error_recovery_work(struct work_struct *work);
-void nvmf_reconnect_ctrl_work(struct work_struct *work);
+void nvmf_init_ctrl(struct nvme_ctrl *ctrl);
+void nvmf_uninit_ctrl(struct nvme_ctrl *ctrl);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9c62f3766f49..8e1e8c8c8a0d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2159,9 +2159,7 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->ctrl.err_work);
-	cancel_delayed_work_sync(&ctrl->ctrl.connect_work);
-
+	nvmf_uninit_ctrl(&ctrl->ctrl);
 	nvme_rdma_teardown_io_queues(&ctrl->ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	if (shutdown)
@@ -2292,8 +2290,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_free_ctrl;
 	}
 
-	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
-	INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
+	nvmf_init_ctrl(&ctrl->ctrl);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e6e8de2dcc8e..7f50b423388f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2034,9 +2034,7 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->connect_work);
-
+	nvmf_uninit_ctrl(ctrl);
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	if (shutdown)
@@ -2438,8 +2436,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
-	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
-	INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
+	nvmf_init_ctrl(&ctrl->ctrl);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
-- 
2.18.1
