[PATCH 7/7] nvme-fabrics: add nvmf_init_ctrl/nvmf_teardown_ctrl API

From: Max Gurtovoy <mgurtovoy@nvidia.com>
Date: Mon Oct 18 06:40:20 PDT 2021


Centralize the initialization and teardown of the fabrics-specific
settings. For now, these helpers are used only by the RDMA and TCP
fabric transports.

Also, make nvmf_reconnect_ctrl_work() and nvmf_error_recovery_work()
static, since they are no longer used outside the fabrics driver.

Reviewed-by: Israel Rukshin <israelr@nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy@nvidia.com>
---
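For illustration only (not part of the patch): a transport that adopts the
new helpers ends up with roughly the pattern sketched below. The nvme_foo_*
symbols are hypothetical and stand in for a transport driver; the actual
conversions of rdma.c and tcp.c are in the diff that follows.

	/*
	 * Hypothetical transport "foo": where the new helpers slot in.
	 * nvme_foo_* names are illustrative, not a real driver.
	 */
	static struct nvme_ctrl *nvme_foo_create_ctrl(struct device *dev,
			struct nvmf_ctrl_options *opts)
	{
		struct nvme_foo_ctrl *ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);

		if (!ctrl)
			return ERR_PTR(-ENOMEM);

		/* arms connect_work and err_work with the fabrics handlers */
		nvmf_init_ctrl(&ctrl->ctrl);
		INIT_WORK(&ctrl->ctrl.reset_work, nvme_foo_reset_ctrl_work);
		/* ... transport-specific setup ... */
		return &ctrl->ctrl;
	}

	static void nvme_foo_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
	{
		/* cancels err_work and connect_work before queue teardown */
		nvmf_teardown_ctrl(ctrl);
		nvme_foo_teardown_io_queues(to_foo_ctrl(ctrl), shutdown);
		/* ... shut down or disable the controller ... */
	}

Keeping the work-item wiring behind one pair of calls means a future
transport only calls these two helpers instead of open-coding the
INIT_*/cancel_* pairs in its create and teardown paths.
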
 drivers/nvme/host/fabrics.c | 20 ++++++++++++++++----
 drivers/nvme/host/fabrics.h |  4 ++--
 drivers/nvme/host/rdma.c    |  7 ++-----
 drivers/nvme/host/tcp.c     |  7 ++-----
 4 files changed, 22 insertions(+), 16 deletions(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 7f76b27ce1f2..4a16e5f85d24 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvmf_reconnect_or_remove);
 
-void nvmf_error_recovery_work(struct work_struct *work)
+static void nvmf_error_recovery_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(work,
 				struct nvme_ctrl, err_work);
@@ -514,7 +514,6 @@ void nvmf_error_recovery_work(struct work_struct *work)
 
 	nvmf_reconnect_or_remove(ctrl);
 }
-EXPORT_SYMBOL_GPL(nvmf_error_recovery_work);
 
 void nvmf_error_recovery(struct nvme_ctrl *ctrl)
 {
@@ -526,7 +525,7 @@ void nvmf_error_recovery(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvmf_error_recovery);
 
-void nvmf_reconnect_ctrl_work(struct work_struct *work)
+static void nvmf_reconnect_ctrl_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
 			struct nvme_ctrl, connect_work);
@@ -548,7 +547,20 @@ void nvmf_reconnect_ctrl_work(struct work_struct *work)
 		 ctrl->nr_reconnects);
 	nvmf_reconnect_or_remove(ctrl);
 }
-EXPORT_SYMBOL_GPL(nvmf_reconnect_ctrl_work);
+
+void nvmf_init_ctrl(struct nvme_ctrl *ctrl)
+{
+	INIT_DELAYED_WORK(&ctrl->connect_work, nvmf_reconnect_ctrl_work);
+	INIT_WORK(&ctrl->err_work, nvmf_error_recovery_work);
+}
+EXPORT_SYMBOL_GPL(nvmf_init_ctrl);
+
+void nvmf_teardown_ctrl(struct nvme_ctrl *ctrl)
+{
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->connect_work);
+}
+EXPORT_SYMBOL_GPL(nvmf_teardown_ctrl);
 
 /**
  * nvmf_register_transport() - NVMe Fabrics Library registration function.
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 49c98b69647f..08b290c2e01a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -190,8 +190,8 @@ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl);
 void nvmf_error_recovery(struct nvme_ctrl *ctrl);
-void nvmf_error_recovery_work(struct work_struct *work);
-void nvmf_reconnect_ctrl_work(struct work_struct *work);
+void nvmf_init_ctrl(struct nvme_ctrl *ctrl);
+void nvmf_teardown_ctrl(struct nvme_ctrl *ctrl);
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 7fb2f434fe0d..aa3e142047eb 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2169,9 +2169,7 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->ctrl.err_work);
-	cancel_delayed_work_sync(&ctrl->ctrl.connect_work);
-
+	nvmf_teardown_ctrl(&ctrl->ctrl);
 	nvme_rdma_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
 	if (shutdown)
@@ -2302,8 +2300,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_free_ctrl;
 	}
 
-	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
-	INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
+	nvmf_init_ctrl(&ctrl->ctrl);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
 	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0e5bb3949b3..26c2b181edb9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2049,9 +2049,7 @@ static int _nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl)
 
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->connect_work);
-
+	nvmf_teardown_ctrl(ctrl);
 	nvme_tcp_teardown_io_queues(ctrl, shutdown);
 	blk_mq_quiesce_queue(ctrl->admin_q);
 	if (shutdown)
@@ -2453,8 +2451,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
-	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
-	INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
+	nvmf_init_ctrl(&ctrl->ctrl);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
 
 	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
-- 
2.18.1



