[PATCH 06/10] nvme/nvme-fabrics: introduce nvmf_reconnect_ctrl_work API

From: Max Gurtovoy <mgurtovoy at nvidia.com>
Date: Wed Oct 20 03:38:40 PDT 2021

The reconnect work is duplicated in the RDMA and TCP transports. Move
this logic to the common fabrics code. To do so, introduce a new ctrl
op, ->setup_ctrl(), that performs the transport-specific controller
setup during a (re)connect. Also update the RDMA/TCP transport drivers
to use this API and remove the duplicated code.
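
With this in place, a fabrics transport only has to implement the new
->setup_ctrl() op and point its connect_work at the common handler. A
minimal sketch of the transport-side hookup (the nvme_foo_* names are
illustrative only, not part of this patch):

	/* hypothetical transport: bring up the queues, then go live */
	static int nvme_foo_setup_ctrl(struct nvme_ctrl *nctrl, bool new)
	{
		int ret = nvme_foo_configure_queues(nctrl, new);

		if (ret)
			return ret;
		if (!nvme_change_ctrl_state(nctrl, NVME_CTRL_LIVE))
			return -EINVAL;
		nvme_start_ctrl(nctrl);
		return 0;
	}

	static const struct nvme_ctrl_ops nvme_foo_ctrl_ops = {
		/* ... the usual mandatory ops ... */
		.setup_ctrl	= nvme_foo_setup_ctrl,
	};

	/* reconnects are now driven entirely by the fabrics code */
	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);

On failure, nvmf_reconnect_ctrl_work() calls nvmf_reconnect_or_remove(),
so the op only has to report success or failure.
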
Reviewed-by: Israel Rukshin <israelr at nvidia.com>
Reviewed-by: Chaitanya Kulkarni <kch at nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy at nvidia.com>
---
 drivers/nvme/host/fabrics.c | 24 +++++++++++
 drivers/nvme/host/fabrics.h |  1 +
 drivers/nvme/host/nvme.h    |  1 +
 drivers/nvme/host/rdma.c    | 82 ++++++++++++++-----------------------
 drivers/nvme/host/tcp.c     | 28 +------------
 5 files changed, 58 insertions(+), 78 deletions(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5a770196eb60..6a2283e09164 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -526,6 +526,30 @@ void nvmf_error_recovery(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvmf_error_recovery);
 
+void nvmf_reconnect_ctrl_work(struct work_struct *work)
+{
+	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
+			struct nvme_ctrl, connect_work);
+
+	++ctrl->nr_reconnects;
+
+	if (ctrl->ops->setup_ctrl(ctrl, false))
+		goto requeue;
+
+	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
+			ctrl->nr_reconnects);
+
+	ctrl->nr_reconnects = 0;
+
+	return;
+
+requeue:
+	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
+			ctrl->nr_reconnects);
+	nvmf_reconnect_or_remove(ctrl);
+}
+EXPORT_SYMBOL_GPL(nvmf_reconnect_ctrl_work);
+
 /**
  * nvmf_register_transport() - NVMe Fabrics Library registration function.
  * @ops: Transport ops instance to be registered to the
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 8655eff74ed0..49c98b69647f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -191,6 +191,7 @@ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 void nvmf_reconnect_or_remove(struct nvme_ctrl *ctrl);
 void nvmf_error_recovery(struct nvme_ctrl *ctrl);
 void nvmf_error_recovery_work(struct work_struct *work);
+void nvmf_reconnect_ctrl_work(struct work_struct *work);
 
 bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 		struct nvmf_ctrl_options *opts);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5cdf2ec45e9a..e137db2760d8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -497,6 +497,7 @@ struct nvme_ctrl_ops {
 	/* Fabrics only */
 	void (*teardown_ctrl_io_queues)(struct nvme_ctrl *ctrl, bool remove);
 	void (*teardown_ctrl_admin_queue)(struct nvme_ctrl *ctrl, bool remove);
+	int (*setup_ctrl)(struct nvme_ctrl *ctrl, bool new);
 };
 
 /*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4e42f1956181..9c62f3766f49 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1067,8 +1067,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 	kfree(ctrl);
 }
 
-static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
+static int nvme_rdma_setup_ctrl(struct nvme_ctrl *nctrl, bool new)
 {
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
 	int ret;
 	bool changed;
 
@@ -1076,98 +1077,75 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 	if (ret)
 		return ret;
 
-	if (ctrl->ctrl.icdoff) {
+	if (nctrl->icdoff) {
 		ret = -EOPNOTSUPP;
-		dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
+		dev_err(nctrl->device, "icdoff is not supported!\n");
 		goto destroy_admin;
 	}
 
-	if (!(ctrl->ctrl.sgls & (1 << 2))) {
+	if (!(nctrl->sgls & (1 << 2))) {
 		ret = -EOPNOTSUPP;
-		dev_err(ctrl->ctrl.device,
+		dev_err(nctrl->device,
 			"Mandatory keyed sgls are not supported!\n");
 		goto destroy_admin;
 	}
 
-	if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
-		dev_warn(ctrl->ctrl.device,
+	if (nctrl->opts->queue_size > nctrl->sqsize + 1) {
+		dev_warn(nctrl->device,
 			"queue_size %zu > ctrl sqsize %u, clamping down\n",
-			ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
+			nctrl->opts->queue_size, nctrl->sqsize + 1);
 	}
 
-	if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
-		dev_warn(ctrl->ctrl.device,
+	if (nctrl->sqsize + 1 > nctrl->maxcmd) {
+		dev_warn(nctrl->device,
 			"sqsize %u > ctrl maxcmd %u, clamping down\n",
-			ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
-		ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
+			nctrl->sqsize + 1, nctrl->maxcmd);
+		nctrl->sqsize = nctrl->maxcmd - 1;
 	}
 
-	if (ctrl->ctrl.sgls & (1 << 20))
+	if (nctrl->sgls & (1 << 20))
 		ctrl->use_inline_data = true;
 
-	if (ctrl->ctrl.queue_count > 1) {
+	if (nctrl->queue_count > 1) {
 		ret = nvme_rdma_configure_io_queues(ctrl, new);
 		if (ret)
 			goto destroy_admin;
 	}
 
-	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+	changed = nvme_change_ctrl_state(nctrl, NVME_CTRL_LIVE);
 	if (!changed) {
 		/*
 		 * state change failure is ok if we started ctrl delete,
 		 * unless we're during creation of a new controller to
 		 * avoid races with teardown flow.
 		 */
-		WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
-			     ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
+		WARN_ON_ONCE(nctrl->state != NVME_CTRL_DELETING &&
+			     nctrl->state != NVME_CTRL_DELETING_NOIO);
 		WARN_ON_ONCE(new);
 		ret = -EINVAL;
 		goto destroy_io;
 	}
 
-	nvme_start_ctrl(&ctrl->ctrl);
+	nvme_start_ctrl(nctrl);
 	return 0;
 
 destroy_io:
-	if (ctrl->ctrl.queue_count > 1) {
-		nvme_stop_queues(&ctrl->ctrl);
-		nvme_sync_io_queues(&ctrl->ctrl);
+	if (nctrl->queue_count > 1) {
+		nvme_stop_queues(nctrl);
+		nvme_sync_io_queues(nctrl);
 		nvme_rdma_stop_io_queues(ctrl);
-		nvme_cancel_tagset(&ctrl->ctrl);
+		nvme_cancel_tagset(nctrl);
 		nvme_rdma_destroy_io_queues(ctrl, new);
 	}
 destroy_admin:
-	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
-	blk_sync_queue(ctrl->ctrl.admin_q);
+	blk_mq_quiesce_queue(nctrl->admin_q);
+	blk_sync_queue(nctrl->admin_q);
 	nvme_rdma_stop_queue(&ctrl->queues[0]);
-	nvme_cancel_admin_tagset(&ctrl->ctrl);
+	nvme_cancel_admin_tagset(nctrl);
 	nvme_rdma_destroy_admin_queue(ctrl, new);
 	return ret;
 }
 
-static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
-{
-	struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
-			struct nvme_rdma_ctrl, ctrl.connect_work);
-
-	++ctrl->ctrl.nr_reconnects;
-
-	if (nvme_rdma_setup_ctrl(ctrl, false))
-		goto requeue;
-
-	dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
-			ctrl->ctrl.nr_reconnects);
-
-	ctrl->ctrl.nr_reconnects = 0;
-
-	return;
-
-requeue:
-	dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
-			ctrl->ctrl.nr_reconnects);
-	nvmf_reconnect_or_remove(&ctrl->ctrl);
-}
-
 static void nvme_rdma_end_request(struct nvme_rdma_request *req)
 {
 	struct request *rq = blk_mq_rq_from_pdu(req);
@@ -2212,7 +2190,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 		return;
 	}
 
-	if (nvme_rdma_setup_ctrl(ctrl, false))
+	if (nvme_rdma_setup_ctrl(&ctrl->ctrl, false))
 		goto out_fail;
 
 	return;
@@ -2236,6 +2214,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.get_address		= nvmf_get_address,
 	.teardown_ctrl_io_queues = nvme_rdma_teardown_io_queues,
 	.teardown_ctrl_admin_queue = nvme_rdma_teardown_admin_queue,
+	.setup_ctrl = nvme_rdma_setup_ctrl,
 };
 
 /*
@@ -2313,8 +2292,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 		goto out_free_ctrl;
 	}
 
-	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work,
-			nvme_rdma_reconnect_ctrl_work);
+	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
 
@@ -2337,7 +2315,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
 	WARN_ON_ONCE(!changed);
 
-	ret = nvme_rdma_setup_ctrl(ctrl, true);
+	ret = nvme_rdma_setup_ctrl(&ctrl->ctrl, true);
 	if (ret)
 		goto out_uninit_ctrl;
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 679eb3c2b8fd..e6e8de2dcc8e 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2032,30 +2032,6 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
 	return ret;
 }
 
-static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
-{
-	struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
-			struct nvme_tcp_ctrl, ctrl.connect_work);
-	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
-
-	++ctrl->nr_reconnects;
-
-	if (nvme_tcp_setup_ctrl(ctrl, false))
-		goto requeue;
-
-	dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
-			ctrl->nr_reconnects);
-
-	ctrl->nr_reconnects = 0;
-
-	return;
-
-requeue:
-	dev_info(ctrl->device, "Failed reconnect attempt %d\n",
-			ctrl->nr_reconnects);
-	nvmf_reconnect_or_remove(ctrl);
-}
-
 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
 {
 	cancel_work_sync(&ctrl->err_work);
@@ -2425,6 +2401,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.get_address		= nvmf_get_address,
 	.teardown_ctrl_io_queues = nvme_tcp_teardown_io_queues,
 	.teardown_ctrl_admin_queue = nvme_tcp_teardown_admin_queue,
+	.setup_ctrl = nvme_tcp_setup_ctrl,
 };
 
 static bool
@@ -2461,8 +2438,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
 	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
-	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work,
-			nvme_tcp_reconnect_ctrl_work);
+	INIT_DELAYED_WORK(&ctrl->ctrl.connect_work, nvmf_reconnect_ctrl_work);
 	INIT_WORK(&ctrl->ctrl.err_work, nvmf_error_recovery_work);
 	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
 
--
2.18.1