[PATCH 4/4] nvme: start keep-alive after admin queue setup
Hannes Reinecke
hare at suse.de
Fri Oct 20 07:26:00 PDT 2023
Setting up I/O queues can take quite some time on larger and/or busy
setups, so the keep-alive timeout (KATO) might expire before all I/O
queues have been set up.
Fix this by moving the starting and stopping of keep-alive into
nvme_unquiesce_admin_queue() and nvme_quiesce_admin_queue(), so that
keep-alive is already running while the I/O queues are being brought up.
Signed-off-by: Hannes Reinecke <hare at suse.de>
---
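For illustration (not part of this patch): with this change a transport's
connect/reset path is expected to look roughly like the sketch below.
my_setup_admin_queue() and my_setup_io_queues() are placeholders for the
transport-specific setup code, and the sketch assumes the declarations
from drivers/nvme/host/nvme.h.

	static int my_transport_connect(struct nvme_ctrl *ctrl)
	{
		int ret;

		ret = my_setup_admin_queue(ctrl);	/* placeholder */
		if (ret)
			return ret;

		/*
		 * start_ka == true: keep-alive is armed as soon as the
		 * admin queue is live, so KATO keeps being refreshed
		 * during the (possibly long) I/O queue setup below.
		 */
		nvme_unquiesce_admin_queue(ctrl, true);

		ret = nvme_init_ctrl_finish(ctrl, false);
		if (ret)
			return ret;

		ret = my_setup_io_queues(ctrl);		/* placeholder */
		if (ret)
			return ret;

		/* nvme_start_ctrl() no longer starts keep-alive itself. */
		nvme_start_ctrl(ctrl);
		return 0;
	}

The teardown side mirrors this: nvme_quiesce_admin_queue() now stops
keep-alive before quiescing the admin queue.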
drivers/nvme/host/apple.c | 4 ++--
drivers/nvme/host/core.c | 7 ++++---
drivers/nvme/host/fc.c | 8 ++++----
drivers/nvme/host/nvme.h | 2 +-
drivers/nvme/host/pci.c | 8 ++++----
drivers/nvme/host/rdma.c | 6 +++---
drivers/nvme/host/tcp.c | 6 +++---
drivers/nvme/target/loop.c | 2 +-
8 files changed, 22 insertions(+), 21 deletions(-)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 596bb11eeba5..91d3b1341723 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -869,7 +869,7 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
*/
if (shutdown) {
nvme_unquiesce_io_queues(&anv->ctrl);
- nvme_unquiesce_admin_queue(&anv->ctrl);
+ nvme_unquiesce_admin_queue(&anv->ctrl, false);
}
}
@@ -1107,7 +1107,7 @@ static void apple_nvme_reset_work(struct work_struct *work)
dev_dbg(anv->dev, "Starting admin queue");
apple_nvme_init_queue(&anv->adminq);
- nvme_unquiesce_admin_queue(&anv->ctrl);
+ nvme_unquiesce_admin_queue(&anv->ctrl, true);
if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
dev_warn(anv->ctrl.device,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 62612f87aafa..070912e1601a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4344,8 +4344,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
- nvme_start_keep_alive(ctrl);
-
nvme_enable_aen(ctrl);
/*
@@ -4602,6 +4600,7 @@ EXPORT_SYMBOL_GPL(nvme_unquiesce_io_queues);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
{
+ nvme_stop_keep_alive(ctrl);
if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
blk_mq_quiesce_queue(ctrl->admin_q);
else
@@ -4609,10 +4608,12 @@ void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_quiesce_admin_queue);
-void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl)
+void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl, bool start_ka)
{
if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
blk_mq_unquiesce_queue(ctrl->admin_q);
+ if (start_ka)
+ nvme_start_keep_alive(ctrl);
}
EXPORT_SYMBOL_GPL(nvme_unquiesce_admin_queue);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 17b6c9238d68..3ac749bf34de 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2404,7 +2404,7 @@ nvme_fc_ctrl_free(struct kref *ref)
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, false);
nvme_remove_admin_tag_set(&ctrl->ctrl);
kfree(ctrl->queues);
@@ -2535,7 +2535,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
if (start_queues)
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, true);
}
static void
@@ -3129,7 +3129,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
(ilog2(SZ_4K) - 9);
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, true);
ret = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (!ret && test_bit(ASSOC_FAILED, &ctrl->flags))
@@ -3288,7 +3288,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_free_queue(&ctrl->queues[0]);
/* re-enable the admin_q so anything new can fast fail */
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, false);
/* resume the io queues so that things will fast fail */
nvme_unquiesce_io_queues(&ctrl->ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 39a90b7cb125..1aba30600b4a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -770,7 +770,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
-void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl, bool start_ka);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5b6dec052dfe..b9a6abe3fd33 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1681,7 +1681,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
* user requests may be waiting on a stopped queue. Start the
* queue to flush these to completion.
*/
- nvme_unquiesce_admin_queue(&dev->ctrl);
+ nvme_unquiesce_admin_queue(&dev->ctrl, false);
nvme_remove_admin_tag_set(&dev->ctrl);
}
}
@@ -2615,7 +2615,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (shutdown) {
nvme_unquiesce_io_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
- nvme_unquiesce_admin_queue(&dev->ctrl);
+ nvme_unquiesce_admin_queue(&dev->ctrl, false);
}
mutex_unlock(&dev->shutdown_lock);
}
@@ -2722,7 +2722,7 @@ static void nvme_reset_work(struct work_struct *work)
goto out;
}
- nvme_unquiesce_admin_queue(&dev->ctrl);
+ nvme_unquiesce_admin_queue(&dev->ctrl, true);
result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend);
if (result)
@@ -3020,7 +3020,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_disable;
}
- nvme_unquiesce_admin_queue(&dev->ctrl);
+ nvme_unquiesce_admin_queue(&dev->ctrl, true);
result = nvme_init_ctrl_finish(&dev->ctrl, false);
if (result)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 337a624a537c..a9368767560f 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -830,7 +830,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
else
ctrl->ctrl.max_integrity_segments = 0;
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, true);
error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (error)
@@ -932,7 +932,7 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
if (remove) {
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, false);
nvme_remove_admin_tag_set(&ctrl->ctrl);
}
nvme_rdma_destroy_admin_queue(ctrl);
@@ -1120,7 +1120,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_unquiesce_io_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, false);
nvme_auth_stop(&ctrl->ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4714a902f4ca..a3c3ef843dca 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2103,7 +2103,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_stop_queue;
- nvme_unquiesce_admin_queue(ctrl);
+ nvme_unquiesce_admin_queue(ctrl, true);
error = nvme_init_ctrl_finish(ctrl, false);
if (error)
@@ -2133,7 +2133,7 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
if (remove)
- nvme_unquiesce_admin_queue(ctrl);
+ nvme_unquiesce_admin_queue(ctrl, false);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -2280,7 +2280,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
/* unquiesce to fail fast pending requests */
nvme_unquiesce_io_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
- nvme_unquiesce_admin_queue(ctrl);
+ nvme_unquiesce_admin_queue(ctrl, false);
nvme_auth_stop(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index e1b8ead94575..6237f6baba4f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -375,7 +375,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << PAGE_SECTORS_SHIFT;
- nvme_unquiesce_admin_queue(&ctrl->ctrl);
+ nvme_unquiesce_admin_queue(&ctrl->ctrl, true);
error = nvme_init_ctrl_finish(&ctrl->ctrl, false);
if (error)
--
2.35.3