[PATCH 5/5] nvme: Add two-pass shutdown support.

Sagi Grimberg sagi at grimberg.me
Tue Jan 30 03:15:43 PST 2024


> This works with the two-pass shutdown mechanism set up for PCI
> drivers earlier in this series, and provides the new shutdown_wait
> method in the pci_driver structure.
> 
> This patch changes the nvme shutdown() method to pass down the
> NVME_PCI_DISABLE_SHUTDOWN_TWOPASS enum value instead of
> NVME_PCI_DISABLE_SHUTDOWN, and changes nvme_dev_disable() to call
> nvme_request_shutdown() instead of nvme_disable_ctrl() in that
> case.
> 
> nvme_request_shutdown() sets the shutdown bits in the controller
> configuration register (CC.SHN) but does not wait for completion.
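
For readers following along: I'd expect nvme_request_shutdown()
(added earlier in the series, next to the existing nvme_disable_ctrl())
to be roughly the first half of nvme_disable_ctrl()'s shutdown path:
request a normal shutdown via CC.SHN and return without polling
CSTS.SHST. A minimal sketch, not the actual patch:

	void nvme_request_shutdown(struct nvme_ctrl *ctrl)
	{
		/* Request a normal shutdown via the CC.SHN field. */
		ctrl->ctrl_config &= ~NVME_CC_SHN_MASK;
		ctrl->ctrl_config |= NVME_CC_SHN_NORMAL;
		ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config);
		/*
		 * Unlike nvme_disable_ctrl(), do not wait for
		 * CSTS.SHST_CMPLT here; nvme_shutdown_wait() polls for
		 * it later via nvme_wait_ready().
		 */
	}
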
> 
> The nvme_shutdown_wait() callback is added to synchronously wait
> for the NVME_CSTS_SHST_CMPLT bit, which indicates that the
> controller has completed its shutdown.
> 
> This change speeds up shutdown on systems that host many
> controllers: the waits for CSTS.SHST_CMPLT can overlap across
> controllers instead of being serialized.
> 
> Based on work by Tanjore Suresh <tansuresh at google.com>
> 
> Signed-off-by: Jeremy Allison <jallison at ciq.com>
> ---
>   drivers/nvme/host/pci.c | 39 ++++++++++++++++++++++++++++++++++++---
>   1 file changed, 36 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 8ee77f755d9d..bbd89eecf05a 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -2607,7 +2607,14 @@ static void nvme_dev_disable(struct nvme_dev *dev,
>   
>   	if (!dead && dev->ctrl.queue_count > 0) {
>   		nvme_delete_io_queues(dev);
> -		nvme_disable_ctrl(&dev->ctrl, shutdown);
> +		/*
> +		 * NVME_PCI_DISABLE_SHUTDOWN_TWOPASS requests shutdown
> +		 * but doesn't wait for completion.
> +		 */
> +		if (shutdown_type == NVME_PCI_DISABLE_SHUTDOWN_TWOPASS)
> +			nvme_request_shutdown(&dev->ctrl);
> +		else
> +			nvme_disable_ctrl(&dev->ctrl, shutdown);
>   		nvme_poll_irqdisable(&dev->queues[0]);
>   	}
>   	nvme_suspend_io_queues(dev);
> @@ -2625,7 +2632,7 @@ static void nvme_dev_disable(struct nvme_dev *dev,
>   	 * must flush all entered requests to their failed completion to avoid
>   	 * deadlocking blk-mq hot-cpu notifier.
>   	 */
> -	if (shutdown) {
> +	if (shutdown_type == NVME_PCI_DISABLE_SHUTDOWN) {
>   		nvme_unquiesce_io_queues(&dev->ctrl);
>   		if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
>   			nvme_unquiesce_admin_queue(&dev->ctrl);
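
Side note for readers: in the TWOPASS case the unquiesce of the
queues is intentionally skipped here and deferred to
nvme_shutdown_wait(), after CSTS.SHST_CMPLT has actually been
observed.
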
> @@ -3128,7 +3135,32 @@ static void nvme_shutdown(struct pci_dev *pdev)
>   {
>   	struct nvme_dev *dev = pci_get_drvdata(pdev);
>   
> -	nvme_disable_prepare_reset(dev, NVME_PCI_DISABLE_SHUTDOWN);
> +	nvme_disable_prepare_reset(dev, NVME_PCI_DISABLE_SHUTDOWN_TWOPASS);
> +}
> +
> +static void nvme_shutdown_wait(struct pci_dev *pdev)
> +{
> +	struct nvme_dev *dev = pci_get_drvdata(pdev);
> +
> +	mutex_lock(&dev->shutdown_lock);

A general question that just came up: is there any risk here that the
shutdown_lock is released and then re-taken later while the shutdown
is still in progress? I don't spot anything immediate, but perhaps
Keith can comment?

If this change does expose us to potential issues, you could simply
keep the shutdown_lock held across both passes: skip the unlock at
the end of nvme_dev_disable() in the TWOPASS case and only release it
here.
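
Something like this at the tail of nvme_dev_disable(), just as a
sketch of what I mean (assuming the TWOPASS case is the only path
that ends in nvme_shutdown_wait()):

	/* End of nvme_dev_disable(): keep the lock across both passes. */
	if (shutdown_type != NVME_PCI_DISABLE_SHUTDOWN_TWOPASS)
		mutex_unlock(&dev->shutdown_lock);
	/*
	 * TWOPASS: the shutdown_lock stays held until
	 * nvme_shutdown_wait() releases it, so nothing can sneak in
	 * between the two passes.
	 */

nvme_shutdown_wait() would then drop the mutex_lock() below and keep
only the final mutex_unlock().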

> +	/*
> +	 * Finish waiting for the shutdown request
> +	 * initiated in nvme_shutdown() above using
> +	 * NVME_PCI_DISABLE_SHUTDOWN_TWOPASS.
> +	 */
> +	nvme_wait_ready(&dev->ctrl, NVME_CSTS_SHST_MASK,
> +			NVME_CSTS_SHST_CMPLT,
> +			dev->ctrl.shutdown_timeout, "shutdown");
> +	/*
> +	 * The driver will not be starting up queues again if shutting down so
> +	 * must flush all entered requests to their failed completion to avoid
> +	 * deadlocking blk-mq hot-cpu notifier.
> +	 */
> +	nvme_unquiesce_io_queues(&dev->ctrl);
> +	if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
> +		nvme_unquiesce_admin_queue(&dev->ctrl);
> +
> +	mutex_unlock(&dev->shutdown_lock);
>   }
>   
>   /*
> @@ -3522,6 +3554,7 @@ static struct pci_driver nvme_driver = {
>   	.probe		= nvme_probe,
>   	.remove		= nvme_remove,
>   	.shutdown	= nvme_shutdown,
> +	.shutdown_wait	= nvme_shutdown_wait,
>   	.driver		= {
>   		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
>   #ifdef CONFIG_PM_SLEEP
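
For anyone who hasn't followed the rest of the series: as I
understand it, the earlier patches make the device core shut devices
down in two loops, so the slow per-controller waits overlap instead
of running serially. Conceptually (this is only an illustration of
the idea, not the actual driver-core code; for_each_shutdown_device()
is made-up shorthand for the device iteration there):

	/* Pass 1: ask every device to start shutting down. */
	for_each_shutdown_device(dev)
		if (dev->driver && dev->driver->shutdown)
			dev->driver->shutdown(dev);

	/* Pass 2: wait for each device to finish shutting down. */
	for_each_shutdown_device(dev)
		if (dev->driver && dev->driver->shutdown_wait)
			dev->driver->shutdown_wait(dev);

With N controllers each taking up to T to reach CSTS.SHST_CMPLT,
shutdown time drops from roughly N * T to roughly T plus some cheap
register writes.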


