[PATCH RFC 4/5] nvme: add sysfs attribute to change IO timeout per nvme controller

Mohamed Khalfella mkhalfella at purestorage.com
Tue Feb 17 12:25:53 PST 2026


On Thu 2026-02-12 13:09:50 +0100, Maurizio Lombardi wrote:
> Currently, there is no method to adjust the timeout values
> on a per controller basis with nvme I/O queues.
> Add an io_timeout attribute to nvme so that different
> nvme controllers which may have different timeout
> requirements can have custom I/O timeouts set.
> 
> Signed-off-by: Maurizio Lombardi <mlombard at redhat.com>
> ---
>  drivers/nvme/host/apple.c |  2 +-
>  drivers/nvme/host/core.c  |  4 +++-
>  drivers/nvme/host/nvme.h  |  1 +
>  drivers/nvme/host/sysfs.c | 38 ++++++++++++++++++++++++++++++++++++++
>  4 files changed, 43 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
> index ed61b97fde59..e9bb64e3ec9c 100644
> --- a/drivers/nvme/host/apple.c
> +++ b/drivers/nvme/host/apple.c
> @@ -1331,7 +1331,7 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
>  	if (anv->hw->has_lsq_nvmmu)
>  		anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
>  	anv->tagset.queue_depth = anv->hw->max_queue_depth - 1;
> -	anv->tagset.timeout = NVME_IO_TIMEOUT;
> +	anv->tagset.timeout = anv->ctrl.io_timeout;
>  	anv->tagset.numa_node = NUMA_NO_NODE;
>  	anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
>  	anv->tagset.driver_data = &anv->ioq;
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index b9315f0abf80..cc7d725bd6ff 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -4169,6 +4169,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
>  		mutex_unlock(&ctrl->namespaces_lock);
>  		goto out_unlink_ns;
>  	}
> +	blk_queue_rq_timeout(ns->queue, ctrl->io_timeout);

Given that the IO tagset->timeout has been initialized to ctrl->io_timeout,
do we still need the line above?

>  	nvme_ns_add_to_ctrl_list(ns);
>  	mutex_unlock(&ctrl->namespaces_lock);
>  	synchronize_srcu(&ctrl->srcu);
> @@ -4930,7 +4931,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
>  	set->cmd_size = cmd_size;
>  	set->driver_data = ctrl;
>  	set->nr_hw_queues = ctrl->queue_count - 1;
> -	set->timeout = NVME_IO_TIMEOUT;
> +	set->timeout = ctrl->io_timeout;
>  	set->nr_maps = nr_maps;
>  	ret = blk_mq_alloc_tag_set(set);
>  	if (ret)
> @@ -5107,6 +5108,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
>  	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
>  	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
>  	ctrl->ka_last_check_time = jiffies;
> +	ctrl->io_timeout = NVME_IO_TIMEOUT;
>  
>  	BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
>  			PAGE_SIZE);
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index 9a5f28c5103c..ef390a020d8d 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -316,6 +316,7 @@ struct nvme_ctrl {
>  	u16 mtfa;
>  	u32 ctrl_config;
>  	u32 queue_count;
> +	u32 io_timeout;
>  
>  	u64 cap;
>  	u32 max_hw_sectors;
> diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
> index 149cd1ab3b3d..0536e4919aa7 100644
> --- a/drivers/nvme/host/sysfs.c
> +++ b/drivers/nvme/host/sysfs.c
> @@ -631,6 +631,43 @@ static struct device_attribute dev_attr_admin_timeout =  \
>  	__ATTR(admin_timeout, S_IRUGO | S_IWUSR, \
>  	nvme_admin_timeout_show, nvme_admin_timeout_store);
>  
> +static ssize_t nvme_io_timeout_show(struct device *dev,
> +			struct device_attribute *attr, char *buf)
> +{
> +	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
> +
> +	return sysfs_emit(buf, "%u\n", jiffies_to_msecs(ctrl->io_timeout));
> +}
> +
> +static ssize_t nvme_io_timeout_store(struct device *dev,
> +			struct device_attribute *attr,
> +			const char *buf, size_t count)
> +{
> +	struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
> +	struct nvme_ns *ns;
> +	u32 timeout;
> +	int err;
> +
> +	err = kstrtou32(buf, 10, &timeout);
> +	if (err || !timeout)
> +		return -EINVAL;
> +
> +	/* Take the namespaces_lock to avoid racing against nvme_alloc_ns() */
> +	mutex_lock(&ctrl->namespaces_lock);
> +
> +	ctrl->io_timeout = msecs_to_jiffies(timeout);
> +	list_for_each_entry(ns, &ctrl->namespaces, list)
> +		blk_queue_rq_timeout(ns->queue, ctrl->io_timeout);
> +
> +	mutex_unlock(&ctrl->namespaces_lock);
> +
> +	return count;
> +}
> +
> +static struct device_attribute dev_attr_io_timeout = \
> +	__ATTR(io_timeout, S_IRUGO | S_IWUSR, \
> +	nvme_io_timeout_show, nvme_io_timeout_store);
> +
>  #ifdef CONFIG_NVME_HOST_AUTH
>  static ssize_t nvme_ctrl_dhchap_secret_show(struct device *dev,
>  		struct device_attribute *attr, char *buf)
> @@ -773,6 +810,7 @@ static struct attribute *nvme_dev_attrs[] = {
>  	&dev_attr_cntrltype.attr,
>  	&dev_attr_dctype.attr,
>  	&dev_attr_admin_timeout.attr,
> +	&dev_attr_io_timeout.attr,
>  #ifdef CONFIG_NVME_HOST_AUTH
>  	&dev_attr_dhchap_secret.attr,
>  	&dev_attr_dhchap_ctrl_secret.attr,
> -- 
> 2.53.0
> 
> 



More information about the Linux-nvme mailing list