[PATCH v3 1/1] nvme: multipath: Implemented new iopolicy "queue-depth"

Nilay Shroff nilay at linux.ibm.com
Tue May 21 01:48:09 PDT 2024



On 5/21/24 01:50, John Meneghini wrote:
> From: "Ewan D. Milne" <emilne at redhat.com>
> 
> The round-robin path selector is inefficient in cases where there is a
> difference in latency between multiple active optimized paths.  In the
> presence of one or more high latency paths the round-robin selector
> continues to use the high latency path equally. This results in a bias
> towards the highest latency path and can cause a significant decrease
> in overall performance as IOs pile on the highest latency path. This
> problem is particularly acute with NVMe-oF controllers.
> 
> The queue-depth policy instead sends I/O requests down the path with the
> least amount of requests in its request queue. Paths with lower latency
> will clear requests more quickly and have fewer requests in their queues
> compared to higher latency paths. The goal of this path selector is to
> make more use of lower latency paths, which will bring down overall IO
> latency.
> 
> Signed-off-by: Ewan D. Milne <emilne at redhat.com>
> [tsong: patch developed by Thomas Song @ Pure Storage, fixed whitespace
>       and compilation warnings, updated MODULE_PARM description, and
>       fixed potential issue with ->current_path[] being used]
> Signed-off-by: Thomas Song <tsong at purestorage.com>
> [jmeneghi: various changes and improvements, addressed review comments]
> Signed-off-by: John Meneghini <jmeneghi at redhat.com>
> Link: https://lore.kernel.org/linux-nvme/20240509202929.831680-1-jmeneghi@redhat.com/
> Tested-by: Marco Patalano <mpatalan at redhat.com>
> Reviewed-by: Randy Jennings <randyj at redhat.com>
> Tested-by: Jyoti Rani <jani at purestorage.com>
> ---
>  drivers/nvme/host/core.c      |  2 +-
>  drivers/nvme/host/multipath.c | 86 +++++++++++++++++++++++++++++++++--
>  drivers/nvme/host/nvme.h      |  9 ++++
>  3 files changed, 92 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index a066429b790d..1dd7c52293ff 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -110,7 +110,7 @@ struct workqueue_struct *nvme_delete_wq;
>  EXPORT_SYMBOL_GPL(nvme_delete_wq);
>  
>  static LIST_HEAD(nvme_subsystems);
> -static DEFINE_MUTEX(nvme_subsystems_lock);
> +DEFINE_MUTEX(nvme_subsystems_lock);
>  
>  static DEFINE_IDA(nvme_instance_ida);
>  static dev_t nvme_ctrl_base_chr_devt;
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index 5397fb428b24..0e2b6e720e95 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -17,6 +17,7 @@ MODULE_PARM_DESC(multipath,
>  static const char *nvme_iopolicy_names[] = {
>  	[NVME_IOPOLICY_NUMA]	= "numa",
>  	[NVME_IOPOLICY_RR]	= "round-robin",
> +	[NVME_IOPOLICY_QD]      = "queue-depth",
>  };
>  
>  static int iopolicy = NVME_IOPOLICY_NUMA;
> @@ -29,6 +30,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
>  		iopolicy = NVME_IOPOLICY_NUMA;
>  	else if (!strncmp(val, "round-robin", 11))
>  		iopolicy = NVME_IOPOLICY_RR;
> +	else if (!strncmp(val, "queue-depth", 11))
> +		iopolicy = NVME_IOPOLICY_QD;
>  	else
>  		return -EINVAL;
>  
> @@ -43,7 +46,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
>  module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
>  	&iopolicy, 0644);
>  MODULE_PARM_DESC(iopolicy,
> -	"Default multipath I/O policy; 'numa' (default) or 'round-robin'");
> +	"Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
>  
>  void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
>  {
> @@ -127,6 +130,11 @@ void nvme_mpath_start_request(struct request *rq)
>  	struct nvme_ns *ns = rq->q->queuedata;
>  	struct gendisk *disk = ns->head->disk;
>  
> +	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) {
> +		atomic_inc(&ns->ctrl->nr_active);
> +		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
> +	}
> +
>  	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
>  		return;
>  
> @@ -140,8 +148,12 @@ void nvme_mpath_end_request(struct request *rq)
>  {
>  	struct nvme_ns *ns = rq->q->queuedata;
>  
> +	if ((nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE))
> +		atomic_dec_if_positive(&ns->ctrl->nr_active);
> +
>  	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
>  		return;
> +
>  	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
>  			 blk_rq_bytes(rq) >> SECTOR_SHIFT,
>  			 nvme_req(rq)->start_time);
> @@ -330,6 +342,40 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
>  	return found;
>  }
>  
I think you may also want to decrement the nr_active counter in case an in-flight
nvme request is cancelled. If the request is cancelled then nvme_mpath_end_request()
wouldn't be invoked, so you may want to decrement the nr_active counter from
nvme_cancel_request() as below:

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bf7615cb36ee..4fea7883ce8e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -497,8 +497,9 @@ EXPORT_SYMBOL_GPL(nvme_host_path_error);
 
 bool nvme_cancel_request(struct request *req, void *data)
 {
-       dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
-                               "Cancelling I/O %d", req->tag);
+       struct nvme_ctrl *ctrl = (struct nvme_ctrl *)data;
+
+       dev_dbg_ratelimited(ctrl->device, "Cancelling I/O %d", req->tag);
 
        /* don't abort one completed or idle request */
        if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
@@ -506,6 +507,8 @@ bool nvme_cancel_request(struct request *req, void *data)
 
        nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
        nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+       if ((nvme_req(req)->flags & NVME_MPATH_CNT_ACTIVE))
+               atomic_dec(&ctrl->nr_active);
        blk_mq_complete_request(req);
        return true;
 }

Please note that I am using atomic_dec() instead of atomic_dec_if_positive()
above for the same reason as Keith mentioned in his earlier mail.
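For illustration only (a minimal sketch, not part of either patch): if the
accounting ever became unbalanced, atomic_dec_if_positive() would silently
clamp the counter at zero and hide the bug, whereas a plain atomic_dec()
lets the counter go negative so the imbalance becomes visible:

        atomic_t nr_active = ATOMIC_INIT(1);

        atomic_dec_if_positive(&nr_active);  /* balanced decrement: 1 -> 0 */
        atomic_dec_if_positive(&nr_active);  /* spurious extra decrement: clamped at 0, bug hidden */
        atomic_dec(&nr_active);              /* same mistake here would read back -1, easy to spot */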

> +static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
> +{
> +	struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns;
> +	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
> +	unsigned int depth;
> +
> +	list_for_each_entry_rcu(ns, &head->list, siblings) {
> +		if (nvme_path_is_disabled(ns))
> +			continue;
> +
> +		depth = atomic_read(&ns->ctrl->nr_active);
> +
> +		switch (ns->ana_state) {
> +		case NVME_ANA_OPTIMIZED:
> +			if (depth < min_depth_opt) {
> +				min_depth_opt = depth;
> +				best_opt = ns;
> +			}
> +			break;
> +
> +		case NVME_ANA_NONOPTIMIZED:
> +			if (depth < min_depth_nonopt) {
> +				min_depth_nonopt = depth;
> +				best_nonopt = ns;
> +			}
> +			break;
> +		default:
> +			break;
> +		}
> +	}
> +
> +	return best_opt ? best_opt : best_nonopt;
> +}
> +
>  static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>  {
>  	return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE &&
> @@ -338,15 +384,27 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>  
>  inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>  {
> -	int node = numa_node_id();
> +	int iopolicy = READ_ONCE(head->subsys->iopolicy);
> +	int node;
>  	struct nvme_ns *ns;
>  
> +	/*
> +	 * queue-depth iopolicy does not need to reference ->current_path
> +	 * but round-robin needs the last path used to advance to the
> +	 * next one, and numa will continue to use the last path unless
> +	 * it is, or has become, non-optimized
> +	 */
> +	if (iopolicy == NVME_IOPOLICY_QD)
> +		return nvme_queue_depth_path(head);
> +
> +	node = numa_node_id();
>  	ns = srcu_dereference(head->current_path[node], &head->srcu);
>  	if (unlikely(!ns))
>  		return __nvme_find_path(head, node);
>  
> -	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
> +	if (iopolicy == NVME_IOPOLICY_RR)
>  		return nvme_round_robin_path(head, node, ns);
> +
>  	if (unlikely(!nvme_path_is_optimized(ns)))
>  		return __nvme_find_path(head, node);
>  	return ns;
> @@ -798,6 +856,25 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
>  			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
>  }
>  
> +void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys, int iopolicy)
> +{
> +	struct nvme_ctrl *ctrl;
> +	int old_iopolicy = READ_ONCE(subsys->iopolicy);
> +
> +	WRITE_ONCE(subsys->iopolicy, iopolicy);
> +
> +	mutex_lock(&nvme_subsystems_lock);
> +	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
> +		atomic_set(&ctrl->nr_active, 0);
> +		nvme_mpath_clear_ctrl_paths(ctrl);
> +	}
> +	mutex_unlock(&nvme_subsystems_lock);
> +
> +	pr_notice("%s: changed from %s to %s for subsysnqn %s\n", __func__,
> +			nvme_iopolicy_names[old_iopolicy], nvme_iopolicy_names[iopolicy],
> +			subsys->subnqn);
> +}
> +
>  static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
>  		struct device_attribute *attr, const char *buf, size_t count)
>  {
> @@ -807,7 +884,7 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
>  
>  	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
>  		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
> -			WRITE_ONCE(subsys->iopolicy, i);
> +			nvme_subsys_iopolicy_update(subsys, i);
>  			return count;
>  		}
>  	}
> @@ -905,6 +982,7 @@ void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
>  	mutex_init(&ctrl->ana_lock);
>  	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
>  	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
> +	atomic_set(&ctrl->nr_active, 0);
>  }
>  
>  int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index f243a5822c2b..f5557889b244 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -50,6 +50,8 @@ extern struct workqueue_struct *nvme_wq;
>  extern struct workqueue_struct *nvme_reset_wq;
>  extern struct workqueue_struct *nvme_delete_wq;
>  
> +extern struct mutex nvme_subsystems_lock;
> +
>  /*
>   * List of workarounds for devices that required behavior not specified in
>   * the standard.
> @@ -190,6 +192,7 @@ enum {
>  	NVME_REQ_CANCELLED		= (1 << 0),
>  	NVME_REQ_USERCMD		= (1 << 1),
>  	NVME_MPATH_IO_STATS		= (1 << 2),
> +	NVME_MPATH_CNT_ACTIVE	= (1 << 3),
>  };
>  
>  static inline struct nvme_request *nvme_req(struct request *req)
> @@ -354,6 +357,7 @@ struct nvme_ctrl {
>  	size_t ana_log_size;
>  	struct timer_list anatt_timer;
>  	struct work_struct ana_work;
> +	atomic_t nr_active;
>  #endif
>  
>  #ifdef CONFIG_NVME_HOST_AUTH
> @@ -402,6 +406,7 @@ static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
>  enum nvme_iopolicy {
>  	NVME_IOPOLICY_NUMA,
>  	NVME_IOPOLICY_RR,
> +	NVME_IOPOLICY_QD,
>  };
>  
>  struct nvme_subsystem {
> @@ -935,6 +940,7 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
>  void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
>  void nvme_mpath_start_request(struct request *rq);
>  void nvme_mpath_end_request(struct request *rq);
> +void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys, int iopolicy);
>  
>  static inline void nvme_trace_bio_complete(struct request *req)
>  {
> @@ -1034,6 +1040,9 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
>  {
>  	return false;
>  }
> +static inline void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys, int iopolicy)
> +{
> +}
>  #endif /* CONFIG_NVME_MULTIPATH */
>  
>  int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,

Thanks,
--Nilay


