[PATCH] nvme-multipath: introduce service-time iopolicy
Guixin Liu
kanie at linux.alibaba.com
Wed Nov 6 02:34:10 PST 2024
The service-time policy dispatches I/O to the path with the smallest
total size, in bytes, of in-flight I/O, so that new requests are
steered to less-loaded paths when some paths are overloaded, thereby
achieving lower latency.
Signed-off-by: Guixin Liu <kanie@linux.alibaba.com>
---
 drivers/nvme/host/multipath.c | 53 ++++++++++++++++++++++++++++++++++-
 drivers/nvme/host/nvme.h      |  3 ++
 2 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6a15873055b9..8b99a93b83c7 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -18,6 +18,7 @@ static const char *nvme_iopolicy_names[] = {
 	[NVME_IOPOLICY_NUMA]	= "numa",
 	[NVME_IOPOLICY_RR]	= "round-robin",
 	[NVME_IOPOLICY_QD]	= "queue-depth",
+	[NVME_IOPOLICY_ST]	= "service-time",
 };
 
 static int iopolicy = NVME_IOPOLICY_NUMA;
@@ -32,6 +33,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
 		iopolicy = NVME_IOPOLICY_RR;
 	else if (!strncmp(val, "queue-depth", 11))
 		iopolicy = NVME_IOPOLICY_QD;
+	else if (!strncmp(val, "service-time", 12))
+		iopolicy = NVME_IOPOLICY_ST;
 	else
 		return -EINVAL;
 
@@ -46,7 +49,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
 module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
 	&iopolicy, 0644);
 MODULE_PARM_DESC(iopolicy,
-	"Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'");
+	"Default multipath I/O policy; 'numa' (default), 'round-robin', 'queue-depth' or 'service-time'");
 
 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
 {
@@ -136,6 +139,11 @@ void nvme_mpath_start_request(struct request *rq)
 		nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE;
 	}
 
+	if (READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_ST) {
+		atomic_add(blk_rq_bytes(rq), &ns->ctrl->inflight_size);
+		nvme_req(rq)->flags |= NVME_MPATH_CNT_IOSIZE;
+	}
+
 	if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq))
 		return;
 
@@ -152,6 +160,9 @@ void nvme_mpath_end_request(struct request *rq)
 	if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)
 		atomic_dec_if_positive(&ns->ctrl->nr_active);
 
+	if (nvme_req(rq)->flags & NVME_MPATH_CNT_IOSIZE)
+		atomic_sub(blk_rq_bytes(rq), &ns->ctrl->inflight_size);
+
 	if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS))
 		return;
 	bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
@@ -405,9 +416,48 @@ static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head)
 	return ns;
 }
 
+static struct nvme_ns *nvme_service_time_path(struct nvme_ns_head *head)
+{
+	struct nvme_ns *opt = NULL, *nonopt = NULL, *ns;
+	unsigned int min_inflight_nonopt = UINT_MAX;
+	unsigned int min_inflight_opt = UINT_MAX;
+	unsigned int inflight;
+
+	list_for_each_entry_rcu(ns, &head->list, siblings) {
+		if (nvme_path_is_disabled(ns))
+			continue;
+
+		inflight = atomic_read(&ns->ctrl->inflight_size);
+
+		switch (ns->ana_state) {
+		case NVME_ANA_OPTIMIZED:
+			if (inflight < min_inflight_opt) {
+				min_inflight_opt = inflight;
+				opt = ns;
+			}
+			break;
+		case NVME_ANA_NONOPTIMIZED:
+			if (inflight < min_inflight_nonopt) {
+				min_inflight_nonopt = inflight;
+				nonopt = ns;
+			}
+			break;
+		default:
+			break;
+		}
+
+		if (min_inflight_opt == 0)
+			return opt;
+	}
+
+	return opt ? opt : nonopt;
+}
+
 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 {
 	switch (READ_ONCE(head->subsys->iopolicy)) {
+	case NVME_IOPOLICY_ST:
+		return nvme_service_time_path(head);
 	case NVME_IOPOLICY_QD:
 		return nvme_queue_depth_path(head);
 	case NVME_IOPOLICY_RR:
@@ -1040,6 +1090,7 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
 	/* initialize this in the identify path to cover controller resets */
 	atomic_set(&ctrl->nr_active, 0);
+	atomic_set(&ctrl->inflight_size, 0);
 
 	if (!ctrl->max_namespaces ||
 	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 093cb423f536..b7160c8b6847 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -202,6 +202,7 @@ enum {
 	NVME_REQ_USERCMD		= (1 << 1),
 	NVME_MPATH_IO_STATS		= (1 << 2),
 	NVME_MPATH_CNT_ACTIVE		= (1 << 3),
+	NVME_MPATH_CNT_IOSIZE		= (1 << 4),
 };
 
 static inline struct nvme_request *nvme_req(struct request *req)
@@ -367,6 +368,7 @@ struct nvme_ctrl {
 	struct timer_list anatt_timer;
 	struct work_struct ana_work;
 	atomic_t nr_active;
+	atomic_t inflight_size;
 #endif
 
 #ifdef CONFIG_NVME_HOST_AUTH
@@ -416,6 +418,7 @@ enum nvme_iopolicy {
 	NVME_IOPOLICY_NUMA,
 	NVME_IOPOLICY_RR,
 	NVME_IOPOLICY_QD,
+	NVME_IOPOLICY_ST,
 };
 
 struct nvme_subsystem {
--
2.43.0