[RFC PATCHv2 4/4] nvme-multipath: add debugfs attribute for adaptive I/O policy stat

Nilay Shroff nilay at linux.ibm.com
Thu Oct 9 03:05:26 PDT 2025


This commit introduces a new debugfs attribute, "adaptive_stat", under
both per-path and head debugfs directories (defined under /sys/kernel/
debug/block/). This attribute provides visibility into the internal
state of the adaptive I/O policy to aid in debugging and performance
analysis.

For per-path entries, "adaptive_stat" reports statistics for the
corresponding path -- I/O weight, selection count, processed samples,
and ignored samples -- separately for read and write I/O.

For head entries, it reports per-CPU statistics for each reachable path,
covering both read and write I/O: I/O weight, path score, smoothed
(EWMA) latency, selection count, processed samples, and ignored samples.

These additions enhance observability of the adaptive I/O path selection
behavior and help diagnose imbalance or instability in multipath
performance.

Signed-off-by: Nilay Shroff <nilay at linux.ibm.com>
---
 drivers/nvme/host/core.c      |   3 +
 drivers/nvme/host/debugfs.c   | 117 ++++++++++++++++++++++++++++++++++
 drivers/nvme/host/multipath.c |   2 +
 3 files changed, 122 insertions(+)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c7f21823c137..5db716186ec0 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4184,6 +4184,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
 	if (device_add_disk(ctrl->device, ns->disk, nvme_ns_attr_groups))
 		goto out_cleanup_ns_from_list;
 
+	nvme_debugfs_register(ns->disk);
+
 	if (!nvme_ns_head_multipath(ns->head))
 		nvme_add_ns_cdev(ns);
 
@@ -4271,6 +4273,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
 	nvme_mpath_remove_sysfs_link(ns);
 
+	nvme_debugfs_unregister(ns->disk);
 	del_gendisk(ns->disk);
 
 	mutex_lock(&ns->ctrl->namespaces_lock);
diff --git a/drivers/nvme/host/debugfs.c b/drivers/nvme/host/debugfs.c
index 5c441779554f..2e7ebb0199bf 100644
--- a/drivers/nvme/host/debugfs.c
+++ b/drivers/nvme/host/debugfs.c
@@ -89,12 +89,129 @@ static const struct file_operations nvme_debugfs_fops = {
 	.release = nvme_debugfs_release,
 };
 
+static void *nvme_mpath_adp_stat_start(struct seq_file *m, loff_t *pos)
+{
+	struct nvme_ns *ns;
+	struct nvme_debugfs_ctx *ctx = m->private;
+	struct nvme_ns_head *head = ctx->data;
+
+	/* Remember srcu index, so we can unlock later. */
+	ctx->srcu_idx = srcu_read_lock(&head->srcu);
+	ns = list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
+
+	while (*pos && ns) {
+		ns = list_next_or_null_rcu(&head->list, &ns->siblings,
+				struct nvme_ns, siblings);
+		(*pos)--;
+	}
+
+	return ns;
+}
+
+static void *nvme_mpath_adp_stat_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct nvme_ns *ns = v;
+	struct nvme_debugfs_ctx *ctx = m->private;
+	struct nvme_ns_head *head = ctx->data;
+
+	(*pos)++;
+
+	return list_next_or_null_rcu(&head->list, &ns->siblings,
+			struct nvme_ns, siblings);
+}
+
+static void nvme_mpath_adp_stat_stop(struct seq_file *m, void *v)
+{
+	struct nvme_debugfs_ctx *ctx = m->private;
+	struct nvme_ns_head *head = ctx->data;
+	int srcu_idx = ctx->srcu_idx;
+
+	srcu_read_unlock(&head->srcu, srcu_idx);
+}
+
+static int nvme_mpath_adp_stat_show(struct seq_file *m, void *v)
+{
+#ifdef CONFIG_NVME_MULTIPATH
+	int cpu, rw;
+	struct nvme_path_stat *stat;
+	struct nvme_ns *ns = v;
+
+	seq_printf(m, "%s:\n", ns->disk->disk_name);
+	for_each_online_cpu(cpu) {
+		seq_printf(m, "cpu %d : ", cpu);
+		for (rw = 0; rw < 2; rw++) {
+			stat = &per_cpu_ptr(ns->info, cpu)[rw].stat;
+			seq_printf(m, "%u %llu %llu %llu %llu %llu ",
+				stat->weight, stat->score,
+				stat->slat_ns, stat->sel,
+				stat->nr_samples, stat->nr_ignored);
+		}
+		seq_putc(m, '\n');
+	}
+#endif
+	return 0;
+}
+
+static const struct seq_operations nvme_mpath_adp_stat_seq_ops = {
+	.start = nvme_mpath_adp_stat_start,
+	.next  = nvme_mpath_adp_stat_next,
+	.stop  = nvme_mpath_adp_stat_stop,
+	.show  = nvme_mpath_adp_stat_show
+};
 
 static const struct nvme_debugfs_attr nvme_mpath_debugfs_attrs[] = {
+	{"adaptive_stat", 0400, .seq_ops = &nvme_mpath_adp_stat_seq_ops},
 	{},
 };
 
+static void adp_stat_read_all(struct nvme_ns *ns, struct nvme_path_stat *batch)
+{
+#ifdef CONFIG_NVME_MULTIPATH
+	int rw, cpu;
+	u32 ncpu[2] = {0};
+	struct nvme_path_stat *stat;
+
+	for_each_online_cpu(cpu) {
+		for (rw = 0; rw < 2; rw++) {
+			stat = &per_cpu_ptr(ns->info, cpu)[rw].stat;
+			if (stat->weight) {
+				batch[rw].weight += stat->weight;
+				batch[rw].sel += stat->sel;
+				batch[rw].nr_samples += stat->nr_samples;
+				batch[rw].nr_ignored += stat->nr_ignored;
+				ncpu[rw]++;
+			}
+		}
+	}
+
+	for (rw = 0; rw < 2; rw++) {
+		if (!ncpu[rw])
+			continue;
+		batch[rw].weight = DIV_U64_ROUND_CLOSEST(batch[rw].weight,
+				ncpu[rw]);
+	}
+#endif
+}
+static int nvme_ns_adp_stat_show(void *data, struct seq_file *m)
+{
+	struct nvme_path_stat stat[2] = {0};
+	struct nvme_ns *ns = (struct nvme_ns *)data;
+
+	adp_stat_read_all(ns, stat);
+	seq_printf(m, "%u %llu %llu %llu %u %llu %llu %llu\n",
+		stat[READ].weight,
+		stat[READ].sel,
+		stat[READ].nr_samples,
+		stat[READ].nr_ignored,
+		stat[WRITE].weight,
+		stat[WRITE].sel,
+		stat[WRITE].nr_samples,
+		stat[WRITE].nr_ignored);
+	return 0;
+}
+
 static const struct nvme_debugfs_attr nvme_ns_debugfs_attrs[] = {
+	{"adaptive_stat", 0400, nvme_ns_adp_stat_show},
 	{},
 };
 
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 9ecdaca5e9a0..26495696e24e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -1059,6 +1059,7 @@ static void nvme_remove_head(struct nvme_ns_head *head)
 
 		nvme_cdev_del(&head->cdev, &head->cdev_device);
 		synchronize_srcu(&head->srcu);
+		nvme_debugfs_unregister(head->disk);
 		del_gendisk(head->disk);
 	}
 	nvme_put_ns_head(head);
@@ -1162,6 +1163,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 		}
 		nvme_add_ns_head_cdev(head);
 		kblockd_schedule_work(&head->partition_scan_work);
+		nvme_debugfs_register(head->disk);
 	}
 
 	nvme_mpath_add_sysfs_link(ns->head);
-- 
2.51.0




More information about the Linux-nvme mailing list