[RFC PATCH v2 1/1] nvme-multipath: Add sysfs attributes for showing multipath info

Nilay Shroff nilay at linux.ibm.com
Fri Aug 9 10:29:57 PDT 2024


NVMe native multipath supports different I/O policies for selecting
the I/O path, but we currently have no visibility into which path the
multipath code selects for forwarding I/O. This patch adds that
visibility through three new sysfs attribute files named "numa",
"round_robin", and "queue_depth", created under each namespace head
block node at /sys/block/<nvmeXnY>/multipath. As the names suggest,
the "numa" attribute file shows the multipath I/O information for the
numa policy, the "round_robin" attribute file shows it for the
round-robin policy, and the "queue_depth" attribute file shows it for
the queue-depth I/O policy.

To let sysfs.c walk the round-robin rotation order, nvme_next_ns() in
multipath.c is made non-static and declared in nvme.h.
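
For example, on a hypothetical system where the namespace head nvme1n1
is reachable through two paths, nvme1c1n1 and nvme1c3n1, reading these
attribute files may produce output along the following lines (all path
names and queue-depth values here are illustrative):

  $ cat /sys/block/nvme1n1/multipath/numa
  node0: nvme1c1n1
  node1: nvme1c3n1

  $ cat /sys/block/nvme1n1/multipath/round_robin
  node0: nvme1c3n1 nvme1c1n1
  node1: nvme1c1n1 nvme1c3n1

  $ cat /sys/block/nvme1n1/multipath/queue_depth
  nvme1c1n1 3
  nvme1c3n1 2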

Signed-off-by: Nilay Shroff <nilay at linux.ibm.com>
---
 drivers/nvme/host/multipath.c |   2 +-
 drivers/nvme/host/nvme.h      |   1 +
 drivers/nvme/host/sysfs.c     | 127 ++++++++++++++++++++++++++++++++++
 3 files changed, 129 insertions(+), 1 deletion(-)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 91d9eb3c22ef..1a5ca3e7cd5c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -292,7 +292,7 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 	return found;
 }
 
-static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
+struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
 		struct nvme_ns *ns)
 {
 	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f900e44243ae..3fe5fac5067f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -955,6 +955,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl);
 bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
 void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
+struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, struct nvme_ns *ns);
 void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
 void nvme_mpath_start_request(struct request *rq);
 void nvme_mpath_end_request(struct request *rq);
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index ba05faaac562..06f30c65ee27 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -294,6 +294,132 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
 	return a->mode;
 }
 
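+/*
+ * For the queue-depth iopolicy, report the current number of in-flight
+ * I/Os (ctrl->nr_active) on each path to this namespace head.
+ */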
+static ssize_t queue_depth_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct nvme_ns *ns;
+	struct nvme_ns_head *head;
+	int srcu_idx, len = 0;
+
+	head = dev_to_ns_head(dev);
+
+	srcu_idx = srcu_read_lock(&head->srcu);
+
+	list_for_each_entry_rcu(ns, &head->list, siblings) {
+		len += sysfs_emit_at(buf, len, "%s %d\n",
+			    ns->disk->disk_name,
+			    atomic_read(&ns->ctrl->nr_active));
+	}
+
+	srcu_read_unlock(&head->srcu, srcu_idx);
+
+	return len;
+}
+static DEVICE_ATTR_RO(queue_depth);
+
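+/*
+ * For the numa iopolicy, report the I/O path currently chosen for each
+ * NUMA node.
+ */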
+static ssize_t numa_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct nvme_ns_head *head;
+	struct nvme_ns *ns;
+	int srcu_idx, node, len = 0;
+
+	head = dev_to_ns_head(dev);
+
+	srcu_idx = srcu_read_lock(&head->srcu);
+	for_each_node(node) {
+		ns = srcu_dereference(head->current_path[node], &head->srcu);
+		if (unlikely(!ns))
+			continue;
+
+		len += sysfs_emit_at(buf, len, "node%d: %s\n",
+				node, ns->disk->disk_name);
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+
+	return len;
+}
+static DEVICE_ATTR_RO(numa);
+
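+/*
+ * For the round-robin iopolicy, report, per NUMA node, the rotation
+ * order of paths starting after the currently selected path.
+ */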
+static ssize_t round_robin_show(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	struct nvme_ns *ns, *tmp_ns;
+	struct nvme_ns_head *head;
+	int srcu_idx, node, len = 0;
+
+	head = dev_to_ns_head(dev);
+
+	srcu_idx = srcu_read_lock(&head->srcu);
+	for_each_node(node) {
+		ns = tmp_ns = srcu_dereference(head->current_path[node],
+				&head->srcu);
+
+		if (unlikely(!ns))
+			continue;
+
+		len += sysfs_emit_at(buf, len, "node%d: ", node);
+		do {
+			ns = nvme_next_ns(head, ns);
+			if (!ns)
+				break;
+			len += sysfs_emit_at(buf, len, "%s ",
+					ns->disk->disk_name);
+		} while (ns != tmp_ns);
+
+		len += sysfs_emit_at(buf, len, "\n");
+	}
+	srcu_read_unlock(&head->srcu, srcu_idx);
+
+	return len;
+}
+static DEVICE_ATTR_RO(round_robin);
+
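+/*
+ * The "multipath" attribute group is shown only on the namespace head
+ * disk node and stays hidden on the per-path disk nodes.
+ */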
+static umode_t nvme_ns_mpath_attrs_are_visible(struct kobject *kobj,
+		struct attribute *attr, int n)
+{
+	struct device *dev;
+	struct gendisk *disk;
+
+	dev = container_of(kobj, struct device, kobj);
+	disk = dev_to_disk(dev);
+
+	if (!nvme_disk_is_ns_head(disk))
+		return SYSFS_GROUP_INVISIBLE;
+
+	return attr->mode;
+}
+
+static struct attribute *nvme_ns_mpath_attrs[] = {
+	&dev_attr_queue_depth.attr,
+	&dev_attr_numa.attr,
+	&dev_attr_round_robin.attr,
+	NULL
+};
+
+static const struct attribute_group nvme_ns_mpath_attr_group = {
+	.name           = "multipath",
+	.attrs		= nvme_ns_mpath_attrs,
+	.is_visible     = nvme_ns_mpath_attrs_are_visible,
+};
+
 static const struct attribute_group nvme_ns_attr_group = {
 	.attrs		= nvme_ns_attrs,
 	.is_visible	= nvme_ns_attrs_are_visible,
@@ -301,6 +427,7 @@ static const struct attribute_group nvme_ns_attr_group = {
 
 const struct attribute_group *nvme_ns_attr_groups[] = {
 	&nvme_ns_attr_group,
+	&nvme_ns_mpath_attr_group,
 	NULL,
 };
 
-- 
2.45.2