[PATCH] nvme/multipath: Fix RCU list traversal to use SRCU primitive
Breno Leitao
leitao@debian.org
Tue Nov 5 06:42:46 PST 2024
The code currently uses list_for_each_entry_rcu() while holding an SRCU
lock, triggering false positive warnings when lockdep's RCU list
checking (CONFIG_PROVE_RCU_LIST=y, which depends on CONFIG_PROVE_RCU) is
enabled:

drivers/nvme/host/multipath.c:168 RCU-list traversed in non-reader section!!
drivers/nvme/host/multipath.c:227 RCU-list traversed in non-reader section!!
drivers/nvme/host/multipath.c:260 RCU-list traversed in non-reader section!!

While the list is properly protected by the SRCU lock, the traversal
uses the wrong primitive, so lockdep cannot see the read-side critical
section that protects it. Replace list_for_each_entry_rcu() with
list_for_each_entry_srcu(), passing srcu_read_lock_held() as the lockdep
condition, to correctly annotate the SRCU-based protection and eliminate
the false positive warnings.
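
For reference, both primitives expand to the same list walk; they differ
only in the lockdep assertion they make. A minimal sketch of the pattern
(hypothetical code, not taken from the driver; demo_entry, demo_walk()
and demo_work() are made up for illustration):

#include <linux/rculist.h>
#include <linux/srcu.h>

struct demo_entry {
	struct list_head node;
};

static void demo_work(struct demo_entry *e)
{
}

static void demo_walk(struct srcu_struct *srcu, struct list_head *list)
{
	struct demo_entry *e;
	int idx;

	idx = srcu_read_lock(srcu);

	/*
	 * Wrong under SRCU: with no condition argument the macro asserts
	 * rcu_read_lock_any_held(), which is false here, so
	 * CONFIG_PROVE_RCU_LIST emits "RCU-list traversed in non-reader
	 * section!!" even though the traversal is safe.
	 */
	list_for_each_entry_rcu(e, list, node)
		demo_work(e);

	/* Right: tell lockdep which read-side lock protects the walk. */
	list_for_each_entry_srcu(e, list, node,
				 srcu_read_lock_held(srcu))
		demo_work(e);

	srcu_read_unlock(srcu, idx);
}

Note that srcu_read_lock_held() stays cheap in production builds: when
lockdep is not configured it simply returns 1, so the extra argument
costs nothing outside debug kernels.
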
Signed-off-by: Breno Leitao <leitao@debian.org>
Fixes: be647e2c76b2 ("nvme: use srcu for iterating namespace list")
---
drivers/nvme/host/multipath.c | 21 ++++++++++++++-------
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 6a15873055b9513f827709ad780bc7e18f75e439..f25582e4d88bb04c0f866ada11735ce562f227d4 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -165,7 +165,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+				 srcu_read_lock_held(&ctrl->srcu)) {
 		if (!ns->head->disk)
 			continue;
 		kblockd_schedule_work(&ns->head->requeue_work);
@@ -209,7 +210,8 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+				 srcu_read_lock_held(&ctrl->srcu)) {
 		nvme_mpath_clear_current_path(ns);
 		kblockd_schedule_work(&ns->head->requeue_work);
 	}
@@ -224,7 +226,8 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (capacity != get_capacity(ns->disk))
 			clear_bit(NVME_NS_READY, &ns->flags);
 	}
@@ -257,7 +260,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
 	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
 
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (nvme_path_is_disabled(ns))
 			continue;
 
@@ -356,7 +360,8 @@ static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
 	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
 	unsigned int depth;
 
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (nvme_path_is_disabled(ns))
 			continue;
 
@@ -424,7 +429,8 @@ static bool nvme_available_path(struct nvme_ns_head *head)
 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
 		return NULL;
 
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
 			continue;
 		switch (nvme_ctrl_state(ns->ctrl)) {
@@ -785,7 +791,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 		return 0;
 
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+				 srcu_read_lock_held(&ctrl->srcu)) {
 		unsigned nsid;
 again:
 		nsid = le32_to_cpu(desc->nsids[n]);
---
base-commit: 822eca3e06d039ea9ebf46c6192af06661f50d58
change-id: 20241104-nvme_multipath_rcu-9dba03fc4b02
Best regards,
--
Breno Leitao <leitao@debian.org>