[PATCH 3/6] nvme-auth: use xarray instead of linked list

Hannes Reinecke hare at suse.de
Wed Nov 2 00:52:21 PDT 2022


The current design of holding the chap context is slightly awkward:
the context is allocated on demand, and we have to lock the list
when looking up a context, as we cannot otherwise know whether the
context has been allocated yet.

This patch moves the allocation of the chap context out of the
authentication sequence: the context is now allocated before
authentication starts and stored in an xarray. With that we can do
away with the list traversal and access the context directly
via the queue number.

Signed-off-by: Hannes Reinecke <hare at suse.de>
---
 drivers/nvme/host/auth.c | 85 +++++++++++++++++++++-------------------
 drivers/nvme/host/nvme.h |  2 +-
 2 files changed, 45 insertions(+), 42 deletions(-)

diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 719c514363ee..cece4f33e3a8 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -878,27 +878,35 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 
 	mutex_lock(&ctrl->dhchap_auth_mutex);
 	/* Check if the context is already queued */
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		if (chap->qid == qid) {
-			dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
+	chap = xa_load(&ctrl->dhchap_auth_xa, qid);
+	if (!chap) {
+		int ret;
+
+		chap = kzalloc(sizeof(*chap), GFP_NOIO);
+		if (!chap) {
 			mutex_unlock(&ctrl->dhchap_auth_mutex);
-			flush_work(&chap->auth_work);
-			__nvme_auth_reset(chap);
-			queue_work(nvme_wq, &chap->auth_work);
-			return 0;
+			dev_warn(ctrl->device,
+				 "qid %d: error allocation authentication", qid);
+			return -ENOMEM;
 		}
-	}
-	chap = kzalloc(sizeof(*chap), GFP_KERNEL);
-	if (!chap) {
+		chap->qid = qid;
+		chap->ctrl = ctrl;
+
+		INIT_WORK(&chap->auth_work, __nvme_auth_work);
+		ret = xa_insert(&ctrl->dhchap_auth_xa, qid, chap, GFP_NOIO);
 		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		return -ENOMEM;
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %d: error %d inserting authentication",
+				 qid, ret);
+			kfree(chap);
+			return ret;
+		}
+	} else {
+		mutex_unlock(&ctrl->dhchap_auth_mutex);
+		flush_work(&chap->auth_work);
+		__nvme_auth_reset(chap);
 	}
-	chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
-	chap->ctrl = ctrl;
-
-	INIT_WORK(&chap->auth_work, __nvme_auth_work);
-	list_add(&chap->entry, &ctrl->dhchap_auth_list);
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 	queue_work(nvme_wq, &chap->auth_work);
 	return 0;
 }
@@ -907,33 +915,28 @@ EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
 {
 	struct nvme_dhchap_queue_context *chap;
-	int ret;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		if (chap->qid != qid)
-			continue;
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		flush_work(&chap->auth_work);
-		ret = chap->error;
-		return ret;
+	chap = xa_load(&ctrl->dhchap_auth_xa, qid);
+	if (!chap) {
+		dev_warn(ctrl->device,
+			 "qid %d: authentication not initialized!",
+			 qid);
+		return -ENOENT;
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
-	return -ENXIO;
+	flush_work(&chap->auth_work);
+	return chap->error;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_wait);
 
 void nvme_auth_reset(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap) {
 		flush_work(&chap->auth_work);
 		__nvme_auth_reset(chap);
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_auth_reset);
 
@@ -979,7 +982,7 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
 
 void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 {
-	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+	xa_init_flags(&ctrl->dhchap_auth_xa, XA_FLAGS_ALLOC);
 	INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
 	mutex_init(&ctrl->dhchap_auth_mutex);
 	if (!ctrl->opts)
@@ -991,27 +994,27 @@ EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
 
 void nvme_auth_stop(struct nvme_ctrl *ctrl)
 {
-	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
 	cancel_work_sync(&ctrl->dhchap_auth_work);
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap)
 		cancel_work_sync(&chap->auth_work);
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_auth_stop);
 
 void nvme_auth_free(struct nvme_ctrl *ctrl)
 {
-	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
 	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
-		list_del_init(&chap->entry);
-		flush_work(&chap->auth_work);
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap) {
+		chap = xa_erase(&ctrl->dhchap_auth_xa, qid);
 		__nvme_auth_free(chap);
 	}
 	mutex_unlock(&ctrl->dhchap_auth_mutex);
+	xa_destroy(&ctrl->dhchap_auth_xa);
 	if (ctrl->host_key) {
 		nvme_auth_free_key(ctrl->host_key);
 		ctrl->host_key = NULL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 32d9dc2d957e..9b31fb8c1b27 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -338,7 +338,7 @@ struct nvme_ctrl {
 
 #ifdef CONFIG_NVME_AUTH
 	struct work_struct dhchap_auth_work;
-	struct list_head dhchap_auth_list;
+	struct xarray dhchap_auth_xa;
 	struct mutex dhchap_auth_mutex;
 	struct nvme_dhchap_key *host_key;
 	struct nvme_dhchap_key *ctrl_key;
-- 
2.35.3




More information about the Linux-nvme mailing list