[PATCH 2/2] nvme-auth: use xarray instead of linked list

Hannes Reinecke hare at suse.de
Fri Oct 28 06:50:27 PDT 2022


The current design of holding the chap context is slightly awkward:
the context is allocated on demand, and we have to lock the list
when looking up contexts as we do not know whether a context has
already been allocated.

This patch allocates the chap context before authentication is started
and stores it in an xarray, indexed by the queue number. With that we
can do away with the lock and access the context directly via the
queue number.
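
For reference, here is a minimal sketch (not part of the patch) of the
lookup-or-allocate pattern that the converted nvme_auth_negotiate()
follows; the demo_* names are illustrative only, and the xarray is
assumed to have been set up with xa_init():

#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_ctx {
	int qid;
	/* per-queue authentication state would live here */
};

struct demo_ctrl {
	struct xarray auth_xa;	/* chap contexts, indexed by queue number */
};

static int demo_negotiate(struct demo_ctrl *ctrl, int qid)
{
	struct demo_ctx *ctx;
	int ret;

	/* lockless lookup; the queue number is the xarray index */
	ctx = xa_load(&ctrl->auth_xa, qid);
	if (ctx)
		return 0;	/* re-use the existing context */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->qid = qid;

	/* xa_insert() returns -EBUSY if an entry already occupies the slot */
	ret = xa_insert(&ctrl->auth_xa, qid, ctx, GFP_KERNEL);
	if (ret) {
		kfree(ctx);
		return ret;
	}
	return 0;
}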

Signed-off-by: Hannes Reinecke <hare at suse.de>
---
 drivers/nvme/host/auth.c | 116 ++++++++++++++++++++++-----------------
 drivers/nvme/host/nvme.h |   3 +-
 2 files changed, 66 insertions(+), 53 deletions(-)
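
The teardown side mirrors nvme_auth_free() below: iterate, erase each
entry, and destroy the array. Continuing the illustrative demo_* sketch
above (again a sketch under the same assumptions, not driver code):

static void demo_free(struct demo_ctrl *ctrl)
{
	struct demo_ctx *ctx;
	unsigned long qid;

	/* it is safe to erase the current entry while iterating */
	xa_for_each(&ctrl->auth_xa, qid, ctx) {
		/* xa_erase() returns the entry that was stored at @qid */
		ctx = xa_erase(&ctrl->auth_xa, qid);
		kfree(ctx);
	}
	xa_destroy(&ctrl->auth_xa);
}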

diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index b68fb2c764f6..7b974bd0fa64 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -72,10 +72,12 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 				     0, flags, nvme_max_retries);
 	if (ret > 0)
 		dev_warn(ctrl->device,
-			"qid %d auth_send failed with status %d\n", qid, ret);
+			"qid %d auth_%s failed with status %d\n",
+			 qid, auth_send ? "send" : "recv", ret);
 	else if (ret < 0)
 		dev_err(ctrl->device,
-			"qid %d auth_send failed with error %d\n", qid, ret);
+			"qid %d auth_%s failed with error %d\n",
+			qid, auth_send ? "send" : "recv", ret);
 	return ret;
 }
 
@@ -870,29 +872,42 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
 		return -ENOKEY;
 	}
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	/* Check if the context is already queued */
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		if (chap->qid == qid) {
-			dev_dbg(ctrl->device, "qid %d: re-using context\n", qid);
-			mutex_unlock(&ctrl->dhchap_auth_mutex);
-			flush_work(&chap->auth_work);
-			__nvme_auth_reset(chap);
-			queue_work(nvme_wq, &chap->auth_work);
-			return 0;
-		}
-	}
-	chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+	if (qid == NVME_QID_ANY)
+		qid = 0;
+	chap = xa_load(&ctrl->dhchap_auth_xa, qid);
 	if (!chap) {
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		return -ENOMEM;
-	}
-	chap->qid = (qid == NVME_QID_ANY) ? 0 : qid;
-	chap->ctrl = ctrl;
+		int ret;
+
+		chap = kzalloc(sizeof(*chap), GFP_KERNEL);
+		if (!chap) {
+			dev_warn(ctrl->device,
+				 "qid %d: error allocating authentication context\n", qid);
+			return -ENOMEM;
+		}
+		chap->qid = qid;
+		chap->ctrl = ctrl;
 
-	INIT_WORK(&chap->auth_work, __nvme_auth_work);
-	list_add(&chap->entry, &ctrl->dhchap_auth_list);
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
+		INIT_WORK(&chap->auth_work, __nvme_auth_work);
+		ret = xa_insert(&ctrl->dhchap_auth_xa, qid, chap, GFP_KERNEL);
+		if (ret) {
+			dev_warn(ctrl->device,
+				 "qid %d: error %d inserting authentication\n",
+				 qid, ret);
+			kfree(chap);
+			return ret;
+		}
+	} else {
+		if (chap->qid != qid) {
+			dev_warn(ctrl->device,
+				 "qid %d: authentication qid mismatch (%d)!\n",
+				 chap->qid, qid);
+			chap = xa_erase(&ctrl->dhchap_auth_xa, qid);
+			__nvme_auth_free(chap);
+			return -ENOENT;
+		}
+		flush_work(&chap->auth_work);
+		__nvme_auth_reset(chap);
+	}
 	queue_work(nvme_wq, &chap->auth_work);
 	return 0;
 }
@@ -901,33 +916,35 @@ EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
 {
 	struct nvme_dhchap_queue_context *chap;
-	int ret;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		if (chap->qid != qid)
-			continue;
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
-		flush_work(&chap->auth_work);
-		ret = chap->error;
-		return ret;
+	if (qid == NVME_QID_ANY)
+		qid = 0;
+	chap = xa_load(&ctrl->dhchap_auth_xa, qid);
+	if (!chap) {
+		dev_warn(ctrl->device,
+			 "qid %d: authentication not initialized!\n",
+			 qid);
+		return -ENOENT;
+	} else if (chap->qid != qid) {
+		dev_warn(ctrl->device,
+			 "qid %d: authentication qid mismatch (%d)!\n",
+			 chap->qid, qid);
+		return -ENOENT;
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
-	return -ENXIO;
+	flush_work(&chap->auth_work);
+	return chap->error;
 }
 EXPORT_SYMBOL_GPL(nvme_auth_wait);
 
 void nvme_auth_reset(struct nvme_ctrl *ctrl)
 {
 	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry(chap, &ctrl->dhchap_auth_list, entry) {
-		mutex_unlock(&ctrl->dhchap_auth_mutex);
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap) {
 		flush_work(&chap->auth_work);
 		__nvme_auth_reset(chap);
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_auth_reset);
 
@@ -947,7 +964,7 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
 	ret = nvme_auth_wait(ctrl, 0);
 	if (ret) {
 		dev_warn(ctrl->device,
-			 "qid 0: authentication failed\n");
+			 "qid 0: authentication failed with %d\n", ret);
 		return;
 	}
 
@@ -969,9 +986,8 @@ static void nvme_dhchap_auth_work(struct work_struct *work)
 
 void nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
 {
-	INIT_LIST_HEAD(&ctrl->dhchap_auth_list);
+	xa_init_flags(&ctrl->dhchap_auth_xa, XA_FLAGS_ALLOC);
 	INIT_WORK(&ctrl->dhchap_auth_work, nvme_dhchap_auth_work);
-	mutex_init(&ctrl->dhchap_auth_mutex);
 	if (!ctrl->opts)
 		return;
 	nvme_auth_generate_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
@@ -981,27 +997,25 @@ EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
 
 void nvme_auth_stop(struct nvme_ctrl *ctrl)
 {
-	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
 	cancel_work_sync(&ctrl->dhchap_auth_work);
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry)
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap)
 		cancel_work_sync(&chap->auth_work);
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_auth_stop);
 
 void nvme_auth_free(struct nvme_ctrl *ctrl)
 {
-	struct nvme_dhchap_queue_context *chap = NULL, *tmp;
+	struct nvme_dhchap_queue_context *chap;
+	unsigned long qid;
 
-	mutex_lock(&ctrl->dhchap_auth_mutex);
-	list_for_each_entry_safe(chap, tmp, &ctrl->dhchap_auth_list, entry) {
-		list_del_init(&chap->entry);
-		flush_work(&chap->auth_work);
+	xa_for_each(&ctrl->dhchap_auth_xa, qid, chap) {
+		chap = xa_erase(&ctrl->dhchap_auth_xa, qid);
 		__nvme_auth_free(chap);
 	}
-	mutex_unlock(&ctrl->dhchap_auth_mutex);
+	xa_destroy(&ctrl->dhchap_auth_xa);
 	if (ctrl->host_key) {
 		nvme_auth_free_key(ctrl->host_key);
 		ctrl->host_key = NULL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 32d9dc2d957e..d0b2d3e4b63f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -338,8 +338,7 @@ struct nvme_ctrl {
 
 #ifdef CONFIG_NVME_AUTH
 	struct work_struct dhchap_auth_work;
-	struct list_head dhchap_auth_list;
-	struct mutex dhchap_auth_mutex;
+	struct xarray dhchap_auth_xa;
 	struct nvme_dhchap_key *host_key;
 	struct nvme_dhchap_key *ctrl_key;
 	u16 transaction;
-- 
2.35.3



