[PATCH 5/5] nvmet_tcp: release queue from group

Wunderlich, Mark mark.wunderlich at intel.com
Thu Aug 27 21:01:03 EDT 2020


nvmet_tcp: release queue from group

The poll group continuously processes the active set
of its assigned queues via its work_list. While a queue
resides on the work_list, the queue's activate_mutex is
held.
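
For reference, a rough sketch of the group bookkeeping this relies
on. These fields come from the earlier patches in this series; the
sketch is abbreviated and any member not visible in the diff below
is an assumption of the sketch, not something this patch defines:

    #include <linux/list.h>
    #include <linux/mutex.h>

    /* Abbreviated sketch only -- not the full structures. */
    struct nvmet_tcp_queue_group {
            struct list_head   work_list;    /* queues actively polled by the group worker */
            struct list_head   release_list; /* queues handed off for release */
            unsigned long      deadline;     /* polling budget for the current pass */
    };

    /* Per-queue members referenced below:
     *   struct list_head  glist_entry;     links the queue on one of the lists above
     *   struct mutex      activate_mutex;  held while the queue sits on the work_list
     */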

The release changes introduced here support a graceful
transition of queue state from Live to Disconnecting,
and finally to Released, as part of queue removal.
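
Not part of this patch, but for orientation: the Disconnecting step
is entered from the existing socket callbacks, roughly as below
(paraphrased and abbreviated from the mainline nvmet_tcp_state_change()
callback; locking and error reporting omitted):

    static void example_sock_state_change(struct sock *sk)
    {
            struct nvmet_tcp_queue *queue = sk->sk_user_data;

            switch (sk->sk_state) {
            case TCP_FIN_WAIT1:
            case TCP_CLOSE_WAIT:
            case TCP_CLOSE:
                    /* Live -> Disconnecting; with this patch the group
                     * worker, not this context, later completes the
                     * Disconnecting -> Released step. */
                    nvmet_tcp_schedule_release_queue(queue);
                    break;
            }
    }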

Only the group worker migrates queues from the active
work_list to the release_list during this release process.
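
Restated as a standalone helper purely for illustration (the patch
open-codes this at the end of nvmet_tcp_io_work(), see the hunk
below): the sweep runs only in the group worker, after the polling
loop, so the activate_mutex taken when the queue joined the work_list
is still held and the list move cannot race with another context.

    static void example_sweep_disconnecting(struct nvmet_tcp_queue_group *group)
    {
            struct nvmet_tcp_queue *queue, *next;

            list_for_each_entry_safe(queue, next, &group->work_list, glist_entry) {
                    if (queue->state == NVMET_TCP_Q_DISCONNECTING)
                            nvmet_tcp_release_queue(queue);
            }
    }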

Signed-off-by: Mark Wunderlich <mark.wunderlich at intel.com>
---
 drivers/nvme/target/tcp.c |   30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 3955dbe38f0f..1aded6f3837d 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -96,6 +96,7 @@ enum nvmet_tcp_queue_state {
 	NVMET_TCP_Q_CONNECTING,
 	NVMET_TCP_Q_LIVE,
 	NVMET_TCP_Q_DISCONNECTING,
+	NVMET_TCP_Q_RELEASED,
 };
 
 struct nvmet_tcp_queue {
@@ -213,6 +214,24 @@ static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
 		!cmd->rbytes_done;
 }
 
+static inline void nvmet_tcp_release_queue(struct nvmet_tcp_queue *queue)
+{
+	struct nvmet_tcp_queue_group *group = queue->group;
+
+	spin_lock(&queue->state_lock);
+	queue->state = NVMET_TCP_Q_RELEASED;
+	spin_unlock(&queue->state_lock);
+	/*
+	 * The queue's activate_mutex is expected to be held and the
+	 * queue may still be on the work_list; the mutex remains held
+	 * while the queue moves from the work_list to the release_list.
+	 */
+	if (!list_empty_careful(&queue->glist_entry))
+		list_del_init(&queue->glist_entry);
+	list_add_tail(&queue->glist_entry, &group->release_list);
+	schedule_work(&queue->release_work);
+}
+
 static inline void nvmet_tcp_queue_work(struct nvmet_tcp_queue *queue)
 {
 	struct nvmet_tcp_queue_group *group;
@@ -1221,9 +1240,10 @@ static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
 {
 	spin_lock(&queue->state_lock);
-	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
+	if (queue->state != NVMET_TCP_Q_DISCONNECTING &&
+			queue->state != NVMET_TCP_Q_RELEASED) {
 		queue->state = NVMET_TCP_Q_DISCONNECTING;
-		schedule_work(&queue->release_work);
+		nvmet_tcp_queue_work(queue);
 	}
 	spin_unlock(&queue->state_lock);
 }
@@ -1257,6 +1277,11 @@ static void nvmet_tcp_io_work(struct work_struct *w)
 			break;
 	} while (!time_after(jiffies, deadline));
 
+	list_for_each_entry_safe(queue, next, &group->work_list, glist_entry) {
+		if (queue->state == NVMET_TCP_Q_DISCONNECTING)
+			nvmet_tcp_release_queue(queue);
+	}
+
 	if (grp_ops > 0) {
 		pending = true;
 		group->deadline = 0;
@@ -1408,6 +1433,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 		container_of(w, struct nvmet_tcp_queue, release_work);
 
 	nvmet_tcp_remove_from_group(queue);
+	list_del_init(&queue->glist_entry);
 
 	mutex_lock(&nvmet_tcp_queue_mutex);
 	list_del_init(&queue->queue_list);


