[PATCH 2/5] nvmet_tcp: introduce poll groups
Wunderlich, Mark
mark.wunderlich at intel.com
Thu Aug 27 21:00:56 EDT 2020
To better support a number of queue connections that can
exceed the number of available system cores, introduce the
'poll group'.
A poll group provides a service point at which a single
associated worker entity services the set of queues assigned
to the group's work list.
The queues on the work list are polled evenly during the
group's poll time period.
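For illustration, a rough sketch of how the group worker could
make one even pass over its work_list per iteration, reusing the
driver's existing try_recv/try_send budgets; the glist_entry list
member is an assumption of this sketch (error handling elided),
not something added by this patch:

static void nvmet_tcp_group_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue_group *group =
		container_of(w, struct nvmet_tcp_queue_group, io_work);
	struct nvmet_tcp_queue *queue;
	bool pending;
	int ops = 0;

	do {
		pending = false;
		/* one even pass over every queue on the group's work_list */
		list_for_each_entry(queue, &group->work_list, glist_entry) {
			if (nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops) > 0)
				pending = true;
			if (nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops) > 0)
				pending = true;
		}
	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/* re-arm on the group's CPU if any queue still has work */
	if (pending)
		queue_work_on(group->cpu, nvmet_tcp_wq, &group->io_work);
}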
During queue connection teardown, an 'active' queue is first
moved from the work_list to the group's release_list before
it is removed from the group. A per-group mutex is provided
for controlled movement of queues between the group's lists.
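A minimal sketch of the teardown-side movement; the group_mutex,
the queue->group back-pointer and the glist_entry member are
assumptions of this sketch and do not appear in this patch:

static void nvmet_tcp_group_release_queue(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_queue_group *group = queue->group;

	/* park the queue on release_list so the poll pass stops seeing it */
	mutex_lock(&group->group_mutex);
	list_move_tail(&queue->glist_entry, &group->release_list);
	mutex_unlock(&group->group_mutex);
}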
Subsequent patches will build upon the 'poll group'; a
hypothetical sketch of how queues might be placed onto groups
follows the diff below.
Signed-off-by: Mark Wunderlich <mark.wunderlich at intel.com>
---
drivers/nvme/target/tcp.c | 48 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 46 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 911db3f170df..3e3c217c77d4 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -146,6 +146,21 @@ struct nvmet_tcp_port {
void (*data_ready)(struct sock *);
};
+struct nvmet_tcp_group_napi {
+ unsigned int napi_id;
+ unsigned int napi_cnt;
+};
+
+#define NVMET_TCP_GROUP_NAPI_LIMIT 8
+struct nvmet_tcp_queue_group {
+ struct nvmet_tcp_group_napi napi[NVMET_TCP_GROUP_NAPI_LIMIT];
+ struct work_struct io_work;
+ int cpu;
+ struct list_head work_list;
+ struct list_head release_list;
+};
+static struct nvmet_tcp_queue_group *nvmet_tcp_queue_groups;
+
static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
@@ -1706,6 +1721,27 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
}
}
+static bool alloc_group_list(void)
+{
+ struct nvmet_tcp_queue_group *group;
+ int i, nr_grps = num_possible_cpus();
+
+ nvmet_tcp_queue_groups = kcalloc(nr_grps, sizeof(struct nvmet_tcp_queue_group),
+ GFP_KERNEL);
+ if (!nvmet_tcp_queue_groups)
+ return false;
+
+ /* initialize one group per possible CPU */
+ for (i = 0; i < nr_grps; i++) {
+ group = nvmet_tcp_queue_groups + i;
+ group->cpu = i;
+ INIT_LIST_HEAD(&group->work_list);
+ INIT_LIST_HEAD(&group->release_list);
+ INIT_WORK(&group->io_work, nvmet_tcp_io_work);
+ }
+ return true;
+}
+
static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
@@ -1722,10 +1758,15 @@ static int __init nvmet_tcp_init(void)
{
int ret;
- nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
- if (!nvmet_tcp_wq)
+ if (!alloc_group_list())
return -ENOMEM;
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 0);
+ if (!nvmet_tcp_wq) {
+ ret = -ENOMEM;
+ goto err_wq;
+ }
+
ret = nvmet_register_transport(&nvmet_tcp_ops);
if (ret)
goto err;
@@ -1733,6 +1774,8 @@ static int __init nvmet_tcp_init(void)
return 0;
err:
destroy_workqueue(nvmet_tcp_wq);
+err_wq:
+ kfree(nvmet_tcp_queue_groups);
return ret;
}
@@ -1750,6 +1793,7 @@ static void __exit nvmet_tcp_exit(void)
flush_scheduled_work();
destroy_workqueue(nvmet_tcp_wq);
+ kfree(nvmet_tcp_queue_groups);
}
module_init(nvmet_tcp_init);
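For context, later patches in the series are expected to attach
each accepted queue to one of these groups. A purely hypothetical
placement sketch, keyed off the socket's incoming CPU (the
group_mutex, queue->group and glist_entry names are assumptions,
as above):

static void nvmet_tcp_assign_queue_to_group(struct nvmet_tcp_queue *queue)
{
	int cpu = queue->sock->sk->sk_incoming_cpu;
	struct nvmet_tcp_queue_group *group;

	if (cpu < 0)
		cpu = raw_smp_processor_id();
	group = &nvmet_tcp_queue_groups[cpu % num_possible_cpus()];

	queue->group = group;
	mutex_lock(&group->group_mutex);
	list_add_tail(&queue->glist_entry, &group->work_list);
	mutex_unlock(&group->group_mutex);

	/* kick the group worker on its assigned CPU */
	queue_work_on(group->cpu, nvmet_tcp_wq, &group->io_work);
}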