[PATCH 3/3] nvmet: replace transports array with a linked list
Max Gurtovoy
mgurtovoy at nvidia.com
Sat Sep 20 16:49:43 PDT 2025
The nvmet_transports array was sized to NVMF_TRTYPE_MAX (255) pointer
slots, although only a handful of transport types (FC, loop, RDMA, TCP,
PCI) can ever be registered, leaving most of the array permanently
unused. Replace the array with a linked list of registered transport
ops: each nvmet_fabrics_ops structure now embeds a list_head, so list
storage exists only for transports that are actually registered and no
fixed-size table is needed.
Reviewed-by: Israel Rukshin <israelr at nvidia.com>
Signed-off-by: Max Gurtovoy <mgurtovoy at nvidia.com>
---
drivers/nvme/target/core.c | 48 +++++++++++++++++++++++++----------
drivers/nvme/target/fc.c | 4 +--
drivers/nvme/target/loop.c | 4 +--
drivers/nvme/target/nvmet.h | 5 ++--
drivers/nvme/target/pci-epf.c | 2 +-
drivers/nvme/target/rdma.c | 4 +--
drivers/nvme/target/tcp.c | 4 +--
7 files changed, 47 insertions(+), 24 deletions(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f31c7fa70740..7ee12819e2b5 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -21,7 +21,7 @@
struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
-static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
+static LIST_HEAD(nvmet_transports);
static DEFINE_IDA(cntlid_ida);
struct workqueue_struct *nvmet_wq;
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq);
* - per-subsystem allowed hosts list
* - allow_any_host subsystem attribute
* - nvmet_genctr
- * - the nvmet_transports array
+ * - the nvmet_transports list
*
* When updating any of those lists/structures write lock should be obtained,
* while when reading (popolating discovery log page or checking host-subsystem
@@ -275,25 +275,46 @@ void nvmet_port_send_ana_event(struct nvmet_port *port)
up_read(&nvmet_config_sem);
}
-int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
+static struct nvmet_fabrics_ops *nvmet_lookup_transport(unsigned int type)
{
- int ret = 0;
+ struct nvmet_fabrics_ops *ops;
+ lockdep_assert_held(&nvmet_config_sem);
+
+ list_for_each_entry(ops, &nvmet_transports, entry) {
+ if (ops->type == type)
+ return ops;
+ }
+
+ return NULL;
+}
+
+int nvmet_register_transport(struct nvmet_fabrics_ops *ops)
+{
down_write(&nvmet_config_sem);
- if (nvmet_transports[ops->type])
- ret = -EINVAL;
- else
- nvmet_transports[ops->type] = ops;
+ if (nvmet_lookup_transport(ops->type)) {
+ up_write(&nvmet_config_sem);
+ return -EEXIST;
+ }
+
+ list_add_tail(&ops->entry, &nvmet_transports);
up_write(&nvmet_config_sem);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);
-void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
+void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops)
{
+ struct nvmet_fabrics_ops *fops, *tmp;
+
down_write(&nvmet_config_sem);
- nvmet_transports[ops->type] = NULL;
+ list_for_each_entry_safe(fops, tmp, &nvmet_transports, entry) {
+ if (fops == ops) {
+ list_del(&ops->entry);
+ break;
+ }
+ }
up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
@@ -323,12 +344,12 @@ int nvmet_enable_port(struct nvmet_port *port)
if (port->disc_addr.trtype == NVMF_TRTYPE_MAX)
return -EINVAL;
- ops = nvmet_transports[port->disc_addr.trtype];
+ ops = nvmet_lookup_transport(port->disc_addr.trtype);
if (!ops) {
up_write(&nvmet_config_sem);
request_module("nvmet-transport-%d", port->disc_addr.trtype);
down_write(&nvmet_config_sem);
- ops = nvmet_transports[port->disc_addr.trtype];
+ ops = nvmet_lookup_transport(port->disc_addr.trtype);
if (!ops) {
pr_err("transport type %d not supported\n",
port->disc_addr.trtype);
@@ -2006,6 +2027,7 @@ static void __exit nvmet_exit(void)
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
kmem_cache_destroy(nvmet_bvec_cache);
+ WARN_ON_ONCE(!list_empty(&nvmet_transports));
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a9b18c051f5b..58dfb658453c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1932,7 +1932,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
-static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
@@ -2994,7 +2994,7 @@ nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl,
return ret;
}
-static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
+static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_FC,
.msdbd = 1,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index f85a8441bcc6..72c126b95fd2 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -66,7 +66,7 @@ static DEFINE_MUTEX(nvme_loop_ctrl_mutex);
static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
-static const struct nvmet_fabrics_ops nvme_loop_ops;
+static struct nvmet_fabrics_ops nvme_loop_ops;
static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
@@ -670,7 +670,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
flush_workqueue(nvme_delete_wq);
}
-static const struct nvmet_fabrics_ops nvme_loop_ops = {
+static struct nvmet_fabrics_ops nvme_loop_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_LOOP,
.add_port = nvme_loop_add_port,
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51df72f5e89b..2d29df0f66eb 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -409,6 +409,7 @@ struct nvmet_subsys_link {
struct nvmet_req;
struct nvmet_fabrics_ops {
struct module *owner;
+ struct list_head entry;
unsigned int type;
unsigned int msdbd;
unsigned int flags;
@@ -637,8 +638,8 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);
-int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
-void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);
+int nvmet_register_transport(struct nvmet_fabrics_ops *ops);
+void nvmet_unregister_transport(struct nvmet_fabrics_ops *ops);
void nvmet_port_del_ctrls(struct nvmet_port *port,
struct nvmet_subsys *subsys);
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index 2e78397a7373..6be4e7586a98 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1523,7 +1523,7 @@ static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl,
}
}
-static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
+static struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_PCI,
.add_port = nvmet_pci_epf_add_port,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 0485e25ab797..9336cb7e25bb 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -176,7 +176,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_rsp *r,
int tag);
-static const struct nvmet_fabrics_ops nvmet_rdma_ops;
+static struct nvmet_fabrics_ops nvmet_rdma_ops;
static int srq_size_set(const char *val, const struct kernel_param *kp)
{
@@ -2038,7 +2038,7 @@ static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
return NVME_RDMA_MAX_QUEUE_SIZE;
}
-static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
+static struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
.msdbd = 1,
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 470bf37e5a63..88e728cb4779 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -211,7 +211,7 @@ static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
static struct workqueue_struct *nvmet_tcp_wq;
-static const struct nvmet_fabrics_ops nvmet_tcp_ops;
+static struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
@@ -2180,7 +2180,7 @@ static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl,
(struct sockaddr *)&queue->sockaddr_peer);
}
-static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
+static struct nvmet_fabrics_ops nvmet_tcp_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_TCP,
.msdbd = 1,
--
2.18.1
More information about the Linux-nvme
mailing list