[PATCH] nvmet: dynamically allocate nvmet_ns->nguid

Chaitanya Kulkarni kch at nvidia.com
Mon May 1 22:30:51 PDT 2023


The nvmet_ns structure is critical to I/O handling in both the bdev and
file backends, but its embedded nguid[] array is not accessed in the
fast path. Keeping the full 16-byte array inline only inflates the
structure and drags unused bytes into the cache along with the
fast-path members.

This patch dynamically allocates nvmet_ns->nguid and stores a pointer
instead, reducing the size of struct nvmet_ns by 8 bytes. This should
cut the unnecessary fast-path memory traffic that an embedded array
causes compared to a pointer. The nguid buffer is allocated with
kzalloc() using the same GFP_KERNEL policy that is used to allocate
struct nvmet_ns itself.

struct nvmet_ns size difference with pahole :-

with this patch		:- /* size: 440, cachelines: 7, members: 23 */
without this patch	:- /* size: 448, cachelines: 7, members: 23 */

Signed-off-by: Chaitanya Kulkarni <kch at nvidia.com>
---

Following is the difference with pahole:
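
The dumps below can be regenerated with pahole against a built object,
e.g. (the object path is an assumption here, adjust to the local build):

	$ pahole -C nvmet_ns drivers/nvme/target/core.o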

With this patch :-

struct nvmet_ns {
	struct percpu_ref          ref;                  /*     0    16 */
	struct block_device *      bdev;                 /*    16     8 */
	struct file *              file;                 /*    24     8 */
	bool                       readonly;             /*    32     1 */

	/* XXX 3 bytes hole, try to pack */

	u32                        nsid;                 /*    36     4 */
	u32                        blksize_shift;        /*    40     4 */

	/* XXX 4 bytes hole, try to pack */

	loff_t                     size;                 /*    48     8 */
	u8 *                       nguid;                /*    56     8 */
	/* --- cacheline 1 boundary (64 bytes) --- */
	uuid_t                     uuid;                 /*    64    16 */
	u32                        anagrpid;             /*    80     4 */
	bool                       buffered_io;          /*    84     1 */
	bool                       enabled;              /*    85     1 */

	/* XXX 2 bytes hole, try to pack */

	struct nvmet_subsys *      subsys;               /*    88     8 */
	const char  *              device_path;          /*    96     8 */
	struct config_group        device_group;         /*   104   136 */
	/* --- cacheline 3 boundary (192 bytes) was 48 bytes ago --- */
	struct config_group        group;                /*   240   136 */
	/* --- cacheline 5 boundary (320 bytes) was 56 bytes ago --- */
	struct completion          disable_done;         /*   376    32 */
	/* --- cacheline 6 boundary (384 bytes) was 24 bytes ago --- */
	mempool_t *                bvec_pool;            /*   408     8 */
	struct pci_dev *           p2p_dev;              /*   416     8 */
	int                        use_p2pmem;           /*   424     4 */
	int                        pi_type;              /*   428     4 */
	int                        metadata_size;        /*   432     4 */
	u8                         csi;                  /*   436     1 */

	/* size: 440, cachelines: 7, members: 23 */
	/* sum members: 428, holes: 3, sum holes: 9 */
	/* padding: 3 */
	/* last cacheline: 56 bytes */
};

Without this patch :-

struct nvmet_ns {
	struct percpu_ref          ref;                  /*     0    16 */
	struct block_device *      bdev;                 /*    16     8 */
	struct file *              file;                 /*    24     8 */
	bool                       readonly;             /*    32     1 */

	/* XXX 3 bytes hole, try to pack */

	u32                        nsid;                 /*    36     4 */
	u32                        blksize_shift;        /*    40     4 */

	/* XXX 4 bytes hole, try to pack */

	loff_t                     size;                 /*    48     8 */
	u8                         nguid[16];            /*    56    16 */
	/* --- cacheline 1 boundary (64 bytes) was 8 bytes ago --- */
	uuid_t                     uuid;                 /*    72    16 */
	u32                        anagrpid;             /*    88     4 */
	bool                       buffered_io;          /*    92     1 */
	bool                       enabled;              /*    93     1 */

	/* XXX 2 bytes hole, try to pack */

	struct nvmet_subsys *      subsys;               /*    96     8 */
	const char  *              device_path;          /*   104     8 */
	struct config_group        device_group;         /*   112   136 */
	/* --- cacheline 3 boundary (192 bytes) was 56 bytes ago --- */
	struct config_group        group;                /*   248   136 */
	/* --- cacheline 6 boundary (384 bytes) --- */
	struct completion          disable_done;         /*   384    32 */
	mempool_t *                bvec_pool;            /*   416     8 */
	struct pci_dev *           p2p_dev;              /*   424     8 */
	int                        use_p2pmem;           /*   432     4 */
	int                        pi_type;              /*   436     4 */
	int                        metadata_size;        /*   440     4 */
	u8                         csi;                  /*   444     1 */

	/* size: 448, cachelines: 7, members: 23 */
	/* sum members: 436, holes: 3, sum holes: 9 */
	/* padding: 3 */
};

Please note that this patch is generated on top of
("nvmet: Reorder fields in 'struct nvmet_ns'") :-

https://www.spinics.net/lists/kernel/msg4773169.html
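
The change itself boils down to the usual kzalloc()/kfree() pairing; a
minimal sketch of the pattern (not the exact hunks below):

	/* in nvmet_ns_alloc(), after struct nvmet_ns is allocated */
	ns->nguid = kzalloc(NVME_NIDT_NGUID_LEN, GFP_KERNEL);
	if (!ns->nguid) {
		kfree(ns);	/* undo the nvmet_ns allocation */
		return NULL;
	}

	/* in nvmet_ns_free(), before the final kfree(ns) */
	kfree(ns->nguid);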

 drivers/nvme/target/admin-cmd.c | 6 +++---
 drivers/nvme/target/configfs.c  | 4 ++--
 drivers/nvme/target/core.c      | 7 +++++++
 drivers/nvme/target/nvmet.h     | 2 +-
 4 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..21129ad15320 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -551,7 +551,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 	id->nmic = NVME_NS_NMIC_SHARED;
 	id->anagrpid = cpu_to_le32(req->ns->anagrpid);
 
-	memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
+	memcpy(&id->nguid, req->ns->nguid, sizeof(id->nguid));
 
 	id->lbaf[0].ds = req->ns->blksize_shift;
 
@@ -646,10 +646,10 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
 		if (status)
 			goto out;
 	}
-	if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
+	if (memchr_inv(req->ns->nguid, 0, NVME_NIDT_NGUID_LEN)) {
 		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
 						  NVME_NIDT_NGUID_LEN,
-						  &req->ns->nguid, &off);
+						  req->ns->nguid, &off);
 		if (status)
 			goto out;
 	}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 907143870da5..463ae31d5d71 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -444,7 +444,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_uuid);
 
 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
 {
-	return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
+	return sprintf(page, "%pUb\n", to_nvmet_ns(item)->nguid);
 }
 
 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
@@ -480,7 +480,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
 			p++;
 	}
 
-	memcpy(&ns->nguid, nguid, sizeof(nguid));
+	memcpy(ns->nguid, nguid, sizeof(nguid));
 out_unlock:
 	mutex_unlock(&subsys->lock);
 	return ret ? ret : count;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f66ed13d7c11..cc95ba3c2835 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -665,6 +665,7 @@ void nvmet_ns_free(struct nvmet_ns *ns)
 	up_write(&nvmet_ana_sem);
 
 	kfree(ns->device_path);
+	kfree(ns->nguid);
 	kfree(ns);
 }
 
@@ -676,6 +677,12 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
 	if (!ns)
 		return NULL;
 
+	ns->nguid = kzalloc(NVME_NIDT_NGUID_LEN, GFP_KERNEL);
+	if (!ns->nguid) {
+		kfree(ns);
+		return NULL;
+	}
+
 	init_completion(&ns->disable_done);
 
 	ns->nsid = nsid;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c50146085fb5..4c2a20dc9eed 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -64,7 +64,7 @@ struct nvmet_ns {
 	u32			nsid;
 	u32			blksize_shift;
 	loff_t			size;
-	u8			nguid[16];
+	u8			*nguid;
 	uuid_t			uuid;
 	u32			anagrpid;
 
-- 
2.40.0



