[PATCH] nvmet: dynamically allocate nvmet_ns->nguid

Chaitanya Kulkarni chaitanyak at nvidia.com
Wed May 3 15:39:47 PDT 2023


On 5/3/23 08:45, Jens Axboe wrote:
> On 5/1/23 11:30 PM, Chaitanya Kulkarni wrote:
>> The nvmet_ns struct is critical to I/O operations in each backend bdev
>> and file, but its static nguid array is not accessed in the fast path.
>> This means that pulling in all the memory for the array whenever the
>> struct is accessed is inefficient.
>>
>> This patch dynamically allocates the nvmet_ns->nguid array, reducing
>> the size of the nvmet_ns struct. This should remove the unnecessary
>> fast-path memory traffic that the embedded array causes compared to a
>> pointer. The nguid allocation with kzalloc() uses the same GFP_KERNEL
>> policy that is used to allocate the nvmet_ns struct itself.
> Replacing 16 bytes of embedded memory with 8 bytes and then alloc+free
> seems like a poor tradeoff.
>
> Why not just arrange it a bit more sanely, and also push the config
> stuff out-of-line as that would not be used in the fast path. The below
> 30 second job takes it from 456 -> 440 bytes for me, and has a better
> layout imho.
>   
I intentionally didn't touch the reordering since I've asked Christoph
to look into it, so my patch only optimizes the nguid allocation.
I just looked into his series and your patch is missing from it.

Moving the configfs members out of the way and grouping the bools
together is a much better layout ...
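As a minimal illustration of why that wins (hypothetical fields, not
the real struct): every bool that sits in front of a u32 drags its own
alignment padding, while grouped bools share one padded slot.

struct bad_layout {
        bool    a;              /* 1 byte + 3 bytes padding */
        u32     x;
        bool    b;              /* 1 byte + 3 bytes padding */
        u32     y;
};                              /* 16 bytes */

struct good_layout {
        bool    a;
        bool    b;              /* 2 bytes + 2 bytes padding */
        u32     x;
        u32     y;
};                              /* 12 bytes */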

> diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
> index dc60a22646f7..790d7513e442 100644
> --- a/drivers/nvme/target/nvmet.h
> +++ b/drivers/nvme/target/nvmet.h
> @@ -61,29 +61,31 @@ struct nvmet_ns {
>   	struct block_device	*bdev;
>   	struct file		*file;
>   	bool			readonly;
> +	bool			buffered_io;
> +	bool			enabled;
> +	u8			csi;
>   	u32			nsid;
>   	u32			blksize_shift;
> +	u32			anagrpid;
>   	loff_t			size;
>   	u8			nguid[16];
>   	uuid_t			uuid;
> -	u32			anagrpid;
>   
> -	bool			buffered_io;
> -	bool			enabled;
>   	struct nvmet_subsys	*subsys;
>   	const char		*device_path;
>   
> -	struct config_group	device_group;
> -	struct config_group	group;
> -
>   	struct completion	disable_done;
>   	mempool_t		*bvec_pool;
>   
> -	int			use_p2pmem;
>   	struct pci_dev		*p2p_dev;
> +	int			use_p2pmem;
>   	int			pi_type;
>   	int			metadata_size;
> -	u8			csi;
> +
> +	struct config_group	device_group;
> +	struct config_group	group;
> +
> +
>   };
>   
>   static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
>

I've applied your patch and I got the layout you mentioned [1] :-
          /* size: 440, cachelines: 7, members: 23 */
          /* last cacheline: 56 bytes */

I've applied the nguid allocation on top of your patch to go from
440 -> 432, see [2] :-
          /* size: 432, cachelines: 7, members: 23 */
          /* last cacheline: 48 bytes */
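The 8-byte delta is exactly the field change: the 16-byte embedded
nguid[16] is replaced by an 8-byte pointer, i.e. 440 - 16 + 8 = 432.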

I failed to convince myself: why carry 8 more bytes when they are not
even accessed in the fast path? That holds irrespective of the
re-ordering, since the new nguid allocation is not in the fast path
either.
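To make that concrete, here is the shape of the two options being
compared (illustrative declarations only, not the driver code):

/* embedded: 16 bytes ride along in every nvmet_ns instance */
struct ns_embedded {
        u8      nguid[16];
};

/* pointer: 8 bytes in the struct plus a separately allocated
 * 16-byte buffer; costs one kzalloc()/kfree() pair and an extra
 * dereference, but nguid is only read in admin/configfs paths.
 */
struct ns_pointer {
        u8      *nguid;
};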

-ck

[1] Jens's reordering

nvme (nvme-6.4) # pahole target/nvmet.ko |  grep "struct nvmet_ns {" -A 42
struct nvmet_ns {
         struct percpu_ref          ref;                  /* 0    16 */
         struct block_device *      bdev;                 /* 16     8 */
         struct file *              file;                 /* 24     8 */
         bool                       readonly;             /* 32     1 */
         bool                       buffered_io;          /* 33     1 */
         bool                       enabled;              /* 34     1 */
         u8                         csi;                  /* 35     1 */
         u32                        nsid;                 /* 36     4 */
         u32                        blksize_shift;        /* 40     4 */
         u32                        anagrpid;             /* 44     4 */
         loff_t                     size;                 /* 48     8 */
         u8                         nguid[16];            /* 56    16 */
         /* --- cacheline 1 boundary (64 bytes) was 8 bytes ago --- */
         uuid_t                     uuid;                 /* 72    16 */
         struct nvmet_subsys *      subsys;               /* 88     8 */
         const char  *              device_path;          /* 96     8 */
         struct completion          disable_done;         /* 104    32 */
         /* --- cacheline 2 boundary (128 bytes) was 8 bytes ago --- */
         mempool_t *                bvec_pool;            /* 136     8 */
         struct pci_dev *           p2p_dev;              /* 144     8 */
         int                        use_p2pmem;           /* 152     4 */
         int                        pi_type;              /* 156     4 */
         int                        metadata_size;        /* 160     4 */

         /* XXX 4 bytes hole, try to pack */

         struct config_group        device_group;         /* 168   136 */
         /* --- cacheline 4 boundary (256 bytes) was 48 bytes ago --- */
         struct config_group        group;                /* 304   136 */

         /* size: 440, cachelines: 7, members: 23 */
         /* sum members: 436, holes: 1, sum holes: 4 */
         /* last cacheline: 56 bytes */
};

[2] nguid allocation on top of [1].

diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 39cb570f833d..21129ad15320 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -551,7 +551,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
         id->nmic = NVME_NS_NMIC_SHARED;
         id->anagrpid = cpu_to_le32(req->ns->anagrpid);

-       memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
+       memcpy(&id->nguid, req->ns->nguid, sizeof(id->nguid));

         id->lbaf[0].ds = req->ns->blksize_shift;

@@ -646,10 +646,10 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
                 if (status)
                         goto out;
         }
-       if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
+       if (memchr_inv(req->ns->nguid, 0, NVME_NIDT_NGUID_LEN)) {
                 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
                                                   NVME_NIDT_NGUID_LEN,
-                                                  &req->ns->nguid, &off);
+                                                  req->ns->nguid, &off);
                 if (status)
                         goto out;
         }
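
(The &req->ns->nguid -> req->ns->nguid changes above fall out of the
type change: once nguid is a pointer, &req->ns->nguid is the address
of the pointer itself (a u8 **), not the identifier bytes.)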
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 907143870da5..463ae31d5d71 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -444,7 +444,7 @@ CONFIGFS_ATTR(nvmet_ns_, device_uuid);

  static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
  {
-       return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
+       return sprintf(page, "%pUb\n", to_nvmet_ns(item)->nguid);
  }

  static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
@@ -480,7 +480,7 @@ static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
                         p++;
         }

-       memcpy(&ns->nguid, nguid, sizeof(nguid));
+       memcpy(ns->nguid, nguid, sizeof(nguid));
  out_unlock:
         mutex_unlock(&subsys->lock);
         return ret ? ret : count;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f66ed13d7c11..cc95ba3c2835 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -665,6 +665,7 @@ void nvmet_ns_free(struct nvmet_ns *ns)
         up_write(&nvmet_ana_sem);

         kfree(ns->device_path);
+       kfree(ns->nguid);
         kfree(ns);
  }

@@ -676,6 +677,12 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
         if (!ns)
                 return NULL;

+       ns->nguid = kzalloc(NVME_NIDT_NGUID_LEN, GFP_KERNEL);
+       if (!ns->nguid) {
+               kfree(ns);
+               return NULL;
+       }
+
         init_completion(&ns->disable_done);

         ns->nsid = nsid;
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 790d7513e442..3bc91a4ee4ee 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -68,7 +68,7 @@ struct nvmet_ns {
         u32                     blksize_shift;
         u32                     anagrpid;
         loff_t                  size;
-       u8                      nguid[16];
+       u8                      *nguid;
         uuid_t                  uuid;

         struct nvmet_subsys     *subsys;
nvme (nvme-6.4) # pahole target/nvmet.ko |  grep "struct nvmet_ns {" -A 42
struct nvmet_ns {
         struct percpu_ref          ref;                  /* 0    16 */
         struct block_device *      bdev;                 /* 16     8 */
         struct file *              file;                 /* 24     8 */
         bool                       readonly;             /* 32     1 */
         bool                       buffered_io;          /* 33     1 */
         bool                       enabled;              /* 34     1 */
         u8                         csi;                  /* 35     1 */
         u32                        nsid;                 /* 36     4 */
         u32                        blksize_shift;        /* 40     4 */
         u32                        anagrpid;             /* 44     4 */
         loff_t                     size;                 /* 48     8 */
         u8 *                       nguid;                /* 56     8 */
         /* --- cacheline 1 boundary (64 bytes) --- */
         uuid_t                     uuid;                 /* 64    16 */
         struct nvmet_subsys *      subsys;               /* 80     8 */
         const char  *              device_path;          /* 88     8 */
         struct completion          disable_done;         /* 96    32 */
         /* --- cacheline 2 boundary (128 bytes) --- */
         mempool_t *                bvec_pool;            /* 128     8 */
         struct pci_dev *           p2p_dev;              /* 136     8 */
         int                        use_p2pmem;           /* 144     4 */
         int                        pi_type;              /* 148     4 */
         int                        metadata_size;        /* 152     4 */

         /* XXX 4 bytes hole, try to pack */

         struct config_group        device_group;         /* 160   136 */
         /* --- cacheline 4 boundary (256 bytes) was 40 bytes ago --- */
         struct config_group        group;                /* 296   136 */

         /* size: 432, cachelines: 7, members: 23 */
         /* sum members: 428, holes: 1, sum holes: 4 */
         /* last cacheline: 48 bytes */
};



