[PATCHv2 0/5] nvme-pci: iod optimisations
Kanchan Joshi
joshi.k at samsung.com
Mon Aug 1 22:34:49 PDT 2022
On Fri, Jul 29, 2022 at 09:28:46AM -0700, Keith Busch wrote:
>From: Keith Busch <kbusch at kernel.org>
>
>The series adjusts the struct nvme_iod fields for optimal sizing. The
>end result on 64-bit arch removes 24 bytes, from 152 down to 128. We
>typically allocate many thousands of these, and the cache aligned size
>is a nice bonus.
Reviewed-by: Kanchan Joshi <joshi.k at samsung.com>
But would you like to reduce 8 more bytes by killing the meta_dma field?
This one:
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 06b7ec0d5558..119192363b31 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -232,7 +232,6 @@ struct nvme_iod {
u8 nents; /* Used in scatterlist */
unsigned int dma_len; /* length of single DMA segment mapping */
dma_addr_t first_dma;
- dma_addr_t meta_dma;
struct scatterlist *sg;
};
@@ -889,13 +888,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
struct nvme_command *cmnd)
{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t meta_dma;
- iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
+ meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->meta_dma))
+ if (dma_mapping_error(dev->dev, meta_dma))
return BLK_STS_IOERR;
- cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ cmnd->rw.metadata = cpu_to_le64(meta_dma);
return BLK_STS_OK;
}
@@ -1030,8 +1029,9 @@ static __always_inline void nvme_pci_unmap_rq(struct request *req)
if (blk_integrity_rq(req)) {
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ dma_addr_t meta_dma = le64_to_cpu(iod->cmd.rw.metadata);
- dma_unmap_page(dev->dev, iod->meta_dma,
+ dma_unmap_page(dev->dev, meta_dma,
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
}
More information about the Linux-nvme
mailing list