[PATCH 14/14] nvmet: use inline bio for passthru fast path
Chaitanya Kulkarni
chaitanya.kulkarni at wdc.com
Mon Aug 10 14:54:56 EDT 2020
nvmet_passthru_execute_cmd() is a high-frequency function, and it
currently uses bio_alloc(), which means a memory allocation from the
fs bio pool for every I/O.
For NVMeoF we already have an inline_bvec allocated as part of the
nvmet_req, and the transfer length of the request is known before the
bio is allocated, so small requests can use that preallocated bvec
with an embedded bio instead of calling bio_alloc().
Introduce a bio member in the passthru part of the nvmet_req anonymous
union. In the fast path, when the transfer fits in the inline bvec,
set up this embedded bio with bio_init() instead of allocating one
with bio_alloc().
This avoids any new memory allocation under high memory pressure and
replaces the cost of allocation (bio_alloc()) with plain
initialization (bio_init()) whenever the transfer length is <=
NVMET_MAX_INLINE_DATA_LEN, which can be configured at compile time.
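As a rough sizing example, assuming the default of an 8-entry inline
bvec and 4K pages, NVMET_MAX_INLINE_DATA_LEN works out to 8 * 4K =
32K, so transfers up to 32K take the bio_init() fast path and only
larger transfers fall back to bio_alloc().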
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
---
 drivers/nvme/target/nvmet.h    |  1 +
 drivers/nvme/target/passthru.c | 21 ++++++++++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 47ee3fb193bd..3dd18f593472 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -331,6 +331,7 @@ struct nvmet_req {
 		} f;
 		struct {
 			struct request		*rq;
+			struct bio		inline_bio;
 			struct work_struct	work;
 			bool			use_workqueue;
 		} p;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index a7ccc817ac2d..8b22a6c8f57c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -200,6 +200,14 @@ static inline bool nvmet_write_cmd_group(__u8 opcode)
 	return ret;
 }
 
+static void nvmet_passthru_bio_done(struct bio *bio)
+{
+	struct nvmet_req *req = bio->bi_private;
+
+	if (bio != &req->p.inline_bio)
+		bio_put(bio);
+}
+
 static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 {
 	unsigned int op_flags = 0;
@@ -219,14 +227,21 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 	} else if (unlikely(req->cmd->common.opcode == nvme_cmd_flush))
 		op_flags = REQ_FUA;
 
-	bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
-	bio->bi_end_io = bio_put;
+	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+		bio = &req->p.inline_bio;
+		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+	} else {
+		bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+	}
+
+	bio->bi_end_io = nvmet_passthru_bio_done;
 	bio->bi_opf = req_op(rq) | op_flags;
+	bio->bi_private = req;
 
 	for_each_sg(req->sg, sg, req->sg_cnt, i) {
 		if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
 				    sg->offset) < sg->length) {
-			bio_put(bio);
+			nvmet_passthru_bio_done(bio);
 			return -EINVAL;
 		}
 		sg_cnt--;
--
2.22.1