[RFC 13/13] nvme: Add async passthru polling support
Kanchan Joshi
joshi.k at samsung.com
Mon Dec 20 06:17:34 PST 2021
From: Pankaj Raghav <p.raghav at samsung.com>
io_uring already supports polled completions for regular reads and writes. This
patch extends that support to uring_cmd passthru. A previously unused flag bit
in the uring_cmd struct indicates whether the completion should be polled.
If the device has no poll queues enabled, the submission falls back to a
non-polled, IRQ-driven request.
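For illustration only (editor's sketch, not part of the patch): userspace opts
into polled passthru completions by creating the ring with IORING_SETUP_IOPOLL,
which makes io_uring_cmd_prep() below tag the command with URING_CMD_POLLED. A
minimal liburing-style example, where submit_polled_passthru and ns_fd are
hypothetical names, ns_fd standing for an open NVMe char device (e.g.
/dev/ng0n1), and the passthru payload layout, defined by the earlier patches in
this series, is elided:

    #include <liburing.h>
    #include <string.h>

    /* ns_fd: an open NVMe char device, e.g. /dev/ng0n1 (hypothetical helper) */
    static int submit_polled_passthru(int ns_fd)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int ret;

            /* IORING_SETUP_IOPOLL is the opt-in; the nvme side must also
             * have poll queues (e.g. nvme.poll_queues > 0), otherwise the
             * request falls back to IRQ-driven completion. */
            ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
            if (ret)
                    return ret;

            sqe = io_uring_get_sqe(&ring);
            memset(sqe, 0, sizeof(*sqe));
            sqe->opcode = IORING_OP_URING_CMD;  /* added earlier in this series */
            sqe->fd = ns_fd;
            /* ... fill in the passthru command per the earlier patches ... */

            io_uring_submit(&ring);

            /* under IOPOLL, reaping the CQE is what drives ->iopoll() */
            ret = io_uring_wait_cqe(&ring, &cqe);
            if (!ret) {
                    ret = cqe->res;
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
            return ret;
    }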
Signed-off-by: Pankaj Raghav <p.raghav at samsung.com>
---
block/blk-mq.c | 3 +-
drivers/nvme/host/core.c | 1 +
drivers/nvme/host/ioctl.c | 79 ++++++++++++++++++++++++++++++++++-
drivers/nvme/host/multipath.c | 1 +
drivers/nvme/host/nvme.h | 4 ++
fs/io_uring.c | 45 ++++++++++++++++++--
include/linux/blk-mq.h | 1 +
include/linux/io_uring.h | 10 ++++-
8 files changed, 135 insertions(+), 9 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c77991688bfd..acfa55c96a43 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1193,7 +1193,7 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
-static bool blk_rq_is_poll(struct request *rq)
+bool blk_rq_is_poll(struct request *rq)
{
if (!rq->mq_hctx)
return false;
@@ -1203,6 +1203,7 @@ static bool blk_rq_is_poll(struct request *rq)
return false;
return true;
}
+EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5199adf7ae92..f0697cbe2bf1 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3676,6 +3676,7 @@ static const struct file_operations nvme_ns_chr_fops = {
.unlocked_ioctl = nvme_ns_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.async_cmd = nvme_ns_chr_async_cmd,
+ .iopoll = nvme_iopoll,
};
static int nvme_add_ns_cdev(struct nvme_ns *ns)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index bdaf8f317aa8..ce2fe94df3ad 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -31,6 +31,12 @@ struct nvme_uring_cmd {
void __user *meta_buffer;
};
+static inline bool is_polling_enabled(struct io_uring_cmd *ioucmd,
+ struct request *req)
+{
+ return (ioucmd->flags & URING_CMD_POLLED) && blk_rq_is_poll(req);
+}
+
static struct nvme_uring_cmd *nvme_uring_cmd(struct io_uring_cmd *ioucmd)
{
return (struct nvme_uring_cmd *)&ioucmd->pdu;
@@ -76,8 +82,16 @@ static void nvme_end_async_pt(struct request *req, blk_status_t err)
cmd->req = req;
req->bio = bio;
- /* this takes care of setting up task-work */
- io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
+
+ /*
+ * The IO can be completed immediately when the callback
+ * runs in the same (polling) task context.
+ */
+ if (is_polling_enabled(ioucmd, req)) {
+ nvme_pt_task_cb(ioucmd);
+ } else {
+ /* this takes care of setting up task-work */
+ io_uring_cmd_complete_in_task(ioucmd, nvme_pt_task_cb);
+ }
}
static void nvme_setup_uring_cmd_data(struct request *rq,
@@ -183,6 +197,12 @@ static int nvme_submit_user_cmd(struct request_queue *q,
}
}
if (ioucmd) { /* async dispatch */
+
+ if (bio && is_polling_enabled(ioucmd, req)) {
+ ioucmd->bio = bio;
+ bio->bi_opf |= REQ_POLLED;
+ }
+
nvme_setup_uring_cmd_data(req, ioucmd, meta, meta_buffer,
meta_len, write);
blk_execute_rq_nowait(req, 0, nvme_end_async_pt);
@@ -496,6 +516,32 @@ int nvme_ns_chr_async_cmd(struct io_uring_cmd *ioucmd,
return nvme_ns_async_ioctl(ns, ioucmd);
}
+int nvme_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags)
+{
+ struct bio *bio = NULL;
+ struct nvme_ns *ns = NULL;
+ struct request_queue *q = NULL;
+ int ret = 0;
+
+ rcu_read_lock();
+ bio = READ_ONCE(kiocb->private);
+ ns = container_of(file_inode(kiocb->ki_filp)->i_cdev, struct nvme_ns,
+ cdev);
+ q = ns->queue;
+
+ /*
+ * bio and driver_cb share a union in struct io_uring_cmd. When the
+ * device has no poll queues, driver_cb carries the IRQ completion
+ * callback, yet io_uring still calls ->iopoll(). Check that the
+ * queue is actually polled before touching the bio, and return 0
+ * otherwise.
+ */
+ if ((test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) && bio && bio->bi_bdev)
+ ret = bio_poll(bio, iob, flags);
+ rcu_read_unlock();
+ return ret;
+}
+
#ifdef CONFIG_NVME_MULTIPATH
static int nvme_ns_head_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
void __user *argp, struct nvme_ns_head *head, int srcu_idx)
@@ -577,6 +623,35 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
srcu_read_unlock(&head->srcu, srcu_idx);
return ret;
}
+
+int nvme_ns_head_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags)
+{
+ struct bio *bio = NULL;
+ struct request_queue *q = NULL;
+ struct cdev *cdev = file_inode(kiocb->ki_filp)->i_cdev;
+ struct nvme_ns_head *head = container_of(cdev, struct nvme_ns_head, cdev);
+ int srcu_idx = srcu_read_lock(&head->srcu);
+ struct nvme_ns *ns = nvme_find_path(head);
+ int ret = -EWOULDBLOCK;
+
+ if (ns) {
+ bio = READ_ONCE(kiocb->private);
+ q = ns->queue;
+ /*
+ * As in nvme_iopoll() above: bio and driver_cb share a union in
+ * struct io_uring_cmd, so only touch the bio and call bio_poll()
+ * when the queue is actually polled.
+ */
+ if ((test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) && bio &&
+ bio->bi_bdev)
+ ret = bio_poll(bio, iob, flags);
+ }
+
+ srcu_read_unlock(&head->srcu, srcu_idx);
+ return ret;
+}
#endif /* CONFIG_NVME_MULTIPATH */
static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 1e59c8e06622..df91b2953932 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -424,6 +424,7 @@ static const struct file_operations nvme_ns_head_chr_fops = {
.unlocked_ioctl = nvme_ns_head_chr_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.async_cmd = nvme_ns_head_chr_async_cmd,
+ .iopoll = nvme_ns_head_iopoll,
};
static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 56a7cc8421fc..730ada8a3e8e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -752,8 +752,12 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
int nvme_ns_chr_async_cmd(struct io_uring_cmd *ucmd,
enum io_uring_cmd_flags flags);
+int nvme_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
int nvme_ns_head_chr_async_cmd(struct io_uring_cmd *ucmd,
enum io_uring_cmd_flags flags);
+int nvme_ns_head_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
extern const struct attribute_group *nvme_ns_id_attr_groups[];
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f77dde1bdc75..ae2e7666622e 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2655,7 +2655,20 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
if (READ_ONCE(req->iopoll_completed))
break;
- ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob, poll_flags);
+ if (req->opcode == IORING_OP_URING_CMD ||
+ req->opcode == IORING_OP_URING_CMD_FIXED) {
+ /* struct io_uring_cmd does not embed a kiocb, so build one on-stack */
+ struct kiocb kiocb_uring_cmd;
+
+ kiocb_uring_cmd.private = req->uring_cmd.bio;
+ kiocb_uring_cmd.ki_filp = req->uring_cmd.file;
+ ret = req->uring_cmd.file->f_op->iopoll(&kiocb_uring_cmd,
+ &iob, poll_flags);
+ } else {
+ ret = kiocb->ki_filp->f_op->iopoll(kiocb, &iob,
+ poll_flags);
+ }
+
if (unlikely(ret < 0))
return ret;
else if (ret)
@@ -2768,6 +2781,15 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
wq_list_empty(&ctx->iopoll_list))
break;
}
+
+ /*
+ * The completion callback may have been queued as task-work in the
+ * same task that is polling; since that task is busy here, the
+ * callback would never get a chance to run, so flush task-work first.
+ */
+ if (current->task_works)
+ io_run_task_work();
+
ret = io_do_iopoll(ctx, !min);
if (ret < 0)
break;
@@ -4122,6 +4144,14 @@ static int io_linkat(struct io_kiocb *req, unsigned int issue_flags)
return 0;
}
+static void io_complete_uring_cmd_iopoll(struct io_kiocb *req, long res)
+{
+ WRITE_ONCE(req->result, res);
+ /* order with io_iopoll_complete() checking ->result */
+ smp_wmb();
+ WRITE_ONCE(req->iopoll_completed, 1);
+}
+
/*
* Called by consumers of io_uring_cmd, if they originally returned
* -EIOCBQUEUED upon receiving the command.
@@ -4132,7 +4162,11 @@ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret)
if (ret < 0)
req_set_fail(req);
- io_req_complete(req, ret);
+
+ if (req->uring_cmd.flags & URING_CMD_POLLED)
+ io_complete_uring_cmd_iopoll(req, ret);
+ else
+ io_req_complete(req, ret);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
@@ -4147,8 +4181,11 @@ static int io_uring_cmd_prep(struct io_kiocb *req,
return -EOPNOTSUPP;
if (req->ctx->flags & IORING_SETUP_IOPOLL) {
- printk_once(KERN_WARNING "io_uring: iopoll not supported!\n");
- return -EOPNOTSUPP;
+ req->uring_cmd.flags = URING_CMD_POLLED;
+ req->uring_cmd.bio = NULL;
+ req->iopoll_completed = 0;
+ } else {
+ req->uring_cmd.flags = 0;
}
cmd->op = READ_ONCE(csqe->op);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e35a5d835b1f..2233ccf41c19 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -933,6 +933,7 @@ int blk_rq_map_kern(struct request_queue *, struct request *, void *,
int blk_rq_append_bio(struct request *rq, struct bio *bio);
void blk_execute_rq_nowait(struct request *rq, bool at_head,
rq_end_io_fn *end_io);
+bool blk_rq_is_poll(struct request *rq);
blk_status_t blk_execute_rq(struct request *rq, bool at_head);
struct req_iterator {
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 07732bc850af..bbc9c4ea19c3 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -6,6 +6,7 @@
#include <linux/xarray.h>
enum {
+ URING_CMD_POLLED = (1 << 0),
URING_CMD_FIXEDBUFS = (1 << 1),
};
/*
@@ -17,8 +18,13 @@ struct io_uring_cmd {
__u16 op;
__u16 flags;
__u32 len;
- /* used if driver requires update in task context*/
- void (*driver_cb)(struct io_uring_cmd *cmd);
+ union {
+ void *bio; /* used for polling-based completion */
+
+ /* used if the driver requires an update in task context (IRQ-based completion) */
+ void (*driver_cb)(struct io_uring_cmd *cmd);
+ };
+
__u64 pdu[5]; /* 40 bytes available inline for free use */
};
--
2.25.1