[PATCH 17/17] nvme: enable non-inline passthru commands
Clay Mayers
Clay.Mayers at kioxia.com
Thu Mar 24 14:09:18 PDT 2022
> From: Kanchan Joshi
> Sent: Tuesday, March 8, 2022 7:21 AM
> To: axboe at kernel.dk; hch at lst.de; kbusch at kernel.org;
> asml.silence at gmail.com
> Cc: io-uring at vger.kernel.org; linux-nvme at lists.infradead.org; linux-
> block at vger.kernel.org; sbates at raithlin.com; logang at deltatee.com;
> pankydev8 at gmail.com; javier at javigon.com; mcgrof at kernel.org;
> a.manzanares at samsung.com; joshiiitr at gmail.com; anuj20.g at samsung.com
> Subject: [PATCH 17/17] nvme: enable non-inline passthru commands
>
> From: Anuj Gupta <anuj20.g at samsung.com>
>
> On submission, just fetch the command from the userspace pointer and
> reuse everything else. On completion, update the result field inside
> the passthru command.
>
> Signed-off-by: Anuj Gupta <anuj20.g at samsung.com>
> Signed-off-by: Kanchan Joshi <joshi.k at samsung.com>
> ---
> drivers/nvme/host/ioctl.c | 29 +++++++++++++++++++++++++----
> 1 file changed, 25 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
> index 701feaecabbe..ddb7e5864be6 100644
> --- a/drivers/nvme/host/ioctl.c
> +++ b/drivers/nvme/host/ioctl.c
> @@ -65,6 +65,14 @@ static void nvme_pt_task_cb(struct io_uring_cmd *ioucmd)
>  	}
>  	kfree(pdu->meta);
>
> +	if (ioucmd->flags & IO_URING_F_UCMD_INDIRECT) {
> +		struct nvme_passthru_cmd64 __user *ptcmd64 = ioucmd->cmd;
> +		u64 result = le64_to_cpu(nvme_req(req)->result.u64);
> +
> +		if (put_user(result, &ptcmd64->result))
> +			status = -EFAULT;
When the thread that submitted the io_uring_cmd has exited, the callback
is invoked from a system worker instead, which has no user address space,
so put_user() fails. The CQE is still completed, so the process sees a
failed I/O status even though the I/O itself did not fail. The same is
true for the metadata returned in patch 5.
I can't say whether supporting this case is a requirement. It does break
our current prototype, but we can adjust.
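For what it's worth, here is a rough sketch of one way the copy-out
could survive submitter exit: pin the submitting task's mm at submission
time and adopt it in the completion path before calling put_user(). This
is untested, and the pdu->mm field is hypothetical, not something in
this series:

	/* At submission, in task context: pin the user address space.
	 * pdu->mm is a hypothetical field added for this sketch. */
	pdu->mm = current->mm;
	mmget(pdu->mm);

	/* In nvme_pt_task_cb(): if running from a system worker
	 * (current->mm == NULL), temporarily adopt the submitter's mm
	 * so put_user() has an address space to copy into. */
	if (!current->mm) {
		kthread_use_mm(pdu->mm);
		if (put_user(result, &ptcmd64->result))
			status = -EFAULT;
		kthread_unuse_mm(pdu->mm);
	} else if (put_user(result, &ptcmd64->result)) {
		status = -EFAULT;
	}
	mmput(pdu->mm);

The cost is keeping the mm alive until the command completes; whether
that's acceptable depends on whether this case needs to be supported
at all.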
> +	}
> +
>  	io_uring_cmd_done(ioucmd, status);
>  }
>
> @@ -143,6 +151,13 @@ static inline bool nvme_is_fixedb_passthru(struct io_uring_cmd *ioucmd)
>  	return ((ioucmd) && (ioucmd->flags & IO_URING_F_UCMD_FIXEDBUFS));
>  }
>
> +static inline bool is_inline_rw(struct io_uring_cmd *ioucmd, struct nvme_command *cmd)
> +{
> +	return ((ioucmd->flags & IO_URING_F_UCMD_INDIRECT) ||
> +		(cmd->common.opcode == nvme_cmd_write ||
> +		 cmd->common.opcode == nvme_cmd_read));
> +}
> +
>  static int nvme_submit_user_cmd(struct request_queue *q,
>  		struct nvme_command *cmd, u64 ubuffer,
>  		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
> @@ -193,8 +208,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
>  		}
>  	}
>  	if (ioucmd) { /* async dispatch */
> -		if (cmd->common.opcode == nvme_cmd_write ||
> -		    cmd->common.opcode == nvme_cmd_read) {
> +		if (is_inline_rw(ioucmd, cmd)) {
>  			if (bio && is_polling_enabled(ioucmd, req)) {
>  				ioucmd->bio = bio;
>  				bio->bi_opf |= REQ_POLLED;
> @@ -204,7 +218,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
>  			blk_execute_rq_nowait(req, 0, nvme_end_async_pt);
>  			return 0;
>  		} else {
> -			/* support only read and write for now. */
> +			/* support only read and write for inline */
>  			ret = -EINVAL;
>  			goto out_meta;
>  		}
> @@ -372,7 +386,14 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
>  	} else {
>  		if (ioucmd->cmd_len != sizeof(struct nvme_passthru_cmd64))
>  			return -EINVAL;
> -		cptr = (struct nvme_passthru_cmd64 *)ioucmd->cmd;
> +		if (ioucmd->flags & IO_URING_F_UCMD_INDIRECT) {
> +			ucmd = (struct nvme_passthru_cmd64 __user *)ioucmd->cmd;
> +			if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
> +				return -EFAULT;
> +			cptr = &cmd;
> +		} else {
> +			cptr = (struct nvme_passthru_cmd64 *)ioucmd->cmd;
> +		}
>  	}
>  	if (cptr->flags & NVME_HIPRI)
>  		rq_flags |= REQ_POLLED;
> --
> 2.25.1
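As context for the indirect path the commit message describes: from
userspace the command block is a plain struct nvme_passthru_cmd64 (from
<linux/nvme_ioctl.h>) living in user memory, which the kernel copies in
at submission and whose result field it writes back on completion. A
minimal sketch of filling one for a read follows; the SQE wiring
(pointing the SQE at the struct and setting the indirect flag) is part
of this series and not shown, data_buf/slba/nlb are placeholders, and
512-byte LBAs are assumed:

	#include <linux/nvme_ioctl.h>
	#include <stdint.h>
	#include <string.h>

	/* Fill an indirect passthru read. The struct must stay valid
	 * until the CQE arrives: the kernel reads it at submission and
	 * writes the NVMe completion value into cmd->result. */
	static void fill_indirect_read(struct nvme_passthru_cmd64 *cmd,
				       void *data_buf, uint64_t slba,
				       uint16_t nlb)
	{
		memset(cmd, 0, sizeof(*cmd));
		cmd->opcode   = 0x02;		/* nvme_cmd_read */
		cmd->nsid     = 1;		/* namespace 1, for example */
		cmd->addr     = (uint64_t)(uintptr_t)data_buf;
		cmd->data_len = (uint32_t)(nlb + 1) * 512;
		cmd->cdw10    = (uint32_t)slba;		/* SLBA, low 32 bits */
		cmd->cdw11    = (uint32_t)(slba >> 32);	/* SLBA, high 32 bits */
		cmd->cdw12    = nlb;		/* NLB, zero-based count */
	}

After completion, cmd.result holds the value that nvme_pt_task_cb()
wrote back with put_user() above.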