[PATCH 6/6] nvme-ioctl: simplify parameters
Keith Busch
kbusch at meta.com
Mon Feb 24 10:21:28 PST 2025
From: Keith Busch <kbusch at kernel.org>
The uring_cmd handler already defines a struct that groups all the
parameters needed to submit a request. Reuse it throughout to simplify
the function signatures.
Signed-off-by: Keith Busch <kbusch at kernel.org>
---
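For reference, a minimal sketch of the signature change (helper bodies
elided; taken from the hunks below). The five scalar arguments that every
caller used to thread through now travel in one struct:

	/* Before: each caller passes five loose parameters. */
	static int nvme_map_user_request(struct request *req, u64 ubuffer,
			unsigned bufflen, u64 meta_buffer, unsigned meta_len,
			struct io_uring_cmd *ioucmd, unsigned int flags);

	/* After: the same values are grouped in the struct the
	 * uring_cmd path already fills (formerly nvme_uring_data). */
	struct nvme_user_data {
		__u64 metadata;
		__u64 addr;
		__u32 data_len;
		__u32 metadata_len;
		__u32 timeout_ms;
	};

	static int nvme_map_user_request(struct request *req,
			struct nvme_user_data *d,
			struct io_uring_cmd *ioucmd, unsigned int flags);

A caller such as nvme_submit_io() then fills the struct once before
submitting; the patch assigns the fields individually, but equivalently
(as a sketch, with a designated initializer):

	struct nvme_user_data d = {
		.addr		= io.addr,
		.data_len	= length,
		.metadata	= io.metadata,
		.metadata_len	= meta_len,
		.timeout_ms	= 0,
	};

	return nvme_submit_user_cmd(ns->queue, &c, &d, NULL, 0);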
drivers/nvme/host/ioctl.c | 87 ++++++++++++++++++++++-----------------
1 file changed, 50 insertions(+), 37 deletions(-)
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 016a21a3861e9..729dac2d6ee0a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -99,8 +99,15 @@ static void __user *nvme_to_user_ptr(uintptr_t ptrval)
return (void __user *)ptrval;
}
-static int nvme_map_user_request(struct request *req, u64 ubuffer,
- unsigned bufflen, u64 meta_buffer, unsigned meta_len,
+struct nvme_user_data {
+ __u64 metadata;
+ __u64 addr;
+ __u32 data_len;
+ __u32 metadata_len;
+ __u32 timeout_ms;
+};
+
+static int nvme_map_user_request(struct request *req, struct nvme_user_data *d,
struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request_queue *q = req->q;
@@ -108,7 +115,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct block_device *bdev = ns ? ns->disk->part0 : NULL;
bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
- bool has_metadata = meta_buffer && meta_len;
+ bool has_metadata = d->metadata && d->metadata_len;
struct bio *bio = NULL;
int ret;
@@ -128,14 +135,14 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
/* fixedbufs is only for non-vectored io */
if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
return -EINVAL;
- ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
+ ret = io_uring_cmd_import_fixed(d->addr, d->data_len,
rq_data_dir(req), &iter, ioucmd);
if (ret < 0)
return ret;
ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
} else {
- ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
- bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+ ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(d->addr),
+ d->data_len, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
0, rq_data_dir(req));
}
@@ -148,7 +155,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (has_metadata) {
ret = blk_rq_integrity_map_user(req,
- nvme_to_user_ptr(meta_buffer), meta_len);
+ nvme_to_user_ptr(d->metadata), d->metadata_len);
if (ret)
goto out_unmap;
}
@@ -163,8 +170,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
static struct request *nvme_alloc_user_request(struct request_queue *q,
struct nvme_command *cmd, blk_opf_t rq_flags,
- blk_mq_req_flags_t blk_flags, unsigned timeout_ms, u64 ubuffer,
- unsigned bufflen, u64 meta_buffer, unsigned meta_len,
+ blk_mq_req_flags_t blk_flags, struct nvme_user_data *d,
struct io_uring_cmd *ioucmd, unsigned int flags)
{
struct request *req;
@@ -175,11 +181,10 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
return req;
nvme_init_request(req, cmd);
nvme_req(req)->flags |= NVME_REQ_USERCMD;
- req->timeout = timeout_ms ? msecs_to_jiffies(timeout_ms) : 0;
+ req->timeout = d->timeout_ms ? msecs_to_jiffies(d->timeout_ms) : 0;
- if (ubuffer && bufflen) {
- ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
- meta_len, ioucmd, flags);
+ if (d->addr && d->data_len) {
+ ret = nvme_map_user_request(req, d, ioucmd, flags);
if (ret)
goto out;
}
@@ -191,9 +196,8 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
}
static int nvme_submit_user_cmd(struct request_queue *q,
- struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
- u64 meta_buffer, unsigned meta_len, u64 *result,
- unsigned timeout_ms, unsigned int flags)
+ struct nvme_command *cmd, struct nvme_user_data *d,
+ u64 *result, unsigned int flags)
{
struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl;
@@ -202,8 +206,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
u32 effects;
int ret;
- req = nvme_alloc_user_request(q, cmd, 0, 0, timeout_ms, ubuffer,
- bufflen, meta_buffer, meta_len, NULL, flags);
+ req = nvme_alloc_user_request(q, cmd, 0, 0, d, NULL, flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -229,6 +232,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
struct nvme_user_io io;
struct nvme_command c;
unsigned length, meta_len;
+ struct nvme_user_data d;
if (copy_from_user(&io, uio, sizeof(io)))
return -EFAULT;
@@ -280,8 +284,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.lbat = cpu_to_le16(io.apptag);
c.rw.lbatm = cpu_to_le16(io.appmask);
- return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, io.metadata,
- meta_len, NULL, 0, 0);
+ d.addr = io.addr;
+ d.data_len = length;
+ d.metadata = io.metadata;
+ d.metadata_len = meta_len;
+ d.timeout_ms = 0;
+
+ return nvme_submit_user_cmd(ns->queue, &c, &d, NULL, 0);
}
static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -302,6 +311,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd __user *ucmd, unsigned int flags)
{
struct nvme_passthru_cmd cmd;
+ struct nvme_user_data d;
struct nvme_command c;
u64 result;
int status;
@@ -329,9 +339,14 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (!nvme_cmd_allowed(ns, &c, flags))
return -EACCES;
- status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- cmd.addr, cmd.data_len, cmd.metadata, cmd.metadata_len,
- &result, cmd.timeout_ms, flags);
+ d.addr = cmd.addr;
+ d.data_len = cmd.data_len;
+ d.metadata = cmd.metadata;
+ d.metadata_len = cmd.metadata_len;
+ d.timeout_ms = cmd.timeout_ms;
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, &d,
+ &result, flags);
if (status >= 0) {
if (put_user(result, &ucmd->result))
@@ -345,6 +360,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags)
{
struct nvme_passthru_cmd64 cmd;
+ struct nvme_user_data d;
struct nvme_command c;
int status;
@@ -371,9 +387,14 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (!nvme_cmd_allowed(ns, &c, flags))
return -EACCES;
- status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- cmd.addr, cmd.data_len, cmd.metadata, cmd.metadata_len,
- &cmd.result, cmd.timeout_ms, flags);
+ d.addr = cmd.addr;
+ d.data_len = cmd.data_len;
+ d.metadata = cmd.metadata;
+ d.metadata_len = cmd.metadata_len;
+ d.timeout_ms = cmd.timeout_ms;
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, &d,
+ &cmd.result, flags);
if (status >= 0) {
if (put_user(cmd.result, &ucmd->result))
@@ -383,14 +404,6 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
return status;
}
-struct nvme_uring_data {
- __u64 metadata;
- __u64 addr;
- __u32 data_len;
- __u32 metadata_len;
- __u32 timeout_ms;
-};
-
/*
* This overlays struct io_uring_cmd pdu.
* Expect build errors if this grows larger than that.
@@ -459,7 +472,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
- struct nvme_uring_data d;
+ struct nvme_user_data d;
struct nvme_command c;
struct request *req;
blk_opf_t rq_flags = REQ_ALLOC_CACHE;
@@ -504,8 +517,8 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
if (issue_flags & IO_URING_F_IOPOLL)
rq_flags |= REQ_POLLED;
- req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags, d.timeout_ms,
- d.addr, d.data_len, d.metadata, d.metadata_len, ioucmd, flags);
+ req = nvme_alloc_user_request(q, &c, rq_flags, blk_flags, &d, ioucmd,
+ flags);
if (IS_ERR(req))
return PTR_ERR(req);
--
2.43.5