[PATCHv7 1/6] io_uring/rw: move fixed buffer import to issue path
Jens Axboe
axboe at kernel.dk
Wed Feb 26 12:20:24 PST 2025
On 2/26/25 12:04 PM, Jens Axboe wrote:
> On 2/26/25 11:20 AM, Keith Busch wrote:
>> From: Keith Busch <kbusch at kernel.org>
>>
>> Registered buffers may depend on a linked command, which makes the prep
>> path too early to import them. Move the import to the issue path, where
>> the node is actually needed, like all the other users of fixed buffers.
>
> Conceptually I think this patch is fine, but it does bother me with
> random bool arguments. We could fold in something like the (totally
> untested) diff below to get rid of that. What do you think?
>
> +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
> + int ddir)
> +{
> + int ret;
> +
> + ret = __io_prep_rw(req, sqe, ddir);
> + if (unlikely(ret))
> + return ret;
> +
> + return io_rw_do_import(req, ITER_DEST);
Oops, should be 'ddir' here too of course. Updated below, does pass my
testing fwiw.
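
To recap the ordering problem the commit message describes, here's a
rough and entirely hypothetical liburing-style sketch (the
buffer-producing command is driver specific, e.g. a ublk-style
uring_cmd, and is only stubbed out here): the fixed buffer only becomes
valid once the linked command ahead of it completes, so prep time is
too early to look it up.

#include <liburing.h>
#include <string.h>

static void queue_linked_fixed_read(struct io_uring *ring, int fd,
                                    void *buf, int buf_index)
{
        struct io_uring_sqe *sqe;

        /*
         * SQE 1: command that publishes the registered buffer.  The
         * exact command is driver specific and elided; only the link
         * flag matters for this illustration.
         */
        sqe = io_uring_get_sqe(ring);
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_URING_CMD;
        io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

        /*
         * SQE 2: fixed-buffer read against that buffer index.  It is
         * only issued once the link above completes, which is the
         * earliest point the buffer node can be resolved.
         */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0, buf_index);

        io_uring_submit(ring);
}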
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 728d695d2552..4ac2d004b352 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -248,8 +248,8 @@ static int io_prep_rw_pi(struct io_kiocb *req, struct io_rw *rw, int ddir,
return ret;
}
-static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- int ddir, bool do_import)
+static int __io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int ddir)
{
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
unsigned ioprio;
@@ -285,14 +285,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
rw->len = READ_ONCE(sqe->len);
rw->flags = READ_ONCE(sqe->rw_flags);
- if (do_import && !io_do_buffer_select(req)) {
- struct io_async_rw *io = req->async_data;
-
- ret = io_import_rw_buffer(ddir, req, io, 0);
- if (unlikely(ret))
- return ret;
- }
-
attr_type_mask = READ_ONCE(sqe->attr_type_mask);
if (attr_type_mask) {
u64 attr_ptr;
@@ -307,27 +299,52 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return ret;
}
+static int io_rw_do_import(struct io_kiocb *req, int ddir)
+{
+ if (!io_do_buffer_select(req)) {
+ struct io_async_rw *io = req->async_data;
+ int ret;
+
+ ret = io_import_rw_buffer(ddir, req, io, 0);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
+static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ int ddir)
+{
+ int ret;
+
+ ret = __io_prep_rw(req, sqe, ddir);
+ if (unlikely(ret))
+ return ret;
+
+ return io_rw_do_import(req, ddir);
+}
+
int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- return io_prep_rw(req, sqe, ITER_DEST, true);
+ return io_prep_rw(req, sqe, ITER_DEST);
}
int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- return io_prep_rw(req, sqe, ITER_SOURCE, true);
+ return io_prep_rw(req, sqe, ITER_SOURCE);
}
static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
int ddir)
{
- const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
int ret;
- ret = io_prep_rw(req, sqe, ddir, do_import);
+ ret = io_prep_rw(req, sqe, ddir);
if (unlikely(ret))
return ret;
- if (do_import)
- return 0;
+ if (!(req->flags & REQ_F_BUFFER_SELECT))
+ return 0;
/*
* Have to do this validation here, as this is in io_read() rw->len
@@ -364,12 +381,12 @@ static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- return io_prep_rw(req, sqe, ITER_DEST, false);
+ return io_prep_rw(req, sqe, ITER_DEST);
}
int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- return io_prep_rw(req, sqe, ITER_SOURCE, false);
+ return io_prep_rw(req, sqe, ITER_SOURCE);
}
/*
@@ -385,7 +402,7 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!(req->flags & REQ_F_BUFFER_SELECT))
return -EINVAL;
- ret = io_prep_rw(req, sqe, ITER_DEST, false);
+ ret = io_prep_rw(req, sqe, ITER_DEST);
if (unlikely(ret))
return ret;
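
For reference, the issue-path counterpart from Keith's patch looks
roughly like the below, reconstructed from the io_init_rw_fixed hunk
context above rather than copied verbatim; helper names such as
io_import_reg_buf follow the current tree and may differ in this exact
revision:

static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags,
                            int ddir)
{
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        struct io_async_rw *io = req->async_data;
        int ret;

        /* already imported on an earlier (partial) issue attempt */
        if (io->bytes_done)
                return 0;

        /* resolve and import the registered buffer at issue time */
        ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir,
                                issue_flags);
        if (unlikely(ret))
                return ret;
        iov_iter_save_state(&io->iter, &io->iter_state);
        return 0;
}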
--
Jens Axboe