[PATCH v4 06/12] io_uring: modify io_get_cqe for CQE32
Stefan Roesch
shr at fb.com
Tue Apr 26 11:21:28 PDT 2022
Modify accesses to the CQE array to take large CQEs into account. For
large CQEs the array index needs to be shifted left by one, since each
large CQE occupies two entries.
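For illustration only (not part of this patch): with IORING_SETUP_CQE32
each big CQE takes up two struct io_uring_cqe slots, so the slot index
derived from the cached tail is doubled. A rough sketch, reusing the same
names as the code below and assuming the usual power-of-two cq_entries:

	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int shift = (ctx->flags & IORING_SETUP_CQE32) ? 1 : 0;
	struct io_uring_cqe *cqe = &ctx->rings->cqes[off << shift];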
Signed-off-by: Stefan Roesch <shr at fb.com>
Signed-off-by: Jens Axboe <axboe at kernel.dk>
Reviewed-by: Kanchan Joshi <joshi.k at samsung.com>
---
fs/io_uring.c | 19 +++++++++++++++++--
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index f300130fd9f0..726238dc65dc 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1909,8 +1909,12 @@ static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
{
struct io_rings *rings = ctx->rings;
unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
+ unsigned int shift = 0;
unsigned int free, queued, len;
+ if (ctx->flags & IORING_SETUP_CQE32)
+ shift = 1;
+
/* userspace may cheat modifying the tail, be safe and do min */
queued = min(__io_cqring_events(ctx), ctx->cq_entries);
free = ctx->cq_entries - queued;
@@ -1922,15 +1926,26 @@ static noinline struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx)
ctx->cached_cq_tail++;
ctx->cqe_cached = &rings->cqes[off];
ctx->cqe_sentinel = ctx->cqe_cached + len;
- return ctx->cqe_cached++;
+ ctx->cqe_cached++;
+ return &rings->cqes[off << shift];
}
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
+ struct io_uring_cqe *cqe = ctx->cqe_cached;
+
+ if (ctx->flags & IORING_SETUP_CQE32) {
+ unsigned int off = ctx->cqe_cached - ctx->rings->cqes;
+
+ cqe += off;
+ }
+
ctx->cached_cq_tail++;
- return ctx->cqe_cached++;
+ ctx->cqe_cached++;
+ return cqe;
}
+
return __io_get_cqe(ctx);
}
--
2.30.2