[PATCH 09/21] block: Add checks to merging of atomic writes
John Garry
john.g.garry at oracle.com
Fri Sep 29 03:27:14 PDT 2023
For atomic writes we allow merging, but we must adhere to some additional
rules:
- Only allow merging of atomic writes with other atomic writes
- Ensure that the merged IO would not cross an atomic write boundary, if
any

We already ensure that we don't exceed the atomic write size limit in
get_max_io_size().
Signed-off-by: John Garry <john.g.garry at oracle.com>
---
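
Note (illustration only, not part of the diff): the first rule reduces to
requiring that REQ_ATOMIC is set identically on both sides of a merge. A
minimal stand-alone sketch of that comparison follows; the flag value here
is made up for the example, while the real REQ_ATOMIC flag is the one
tested in rq->cmd_flags and bio->bi_opf in the hunks below.

#include <stdbool.h>
#include <stdio.h>

#define REQ_ATOMIC	(1U << 20)	/* illustrative value only */

/* A merge is only considered when both sides agree on REQ_ATOMIC. */
static bool atomic_write_flags_match(unsigned int rq_flags,
				     unsigned int bio_flags)
{
	return (rq_flags & REQ_ATOMIC) == (bio_flags & REQ_ATOMIC);
}

int main(void)
{
	printf("%d\n", atomic_write_flags_match(REQ_ATOMIC, REQ_ATOMIC));	/* 1 */
	printf("%d\n", atomic_write_flags_match(REQ_ATOMIC, 0));		/* 0 */
	return 0;
}

blk_atomic_write_mergeable_rq_bio() and blk_atomic_write_mergeable_rqs()
in the patch are the request/bio and request/request forms of this
comparison.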
block/blk-merge.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 72 insertions(+)
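
Note (illustration only, not part of the diff): the boundary rule can be
exercised stand-alone. The helper below mirrors the logic of
bio_straddles_atomic_write_boundary() added in the first hunk; the 64 KB
boundary and the sector values are made up for the example.

#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT	9

/* Does the byte range starting at sector and spanning bytes cross a boundary? */
static bool straddles_boundary(long long sector, unsigned int bytes,
			       unsigned int boundary)
{
	long long start = sector << SECTOR_SHIFT;
	long long end = start + bytes;
	long long start_mod = start % boundary;
	long long end_mod = end % boundary;

	/* Longer than one boundary interval: must cross a boundary. */
	if (end - start > boundary)
		return true;
	/*
	 * Otherwise a boundary was crossed when the end offset within its
	 * boundary window is smaller than the start offset; starting or
	 * ending exactly on a boundary does not count as crossing one.
	 */
	return start_mod > end_mod && start_mod && end_mod;
}

int main(void)
{
	/* sector 120 = byte 61440; an 8 KB write ends at byte 69632, crossing 64 KB */
	printf("%d\n", straddles_boundary(120, 8192, 65536));	/* prints 1 */
	/* sector 128 = byte 65536; the same write now starts on the boundary */
	printf("%d\n", straddles_boundary(128, 8192, 65536));	/* prints 0 */
	return 0;
}

In ll_back_merge_fn() the size passed in is bio->bi_iter.bi_size +
blk_rq_bytes(req), i.e. the size the request would have after the merge,
and a merge that would straddle a boundary is refused by returning 0.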
diff --git a/block/blk-merge.c b/block/blk-merge.c
index bc21f8ff4842..5dc850924e29 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -18,6 +18,23 @@
#include "blk-rq-qos.h"
#include "blk-throttle.h"
+static bool bio_straddles_atomic_write_boundary(loff_t bi_sector,
+ unsigned int bi_size,
+ unsigned int boundary)
+{
+ loff_t start = bi_sector << SECTOR_SHIFT;
+ loff_t end = start + bi_size;
+ loff_t start_mod = start % boundary;
+ loff_t end_mod = end % boundary;
+
+ if (end - start > boundary)
+ return true;
+ if ((start_mod > end_mod) && (start_mod && end_mod))
+ return true;
+
+ return false;
+}
+
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
@@ -664,6 +681,18 @@ int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 		return 0;
 	}
 
+	if (req->cmd_flags & REQ_ATOMIC) {
+		unsigned int atomic_write_boundary_bytes =
+				queue_atomic_write_boundary_bytes(req->q);
+
+		if (atomic_write_boundary_bytes &&
+		    bio_straddles_atomic_write_boundary(req->__sector,
+				bio->bi_iter.bi_size + blk_rq_bytes(req),
+				atomic_write_boundary_bytes)) {
+			return 0;
+		}
+	}
+
 	return ll_new_hw_segment(req, bio, nr_segs);
 }
@@ -683,6 +712,19 @@ static int ll_front_merge_fn(struct request *req, struct bio *bio,
 		return 0;
 	}
 
+	if (req->cmd_flags & REQ_ATOMIC) {
+		unsigned int atomic_write_boundary_bytes =
+				queue_atomic_write_boundary_bytes(req->q);
+
+		if (atomic_write_boundary_bytes &&
+		    bio_straddles_atomic_write_boundary(
+				bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size + blk_rq_bytes(req),
+				atomic_write_boundary_bytes)) {
+			return 0;
+		}
+	}
+
 	return ll_new_hw_segment(req, bio, nr_segs);
 }
@@ -719,6 +761,18 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
 		return 0;
 
+	if (req->cmd_flags & REQ_ATOMIC) {
+		unsigned int atomic_write_boundary_bytes =
+				queue_atomic_write_boundary_bytes(req->q);
+
+		if (atomic_write_boundary_bytes &&
+		    bio_straddles_atomic_write_boundary(req->__sector,
+				blk_rq_bytes(req) + blk_rq_bytes(next),
+				atomic_write_boundary_bytes)) {
+			return 0;
+		}
+	}
+
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
 	if (total_phys_segments > blk_rq_get_max_segments(req))
 		return 0;
@@ -814,6 +868,18 @@ static enum elv_merge blk_try_req_merge(struct request *req,
 	return ELEVATOR_NO_MERGE;
 }
 
+static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
+					      struct bio *bio)
+{
+	return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
+}
+
+static bool blk_atomic_write_mergeable_rqs(struct request *rq,
+					   struct request *next)
+{
+	return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
+}
+
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
  * For mq with scheduling, the appropriate queue wide lock should be held.
@@ -833,6 +899,9 @@ static struct request *attempt_merge(struct request_queue *q,
 	if (req->ioprio != next->ioprio)
 		return NULL;
 
+	if (!blk_atomic_write_mergeable_rqs(req, next))
+		return NULL;
+
 	/*
 	 * If we are allowed to merge, then append bio list
 	 * from next to rq and release next. merge_requests_fn
@@ -960,6 +1029,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (rq->ioprio != bio_prio(bio))
 		return false;
 
+	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
+		return false;
+
 	return true;
 }
--
2.31.1