[PATCH for-next v10 6/7] block: extend functionality to map bvec iterator
Christoph Hellwig
hch at lst.de
Wed Sep 28 10:40:39 PDT 2022
On Tue, Sep 27, 2022 at 11:06:09PM +0530, Kanchan Joshi wrote:
> Extend blk_rq_map_user_iov so that it can handle a bvec iterator.
> It maps the pages from the bvec iterator into a bio and places the
> bio into the request.
>
> This helper will be used by nvme for the uring-passthrough path when
> IO is done using pre-mapped buffers.
Can we avoid duplicating some of the checks? Something like the below
incremental patch. Note that this now also allows the copy path for
all kinds of iov_iters, but as the copy from/to iter code is safe
and the sanity check was just for the map path, that should be fine.
It's best split into a prep patch, though.
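
For clarity, the convention the incremental patch introduces boils down
to the following standalone sketch (not kernel code; try_map() and
submit() are hypothetical stand-ins for blk_rq_map_user_bvec() and
blk_rq_map_user_iov()): the mapping helper returns 0 on success,
-EREMOTEIO when the buffer cannot be mapped zero-copy and the caller
should fall back to copying, and any other negative errno for a hard
failure:

#include <errno.h>
#include <stdbool.h>

/* stand-in for blk_rq_map_user_bvec() */
static int try_map(bool has_gap)
{
	if (has_gap)
		return -EREMOTEIO;	/* limits mismatch: caller should copy */
	return 0;			/* mapped zero-copy */
}

/* stand-in for the blk_rq_map_user_iov() caller side */
static int submit(bool has_gap)
{
	int ret = try_map(has_gap);

	if (!ret)
		return 0;		/* zero-copy mapping succeeded */
	if (ret != -EREMOTEIO)
		return ret;		/* hard error, fail the request */
	/* fall back to copying the data on limits mismatches */
	return 0;
}

This replaces the bool *copy out-parameter with a dedicated error code,
so the copy-fallback decision lives in one place in the caller.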
---
diff --git a/block/blk-map.c b/block/blk-map.c
index a1aa8dacb02bc..c51de30767403 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -549,26 +549,16 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
EXPORT_SYMBOL(blk_rq_append_bio);
/* Prepare bio for passthrough IO given ITER_BVEC iter */
-static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter,
- bool *copy)
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
struct request_queue *q = rq->q;
- size_t nr_iter, nr_segs, i;
- struct bio *bio = NULL;
- struct bio_vec *bv, *bvecs, *bvprvp = NULL;
+ size_t nr_iter = iov_iter_count(iter);
+ size_t nr_segs = iter->nr_segs;
+ struct bio_vec *bvecs, *bvprvp = NULL;
struct queue_limits *lim = &q->limits;
unsigned int nsegs = 0, bytes = 0;
- unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
-
- /* see if we need to copy pages due to any weird situation */
- if (blk_queue_may_bounce(q))
- goto out_copy;
- else if (iov_iter_alignment(iter) & align)
- goto out_copy;
- /* virt-alignment gap is checked anyway down, so avoid extra loop here */
-
- nr_iter = iov_iter_count(iter);
- nr_segs = iter->nr_segs;
+ struct bio *bio;
+ size_t i;
if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
return -EINVAL;
@@ -586,14 +576,15 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter,
/* loop to perform a bunch of sanity checks */
bvecs = (struct bio_vec *)iter->bvec;
for (i = 0; i < nr_segs; i++) {
- bv = &bvecs[i];
+ struct bio_vec *bv = &bvecs[i];
+
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, fallback to copy.
*/
if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
bio_map_put(bio);
- goto out_copy;
+ return -EREMOTEIO;
}
/* check full condition */
if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
@@ -611,9 +602,6 @@ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter,
put_bio:
bio_map_put(bio);
return -EINVAL;
-out_copy:
- *copy = true;
- return 0;
}
/**
@@ -635,33 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
- bool copy = false;
+ bool copy = false, map_bvec = false;
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret = -EINVAL;
- if (iov_iter_is_bvec(iter)) {
- ret = blk_rq_map_user_bvec(rq, iter, &copy);
- if (ret != 0)
- goto fail;
- if (copy)
- goto do_copy;
- return ret;
- }
- if (!iter_is_iovec(iter))
- goto fail;
-
if (map_data)
copy = true;
else if (blk_queue_may_bounce(q))
copy = true;
else if (iov_iter_alignment(iter) & align)
copy = true;
+ else if (iov_iter_is_bvec(iter))
+ map_bvec = true;
+ else if (!iter_is_iovec(iter))
+ copy = true;
else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
-do_copy:
+ if (map_bvec) {
+ ret = blk_rq_map_user_bvec(rq, iter);
+ if (!ret)
+ return 0;
+ if (ret != -EREMOTEIO)
+ goto fail;
+ /* fall back to copying the data on limits mismatches */
+ copy = true;
+ }
+
i = *iter;
do {
if (copy)