[PATCH v5 2/6] block: wire-up support for passthrough plugging
Anuj Gupta
anuj20.g at samsung.com
Thu May 12 04:50:47 PDT 2022
On Thu, May 12, 2022 at 01:25:24PM +0800, Ming Lei wrote:
> Hello,
>
> On Wed, May 11, 2022 at 11:17:46AM +0530, Kanchan Joshi wrote:
> > From: Jens Axboe <axboe at kernel.dk>
> >
> > Add support for plugging in the passthrough path. When plugging is
> > enabled, requests are added to a plug instead of being dispatched to
> > the driver right away. When the plug is finished, the whole batch is
> > dispatched via ->queue_rqs, which turns out to be more efficient than
> > the previous behaviour of dispatching one request at a time via
> > ->queue_rq.
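[Side note, not part of the patch: a rough sketch of the usage this enables,
as I read it. Several async passthrough submissions issued inside a
blk_start_plug()/blk_finish_plug() pair are collected on the plug and go out
as one batch when the plug is finished, via ->queue_rqs when the queue and
driver conditions for that are met. The helper name below is made up for
illustration; the block-layer calls match this tree.]

/*
 * Illustrative only: submit pre-allocated passthrough requests under a
 * plug.  Assumes a driver context with <linux/blkdev.h> and
 * <linux/blk-mq.h> already included.
 */
static void submit_passthrough_batch(struct request **rqs, int nr,
                                     rq_end_io_fn *done)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                /* with this patch each request is parked on current->plug */
                blk_execute_rq_nowait(rqs[i], false, done);
        /* finishing the plug dispatches the whole batch */
        blk_finish_plug(&plug);
}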
> >
> > Signed-off-by: Jens Axboe <axboe at kernel.dk>
> > Reviewed-by: Christoph Hellwig <hch at lst.de>
> > ---
> > block/blk-mq.c | 73 +++++++++++++++++++++++++++-----------------------
> > 1 file changed, 39 insertions(+), 34 deletions(-)
> >
> > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > index 84d749511f55..2cf011b57cf9 100644
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -2340,6 +2340,40 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
> >          blk_mq_hctx_mark_pending(hctx, ctx);
> >  }
> >
> > +/*
> > + * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
> > + * queues. This is important for md arrays to benefit from merging
> > + * requests.
> > + */
> > +static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
> > +{
> > +        if (plug->multiple_queues)
> > +                return BLK_MAX_REQUEST_COUNT * 2;
> > +        return BLK_MAX_REQUEST_COUNT;
> > +}
> > +
> > +static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
> > +{
> > +        struct request *last = rq_list_peek(&plug->mq_list);
> > +
> > +        if (!plug->rq_count) {
> > +                trace_block_plug(rq->q);
> > +        } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
> > +                   (!blk_queue_nomerges(rq->q) &&
> > +                    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
> > +                blk_mq_flush_plug_list(plug, false);
> > +                trace_block_plug(rq->q);
> > +        }
> > +
> > +        if (!plug->multiple_queues && last && last->q != rq->q)
> > +                plug->multiple_queues = true;
> > +        if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
> > +                plug->has_elevator = true;
> > +        rq->rq_next = NULL;
> > +        rq_list_add(&plug->mq_list, rq);
> > +        plug->rq_count++;
> > +}
> > +
> >  /**
> >   * blk_mq_request_bypass_insert - Insert a request at dispatch list.
> >   * @rq: Pointer to request to be inserted.
> > @@ -2353,7 +2387,12 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
> >                                    bool run_queue)
> >  {
> >          struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
> > +        struct blk_plug *plug = current->plug;
> >
> > +        if (plug) {
> > +                blk_add_rq_to_plug(plug, rq);
> > +                return;
> > +        }
>
> This way may cause nested plugging, and it breaks xfstests generic/131.
> It may also cause an I/O hang, since the request can't be polled before
> the plug is flushed in blk_execute_rq().
>
Hi Ming,
Could you please share your test setup?
I tried test 131 with xfs and it passed.
I followed these steps:
1) mkfs.xfs -f /dev/nvme0n1
2) mount /dev/nvme0n1 /mnt/test
3) ./check tests/generic/131
I tried the same with ext4 and it passed as well.
Thanks,
Anuj
> I'd suggest applying the plug in blk_execute_rq_nowait(), such as:
>
>
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 2cf011b57cf9..60c29c0229d5 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -1169,6 +1169,62 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
>          complete(waiting);
>  }
>
> +/*
> + * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
> + * queues. This is important for md arrays to benefit from merging
> + * requests.
> + */
> +static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
> +{
> +        if (plug->multiple_queues)
> +                return BLK_MAX_REQUEST_COUNT * 2;
> +        return BLK_MAX_REQUEST_COUNT;
> +}
> +
> +static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
> +{
> +        struct request *last = rq_list_peek(&plug->mq_list);
> +
> +        if (!plug->rq_count) {
> +                trace_block_plug(rq->q);
> +        } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
> +                   (!blk_queue_nomerges(rq->q) &&
> +                    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
> +                blk_mq_flush_plug_list(plug, false);
> +                trace_block_plug(rq->q);
> +        }
> +
> +        if (!plug->multiple_queues && last && last->q != rq->q)
> +                plug->multiple_queues = true;
> +        if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
> +                plug->has_elevator = true;
> +        rq->rq_next = NULL;
> +        rq_list_add(&plug->mq_list, rq);
> +        plug->rq_count++;
> +}
> +
> +static void __blk_execute_rq_nowait(struct request *rq, bool at_head,
> +                rq_end_io_fn *done, bool use_plug)
> +{
> +        WARN_ON(irqs_disabled());
> +        WARN_ON(!blk_rq_is_passthrough(rq));
> +
> +        rq->end_io = done;
> +
> +        blk_account_io_start(rq);
> +
> +        if (use_plug && current->plug) {
> +                blk_add_rq_to_plug(current->plug, rq);
> +                return;
> +        }
> +        /*
> +         * don't check dying flag for MQ because the request won't
> +         * be reused after dying flag is set
> +         */
> +        blk_mq_sched_insert_request(rq, at_head, true, false);
> +}
> +
> +
>  /**
>   * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
>   * @rq: request to insert
> @@ -1184,18 +1240,8 @@ static void blk_end_sync_rq(struct request *rq, blk_status_t error)
>   */
>  void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
>  {
> -        WARN_ON(irqs_disabled());
> -        WARN_ON(!blk_rq_is_passthrough(rq));
> -
> -        rq->end_io = done;
> -
> -        blk_account_io_start(rq);
> +        __blk_execute_rq_nowait(rq, at_head, done, true);
>
> -        /*
> -         * don't check dying flag for MQ because the request won't
> -         * be reused after dying flag is set
> -         */
> -        blk_mq_sched_insert_request(rq, at_head, true, false);
>  }
>  EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
>
> @@ -1234,7 +1280,7 @@ blk_status_t blk_execute_rq(struct request *rq, bool at_head)
>          unsigned long hang_check;
>
>          rq->end_io_data = &wait;
> -        blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
> +        __blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq, false);
>
>          /* Prevent hang_check timer from firing at us during very long I/O */
>          hang_check = sysctl_hung_task_timeout_secs;
> @@ -2340,40 +2386,6 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
>          blk_mq_hctx_mark_pending(hctx, ctx);
>  }
>
> -/*
> - * Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
> - * queues. This is important for md arrays to benefit from merging
> - * requests.
> - */
> -static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
> -{
> -        if (plug->multiple_queues)
> -                return BLK_MAX_REQUEST_COUNT * 2;
> -        return BLK_MAX_REQUEST_COUNT;
> -}
> -
> -static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
> -{
> -        struct request *last = rq_list_peek(&plug->mq_list);
> -
> -        if (!plug->rq_count) {
> -                trace_block_plug(rq->q);
> -        } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
> -                   (!blk_queue_nomerges(rq->q) &&
> -                    blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
> -                blk_mq_flush_plug_list(plug, false);
> -                trace_block_plug(rq->q);
> -        }
> -
> -        if (!plug->multiple_queues && last && last->q != rq->q)
> -                plug->multiple_queues = true;
> -        if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
> -                plug->has_elevator = true;
> -        rq->rq_next = NULL;
> -        rq_list_add(&plug->mq_list, rq);
> -        plug->rq_count++;
> -}
> -
>  /**
>   * blk_mq_request_bypass_insert - Insert a request at dispatch list.
>   * @rq: Pointer to request to be inserted.
> @@ -2387,12 +2399,7 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
>                                    bool run_queue)
>  {
>          struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
> -        struct blk_plug *plug = current->plug;
>
> -        if (plug) {
> -                blk_add_rq_to_plug(plug, rq);
> -                return;
> -        }
>          spin_lock(&hctx->lock);
>          if (at_head)
>                  list_add(&rq->queuelist, &hctx->dispatch);
>
>
> Thanks,
> Ming
>
>