[PATCH 1/3] block: introduce blk_mq_queue_nr_active()
Hannes Reinecke
hare at suse.de
Wed Sep 27 00:36:11 PDT 2023
On 9/25/23 18:31, Ewan D. Milne wrote:
> Returns a count of the total number of active requests
> in a queue. For non-shared tags (the usual case) this is
> the sum of nr_active from all of the hctxs.
>
Calling non-shared tags "the usual case" is a bit of an exaggeration.
Shared tags are in use whenever the hardware supports only a global
tag space (i.e. basically all SCSI and FC HBAs).
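To illustrate: such a driver marks its tag set as host-wide shared at
setup time. A minimal sketch (the mydrv_* names are made up;
BLK_MQ_F_TAG_HCTX_SHARED is the real flag that blk_mq_is_shared_tags()
tests):

/*
 * Hypothetical driver declaring a single host-wide (shared) tag space.
 * With this flag set, active requests are accounted in
 * q->nr_active_requests_shared_tags rather than per-hctx.
 */
static int mydrv_setup_tags(struct mydrv_host *host)
{
	struct blk_mq_tag_set *set = &host->tag_set;

	set->ops = &mydrv_mq_ops;
	set->nr_hw_queues = host->nr_hw_queues;
	set->queue_depth = host->can_queue;	/* one global tag space */
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_TAG_HCTX_SHARED;

	return blk_mq_alloc_tag_set(set);
}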
> Signed-off-by: Ewan D. Milne <emilne at redhat.com>
> ---
> block/blk-mq.h | 5 -----
> include/linux/blk-mq.h | 33 ++++++++++++++++++++++++++-------
> 2 files changed, 26 insertions(+), 12 deletions(-)
>
> diff --git a/block/blk-mq.h b/block/blk-mq.h
> index 1743857e0b01..fbc65eefa017 100644
> --- a/block/blk-mq.h
> +++ b/block/blk-mq.h
> @@ -214,11 +214,6 @@ static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
> return tag < tags->nr_reserved_tags;
> }
>
> -static inline bool blk_mq_is_shared_tags(unsigned int flags)
> -{
> - return flags & BLK_MQ_F_TAG_HCTX_SHARED;
> -}
> -
> static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
> {
> if (data->rq_flags & RQF_SCHED_TAGS)
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index 01e8c31db665..c921ae5236ab 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -716,6 +716,32 @@ int blk_rq_poll(struct request *rq, struct io_comp_batch *iob,
>
> bool blk_mq_queue_inflight(struct request_queue *q);
>
> +#define queue_for_each_hw_ctx(q, hctx, i) \
> + xa_for_each(&(q)->hctx_table, (i), (hctx))
> +
> +#define hctx_for_each_ctx(hctx, ctx, i) \
> + for ((i) = 0; (i) < (hctx)->nr_ctx && \
> + ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
> +
> +static inline bool blk_mq_is_shared_tags(unsigned int flags)
> +{
> + return flags & BLK_MQ_F_TAG_HCTX_SHARED;
> +}
> +
> +static inline unsigned int blk_mq_queue_nr_active(struct request_queue *q)
> +{
> + unsigned int nr_active = 0;
> + struct blk_mq_hw_ctx *hctx;
> + unsigned long i;
> +
> + queue_for_each_hw_ctx(q, hctx, i) {
> + if (unlikely(blk_mq_is_shared_tags(hctx->flags)))
> + return atomic_read(&q->nr_active_requests_shared_tags);
> + nr_active += atomic_read(&hctx->nr_active);
> + }
> + return nr_active;
> +}
> +
> enum {
> /* return when out of requests */
> BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
> @@ -941,13 +967,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
> return rq + 1;
> }
>
> -#define queue_for_each_hw_ctx(q, hctx, i) \
> - xa_for_each(&(q)->hctx_table, (i), (hctx))
> -
> -#define hctx_for_each_ctx(hctx, ctx, i) \
> - for ((i) = 0; (i) < (hctx)->nr_ctx && \
> - ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
> -
> static inline void blk_mq_cleanup_rq(struct request *rq)
> {
> if (rq->q->mq_ops->cleanup_rq)
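FWIW, the obvious consumer of this helper would be something like a
queue-depth based path selector. A purely hypothetical sketch (only
blk_mq_queue_nr_active() is from this patch; the rest is made up):

/*
 * Hypothetical caller: prefer the path whose queue currently has
 * fewer active requests.
 */
static struct request_queue *less_busy_queue(struct request_queue *a,
					     struct request_queue *b)
{
	return blk_mq_queue_nr_active(a) <= blk_mq_queue_nr_active(b) ?
		a : b;
}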
Well. As discussed, using an xarray for 'small' arrays is horrible
for performance. We really should revert Ming's patch and turn the
hctx map back into a simple array; that would make traversing it much
faster.
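For reference, before the xarray conversion the iterator was a plain
indexed walk over a flat array, roughly like this (quoted from memory,
so treat it as a sketch rather than the exact historical definition):

/* Pre-xarray variant: direct array indexing, no tree walk. */
#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

Each iteration is then a single array load instead of an xarray
lookup.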
Cheers,
Hannes
--
Dr. Hannes Reinecke                  Kernel Storage Architect
hare at suse.de                       +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Ivo Totev, Andrew Myers,
Andrew McDonald, Martje Boudien Moerman