[PATCH 2/5] blk-mq: rename hctx_lock & hctx_unlock

Ming Lei <ming.lei@redhat.com>
Thu Nov 18 18:18:46 PST 2021


We have moved srcu from 'struct blk_mq_hw_ctx' into 'struct request_queue',
so hctx_lock() and hctx_unlock() now work at request queue level. Rename
them to queue_lock() and queue_unlock(), and pass the queue plus an
explicit 'blocking' flag instead of an hctx.

This also makes the helpers usable for supporting Jens's ->queue_rqs(), as
suggested by Keith.
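
For illustration only (not part of this patch): a batched issue path
such as ->queue_rqs() could then cover a whole list of requests with a
single queue-level RCU/SRCU section, instead of entering one section
per request as blk_mq_request_issue_directly() does. A rough sketch,
with a made-up helper name and error handling elided:

	static void blk_mq_issue_list_directly(struct request_queue *q,
			struct list_head *list)
	{
		int srcu_idx;
		/* hctx->flags mirrors the tag_set flags */
		bool blocking = q->tag_set->flags & BLK_MQ_F_BLOCKING;
		struct request *rq, *next;

		queue_lock(q, blocking, &srcu_idx);
		list_for_each_entry_safe(rq, next, list, queuelist) {
			bool last = list_is_last(&rq->queuelist, list);

			list_del_init(&rq->queuelist);
			/* BLK_STS_*RESOURCE handling elided */
			__blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
		}
		queue_unlock(q, blocking, srcu_idx);
	}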

They could also be extended for driver use in the future.
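
As a hypothetical example of such a driver use (the helpers are static
in blk-mq.c today and would need to be exported first), a driver could
hold off completion of queue quiescing across a short critical section;
drv_touch_queue() below is a made-up name:

	static void drv_touch_queue(struct request_queue *q)
	{
		int srcu_idx;
		bool blocking = q->tag_set->flags & BLK_MQ_F_BLOCKING;

		queue_lock(q, blocking, &srcu_idx);
		if (!blk_queue_quiesced(q)) {
			/*
			 * Safe to touch resources torn down only after
			 * quiesce completes: the synchronize_rcu()/
			 * synchronize_srcu() in quiesce waits for this
			 * read section to end.
			 */
		}
		queue_unlock(q, blocking, srcu_idx);
	}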

Cc: Keith Busch <kbusch@kernel.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c | 42 ++++++++++++++++++++++++------------------
 1 file changed, 24 insertions(+), 18 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9728a571b009..ba0d0e411b65 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1076,24 +1076,26 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static inline void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-	__releases(hctx->srcu)
+static inline void queue_unlock(struct request_queue *q, bool blocking,
+		int srcu_idx)
+	__releases(q->srcu)
 {
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING))
+	if (!blocking)
 		rcu_read_unlock();
 	else
-		srcu_read_unlock(hctx->queue->srcu, srcu_idx);
+		srcu_read_unlock(q->srcu, srcu_idx);
 }
 
-static inline void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
-	__acquires(hctx->srcu)
+static inline void queue_lock(struct request_queue *q, bool blocking,
+		int *srcu_idx)
+	__acquires(q->srcu)
 {
-	if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
+	if (!blocking) {
 		/* shut up gcc false positive */
 		*srcu_idx = 0;
 		rcu_read_lock();
 	} else
-		*srcu_idx = srcu_read_lock(hctx->queue->srcu);
+		*srcu_idx = srcu_read_lock(q->srcu);
 }
 
 /**
@@ -1958,6 +1960,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	int srcu_idx;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
 	/*
 	 * We can't run the queue inline with ints disabled. Ensure that
@@ -1965,11 +1968,11 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	 */
 	WARN_ON_ONCE(in_interrupt());
 
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+	might_sleep_if(blocking);
 
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 	blk_mq_sched_dispatch_requests(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 }
 
 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -2083,6 +2086,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
 	int srcu_idx;
 	bool need_run;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
 	/*
 	 * When queue is quiesced, we may be switching io scheduler, or
@@ -2092,10 +2096,10 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 	 * And queue will be rerun in blk_mq_unquiesce_queue() if it is
 	 * quiesced.
 	 */
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 	need_run = !blk_queue_quiesced(hctx->queue) &&
 		blk_mq_hctx_has_pending(hctx);
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 
 	if (need_run)
 		__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -2500,10 +2504,11 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 {
 	blk_status_t ret;
 	int srcu_idx;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
-	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+	might_sleep_if(blocking);
 
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 
 	ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
 	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
@@ -2511,7 +2516,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	else if (ret != BLK_STS_OK)
 		blk_mq_end_request(rq, ret);
 
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 }
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
@@ -2519,10 +2524,11 @@ static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
 	blk_status_t ret;
 	int srcu_idx;
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	bool blocking = hctx->flags & BLK_MQ_F_BLOCKING;
 
-	hctx_lock(hctx, &srcu_idx);
+	queue_lock(hctx->queue, blocking, &srcu_idx);
 	ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
-	hctx_unlock(hctx, srcu_idx);
+	queue_unlock(hctx->queue, blocking, srcu_idx);
 
 	return ret;
 }
-- 
2.31.1