[PATCH V2 08/13] block: export __blk_mq_unfreeze_queue
Ming Lei
ming.lei at redhat.com
Sat Jan 22 03:10:49 PST 2022
blk_mq_unfreeze_queue() is used by scsi when releasing a disk, but in
that path there is no need to unfreeze back into percpu mode. Export
__blk_mq_unfreeze_queue() so the release path can pass 'force_atomic';
the following blk_cleanup_queue() then does not have to freeze the
queue out of percpu mode, and the implied RCU grace period may be
avoided.
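As a rough sketch, the release-path ordering this enables looks like
the following (illustrative only, not a quote of the SCSI code; the
actual caller is wired up later in this series):

	blk_mq_freeze_queue(q);
	/* ... tear the disk down ... */
	__blk_mq_unfreeze_queue(q, true);	/* ref stays in atomic mode */
	/*
	 * The freeze implied by blk_cleanup_queue() now finds
	 * ->q_usage_counter already in atomic mode, so killing it does
	 * not involve an RCU grace period.
	 */
	blk_cleanup_queue(q);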
Meanwhile, move the clearing of QUEUE_FLAG_INIT_DONE into this helper,
so that ->q_usage_counter can be switched back to percpu mode once a
disk is added again.
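For reference, the add-disk side this pairs with is the init-done
check in blk_register_queue(); roughly (paraphrased, treat it as a
sketch):

	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		/*
		 * Leave the cheap-to-shut-down atomic mode now that
		 * the disk is really in use.
		 */
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

Clearing QUEUE_FLAG_INIT_DONE in the force_atomic unfreeze is what
allows this switch to happen again when the disk is re-added.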
Signed-off-by: Ming Lei <ming.lei at redhat.com>
---
 block/blk-mq.c         | 10 +++++++++-
 block/blk.h            |  1 -
 block/genhd.c          |  1 -
 include/linux/blk-mq.h |  1 +
 4 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 66cc701921c1..d51b0aa2e4e4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -215,11 +215,18 @@ void blk_mq_freeze_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
+/*
+ * When 'force_atomic' is true, this API is expected to be called only
+ * when the disk is removed or released.
+ */
 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 {
 	mutex_lock(&q->mq_freeze_lock);
-	if (force_atomic)
+	if (force_atomic) {
+		/* When a new disk is added, switch back to percpu mode */
+		blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
 		q->q_usage_counter.data->force_atomic = true;
+	}
 	q->mq_freeze_depth--;
 	WARN_ON_ONCE(q->mq_freeze_depth < 0);
 	if (!q->mq_freeze_depth) {
@@ -228,6 +235,7 @@ void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
 	}
 	mutex_unlock(&q->mq_freeze_lock);
 }
+EXPORT_SYMBOL_GPL(__blk_mq_unfreeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
diff --git a/block/blk.h b/block/blk.h
index 7b0f12260ae6..a038e25d8637 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -43,7 +43,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q);
-void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
bool submit_bio_checks(struct bio *bio);
diff --git a/block/genhd.c b/block/genhd.c
index b9b0db168ce1..5bd7bcd6246e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -628,7 +628,6 @@ void del_gendisk(struct gendisk *disk)
 
 	/*
 	 * Allow using passthrough request again after the queue is torn down.
 	 */
-	blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
 	__blk_mq_unfreeze_queue(q, true);
 }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d2ad2ed11723..1645159d10f3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -869,6 +869,7 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset);
 void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
+void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 void blk_freeze_queue_start(struct request_queue *q);
 void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
--
2.31.1