[PATCH 0/3 rfc] Fix nvme-tcp and nvme-rdma controller reset hangs
Keith Busch
kbusch at kernel.org
Fri Mar 19 19:07:28 GMT 2021
On Fri, Mar 19, 2021 at 06:28:17PM +0100, Christoph Hellwig wrote:
> What about something like this?
Yes, this looks good.
> diff --git a/block/blk-core.c b/block/blk-core.c
> index fc60ff20849738..4344f3c9058282 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -792,7 +792,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
> return BLK_STS_OK;
> }
>
> -static noinline_for_stack bool submit_bio_checks(struct bio *bio)
> +noinline_for_stack bool submit_bio_checks(struct bio *bio)
> {
> struct block_device *bdev = bio->bi_bdev;
> struct request_queue *q = bdev->bd_disk->queue;
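
Un-staticing submit_bio_checks() makes sense here: the new
blk_mq_submit_bio_direct() below has to run the same checks before it
takes its nowait queue reference, and the block/blk.h hunk keeps the
declaration private to the block layer.
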
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index d4d7c1caa43966..4ff85692843b49 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2286,6 +2286,44 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
> return BLK_QC_T_NONE;
> }
>
> +/**
> + * blk_mq_submit_bio_direct - hand a bio directly to the driver for I/O
> + * @bio: The bio describing the location in memory and on the device.
> + * @qc: Output parameter for the queue cookie.
> + *
> + * This function behaves similarly to submit_bio_noacct(), but never waits
> + * for the queue to be unfrozen; instead it returns false and lets the caller
> + * deal with the fallout. It also does not protect against recursion and thus
> + * must only be used if the called driver is known to be blk-mq based.
> + */
> +bool blk_mq_submit_bio_direct(struct bio *bio, blk_qc_t *qc)
> +{
> + struct gendisk *disk = bio->bi_bdev->bd_disk;
> + struct request_queue *q = disk->queue;
> +
> + if (WARN_ON_ONCE(!current->bio_list) ||
> + WARN_ON_ONCE(disk->fops->submit_bio)) {
> + bio_io_error(bio);
> + goto fail;
> + }
> + if (!submit_bio_checks(bio))
> + goto fail;
> +
> + if (unlikely(blk_queue_enter(q, BLK_MQ_REQ_NOWAIT)))
> + return false;
> + if (!blk_crypto_bio_prep(&bio))
> + goto fail_queue_exit;
> + *qc = blk_mq_submit_bio(bio);
> + return true;
> +
> +fail_queue_exit:
> + blk_queue_exit(disk->queue);
> +fail:
> + *qc = BLK_QC_T_NONE;
> + return true;
> +}
> +EXPORT_SYMBOL_GPL(blk_mq_submit_bio_direct);
> +
> void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
> unsigned int hctx_idx)
> {
> diff --git a/block/blk.h b/block/blk.h
> index 3b53e44b967e4e..c4c66b2a9ffb19 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -221,6 +221,7 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
> ssize_t part_timeout_store(struct device *, struct device_attribute *,
> const char *, size_t);
>
> +bool submit_bio_checks(struct bio *bio);
> void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
> int ll_back_merge_fn(struct request *req, struct bio *bio,
> unsigned int nr_segs);
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index a1d476e1ac020f..92adebfaf86fd1 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -309,6 +309,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
> */
> blk_queue_split(&bio);
>
> +retry:
> srcu_idx = srcu_read_lock(&head->srcu);
> ns = nvme_find_path(head);
> if (likely(ns)) {
> @@ -316,7 +317,12 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
> bio->bi_opf |= REQ_NVME_MPATH;
> trace_block_bio_remap(bio, disk_devt(ns->head->disk),
> bio->bi_iter.bi_sector);
> - ret = submit_bio_noacct(bio);
> +
> + if (!blk_mq_submit_bio_direct(bio, &ret)) {
> + nvme_mpath_clear_current_path(ns);
> + srcu_read_unlock(&head->srcu, srcu_idx);
> + goto retry;
> + }
> } else if (nvme_available_path(head)) {
> dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
>
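
This retry loop is what actually addresses the reset hangs: going
through submit_bio_noacct() would park the submitter in
blk_queue_enter() until the frozen queue is unfrozen, which never
happens when a controller reset gets stuck.  Failing fast instead lets
us clear the current path and retry under a fresh srcu read lock, so
the bio either moves to another path or falls through to the requeue
logic below.
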
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index 2c473c9b899089..6804f397106ada 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -615,6 +615,7 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
> }
>
> blk_qc_t blk_mq_submit_bio(struct bio *bio);
> +bool blk_mq_submit_bio_direct(struct bio *bio, blk_qc_t *qc);
> void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
> struct lock_class_key *key);
>