nvme-tcp: kernel NULL pointer dereference, address: 0000000000000034
Sagi Grimberg
sagi at grimberg.me
Tue Mar 21 03:15:47 PDT 2023
>>> The admin tagset does not have a polled hctxs map to begin with,
>>> so I'm unclear how any fabrics or admin requests end up polled...
>>
>> Hmm, if no map_queues() callback is provided for the admin tag set,
>> isn't the
>> default mapping function used and this would add the poll hctxs map?
>> Let me add
>> a map_queues() callback and see what happens :)
>
> admin_tagset.nr_maps = 1 (only the default map, no read, no poll)
Oddly, I don't see admin/fabrics requests being polled...
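(As a side note, a minimal sketch of what that nr_maps = 1 implies for
a tag set; the ops and depth here are made up:

	struct blk_mq_tag_set admin_set = {
		.ops		= &example_admin_mq_ops,	/* hypothetical */
		.nr_hw_queues	= 1,
		.queue_depth	= 32,
		.nr_maps	= 1,	/* HCTX_TYPE_DEFAULT only, so no
					   HCTX_TYPE_POLL hctxs are created */
	};

With no poll map, requests from such a set should never land on a
polled hctx.)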
I just tried the patch below and it seems to work.
The only change I made was to pass the cookie to blk_poll explicitly:
bio_poll passes bi_cookie, while blk_rq_poll computes it directly
from the hctx.
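To spell out the resulting call paths (just a summary, not part of the
diff):

	bio_poll(bio, iob, flags)
		-> blk_poll(q, iob, READ_ONCE(bio->bi_cookie), bio, flags)

	blk_rq_poll(rq)
		-> blk_poll(rq->q, NULL, blk_rq_to_qc(rq), rq->bio, 0)

so polling a bio-less passthrough request no longer has to read the
cookie out of rq->bio.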
--
diff --git a/block/blk-core.c b/block/blk-core.c
index 9e5e0277a4d9..098840fe8bef 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -844,30 +844,11 @@ void submit_bio(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio);
-/**
- * bio_poll - poll for BIO completions
- * @bio: bio to poll for
- * @iob: batches of IO
- * @flags: BLK_POLL_* flags that control the behavior
- *
- * Poll for completions on queue associated with the bio. Returns number of
- * completed entries found.
- *
- * Note: the caller must either be the context that submitted @bio, or
- * be in a RCU critical section to prevent freeing of @bio.
- */
-int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+static int blk_poll(struct request_queue *q, struct io_comp_batch *iob,
+ blk_qc_t cookie, struct bio *bio, unsigned int flags)
{
- blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
- struct block_device *bdev;
- struct request_queue *q;
int ret = 0;
- bdev = READ_ONCE(bio->bi_bdev);
- if (!bdev)
- return 0;
-
- q = bdev_get_queue(bdev);
if (cookie == BLK_QC_T_NONE ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0;
@@ -902,6 +883,39 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
blk_queue_exit(q);
return ret;
}
+
+/**
+ * blk_rq_poll - poll for request completions
+ * @rq: request to poll for
+ */
+int blk_rq_poll(struct request *rq)
+{
+ return blk_poll(rq->q, NULL, blk_rq_to_qc(rq), rq->bio, 0);
+}
+EXPORT_SYMBOL_GPL(blk_rq_poll);
+
+/**
+ * bio_poll - poll for BIO completions
+ * @bio: bio to poll for
+ * @iob: batches of IO
+ * @flags: BLK_POLL_* flags that control the behavior
+ *
+ * Poll for completions on queue associated with the bio. Returns number of
+ * completed entries found.
+ *
+ * Note: the caller must either be the context that submitted @bio, or
+ * be in a RCU critical section to prevent freeing of @bio.
+ */
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+{
+ struct block_device *bdev;
+
+ bdev = READ_ONCE(bio->bi_bdev);
+ if (!bdev)
+ return 0;
+
+ return blk_poll(bdev_get_queue(bdev), iob, READ_ONCE(bio->bi_cookie), bio, flags);
+}
EXPORT_SYMBOL_GPL(bio_poll);
/*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cf1a39adf9a5..8161a07ca59d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -66,9 +66,6 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
return bucket;
}
-#define BLK_QC_T_SHIFT 16
-#define BLK_QC_T_INTERNAL (1U << 31)
-
static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
		blk_qc_t qc)
{
@@ -86,13 +83,6 @@ static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
return blk_mq_tag_to_rq(hctx->tags, tag);
}
-static inline blk_qc_t blk_rq_to_qc(struct request *rq)
-{
- return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
- (rq->tag != -1 ?
- rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
-}
-
/*
* Check if any of the ctx, dispatch list or elevator
* have pending work in this hardware queue.
@@ -1359,8 +1349,6 @@ bool blk_rq_is_poll(struct request *rq)
return false;
if (rq->mq_hctx->type != HCTX_TYPE_POLL)
return false;
- if (WARN_ON_ONCE(!rq->bio))
- return false;
return true;
}
EXPORT_SYMBOL_GPL(blk_rq_is_poll);
@@ -1368,7 +1356,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{
do {
- bio_poll(rq->bio, NULL, 0);
+ blk_rq_poll(rq);
cond_resched();
} while (!completion_done(wait));
}
diff --git a/block/blk-mq.h b/block/blk-mq.h
index a7482d2cc82e..7100ba5edd16 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -86,6 +86,15 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
return xa_load(&q->hctx_table, q->tag_set->map[type].mq_map[cpu]);
}
+#define BLK_QC_T_SHIFT 16
+#define BLK_QC_T_INTERNAL (1U << 31)
+static inline blk_qc_t blk_rq_to_qc(struct request *rq)
+{
+ return (rq->mq_hctx->queue_num << BLK_QC_T_SHIFT) |
+ (rq->tag != -1 ?
+ rq->tag : (rq->internal_tag | BLK_QC_T_INTERNAL));
+}
+
static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)
{
enum hctx_type type = HCTX_TYPE_DEFAULT;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d1aee08f8c18..70532cafd3a9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -872,6 +872,7 @@ blk_status_t errno_to_blk_status(int errno);
#define BLK_POLL_ONESHOT (1 << 0)
/* do not sleep to wait for the expected completion time */
#define BLK_POLL_NOSLEEP (1 << 1)
+int blk_rq_poll(struct request *rq);
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		unsigned int flags);
--
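With this in place, a polled passthrough request without a bio can be
executed synchronously again. Roughly, with a hypothetical queue q
(this caller sketch is not taken from the patch):

	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN | REQ_POLLED, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* no data mapped, so rq->bio stays NULL */
	status = blk_execute_rq(rq, false);	/* polls via blk_rq_poll() */
	blk_mq_free_request(rq);

For context on the crash itself: the old bio_poll() started with
READ_ONCE(bio->bi_cookie), so polling a request whose bio was never
there (or had already gone away) faulted at offsetof(struct bio,
bi_cookie), which is presumably the 0000000000000034 in the subject.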