nvme-tcp: kernel NULL pointer dereference, address: 0000000000000034

Keith Busch <kbusch at kernel.org>
Wed Mar 15 15:24:28 PDT 2023


Could you try this patch with your TCP polling queues enabled?
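
The idea is to quit reaching through the bio to find the queue:
bio_poll()'s guts move into a common blk_poll() helper that takes the
request_queue directly, and a new blk_rq_poll() wrapper lets
blk_rq_poll_completion() poll rq->q without touching bio->bi_bdev. A
rough (untested) usage sketch for the new export is at the bottom,
after the diff.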

---
diff --git a/block/blk-core.c b/block/blk-core.c
index 9e5e0277a4d95..12f7ab369f7ba 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -844,30 +844,12 @@ void submit_bio(struct bio *bio)
 }
 EXPORT_SYMBOL(submit_bio);
 
-/**
- * bio_poll - poll for BIO completions
- * @bio: bio to poll for
- * @iob: batches of IO
- * @flags: BLK_POLL_* flags that control the behavior
- *
- * Poll for completions on queue associated with the bio. Returns number of
- * completed entries found.
- *
- * Note: the caller must either be the context that submitted @bio, or
- * be in a RCU critical section to prevent freeing of @bio.
- */
-int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+static int blk_poll(struct request_queue *q, struct io_comp_batch *iob,
+		    struct bio *bio, unsigned int flags)
 {
 	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
-	struct block_device *bdev;
-	struct request_queue *q;
 	int ret = 0;
 
-	bdev = READ_ONCE(bio->bi_bdev);
-	if (!bdev)
-		return 0;
-
-	q = bdev_get_queue(bdev);
 	if (cookie == BLK_QC_T_NONE ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -902,6 +884,39 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
 	blk_queue_exit(q);
 	return ret;
 }
+
+/**
+ * blk_rq_poll - poll for request completions
+ * @rq: request to poll for
+ */
+int blk_rq_poll(struct request *rq)
+{
+	return blk_poll(rq->q, NULL, rq->bio, 0);
+}
+EXPORT_SYMBOL_GPL(blk_rq_poll);
+
+/**
+ * bio_poll - poll for BIO completions
+ * @bio: bio to poll for
+ * @iob: batches of IO
+ * @flags: BLK_POLL_* flags that control the behavior
+ *
+ * Poll for completions on queue associated with the bio. Returns number of
+ * completed entries found.
+ *
+ * Note: the caller must either be the context that submitted @bio, or
+ * be in a RCU critical section to prevent freeing of @bio.
+ */
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
+{
+	struct block_device *bdev;
+
+	bdev = READ_ONCE(bio->bi_bdev);
+	if (!bdev)
+		return 0;
+
+	return blk_poll(bdev_get_queue(bdev), iob, bio, flags);
+}
 EXPORT_SYMBOL_GPL(bio_poll);
 
 /*
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d0cb2ef18fe21..2ada72cbfb24e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1368,7 +1368,7 @@ EXPORT_SYMBOL_GPL(blk_rq_is_poll);
 static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
 {
 	do {
-		bio_poll(rq->bio, NULL, 0);
+		blk_rq_poll(rq);
 		cond_resched();
 	} while (!completion_done(wait));
 }
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d1aee08f8c181..70532cafd3a96 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -872,6 +872,7 @@ blk_status_t errno_to_blk_status(int errno);
 #define BLK_POLL_ONESHOT		(1 << 0)
 /* do not sleep to wait for the expected completion time */
 #define BLK_POLL_NOSLEEP		(1 << 1)
+int blk_rq_poll(struct request *rq);
 int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
 int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
 			unsigned int flags);
--
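
For illustration only, here's roughly how a request-based caller could
use the new export. This is a sketch against the patch above, not
tested code: my_end_io() and my_execute_and_poll() are made-up names,
and it assumes the request was allocated with REQ_POLLED on a queue
that advertises QUEUE_FLAG_POLL.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/sched.h>

/* Hypothetical end_io: just signal the waiter; the caller frees @rq. */
static enum rq_end_io_ret my_end_io(struct request *rq, blk_status_t err)
{
	complete(rq->end_io_data);
	return RQ_END_IO_NONE;
}

/* Hypothetical caller: issue @rq and spin for its completion. */
static void my_execute_and_poll(struct request *rq)
{
	DECLARE_COMPLETION_ONSTACK(done);

	rq->end_io = my_end_io;
	rq->end_io_data = &done;
	blk_execute_rq_nowait(rq, false);

	if (blk_rq_is_poll(rq)) {
		/*
		 * Spin on the request's own queue, like
		 * blk_rq_poll_completion() above; no bdev needed.
		 */
		do {
			blk_rq_poll(rq);
			cond_resched();
		} while (!completion_done(&done));
	} else {
		wait_for_completion(&done);
	}
}

The non-polled branch just falls back to what request-based callers
already do today.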