[PATCH RFC 01/11] um: Make UBD requests synchronous in TT ext/infcpu mode

Benjamin Beichler benjamin.beichler at uni-rostock.de
Fri Nov 3 09:41:39 PDT 2023


The UBD driver employs multiple threads to speed up block device
accesses from userspace. These threads communicate via pipes, and
completions are signalled through an interrupt that is raised by the
SIGIO handler.
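
For illustration, here is a minimal, self-contained userspace sketch of
the same pattern (a worker thread fed through a pipe, with completions
reaped in a SIGIO-driven handler). It is not the driver code, and all
names in it (submit_fds, done_fds, io_thread, ...) are made up:

  #include <fcntl.h>
  #include <pthread.h>
  #include <signal.h>
  #include <stdio.h>
  #include <unistd.h>

  static int submit_fds[2], done_fds[2];
  static volatile sig_atomic_t done;

  /* SIGIO handler: drain all completed requests, like ubd_handler() */
  static void sigio_handler(int sig)
  {
          void *req;

          while (read(done_fds[0], &req, sizeof(req)) == sizeof(req))
                  done = 1;    /* the driver would end the request here */
  }

  /* worker: take request pointers from one pipe, complete on the other */
  static void *io_thread(void *arg)
  {
          void *req;

          while (read(submit_fds[0], &req, sizeof(req)) == sizeof(req))
                  write(done_fds[1], &req, sizeof(req));
          return NULL;
  }

  int main(void)
  {
          int dummy, *req = &dummy;
          sigset_t mask, old;
          pthread_t thr;

          pipe(submit_fds);
          pipe(done_fds);

          /* block SIGIO until we are ready to wait for it */
          sigemptyset(&mask);
          sigaddset(&mask, SIGIO);
          sigprocmask(SIG_BLOCK, &mask, &old);
          signal(SIGIO, sigio_handler);

          /* raise SIGIO when the completion pipe becomes readable */
          fcntl(done_fds[0], F_SETOWN, getpid());
          fcntl(done_fds[0], F_SETFL, O_ASYNC | O_NONBLOCK);

          pthread_create(&thr, NULL, io_thread, NULL);
          write(submit_fds[1], &req, sizeof(req));    /* submit */

          while (!done)
                  sigsuspend(&old);    /* wait for SIGIO, race-free */

          printf("request completed asynchronously\n");
          return 0;
  }

(Compile with cc -pthread. In the driver, the submit side is the
os_write_file(thread_fd, ...) call and the reaping side is
ubd_handler(), both visible in the diff below.)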

However, in TT mode, in both its inf-cpu and external variants, this
asynchronous, multithreaded request processing leads to requests never
being processed. This occurs because there is no dedicated time travel
handler for the UBD interrupt.

Since asynchronous, multithreaded request processing provides no
substantial benefit in time travel mode and may even add overhead
(multiple threads end up scheduled sequentially to execute requests
when TT mode simulates infinite CPU power), this patch switches to
synchronous request processing directly within the ubd_submit_request()
call for the affected TT modes.
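
In outline, the synchronous path for these modes becomes the following
(condensed from the diff below, which is authoritative; error handling
is elided):

  /* in ubd_submit_request(), after the request has been mapped */
  if (time_travel_mode == TT_MODE_INFCPU ||
      time_travel_mode == TT_MODE_EXTERNAL) {
          for (int i = 0; !io_req->error && i < io_req->desc_cnt; i++)
                  do_io(io_req, &io_req->io_desc[i]);    /* blocking I/O */
          finalize_request(io_req);    /* end the request, free io_req */
          return 0;
  }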

Signed-off-by: Benjamin Beichler <benjamin.beichler at uni-rostock.de>
---
 arch/um/drivers/ubd_kern.c | 44 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 37 insertions(+), 7 deletions(-)
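
For testing, the new synchronous path is only taken in a
time-travel-enabled UML build, e.g. booted along the lines of
(illustrative command line; disk.img is a placeholder):

  ./linux mem=256M ubd0=disk.img root=/dev/ubda time-travel=inf-cpu

With plain "time-travel" (basic mode), or without the option, the
driver keeps using the asynchronous io thread.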

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 50206feac577..cdad289d5032 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -44,6 +44,7 @@
 #include "ubd.h"
 #include <os.h>
 #include "cow.h"
+#include "timetravel.h"
 
 /* Max request size is determined by sector mask - 32K */
 #define UBD_MAX_REQUEST (8 * sizeof(long))
@@ -456,6 +457,17 @@ static int bulk_req_safe_read(
 	return n;
 }
 
+static void finalize_request(struct io_thread_req *io_req)
+{
+	if ((io_req->error == BLK_STS_NOTSUPP) &&
+	    (req_op(io_req->req) == REQ_OP_DISCARD)) {
+		blk_queue_max_discard_sectors(io_req->req->q, 0);
+		blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
+	}
+	blk_mq_end_request(io_req->req, io_req->error);
+	kfree(io_req);
+}
+
 /* Called without dev->lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
@@ -479,13 +491,7 @@ static void ubd_handler(void)
 		}
 		for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
 			struct io_thread_req *io_req = (*irq_req_buffer)[count];
-
-			if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {
-				blk_queue_max_discard_sectors(io_req->req->q, 0);
-				blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
-			}
-			blk_mq_end_request(io_req->req, io_req->error);
-			kfree(io_req);
+			finalize_request(io_req);
 		}
 	}
 }
@@ -1136,6 +1142,17 @@ static int __init ubd_driver_init(void){
 		/* Letting ubd=sync be like using ubd#s= instead of ubd#= is
 		 * enough. So use anyway the io thread. */
 	}
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+	/* Do not initialize the asynchronous io-thread and its irq in
+	 * inf-cpu or external time travel mode; io must be synchronous.
+	 */
+
+	if (time_travel_mode == TT_MODE_INFCPU ||
+	    time_travel_mode == TT_MODE_EXTERNAL)
+		return 0;
+#endif
+
 	stack = alloc_stack(0, 0);
 	io_pid = start_io_thread(stack + PAGE_SIZE, &thread_fd);
 	if(io_pid < 0){
@@ -1312,8 +1329,10 @@ static struct io_thread_req *ubd_alloc_req(struct ubd *dev, struct request *req,
 	return io_req;
 }
 
+static void do_io(struct io_thread_req *req, struct io_desc *desc);
+
 static int ubd_submit_request(struct ubd *dev, struct request *req)
 {
 	int segs = 0;
 	struct io_thread_req *io_req;
 	int ret;
@@ -1334,6 +1353,17 @@ static int ubd_submit_request(struct ubd *dev, struct request *req)
 	if (segs)
 		ubd_map_req(dev, io_req, req);
 
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+	/* Do the request synchronously (bypass io_thread and ubd_handler) */
+	if (time_travel_mode == TT_MODE_INFCPU ||
+	    time_travel_mode == TT_MODE_EXTERNAL) {
+		for (int i = 0; !io_req->error && i < io_req->desc_cnt; i++)
+			do_io(io_req, &io_req->io_desc[i]);
+		finalize_request(io_req);
+		return 0;
+	}
+#endif
+
 	ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
 	if (ret != sizeof(io_req)) {
 		if (ret != -EAGAIN)

-- 
2.34.1