[PATCH v3] um: Enable preemption in UML
anton.ivanov at cambridgegreys.com
anton.ivanov at cambridgegreys.com
Thu Sep 21 08:32:04 PDT 2023
From: Anton Ivanov <anton.ivanov at cambridgegreys.com>
Preemption requires saving/restoring FPU state. This patch
adds support for it using GCC intrinsics.
Signed-off-by: Anton Ivanov <anton.ivanov at cambridgegreys.com>
---
arch/um/Kconfig | 1 -
arch/um/include/asm/fpu/api.h | 6 +-
arch/um/include/asm/processor-generic.h | 3 +
arch/um/kernel/Makefile | 4 ++
arch/um/kernel/fpu.c | 77 +++++++++++++++++++++++++
arch/um/kernel/irq.c | 2 +
6 files changed, 91 insertions(+), 2 deletions(-)
create mode 100644 arch/um/kernel/fpu.c
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index b5e179360534..603f5fd82293 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -11,7 +11,6 @@ config UML
select ARCH_HAS_KCOV
select ARCH_HAS_STRNCPY_FROM_USER
select ARCH_HAS_STRNLEN_USER
- select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
diff --git a/arch/um/include/asm/fpu/api.h b/arch/um/include/asm/fpu/api.h
index 71bfd9ef3938..76ffa9c3c4b9 100644
--- a/arch/um/include/asm/fpu/api.h
+++ b/arch/um/include/asm/fpu/api.h
@@ -7,9 +7,13 @@
* A set of "dummy" defines to allow the direct inclusion
* of x86 optimized copy, xor, etc routines into the
* UML code tree. */
-
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
+extern void kernel_fpu_begin(void);
+extern void kernel_fpu_end(void);
+#else
#define kernel_fpu_begin() (void)0
#define kernel_fpu_end() (void)0
+#endif
static inline bool irq_fpu_usable(void)
{
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 7414154b8e9a..d9362f5a2212 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -44,6 +44,9 @@ struct thread_struct {
} cb;
} u;
} request;
+#if defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY)
+ u8 fpu[2048] __aligned(64); /* Intel docs require the xsave/xrstor area to be 64-byte aligned */
+#endif
};
#define INIT_THREAD \
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 811188be954c..c616e884a488 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -26,9 +26,13 @@ obj-$(CONFIG_OF) += dtb.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_GENERIC_PCI_IOMAP) += ioport.o
+obj-$(CONFIG_PREEMPT) += fpu.o
+obj-$(CONFIG_PREEMPT_VOLUNTARY) += fpu.o
USER_OBJS := config.o
+CFLAGS_fpu.o += -mxsave -mxsaveopt
+
include $(srctree)/arch/um/scripts/Makefile.rules
targets := config.c config.tmp capflags.c
diff --git a/arch/um/kernel/fpu.c b/arch/um/kernel/fpu.c
new file mode 100644
index 000000000000..346c07236185
--- /dev/null
+++ b/arch/um/kernel/fpu.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Cambridge Greys Ltd
+ * Copyright (C) 2023 Red Hat Inc
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <asm/fpu/api.h>
+#include <asm/cpufeature.h>
+
+/*
+ * The critical section between kernel_fpu_begin() and kernel_fpu_end()
+ * is non-reentrant. It is the caller's responsibility to avoid reentrance.
+ */
+
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+/* UML and driver code it pulls out of the x86 tree knows about 387 features
+ * up to and including AVX512. TILE, etc are not yet supported.
+ */
+
+#define KNOWN_387_FEATURES 0xFF
+
+
+void kernel_fpu_begin(void)
+{
+ preempt_disable();
+
+ WARN_ON(this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, true);
+
+#ifdef CONFIG_64BIT
+ if (likely(cpu_has(&boot_cpu_data, X86_FEATURE_XSAVEOPT)))
+ __builtin_ia32_xsaveopt64(&current->thread.fpu, KNOWN_387_FEATURES);
+ else {
+ if (likely(cpu_has(&boot_cpu_data, X86_FEATURE_XSAVE)))
+ __builtin_ia32_xsave64(&current->thread.fpu, KNOWN_387_FEATURES);
+ else
+ __builtin_ia32_fxsave64(&current->thread.fpu);
+ }
+#else
+ if (likely(cpu_has(&boot_cpu_data, X86_FEATURE_XSAVEOPT)))
+ __builtin_ia32_xsaveopt(&current->thread.fpu, KNOWN_387_FEATURES);
+ else {
+ if (likely(cpu_has(&boot_cpu_data, X86_FEATURE_XSAVE)))
+ __builtin_ia32_xsave(&current->thread.fpu, KNOWN_387_FEATURES);
+ else
+ __builtin_ia32_fxsave(&current->thread.fpu);
+ }
+#endif
+}
+
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+
+void kernel_fpu_end(void)
+{
+ WARN_ON(!this_cpu_read(in_kernel_fpu));
+
+#ifdef CONFIG_64BIT
+ if (likely(cpu_has(&boot_cpu_data, X86_FEATURE_XSAVE)))
+ __builtin_ia32_xrstor64(&current->thread.fpu, KNOWN_387_FEATURES);
+ else
+ __builtin_ia32_fxrstor64(&current->thread.fpu);
+#else
+ if (likely(cpu_has(&boot_cpu_data, X86_FEATURE_XSAVE)))
+ __builtin_ia32_xrstor(&current->thread.fpu, KNOWN_387_FEATURES);
+ else
+ __builtin_ia32_fxrstor(&current->thread.fpu);
+#endif
+ this_cpu_write(in_kernel_fpu, false);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 635d44606bfe..c02525da45df 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -195,7 +195,9 @@ static void _sigio_handler(struct uml_pt_regs *regs,
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
+ preempt_disable();
_sigio_handler(regs, irqs_suspended);
+ preempt_enable();
}
static struct irq_entry *get_irq_entry_by_fd(int fd)
--
2.30.2
More information about the linux-um
mailing list