[PATCH v4 03/13] um: decouple MMU specific code from the common part
Hajime Tazaki
thehajime@gmail.com
Sun Dec 8 02:15:30 PST 2024
This splits the memory and process related code into common and
MMU-specific parts, to avoid #ifdefs in .c files and duplication
between the MMU and !MMU builds. While at it, the BUG_ON() checks
in the moved page table setup helpers are relaxed to
WARN_ON_ONCE().
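
The common code now calls the MMU hooks unconditionally, and the
!MMU build resolves them to empty static inlines in the shared
header. For example (excerpted from this patch), um_fixrange_init()
is declared as:

    /* arch/um/include/shared/mem.h */
    #ifdef CONFIG_MMU
    extern void um_fixrange_init(void);
    #else
    static inline void um_fixrange_init(void)
    {
    }
    #endif

so paging_init() in arch/um/kernel/mem.c can call it without an
#ifdef, while the MMU implementation moves to the new
arch/um/kernel/mem-pgtable.c.
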
Signed-off-by: Hajime Tazaki <thehajime@gmail.com>
---
 arch/um/include/shared/mem.h    |   8 ++
 arch/um/kernel/Makefile         |   5 +-
 arch/um/kernel/mem-pgtable.c    | 175 ++++++++++++++++++++++++++++++++
 arch/um/kernel/mem.c            | 149 +--------------------------
 arch/um/kernel/process.c        |  25 +++++
 arch/um/kernel/skas/process.c   |  27 -----
 arch/um/os-Linux/Makefile       |   3 +-
 arch/um/os-Linux/internal.h     |   5 +
 arch/um/os-Linux/process.c      | 133 ++++++++++++++++++++++++
 arch/um/os-Linux/skas/process.c | 132 ------------------------
 10 files changed, 352 insertions(+), 310 deletions(-)
create mode 100644 arch/um/kernel/mem-pgtable.c
diff --git a/arch/um/include/shared/mem.h b/arch/um/include/shared/mem.h
index 98aacd544108..c5dfca3bf4d5 100644
--- a/arch/um/include/shared/mem.h
+++ b/arch/um/include/shared/mem.h
@@ -19,4 +19,12 @@ static inline void *uml_to_virt(unsigned long phys)
 	return((void *) uml_physmem + phys);
 }
 
+#ifdef CONFIG_MMU
+extern void um_fixrange_init(void);
+#else
+static inline void um_fixrange_init(void)
+{
+}
+#endif
+
 #endif
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index f8567b933ffa..eec6682812ce 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -16,9 +16,10 @@ extra-y := vmlinux.lds
 
 obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
 	physmem.o process.o ptrace.o reboot.o sigio.o \
-	signal.o sysrq.o time.o tlb.o trap.o \
-	um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/
+	signal.o sysrq.o time.o \
+	um_arch.o umid.o maccess.o kmsg_dump.o capflags.o
 obj-y += load_file.o
 
+obj-$(CONFIG_MMU) += mem-pgtable.o tlb.o trap.o skas/
 obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
 obj-$(CONFIG_GPROF) += gprof_syms.o
diff --git a/arch/um/kernel/mem-pgtable.c b/arch/um/kernel/mem-pgtable.c
new file mode 100644
index 000000000000..8c977bda601d
--- /dev/null
+++ b/arch/um/kernel/mem-pgtable.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
+ */
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <asm/fixmap.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <as-layout.h>
+#include <init.h>
+#include <kern.h>
+#include <kern_util.h>
+#include <mem_user.h>
+#include <os.h>
+#include <um_malloc.h>
+
+/*
+ * Create a page table and place a pointer to it in a middle page
+ * directory entry.
+ */
+static void __init one_page_table_init(pmd_t *pmd)
+{
+	if (pmd_none(*pmd)) {
+		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
+							  PAGE_SIZE);
+		if (!pte)
+			panic("%s: Failed to allocate %lu bytes align=%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+
+		set_pmd(pmd, __pmd(_KERNPG_TABLE +
+				   (unsigned long) __pa(pte)));
+		WARN_ON_ONCE(pte != pte_offset_kernel(pmd, 0));
+	}
+}
+
+static void __init one_md_table_init(pud_t *pud)
+{
+#if CONFIG_PGTABLE_LEVELS > 2
+	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+
+	if (!pmd_table)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
+	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
+	WARN_ON_ONCE(pmd_table != pmd_offset(pud, 0));
+#endif
+}
+
+static void __init one_ud_table_init(p4d_t *p4d)
+{
+#if CONFIG_PGTABLE_LEVELS > 3
+	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+
+	if (!pud_table)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
+	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
+	WARN_ON_ONCE(pud_table != pud_offset(p4d, 0));
+#endif
+}
+
+static void __init fixrange_init(unsigned long start, unsigned long end,
+				 pgd_t *pgd_base)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	int i, j;
+	unsigned long vaddr;
+
+	vaddr = start;
+	i = pgd_index(vaddr);
+	j = pmd_index(vaddr);
+	pgd = pgd_base + i;
+
+	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
+		p4d = p4d_offset(pgd, vaddr);
+		if (p4d_none(*p4d))
+			one_ud_table_init(p4d);
+		pud = pud_offset(p4d, vaddr);
+		if (pud_none(*pud))
+			one_md_table_init(pud);
+		pmd = pmd_offset(pud, vaddr);
+		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
+			one_page_table_init(pmd);
+			vaddr += PMD_SIZE;
+		}
+		j = 0;
+	}
+}
+
+static void __init fixaddr_user_init(void)
+{
+#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
+	long size = FIXADDR_USER_END - FIXADDR_USER_START;
+	pte_t *pte;
+	phys_t p;
+	unsigned long v, vaddr = FIXADDR_USER_START;
+
+	if (!size)
+		return;
+
+	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
+	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
+	if (!v)
+		panic("%s: Failed to allocate %lu bytes align=%lx\n",
+		      __func__, size, PAGE_SIZE);
+
+	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
+	p = __pa(v);
+	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
+	      p += PAGE_SIZE) {
+		pte = virt_to_kpte(vaddr);
+		pte_set_val(*pte, p, PAGE_READONLY);
+	}
+#endif
+}
+
+void __init um_fixrange_init(void)
+{
+	unsigned long vaddr;
+
+	/*
+	 * Fixed mappings, only the page table structure has to be
+	 * created - mappings will be set by set_fixmap():
+	 */
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
+
+	fixaddr_user_init();
+}
+
+/* Allocate and free page tables. */
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+	if (pgd) {
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD,
+		       swapper_pg_dir + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+	return pgd;
+}
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE] = PAGE_NONE,
+	[VM_READ] = PAGE_READONLY,
+	[VM_WRITE] = PAGE_COPY,
+	[VM_WRITE | VM_READ] = PAGE_COPY,
+	[VM_EXEC] = PAGE_READONLY,
+	[VM_EXEC | VM_READ] = PAGE_READONLY,
+	[VM_EXEC | VM_WRITE] = PAGE_COPY,
+	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
+	[VM_SHARED] = PAGE_NONE,
+	[VM_SHARED | VM_READ] = PAGE_READONLY,
+	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
+	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
+	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 53248ed04771..e3a7ec32d6c7 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -6,7 +6,6 @@
 #include <linux/stddef.h>
 #include <linux/module.h>
 #include <linux/memblock.h>
-#include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/slab.h>
 #include <asm/fixmap.h>
@@ -74,113 +73,9 @@ void __init mem_init(void)
 	kmalloc_ok = 1;
 }
 
-/*
- * Create a page table and place a pointer to it in a middle page
- * directory entry.
- */
-static void __init one_page_table_init(pmd_t *pmd)
-{
-	if (pmd_none(*pmd)) {
-		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
-							  PAGE_SIZE);
-		if (!pte)
-			panic("%s: Failed to allocate %lu bytes align=%lx\n",
-			      __func__, PAGE_SIZE, PAGE_SIZE);
-
-		set_pmd(pmd, __pmd(_KERNPG_TABLE +
-				   (unsigned long) __pa(pte)));
-		BUG_ON(pte != pte_offset_kernel(pmd, 0));
-	}
-}
-
-static void __init one_md_table_init(pud_t *pud)
-{
-#if CONFIG_PGTABLE_LEVELS > 2
-	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pmd_table)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
-	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
-	BUG_ON(pmd_table != pmd_offset(pud, 0));
-#endif
-}
-
-static void __init one_ud_table_init(p4d_t *p4d)
-{
-#if CONFIG_PGTABLE_LEVELS > 3
-	pud_t *pud_table = (pud_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
-	if (!pud_table)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, PAGE_SIZE, PAGE_SIZE);
-
-	set_p4d(p4d, __p4d(_KERNPG_TABLE + (unsigned long) __pa(pud_table)));
-	BUG_ON(pud_table != pud_offset(p4d, 0));
-#endif
-}
-
-static void __init fixrange_init(unsigned long start, unsigned long end,
-				 pgd_t *pgd_base)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	int i, j;
-	unsigned long vaddr;
-
-	vaddr = start;
-	i = pgd_index(vaddr);
-	j = pmd_index(vaddr);
-	pgd = pgd_base + i;
-
-	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
-		p4d = p4d_offset(pgd, vaddr);
-		if (p4d_none(*p4d))
-			one_ud_table_init(p4d);
-		pud = pud_offset(p4d, vaddr);
-		if (pud_none(*pud))
-			one_md_table_init(pud);
-		pmd = pmd_offset(pud, vaddr);
-		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
-			one_page_table_init(pmd);
-			vaddr += PMD_SIZE;
-		}
-		j = 0;
-	}
-}
-
-static void __init fixaddr_user_init( void)
-{
-#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
-	long size = FIXADDR_USER_END - FIXADDR_USER_START;
-	pte_t *pte;
-	phys_t p;
-	unsigned long v, vaddr = FIXADDR_USER_START;
-
-	if (!size)
-		return;
-
-	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
-	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
-	if (!v)
-		panic("%s: Failed to allocate %lu bytes align=%lx\n",
-		      __func__, size, PAGE_SIZE);
-
-	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
-	p = __pa(v);
-	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
-	      p += PAGE_SIZE) {
-		pte = virt_to_kpte(vaddr);
-		pte_set_val(*pte, p, PAGE_READONLY);
-	}
-#endif
-}
-
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
-	unsigned long vaddr;
 
 	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
 							       PAGE_SIZE);
@@ -191,14 +86,7 @@ void __init paging_init(void)
 	max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
 	free_area_init(max_zone_pfn);
 
-	/*
-	 * Fixed mappings, only the page table structure has to be
-	 * created - mappings will be set by set_fixmap():
-	 */
-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);
-
-	fixaddr_user_init();
+	um_fixrange_init();
 }
 
 /*
@@ -210,42 +98,7 @@ void free_initmem(void)
 {
 }
 
-/* Allocate and free page tables. */
-
-pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-
-	if (pgd) {
-		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-		memcpy(pgd + USER_PTRS_PER_PGD,
-		       swapper_pg_dir + USER_PTRS_PER_PGD,
-		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-	}
-	return pgd;
-}
-
 void *uml_kmalloc(int size, int flags)
 {
 	return kmalloc(size, flags);
 }
-
-static const pgprot_t protection_map[16] = {
-	[VM_NONE] = PAGE_NONE,
-	[VM_READ] = PAGE_READONLY,
-	[VM_WRITE] = PAGE_COPY,
-	[VM_WRITE | VM_READ] = PAGE_COPY,
-	[VM_EXEC] = PAGE_READONLY,
-	[VM_EXEC | VM_READ] = PAGE_READONLY,
-	[VM_EXEC | VM_WRITE] = PAGE_COPY,
-	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY,
-	[VM_SHARED] = PAGE_NONE,
-	[VM_SHARED | VM_READ] = PAGE_READONLY,
-	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
-	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
-	[VM_SHARED | VM_EXEC] = PAGE_READONLY,
-	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY,
-	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED,
-	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED
-};
-DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 30bdc0a87dc8..aaf7bcff235c 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -25,6 +25,7 @@
 #include <linux/tick.h>
 #include <linux/threads.h>
 #include <linux/resume_user_mode.h>
+#include <linux/start_kernel.h>
 #include <asm/current.h>
 #include <asm/mmu_context.h>
 #include <asm/switch_to.h>
@@ -46,6 +47,8 @@
 struct task_struct *cpu_tasks[NR_CPUS];
 EXPORT_SYMBOL(cpu_tasks);
 
+static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
+
 void free_stack(unsigned long stack, int order)
 {
 	free_pages(stack, order);
@@ -295,3 +298,25 @@ unsigned long __get_wchan(struct task_struct *p)
 
 	return 0;
 }
+
+
+static int __init start_kernel_proc(void *unused)
+{
+	block_signals_trace();
+
+	start_kernel();
+	return 0;
+}
+
+int __init start_uml(void)
+{
+	stack_protections((unsigned long) &cpu0_irqstack);
+	set_sigstack(cpu0_irqstack, THREAD_SIZE);
+
+	init_new_thread_signals();
+
+	init_task.thread.request.thread.proc = start_kernel_proc;
+	init_task.thread.request.thread.arg = NULL;
+	return start_idle_thread(task_stack_page(&init_task),
+				 &init_task.thread.switch_buf);
+}
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 05dcdc057af9..5247121d3419 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -16,33 +16,6 @@
 #include <skas.h>
 #include <kern_util.h>
 
-extern void start_kernel(void);
-
-static int __init start_kernel_proc(void *unused)
-{
-	block_signals_trace();
-
-	start_kernel();
-	return 0;
-}
-
-extern int userspace_pid[];
-
-static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
-
-int __init start_uml(void)
-{
-	stack_protections((unsigned long) &cpu0_irqstack);
-	set_sigstack(cpu0_irqstack, THREAD_SIZE);
-
-	init_new_thread_signals();
-
-	init_task.thread.request.thread.proc = start_kernel_proc;
-	init_task.thread.request.thread.arg = NULL;
-	return start_idle_thread(task_stack_page(&init_task),
-				 &init_task.thread.switch_buf);
-}
-
 unsigned long current_stub_stack(void)
 {
 	if (current->mm == NULL)
diff --git a/arch/um/os-Linux/Makefile b/arch/um/os-Linux/Makefile
index 049dfa5bc9c6..331564888400 100644
--- a/arch/um/os-Linux/Makefile
+++ b/arch/um/os-Linux/Makefile
@@ -8,7 +8,8 @@ KCOV_INSTRUMENT := n
 
 obj-y = execvp.o file.o helper.o irq.o main.o mem.o process.o \
 	registers.o sigio.o signal.o start_up.o time.o tty.o \
-	umid.o user_syms.o util.o drivers/ skas/
+	umid.o user_syms.o util.o drivers/
+obj-$(CONFIG_MMU) += skas/
 
 CFLAGS_signal.o += -Wframe-larger-than=4096
 
diff --git a/arch/um/os-Linux/internal.h b/arch/um/os-Linux/internal.h
index 317fca190c2b..bcbf64ce8cd1 100644
--- a/arch/um/os-Linux/internal.h
+++ b/arch/um/os-Linux/internal.h
@@ -2,6 +2,11 @@
 #ifndef __UM_OS_LINUX_INTERNAL_H
 #define __UM_OS_LINUX_INTERNAL_H
 
+/*
+ * process.c
+ */
+extern int userspace_pid[];
+
 /*
  * elf_aux.c
  */
diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
index 9f086f939420..6df378639880 100644
--- a/arch/um/os-Linux/process.c
+++ b/arch/um/os-Linux/process.c
@@ -6,6 +6,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <stdbool.h>
 #include <unistd.h>
 #include <errno.h>
 #include <signal.h>
@@ -15,9 +16,15 @@
 #include <sys/prctl.h>
 #include <sys/wait.h>
 #include <asm/unistd.h>
+#include <linux/threads.h>
 #include <init.h>
 #include <longjmp.h>
 #include <os.h>
+#include <as-layout.h>
+#include <kern_util.h>
+
+int userspace_pid[NR_CPUS];
+int unscheduled_userspace_iterations;
 
 void os_alarm_process(int pid)
 {
@@ -209,3 +216,129 @@ void os_set_pdeathsig(void)
 {
 	prctl(PR_SET_PDEATHSIG, SIGKILL);
 }
+
+int is_skas_winch(int pid, int fd, void *data)
+{
+	return pid == getpgrp();
+}
+
+void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
+{
+	(*buf)[0].JB_IP = (unsigned long) handler;
+	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
+		sizeof(void *);
+}
+
+#define INIT_JMP_NEW_THREAD 0
+#define INIT_JMP_CALLBACK 1
+#define INIT_JMP_HALT 2
+#define INIT_JMP_REBOOT 3
+
+void switch_threads(jmp_buf *me, jmp_buf *you)
+{
+	unscheduled_userspace_iterations = 0;
+
+	if (UML_SETJMP(me) == 0)
+		UML_LONGJMP(you, 1);
+}
+
+static jmp_buf initial_jmpbuf;
+
+/* XXX Make these percpu */
+static void (*cb_proc)(void *arg);
+static void *cb_arg;
+static jmp_buf *cb_back;
+
+int start_idle_thread(void *stack, jmp_buf *switch_buf)
+{
+	int n;
+
+	set_handler(SIGWINCH);
+
+	/*
+	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
+	 * and restore signals, with the possible side-effect of
+	 * trying to handle any signals which came when they were
+	 * blocked, which can't be done on this stack.
+	 * Signals must be blocked when jumping back here and restored
+	 * after returning to the jumper.
+	 */
+	n = setjmp(initial_jmpbuf);
+	switch (n) {
+	case INIT_JMP_NEW_THREAD:
+		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
+		(*switch_buf)[0].JB_SP = (unsigned long) stack +
+			UM_THREAD_SIZE - sizeof(void *);
+		break;
+	case INIT_JMP_CALLBACK:
+		(*cb_proc)(cb_arg);
+		longjmp(*cb_back, 1);
+		break;
+	case INIT_JMP_HALT:
+		kmalloc_ok = 0;
+		return 0;
+	case INIT_JMP_REBOOT:
+		kmalloc_ok = 0;
+		return 1;
+	default:
+		printk(UM_KERN_ERR "Bad sigsetjmp return in %s - %d\n",
+		       __func__, n);
+		fatal_sigsegv();
+	}
+	longjmp(*switch_buf, 1);
+
+	/* unreachable */
+	printk(UM_KERN_ERR "impossible long jump!");
+	fatal_sigsegv();
+	return 0;
+}
+
+void initial_thread_cb_skas(void (*proc)(void *), void *arg)
+{
+	jmp_buf here;
+
+	cb_proc = proc;
+	cb_arg = arg;
+	cb_back = &here;
+
+	block_signals_trace();
+	if (UML_SETJMP(&here) == 0)
+		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
+	unblock_signals_trace();
+
+	cb_proc = NULL;
+	cb_arg = NULL;
+	cb_back = NULL;
+}
+
+void halt_skas(void)
+{
+	block_signals_trace();
+	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
+}
+
+static bool noreboot;
+
+static int __init noreboot_cmd_param(char *str, int *add)
+{
+	*add = 0;
+	noreboot = true;
+	return 0;
+}
+
+__uml_setup("noreboot", noreboot_cmd_param,
+"noreboot\n"
+"    Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
+"    This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
+"    crashes in CI\n");
+
+void reboot_skas(void)
+{
+	block_signals_trace();
+	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
+}
+
+void __switch_mm(struct mm_id *mm_idp)
+{
+	userspace_pid[0] = mm_idp->pid;
+}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index f683cfc9e51a..555eb4905f70 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -5,7 +5,6 @@
  */
 
 #include <stdlib.h>
-#include <stdbool.h>
 #include <unistd.h>
 #include <sched.h>
 #include <errno.h>
@@ -16,7 +15,6 @@
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <asm/unistd.h>
-#include <as-layout.h>
 #include <init.h>
 #include <kern_util.h>
 #include <mem.h>
@@ -25,15 +23,9 @@
 #include <registers.h>
 #include <skas.h>
 #include <sysdep/stub.h>
-#include <linux/threads.h>
 #include <timetravel.h>
 #include "../internal.h"
 
-int is_skas_winch(int pid, int fd, void *data)
-{
-	return pid == getpgrp();
-}
-
 static const char *ptrace_reg_name(int idx)
 {
 #define R(n) case HOST_##n: return #n
@@ -305,8 +297,6 @@ static int __init init_stub_exe_fd(void)
 }
 __initcall(init_stub_exe_fd);
 
-int userspace_pid[NR_CPUS];
-
 /**
  * start_userspace() - prepare a new userspace process
  * @stub_stack: pointer to the stub stack.
@@ -388,7 +378,6 @@ int start_userspace(unsigned long stub_stack)
 	return err;
 }
 
-int unscheduled_userspace_iterations;
 extern unsigned long tt_extra_sched_jiffies;
 
 void userspace(struct uml_pt_regs *regs)
@@ -550,124 +539,3 @@ void userspace(struct uml_pt_regs *regs)
 		}
 	}
 }
-
-void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
-{
-	(*buf)[0].JB_IP = (unsigned long) handler;
-	(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
-		sizeof(void *);
-}
-
-#define INIT_JMP_NEW_THREAD 0
-#define INIT_JMP_CALLBACK 1
-#define INIT_JMP_HALT 2
-#define INIT_JMP_REBOOT 3
-
-void switch_threads(jmp_buf *me, jmp_buf *you)
-{
-	unscheduled_userspace_iterations = 0;
-
-	if (UML_SETJMP(me) == 0)
-		UML_LONGJMP(you, 1);
-}
-
-static jmp_buf initial_jmpbuf;
-
-/* XXX Make these percpu */
-static void (*cb_proc)(void *arg);
-static void *cb_arg;
-static jmp_buf *cb_back;
-
-int start_idle_thread(void *stack, jmp_buf *switch_buf)
-{
-	int n;
-
-	set_handler(SIGWINCH);
-
-	/*
-	 * Can't use UML_SETJMP or UML_LONGJMP here because they save
-	 * and restore signals, with the possible side-effect of
-	 * trying to handle any signals which came when they were
-	 * blocked, which can't be done on this stack.
-	 * Signals must be blocked when jumping back here and restored
-	 * after returning to the jumper.
-	 */
-	n = setjmp(initial_jmpbuf);
-	switch (n) {
-	case INIT_JMP_NEW_THREAD:
-		(*switch_buf)[0].JB_IP = (unsigned long) uml_finishsetup;
-		(*switch_buf)[0].JB_SP = (unsigned long) stack +
-			UM_THREAD_SIZE - sizeof(void *);
-		break;
-	case INIT_JMP_CALLBACK:
-		(*cb_proc)(cb_arg);
-		longjmp(*cb_back, 1);
-		break;
-	case INIT_JMP_HALT:
-		kmalloc_ok = 0;
-		return 0;
-	case INIT_JMP_REBOOT:
-		kmalloc_ok = 0;
-		return 1;
-	default:
-		printk(UM_KERN_ERR "Bad sigsetjmp return in %s - %d\n",
-		       __func__, n);
-		fatal_sigsegv();
-	}
-	longjmp(*switch_buf, 1);
-
-	/* unreachable */
-	printk(UM_KERN_ERR "impossible long jump!");
-	fatal_sigsegv();
-	return 0;
-}
-
-void initial_thread_cb_skas(void (*proc)(void *), void *arg)
-{
-	jmp_buf here;
-
-	cb_proc = proc;
-	cb_arg = arg;
-	cb_back = &here;
-
-	block_signals_trace();
-	if (UML_SETJMP(&here) == 0)
-		UML_LONGJMP(&initial_jmpbuf, INIT_JMP_CALLBACK);
-	unblock_signals_trace();
-
-	cb_proc = NULL;
-	cb_arg = NULL;
-	cb_back = NULL;
-}
-
-void halt_skas(void)
-{
-	block_signals_trace();
-	UML_LONGJMP(&initial_jmpbuf, INIT_JMP_HALT);
-}
-
-static bool noreboot;
-
-static int __init noreboot_cmd_param(char *str, int *add)
-{
-	*add = 0;
-	noreboot = true;
-	return 0;
-}
-
-__uml_setup("noreboot", noreboot_cmd_param,
-"noreboot\n"
-"    Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
-"    This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
-"    crashes in CI\n");
-
-void reboot_skas(void)
-{
-	block_signals_trace();
-	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
-}
-
-void __switch_mm(struct mm_id *mm_idp)
-{
-	userspace_pid[0] = mm_idp->pid;
-}
--
2.43.0