[PATCH 1/4] riscv: Introduce virtual kernel mapping KASLR
Alexandre Ghiti
alexghiti at rivosinc.com
Wed Feb 15 06:51:10 PST 2023
KASLR implementation relies on a relocatable kernel so that we can move
the kernel mapping.
The seed needed to virtually move the kernel is taken from the device tree,
so we rely on the bootloader to provide a correct seed. The Zkr extension
could be used unconditionally instead where implemented, but that's for
another patch.
Signed-off-by: Alexandre Ghiti <alexghiti at rivosinc.com>
---
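Notes (below the cut, not for the commit message):

The kernel side only consumes /chosen/kaslr-seed (and zeroes it after use,
see get_kaslr_seed() below); producing the seed is entirely the job of the
bootloader or firmware. As a rough illustration of the other side, here is
a minimal bootloader-style sketch using libfdt. It is not part of this
series: read_hw_random64() is a hypothetical entropy helper (a HWRNG, or
the Zkr seed CSR once available), and error handling is kept to a minimum.

	/*
	 * Illustrative bootloader-side sketch: install a 64-bit seed in
	 * /chosen/kaslr-seed before jumping to the kernel.
	 */
	#include <stdint.h>
	#include <libfdt.h>

	extern uint64_t read_hw_random64(void);	/* hypothetical HWRNG helper */

	static int install_kaslr_seed(void *fdt)
	{
		int node = fdt_path_offset(fdt, "/chosen");

		if (node < 0)
			node = fdt_add_subnode(fdt, 0, "chosen");
		if (node < 0)
			return node;

		/* Stored big-endian; the kernel reads it back with fdt64_to_cpu(). */
		return fdt_setprop_u64(fdt, node, "kaslr-seed", read_hw_random64());
	}

Passing "nokaslr" on the kernel command line skips the randomization
entirely (see set_nokaslr_from_cmdline() below). A worked example of the
virtual offset computation done in setup_vm() is appended after the patch.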
arch/riscv/Kconfig | 18 ++++++++++++++++
arch/riscv/include/asm/page.h | 1 +
arch/riscv/kernel/pi/Makefile | 2 +-
arch/riscv/kernel/pi/cmdline_early.c | 12 +++++++++++
arch/riscv/kernel/pi/fdt_early.c | 23 +++++++++++++++++++++
arch/riscv/mm/init.c | 31 +++++++++++++++++++++++++++-
6 files changed, 85 insertions(+), 2 deletions(-)
create mode 100644 arch/riscv/kernel/pi/fdt_early.c
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index e3b7608d23ae..70082bd978cf 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -559,6 +559,24 @@ config RELOCATABLE
If unsure, say N.
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ select RELOCATABLE
+ help
+ Randomizes the virtual address at which the kernel image is
+ loaded, as a security feature that deters exploit attempts
+ relying on knowledge of the location of kernel internals.
+
+ It is the bootloader's job to provide entropy, by passing a
+ random u64 value in /chosen/kaslr-seed at kernel entry.
+
+ When booting via the UEFI stub, it will invoke the firmware's
+ EFI_RNG_PROTOCOL implementation (if available) to supply entropy
+ to the kernel proper. In addition, it will randomize the physical
+ location of the kernel Image as well.
+
+ If unsure, say N.
+
endmenu # "Kernel features"
menu "Boot options"
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index bd7b9dda1e4f..11c9aa4931c7 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -107,6 +107,7 @@ typedef struct page *pgtable_t;
struct kernel_mapping {
unsigned long page_offset;
unsigned long virt_addr;
+ unsigned long virt_offset;
uintptr_t phys_addr;
uintptr_t size;
/* Offset between linear mapping virtual address and kernel load address */
diff --git a/arch/riscv/kernel/pi/Makefile b/arch/riscv/kernel/pi/Makefile
index 554e936ef0b6..d997d08ae593 100644
--- a/arch/riscv/kernel/pi/Makefile
+++ b/arch/riscv/kernel/pi/Makefile
@@ -30,5 +30,5 @@ $(obj)/string.o: $(srctree)/lib/string.c FORCE
$(obj)/ctype.o: $(srctree)/lib/ctype.c FORCE
$(call if_changed_rule,cc_o_c)
-obj-y := cmdline_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
+obj-y := cmdline_early.pi.o fdt_early.pi.o string.pi.o ctype.pi.o lib-fdt.pi.o lib-fdt_ro.pi.o
extra-y := $(patsubst %.pi.o,%.o,$(obj-y))
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index 5ae7b853fa66..8d898cc6e7ff 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -50,3 +50,15 @@ u64 __init set_satp_mode_from_cmdline(uintptr_t dtb_pa)
return match_noXlvl(cmdline);
}
+
+static bool __init match_nokaslr(char *cmdline)
+{
+ return strstr(cmdline, "nokaslr");
+}
+
+bool __init set_nokaslr_from_cmdline(uintptr_t dtb_pa)
+{
+ char *cmdline = get_early_cmdline(dtb_pa);
+
+ return match_nokaslr(cmdline);
+}
diff --git a/arch/riscv/kernel/pi/fdt_early.c b/arch/riscv/kernel/pi/fdt_early.c
new file mode 100644
index 000000000000..a51f00021886
--- /dev/null
+++ b/arch/riscv/kernel/pi/fdt_early.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/libfdt.h>
+
+u64 __init get_kaslr_seed(uintptr_t dtb_pa)
+{
+ int node, len;
+ fdt64_t *prop;
+ u64 ret;
+
+ node = fdt_path_offset((void *)dtb_pa, "/chosen");
+ if (node < 0)
+ return 0;
+
+ prop = fdt_getprop_w((void *)dtb_pa, node, "kaslr-seed", &len);
+ if (!prop || len != sizeof(u64))
+ return 0;
+
+ ret = fdt64_to_cpu(*prop);
+ *prop = 0;
+ return ret;
+}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index aa4f1dc6df58..e6a7e6f5c75d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1032,11 +1032,40 @@ static void __init pt_ops_set_late(void)
#endif
}
+#ifdef CONFIG_RANDOMIZE_BASE
+extern bool __init __pi_set_nokaslr_from_cmdline(uintptr_t dtb_pa);
+extern u64 __init __pi_get_kaslr_seed(uintptr_t dtb_pa);
+
+static int __init print_nokaslr(char *p)
+{
+ pr_info("Disabled KASLR");
+ return 0;
+}
+early_param("nokaslr", print_nokaslr);
+#endif
+
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
- kernel_map.virt_addr = KERNEL_LINK_ADDR;
+#ifdef CONFIG_RANDOMIZE_BASE
+ if (!__pi_set_nokaslr_from_cmdline(dtb_pa)) {
+ u64 kaslr_seed = __pi_get_kaslr_seed(dtb_pa);
+ u32 kernel_size = (uintptr_t)(&_end) - (uintptr_t)(&_start);
+ u32 nr_pos;
+
+ /*
+ * Compute the number of positions available: we are limited
+ * by the early page table that only has one PUD and we must
+ * be aligned on PMD_SIZE.
+ */
+ nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;
+
+ kernel_map.virt_offset = (kaslr_seed % nr_pos) * PMD_SIZE;
+ }
+#endif
+
+ kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
--
2.37.2
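As a worked example of the window computed in setup_vm(): with 4K pages,
PMD_SIZE is 2MB and PUD_SIZE is 1GB, so with a (hypothetical) 32MB kernel
image there are (1GB - 32MB) / 2MB = 496 possible PMD-aligned slots, and
the offset added to KERNEL_LINK_ADDR stays below 1GB. The snippet below
just redoes that arithmetic with an arbitrary stand-in seed; none of the
numbers come from the patch itself.

	/*
	 * Standalone illustration of the virt_offset computation, with
	 * assumed sizes: PUD_SIZE = 1GB, PMD_SIZE = 2MB, 32MB image.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t pud_size = 1ULL << 30;		/* randomization window */
		uint64_t pmd_size = 2ULL << 20;		/* mapping alignment */
		uint64_t kernel_size = 32ULL << 20;	/* assumed image size */
		uint64_t seed = 0x123456789abcdef0ULL;	/* stand-in for kaslr-seed */

		uint64_t nr_pos = (pud_size - kernel_size) / pmd_size;	/* 496 */
		uint64_t virt_offset = (seed % nr_pos) * pmd_size;

		printf("nr_pos=%llu virt_offset=0x%llx\n",
		       (unsigned long long)nr_pos,
		       (unsigned long long)virt_offset);
		return 0;
	}

With the maximum offset just under 1GB, the randomized mapping still fits
in the single PUD covered by the early page table, which is exactly the
constraint described by the comment in setup_vm().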