[PATCH v6 9/9] arm64: mm: add support for WXN memory translation attribute

Ard Biesheuvel ardb at kernel.org
Fri Jul 1 06:04:44 PDT 2022


The AArch64 virtual memory system supports a global WXN control, which
can be enabled to make all writable mappings implicitly no-exec. This is
a useful hardening feature, as it prevents mistakes in managing page
table permissions from being exploited to attack the system.

When enabled at EL1, the restrictions apply to both EL1 and EL0. EL1 is
completely under our control, and has been cleaned up to allow WXN to be
enabled from boot onwards. EL0 is not under our control, but given that
widely deployed security features such as SELinux or PaX already limit
the ability of user space to create mappings that are both writable and
executable, the impact of enabling this for EL0 is
expected to be limited. (For this reason, common user space libraries
that have a legitimate need for manipulating executable code already
carry fallbacks such as [0].)

If enabled at compile time, the feature can still be disabled at boot if
needed, by passing arm64.nowxn on the kernel command line.

[0] https://github.com/libffi/libffi/blob/master/src/closures.c#L440
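
To illustrate the kind of fallback [0] implements, here is a minimal
sketch (the code_buf type and code_buf_alloc() helper are made-up names
for illustration, not libffi's): probe for a single writable+executable
anonymous mapping first, and when the kernel refuses it, as it will with
WXN enabled, map one memfd twice, once writable and once executable.

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

struct code_buf {
	void *rw;	/* emit generated code through this alias ... */
	void *rx;	/* ... and branch to it through this one */
	size_t size;
};

int code_buf_alloc(struct code_buf *cb, size_t size)
{
	int fd = -1;
	void *p;

	cb->size = size;

	/* Fast path: one anonymous writable+executable mapping */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p != MAP_FAILED) {
		cb->rw = cb->rx = p;
		return 0;
	}

	/* Fallback: two shared mappings of the same memfd */
	fd = memfd_create("code_buf", MFD_CLOEXEC);
	if (fd < 0 || ftruncate(fd, size) < 0)
		goto err;

	cb->rw = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	cb->rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
	close(fd);	/* the mappings keep the memfd alive */
	if (cb->rw == MAP_FAILED || cb->rx == MAP_FAILED)
		return -1;	/* unmapping the partial result is omitted */
	return 0;
err:
	if (fd >= 0)
		close(fd);
	return -1;
}

As with any arm64 JIT, the caller still needs cache maintenance (e.g.
__builtin___clear_cache()) between writing instructions through the rw
alias and branching to them through the rx alias.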

Signed-off-by: Ard Biesheuvel <ardb at kernel.org>
Reviewed-by: Kees Cook <keescook at chromium.org>
---
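
For reference, the user-visible effect can be observed with a throwaway
test along these lines (a sketch, not part of this patch): with
ARM64_WXN=y both requests below should be rejected, and with
arm64.nowxn (or rodata=off) on the command line both should be allowed
again.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p, *q;

	/* mmap() path, checked by arch_validate_mmap_prot() */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	printf("mmap W+X:     %s\n", p == MAP_FAILED ? "rejected" : "allowed");

	/* mprotect() path, checked by arch_validate_prot() */
	q = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (q == MAP_FAILED)
		return 1;
	printf("mprotect W+X: %s\n",
	       mprotect(q, 4096, PROT_READ | PROT_WRITE | PROT_EXEC) ?
	       "rejected" : "allowed");
	return 0;
}
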
 arch/arm64/Kconfig                      | 11 ++++++
 arch/arm64/include/asm/cpufeature.h     |  9 +++++
 arch/arm64/include/asm/mman.h           | 36 ++++++++++++++++++++
 arch/arm64/include/asm/mmu_context.h    | 30 +++++++++++++++-
 arch/arm64/kernel/pi/early_map_kernel.c | 11 +++++-
 arch/arm64/mm/mmu.c                     | 33 ++++++++++++++----
 arch/arm64/mm/proc.S                    | 23 +++++++++++++
 7 files changed, 144 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1652a9800ebe..d262d5ab4316 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1422,6 +1422,17 @@ config RODATA_FULL_DEFAULT_ENABLED
 	  This requires the linear region to be mapped down to pages,
 	  which may adversely affect performance in some cases.
 
+config ARM64_WXN
+	bool "Enable WXN attribute so all writable mappings are non-exec"
+	help
+	  Set the WXN bit in the SCTLR system register so that all writable
+	  mappings are treated as if the PXN/UXN bit is set as well.
+	  If this is set to Y, it can still be disabled at runtime by
+	  passing 'arm64.nowxn' on the kernel command line.
+
+	  This should only be set if no software needs to be supported that
+	  relies on being able to execute from writable mappings.
+
 config ARM64_SW_TTBR0_PAN
 	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
 	help
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 14a8f3d93add..86ec12ceeaff 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -915,6 +915,15 @@ extern struct arm64_ftr_override id_aa64isar2_override;
 u32 get_kvm_ipa_limit(void);
 void dump_cpu_features(void);
 
+extern int arm64_no_wxn;
+
+static inline bool arm64_wxn_enabled(void)
+{
+	if (!IS_ENABLED(CONFIG_ARM64_WXN))
+		return false;
+	return arm64_no_wxn == 0;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 5966ee4a6154..6d4940342ba7 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -35,11 +35,40 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
 }
 #define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
 
+static inline bool arm64_check_wx_prot(unsigned long prot,
+				       struct task_struct *tsk)
+{
+	/*
+	 * When we are running with SCTLR_ELx.WXN==1, writable mappings are
+	 * implicitly non-executable. This means we should reject such mappings
+	 * when user space attempts to create them using mmap() or mprotect().
+	 */
+	if (arm64_wxn_enabled() &&
+	    ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC))) {
+		/*
+		 * User space libraries such as libffi carry elaborate
+		 * heuristics to decide whether it is worth it to even attempt
+		 * to create writable executable mappings, as PaX or SELinux
+		 * enabled systems will outright reject it. They will usually
+		 * fall back to something else (e.g., two separate shared
+		 * mmap()s of a temporary file) on failure.
+		 */
+		pr_info_ratelimited(
+			"process %s (%d) attempted to create PROT_WRITE+PROT_EXEC mapping\n",
+			tsk->comm, tsk->pid);
+		return false;
+	}
+	return true;
+}
+
 static inline bool arch_validate_prot(unsigned long prot,
 	unsigned long addr __always_unused)
 {
 	unsigned long supported = PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM;
 
+	if (!arm64_check_wx_prot(prot, current))
+		return false;
+
 	if (system_supports_bti())
 		supported |= PROT_BTI;
 
@@ -50,6 +79,13 @@ static inline bool arch_validate_prot(unsigned long prot,
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
 
+static inline bool arch_validate_mmap_prot(unsigned long prot,
+					   unsigned long addr)
+{
+	return arm64_check_wx_prot(prot, current);
+}
+#define arch_validate_mmap_prot arch_validate_mmap_prot
+
 static inline bool arch_validate_flags(unsigned long vm_flags)
 {
 	if (!system_supports_mte())
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c7ccd82db1d2..cd4bb5410a18 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -19,13 +19,41 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/sysreg.h>
 #include <asm/tlbflush.h>
 
 extern bool rodata_full;
 
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+				struct mm_struct *mm)
+{
+	return 0;
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+			unsigned long start, unsigned long end)
+{
+}
+
+static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
+		bool write, bool execute, bool foreign)
+{
+	if (IS_ENABLED(CONFIG_ARM64_WXN) && execute &&
+	    (vma->vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
+		pr_warn_ratelimited(
+			"process %s (%d) attempted to execute from writable memory\n",
+			current->comm, current->pid);
+		/* disallow unless the nowxn override is set */
+		return !arm64_wxn_enabled();
+	}
+	return true;
+}
+
 static inline void contextidr_thread_switch(struct task_struct *next)
 {
 	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
diff --git a/arch/arm64/kernel/pi/early_map_kernel.c b/arch/arm64/kernel/pi/early_map_kernel.c
index b2e660a1d003..c6c1bae343d3 100644
--- a/arch/arm64/kernel/pi/early_map_kernel.c
+++ b/arch/arm64/kernel/pi/early_map_kernel.c
@@ -278,15 +278,24 @@ static bool arm64_early_this_cpu_has_e0pd(void)
 						    ID_AA64MMFR2_E0PD_SHIFT);
 }
 
+extern void disable_wxn(void);
+
 static void map_kernel(void *fdt, u64 kaslr_offset, u64 va_offset)
 {
 	pgd_t *pgdp = (void *)init_pg_dir + PAGE_SIZE;
 	pgprot_t text_prot = PAGE_KERNEL_ROX;
 	pgprot_t data_prot = PAGE_KERNEL;
 	pgprot_t prot;
+	bool nowxn = false;
 
-	if (cmdline_has(fdt, "rodata=off"))
+	if (cmdline_has(fdt, "rodata=off")) {
 		text_prot = PAGE_KERNEL_EXEC;
+		nowxn = true;
+	}
+
+	if (IS_ENABLED(CONFIG_ARM64_WXN) &&
+	    (nowxn || cmdline_has(fdt, "arm64.nowxn")))
+		disable_wxn();
 
 	// If we have a CPU that supports BTI and a kernel built for
 	// BTI then mark the kernel executable text as guarded pages
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 0a23f4f14f99..0f3e556ccfae 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -73,6 +73,21 @@ long __section(".mmuoff.data.write") __early_cpu_boot_status;
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
+#ifdef CONFIG_ARM64_WXN
+asmlinkage int arm64_no_wxn __ro_after_init;
+
+static int set_arm64_no_wxn(char *str)
+{
+	arm64_no_wxn = 1;
+
+	// Make the value visible to booting secondaries
+	dcache_clean_inval_poc((u64)&arm64_no_wxn,
+			       (u64)&arm64_no_wxn + sizeof(arm64_no_wxn));
+	return 1;
+}
+__setup("arm64.nowxn", set_arm64_no_wxn);
+#endif
+
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
 static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
 static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
@@ -660,15 +675,19 @@ static int __init parse_rodata(char *arg)
 	int ret = strtobool(arg, &rodata_enabled);
 	if (!ret) {
 		rodata_full = false;
-		return 0;
-	}
+	} else {
+		/* permit 'full' in addition to boolean options */
+		if (strcmp(arg, "full"))
+			return -EINVAL;
 
-	/* permit 'full' in addition to boolean options */
-	if (strcmp(arg, "full"))
-		return -EINVAL;
+		rodata_enabled = true;
+		rodata_full = true;
+	}
 
-	rodata_enabled = true;
-	rodata_full = true;
+#ifdef CONFIG_ARM64_WXN
+	if (!rodata_enabled)
+		set_arm64_no_wxn(NULL);
+#endif
 	return 0;
 }
 early_param("rodata", parse_rodata);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4322ddf5e02f..656c78f82a17 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -498,8 +498,31 @@ SYM_FUNC_START(__cpu_setup)
 	 * Prepare SCTLR
 	 */
 	mov_q	x0, INIT_SCTLR_EL1_MMU_ON
+#ifdef CONFIG_ARM64_WXN
+	ldr_l	w1, arm64_no_wxn, x1
+	tst	w1, #0x1			// WXN disabled on command line?
+	orr	x1, x0, #SCTLR_ELx_WXN
+	csel	x0, x0, x1, ne
+#endif
 	ret					// return to head.S
 
 	.unreq	mair
 	.unreq	tcr
 SYM_FUNC_END(__cpu_setup)
+
+#ifdef CONFIG_ARM64_WXN
+	.align	2
+SYM_CODE_START(__pi_disable_wxn)
+	mrs	x0, sctlr_el1
+	bic	x1, x0, #SCTLR_ELx_M
+	msr	sctlr_el1, x1
+	isb
+	tlbi	vmalle1
+	dsb	nsh
+	isb
+	bic	x0, x0, #SCTLR_ELx_WXN
+	msr	sctlr_el1, x0
+	isb
+	ret
+SYM_CODE_END(__pi_disable_wxn)
+#endif
-- 
2.35.1



