[PATCH 4/4] ARM: Implement privileged no-access using TTBR0 page table walks disabling

Linus Walleij linus.walleij at linaro.org
Tue Jan 23 13:16:17 PST 2024


From: Catalin Marinas <catalin.marinas at arm.com>

With LPAE enabled, privileged no-access cannot be enforced using CPU
domains, as that feature is not available. This patch implements PAN
by disabling TTBR0 page table walks while in kernel mode.

The ARM architecture allows page table walks to be split between TTBR0
and TTBR1. With LPAE enabled, the split is defined by a combination of
TTBCR T0SZ and T1SZ bits. Currently, an LPAE-enabled kernel uses TTBR0
for user addresses and TTBR1 for kernel addresses with the VMSPLIT_2G
and VMSPLIT_3G configurations. The main advantage for the 3:1 split is
that TTBR1 is reduced to 2 levels, so potentially faster TLB refill
(though usually the first level entries are already cached in the TLB).

The PAN support on LPAE-enabled kernels uses TTBR0 when running in user
space or in kernel space during user access routines (TTBCR T0SZ and
T1SZ are both 0). When user accesses are disabled in kernel mode, TTBR0
page table walks are disabled by setting TTBCR.EPD0. TTBR1 is used for
kernel accesses (including loadable modules; anything covered by
swapper_pg_dir) by reducing the TTBR0 range to the minimum (T0SZ = 7,
i.e. 2^(32-7) = 32MB). To avoid user accesses potentially hitting stale
TLB entries, the ASID is switched to 0 (reserved) by setting TTBCR.A1
and using the ASID value in TTBR1. The difference from a non-PAN kernel
is that with the 3:1 memory split, TTBR1 always uses 3 levels of page
tables.

Signed-off-by: Catalin Marinas <catalin.marinas at arm.com>
Reviewed-by: Kees Cook <keescook at chromium.org>
Signed-off-by: Linus Walleij <linus.walleij at linaro.org>
---
 arch/arm/Kconfig                            | 22 ++++++++++++--
 arch/arm/include/asm/assembler.h            |  1 +
 arch/arm/include/asm/pgtable-3level-hwdef.h |  9 ++++++
 arch/arm/include/asm/uaccess-asm.h          | 42 ++++++++++++++++++++++++++
 arch/arm/include/asm/uaccess.h              | 47 +++++++++++++++++++++++++++++
 arch/arm/kernel/suspend.c                   |  8 +++++
 arch/arm/lib/csumpartialcopyuser.S          | 14 +++++++++
 arch/arm/mm/fault.c                         |  8 +++++
 8 files changed, 148 insertions(+), 3 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0af6709570d1..3d97a15a3e2d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1231,9 +1231,9 @@ config HIGHPTE
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.
 
-config CPU_SW_DOMAIN_PAN
-	bool "Enable use of CPU domains to implement privileged no-access"
-	depends on MMU && !ARM_LPAE
+config ARM_PAN
+	bool "Enable privileged no-access"
+	depends on MMU
 	default y
 	help
 	  Increase kernel security by ensuring that normal kernel accesses
@@ -1242,10 +1242,26 @@ config CPU_SW_DOMAIN_PAN
 	  by ensuring that magic values (such as LIST_POISON) will always
 	  fault when dereferenced.
 
+	  The implementation uses CPU domains when !CONFIG_ARM_LPAE and
+	  disabling of TTBR0 page table walks with CONFIG_ARM_LPAE.
+
+config CPU_SW_DOMAIN_PAN
+	def_bool y
+	depends on ARM_PAN && !ARM_LPAE
+	help
+	  Enable use of CPU domains to implement privileged no-access.
+
 	  CPUs with low-vector mappings use a best-efforts implementation.
 	  Their lower 1MB needs to remain accessible for the vectors, but
 	  the remainder of userspace will become appropriately inaccessible.
 
+config CPU_TTBR0_PAN
+	def_bool y
+	depends on ARM_PAN && ARM_LPAE
+	help
+	  Enable privileged no-access by disabling TTBR0 page table walks when
+	  running in kernel mode.
+
 config HW_PERF_EVENTS
 	def_bool y
 	depends on ARM_PMU
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index aebe2c8f6a68..d33c1e24e00b 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -21,6 +21,7 @@
 #include <asm/opcodes-virt.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess-asm.h>
 
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 19da7753a0b8..323ad811732e 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -74,6 +74,7 @@
 #define PHYS_MASK_SHIFT		(40)
 #define PHYS_MASK		((1ULL << PHYS_MASK_SHIFT) - 1)
 
+#ifndef CONFIG_CPU_TTBR0_PAN
 /*
  * TTBR0/TTBR1 split (PAGE_OFFSET):
  *   0x40000000: T0SZ = 2, T1SZ = 0 (not used)
@@ -93,6 +94,14 @@
 #endif
 
 #define TTBR1_SIZE	(((PAGE_OFFSET >> 30) - 1) << 16)
+#else
+/*
+ * With CONFIG_CPU_TTBR0_PAN enabled, TTBR1 is only used during uaccess
+ * disabled regions when TTBR0 is disabled.
+ */
+#define TTBR1_OFFSET	0			/* pointing to swapper_pg_dir */
+#define TTBR1_SIZE	0			/* TTBR1 size controlled via TTBCR.T0SZ */
+#endif
 
 /*
  * TTBCR register bits.
diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h
index ea42ba25920f..f7acf4cabbdc 100644
--- a/arch/arm/include/asm/uaccess-asm.h
+++ b/arch/arm/include/asm/uaccess-asm.h
@@ -65,6 +65,37 @@
 	.endif
 	.endm
 
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+	.macro	uaccess_disable, tmp, isb=1
+	/*
+	 * Disable TTBR0 page table walks (EPD0 = 1), use the reserved ASID
+	 * from TTBR1 (A1 = 1) and enable TTBR1 page table walks for kernel
+	 * addresses by reducing TTBR0 range to 32MB (T0SZ = 7).
+	 */
+	mrc	p15, 0, \tmp, c2, c0, 2		@ read TTBCR
+	orr	\tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+	orr	\tmp, \tmp, #TTBCR_A1
+	mcr	p15, 0, \tmp, c2, c0, 2		@ write TTBCR
+	.if	\isb
+	instr_sync
+	.endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+	/*
+	 * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+	 * TTBR0 (A1 = 0).
+	 */
+	mrc	p15, 0, \tmp, c2, c0, 2		@ read TTBCR
+	bic	\tmp, \tmp, #TTBCR_EPD0 | TTBCR_T0SZ_MASK
+	bic	\tmp, \tmp, #TTBCR_A1
+	mcr	p15, 0, \tmp, c2, c0, 2		@ write TTBCR
+	.if	\isb
+	instr_sync
+	.endif
+	.endm
+
 #else
 
 	.macro	uaccess_disable, tmp, isb=1
@@ -79,6 +110,12 @@
 #define DACR(x...)	x
 #else
 #define DACR(x...)
+#endif
+
+#ifdef CONFIG_CPU_TTBR0_PAN
+#define PAN(x...)	x
+#else
+#define PAN(x...)
 #endif
 
 	/*
@@ -94,6 +131,8 @@
 	.macro	uaccess_entry, tsk, tmp0, tmp1, tmp2, disable
  DACR(	mrc	p15, 0, \tmp0, c3, c0, 0)
  DACR(	str	\tmp0, [sp, #SVC_DACR])
+ PAN(	mrc	p15, 0, \tmp0, c2, c0, 2)
+ PAN(	str	\tmp0, [sp, #SVC_DACR])
 	.if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN)
 	/* kernel=client, user=no access */
 	mov	\tmp2, #DACR_UACCESS_DISABLE
@@ -112,8 +151,11 @@
 	.macro	uaccess_exit, tsk, tmp0, tmp1
  DACR(	ldr	\tmp0, [sp, #SVC_DACR])
  DACR(	mcr	p15, 0, \tmp0, c3, c0, 0)
+ PAN(	ldr	\tmp0, [sp, #SVC_DACR])
+ PAN(	mcr	p15, 0, \tmp0, c2, c0, 2)
 	.endm
 
 #undef DACR
+#undef PAN
 
 #endif /* __ASM_UACCESS_ASM_H__ */
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 9b9234d1bb6a..5b542eab009f 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -14,6 +14,8 @@
 #include <asm/domain.h>
 #include <asm/unaligned.h>
 #include <asm/unified.h>
+#include <asm/pgtable.h>
+#include <asm/proc-fns.h>
 #include <asm/compiler.h>
 
 #include <asm/extable.h>
@@ -43,6 +45,45 @@ static __always_inline void uaccess_restore(unsigned int flags)
 	set_domain(flags);
 }
 
+static inline bool uaccess_disabled(struct pt_regs *regs)
+{
+	/*
+	 * This is handled by hardware domain checks but included for
+	 * completeness.
+	 */
+	return !(get_domain() & domain_mask(DOMAIN_USER));
+}
+
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+static inline unsigned int uaccess_save_and_enable(void)
+{
+	unsigned int old_ttbcr = cpu_get_ttbcr();
+
+	/*
+	 * Enable TTBR0 page table walks (T0SZ = 0, EPD0 = 0) and ASID from
+	 * TTBR0 (A1 = 0).
+	 */
+	cpu_set_ttbcr(old_ttbcr & ~(TTBCR_A1 | TTBCR_EPD0 | TTBCR_T0SZ_MASK));
+	isb();
+
+	return old_ttbcr;
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+	cpu_set_ttbcr(flags);
+	isb();
+}
+
+static inline bool uaccess_disabled(struct pt_regs *regs)
+{
+	/* uaccess state saved above pt_regs on SVC exception entry */
+	unsigned int ttbcr = *(unsigned int *)(regs + 1);
+
+	return ttbcr & TTBCR_EPD0;
+}
+
 #else
 
 static inline unsigned int uaccess_save_and_enable(void)
@@ -54,6 +95,12 @@ static inline void uaccess_restore(unsigned int flags)
 {
 }
 
+static inline bool uaccess_disabled(struct pt_regs *regs)
+{
+	/* Without PAN userspace is always available */
+	return false;
+}
+
 #endif
 
 /*
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index c3ec3861dd07..58a6441b58c4 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -12,6 +12,7 @@
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
+#include <asm/uaccess.h>
 
 extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
@@ -26,6 +27,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	if (!idmap_pgd)
 		return -EINVAL;
 
+	/*
+	 * Needed for the MMU disabling/enabling code to be able to run from
+	 * TTBR0 addresses.
+	 */
+	if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN))
+		uaccess_save_and_enable();
+
 	/*
 	 * Function graph tracer state gets incosistent when the kernel
 	 * calls functions that never return (aka suspend finishers) hence
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 04d8d9d741c7..c289bde04743 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -27,6 +27,20 @@
 		ret	lr
 		.endm
 
+#elif defined(CONFIG_CPU_TTBR0_PAN)
+
+		.macro	save_regs
+		mrc	p15, 0, ip, c2, c0, 2		@ read TTBCR
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c2, c0, 2		@ restore TTBCR
+		ret	lr
+		.endm
+
 #else
 
 		.macro	save_regs
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index e96fb40b9cc3..de4abf9dfd6a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -278,6 +278,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
+	/*
+	 * Privileged access aborts with CONFIG_CPU_TTBR0_PAN enabled are
+	 * routed via the translation fault mechanism. Check whether uaccess
+	 * is disabled while in kernel mode.
+	 */
+	if (IS_ENABLED(CONFIG_CPU_TTBR0_PAN) && !user_mode(regs) && uaccess_disabled(regs))
+		goto no_context;
+
 	if (!(flags & FAULT_FLAG_USER))
 		goto lock_mmap;
 

-- 
2.34.1




More information about the linux-arm-kernel mailing list