[RFC PATCH 1/2] arm64: Support execute-only permissions with Enhanced PAN
Vladimir Murzin
vladimir.murzin at arm.com
Fri Nov 13 10:20:22 EST 2020
Enhanced Privileged Access Never (EPAN) allows Privileged Access Never
to be used with Execute-only mappings.
The absence of such support was the reason for commit 24cecc377463
("arm64: Revert support for execute-only user mappings"), so
execute-only user mappings can now be revisited and re-enabled.
Cc: Kees Cook <keescook at chromium.org>
Cc: Catalin Marinas <catalin.marinas at arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin at arm.com>
---
arch/arm64/Kconfig | 17 +++++++++++++++++
arch/arm64/include/asm/cpucaps.h | 3 ++-
arch/arm64/include/asm/pgtable-prot.h | 5 +++--
arch/arm64/include/asm/pgtable.h | 23 +++++++++++++++++++++--
arch/arm64/include/asm/sysreg.h | 1 +
arch/arm64/kernel/cpufeature.c | 20 ++++++++++++++++++++
arch/arm64/mm/fault.c | 3 +++
7 files changed, 67 insertions(+), 5 deletions(-)
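A note for reviewers (context only, not part of the diff): selecting
ARCH_HAS_FILTER_PGPROT makes the core mm pass the protections derived
from the VM_* flags through arch_filter_pgprot(), which is where the
new PAGE_EXECONLY protection gets downgraded on non-EPAN hardware.
Lightly simplified from mm/mmap.c at the time of writing:

/* Context only: the generic-mm consumer of arch_filter_pgprot(),
 * lightly simplified from mm/mmap.c. */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));

	return arch_filter_pgprot(ret);
}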
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1515f6f..6639244 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1056,6 +1056,9 @@ config ARCH_WANT_HUGE_PMD_SHARE
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
+config ARCH_HAS_FILTER_PGPROT
+ def_bool y
+
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y if PGTABLE_LEVELS > 2
@@ -1688,6 +1691,20 @@ config ARM64_MTE
endmenu
+menu "ARMv8.7 architectural features"
+
+config ARM64_EPAN
+ bool "Enable support for Enhanced Privileged Access Never (EPAN)"
+ default y
+ depends on ARM64_PAN
+ help
+ Enhanced Privileged Access Never (EPAN) allows Privileged
+ Access Never to be used with Execute-only mappings.
+
+ The feature is detected at runtime, and will remain disabled
+ if the CPU does not implement it.
+endmenu
+
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index e7d9899..046202f 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
#define ARM64_HAS_TLB_RANGE 56
#define ARM64_MTE 57
#define ARM64_WORKAROUND_1508412 58
+#define ARM64_HAS_EPAN 59

-#define ARM64_NCAPS 59
+#define ARM64_NCAPS 60
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 046be78..f91c2aa 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -88,12 +88,13 @@ extern bool arm64_use_ng_mappings;
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY
#define __P011 PAGE_READONLY
-#define __P100 PAGE_READONLY_EXEC
+#define __P100 PAGE_EXECONLY
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_READONLY_EXEC
#define __P111 PAGE_READONLY_EXEC
@@ -102,7 +103,7 @@ extern bool arm64_use_ng_mappings;
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
+#define __S100 PAGE_EXECONLY
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 4ff12a7..d1f68d2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -113,8 +113,15 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+#define pte_valid_not_user(pte) \
+({ \
+ int __val; \
+ if (cpus_have_const_cap(ARM64_HAS_EPAN)) \
+ __val = (pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN); \
+ else \
+ __val = (pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID; \
+ __val; \
+})
#define pte_valid_young(pte) \
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
@@ -974,6 +981,18 @@ static inline bool arch_faults_on_old_pte(void)
}
#define arch_faults_on_old_pte arch_faults_on_old_pte
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+ if (cpus_have_const_cap(ARM64_HAS_EPAN))
+ return prot;
+
+ if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+ return prot;
+
+ return PAGE_READONLY_EXEC;
+}
+
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 174817b..19147b6 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -579,6 +579,7 @@
#endif
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN (BIT(57))
#define SCTLR_EL1_ATA0 (BIT(42))
#define SCTLR_EL1_TCF0_SHIFT 38
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index dcc165b..2033e0b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1602,6 +1602,13 @@ static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+static void cpu_enable_epan(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_EPAN);
+}
+#endif /* CONFIG_ARM64_EPAN */
+
#ifdef CONFIG_ARM64_RAS_EXTN
static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
{
@@ -1750,6 +1757,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_pan,
},
#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+ {
+ .desc = "Enhanced Privileged Access Never",
+ .capability = ARM64_HAS_EPAN,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR1_EL1,
+ .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 3,
+ .cpu_enable = cpu_enable_epan,
+ },
+#endif /* CONFIG_ARM64_EPAN */
#ifdef CONFIG_ARM64_LSE_ATOMICS
{
.desc = "LSE atomic instructions",
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 1ee9400..b93222e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -467,6 +467,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
if (faulthandler_disabled() || !mm)
goto no_context;
+ if (cpus_have_const_cap(ARM64_HAS_EPAN))
+ vm_flags &= ~VM_EXEC;
+
if (user_mode(regs))
mm_flags |= FAULT_FLAG_USER;
--
2.7.4