[PATCH v3 0/3] arm64: Drop support for VPIPT i-cache policy
Marc Zyngier
maz at kernel.org
Mon Dec 4 10:26:25 PST 2023
On Mon, 04 Dec 2023 14:44:56 +0000,
Mark Rutland <mark.rutland at arm.com> wrote:
>
> On Mon, Dec 04, 2023 at 02:36:03PM +0000, Marc Zyngier wrote:
> > ARMv8.2 introduced support for VPIPT i-caches, the V standing for
> > VMID-tagged. Although this looked like a reasonable idea, no
> > implementation has ever made it into the wild.
> >
> > Linux has supported this for over 6 years (amusingly, just as the
> > architecture was dropping support for AIVIVT i-caches), but we had no
> > way to even test it, and it is likely that this code was just
> > bit-rotting.
> >
> > However, in a recent breakthrough (XML drop 2023-09, tagged as
> > d55f5af8e09052abe92a02adf820deea2eaed717), the architecture has
> > finally been purged of this option, making VIPT and PIPT the only two
> > valid options.
> >
> > This really means this code is just dead code. Nobody will ever come
> > up with such an implementation, and we can just get rid of it.
> >
> > Most of the impact is on KVM, where we drop a few large comment blocks
> > (and a bit of code), while the core arch code loses the detection code
> > itself.
> >
> > * From v2:
> > - Fix reserved naming for RESERVED_AIVIVT
> > - Collected RBs from Anshuman and Zenghui
> >
> > Marc Zyngier (3):
> > KVM: arm64: Remove VPIPT I-cache handling
> > arm64: Kill detection of VPIPT i-cache policy
> > arm64: Rename reserved values for CTR_EL0.L1Ip
>
> For the series:
>
> Acked-by: Mark Rutland <mark.rutland at arm.com>

Thanks.

> Looking forward, we can/should probably replace __icache_flags with a single
> ICACHE_NOALIASING or ICACHE_PIPT cpucap, which'd get rid of a bunch of
> duplicated logic and make that more sound in the case of races around cpu
> onlining.

As long as we refuse VIPT CPUs coming up late (i.e. after we have
patched the kernel to set ICACHE_PIPT), it should be doable. I guess
we already have this restriction as userspace is able to probe the
I-cache policy anyway.
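
For reference, here is a minimal userspace sketch of that probe
(assuming CTR_EL0 reads are permitted at EL0, which the kernel allows
by setting SCTLR_EL1.UCT):

#include <stdio.h>

int main(void)
{
        unsigned long ctr;
        unsigned int l1ip;

        /* CTR_EL0 is readable from EL0 when SCTLR_EL1.UCT is set */
        asm volatile("mrs %0, ctr_el0" : "=r" (ctr));

        /* CTR_EL0.L1Ip is bits [15:14]: 0b11 is PIPT, 0b10 is VIPT */
        l1ip = (ctr >> 14) & 0x3;

        printf("L1Ip = %u (%s)\n", l1ip,
               l1ip == 3 ? "PIPT" : l1ip == 2 ? "VIPT" : "reserved");

        return 0;
}
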
How about the patch below (tested in a guest with a bunch of hacks to
expose different L1Ip values)?

Thanks,
M.

From 8f88afb0b317213dcbf18ea460a508bfccc18568 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <maz at kernel.org>
Date: Mon, 4 Dec 2023 18:09:40 +0000
Subject: [PATCH] arm64: Make icache detection a cpu capability

Now that we only have two icache policies, we are in a good position
to make the whole detection business more robust.

Let's replace __icache_flags with a single capability (ICACHE_PIPT),
which is only set if all CPUs are indeed PIPT. This means we can rely
on the existing logic to mandate that a VIPT CPU coming up late will
be denied booting, which is the safe thing to do.

This also leads to some nice cleanups in pKVM, and KVM as a whole
can use ARM64_ICACHE_PIPT as a final cap.

Suggested-by: Mark Rutland <mark.rutland at arm.com>
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
arch/arm64/include/asm/cache.h | 9 ++-------
arch/arm64/include/asm/kvm_hyp.h | 1 -
arch/arm64/include/asm/kvm_mmu.h | 2 +-
arch/arm64/kernel/cpufeature.c | 7 +++++++
arch/arm64/kernel/cpuinfo.c | 34 --------------------------------
arch/arm64/kvm/arm.c | 1 -
arch/arm64/kvm/hyp/nvhe/pkvm.c | 3 ---
arch/arm64/tools/cpucaps | 1 +
arch/arm64/tools/sysreg | 2 +-
9 files changed, 12 insertions(+), 48 deletions(-)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 06a4670bdb0b..8ef9522a6151 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -37,9 +37,9 @@
#ifndef __ASSEMBLY__
-#include <linux/bitops.h>
#include <linux/kasan-enabled.h>
+#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/mte-def.h>
#include <asm/sysreg.h>
@@ -55,18 +55,13 @@ static inline unsigned int arch_slab_minalign(void)
#define arch_slab_minalign() arch_slab_minalign()
#endif
-#define CTR_L1IP(ctr) SYS_FIELD_GET(CTR_EL0, L1Ip, ctr)
-
-#define ICACHEF_ALIASING 0
-extern unsigned long __icache_flags;
-
/*
* Whilst the D-side always behaves as PIPT on AArch64, aliasing is
* permitted in the I-cache.
*/
static inline int icache_is_aliasing(void)
{
- return test_bit(ICACHEF_ALIASING, &__icache_flags);
+ return !cpus_have_cap(ARM64_ICACHE_PIPT);
}
static inline u32 cache_type_cwg(void)
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 145ce73fc16c..7ad70f9865fd 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -140,7 +140,6 @@ extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
-extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e3e793d0ec30..ab86c2f025cf 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -248,7 +248,7 @@ static inline void __invalidate_icache_guest_page(void *va, size_t size)
* invalidation range exceeds our arbitrary limit on invalidations by
* cache line.
*/
- if (icache_is_aliasing() || size > __invalidate_icache_max_range())
+ if (!cpus_have_final_cap(ARM64_ICACHE_PIPT) || size > __invalidate_icache_max_range())
icache_inval_all_pou();
else
icache_inval_pou((unsigned long)va, (unsigned long)va + size);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 646591c67e7a..73a37176676e 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2272,6 +2272,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_always,
},
+ {
+ .desc = "PIPT I-cache policy",
+ .capability = ARM64_ICACHE_PIPT,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(CTR_EL0, L1Ip, PIPT)
+ },
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_GIC_CPUIF_SYSREGS,
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 47043c0d95ec..a4ea331fb6d4 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -33,20 +33,6 @@
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
-static inline const char *icache_policy_str(int l1ip)
-{
- switch (l1ip) {
- case CTR_EL0_L1Ip_VIPT:
- return "VIPT";
- case CTR_EL0_L1Ip_PIPT:
- return "PIPT";
- default:
- return "RESERVED/UNKNOWN";
- }
-}
-
-unsigned long __icache_flags;
-
static const char *const hwcap_str[] = {
[KERNEL_HWCAP_FP] = "fp",
[KERNEL_HWCAP_ASIMD] = "asimd",
@@ -378,24 +364,6 @@ static int __init cpuinfo_regs_init(void)
}
device_initcall(cpuinfo_regs_init);
-static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
-{
- unsigned int cpu = smp_processor_id();
- u32 l1ip = CTR_L1IP(info->reg_ctr);
-
- switch (l1ip) {
- case CTR_EL0_L1Ip_PIPT:
- break;
- case CTR_EL0_L1Ip_VIPT:
- default:
- /* Assume aliasing */
- set_bit(ICACHEF_ALIASING, &__icache_flags);
- break;
- }
-
- pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str(l1ip), cpu);
-}
-
static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
{
info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
@@ -457,8 +425,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
__cpuinfo_store_cpu_32bit(&info->aarch32);
-
- cpuinfo_detect_icache_policy(info);
}
void cpuinfo_store_cpu(void)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e5f75f1f1085..a5f71165cd5c 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2242,7 +2242,6 @@ static void kvm_hyp_init_symbols(void)
kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1);
- kvm_nvhe_sym(__icache_flags) = __icache_flags;
kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index b29f15418c0a..187ce5720697 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -12,9 +12,6 @@
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
-/* Used by icache_is_aliasing(). */
-unsigned long __icache_flags;
-
/* Used by kvm_get_vttbr(). */
unsigned int kvm_arm_vmid_bits;
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index b98c38288a9d..9c6313cd6f5e 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -53,6 +53,7 @@ HAS_TLB_RANGE
HAS_VIRT_HOST_EXTN
HAS_WFXT
HW_DBM
+ICACHE_PIPT
KVM_HVHE
KVM_PROTECTED_MODE
MISMATCHED_CACHE_TYPE
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index c5af75b23187..db8c96841138 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2003,7 +2003,7 @@ Field 28 IDC
Field 27:24 CWG
Field 23:20 ERG
Field 19:16 DminLine
-Enum 15:14 L1Ip
+UnsignedEnum 15:14 L1Ip
# This was named as VPIPT in the ARM but now documented as reserved
0b00 RESERVED_VPIPT
# This is named as AIVIVT in the ARM but documented as reserved
--
2.39.2
--
Without deviation from the norm, progress is not possible.