[PATCH] arm64: errata: Fix handling of 1418040 with late CPU onlining
Catalin Marinas
catalin.marinas at arm.com
Fri Nov 6 07:44:00 EST 2020
On Fri, Nov 06, 2020 at 12:18:32PM +0000, Suzuki K Poulose wrote:
> On 11/6/20 11:49 AM, Will Deacon wrote:
> > In a surprising turn of events, it transpires that CPU capabilities
> > configured as ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE are never set as the
> > result of late-onlining. Therefore our handling of erratum 1418040 does
> > not get activated if it is not required by any of the boot CPUs, even
> > though we allow late-onlining of an affected CPU.
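(For reference: in the kernel this patch is against, ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE in arch/arm64/include/asm/cpufeature.h is just a combination of scope and late-CPU permission flags,

#define ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE		\
	(ARM64_CPUCAP_SCOPE_LOCAL_CPU		|	\
	 ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	|	\
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

so a late-onlined CPU is only permitted to have the erratum; nothing on the late-onlining path ever calls cpus_set_cap() for it.)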
>
> The capability state is not altered after SMP boot for any type of
> cap. The weak caps are there to allow a late CPU to come online
> without getting "banned". This may be something we could relax with
> a new flag in the scope.
Like this? Of course, it needs some testing.
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 97244d4feca9..b896e72131d7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -246,6 +246,8 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
 /* Panic when a conflict is detected */
 #define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))
+/* Together with PERMITTED_FOR_LATE_CPU, set the corresponding cpu_hwcaps bit */
+#define ARM64_CPUCAP_SET_FOR_LATE_CPU		((u16)BIT(7))
 
 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -481,6 +483,16 @@ static __always_inline bool cpus_have_const_cap(int num)
 		return cpus_have_cap(num);
 }
 
+/*
+ * Test for a capability with a runtime check. This is an alias for
+ * cpus_have_cap() but with the name chosen to emphasize the applicability to
+ * late capability setting.
+ */
+static __always_inline bool cpus_have_late_cap(int num)
+{
+	return cpus_have_cap(num);
+}
+
 static inline void cpus_set_cap(unsigned int num)
 {
 	if (num >= ARM64_NCAPS) {
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 61314fd70f13..6b7de7292e8c 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -481,7 +481,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		 * also need the non-affected CPUs to be able to come
 		 * in at any point in time. Wonderful.
 		 */
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE |
+			ARM64_CPUCAP_SET_FOR_LATE_CPU,
 	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index dcc165b3fc04..51e63be41ea5 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1720,6 +1720,12 @@ cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
 	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
 }
 
+static bool
+cpucap_set_for_late_cpu(const struct arm64_cpu_capabilities *cap)
+{
+	return !!(cap->type & ARM64_CPUCAP_SET_FOR_LATE_CPU);
+}
+
 static bool
 cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 {
@@ -2489,6 +2495,11 @@ static void verify_local_cpu_caps(u16 scope_mask)
 			 */
 			if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
 				break;
+			/*
+			 * Set the capability bit if it allows late setting.
+			 */
+			if (cpucap_set_for_late_cpu(caps))
+				cpus_set_cap(caps->capability);
 		}
 	}
 
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4784011cecac..152639962845 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -523,7 +523,7 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	u64 val;
 
 	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
-	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
+	      cpus_have_late_cap(ARM64_WORKAROUND_1418040)))
 		return;
 
 	prev32 = is_compat_thread(task_thread_info(prev));
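For comparison, the existing helper that the process.c hunk moves away from looks roughly like this (paraphrased from the same header, comments added; not part of the patch):

static __always_inline bool cpus_have_const_cap(int num)
{
	if (system_capabilities_finalized())
		return __cpus_have_const_cap(num);	/* static key, patched at boot */
	else
		return cpus_have_cap(num);		/* cpu_hwcaps bitmap */
}

Once the system capabilities are finalized, the static key is never flipped for a capability that only a late CPU brings in, whereas cpus_have_cap() (and therefore the cpus_have_late_cap() alias) reads the cpu_hwcaps bitmap that the new cpus_set_cap() call in verify_local_cpu_caps() updates.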