[PATCH] arm64/fpsimd: Ensure we don't contend a SMCU from idling CPUs
Mark Brown
broonie at kernel.org
Tue Jun 18 06:57:42 PDT 2024
When we enter the kernel we currently don't update any of the floating
point register state until either something else uses floating point or we
get a CPU_PM_ENTER notification during suspend or cpuidle. This means that
on a system which has been configured with both suspend and cpuidle
disabled we will leave whatever floating point state was last loaded
present in the registers while a CPU is idling.
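
For context, the existing CPU_PM handling in fpsimd.c which covers the
suspend and cpuidle cases looks roughly like the sketch below (simplified
and reconstructed from the current code, so details may differ); the point
is that it is only reached via a cpu_pm_enter() notification and therefore
never runs when both suspend and cpuidle are disabled:

/* Rough sketch of the existing CPU_PM notifier in fpsimd.c */
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		/* Save and invalidate this CPU's FP/SVE/SME state */
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}
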
For SME this may cause an idling CPU to interfere with the operation of
other CPUs since SME may be implemented as a SMCU shared between multiple
CPUs. Leaving ZA or especially streaming mode enabled may be taken by
the hardware as an indication that SME is in active use by the CPU and
cause resources to be allocated to it at the expense of other CPUs.
Add an arch_cpu_idle_enter() implementation which disables SME if it is
active when we idle the CPU, ensuring that we don't create any spurious
contention even if cpuidle is not enabled.
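
This covers the non-cpuidle case because the generic idle loop calls the
architecture hook before any idle method is selected, including the plain
WFI fallback used when cpuidle is unavailable. A heavily simplified sketch
of the idle loop in kernel/sched/idle.c (not the exact upstream code),
just to show where the new hook runs:

/*
 * Heavily simplified sketch of the generic idle loop, only to
 * illustrate where arch_cpu_idle_enter() sits.
 */
static void do_idle(void)
{
	while (!need_resched()) {
		local_irq_disable();

		/* On arm64 this now calls fpsimd_idle_enter() */
		arch_cpu_idle_enter();

		/*
		 * Falls back to default_idle_call() (a plain WFI on
		 * arm64) when cpuidle is not available, in which case
		 * no CPU_PM_ENTER notification is generated.
		 */
		cpuidle_idle_call();

		arch_cpu_idle_exit();
	}
}
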
Signed-off-by: Mark Brown <broonie at kernel.org>
---
 arch/arm64/include/asm/fpsimd.h |  2 ++
 arch/arm64/kernel/fpsimd.c      | 14 ++++++++++++++
 arch/arm64/kernel/process.c     |  5 +++++
 3 files changed, 21 insertions(+)
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index bc69ac368d73..e1453a1d261d 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -82,6 +82,8 @@ extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_kvm_prepare(void);
+extern void fpsimd_idle_enter(void);
+
struct cpu_fp_state {
	struct user_fpsimd_state *st;
	void *sve_state;
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 82e8a6017382..400eaad374a2 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -2126,6 +2126,20 @@ static void __init fpsimd_pm_init(void)
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
+void fpsimd_idle_enter(void)
+{
+	/*
+	 * Leaving SME enabled may leave this core contending with
+	 * other cores if we have a SMCU so disable it whenever we
+	 * enter idle to avoid this.  Only do this if SM or ZA are
+	 * actually enabled to avoid overhead in cases where we don't
+	 * enter a low enough power state to lose register state.
+	 */
+	if (system_supports_sme() &&
+	    (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)))
+		fpsimd_save_and_flush_cpu_state();
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4ae31b7af6c3..fd616882cd49 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -68,6 +68,11 @@ EXPORT_SYMBOL(__stack_chk_guard);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
+void arch_cpu_idle_enter(void)
+{
+	fpsimd_idle_enter();
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
---
base-commit: 83a7eefedc9b56fe7bfeff13b6c7356688ffa670
change-id: 20240617-arm64-sme-no-cpuidle-8c68ef86e370
Best regards,
--
Mark Brown <broonie at kernel.org>