[PATCH] arm64: use sanitized feature registers for conditional ZCR_EL1 and SMCR_EL1 reads

Marc Zyngier maz at kernel.org
Wed Jul 13 23:47:51 PDT 2022


On Wed, 13 Jul 2022 19:18:52 +0100,
Peter Collingbourne <pcc at google.com> wrote:
> 
> With arm64.nosve we would still read ZCR_EL1 in __cpuinfo_store_cpu
> because the condition for reading it was based on the unsanitized feature
> register value info->reg_id_aa64pfr0. Fix the problem by moving the reads
> to init_cpu_features after we have computed the sanitized value. Fix
> the SMCR_EL1 read for SME similarly.
> 
> Fixes: 504ee23611c4 ("arm64: Add the arm64.nosve command line option")
> Signed-off-by: Peter Collingbourne <pcc at google.com>
> ---
>  arch/arm64/kernel/cpufeature.c | 11 +++++++----
>  arch/arm64/kernel/cpuinfo.c    |  8 --------
>  2 files changed, 7 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index e5afa9eba85d..71d290fec36f 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -999,15 +999,18 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
>  	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
>  		init_32bit_cpu_features(&info->aarch32);
>  
> -	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
> +	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
> +	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
> +		info->reg_zcr = read_zcr_features();
>  		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
>  		vec_init_vq_map(ARM64_VEC_SVE);
>  	}
>  
> -	if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
> +	if (IS_ENABLED(CONFIG_ARM64_SME) &&
> +	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
> +		info->reg_smcr = read_smcr_features();
>  		init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
> -		if (IS_ENABLED(CONFIG_ARM64_SME))
> -			vec_init_vq_map(ARM64_VEC_SME);
> +		vec_init_vq_map(ARM64_VEC_SME);
>  	}
>  
>  	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
> diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
> index 8eff0a34ffd4..66bc6f25d3b4 100644
> --- a/arch/arm64/kernel/cpuinfo.c
> +++ b/arch/arm64/kernel/cpuinfo.c
> @@ -418,14 +418,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
>  	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
>  		__cpuinfo_store_cpu_32bit(&info->aarch32);
>  
> -	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
> -	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
> -		info->reg_zcr = read_zcr_features();
> -
> -	if (IS_ENABLED(CONFIG_ARM64_SME) &&
> -	    id_aa64pfr1_sme(info->reg_id_aa64pfr1))
> -		info->reg_smcr = read_smcr_features();

Not sure what tree this applies on top of, but it is a kernel that doesn't
contain d69d564964872 ("arm64/sme: Expose SMIDR through sysfs").
> -
>  	cpuinfo_detect_icache_policy(info);
>  }
>  

This looks wrong to me. With this change, a secondary CPU never
initialises its own view of reg_{zcr,smcr}. I came up with the
following patch instead, also updating the cpuinfo_arm64 structure
when updating the capabilities on secondary CPU boot (on top of
arm64/boot):

From ee8d6a3741229336acedf13aa1304e05ed630ff0 Mon Sep 17 00:00:00 2001
From: Marc Zyngier <maz at kernel.org>
Date: Mon, 11 Jul 2022 16:43:36 +0100
Subject: [PATCH] arm64: Delay initialisation of cpuinfo_arm64::reg_{zcr,smcr}

Even if we are now able to tell the kernel to avoid exposing SVE/SME
from the command line, we still have a couple of places where we
unconditionally access the ZCR_EL1 (resp. SMCR_EL1) registers.

On systems with broken firmware, this results in a crash even if
arm64.nosve (resp. arm64.nosme) was passed on the command line.

To avoid this, only update cpuinfo_arm64::reg_{zcr,smcr} once
we have computed the sanitised version for the corresponding
feature registers (ID_AA64PFR0 for SVE, and ID_AA64PFR1 for
SME). This results in some minor refactoring.

Signed-off-by: Marc Zyngier <maz at kernel.org>
---
 arch/arm64/kernel/cpufeature.c | 41 ++++++++++++++++++++++++----------
 arch/arm64/kernel/cpuinfo.c    | 16 -------------
 2 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 1707576b7ca0..6adca2f337e8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1000,15 +1000,24 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
 		init_32bit_cpu_features(&info->aarch32);
 
-	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+		info->reg_zcr = read_zcr_features();
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 		vec_init_vq_map(ARM64_VEC_SVE);
 	}
 
-	if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+	if (IS_ENABLED(CONFIG_ARM64_SME) &&
+	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+		info->reg_smcr = read_smcr_features();
+		/*
+		 * We mask out SMPS since even if the hardware
+		 * supports priorities the kernel does not at present
+		 * and we block access to them.
+		 */
+		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
 		init_cpu_ftr_reg(SYS_SMCR_EL1, info->reg_smcr);
-		if (IS_ENABLED(CONFIG_ARM64_SME))
-			vec_init_vq_map(ARM64_VEC_SME);
+		vec_init_vq_map(ARM64_VEC_SME);
 	}
 
 	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
@@ -1240,23 +1249,31 @@ void update_cpu_features(int cpu,
 	taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
 				      info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);
 
-	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
+	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+	    id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
+		info->reg_zcr = read_zcr_features();
 		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
 					info->reg_zcr, boot->reg_zcr);
 
-		/* Probe vector lengths, unless we already gave up on SVE */
-		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-		    !system_capabilities_finalized())
+		/* Probe vector lengths */
+		if (!system_capabilities_finalized())
 			vec_update_vq_map(ARM64_VEC_SVE);
 	}
 
-	if (id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
+	if (IS_ENABLED(CONFIG_ARM64_SME) &&
+	    id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
+		info->reg_smcr = read_smcr_features();
+		/*
+		 * We mask out SMPS since even if the hardware
+		 * supports priorities the kernel does not at present
+		 * and we block access to them.
+		 */
+		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
 		taint |= check_update_ftr_reg(SYS_SMCR_EL1, cpu,
 					info->reg_smcr, boot->reg_smcr);
 
-		/* Probe vector lengths, unless we already gave up on SME */
-		if (id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)) &&
-		    !system_capabilities_finalized())
+		/* Probe vector lengths */
+		if (!system_capabilities_finalized())
 			vec_update_vq_map(ARM64_VEC_SME);
 	}
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 3628b8a28e92..5b53d275adc1 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -438,22 +438,6 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
 		__cpuinfo_store_cpu_32bit(&info->aarch32);
 
-	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
-	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
-		info->reg_zcr = read_zcr_features();
-
-	if (IS_ENABLED(CONFIG_ARM64_SME) &&
-	    id_aa64pfr1_sme(info->reg_id_aa64pfr1)) {
-		info->reg_smcr = read_smcr_features();
-
-		/*
-		 * We mask out SMPS since even if the hardware
-		 * supports priorities the kernel does not at present
-		 * and we block access to them.
-		 */
-		info->reg_smidr = read_cpuid(SMIDR_EL1) & ~SMIDR_EL1_SMPS;
-	}
-
 	cpuinfo_detect_icache_policy(info);
 }
 
-- 
2.34.1


-- 
Without deviation from the norm, progress is not possible.



More information about the linux-arm-kernel mailing list