[PATCH v4 2/4] arm64: cpufeature: Modify address authentication cpufeature to exact

Amit Daniel Kachhap <amit.kachhap@arm.com>
Fri Jul 10 04:00:08 EDT 2020


The address authentication cpufeature fields are currently set as
FTR_LOWER_SAFE, which is not compatible with the different configurations
added by the Armv8.3 ptrauth enhancements: the levels behave differently
from one another and there is no tunable to enable only the lower, safe
versions. Rectify this by setting the cpufeature type of those fields to
FTR_EXACT.
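
For context, the type of a feature field decides what "safe" value the
framework adopts when CPUs disagree. The relevant cases in
arm64_ftr_safe_value(), paraphrased here for illustration (not part of
this patch):

	static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp,
					s64 new, s64 cur)
	{
		switch (ftrp->type) {
		case FTR_EXACT:
			/* On any mismatch, fall back to the preset safe value */
			return ftrp->safe_val;
		case FTR_LOWER_SAFE:
			/* Assume the lower of the two values is safe */
			return new < cur ? new : cur;
		/* ... other types elided ... */
		}
	}

The ptrauth levels are not ordered such that lower is safer, so
FTR_LOWER_SAFE is the wrong policy; FTR_EXACT with a safe value of 0
(feature absent) is used instead, as in the hunk below.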

The cpufeature framework also does not prevent a secondary CPU with a
mismatching (non-exact) feature from booting; it merely taints the
kernel. Work around this by replacing the generic match handler with a
new handler specific to ptrauth.
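
The generic handler only checks that the local field value meets the
entry's minimum. A condensed sketch (scope handling elided, helper shapes
paraphrased for illustration, not verbatim kernel code):

	static bool has_cpuid_feature(const struct arm64_cpu_capabilities *entry,
				      int scope)
	{
		u64 reg = __read_sysreg_by_encoding(entry->sys_reg);
		int val = cpuid_feature_extract_field(reg, entry->field_pos,
						      entry->sign);

		/* Only a minimum check: any level >= min_field_value matches */
		return val >= entry->min_field_value;
	}

So a secondary CPU whose level merely exceeds the minimum would still
match even if it differs from the boot CPU's level; the new handler
additionally caches the boot CPU's level and requires equality.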

After this change, if the ptrauth configuration of any secondary CPU
differs from that of the boot CPU, the mismatched CPUs are parked in an
infinite loop.
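
The parking comes from the capability verification each secondary runs
during bringup: a CPU that fails to match an already-established
boot-CPU capability is sent to cpu_die_early(), which never returns.
Roughly (condensed from the verification path in cpufeature.c,
illustrative only):

	/*
	 * Each secondary re-runs ->matches() with SCOPE_LOCAL_CPU
	 * against the capabilities the boot CPU detected.
	 */
	if (cpus_have_cap(caps->capability) &&
	    !caps->matches(caps, SCOPE_LOCAL_CPU)) {
		pr_crit("CPU%d: Detected conflict for capability %s\n",
			smp_processor_id(), caps->desc);
		cpu_die_early();	/* parks the CPU in a wfe/wfi loop */
	}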

Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
[Suzuki: Introduce new matching function for address authentication]
Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
---
Changes since v3:
 * Commit logs cleanup.

 arch/arm64/kernel/cpufeature.c | 56 ++++++++++++++++++++++++++++------
 1 file changed, 47 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9fae0efc80c1..8ac8c18f70c9 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -210,9 +210,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -1605,11 +1605,49 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
-static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
-			     int __unused)
+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
 {
-	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
-	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+	int local_cpu_auth;
+	static int boot_cpu_auth_arch;
+	static int boot_cpu_auth_imp_def;
+
+	/* We don't expect to be called with SCOPE_SYSTEM */
+	WARN_ON(scope == SCOPE_SYSTEM);
+
+	local_cpu_auth = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+						     entry->field_pos, entry->sign);
+	/*
+	 * The ptr-auth feature levels are not compatible with lower
+	 * levels, so every CPU must match the level of the boot CPU:
+	 * cache the boot CPU's level here and compare each secondary
+	 * CPU against it.
+	 */
+	if (scope & SCOPE_BOOT_CPU) {
+		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
+			boot_cpu_auth_imp_def = local_cpu_auth;
+			return boot_cpu_auth_imp_def >= entry->min_field_value;
+		} else if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
+			boot_cpu_auth_arch = local_cpu_auth;
+			return boot_cpu_auth_arch >= entry->min_field_value;
+		}
+	} else if (scope & SCOPE_LOCAL_CPU) {
+		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
+			return (local_cpu_auth >= entry->min_field_value) &&
+			       (local_cpu_auth == boot_cpu_auth_imp_def);
+		}
+		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
+			return (local_cpu_auth >= entry->min_field_value) &&
+			       (local_cpu_auth == boot_cpu_auth_arch);
+		}
+	}
+	return false;
+}
+
+static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
+				     int scope)
+{
+	return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
+	       has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
 }
 
 static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
@@ -1958,7 +1996,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_APA_SHIFT,
 		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
-		.matches = has_cpuid_feature,
+		.matches = has_address_auth_cpucap,
 	},
 	{
 		.desc = "Address authentication (IMP DEF algorithm)",
@@ -1968,12 +2006,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_API_SHIFT,
 		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
-		.matches = has_cpuid_feature,
+		.matches = has_address_auth_cpucap,
 	},
 	{
 		.capability = ARM64_HAS_ADDRESS_AUTH,
 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.matches = has_address_auth,
+		.matches = has_address_auth_metacap,
 	},
 	{
 		.desc = "Generic authentication (architected algorithm)",
-- 
2.17.1