[PATCH v6 15/27] arm64/sysreg: Add _EL1 into ID_AA64ISAR2_EL1 definition names
Mark Brown
broonie at kernel.org
Wed Jun 29 03:28:22 PDT 2022
Normally we include the full register name in the defines for fields within
registers, but this has not been followed for ID registers. In preparation
for automatic generation of defines, add the _EL1s into the defines for
ID_AA64ISAR2_EL1 to follow the convention. No functional changes.
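For context, a minimal userspace-style sketch (not kernel code) of how a
4-bit ID register field is read with the renamed defines; extract_field()
below is a stand-in for the kernel's cpuid_feature_extract_unsigned_field(),
and the sample register value is made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* New-style names: the full register name (ID_AA64ISAR2_EL1) is part of
 * each field define, matching the convention used for other registers.
 * Values taken from the hunks below. */
#define ID_AA64ISAR2_EL1_APA3_SHIFT	12
#define ID_AA64ISAR2_EL1_APA3_PAuth	0x1

/* Minimal stand-in for the kernel's 4-bit unsigned field extraction. */
static inline uint64_t extract_field(uint64_t reg, unsigned int shift)
{
	return (reg >> shift) & 0xf;
}

int main(void)
{
	/* Pretend ID_AA64ISAR2_EL1 reports APA3 == PAuth2 (0x3). */
	uint64_t isar2 = 0x3ULL << ID_AA64ISAR2_EL1_APA3_SHIFT;

	if (extract_field(isar2, ID_AA64ISAR2_EL1_APA3_SHIFT) >=
	    ID_AA64ISAR2_EL1_APA3_PAuth)
		printf("address authentication (APA3) present\n");

	return 0;
}
```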
Signed-off-by: Mark Brown <broonie@kernel.org>
---
arch/arm64/include/asm/asm_pointer_auth.h | 2 +-
arch/arm64/include/asm/cpufeature.h | 2 +-
arch/arm64/include/asm/sysreg.h | 34 +++++++++----------
arch/arm64/kernel/cpufeature.c | 34 +++++++++----------
arch/arm64/kernel/idreg-override.c | 4 +--
.../arm64/kvm/hyp/include/nvhe/fixed_config.h | 4 +--
arch/arm64/kvm/hyp/nvhe/sys_regs.c | 4 +--
arch/arm64/kvm/sys_regs.c | 6 ++--
8 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index 3b192e04a5dd..13ecc79854ee 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -61,7 +61,7 @@ alternative_else_nop_endif
mrs \tmp1, id_aa64isar1_el1
ubfx \tmp1, \tmp1, #ID_AA64ISAR1_EL1_APA_SHIFT, #8
mrs_s \tmp2, SYS_ID_AA64ISAR2_EL1
- ubfx \tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4
+ ubfx \tmp2, \tmp2, #ID_AA64ISAR2_EL1_APA3_SHIFT, #4
orr \tmp1, \tmp1, \tmp2
cbz \tmp1, .Lno_addr_auth\@
mov_q \tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 6472f2badc97..fe59035bdc22 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -673,7 +673,7 @@ static inline bool supports_clearbhb(int scope)
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
return cpuid_feature_extract_unsigned_field(isar2,
- ID_AA64ISAR2_BC_SHIFT);
+ ID_AA64ISAR2_EL1_BC_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index ba9053bc33c2..7f083a2ed7dc 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -738,29 +738,29 @@
#define ID_AA64ISAR1_EL1_GPI_IMP 0x1
/* id_aa64isar2 */
-#define ID_AA64ISAR2_BC_SHIFT 28
-#define ID_AA64ISAR2_APA3_SHIFT 12
-#define ID_AA64ISAR2_GPA3_SHIFT 8
-#define ID_AA64ISAR2_RPRES_SHIFT 4
-#define ID_AA64ISAR2_WFxT_SHIFT 0
+#define ID_AA64ISAR2_EL1_BC_SHIFT 28
+#define ID_AA64ISAR2_EL1_APA3_SHIFT 12
+#define ID_AA64ISAR2_EL1_GPA3_SHIFT 8
+#define ID_AA64ISAR2_EL1_RPRES_SHIFT 4
+#define ID_AA64ISAR2_EL1_WFxT_SHIFT 0
/*
* Value 0x1 has been removed from the architecture, and is
* reserved, but has not yet been removed from the ARM ARM
* as of ARM DDI 0487G.b.
*/
-#define ID_AA64ISAR2_WFxT_NI 0x0
-#define ID_AA64ISAR2_WFxT_IMP 0x2
-
-#define ID_AA64ISAR2_APA3_NI 0x0
-#define ID_AA64ISAR2_APA3_PAuth 0x1
-#define ID_AA64ISAR2_APA3_EPAC 0x2
-#define ID_AA64ISAR2_APA3_PAuth2 0x3
-#define ID_AA64ISAR2_APA3_FPAC 0x4
-#define ID_AA64ISAR2_APA3_FPACCOMBINE 0x5
-
-#define ID_AA64ISAR2_GPA3_NI 0x0
-#define ID_AA64ISAR2_GPA3_IMP 0x1
+#define ID_AA64ISAR2_EL1_WFxT_NI 0x0
+#define ID_AA64ISAR2_EL1_WFxT_IMP 0x2
+
+#define ID_AA64ISAR2_EL1_APA3_NI 0x0
+#define ID_AA64ISAR2_EL1_APA3_PAuth 0x1
+#define ID_AA64ISAR2_EL1_APA3_EPAC 0x2
+#define ID_AA64ISAR2_EL1_APA3_PAuth2 0x3
+#define ID_AA64ISAR2_EL1_APA3_FPAC 0x4
+#define ID_AA64ISAR2_EL1_APA3_FPACCOMBINE 0x5
+
+#define ID_AA64ISAR2_EL1_GPA3_NI 0x0
+#define ID_AA64ISAR2_EL1_GPA3_IMP 0x1
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0d4f0120c516..be20100a7d4c 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -231,13 +231,13 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_BC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_APA3_SHIFT, 4, 0),
+ FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
- FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_GPA3_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_WFxT_SHIFT, 4, 0),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -2326,9 +2326,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
.sys_reg = SYS_ID_AA64ISAR2_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR2_APA3_SHIFT,
+ .field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64ISAR2_APA3_PAuth,
+ .min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth,
.matches = has_address_auth_cpucap,
},
{
@@ -2364,9 +2364,9 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.sys_reg = SYS_ID_AA64ISAR2_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR2_GPA3_SHIFT,
+ .field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT,
.field_width = 4,
- .min_field_value = ID_AA64ISAR2_GPA3_IMP,
+ .min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP,
.matches = has_cpuid_feature,
},
{
@@ -2516,10 +2516,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.sys_reg = SYS_ID_AA64ISAR2_EL1,
.sign = FTR_UNSIGNED,
- .field_pos = ID_AA64ISAR2_WFxT_SHIFT,
+ .field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT,
.field_width = 4,
.matches = has_cpuid_feature,
- .min_field_value = ID_AA64ISAR2_WFxT_IMP,
+ .min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP,
},
{},
};
@@ -2565,8 +2565,8 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
ID_AA64ISAR1_EL1_APA_PAuth)
},
{
- HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_APA3_SHIFT,
- 4, FTR_UNSIGNED, ID_AA64ISAR2_APA3_PAuth)
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_APA3_SHIFT,
+ 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_APA3_PAuth)
},
{
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_API_SHIFT,
@@ -2581,8 +2581,8 @@ static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
4, FTR_UNSIGNED, ID_AA64ISAR1_EL1_GPA_IMP)
},
{
- HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_GPA3_SHIFT,
- 4, FTR_UNSIGNED, ID_AA64ISAR2_GPA3_IMP)
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_GPA3_SHIFT,
+ 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_GPA3_IMP)
},
{
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_GPI_SHIFT,
@@ -2653,8 +2653,8 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#endif /* CONFIG_ARM64_MTE */
HWCAP_CAP(SYS_ID_AA64MMFR0_EL1, ID_AA64MMFR0_ECV_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ECV),
HWCAP_CAP(SYS_ID_AA64MMFR1_EL1, ID_AA64MMFR1_AFP_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AFP),
- HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
- HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RPRES),
+ HWCAP_CAP(SYS_ID_AA64ISAR2_EL1, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, FTR_UNSIGNED, ID_AA64ISAR2_EL1_WFxT_IMP, CAP_HWCAP, KERNEL_HWCAP_WFXT),
#ifdef CONFIG_ARM64_SME
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SME_SHIFT, 4, FTR_UNSIGNED, ID_AA64PFR1_SME, CAP_HWCAP, KERNEL_HWCAP_SME),
HWCAP_CAP(SYS_ID_AA64SMFR0_EL1, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, FTR_UNSIGNED, ID_AA64SMFR0_EL1_FA64_IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 652c19b13588..720a847f7dfe 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -75,8 +75,8 @@ static const struct ftr_set_desc isar2 __initconst = {
.name = "id_aa64isar2",
.override = &id_aa64isar2_override,
.fields = {
- { "gpa3", ID_AA64ISAR2_GPA3_SHIFT },
- { "apa3", ID_AA64ISAR2_APA3_SHIFT },
+ { "gpa3", ID_AA64ISAR2_EL1_GPA3_SHIFT },
+ { "apa3", ID_AA64ISAR2_EL1_APA3_SHIFT },
{}
},
};
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
index 46cf9dec21ba..fa6e466ed57f 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -193,8 +193,8 @@
)
#define PVM_ID_AA64ISAR2_ALLOW (\
- ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3) | \
- ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) \
+ ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \
)
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 5b77bc1cca0c..6b94c3e6ff26 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -186,8 +186,8 @@ static u64 get_pvm_id_aa64isar2(const struct kvm_vcpu *vcpu)
u64 allow_mask = PVM_ID_AA64ISAR2_ALLOW;
if (!vcpu_has_ptrauth(vcpu))
- allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
- ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
+ allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
return id_aa64isar2_el1_sys_val & allow_mask;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index ccd973dc346a..c4fb3874b5e2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1143,10 +1143,10 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64ISAR2_EL1:
if (!vcpu_has_ptrauth(vcpu))
- val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_APA3) |
- ARM64_FEATURE_MASK(ID_AA64ISAR2_GPA3));
+ val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
if (!cpus_have_final_cap(ARM64_HAS_WFXT))
- val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_WFxT);
+ val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
break;
case SYS_ID_AA64DFR0_EL1:
/* Limit debug to ARMv8.0 */
--
2.30.2