[PATCH v2 4/4] arm64: Add support of PAuth QARMA3 architected algorithm

Will Deacon will at kernel.org
Tue Feb 15 10:21:54 PST 2022


On Mon, Jan 31, 2022 at 05:06:54PM +0000, Vladimir Murzin wrote:
> QARMA3 is a relaxed version of the QARMA5 algorithm, which is expected
> to reduce the latency of calculation while still delivering a suitable
> level of security.
> 
> Support for QARMA3 can be discovered via ID_AA64ISAR2_EL1:
> 
>     APA3, bits [15:12] Indicates whether the QARMA3 algorithm is
>                        implemented in the PE for address
>                        authentication in AArch64 state.
> 
>     GPA3, bits [11:8]  Indicates whether the QARMA3 algorithm is
>                        implemented in the PE for generic code
>                        authentication in AArch64 state.
> 
> Signed-off-by: Vladimir Murzin <vladimir.murzin at arm.com>
> ---
>  arch/arm64/include/asm/asm_pointer_auth.h      |  3 ++
>  arch/arm64/include/asm/cpufeature.h            |  1 +
>  arch/arm64/include/asm/kvm_hyp.h               |  1 +
>  arch/arm64/include/asm/sysreg.h                | 12 +++++++
>  arch/arm64/kernel/cpufeature.c                 | 45 +++++++++++++++++++++++---
>  arch/arm64/kernel/idreg-override.c             | 16 +++++++--
>  arch/arm64/kvm/arm.c                           |  1 +
>  arch/arm64/kvm/hyp/include/nvhe/fixed_config.h |  5 +++
>  arch/arm64/kvm/hyp/nvhe/sys_regs.c             | 14 ++++++++
>  arch/arm64/kvm/sys_regs.c                      |  5 +++
>  arch/arm64/tools/cpucaps                       |  2 ++
>  11 files changed, 99 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
> index f1bba5f..ead62f7 100644
> --- a/arch/arm64/include/asm/asm_pointer_auth.h
> +++ b/arch/arm64/include/asm/asm_pointer_auth.h
> @@ -60,6 +60,9 @@ alternative_else_nop_endif
>  	.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
>  	mrs	\tmp1, id_aa64isar1_el1
>  	ubfx	\tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
> +	mrs_s	\tmp2, SYS_ID_AA64ISAR2_EL1
> +	ubfx	\tmp2, \tmp2, #ID_AA64ISAR2_APA3_SHIFT, #4
> +	orr	\tmp1, \tmp1, \tmp2
>  	cbz	\tmp1, .Lno_addr_auth\@
>  	mov_q	\tmp1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
>  			SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index ef6be92..fe7137f 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -854,6 +854,7 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
>  extern struct arm64_ftr_override id_aa64mmfr1_override;
>  extern struct arm64_ftr_override id_aa64pfr1_override;
>  extern struct arm64_ftr_override id_aa64isar1_override;
> +extern struct arm64_ftr_override id_aa64isar2_override;
>  
>  u32 get_kvm_ipa_limit(void);
>  void dump_cpu_features(void);
> diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
> index 462882f..aa7fa2a 100644
> --- a/arch/arm64/include/asm/kvm_hyp.h
> +++ b/arch/arm64/include/asm/kvm_hyp.h
> @@ -118,6 +118,7 @@ extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
>  extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
>  extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
>  extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
> +extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val);
>  extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
>  extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
>  extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 898bee0..cbe4164 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -773,6 +773,8 @@
>  #define ID_AA64ISAR1_GPI_IMP_DEF		0x1
>  
>  /* id_aa64isar2 */
> +#define ID_AA64ISAR2_APA3_SHIFT		12
> +#define ID_AA64ISAR2_GPA3_SHIFT		8
>  #define ID_AA64ISAR2_RPRES_SHIFT	4
>  #define ID_AA64ISAR2_WFXT_SHIFT		0
>  
> @@ -786,6 +788,16 @@
>  #define ID_AA64ISAR2_WFXT_NI		0x0
>  #define ID_AA64ISAR2_WFXT_SUPPORTED	0x2
>  
> +#define ID_AA64ISAR2_APA3_NI			0x0
> +#define ID_AA64ISAR2_APA3_ARCHITECTED		0x1
> +#define ID_AA64ISAR2_APA3_ARCH_EPAC		0x2
> +#define ID_AA64ISAR2_APA3_ARCH_EPAC2		0x3
> +#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC	0x4
> +#define ID_AA64ISAR2_APA3_ARCH_EPAC2_FPAC_CMB	0x5
> +
> +#define ID_AA64ISAR2_GPA3_NI			0x0
> +#define ID_AA64ISAR2_GPA3_ARCHITECTED		0x1
> +
>  /* id_aa64pfr0 */
>  #define ID_AA64PFR0_CSV3_SHIFT		60
>  #define ID_AA64PFR0_CSV2_SHIFT		56
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 69fbc53..aab6766 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -226,6 +226,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
>  };
>  
>  static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
> +	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_APA3_SHIFT, 4, 0),
> +	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> +		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_GPA3_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_RPRES_SHIFT, 4, 0),
>  	ARM64_FTR_END,
>  };
> @@ -596,6 +600,7 @@ static const struct arm64_ftr_bits ftr_raz[] = {
>  struct arm64_ftr_override __ro_after_init id_aa64mmfr1_override;
>  struct arm64_ftr_override __ro_after_init id_aa64pfr1_override;
>  struct arm64_ftr_override __ro_after_init id_aa64isar1_override;
> +struct arm64_ftr_override __ro_after_init id_aa64isar2_override;
>  
>  static const struct __ftr_reg_entry {
>  	u32			sys_id;
> @@ -644,6 +649,7 @@ static const struct __ftr_reg_entry {
>  	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
>  			       &id_aa64isar1_override),
> -	ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
> +	ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
> +			       &id_aa64isar2_override),
>  
>  	/* Op1 = 0, CRn = 0, CRm = 7 */
>  	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
> @@ -1834,10 +1841,11 @@ static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
>  {
>  	bool api = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
>  	bool apa = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
> +	bool apa3 = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);
>  
> -	WARN_ON(apa && api);
> +	WARN_ON((apa && api) || (apa && apa3) || (api && apa3));

I don't really get the point of this warning: what is somebody supposed to
do if they hit it? The kernel isn't the right place to make assertions about
the CPU design.
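
If the concern is a CPU advertising more than one algorithm at once, then
I'd rather we just accepted whatever we see and moved on. Something like
this (completely untested, just reshuffling the helpers already in this
patch) reads better to me than shouting at a user who can't do anything
about it:

	static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
					     int scope)
	{
		bool api = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
		bool apa = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5], scope);
		bool apa3 = has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3], scope);

		/* Any one of the algorithms will do; no need to WARN about combinations. */
		return apa || api || apa3;
	}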

Will
