[PATCH v5 67/69] KVM: arm64: nv: Enable ARMv8.4-NV support

Ganapatrao Kulkarni gankulkarni at os.amperecomputing.com
Tue Jan 18 03:50:18 PST 2022



On 30-11-2021 01:31 am, Marc Zyngier wrote:
> As all the VNCR-capable system registers are nicely separated
> from the rest of the crowd, let's set HCR_EL2.NV2 on and get
> the ball rolling.
> 
> Signed-off-by: Marc Zyngier <maz at kernel.org>
> ---
>   arch/arm64/include/asm/kvm_arm.h     |  1 +
>   arch/arm64/include/asm/kvm_emulate.h | 23 +++++++++++++----------
>   arch/arm64/include/asm/sysreg.h      |  1 +
>   arch/arm64/kvm/hyp/vhe/switch.c      | 14 +++++++++++++-
>   4 files changed, 28 insertions(+), 11 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index b603466803d2..18c35446249f 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -20,6 +20,7 @@
>   #define HCR_AMVOFFEN	(UL(1) << 51)
>   #define HCR_FIEN	(UL(1) << 47)
>   #define HCR_FWB		(UL(1) << 46)
> +#define HCR_NV2		(UL(1) << 45)
>   #define HCR_AT		(UL(1) << 44)
>   #define HCR_NV1		(UL(1) << 43)
>   #define HCR_NV		(UL(1) << 42)
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index 1664430be698..f282997e4a4c 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -245,21 +245,24 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
>   
>   static inline u64 __fixup_spsr_el2_write(struct kvm_cpu_context *ctxt, u64 val)
>   {
> -	if (!__vcpu_el2_e2h_is_set(ctxt)) {
> -		/*
> -		 * Clear the .M field when writing SPSR to the CPU, so that we
> -		 * can detect when the CPU clobbered our SPSR copy during a
> -		 * local exception.
> -		 */
> -		val &= ~0xc;
> -	}
> +	struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
> +
> +	if (enhanced_nested_virt_in_use(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
> +		return val;
>   
> -	return val;
> +	/*
> +	 * Clear the .M field when writing SPSR to the CPU, so that we
> +	 * can detect when the CPU clobbered our SPSR copy during a
> +	 * local exception.
> +	 */
> +	return val &= ~0xc;
>   }
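
Not an objection to this hunk, but the new NV2 early-return might
deserve a comment, as the reason is a bit subtle. My understanding
(please correct me if I got it wrong) is something like:

	/*
	 * With NV2, the guest hypervisor's SPSR_EL2 accesses are
	 * redirected to memory and never reach the real register, so
	 * the CPU cannot clobber our copy and no fixup is needed.
	 */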
>   
>   static inline u64 __fixup_spsr_el2_read(const struct kvm_cpu_context *ctxt, u64 val)
>   {
> -	if (__vcpu_el2_e2h_is_set(ctxt))
> +	struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
> +
> +	if (enhanced_nested_virt_in_use(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
>   		return val;
>   
>   	/*
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 71e6a0410e7c..5de90138d0a4 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -550,6 +550,7 @@
>   #define SYS_TCR_EL2			sys_reg(3, 4, 2, 0, 2)
>   #define SYS_VTTBR_EL2			sys_reg(3, 4, 2, 1, 0)
>   #define SYS_VTCR_EL2			sys_reg(3, 4, 2, 1, 2)
> +#define SYS_VNCR_EL2			sys_reg(3, 4, 2, 2, 0)
>   
>   #define SYS_ZCR_EL2			sys_reg(3, 4, 1, 2, 0)
>   #define SYS_TRFCR_EL2			sys_reg(3, 4, 1, 2, 1)
> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index ef4488db6dc1..5cadda79089a 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -45,7 +45,13 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
>   			 * the EL1 virtual memory control register accesses
>   			 * as well as the AT S1 operations.
>   			 */
> -			hcr |= HCR_TVM | HCR_TRVM | HCR_AT | HCR_TTLB | HCR_NV1;
> +			if (enhanced_nested_virt_in_use(vcpu)) {
> +				hcr &= ~HCR_TVM;

I think we should clear TRVM here as well? With NV2, reads of the EL1
virtual memory control registers get redirected to memory just like
writes, so there should be no need to keep trapping them:

				hcr &= ~(HCR_TVM | HCR_TRVM);
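
i.e. the whole block would then read (untested, just to illustrate):

			if (enhanced_nested_virt_in_use(vcpu)) {
				hcr &= ~(HCR_TVM | HCR_TRVM);
			} else {
				hcr |= HCR_TVM | HCR_TRVM | HCR_TTLB;
			}

			hcr |= HCR_AT | HCR_NV1;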

> +			} else {
> +				hcr |= HCR_TVM | HCR_TRVM | HCR_TTLB;
> +			}
> +
> +			hcr |= HCR_AT | HCR_NV1;
>   		} else {
>   			/*
>   			 * For a guest hypervisor on v8.1 (VHE), allow to
> @@ -79,6 +85,12 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
>   			if (!vcpu_el2_tge_is_set(vcpu))
>   				hcr |= HCR_AT | HCR_TTLB;
>   		}
> +
> +		if (enhanced_nested_virt_in_use(vcpu)) {
> +			hcr |= HCR_AT | HCR_TTLB | HCR_NV2;
> +			write_sysreg_s(vcpu->arch.ctxt.vncr_array,
> +				       SYS_VNCR_EL2);
> +		}
>   	} else if (nested_virt_in_use(vcpu)) {
>   		u64 vhcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
>   
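
As an aside, to make sure I understand the VNCR_EL2 programming above:
with HCR_EL2.NV2 set, the guest hypervisor's accesses to the redirected
registers become plain loads/stores at architected offsets from the
address held in VNCR_EL2, i.e. into the page behind
vcpu->arch.ctxt.vncr_array (assuming vncr_array is the kernel mapping
of the backing page). Roughly, on the KVM side (the offset macro is
made up for illustration):

	/* the guest's "mrs x0, spsr_el2" turns into a load from here */
	u64 guest_spsr = *(u64 *)((char *)vcpu->arch.ctxt.vncr_array +
				  VNCR_SPSR_EL2_OFFSET);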


Thanks,
Ganapat


