[PATCH v3 17/42] KVM: arm64: Handle trapping of FEAT_LS64* instructions

Joey Gouly joey.gouly at arm.com
Thu May 1 04:01:50 PDT 2025


On Sat, Apr 26, 2025 at 01:28:11PM +0100, Marc Zyngier wrote:
> We generally don't expect FEAT_LS64* instructions to trap, unless
> they are trapped by a guest hypervisor.
> 
> Otherwise, this is just the guest playing tricks on us by using
> an instruction that isn't advertised, which we handle with a well
> deserved UNDEF.
> 
> Signed-off-by: Marc Zyngier <maz at kernel.org>

Reviewed-by: Joey Gouly <joey.gouly at arm.com>

> ---
>  arch/arm64/kvm/handle_exit.c | 56 ++++++++++++++++++++++++++++++++++++
>  1 file changed, 56 insertions(+)
> 
> diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
> index b73dc26bc44b4..636c14ed2bb82 100644
> --- a/arch/arm64/kvm/handle_exit.c
> +++ b/arch/arm64/kvm/handle_exit.c
> @@ -298,6 +298,61 @@ static int handle_svc(struct kvm_vcpu *vcpu)
>  	return 1;
>  }
>  
> +static int handle_other(struct kvm_vcpu *vcpu)
> +{
> +	bool is_l2 = vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu);
> +	u64 hcrx = __vcpu_sys_reg(vcpu, HCRX_EL2);
> +	u64 esr = kvm_vcpu_get_esr(vcpu);
> +	u64 iss = ESR_ELx_ISS(esr);
> +	struct kvm *kvm = vcpu->kvm;
> +	bool allowed, fwd = false;
> +
> +	/*
> +	 * We only trap for two reasons:
> +	 *
> +	 * - the feature is disabled, and the only outcome is to
> +	 *   generate an UNDEF.
> +	 *
> +	 * - the feature is enabled, but a NV guest wants to trap the
> +	 *   feature used by its L2 guest. We forward the exception in
> +	 *   this case.
> +	 *
> +	 * What we don't expect is to end-up here if the guest is
> +	 * expected to be able to directly use the feature, hence the
> +	 * WARN_ON below.
> +	 */
> +	switch (iss) {
> +	case ESR_ELx_ISS_OTHER_ST64BV:
> +		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V);
> +		if (is_l2)
> +			fwd = !(hcrx & HCRX_EL2_EnASR);
> +		break;
> +	case ESR_ELx_ISS_OTHER_ST64BV0:
> +		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA);
> +		if (is_l2)
> +			fwd = !(hcrx & HCRX_EL2_EnAS0);
> +		break;
> +	case ESR_ELx_ISS_OTHER_LDST64B:
> +		allowed = kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64);
> +		if (is_l2)
> +			fwd = !(hcrx & HCRX_EL2_EnALS);
> +		break;
> +	default:
> +		/* Clearly, we're missing something. */
> +		WARN_ON_ONCE(1);
> +		allowed = false;
> +	}
> +
> +	WARN_ON_ONCE(allowed && !fwd);
> +
> +	if (allowed && fwd)
> +		kvm_inject_nested_sync(vcpu, esr);
> +	else
> +		kvm_inject_undefined(vcpu);
> +
> +	return 1;
> +}
> +
>  static exit_handle_fn arm_exit_handlers[] = {
>  	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
>  	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
> @@ -307,6 +362,7 @@ static exit_handle_fn arm_exit_handlers[] = {
>  	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
>  	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
>  	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
> +	[ESR_ELx_EC_OTHER]	= handle_other,
>  	[ESR_ELx_EC_HVC32]	= handle_hvc,
>  	[ESR_ELx_EC_SMC32]	= handle_smc,
>  	[ESR_ELx_EC_HVC64]	= handle_hvc,
> -- 
> 2.39.2
> 



More information about the linux-arm-kernel mailing list