[PATCH 2/2] KVM: arm64: Move pkvm's special 32bit handling into a generic infrastructure
Fuad Tabba
tabba at google.com
Wed Nov 24 05:11:52 PST 2021
Hi Marc,
On Tue, Nov 23, 2021 at 2:23 PM Marc Zyngier <maz at kernel.org> wrote:
>
> Protected KVM is trying to turn AArch32 exceptions into an illegal
> exception entry. Unfortunately, it does that it a way that is a bit
Small nit: s/it/in
> abrupt, and too early for PSTATE to be available.
> Instead, move it to the fixup code, which is a more reasonable place
> for it. This will also be useful for the NV code.
This approach seems easier to generalize to other cases than the
previous one was.
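For what it's worth, a purely illustrative sketch of how another
configuration (e.g. the NV code mentioned in the commit message) could
plug its own policy into the same hook; vcpu_should_be_rejected() is a
made-up predicate, not an existing helper:

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Illustrative only: repaint the exit as an illegal exception
	 * entry, exactly as the pKVM AArch32 case below does, whenever
	 * the (hypothetical) condition holds.
	 */
	if (unlikely(vcpu_should_be_rejected(vcpu))) {
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

The nice part is that fixup_guest_exit() doesn't need to know anything
about it.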
Reviewed-by: Fuad Tabba <tabba at google.com>
Cheers,
/fuad
>
> Signed-off-by: Marc Zyngier <maz at kernel.org>
> ---
>  arch/arm64/kvm/hyp/include/hyp/switch.h | 8 ++++++++
>  arch/arm64/kvm/hyp/nvhe/switch.c        | 8 +-------
>  arch/arm64/kvm/hyp/vhe/switch.c         | 4 ++++
> 3 files changed, 13 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
> index d79fd101615f..96c5f3fb7838 100644
> --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
> +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
> @@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
>
> static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
>
> +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
> +
> /*
> * Allow the hypervisor to handle the exit with an exit handler if it has one.
> *
> @@ -435,6 +437,12 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
> */
> vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
>
> + /*
> + * Check whether we want to repaint the state one way or
> + * another.
> + */
> + early_exit_filter(vcpu, exit_code);
> +
> if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
> vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
>
> diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
> index c0e3fed26d93..d13115a12434 100644
> --- a/arch/arm64/kvm/hyp/nvhe/switch.c
> +++ b/arch/arm64/kvm/hyp/nvhe/switch.c
> @@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
> * Returns false if the guest ran in AArch32 when it shouldn't have, and
> * thus should exit to the host, or true if a the guest run loop can continue.
> */
> -static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
> +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
> {
> struct kvm *kvm = kern_hyp_va(vcpu->kvm);
>
> @@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
> vcpu->arch.target = -1;
> *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
> *exit_code |= ARM_EXCEPTION_IL;
> - return false;
> }
> -
> - return true;
> }
>
> /* Switch to the guest for legacy non-VHE systems */
> @@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
> /* Jump in the fire! */
> exit_code = __guest_enter(vcpu);
>
> - if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
> - break;
> -
> /* And we're baaack! */
> } while (fixup_guest_exit(vcpu, &exit_code));
>
> diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
> index 5a2cb5d9bc4b..fbb26b93c347 100644
> --- a/arch/arm64/kvm/hyp/vhe/switch.c
> +++ b/arch/arm64/kvm/hyp/vhe/switch.c
> @@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
> return hyp_exit_handlers;
> }
>
> +static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
> +{
> +}
> +
> /* Switch to the guest for VHE systems running in EL2 */
> static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
> {
> --
> 2.30.2
>