[PATCH v3 14/15] KVM: arm64: Convert the SVE guest vcpu flag to a vm flag
Marc Zyngier
maz@kernel.org
Sun Dec 1 04:58:47 PST 2024
On Thu, 28 Nov 2024 12:35:14 +0000,
Fuad Tabba <tabba@google.com> wrote:
>
> The vcpu flag GUEST_HAS_SVE is per-vcpu, but it is based on what
> is now a per-vm feature. Make the flag per-vm.
>
> Signed-off-by: Fuad Tabba <tabba@google.com>
> ---
> arch/arm64/include/asm/kvm_emulate.h    |  6 +++---
> arch/arm64/include/asm/kvm_host.h       | 10 ++++++----
> arch/arm64/kvm/hyp/include/hyp/switch.h |  2 +-
> arch/arm64/kvm/hyp/nvhe/hyp-main.c      |  2 +-
> arch/arm64/kvm/hyp/nvhe/pkvm.c          | 11 +++++++----
> arch/arm64/kvm/hyp/nvhe/switch.c        |  8 ++++----
> arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
> arch/arm64/kvm/reset.c                  |  2 +-
> 8 files changed, 24 insertions(+), 19 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
> index 406e99a452bf..ae6d0dc0e4ff 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -620,7 +620,7 @@ static __always_inline void kvm_write_cptr_el2(u64 val)
>  }
> 
>  /* Resets the value of cptr_el2 when returning to the host. */
> -static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
> +static __always_inline void kvm_reset_cptr_el2(struct kvm *kvm)
I'd rather you avoid the resulting churn:
static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	[...]
which does the trick at zero cost. It is also more logical to keep the
vcpu as a parameter, as the per-VM-ness is only an implementation
detail.
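To spell it out, the end result would be something like this (an
untested sketch; the VHE leg is elided below, just like in your hunk,
and the final kvm_write_cptr_el2() call is unchanged):

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	/*
	 * kern_hyp_va() patches down to an identity transform when hyp
	 * and the kernel share a VA space, so resolving the VM pointer
	 * here costs (almost) nothing, and every call site keeps
	 * passing the vcpu, exactly as today.
	 */
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	u64 val;

	if (has_vhe()) {
		/* VHE leg unchanged, elided here as in the hunk below */
	} else if (has_hvhe()) {
		val = CPACR_ELx_FPEN;

		if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
			val |= CPACR_ELx_ZEN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_ELx_SMEN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (kvm_has_sve(kvm) && guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;
		if (!cpus_have_final_cap(ARM64_SME))
			val |= CPTR_EL2_TSM;
	}

	kvm_write_cptr_el2(val);
}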
>  {
>  	u64 val;
> 
> @@ -631,14 +631,14 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
>  	} else if (has_hvhe()) {
>  		val = CPACR_ELx_FPEN;
> 
> -		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
> +		if (!kvm_has_sve(kvm) || !guest_owns_fp_regs())
>  			val |= CPACR_ELx_ZEN;
>  		if (cpus_have_final_cap(ARM64_SME))
>  			val |= CPACR_ELx_SMEN;
>  	} else {
>  		val = CPTR_NVHE_EL2_RES1;
> 
> -		if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
> +		if (kvm_has_sve(kvm) && guest_owns_fp_regs())
>  			val |= CPTR_EL2_TZ;
>  		if (!cpus_have_final_cap(ARM64_SME))
>  			val |= CPTR_EL2_TSM;
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 680ecef1d7aa..c5c80c789ad0 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -331,6 +331,8 @@ struct kvm_arch {
>  #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED	7
>  	/* Fine-Grained UNDEF initialised */
>  #define KVM_ARCH_FLAG_FGU_INITIALIZED		8
> +	/* SVE exposed to guest */
> +#define KVM_ARCH_FLAG_GUEST_HAS_SVE		9
>  	unsigned long flags;
> 
>  	/* VM-wide vCPU feature set */
> @@ -862,8 +864,6 @@ struct kvm_vcpu_arch {
>  #define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
>  #define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
> 
> -/* SVE exposed to guest */
> -#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
>  /* SVE config completed */
>  #define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
>  /* KVM_ARM_VCPU_INIT completed */
> @@ -956,8 +956,10 @@ struct kvm_vcpu_arch {
>  				       KVM_GUESTDBG_USE_HW | \
>  				       KVM_GUESTDBG_SINGLESTEP)
> 
> -#define vcpu_has_sve(vcpu) (system_supports_sve() && \
> -			    vcpu_get_flag(vcpu, GUEST_HAS_SVE))
> +#define kvm_has_sve(kvm) \
> +	test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE, &(kvm)->arch.flags)
> +#define vcpu_has_sve(vcpu) \
> +	kvm_has_sve((vcpu)->kvm)
Two things:
- it is definitely worth keeping the system_supports_sve() helper, so
  that we avoid checking flags for nothing
- Since you preserve the vcpu_has_sve(), how about writing it as:
#ifdef __KVM_NVHE_HYPERVISOR__
#define vcpu_has_sve(v)		kvm_has_sve(kern_hyp_va((v)->kvm))
#else
#define vcpu_has_sve(v)		kvm_has_sve((v)->kvm)
#endif
which avoids the churn in this patch and makes it hard to get it
wrong?
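Combining the two points, the end result could look like this
(untested):

#define kvm_has_sve(kvm)	(system_supports_sve() &&		\
				 test_bit(KVM_ARCH_FLAG_GUEST_HAS_SVE,	\
					  &(kvm)->arch.flags))

#ifdef __KVM_NVHE_HYPERVISOR__
#define vcpu_has_sve(vcpu)	kvm_has_sve(kern_hyp_va((vcpu)->kvm))
#else
#define vcpu_has_sve(vcpu)	kvm_has_sve((vcpu)->kvm)
#endif

so the system_supports_sve() check still short-circuits everything on
!SVE hosts, and all the existing vcpu_has_sve() users compile
unchanged in both contexts.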
Thanks,
M.
--
Without deviation from the norm, progress is not possible.