[PATCH v2 05/12] KVM: arm64: Initialize feature id registers for protected VMs

Fuad Tabba tabba at google.com
Mon Nov 25 03:58:58 PST 2024


Hi Marc,

On Sun, 24 Nov 2024 at 12:12, Marc Zyngier <maz at kernel.org> wrote:
>
> On Fri, 22 Nov 2024 11:06:15 +0000,
> Fuad Tabba <tabba at google.com> wrote:
> >
> > The hypervisor maintains the state of protected VMs. Initialize
> > the values for feature ID registers for protected VMs, to be used
> > when setting traps and when advertising features to protected
> > VMs.
> >
> > Signed-off-by: Fuad Tabba <tabba at google.com>
> > ---
> >  .../arm64/kvm/hyp/include/nvhe/fixed_config.h |  1 +
> >  arch/arm64/kvm/hyp/nvhe/pkvm.c                |  4 ++
> >  arch/arm64/kvm/hyp/nvhe/sys_regs.c            | 54 +++++++++++++++++--
> >  3 files changed, 56 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
> > index d1e59b88ff66..69e26d1a0ebe 100644
> > --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
> > +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
> > @@ -201,6 +201,7 @@
> >  u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
> >  bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
> >  bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
> > +void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
> >  int kvm_check_pvm_sysreg_table(void);
> >
> >  #endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
> > diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
> > index 59ff6aac514c..4ef03294b2b4 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
> > @@ -381,6 +381,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
> >       hyp_vm->kvm.created_vcpus = nr_vcpus;
> >       hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
> >       hyp_vm->kvm.arch.pkvm.enabled = READ_ONCE(host_kvm->arch.pkvm.enabled);
> > +     hyp_vm->kvm.arch.flags = 0;
> >       pkvm_init_features_from_host(hyp_vm, host_kvm);
> >  }
> >
> > @@ -419,6 +420,9 @@ static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
> >       hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
> >       hyp_vcpu->vcpu.arch.mp_state.mp_state = KVM_MP_STATE_STOPPED;
> >
> > +     if (pkvm_hyp_vcpu_is_protected(hyp_vcpu))
> > +             kvm_init_pvm_id_regs(&hyp_vcpu->vcpu);
> > +
> >       ret = pkvm_vcpu_init_traps(hyp_vcpu);
> >       if (ret)
> >               goto done;
> > diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> > index 59fb2f056177..7008e9641f41 100644
> > --- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> > +++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
> > @@ -204,8 +204,7 @@ static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
> >       return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
> >  }
> >
> > -/* Read a sanitized cpufeature ID register by its encoding */
> > -u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
> > +static u64 pvm_calc_id_reg(const struct kvm_vcpu *vcpu, u32 id)
> >  {
> >       switch (id) {
> >       case SYS_ID_AA64PFR0_EL1:
> > @@ -240,10 +239,25 @@ u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
> >       }
> >  }
> >
> > +/* Read a sanitized cpufeature ID register by its encoding */
> > +u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
> > +{
> > +     return pvm_calc_id_reg(vcpu, id);
> > +}
> > +
>
> Whatever reason you have to keep this per-CPU, it really needs to
> change to be VM-scoped. Maybe not immediately, but that should be on
> your list.

Ack.
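
For the record, the direction I have in mind (untested sketch, not for this
series) is to have pvm_read_id_reg() simply return the VM-scoped value from
kvm->arch.id_regs instead of recomputing it per vCPU. IDREG_IDX() is only
valid for the feature ID block (CRm 1-7), so this assumes callers never pass
anything outside that range:

u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
{
        struct kvm *kvm = vcpu->kvm;

        if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED,
                                   &kvm->arch.flags)))
                return 0;

        /* Return the value computed once at VM initialization time. */
        return kvm->arch.id_regs[IDREG_IDX(id)];
}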

> >  static u64 read_id_reg(const struct kvm_vcpu *vcpu,
> >                      struct sys_reg_desc const *r)
> >  {
> > -     return pvm_read_id_reg(vcpu, reg_to_encoding(r));
> > +     struct kvm *kvm = vcpu->kvm;
> > +     u32 reg = reg_to_encoding(r);
> > +
> > +     if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)))
> > +             return 0;
> > +
> > +     if (reg >= sys_reg(3, 0, 0, 4, 0) && reg <= sys_reg(3, 0, 0, 7, 7))
> > +             return kvm->arch.id_regs[IDREG_IDX(reg)];
>
> Why the limit to the AA64 idregs? Aren't the other shadow sysreg
> values correctly initialised to 0?

Yes, they are. I'll expand this to cover those as well.
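
Concretely, something along these lines (sketch, might still change when I
respin): widen the range check to the whole feature ID block, relying on the
unset entries in kvm->arch.id_regs being zero so the AArch32 and reserved
encodings in that block read as zero:

static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                       struct sys_reg_desc const *r)
{
        struct kvm *kvm = vcpu->kvm;
        u32 reg = reg_to_encoding(r);

        if (WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED,
                                   &kvm->arch.flags)))
                return 0;

        /* The whole feature ID block (CRm 1-7) is backed by id_regs. */
        if (reg >= sys_reg(3, 0, 0, 1, 0) && reg <= sys_reg(3, 0, 0, 7, 7))
                return kvm->arch.id_regs[IDREG_IDX(reg)];

        /* Anything else trapped here is RAZ. */
        return 0;
}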


> > +
> > +     return 0;
> >  }
> >
> >  /* Handler to RAZ/WI sysregs */
> > @@ -448,6 +462,40 @@ static const struct sys_reg_desc pvm_sys_reg_descs[] = {
> >       /* Performance Monitoring Registers are restricted. */
> >  };
> >
> > +/*
> > + * Initializes feature registers for protected VMs.
> > + */
> > +void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
> > +{
> > +     const u32 pvm_feat_id_regs[] = {
>
> static?

Yes, and see below.

> > +             SYS_ID_AA64PFR0_EL1,
> > +             SYS_ID_AA64PFR1_EL1,
> > +             SYS_ID_AA64ISAR0_EL1,
> > +             SYS_ID_AA64ISAR1_EL1,
> > +             SYS_ID_AA64ISAR2_EL1,
> > +             SYS_ID_AA64ZFR0_EL1,
> > +             SYS_ID_AA64MMFR0_EL1,
> > +             SYS_ID_AA64MMFR1_EL1,
> > +             SYS_ID_AA64MMFR2_EL1,
> > +             SYS_ID_AA64MMFR4_EL1,
> > +             SYS_ID_AA64DFR0_EL1,
> > +     };
> > +     struct kvm *kvm = vcpu->kvm;
> > +     unsigned long i;
> > +
> > +     if (WARN_ON_ONCE(test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags)))
> > +             return;
> > +
> > +     for (i = 0; i < ARRAY_SIZE(pvm_feat_id_regs); i++) {
>
> But do you really need this array? Can't you just iterate over the
> idreg space and compute the value? Sure, that'd be a bit longer, but
> at least you would define the idregs you care about in a single place.

Like you said, I was trying to save a couple of cycles, but I'll
change it as you suggest.
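
i.e., something like the below (sketch): walk the AArch64 ID block directly
and drop the array, leaving the AArch32 entries in id_regs zero-initialized.
Op2 is the least-significant field of the sysreg encoding, so stepping by one
Op2 increment (sys_reg(0, 0, 0, 0, 1)) walks the block contiguously:

void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_arch *ka = &kvm->arch;
        u32 reg;

        if (WARN_ON_ONCE(test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED,
                                  &kvm->arch.flags)))
                return;

        /* Compute every AArch64 feature ID register in one pass. */
        for (reg = sys_reg(3, 0, 0, 4, 0); reg <= sys_reg(3, 0, 0, 7, 7);
             reg += sys_reg(0, 0, 0, 0, 1))
                ka->id_regs[IDREG_IDX(reg)] = pvm_calc_id_reg(vcpu, reg);

        set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
}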

> > +             struct kvm_arch *ka = &kvm->arch;
> > +             u32 reg = pvm_feat_id_regs[i];
> > +
> > +             ka->id_regs[IDREG_IDX(reg)] = pvm_calc_id_reg(vcpu, reg);
> > +     }
> > +
> > +     set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);
> > +}
> > +
> >  /*
> >   * Checks that the sysreg table is unique and in-order.
> >   *
>
> Thanks,

Thank you!


/fuad

>         M.
>
> --
> Without deviation from the norm, progress is not possible.
