[PATCH v8 6/6] KVM: arm64: Refactor writings for PMUVer/CSV2/CSV3

Jing Zhang jingzhangos at google.com
Wed May 17 15:55:44 PDT 2023


Hi Suraj,

On Wed, May 17, 2023 at 3:00 PM Jitindar Singh, Suraj
<surajjs at amazon.com> wrote:
>
> On Wed, 2023-05-03 at 17:16 +0000, Jing Zhang wrote:
> > Refactor writings for ID_AA64PFR0_EL1.[CSV2|CSV3],
> > ID_AA64DFR0_EL1.PMUVer and ID_DFR0_EL1.PerfMon based on utilities
> > introduced by ID register descriptor array.
> >
> > Signed-off-by: Jing Zhang <jingzhangos at google.com>
> > ---
> >  arch/arm64/include/asm/cpufeature.h |   1 +
> >  arch/arm64/kernel/cpufeature.c      |   2 +-
> >  arch/arm64/kvm/id_regs.c            | 361 ++++++++++++++++++----------
> >  3 files changed, 242 insertions(+), 122 deletions(-)
> >
> >
>
> [ SNIP ]
>
> >
> > +static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
> > +                                         const struct sys_reg_desc *rd)
> > +{
> > +       u64 val;
> > +       u32 id = reg_to_encoding(rd);
> > +
> > +       val = read_sanitised_ftr_reg(id);
> > +       /*
> > +        * The default is to expose CSV2 == 1 if the HW isn't affected.
> > +        * Although this is a per-CPU feature, we make it global because
> > +        * asymmetric systems are just a nuisance.
> > +        *
> > +        * Userspace can override this as long as it doesn't promise
> > +        * the impossible.
> > +        */
> > +       if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
> > +               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
> > +               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), 1);
> > +       }
> > +       if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
> > +               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
> > +               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), 1);
> > +       }
> > +
> > +       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
> > +
> > +       return val;
> > +}
> > +
> >  static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
> >                                const struct sys_reg_desc *rd,
> >                                u64 val)
> >  {
> > -       struct kvm_arch *arch = &vcpu->kvm->arch;
> > -       u64 sval = val;
> >         u8 csv2, csv3;
> > -       int ret = 0;
> >
> >         /*
> >          * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
> > @@ -226,26 +338,30 @@ static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
> >         if (csv3 > 1 || (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
> >                 return -EINVAL;
>
> Can't we remove the csv[23] checks here, as they will be performed by
> arm64_check_features()?
>
> i.e. in arm64_check_features() we will load the "limit" value from the
> "reset" function (read_sanitised_id_aa64pfr0_el1()), which has csv[23]
> set appropriately, and limit it to a safe value, basically performing
> the same check as we do here.
The limit and the check here can differ, e.g. when
arm64_get_meltdown_state() is not SPECTRE_UNAFFECTED.
i.e. if we removed the check here, csv3 could theoretically be set to a
value greater than 1 while arm64_get_meltdown_state() is not
SPECTRE_UNAFFECTED.
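To make this concrete, here is roughly the guard the patch keeps (the
csv3 extraction is paraphrased with FIELD_GET(); the exact extraction in
the patch may use the cpufeature helpers instead):

    u8 csv3 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), val);

    /* Reject csv3 > 1 outright, and reject any non-zero csv3 when
     * the host is (or may be) affected by Meltdown. Relying only on
     * the sanitised limit could theoretically let csv3 be set above
     * 1 when the host state is not SPECTRE_UNAFFECTED.
     */
    if (csv3 > 1 ||
        (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
            return -EINVAL;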
>
> >
> > -       mutex_lock(&arch->config_lock);
> > -       /* We can only differ with CSV[23], and anything else is an error */
> > -       val ^= read_id_reg(vcpu, rd);
> > -       val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
> > -                ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
> > -       if (val) {
> > -               ret = -EINVAL;
> > -               goto out;
> > -       }
> > +       return set_id_reg(vcpu, rd, val);
> > +}
> >
> > -       /* Only allow userspace to change the idregs before VM running */
> > -       if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &vcpu->kvm->arch.flags)) {
> > -               if (sval != read_id_reg(vcpu, rd))
> > -                       ret = -EBUSY;
> > -       } else {
> > -               IDREG(vcpu->kvm, reg_to_encoding(rd)) = sval;
> > -       }
> > -out:
> > -       mutex_unlock(&arch->config_lock);
> > -       return ret;
> > +static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> > +                                         const struct sys_reg_desc *rd)
> > +{
> > +       u64 val;
> > +       u32 id = reg_to_encoding(rd);
> > +
> > +       val = read_sanitised_ftr_reg(id);
> > +       /* Limit debug to ARMv8.0 */
> > +       val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
> > +       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
> > +       /*
> > +        * Initialise the default PMUver before there is a chance to
> > +        * create an actual PMU.
> > +        */
> > +       val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > +       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                         kvm_arm_pmu_get_pmuver_limit());
> > +       /* Hide SPE from guests */
> > +       val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
> > +
> > +       return val;
> >  }
> >
> >  static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> > @@ -255,7 +371,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> >         struct kvm_arch *arch = &vcpu->kvm->arch;
> >         u8 pmuver, host_pmuver;
> >         bool valid_pmu;
> > -       u64 sval = val;
> >         int ret = 0;
> >
> >         host_pmuver = kvm_arm_pmu_get_pmuver_limit();
> > @@ -277,40 +392,61 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> >                 return -EINVAL;
> >
> >         mutex_lock(&arch->config_lock);
> > -       /* We can only differ with PMUver, and anything else is an error */
> > -       val ^= read_id_reg(vcpu, rd);
> > -       val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > -       if (val) {
> > -               ret = -EINVAL;
> > -               goto out;
> > -       }
> > -
> >         /* Only allow userspace to change the idregs before VM running */
> >         if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &vcpu->kvm->arch.flags)) {
> > -               if (sval != read_id_reg(vcpu, rd))
> > +               if (val != read_id_reg(vcpu, rd))
> >                         ret = -EBUSY;
> > -       } else {
> > -               if (valid_pmu) {
> > -                       val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> > -                       val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > -                       val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
> > -                       IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> > -
> > -                       val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> > -                       val &= ~ID_DFR0_EL1_PerfMon_MASK;
> > -                       val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, pmuver_to_perfmon(pmuver));
> > -                       IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> > -               } else {
> > -                       assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > -                                  pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> > -               }
> > +               goto out;
> > +       }
> > +
> > +       if (!valid_pmu) {
> > +               /*
> > +                * Ignore the PMUVer filed in @val. The PMUVer would be determined
>
> Nit s/filed/field
Will fix.
>
> > +                * by arch flags bit KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU,
> > +                */
> > +               pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer_MASK,
> > +                                  IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> > +               val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > +               val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);
> >         }
> >
> > +       ret = arm64_check_features(vcpu, rd, val);
> > +       if (ret)
> > +               goto out;
> > +
> > +       IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> > +
> > +       val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> > +       val &= ~ID_DFR0_EL1_PerfMon_MASK;
> > +       val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, pmuver_to_perfmon(pmuver));
> > +       IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> > +
> > +       if (!valid_pmu)
> > +               assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > +                          pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> > +
> >  out:
> >         mutex_unlock(&arch->config_lock);
> >         return ret;
> >  }
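(Side note for readers following along: the FIELD_GET()/FIELD_PREP()
pairs above are the standard <linux/bitfield.h> idiom for replacing a
single field of a register value. A minimal standalone sketch, with the
PMUVer field position written out by hand rather than taken from the
generated sysreg definitions:

    #include <linux/types.h>
    #include <linux/bits.h>
    #include <linux/bitfield.h>

    /* ID_AA64DFR0_EL1.PMUVer occupies bits [11:8] */
    #define PMUVER_MASK	GENMASK_ULL(11, 8)

    static u64 replace_pmuver(u64 reg, u8 pmuver)
    {
            reg &= ~PMUVER_MASK;                    /* clear the old field */
            reg |= FIELD_PREP(PMUVER_MASK, pmuver); /* insert the new value */
            return reg;
    }

The same pattern keeps ID_DFR0_EL1.PerfMon in sync via
pmuver_to_perfmon(), as in the hunk above.)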
> >
> > +static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
> > +                                     const struct sys_reg_desc *rd)
> > +{
> > +       u64 val;
> > +       u32 id = reg_to_encoding(rd);
> > +
> > +       val = read_sanitised_ftr_reg(id);
> > +       /*
> > +        * Initialise the default PMUver before there is a chance to
> > +        * create an actual PMU.
> > +        */
> > +       val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
> > +       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), kvm_arm_pmu_get_pmuver_limit());
> > +
> > +       return val;
> > +}
> > +
> >  static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >                            const struct sys_reg_desc *rd,
> >                            u64 val)
> > @@ -318,7 +454,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >         struct kvm_arch *arch = &vcpu->kvm->arch;
> >         u8 perfmon, host_perfmon;
> >         bool valid_pmu;
> > -       u64 sval = val;
> >         int ret = 0;
> >
> >         host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
> > @@ -341,35 +476,39 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >                 return -EINVAL;
> >
> >         mutex_lock(&arch->config_lock);
> > -       /* We can only differ with PerfMon, and anything else is an error */
> > -       val ^= read_id_reg(vcpu, rd);
> > -       val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
> > -       if (val) {
> > -               ret = -EINVAL;
> > -               goto out;
> > -       }
> > -
> >         /* Only allow userspace to change the idregs before VM running */
> >         if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &vcpu->kvm->arch.flags)) {
> > -               if (sval != read_id_reg(vcpu, rd))
> > +               if (val != read_id_reg(vcpu, rd))
> >                         ret = -EBUSY;
> > -       } else {
> > -               if (valid_pmu) {
> > -                       val = IDREG(vcpu->kvm, SYS_ID_DFR0_EL1);
> > -                       val &= ~ID_DFR0_EL1_PerfMon_MASK;
> > -                       val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> > -                       IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> > -
> > -                       val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> > -                       val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > -                       val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, perfmon_to_pmuver(perfmon));
> > -                       IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> > -               } else {
> > -                       assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > -                                  perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> > -               }
> > +               goto out;
> > +       }
> > +
> > +       if (!valid_pmu) {
> > +               /*
> > +                * Ignore the PerfMon filed in @val. The PerfMon would be determined
>
> Nit s/filed/field
Thanks, will fix it.
>
> > +                * by arch flags bit KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU,
> > +                */
> > +               perfmon = FIELD_GET(ID_DFR0_EL1_PerfMon_MASK,
> > +                                   IDREG(vcpu->kvm, SYS_ID_DFR0_EL1));
> > +               val &= ~ID_DFR0_EL1_PerfMon_MASK;
> > +               val |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> >         }
> >
> > +       ret = arm64_check_features(vcpu, rd, val);
> > +       if (ret)
> > +               goto out;
> > +
> > +       IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) = val;
> > +
> > +       val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
> > +       val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > +       val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, perfmon_to_pmuver(perfmon));
> > +       IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) = val;
> > +
> > +       if (!valid_pmu)
> > +               assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > +                          perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> > +
> >  out:
> >         mutex_unlock(&arch->config_lock);
> >         return ret;
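
For anyone testing these overrides from userspace, the flow is the
usual KVM_{GET,SET}_ONE_REG dance. A hedged sketch (error handling
omitted, the CSV2 field position and register encoding written out by
hand; builds against the arm64 uapi headers):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* ID_AA64PFR0_EL1: op0=3, op1=0, CRn=0, CRm=4, op2=0 */
    #define ID_AA64PFR0_EL1_ID	ARM64_SYS_REG(3, 0, 0, 4, 0)

    static void clear_csv2(int vcpu_fd)
    {
            uint64_t val;
            struct kvm_one_reg reg = {
                    .id   = ID_AA64PFR0_EL1_ID,
                    .addr = (uint64_t)&val,
            };

            ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
            val &= ~(0xfULL << 56);	/* CSV2 is bits [59:56] */
            /* Lowering a field is allowed; raising it above the
             * sanitised limit is rejected, and any change after the
             * VM has run returns -EBUSY, per the hunks above.
             */
            ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }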
>
> Otherwise looks good!
>
> Thanks,
> Suraj

Thanks,
Jing


