[PATCH v2 02/25] KVM: arm64: Add feature checking helpers
Marc Zyngier
maz at kernel.org
Tue Jan 30 12:45:09 PST 2024
In order to make it easier to check whether a particular feature
is exposed to a guest, add a new set of helpers, with kvm_has_feat()
being the most useful.
Let's start making use of them in the PMU code (courtesy of Oliver).
Follow-up work will introduce additional usage patterns.
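For example, the open-coded PMUv3p5 check:

	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5)

becomes:

	if (kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))

with the signedness of the field handled by the helper itself.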
Co-developed-by: Oliver Upton <oliver.upton at linux.dev>
Signed-off-by: Oliver Upton <oliver.upton at linux.dev>
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
arch/arm64/include/asm/kvm_host.h | 53 +++++++++++++++++++++++++++++++
arch/arm64/kvm/pmu-emul.c | 11 ++++---
arch/arm64/kvm/sys_regs.c | 6 ++--
include/kvm/arm_pmu.h | 11 -------
4 files changed, 61 insertions(+), 20 deletions(-)
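A note on the signed helpers, since the shift pair may look odd at
first sight: ID register fields such as ID_AA64DFR0_EL1.PMUVer are
signed, and the left/right shift sequence sign-extends the field so
that comparisons behave. As a worked example (values taken from the
sysreg description, where PMUVer is 4 bits wide and IMP_DEF is
encoded as 0b1111):

	s64 v = 0xf;	/* PMUVer == IMP_DEF		*/
	v <<= 64 - 4;	/* field MSB moved to bit 63	*/
	v >>= 64 - 4;	/* arithmetic shift: v == -1	*/

so kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P5) is false for an
IMP_DEF PMU, where an unsigned comparison (0xf >= 0x6) would wrongly
report the feature as present.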
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 21c57b812569..c0cf9c5f5e8d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1233,4 +1233,57 @@ static inline void kvm_hyp_reserve(void) { }
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
+#define __expand_field_sign_unsigned(id, fld, val) \
+ ((u64)(id##_##fld##_##val))
+
+#define __expand_field_sign_signed(id, fld, val) \
+ ({ \
+ s64 __val = id##_##fld##_##val; \
+ __val <<= 64 - id##_##fld##_WIDTH; \
+ __val >>= 64 - id##_##fld##_WIDTH; \
+ \
+ __val; \
+ })
+
+#define expand_field_sign(id, fld, val) \
+ (id##_##fld##_SIGNED ? \
+ __expand_field_sign_signed(id, fld, val) : \
+ __expand_field_sign_unsigned(id, fld, val))
+
+#define get_idreg_field_unsigned(kvm, id, fld) \
+ ({ \
+ u64 __val = IDREG(kvm, SYS_##id); \
+ __val &= id##_##fld##_MASK; \
+ __val >>= id##_##fld##_SHIFT; \
+ \
+ __val; \
+ })
+
+#define get_idreg_field_signed(kvm, id, fld) \
+ ({ \
+ s64 __val = IDREG(kvm, SYS_##id); \
+ __val <<= 64 - id##_##fld##_SHIFT - id##_##fld##_WIDTH; \
+ __val >>= 64 - id##_##fld##_WIDTH; \
+ \
+ __val; \
+ })
+
+#define get_idreg_field_enum(kvm, id, fld) \
+ get_idreg_field_unsigned(kvm, id, fld)
+
+#define get_idreg_field(kvm, id, fld) \
+ (id##_##fld##_SIGNED ? \
+ get_idreg_field_signed(kvm, id, fld) : \
+ get_idreg_field_unsigned(kvm, id, fld))
+
+#define kvm_has_feat(kvm, id, fld, limit) \
+ (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
+
+#define kvm_has_feat_enum(kvm, id, fld, val) \
+ (get_idreg_field_unsigned((kvm), id, fld) == id##_##fld##_##val)
+
+#define kvm_has_feat_range(kvm, id, fld, min, max) \
+ (get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
+ get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 3d9467ff73bc..925522470b2b 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
{
u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
kvm_pmu_event_mask(kvm);
- u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
- if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
mask |= ARMV8_PMU_INCLUDE_EL2;
- if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
+ if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
ARMV8_PMU_EXCLUDE_NS_EL1 |
ARMV8_PMU_EXCLUDE_EL3;
@@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
*/
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
{
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+
return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
- kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
+ kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
}
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
@@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
return;
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
- if (!kvm_pmu_is_3p5(vcpu))
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
val &= ~ARMV8_PMU_PMCR_LP;
/* The reset bits don't indicate any state, and shouldn't be saved. */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 041b11825578..3c31f8cb9eef 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -505,10 +505,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
u32 sr = reg_to_encoding(r);
- if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
+ if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
kvm_inject_undefined(vcpu);
return false;
}
@@ -2748,8 +2747,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
return ignore_write(vcpu, p);
} else {
u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
- u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
- u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
+ u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 4b9d8fb393a8..eb4c369a79eb 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -90,16 +90,6 @@ void kvm_vcpu_pmu_resync_el0(void);
vcpu->arch.pmu.events = *kvm_get_pmu_events(); \
} while (0)
-/*
- * Evaluates as true when emulating PMUv3p5, and false otherwise.
- */
-#define kvm_pmu_is_3p5(vcpu) ({ \
- u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); \
- u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); \
- \
- pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5; \
-})
-
u8 kvm_arm_pmu_get_pmuver_limit(void);
u64 kvm_pmu_evtyper_mask(struct kvm *kvm);
int kvm_arm_set_default_pmu(struct kvm *kvm);
@@ -168,7 +158,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
}
#define kvm_vcpu_has_pmu(vcpu) ({ false; })
-#define kvm_pmu_is_3p5(vcpu) ({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
--
2.39.2