[PATCH v4 12/16] KVM: arm64: PMU: Allow ID_DFR0_EL1.PerfMon to be set from userspace
Marc Zyngier
maz at kernel.org
Sun Nov 13 08:38:28 PST 2022
Allow userspace to write ID_DFR0_EL1, on the condition that only
the PerfMon field is altered, and that its value is compatible
with what was computed for the AArch64 view of the guest.
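For reference, the expected userspace flow is a read-modify-write of
the register through the ONE_REG API. The sketch below is illustrative
only and not part of this patch: the set_perfmon() helper and the two
EXAMPLE_* defines are invented for the example, while KVM_GET_ONE_REG,
KVM_SET_ONE_REG and ARM64_SYS_REG are the existing UAPI. ID_DFR0_EL1
encodes as Op0=3, Op1=0, CRn=0, CRm=1, Op2=2, and PerfMon occupies
bits [27:24].

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* ID_DFR0_EL1: Op0=3, Op1=0, CRn=0, CRm=1, Op2=2 */
  #define EXAMPLE_ID_DFR0_EL1	ARM64_SYS_REG(3, 0, 0, 1, 2)
  #define EXAMPLE_PERFMON_MASK	(0xfULL << 24)

  /* Read-modify-write ID_DFR0_EL1.PerfMon on a vcpu fd. */
  static int set_perfmon(int vcpu_fd, uint64_t perfmon)
  {
  	uint64_t val;
  	struct kvm_one_reg reg = {
  		.id   = EXAMPLE_ID_DFR0_EL1,
  		.addr = (uint64_t)&val,
  	};

  	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
  		return -1;

  	val &= ~EXAMPLE_PERFMON_MASK;
  	val |= (perfmon << 24) & EXAMPLE_PERFMON_MASK;

  	/* Rejected with EINVAL if the value breaks the rules below. */
  	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
  }

A write claiming more than the host's AArch64 PMU version, or a
non-zero PerfMon below PMUv3, fails with -EINVAL, as does a value that
disagrees with whether the vcpu actually has a PMU.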
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
arch/arm64/kvm/sys_regs.c | 57 ++++++++++++++++++++++++++++++++++++++-
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3cbcda665d23..dc201a0557c0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1070,6 +1070,19 @@ static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
 	return vcpu->kvm->arch.dfr0_pmuver.unimp;
 }
 
+static u8 perfmon_to_pmuver(u8 perfmon)
+{
+	switch (perfmon) {
+	case ID_DFR0_PERFMON_8_0:
+		return ID_AA64DFR0_EL1_PMUVer_IMP;
+	case ID_DFR0_PERFMON_IMP_DEF:
+		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
+	default:
+		/* Anything ARMv8.1+ has the same value. For now. */
+		return perfmon;
+	}
+}
+
 static u8 pmuver_to_perfmon(u8 pmuver)
 {
 	switch (pmuver) {
@@ -1281,6 +1294,46 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
+			   const struct sys_reg_desc *rd,
+			   u64 val)
+{
+	u8 perfmon, host_perfmon;
+	bool valid_pmu;
+
+	host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
+
+	/*
+	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
+	 * it doesn't promise more than what the HW gives us on the
+	 * AArch64 side (as everything is emulated with that), and
+	 * that this is a PMUv3.
+	 */
+	perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_PERFMON), val);
+	if ((perfmon != ID_DFR0_PERFMON_IMP_DEF && perfmon > host_perfmon) ||
+	    (perfmon != 0 && perfmon < ID_DFR0_PERFMON_8_0))
+		return -EINVAL;
+
+	valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_PERFMON_IMP_DEF);
+
+	/* Make sure view register and PMU support do match */
+	if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
+		return -EINVAL;
+
+	/* We can only differ with PerfMon, and anything else is an error */
+	val ^= read_id_reg(vcpu, rd);
+	val &= ~ARM64_FEATURE_MASK(ID_DFR0_PERFMON);
+	if (val)
+		return -EINVAL;
+
+	if (valid_pmu)
+		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
+	else
+		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
+
+	return 0;
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -1502,7 +1555,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	/* CRm=1 */
 	AA32_ID_SANITISED(ID_PFR0_EL1),
 	AA32_ID_SANITISED(ID_PFR1_EL1),
-	AA32_ID_SANITISED(ID_DFR0_EL1),
+	{ SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
+	  .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
+	  .visibility = aa32_id_visibility, },
 	ID_HIDDEN(ID_AFR0_EL1),
 	AA32_ID_SANITISED(ID_MMFR0_EL1),
 	AA32_ID_SANITISED(ID_MMFR1_EL1),
--
2.34.1