[PATCH v3 2/4] KVM: arm64: Keep a list of probed PMUs
Reiji Watanabe
reijiw at google.com
Mon Dec 13 23:23:34 PST 2021
Hi Alex,
On Mon, Dec 13, 2021 at 7:23 AM Alexandru Elisei
<alexandru.elisei at arm.com> wrote:
>
> The ARM PMU driver calls kvm_host_pmu_init() after probing to tell KVM that
> a hardware PMU is available for guest emulation. Heterogeneous systems can
> have more than one PMU present, and the callback gets called multiple
> times, once for each of them. Keep track of all the PMUs available to KVM,
> as they're going to be needed later.
>
> Signed-off-by: Alexandru Elisei <alexandru.elisei at arm.com>
> ---
> arch/arm64/kvm/pmu-emul.c | 25 +++++++++++++++++++++++--
> include/kvm/arm_pmu.h     |  5 +++++
> 2 files changed, 28 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
> index a5e4bbf5e68f..eb4be96f144d 100644
> --- a/arch/arm64/kvm/pmu-emul.c
> +++ b/arch/arm64/kvm/pmu-emul.c
> @@ -7,6 +7,7 @@
>  #include <linux/cpu.h>
>  #include <linux/kvm.h>
>  #include <linux/kvm_host.h>
> +#include <linux/list.h>
>  #include <linux/perf_event.h>
>  #include <linux/perf/arm_pmu.h>
>  #include <linux/uaccess.h>
> @@ -14,6 +15,9 @@
>  #include <kvm/arm_pmu.h>
>  #include <kvm/arm_vgic.h>
>  
> +static LIST_HEAD(arm_pmus);
> +static DEFINE_MUTEX(arm_pmus_lock);
> +
>  static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
>  static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
>  static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
> @@ -742,9 +746,26 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
>  
>  void kvm_host_pmu_init(struct arm_pmu *pmu)
>  {
> -	if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> -	    !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
> +	struct arm_pmu_entry *entry;
> +
> +	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
> +	    is_protected_kvm_enabled())
> +		return;
> +
> +	mutex_lock(&arm_pmus_lock);
> +
> +	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
> +	if (!entry)
> +		goto out_unlock;
It might be better to take the lock after the kmalloc above is done?
(A GFP_KERNEL allocation might sleep, which would make the code hold
the lock longer than necessary.)
I don't think the current code will cause any problem though.
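Something like the following is what I had in mind (an untested
sketch, reusing the definitions from this patch); the behavior is
unchanged, the allocation just moves out of the critical section and
the out_unlock label goes away:

	void kvm_host_pmu_init(struct arm_pmu *pmu)
	{
		struct arm_pmu_entry *entry;

		if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF ||
		    is_protected_kvm_enabled())
			return;

		/* GFP_KERNEL may sleep, so allocate before taking arm_pmus_lock */
		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return;

		mutex_lock(&arm_pmus_lock);

		/* The first registered PMU enables the static key */
		if (list_empty(&arm_pmus))
			static_branch_enable(&kvm_arm_pmu_available);

		entry->arm_pmu = pmu;
		list_add_tail(&entry->entry, &arm_pmus);

		mutex_unlock(&arm_pmus_lock);
	}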
Reviewed-by: Reiji Watanabe <reijiw at google.com>
Thanks,
Reiji
> +
> +	if (list_empty(&arm_pmus))
>  		static_branch_enable(&kvm_arm_pmu_available);
> +
> +	entry->arm_pmu = pmu;
> +	list_add_tail(&entry->entry, &arm_pmus);
> +
> +out_unlock:
> +	mutex_unlock(&arm_pmus_lock);
>  }
>  
>  static int kvm_pmu_probe_pmuver(void)
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 90f21898aad8..e249c5f172aa 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -36,6 +36,11 @@ struct kvm_pmu {
>  	struct irq_work overflow_work;
>  };
>  
> +struct arm_pmu_entry {
> +	struct list_head entry;
> +	struct arm_pmu *arm_pmu;
> +};
> +
>  #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
>  u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
>  void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
> --
> 2.34.1
>