[PATCH] KVM: arm64: fix MPIDR to vcpu index cache to support vcpu changes at runtime
From: Ting Li <liting8@huawei.com>
Consider vCPUs changing at runtime: for example, online_vcpus is 8 but
only 2 vCPU threads are started. The cache entry mapping an MPIDR to
vCPU index 0 may be overwritten and the VM hangs. When the remaining
vCPU threads are started later, the cache is outdated and lookups
always mismatch.
To solve this problem, update the code as follows:
1. Only vCPU 0 initialises the MPIDR data the first time, and use
   per-index flags to avoid overwriting an already-initialised index
   (see the sketch below).
2. On an MPIDR cache mismatch, fall back to the old iterative method
   and reinitialise the MPIDR to vCPU index cache.
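For illustration only (not part of the patch), a minimal user-space
sketch of both points; the toy_* names, NR_ENTRIES and the affinity
values are hypothetical:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_ENTRIES 4
    #define NR_VCPUS   3

    /* Toy index: low bits of the affinity value, like the masked MPIDR. */
    static uint16_t toy_index(uint64_t aff)
    {
    	return aff & (NR_ENTRIES - 1);
    }

    static uint64_t vcpu_aff[NR_VCPUS] = { 0x0, 0x4, 0x1 }; /* 0x0, 0x4 collide */
    static int cache[NR_ENTRIES];
    static bool inited[NR_ENTRIES];

    static void toy_init_cache(void)
    {
    	for (int i = 0; i < NR_ENTRIES; i++) {
    		cache[i] = -1;
    		inited[i] = false;
    	}
    	for (int c = 0; c < NR_VCPUS; c++) {
    		uint16_t idx = toy_index(vcpu_aff[c]);

    		/* Point 1: without this guard, vcpu 1 (aff 0x4) would
    		 * overwrite the slot already claimed by vcpu 0 (aff 0x0). */
    		if (inited[idx])
    			continue;
    		cache[idx] = c;
    		inited[idx] = true;
    	}
    }

    static int toy_lookup(uint64_t aff)
    {
    	int c = cache[toy_index(aff)];

    	if (c >= 0 && vcpu_aff[c] == aff)
    		return c;

    	/* Point 2: on mismatch, fall back to the iterative search
    	 * (the real patch also reinitialises the cache here). */
    	for (int i = 0; i < NR_VCPUS; i++)
    		if (vcpu_aff[i] == aff)
    			return i;
    	return -1;
    }

    int main(void)
    {
    	toy_init_cache();
    	printf("aff 0x0 -> vcpu %d\n", toy_lookup(0x0)); /* cache hit: 0 */
    	printf("aff 0x4 -> vcpu %d\n", toy_lookup(0x4)); /* fallback: 1 */
    	return 0;
    }

Here aff 0x0 and 0x4 collide under the 2-bit mask; the flag keeps
vcpu 0's entry and the 0x4 lookup takes the fallback path.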
Signed-off-by: Ting Li <liting8 at huawei.com>
---
arch/arm64/kvm/arm.c | 49 ++++++++++++++++++++++++++++++++++++--------
1 file changed, 41 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e5f75f1f1085..a3c038ad2f41 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -584,14 +584,16 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
static void kvm_init_mpidr_data(struct kvm *kvm)
{
struct kvm_mpidr_data *data = NULL;
+ bool *index_inited_flags = NULL;
+ struct kvm_mpidr_data *data_to_del = NULL;
unsigned long c, mask, nr_entries;
u64 aff_set = 0, aff_clr = ~0UL;
struct kvm_vcpu *vcpu;
- mutex_lock(&kvm->arch.config_lock);
+ if (atomic_read(&kvm->online_vcpus) == 1)
+ return;
- if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
- goto out;
+ mutex_lock(&kvm->arch.config_lock);
kvm_for_each_vcpu(c, vcpu, kvm) {
u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
@@ -618,16 +620,37 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
if (!data)
goto out;
+ /*
+ * Use per-index flags to avoid overwriting an initialised index.
+ */
+ index_inited_flags = kcalloc(nr_entries, sizeof(bool), GFP_KERNEL);
+ if (!index_inited_flags) {
+ kfree(data);
+ goto out;
+ }
+
data->mpidr_mask = mask;
kvm_for_each_vcpu(c, vcpu, kvm) {
u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
u16 index = kvm_mpidr_index(data, aff);
+ /*
+ * If this index is already initialised, skip the conflicting vCPU.
+ */
+ if (index_inited_flags[index])
+ continue;
data->cmpidr_to_idx[index] = c;
+ index_inited_flags[index] = true;
}
-
+ /*
+ * If mpidr_data already exists, install the new data, then free the old.
+ */
+ if (kvm->arch.mpidr_data)
+ data_to_del = kvm->arch.mpidr_data;
kvm->arch.mpidr_data = data;
+ kfree(data_to_del);
+ kfree(index_inited_flags);
out:
mutex_unlock(&kvm->arch.config_lock);
}
@@ -655,7 +678,8 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (likely(vcpu_has_run_once(vcpu)))
return 0;
- kvm_init_mpidr_data(kvm);
+ if (vcpu->vcpu_id == 0)
+ kvm_init_mpidr_data(kvm);
kvm_arm_vcpu_init_debug(vcpu);
@@ -2459,15 +2483,24 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
vcpu = kvm_get_vcpu(kvm,
kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+ /*
+ * On MPIDR mismatch, fall back to the old iterative method.
+ */
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
vcpu = NULL;
-
- return vcpu;
+ else
+ return vcpu;
}
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
+ if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) {
+ /*
+ * Reinitialise the stale MPIDR to vcpu index cache.
+ */
+ if (kvm->arch.mpidr_data)
+ kvm_init_mpidr_data(kvm);
return vcpu;
+ }
}
return NULL;
}
--
2.33.0