[RFC PATCH v2 2/7] KVM: arm64: Introduce support to pin VMIDs

Shameer Kolothum shameerali.kolothum.thodi at huawei.com
Thu Feb 8 07:18:32 PST 2024


Introduce kvm_arm_pinned_vmid_get() and kvm_arm_pinned_vmid_put() to pin
the VMID associated with a KVM instance. This guarantees that the VMID
remains the same across VMID allocator rollovers.

This is in preparation for adding support in the SMMUv3 driver to use the
KVM VMID for stage 2 (S2) configuration in nested mode.
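
For illustration only (not part of this patch), here is a rough sketch of
how a consumer such as the SMMUv3 nested-S2 path might use the new helpers.
The example_* functions and the s2vmid parameter are made up for this
sketch; only kvm_arm_pinned_vmid_get()/put() and kvm->arch.mmu.vmid come
from KVM, and error handling is kept minimal:

static int example_attach_kvm_vmid(struct kvm *kvm, u16 *s2vmid)
{
	unsigned long vmid;

	/* Pin the VMID so it survives allocator rollovers. */
	vmid = kvm_arm_pinned_vmid_get(&kvm->arch.mmu.vmid);
	if (!vmid)
		return -ENOSPC;	/* no pinned VMID slot available */

	/* Now safe to program into the S2 configuration as the VMID. */
	*s2vmid = vmid;
	return 0;
}

static void example_detach_kvm_vmid(struct kvm *kvm)
{
	/* Drop the pin; the VMID may be recycled on the next rollover. */
	kvm_arm_pinned_vmid_put(&kvm->arch.mmu.vmid);
}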

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi at huawei.com>
---
 arch/arm64/include/asm/kvm_host.h |  3 ++
 arch/arm64/kvm/vmid.c             | 84 ++++++++++++++++++++++++++++++-
 2 files changed, 86 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 21c57b812569..20fb00d29f48 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -141,6 +141,7 @@ int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
 
 struct kvm_vmid {
 	atomic64_t id;
+	refcount_t pinned;
 };
 
 struct kvm_s2_mmu {
@@ -1097,6 +1098,8 @@ int __init kvm_arm_vmid_alloc_init(void);
 void __init kvm_arm_vmid_alloc_free(void);
 bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
 void kvm_arm_vmid_clear_active(void);
+unsigned long kvm_arm_pinned_vmid_get(struct kvm_vmid *kvm_vmid);
+void kvm_arm_pinned_vmid_put(struct kvm_vmid *kvm_vmid);
 
 static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
 {
diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c
index 806223b7022a..0ffe24683071 100644
--- a/arch/arm64/kvm/vmid.c
+++ b/arch/arm64/kvm/vmid.c
@@ -25,6 +25,10 @@ static unsigned long *vmid_map;
 static DEFINE_PER_CPU(atomic64_t, active_vmids);
 static DEFINE_PER_CPU(u64, reserved_vmids);
 
+static unsigned long max_pinned_vmids;
+static unsigned long nr_pinned_vmids;
+static unsigned long *pinned_vmid_map;
+
 #define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
 #define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)
 
@@ -47,7 +51,10 @@ static void flush_context(void)
 	int cpu;
 	u64 vmid;
 
-	bitmap_zero(vmid_map, NUM_USER_VMIDS);
+	if (pinned_vmid_map)
+		bitmap_copy(vmid_map, pinned_vmid_map, NUM_USER_VMIDS);
+	else
+		bitmap_zero(vmid_map, NUM_USER_VMIDS);
 
 	for_each_possible_cpu(cpu) {
 		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
@@ -103,6 +110,14 @@ static u64 new_vmid(struct kvm_vmid *kvm_vmid)
 			return newvmid;
 		}
 
+		/*
+		 * If the VMID is pinned, we can keep using it. Note that the
+		 * reserved check above takes priority: even if the VMID is also
+		 * pinned, its generation must be updated in reserved_vmids.
+		 */
+		if (refcount_read(&kvm_vmid->pinned))
+			return newvmid;
+
 		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
 			atomic64_set(&kvm_vmid->id, newvmid);
 			return newvmid;
@@ -174,6 +189,63 @@ bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
 	return updated;
 }
 
+unsigned long kvm_arm_pinned_vmid_get(struct kvm_vmid *kvm_vmid)
+{
+	unsigned long flags;
+	u64 vmid;
+
+	if (!pinned_vmid_map)
+		return 0;
+
+	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+
+	vmid = atomic64_read(&kvm_vmid->id);
+
+	if (refcount_inc_not_zero(&kvm_vmid->pinned))
+		goto out_unlock;
+
+	if (nr_pinned_vmids >= max_pinned_vmids) {
+		vmid = 0;
+		goto out_unlock;
+	}
+
+	/*
+	 * If we went through one or more rollovers since this VMID was
+	 * last used, make sure it is still valid, or generate a new one.
+	 */
+	if (!vmid_gen_match(vmid))
+		vmid = new_vmid(kvm_vmid);
+
+	nr_pinned_vmids++;
+	__set_bit(vmid2idx(vmid), pinned_vmid_map);
+	refcount_set(&kvm_vmid->pinned, 1);
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+
+	vmid &= ~VMID_MASK;
+
+	return vmid;
+}
+
+void kvm_arm_pinned_vmid_put(struct kvm_vmid *kvm_vmid)
+{
+	unsigned long flags;
+	u64 vmid = atomic64_read(&kvm_vmid->id);
+
+	if (!pinned_vmid_map)
+		return;
+
+	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);
+
+	if (refcount_dec_and_test(&kvm_vmid->pinned)) {
+		__clear_bit(vmid2idx(vmid), pinned_vmid_map);
+		nr_pinned_vmids--;
+	}
+
+	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
+}
+
 /*
  * Initialize the VMID allocator
  */
@@ -191,10 +263,20 @@ int __init kvm_arm_vmid_alloc_init(void)
 	if (!vmid_map)
 		return -ENOMEM;
 
+	pinned_vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
+	nr_pinned_vmids = 0;
+
+	/*
+	 * Ensure we have at least one empty slot available after rollover,
+	 * even when the maximum number of VMIDs are pinned. VMID#0 is reserved.
+	 */
+	max_pinned_vmids = NUM_USER_VMIDS - num_possible_cpus() - 2;
+
 	return 0;
 }
 
 void __init kvm_arm_vmid_alloc_free(void)
 {
+	bitmap_free(pinned_vmid_map);
 	bitmap_free(vmid_map);
 }
-- 
2.34.1



