[PATCH v3 5/7] KVM: x86: Participate in bitmap-based PTE aging
James Houghton
jthoughton at google.com
Mon Apr 1 16:29:44 PDT 2024
Only handle the TDP MMU case for now. In all other cases, if a bitmap
was not provided, fall back to the slow path that takes mmu_lock; if a
bitmap was provided, inform the caller that the bitmap is unreliable.
Suggested-by: Yu Zhao <yuzhao at google.com>
Signed-off-by: James Houghton <jthoughton at google.com>
---
 arch/x86/include/asm/kvm_host.h | 14 ++++++++++++++
 arch/x86/kvm/mmu/mmu.c          | 16 ++++++++++++++--
 arch/x86/kvm/mmu/tdp_mmu.c      | 10 +++++++++-
 3 files changed, 37 insertions(+), 3 deletions(-)
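
For reference, the generic helpers used below (kvm_gfn_should_age(),
kvm_gfn_record_young(), kvm_age_set_unreliable()) are defined earlier in
this series. A minimal sketch of the behavior assumed by this patch, using
placeholder field names (bitmap, bitmap_base_gfn, unreliable) that are
illustrative rather than the series' actual layout:

static inline bool kvm_gfn_should_age(struct kvm_gfn_range *range, gfn_t gfn)
{
	/* Without a caller-provided bitmap, age everything in the range. */
	if (!range->bitmap)
		return true;

	/* Otherwise only age the gfns the caller selected. */
	return test_bit(gfn - range->bitmap_base_gfn, range->bitmap);
}

static inline void kvm_gfn_record_young(struct kvm_gfn_range *range, gfn_t gfn)
{
	/* Report a young gfn back to the caller through its bitmap. */
	if (range->bitmap)
		set_bit(gfn - range->bitmap_base_gfn, range->bitmap);
}

static inline void kvm_age_set_unreliable(struct kvm_gfn_range *range)
{
	/* Tell the caller that its bitmap was not honored. */
	range->unreliable = true;
}
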
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3b58e2306621..c30918d0887e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2324,4 +2324,18 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
  */
 #define KVM_EXIT_HYPERCALL_MBZ		GENMASK_ULL(31, 1)
 
+#define kvm_arch_prepare_bitmap_age kvm_arch_prepare_bitmap_age
+static inline bool kvm_arch_prepare_bitmap_age(struct mmu_notifier *mn)
+{
+	/*
+	 * Indicate that we support bitmap-based aging when using the TDP MMU
+	 * and the accessed bit is available in the TDP page tables.
+	 *
+	 * We have no other preparatory work to do here, so we do not need to
+	 * redefine kvm_arch_finish_bitmap_age().
+	 */
+	return IS_ENABLED(CONFIG_X86_64) && tdp_mmu_enabled
+		&& shadow_accessed_mask;
+}
+
 #endif /* _ASM_X86_KVM_HOST_H */
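
Note that only the prepare hook is overridden here. A sketch of the
assumed generic defaults (illustrative only; the real definitions come
from the earlier patch in this series that adds the hooks):

#ifndef kvm_arch_prepare_bitmap_age
/* Assumed default: architectures that do not opt in get no bitmap aging. */
static inline bool kvm_arch_prepare_bitmap_age(struct mmu_notifier *mn)
{
	return false;
}
#endif

#ifndef kvm_arch_finish_bitmap_age
/* Assumed default: nothing to release when prepare acquired no resources. */
static inline void kvm_arch_finish_bitmap_age(struct mmu_notifier *mn) {}
#endif
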
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 992e651540e8..fae1a75750bb 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1674,8 +1674,14 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool young = false;
 
-	if (kvm_memslots_have_rmaps(kvm))
+	if (kvm_memslots_have_rmaps(kvm)) {
+		if (range->lockless) {
+			kvm_age_set_unreliable(range);
+			return false;
+		}
+
 		young = kvm_handle_gfn_range(kvm, range, kvm_age_rmap);
+	}
 
 	if (tdp_mmu_enabled)
 		young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
@@ -1687,8 +1693,14 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	bool young = false;
 
-	if (kvm_memslots_have_rmaps(kvm))
+	if (kvm_memslots_have_rmaps(kvm)) {
+		if (range->lockless) {
+			kvm_age_set_unreliable(range);
+			return false;
+		}
+
 		young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmap);
+	}
 
 	if (tdp_mmu_enabled)
 		young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d078157e62aa..edea01bc145f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1217,6 +1217,9 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
 	if (!is_accessed_spte(iter->old_spte))
 		return false;
 
+	if (!kvm_gfn_should_age(range, iter->gfn))
+		return false;
+
 	if (spte_ad_enabled(iter->old_spte)) {
 		iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
							 iter->old_spte,
@@ -1250,7 +1253,12 @@ bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
 			 struct kvm_gfn_range *range)
 {
-	return is_accessed_spte(iter->old_spte);
+	bool young = is_accessed_spte(iter->old_spte);
+
+	if (young)
+		kvm_gfn_record_young(range, iter->gfn);
+
+	return young;
 }
 
 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
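
Taken together, the expected flow when the notifier caller supplies a
bitmap is sketched below. Only kvm_arch_prepare_bitmap_age(), kvm_age_gfn()
and the TDP MMU changes above come from this patch; the wrapper function
and the lockless assignment are illustrative.

/* Illustrative only: how the pieces above are expected to combine. */
static bool example_bitmap_age(struct kvm *kvm, struct mmu_notifier *mn,
			       struct kvm_gfn_range *range)
{
	/* On x86, true only with the TDP MMU and a usable accessed bit. */
	if (!kvm_arch_prepare_bitmap_age(mn))
		return false;	/* caller falls back to the mmu_lock path */

	range->lockless = true;
	/*
	 * If shadow-MMU rmaps are in use, kvm_age_gfn() marks the bitmap
	 * unreliable and reports nothing young; otherwise the TDP MMU walk
	 * honors kvm_gfn_should_age() for each SPTE whose accessed bit it
	 * clears.
	 */
	return kvm_age_gfn(kvm, range);
}
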
--
2.44.0.478.gd926399ef9-goog