[PATCH v7 5/8] KVM: x86/mmu: Don't pass FOLL_GET to __kvm_follow_pfn
From: David Stevens <stevensd at chromium.org>
Date: Tue Jul 4 00:50:50 PDT 2023
Stop passing FOLL_GET to __kvm_follow_pfn. This allows the host to map
memory into the guest that is backed by un-refcounted struct pages - for
example, higher-order non-compound pages allocated by the amdgpu driver
via ttm_pool_alloc_page.

The bulk of this change is tracking the is_refcounted_page flag so that
non-refcounted pages don't trigger page_count() == 0 warnings. This is
done by storing the flag in an unused bit in the SPTEs.
Signed-off-by: David Stevens <stevensd at chromium.org>
---
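As a note for reviewers, here is a stand-alone user-space sketch of the
refcount-tracking bit described above. It is not part of the patch: only
the SPTE_MMU_PAGE_REFCOUNTED bit position and the is_refcounted_page_pte()
helper are taken from the diff below; the struct page side is stubbed out
and PT_WRITABLE_MASK is just a placeholder for the x86 writable bit.

/*
 * Stand-alone user-space model of the SPTE bit tracking, for illustration
 * only.  The bit position and helper name mirror the patch; the struct
 * page side (kvm_set_page_dirty()) is replaced by a stub.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)			(1ULL << (n))
#define SPTE_MMU_PAGE_REFCOUNTED	BIT_ULL(59)
#define PT_WRITABLE_MASK		BIT_ULL(1)	/* placeholder for the x86 W bit */

static bool is_refcounted_page_pte(uint64_t spte)
{
	return spte & SPTE_MMU_PAGE_REFCOUNTED;
}

/* Stub standing in for kvm_set_page_dirty(pfn_to_page(spte_to_pfn(spte))). */
static void mark_backing_page_dirty(uint64_t spte)
{
	printf("spte %#llx: backing struct page marked dirty\n",
	       (unsigned long long)spte);
}

/*
 * The pattern the diff applies whenever a writable SPTE is dropped or
 * write-protected: only touch the struct page when the SPTE says that a
 * refcounted page exists.
 */
static void drop_writable_spte(uint64_t spte)
{
	if ((spte & PT_WRITABLE_MASK) && is_refcounted_page_pte(spte))
		mark_backing_page_dirty(spte);
	else
		printf("spte %#llx: not refcounted, skip struct page update\n",
		       (unsigned long long)spte);
}

int main(void)
{
	drop_writable_spte(PT_WRITABLE_MASK | SPTE_MMU_PAGE_REFCOUNTED);
	drop_writable_spte(PT_WRITABLE_MASK);	/* e.g. a ttm_pool_alloc_page page */
	return 0;
}

The same check-the-bit-before-touching-the-page pattern is what the mmu.c
and tdp_mmu.c hunks below apply to the accessed/dirty updates.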
arch/x86/kvm/mmu/mmu.c | 44 +++++++++++++++++++++------------
arch/x86/kvm/mmu/mmu_internal.h | 1 +
arch/x86/kvm/mmu/paging_tmpl.h | 9 ++++---
arch/x86/kvm/mmu/spte.c | 4 ++-
arch/x86/kvm/mmu/spte.h | 12 ++++++++-
arch/x86/kvm/mmu/tdp_mmu.c | 22 ++++++++++-------
6 files changed, 62 insertions(+), 30 deletions(-)
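Similarly, a rough user-space model of the new __kvm_faultin_pfn() contract,
again for illustration only: the struct fields mirror the ones used in the
mmu.c hunk below, the resolver is a stub, and none of this is the real
kvm_follow_pfn API.

/*
 * User-space model of the __kvm_faultin_pfn() change: the request carries
 * FOLL_WRITE but no longer FOLL_GET, and the fault records whether the
 * resolved pfn is backed by a refcounted page.  The structs are stubs
 * mimicking the fields the diff uses; they are not the real KVM API.
 */
#include <stdbool.h>
#include <stdio.h>

#define FOLL_WRITE	0x01	/* value as in include/linux/mm.h */

struct follow_request {			/* stand-in for struct kvm_follow_pfn */
	unsigned int flags;
	bool guarded_by_mmu_notifier;
	bool writable;			/* output */
	bool is_refcounted_page;	/* output */
};

struct page_fault {			/* stand-in for struct kvm_page_fault */
	bool write;
	bool map_writable;
	bool is_refcounted_page;
};

/* Stub resolver: pretend the gfn maps to a non-refcounted driver page. */
static void follow_pfn_stub(struct follow_request *req)
{
	req->writable = req->flags & FOLL_WRITE;
	req->is_refcounted_page = false;
}

static void faultin_pfn(struct page_fault *fault)
{
	struct follow_request req = {
		/* No FOLL_GET: KVM takes no reference on the page here. */
		.flags = fault->write ? FOLL_WRITE : 0,
		/* Safe because mmu notifiers will invalidate the SPTE. */
		.guarded_by_mmu_notifier = true,
	};

	follow_pfn_stub(&req);
	fault->map_writable = req.writable;
	fault->is_refcounted_page = req.is_refcounted_page;
}

int main(void)
{
	struct page_fault fault = { .write = true };

	faultin_pfn(&fault);
	/* Fault completion: only mark the page accessed if one exists. */
	printf("map_writable=%d is_refcounted_page=%d -> %s\n",
	       fault.map_writable, fault.is_refcounted_page,
	       fault.is_refcounted_page ? "kvm_set_page_accessed()" : "skip");
	return 0;
}

With FOLL_GET gone there is no reference to drop at the end of the fault,
so the out_unlock paths below only mark the page accessed, and only when
fault->is_refcounted_page is set.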
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e44ab512c3a1..b1607e314497 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -553,12 +553,14 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
flush = true;
- kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+ if (is_refcounted_page_pte(old_spte))
+ kvm_set_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
}
if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
flush = true;
- kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+ if (is_refcounted_page_pte(old_spte))
+ kvm_set_page_dirty(pfn_to_page(spte_to_pfn(old_spte)));
}
return flush;
@@ -596,14 +598,18 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
* before they are reclaimed. Sanity check that, if the pfn is backed
* by a refcounted page, the refcount is elevated.
*/
- page = kvm_pfn_to_refcounted_page(pfn);
- WARN_ON(page && !page_count(page));
+ if (is_refcounted_page_pte(old_spte)) {
+ page = kvm_pfn_to_refcounted_page(pfn);
+ WARN_ON(!page || !page_count(page));
+ }
- if (is_accessed_spte(old_spte))
- kvm_set_pfn_accessed(pfn);
+ if (is_refcounted_page_pte(old_spte)) {
+ if (is_accessed_spte(old_spte))
+ kvm_set_page_accessed(pfn_to_page(pfn));
- if (is_dirty_spte(old_spte))
- kvm_set_pfn_dirty(pfn);
+ if (is_dirty_spte(old_spte))
+ kvm_set_page_dirty(pfn_to_page(pfn));
+ }
return old_spte;
}
@@ -639,8 +645,8 @@ static bool mmu_spte_age(u64 *sptep)
* Capture the dirty status of the page, so that it doesn't get
* lost when the SPTE is marked for access tracking.
*/
- if (is_writable_pte(spte))
- kvm_set_pfn_dirty(spte_to_pfn(spte));
+ if (is_writable_pte(spte) && is_refcounted_page_pte(spte))
+ kvm_set_page_dirty(pfn_to_page(spte_to_pfn(spte)));
spte = mark_spte_for_access_track(spte);
mmu_spte_update_no_track(sptep, spte);
@@ -1278,8 +1284,8 @@ static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{
bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
(unsigned long *)sptep);
- if (was_writable && !spte_ad_enabled(*sptep))
- kvm_set_pfn_dirty(spte_to_pfn(*sptep));
+ if (was_writable && !spte_ad_enabled(*sptep) && is_refcounted_page_pte(*sptep))
+ kvm_set_page_dirty(pfn_to_page(spte_to_pfn(*sptep)));
return was_writable;
}
@@ -2937,6 +2943,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
bool host_writable = !fault || fault->map_writable;
bool prefetch = !fault || fault->prefetch;
bool write_fault = fault && fault->write;
+ bool is_refcounted = !fault || fault->is_refcounted_page;
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
@@ -2969,7 +2976,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
}
wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
- true, host_writable, &spte);
+ true, host_writable, is_refcounted, &spte);
if (*sptep == spte) {
ret = RET_PF_SPURIOUS;
@@ -4299,8 +4306,9 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
struct kvm_follow_pfn foll = {
.slot = slot,
.gfn = fault->gfn,
- .flags = FOLL_GET | (fault->write ? FOLL_WRITE : 0),
+ .flags = fault->write ? FOLL_WRITE : 0,
.allow_write_mapping = true,
+ .guarded_by_mmu_notifier = true,
};
/*
@@ -4317,6 +4325,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
fault->slot = NULL;
fault->pfn = KVM_PFN_NOSLOT;
fault->map_writable = false;
+ fault->is_refcounted_page = false;
return RET_PF_CONTINUE;
}
/*
@@ -4366,6 +4375,7 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
success:
fault->hva = foll.hva;
fault->map_writable = foll.writable;
+ fault->is_refcounted_page = foll.is_refcounted_page;
return RET_PF_CONTINUE;
}
@@ -4451,7 +4461,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
out_unlock:
write_unlock(&vcpu->kvm->mmu_lock);
- kvm_release_pfn_clean(fault->pfn);
+ if (fault->is_refcounted_page)
+ kvm_set_page_accessed(pfn_to_page(fault->pfn));
return r;
}
@@ -4529,7 +4540,8 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
out_unlock:
read_unlock(&vcpu->kvm->mmu_lock);
- kvm_release_pfn_clean(fault->pfn);
+ if (fault->is_refcounted_page)
+ kvm_set_page_accessed(pfn_to_page(fault->pfn));
return r;
}
#endif
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d39af5639ce9..55790085884f 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -240,6 +240,7 @@ struct kvm_page_fault {
kvm_pfn_t pfn;
hva_t hva;
bool map_writable;
+ bool is_refcounted_page;
/*
* Indicates the guest is trying to write a gfn that contains one or
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 0662e0278e70..3284e7bd9619 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -829,7 +829,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
out_unlock:
write_unlock(&vcpu->kvm->mmu_lock);
- kvm_release_pfn_clean(fault->pfn);
+ if (fault->is_refcounted_page)
+ kvm_set_page_accessed(pfn_to_page(fault->pfn));
return r;
}
@@ -883,7 +884,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
*/
static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int i)
{
- bool host_writable;
+ bool host_writable, is_refcounted;
gpa_t first_pte_gpa;
u64 *sptep, spte;
struct kvm_memory_slot *slot;
@@ -940,10 +941,12 @@ static int FNAME(sync_spte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, int
sptep = &sp->spt[i];
spte = *sptep;
host_writable = spte & shadow_host_writable_mask;
+ /* TODO: is this correct? */
+ is_refcounted = spte & SPTE_MMU_PAGE_REFCOUNTED;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
make_spte(vcpu, sp, slot, pte_access, gfn,
spte_to_pfn(spte), spte, true, false,
- host_writable, &spte);
+ host_writable, is_refcounted, &spte);
return mmu_spte_update(sptep, spte);
}
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index cf2c6426a6fc..46c681dc45e6 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -138,7 +138,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
u64 old_spte, bool prefetch, bool can_unsync,
- bool host_writable, u64 *new_spte)
+ bool host_writable, bool is_refcounted, u64 *new_spte)
{
int level = sp->role.level;
u64 spte = SPTE_MMU_PRESENT_MASK;
@@ -188,6 +188,8 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (level > PG_LEVEL_4K)
spte |= PT_PAGE_SIZE_MASK;
+ else if (is_refcounted)
+ spte |= SPTE_MMU_PAGE_REFCOUNTED;
if (shadow_memtype_mask)
spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1279db2eab44..be93dd061ae3 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -95,6 +95,11 @@ static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
/* Defined only to keep the above static asserts readable. */
#undef SHADOW_ACC_TRACK_SAVED_MASK
+/*
+ * Indicates that the SPTE refers to a page with a valid refcount.
+ */
+#define SPTE_MMU_PAGE_REFCOUNTED BIT_ULL(59)
+
/*
* Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
* the memslots generation and is derived as follows:
@@ -332,6 +337,11 @@ static inline bool is_dirty_spte(u64 spte)
return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
+static inline bool is_refcounted_page_pte(u64 spte)
+{
+ return spte & SPTE_MMU_PAGE_REFCOUNTED;
+}
+
static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
int level)
{
@@ -462,7 +472,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
u64 old_spte, bool prefetch, bool can_unsync,
- bool host_writable, u64 *new_spte);
+ bool host_writable, bool is_refcounted, u64 *new_spte);
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
union kvm_mmu_page_role role, int index);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 512163d52194..a9b1b14d2e26 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -474,6 +474,7 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
+ bool is_refcounted = is_refcounted_page_pte(old_spte);
WARN_ON(level > PT64_ROOT_MAX_LEVEL);
WARN_ON(level < PG_LEVEL_4K);
@@ -538,9 +539,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
if (is_leaf != was_leaf)
kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
- if (was_leaf && is_dirty_spte(old_spte) &&
+ if (was_leaf && is_dirty_spte(old_spte) && is_refcounted &&
(!is_present || !is_dirty_spte(new_spte) || pfn_changed))
- kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+ kvm_set_page_dirty(pfn_to_page(spte_to_pfn(old_spte)));
/*
* Recursively handle child PTs if the change removed a subtree from
@@ -552,9 +553,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
(is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
- if (was_leaf && is_accessed_spte(old_spte) &&
+ if (was_leaf && is_accessed_spte(old_spte) && is_refcounted &&
(!is_present || !is_accessed_spte(new_spte) || pfn_changed))
- kvm_set_pfn_accessed(spte_to_pfn(old_spte));
+ kvm_set_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
}
/*
@@ -988,8 +989,9 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
else
wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
- fault->pfn, iter->old_spte, fault->prefetch, true,
- fault->map_writable, &new_spte);
+ fault->pfn, iter->old_spte, fault->prefetch, true,
+ fault->map_writable, fault->is_refcounted_page,
+ &new_spte);
if (new_spte == iter->old_spte)
ret = RET_PF_SPURIOUS;
@@ -1205,8 +1207,9 @@ static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
* Capture the dirty status of the page, so that it doesn't get
* lost when the SPTE is marked for access tracking.
*/
- if (is_writable_pte(iter->old_spte))
- kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
+ if (is_writable_pte(iter->old_spte) &&
+ is_refcounted_page_pte(iter->old_spte))
+ kvm_set_page_dirty(pfn_to_page(spte_to_pfn(iter->old_spte)));
new_spte = mark_spte_for_access_track(iter->old_spte);
iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
@@ -1626,7 +1629,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
iter.old_spte,
iter.old_spte & ~dbit);
- kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
+ if (is_refcounted_page_pte(iter.old_spte))
+ kvm_set_page_dirty(pfn_to_page(spte_to_pfn(iter.old_spte)));
}
rcu_read_unlock();
--
2.41.0.255.g8b1d071c50-goog