[PATCH v6 10/22] KVM: x86/mmu: Pass memory caches to allocate SPs separately
David Matlack
dmatlack at google.com
Mon May 16 16:21:26 PDT 2022
Refactor kvm_mmu_alloc_shadow_page() to receive the caches from which it
will allocate the various pieces of memory for shadow pages as a
parameter, rather than deriving them from the vcpu pointer. This will be
useful in a future commit where shadow pages are allocated during VM
ioctls for eager page splitting, and thus will use a different set of
caches.
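
(For illustration only, not part of this patch: a VM-ioctl caller would be
expected to top up its own set of caches before taking the MMU lock, roughly
along the lines below. The split_*_cache fields under kvm->arch are
hypothetical placeholders, not something this patch introduces;
kvm_mmu_topup_memory_cache() is the existing generic helper.)

/*
 * Hypothetical sketch: fill per-VM caches for eager page splitting up
 * front, so that no allocations are needed once the MMU lock is held.
 * The split_*_cache field names are placeholders.
 */
static int topup_split_caches(struct kvm *kvm)
{
	int r;

	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
	if (r)
		return r;

	r = kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
	if (r)
		return r;

	return kvm_mmu_topup_memory_cache(&kvm->arch.split_gfn_array_cache, 1);
}
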
Preemptively pull the caches out all the way to
kvm_mmu_get_shadow_page() since eager page splitting will not be calling
kvm_mmu_alloc_shadow_page() directly.

No functional change intended.
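
(Again for illustration only: a rough sketch of how such a caller could then
hand its caches to __kvm_mmu_get_shadow_page(). The split_*_cache fields are
the same hypothetical placeholders as above, and note that the function added
by this patch still takes a vCPU, so the sketch assumes one is available.)

/*
 * Hypothetical sketch: look up or allocate a shadow page using per-VM
 * split caches instead of the vCPU caches.
 */
static struct kvm_mmu_page *get_sp_for_split(struct kvm_vcpu *vcpu, gfn_t gfn,
					     union kvm_mmu_page_role role)
{
	struct shadow_page_caches caches = {
		.page_header_cache = &vcpu->kvm->arch.split_page_header_cache,
		.shadow_page_cache = &vcpu->kvm->arch.split_shadow_page_cache,
		.gfn_array_cache   = &vcpu->kvm->arch.split_gfn_array_cache,
	};

	return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
}
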
Signed-off-by: David Matlack <dmatlack@google.com>
---
arch/x86/kvm/mmu/mmu.c | 36 +++++++++++++++++++++++++++++-------
1 file changed, 29 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6a3b1b00f02b..bad4dd5aa051 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2075,17 +2075,25 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm_vcpu *vcpu,
return sp;
}
+/* Caches used when allocating a new shadow page. */
+struct shadow_page_caches {
+ struct kvm_mmu_memory_cache *page_header_cache;
+ struct kvm_mmu_memory_cache *shadow_page_cache;
+ struct kvm_mmu_memory_cache *gfn_array_cache;
+};
+
static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
+ struct shadow_page_caches *caches,
gfn_t gfn,
struct hlist_head *sp_list,
union kvm_mmu_page_role role)
{
struct kvm_mmu_page *sp;
- sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
- sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+ sp = kvm_mmu_memory_cache_alloc(caches->page_header_cache);
+ sp->spt = kvm_mmu_memory_cache_alloc(caches->shadow_page_cache);
if (!role.direct)
- sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+ sp->gfns = kvm_mmu_memory_cache_alloc(caches->gfn_array_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
@@ -2107,9 +2115,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm_vcpu *vcpu,
return sp;
}
-static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
- gfn_t gfn,
- union kvm_mmu_page_role role)
+static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+ struct shadow_page_caches *caches,
+ gfn_t gfn,
+ union kvm_mmu_page_role role)
{
struct hlist_head *sp_list;
struct kvm_mmu_page *sp;
@@ -2120,13 +2129,26 @@ static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
sp = kvm_mmu_find_shadow_page(vcpu, gfn, sp_list, role);
if (!sp) {
created = true;
- sp = kvm_mmu_alloc_shadow_page(vcpu, gfn, sp_list, role);
+ sp = kvm_mmu_alloc_shadow_page(vcpu, caches, gfn, sp_list, role);
}
trace_kvm_mmu_get_page(sp, created);
return sp;
}
+static struct kvm_mmu_page *kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu,
+ gfn_t gfn,
+ union kvm_mmu_page_role role)
+{
+ struct shadow_page_caches caches = {
+ .page_header_cache = &vcpu->arch.mmu_page_header_cache,
+ .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
+ .gfn_array_cache = &vcpu->arch.mmu_gfn_array_cache,
+ };
+
+ return __kvm_mmu_get_shadow_page(vcpu, &caches, gfn, role);
+}
+
static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct, u32 access)
{
struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
--
2.36.0.550.gb090851708-goog