[PATCH 10/13] RISC-V: KVM: Introduce struct kvm_gstage_mapping
Anup Patel
apatel at ventanamicro.com
Wed Jun 4 23:14:55 PDT 2025
Introduce struct kvm_gstage_mapping, which describes a g-stage mapping
at a particular g-stage page table level. Also, update
kvm_riscv_gstage_map() to return the resulting g-stage mapping upon
success.
Signed-off-by: Anup Patel <apatel at ventanamicro.com>
---
 arch/riscv/include/asm/kvm_mmu.h |  9 ++++-
 arch/riscv/kvm/mmu.c             | 58 ++++++++++++++++++--------------
 arch/riscv/kvm/vcpu_exit.c       |  3 +-
 3 files changed, 43 insertions(+), 27 deletions(-)
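
For context, a minimal sketch of how a caller can consume the new
out_map parameter; the handle_gpa_fault() wrapper below is hypothetical,
for illustration only, and not part of this series:

/* Hypothetical caller, for illustration only. */
static int handle_gpa_fault(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot,
			    gpa_t gpa, unsigned long hva, bool is_write)
{
	struct kvm_gstage_mapping map;
	int ret;

	ret = kvm_riscv_gstage_map(vcpu, memslot, gpa, hva, is_write, &map);
	if (ret < 0)
		return ret;

	/*
	 * On success, "map" describes what was installed: map.addr is the
	 * mapped guest physical address, map.pte the leaf PTE value, and
	 * map.level the g-stage page table level of the mapping.
	 */
	kvm_debug("g-stage map: addr=0x%llx level=%u\n", map.addr, map.level);
	return 0;
}
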
diff --git a/arch/riscv/include/asm/kvm_mmu.h b/arch/riscv/include/asm/kvm_mmu.h
index 4e1654282ee4..91c11e692dc7 100644
--- a/arch/riscv/include/asm/kvm_mmu.h
+++ b/arch/riscv/include/asm/kvm_mmu.h
@@ -8,6 +8,12 @@
 
 #include <linux/kvm_types.h>
 
+struct kvm_gstage_mapping {
+	gpa_t addr;
+	pte_t pte;
+	u32 level;
+};
+
 int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 			     phys_addr_t hpa, unsigned long size,
 			     bool writable, bool in_atomic);
@@ -15,7 +21,8 @@ void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
 			      unsigned long size);
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write);
+			 gpa_t gpa, unsigned long hva, bool is_write,
+			 struct kvm_gstage_mapping *out_map);
 int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
 void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
 void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index c9d87e7472fb..934c97c21130 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -135,18 +135,18 @@ static void gstage_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
 	kvm_riscv_hfence_gvma_vmid_gpa(kvm, -1UL, 0, addr, BIT(order), order);
 }
 
-static int gstage_set_pte(struct kvm *kvm, u32 level,
-			  struct kvm_mmu_memory_cache *pcache,
-			  gpa_t addr, const pte_t *new_pte)
+static int gstage_set_pte(struct kvm *kvm,
+			  struct kvm_mmu_memory_cache *pcache,
+			  const struct kvm_gstage_mapping *map)
 {
 	u32 current_level = gstage_pgd_levels - 1;
 	pte_t *next_ptep = (pte_t *)kvm->arch.pgd;
-	pte_t *ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+	pte_t *ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 
-	if (current_level < level)
+	if (current_level < map->level)
 		return -EINVAL;
 
-	while (current_level != level) {
+	while (current_level != map->level) {
 		if (gstage_pte_leaf(ptep))
 			return -EEXIST;
 
@@ -165,13 +165,13 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
 		}
 
 		current_level--;
-		ptep = &next_ptep[gstage_pte_index(addr, current_level)];
+		ptep = &next_ptep[gstage_pte_index(map->addr, current_level)];
 	}
 
-	if (pte_val(*ptep) != pte_val(*new_pte)) {
-		set_pte(ptep, *new_pte);
+	if (pte_val(*ptep) != pte_val(map->pte)) {
+		set_pte(ptep, map->pte);
 		if (gstage_pte_leaf(ptep))
-			gstage_remote_tlb_flush(kvm, current_level, addr);
+			gstage_remote_tlb_flush(kvm, current_level, map->addr);
 	}
 
 	return 0;
@@ -181,14 +181,16 @@ static int gstage_map_page(struct kvm *kvm,
 			   struct kvm_mmu_memory_cache *pcache,
 			   gpa_t gpa, phys_addr_t hpa,
 			   unsigned long page_size,
-			   bool page_rdonly, bool page_exec)
+			   bool page_rdonly, bool page_exec,
+			   struct kvm_gstage_mapping *out_map)
 {
-	int ret;
-	u32 level = 0;
-	pte_t new_pte;
 	pgprot_t prot;
+	int ret;
 
-	ret = gstage_page_size_to_level(page_size, &level);
+	out_map->addr = gpa;
+	out_map->level = 0;
+
+	ret = gstage_page_size_to_level(page_size, &out_map->level);
 	if (ret)
 		return ret;
 
@@ -216,10 +218,10 @@ static int gstage_map_page(struct kvm *kvm,
 		else
 			prot = PAGE_WRITE;
 	}
-	new_pte = pfn_pte(PFN_DOWN(hpa), prot);
-	new_pte = pte_mkdirty(new_pte);
+	out_map->pte = pfn_pte(PFN_DOWN(hpa), prot);
+	out_map->pte = pte_mkdirty(out_map->pte);
 
-	return gstage_set_pte(kvm, level, pcache, gpa, &new_pte);
+	return gstage_set_pte(kvm, pcache, out_map);
 }
 
 enum gstage_op {
@@ -350,7 +352,6 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 			     phys_addr_t hpa, unsigned long size,
 			     bool writable, bool in_atomic)
 {
-	pte_t pte;
 	int ret = 0;
 	unsigned long pfn;
 	phys_addr_t addr, end;
@@ -358,22 +359,25 @@ int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
 		.gfp_custom = (in_atomic) ? GFP_ATOMIC | __GFP_ACCOUNT : 0,
 		.gfp_zero = __GFP_ZERO,
 	};
+	struct kvm_gstage_mapping map;
 
 	end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
 	pfn = __phys_to_pfn(hpa);
 
 	for (addr = gpa; addr < end; addr += PAGE_SIZE) {
-		pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.addr = addr;
+		map.pte = pfn_pte(pfn, PAGE_KERNEL_IO);
+		map.level = 0;
 
 		if (!writable)
-			pte = pte_wrprotect(pte);
+			map.pte = pte_wrprotect(map.pte);
 
 		ret = kvm_mmu_topup_memory_cache(&pcache, gstage_pgd_levels);
 		if (ret)
 			goto out;
 
 		spin_lock(&kvm->mmu_lock);
-		ret = gstage_set_pte(kvm, 0, &pcache, addr, &pte);
+		ret = gstage_set_pte(kvm, &pcache, &map);
 		spin_unlock(&kvm->mmu_lock);
 		if (ret)
 			goto out;
@@ -591,7 +595,8 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 
 int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 			 struct kvm_memory_slot *memslot,
-			 gpa_t gpa, unsigned long hva, bool is_write)
+			 gpa_t gpa, unsigned long hva, bool is_write,
+			 struct kvm_gstage_mapping *out_map)
 {
 	int ret;
 	kvm_pfn_t hfn;
@@ -606,6 +611,9 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	unsigned long vma_pagesize, mmu_seq;
 	struct page *page;
 
+	/* Setup initial state of output mapping */
+	memset(out_map, 0, sizeof(*out_map));
+
 	/* We need minimum second+third level pages */
 	ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
 	if (ret) {
@@ -675,10 +683,10 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 	if (writable) {
 		mark_page_dirty(kvm, gfn);
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, false, true);
+				      vma_pagesize, false, true, out_map);
 	} else {
 		ret = gstage_map_page(kvm, pcache, gpa, hfn << PAGE_SHIFT,
-				      vma_pagesize, true, true);
+				      vma_pagesize, true, true, out_map);
 	}
 
 	if (ret)
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index cc82bbab0e24..4fadf2bcd070 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -14,6 +14,7 @@
 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			     struct kvm_cpu_trap *trap)
 {
+	struct kvm_gstage_mapping host_map;
 	struct kvm_memory_slot *memslot;
 	unsigned long hva, fault_addr;
 	bool writable;
@@ -42,7 +43,7 @@ static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	}
 
 	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
-		(trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
+		(trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false, &host_map);
 	if (ret < 0)
 		return ret;
 
--
2.43.0