[PATCH 2/7] riscv/kvm: use raw PTE helpers for G-stage leaf PTEs
Yunhui Cui
cuiyunhui at bytedance.com
Tue Apr 21 02:24:52 PDT 2026
Use the raw RISC-V PTE helpers when KVM G-stage code needs to inspect
or update the exact leaf entry encoding. This keeps G-stage page tables
independent of the public PTE wrappers that will gain Svnapot-aware
behavior.
No functional change intended.
Signed-off-by: Yunhui Cui <cuiyunhui at bytedance.com>
---
arch/riscv/kvm/gstage.c | 48 ++++++++++++++++++++++-------------------
arch/riscv/kvm/mmu.c | 4 ++--
2 files changed, 28 insertions(+), 24 deletions(-)
diff --git a/arch/riscv/kvm/gstage.c b/arch/riscv/kvm/gstage.c
index d9fe8be2a1516..fda235092533a 100644
--- a/arch/riscv/kvm/gstage.c
+++ b/arch/riscv/kvm/gstage.c
@@ -88,7 +88,7 @@ bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
*ptep_level = current_level;
ptep = (pte_t *)gstage->pgd;
ptep = &ptep[gstage_pte_index(gstage, addr, current_level)];
- while (ptep && pte_val(ptep_get(ptep))) {
+ while (ptep && pte_val(__ptep_get(ptep))) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
@@ -98,7 +98,7 @@ bool kvm_riscv_gstage_get_leaf(struct kvm_gstage *gstage, gpa_t addr,
if (current_level) {
current_level--;
*ptep_level = current_level;
- ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ ptep = (pte_t *)gstage_pte_page_vaddr(__ptep_get(ptep));
ptep = &ptep[gstage_pte_index(gstage, addr, current_level)];
} else {
ptep = NULL;
@@ -138,18 +138,19 @@ int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
if (gstage_pte_leaf(ptep))
return -EEXIST;
- if (!pte_val(ptep_get(ptep))) {
+ if (!pte_val(__ptep_get(ptep))) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
- set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
- __pgprot(_PAGE_TABLE)));
+ __set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
- next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(__ptep_get(ptep));
}
current_level--;
@@ -157,7 +158,7 @@ int kvm_riscv_gstage_set_pte(struct kvm_gstage *gstage,
}
if (pte_val(*ptep) != pte_val(map->pte)) {
- set_pte(ptep, map->pte);
+ __set_pte(ptep, map->pte);
if (gstage_pte_leaf(ptep))
gstage_tlb_flush(gstage, current_level, map->addr);
}
@@ -170,13 +171,13 @@ static void kvm_riscv_gstage_update_pte_prot(struct kvm_gstage *gstage, u32 leve
{
pte_t new_pte;
- if (pgprot_val(pte_pgprot(ptep_get(ptep))) == pgprot_val(prot))
+ if (pgprot_val(pte_pgprot(__ptep_get(ptep))) == pgprot_val(prot))
return;
- new_pte = pfn_pte(pte_pfn(ptep_get(ptep)), prot);
+ new_pte = pfn_pte(pte_pfn(__ptep_get(ptep)), prot);
new_pte = pte_mkdirty(new_pte);
- set_pte(ptep, new_pte);
+ __set_pte(ptep, new_pte);
gstage_tlb_flush(gstage, level, addr);
}
@@ -255,7 +256,8 @@ int kvm_riscv_gstage_map_page(struct kvm_gstage *gstage,
if (ptep_level > out_map->level) {
kvm_riscv_gstage_split_huge(gstage, pcache, gpa,
out_map->level, true);
- } else if (ALIGN_DOWN(PFN_PHYS(pte_pfn(ptep_get(ptep))), page_size) == hpa) {
+ } else if (ALIGN_DOWN(PFN_PHYS(pte_pfn(__ptep_get(ptep))),
+ page_size) == hpa) {
kvm_riscv_gstage_update_pte_prot(gstage, ptep_level, gpa, ptep, prot);
return 0;
}
@@ -301,16 +303,16 @@ int kvm_riscv_gstage_split_huge(struct kvm_gstage *gstage,
while(current_level > target_level) {
ptep = (pte_t *)&next_ptep[gstage_pte_index(gstage, addr, current_level)];
- if (!pte_val(ptep_get(ptep)))
+ if (!pte_val(__ptep_get(ptep)))
break;
if (!gstage_pte_leaf(ptep)) {
- next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(__ptep_get(ptep));
current_level--;
continue;
}
- huge_pte = pte_val(ptep_get(ptep));
+ huge_pte = pte_val(__ptep_get(ptep));
ret = gstage_level_to_page_size(gstage, current_level - 1, &child_page_size);
if (ret)
@@ -322,11 +324,12 @@ int kvm_riscv_gstage_split_huge(struct kvm_gstage *gstage,
for (i = 0; i < PTRS_PER_PTE; i++) {
child_pte = make_child_pte(huge_pte, i, child_page_size);
- set_pte((pte_t *)&next_ptep[i], __pte(child_pte));
+ __set_pte((pte_t *)&next_ptep[i], __pte(child_pte));
}
- set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
- __pgprot(_PAGE_TABLE)));
+ __set_pte(ptep,
+ pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __pgprot(_PAGE_TABLE)));
if (flush)
gstage_tlb_flush(gstage, current_level, addr);
@@ -351,18 +354,18 @@ void kvm_riscv_gstage_op_pte(struct kvm_gstage *gstage, gpa_t addr,
WARN_ON(addr & (page_size - 1));
- if (!pte_val(ptep_get(ptep)))
+ if (!pte_val(__ptep_get(ptep)))
return;
if (ptep_level && !gstage_pte_leaf(ptep)) {
- next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(__ptep_get(ptep));
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(gstage, next_ptep_level, &next_page_size);
if (ret)
return;
if (op == GSTAGE_OP_CLEAR)
- set_pte(ptep, __pte(0));
+ __set_pte(ptep, __pte(0));
for (i = 0; i < PTRS_PER_PTE; i++)
kvm_riscv_gstage_op_pte(gstage, addr + i * next_page_size,
&next_ptep[i], next_ptep_level, op);
@@ -371,9 +374,10 @@ void kvm_riscv_gstage_op_pte(struct kvm_gstage *gstage, gpa_t addr,
} else {
old_pte = *ptep;
if (op == GSTAGE_OP_CLEAR)
- set_pte(ptep, __pte(0));
+ __set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
- set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
+ __set_pte(ptep,
+ __pte(pte_val(__ptep_get(ptep)) & ~_PAGE_WRITE));
if (pte_val(*ptep) != pte_val(old_pte))
gstage_tlb_flush(gstage, ptep_level, addr);
}
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 2d3def024270c..f338ef08a6d13 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -262,7 +262,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;
- return ptep_test_and_clear_young(NULL, 0, ptep);
+ return __ptep_test_and_clear_young(NULL, 0, ptep);
}
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
@@ -282,7 +282,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;
- return pte_young(ptep_get(ptep));
+ return pte_young(__ptep_get(ptep));
}
static bool fault_supports_gstage_huge_mapping(struct kvm_memory_slot *memslot,
--
2.39.5
More information about the linux-riscv
mailing list