[PATCH v2 30/30] KVM: arm64: Convert gmem_abort() to struct kvm_s2_fault_desc
Marc Zyngier
maz at kernel.org
Fri Mar 27 04:36:18 PDT 2026
Now that user_mem_abort() has been fully converted to struct
kvm_s2_fault_desc and its associated helpers, convert gmem_abort()
to it as well. The change is much simpler.
Tested-by: Fuad Tabba <tabba at google.com>
Reviewed-by: Fuad Tabba <tabba at google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose at arm.com>
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
arch/arm64/kvm/mmu.c | 41 +++++++++++++++++++----------------------
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index f4c8f72642e02..1fe7182be45ac 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1575,33 +1575,31 @@ struct kvm_s2_fault_desc {
unsigned long hva;
};
-static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
- struct kvm_s2_trans *nested,
- struct kvm_memory_slot *memslot, bool is_perm)
+static int gmem_abort(const struct kvm_s2_fault_desc *s2fd)
{
bool write_fault, exec_fault;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
- struct kvm_pgtable *pgt = vcpu->arch.hw_mmu->pgt;
+ struct kvm_pgtable *pgt = s2fd->vcpu->arch.hw_mmu->pgt;
unsigned long mmu_seq;
struct page *page;
- struct kvm *kvm = vcpu->kvm;
+ struct kvm *kvm = s2fd->vcpu->kvm;
void *memcache;
kvm_pfn_t pfn;
gfn_t gfn;
int ret;
- ret = prepare_mmu_memcache(vcpu, true, &memcache);
+ ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
if (ret)
return ret;
- if (nested)
- gfn = kvm_s2_trans_output(nested) >> PAGE_SHIFT;
+ if (s2fd->nested)
+ gfn = kvm_s2_trans_output(s2fd->nested) >> PAGE_SHIFT;
else
- gfn = fault_ipa >> PAGE_SHIFT;
+ gfn = s2fd->fault_ipa >> PAGE_SHIFT;
- write_fault = kvm_is_write_fault(vcpu);
- exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
+ write_fault = kvm_is_write_fault(s2fd->vcpu);
+ exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu);
VM_WARN_ON_ONCE(write_fault && exec_fault);
@@ -1609,24 +1607,24 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
/* Pairs with the smp_wmb() in kvm_mmu_invalidate_end(). */
smp_rmb();
- ret = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &page, NULL);
+ ret = kvm_gmem_get_pfn(kvm, s2fd->memslot, gfn, &pfn, &page, NULL);
if (ret) {
- kvm_prepare_memory_fault_exit(vcpu, fault_ipa, PAGE_SIZE,
+ kvm_prepare_memory_fault_exit(s2fd->vcpu, s2fd->fault_ipa, PAGE_SIZE,
write_fault, exec_fault, false);
return ret;
}
- if (!(memslot->flags & KVM_MEM_READONLY))
+ if (!(s2fd->memslot->flags & KVM_MEM_READONLY))
prot |= KVM_PGTABLE_PROT_W;
- if (nested)
- prot = adjust_nested_fault_perms(nested, prot);
+ if (s2fd->nested)
+ prot = adjust_nested_fault_perms(s2fd->nested, prot);
if (exec_fault || cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
prot |= KVM_PGTABLE_PROT_X;
- if (nested)
- prot = adjust_nested_exec_perms(kvm, nested, prot);
+ if (s2fd->nested)
+ prot = adjust_nested_exec_perms(kvm, s2fd->nested, prot);
kvm_fault_lock(kvm);
if (mmu_invalidate_retry(kvm, mmu_seq)) {
@@ -1634,7 +1632,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
goto out_unlock;
}
- ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, fault_ipa, PAGE_SIZE,
+ ret = KVM_PGT_FN(kvm_pgtable_stage2_map)(pgt, s2fd->fault_ipa, PAGE_SIZE,
__pfn_to_phys(pfn), prot,
memcache, flags);
@@ -1643,7 +1641,7 @@ static int gmem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_fault_unlock(kvm);
if ((prot & KVM_PGTABLE_PROT_W) && !ret)
- mark_page_dirty_in_slot(kvm, memslot, gfn);
+ mark_page_dirty_in_slot(kvm, s2fd->memslot, gfn);
return ret != -EAGAIN ? ret : 0;
}
@@ -2300,8 +2298,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
};
if (kvm_slot_has_gmem(memslot))
- ret = gmem_abort(vcpu, fault_ipa, nested, memslot,
- esr_fsc_is_permission_fault(esr));
+ ret = gmem_abort(&s2fd);
else
ret = user_mem_abort(&s2fd);
--
2.47.3
More information about the linux-arm-kernel mailing list