[PATCH v1 05/13] KVM: arm64: Extract stage-2 permission logic in user_mem_abort()
Fuad Tabba
tabba at google.com
Fri Mar 6 06:02:24 PST 2026
Extract the logic that computes the stage-2 protections and checks for
various permission faults (e.g., execution faults on non-cacheable
memory) into a new helper function, kvm_s2_fault_compute_prot(). This
helper also handles injecting atomic/exclusive faults back into the
guest when necessary.

This refactoring step separates the permission computation from the
mapping logic, making the main fault handler flow clearer.
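
With this change, the caller's flow reduces to the following sketch
(error handling abbreviated): the helper returns a negative errno on
failure, 1 once a fault has been injected back into the guest, and 0
when fault->prot has been computed and the mapping can proceed:

	ret = kvm_s2_fault_compute_prot(fault);
	if (ret == 1)		/* exclusive/atomic fault injected */
		goto out_put_page;
	if (ret)		/* -EFAULT or -ENOEXEC */
		goto out_put_page;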
Signed-off-by: Fuad Tabba <tabba at google.com>
---
arch/arm64/kvm/mmu.c | 163 +++++++++++++++++++++++--------------------
1 file changed, 87 insertions(+), 76 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 344a477e1bff..b328299cc0f5 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1809,6 +1809,89 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
 	return 1;
 }
 
+static int kvm_s2_fault_compute_prot(struct kvm_s2_fault *fault)
+{
+	struct kvm *kvm = fault->vcpu->kvm;
+
+	/*
+	 * Check if this is a non-struct-page memory PFN that cannot support
+	 * CMOs. It could potentially be unsafe to access as cacheable.
+	 */
+	if (fault->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
+		if (fault->is_vma_cacheable) {
+			/*
+			 * Whilst the VMA owner expects a cacheable mapping of this
+			 * PFN, hardware also has to support the FWB and CACHE DIC
+			 * features.
+			 *
+			 * ARM64 KVM relies on kernel VA mapping to the PFN to
+			 * perform cache maintenance as the CMO instructions work on
+			 * virtual addresses. VM_PFNMAP regions are not necessarily
+			 * mapped to a KVA, hence the presence of the hardware features
+			 * S2FWB and CACHE DIC is mandatory to avoid the need for
+			 * cache maintenance.
+			 */
+			if (!kvm_supports_cacheable_pfnmap())
+				return -EFAULT;
+		} else {
+			/*
+			 * If the page was identified as device early by looking at
+			 * the VMA flags, vma_pagesize is already representing the
+			 * largest quantity we can map. If instead it was mapped
+			 * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
+			 * and must not be upgraded.
+			 *
+			 * In both cases, we don't let transparent_hugepage_adjust()
+			 * change things at the last minute.
+			 */
+			fault->s2_force_noncacheable = true;
+		}
+	} else if (fault->logging_active && !fault->write_fault) {
+		/*
+		 * Only actually map the page as writable if this was a write
+		 * fault.
+		 */
+		fault->writable = false;
+	}
+
+	if (fault->exec_fault && fault->s2_force_noncacheable)
+		return -ENOEXEC;
+
+	/*
+	 * The guest performed an atomic/exclusive operation on memory with
+	 * unsupported attributes (e.g. ld64b/st64b on normal memory without
+	 * FEAT_LS64WB) and triggered the exception here. Since the memslot
+	 * is valid, inject the fault back to the guest.
+	 */
+	if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(fault->vcpu))) {
+		kvm_inject_dabt_excl_atomic(fault->vcpu, kvm_vcpu_get_hfar(fault->vcpu));
+		return 1;
+	}
+
+	if (fault->nested)
+		adjust_nested_fault_perms(fault->nested, &fault->prot, &fault->writable);
+
+	if (fault->writable)
+		fault->prot |= KVM_PGTABLE_PROT_W;
+
+	if (fault->exec_fault)
+		fault->prot |= KVM_PGTABLE_PROT_X;
+
+	if (fault->s2_force_noncacheable) {
+		if (fault->vfio_allow_any_uc)
+			fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
+		else
+			fault->prot |= KVM_PGTABLE_PROT_DEVICE;
+	} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
+		fault->prot |= KVM_PGTABLE_PROT_X;
+	}
+
+	if (fault->nested)
+		adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
+
+	return 0;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_s2_trans *nested,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1863,68 +1946,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	ret = 0;
 
-	/*
-	 * Check if this is non-struct page memory PFN, and cannot support
-	 * CMOs. It could potentially be unsafe to access as cacheable.
-	 */
-	if (fault->vm_flags & (VM_PFNMAP | VM_MIXEDMAP) && !pfn_is_map_memory(fault->pfn)) {
-		if (fault->is_vma_cacheable) {
-			/*
-			 * Whilst the VMA owner expects cacheable mapping to this
-			 * PFN, hardware also has to support the FWB and CACHE DIC
-			 * features.
-			 *
-			 * ARM64 KVM relies on kernel VA mapping to the PFN to
-			 * perform cache maintenance as the CMO instructions work on
-			 * virtual addresses. VM_PFNMAP region are not necessarily
-			 * mapped to a KVA and hence the presence of hardware features
-			 * S2FWB and CACHE DIC are mandatory to avoid the need for
-			 * cache maintenance.
-			 */
-			if (!kvm_supports_cacheable_pfnmap())
-				ret = -EFAULT;
-		} else {
-			/*
-			 * If the page was identified as device early by looking at
-			 * the VMA flags, vma_pagesize is already representing the
-			 * largest quantity we can map. If instead it was mapped
-			 * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
-			 * and must not be upgraded.
-			 *
-			 * In both cases, we don't let transparent_hugepage_adjust()
-			 * change things at the last minute.
-			 */
-			fault->s2_force_noncacheable = true;
-		}
-	} else if (fault->logging_active && !fault->write_fault) {
-		/*
-		 * Only actually map the page as writable if this was a write
-		 * fault.
-		 */
-		fault->writable = false;
+	ret = kvm_s2_fault_compute_prot(fault);
+	if (ret == 1) {
+		/* Exclusive/atomic fault was injected back into the guest. */
+		goto out_put_page;
 	}
-
-	if (fault->exec_fault && fault->s2_force_noncacheable)
-		ret = -ENOEXEC;
-
 	if (ret)
 		goto out_put_page;
 
-	/*
-	 * Guest performs atomic/exclusive operations on memory with unsupported
-	 * attributes (e.g. ld64b/st64b on normal memory when no FEAT_LS64WB)
-	 * and trigger the exception here. Since the memslot is valid, inject
-	 * the fault back to the guest.
-	 */
-	if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(fault->vcpu))) {
-		kvm_inject_dabt_excl_atomic(fault->vcpu, kvm_vcpu_get_hfar(fault->vcpu));
-		ret = 1;
-		goto out_put_page;
-	}
-
-	if (fault->nested)
-		adjust_nested_fault_perms(fault->nested, &fault->prot, &fault->writable);
-
 	kvm_fault_lock(kvm);
 	pgt = fault->vcpu->arch.hw_mmu->pgt;
 	if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
@@ -1961,24 +1990,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		}
 	}
 
-	if (fault->writable)
-		fault->prot |= KVM_PGTABLE_PROT_W;
-
-	if (fault->exec_fault)
-		fault->prot |= KVM_PGTABLE_PROT_X;
-
-	if (fault->s2_force_noncacheable) {
-		if (fault->vfio_allow_any_uc)
-			fault->prot |= KVM_PGTABLE_PROT_NORMAL_NC;
-		else
-			fault->prot |= KVM_PGTABLE_PROT_DEVICE;
-	} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
-		fault->prot |= KVM_PGTABLE_PROT_X;
-	}
-
-	if (fault->nested)
-		adjust_nested_exec_perms(kvm, fault->nested, &fault->prot);
-
 	/*
 	 * Under the premise of getting a FSC_PERM fault, we just need to relax
 	 * permissions only if vma_pagesize equals fault_granule. Otherwise,
--
2.53.0.473.g4a7958ca14-goog