[PATCH v1 10/13] KVM: arm64: Initialize struct kvm_s2_fault completely at declaration
Fuad Tabba
tabba at google.com
Fri Mar 6 06:02:29 PST 2026
Simplify the initialization of struct kvm_s2_fault in user_mem_abort().
Instead of partially initializing the struct via designated initializers
and then sequentially assigning the remaining fields (like write_fault
and topup_memcache) further down the function, compute the values those
fields depend on upfront, before the declaration.
This allows the entire struct to be fully initialized at declaration. It
also eliminates the need for the intermediate fault_data variable and
its associated fault pointer, reducing boilerplate.
Signed-off-by: Fuad Tabba <tabba at google.com>
---
arch/arm64/kvm/mmu.c | 34 ++++++++++++++++------------------
1 file changed, 16 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 35bcacba5800..2d6e749c1756 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1962,8 +1962,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
bool fault_is_perm)
{
- int ret = 0;
- struct kvm_s2_fault fault_data = {
+ bool write_fault = kvm_is_write_fault(vcpu);
+ bool logging_active = memslot_is_logging(memslot);
+ struct kvm_s2_fault fault = {
.vcpu = vcpu,
.fault_ipa = fault_ipa,
.nested = nested,
@@ -1971,19 +1972,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
.hva = hva,
.fault_is_perm = fault_is_perm,
.ipa = fault_ipa,
- .logging_active = memslot_is_logging(memslot),
- .force_pte = memslot_is_logging(memslot),
- .s2_force_noncacheable = false,
+ .logging_active = logging_active,
+ .force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
+ .fault_granule = fault_is_perm ? kvm_vcpu_trap_get_perm_fault_granule(vcpu) : 0,
+ .write_fault = write_fault,
+ .exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu),
+ .topup_memcache = !fault_is_perm || (logging_active && write_fault),
};
- struct kvm_s2_fault *fault = &fault_data;
void *memcache;
+ int ret;
- if (fault->fault_is_perm)
- fault->fault_granule = kvm_vcpu_trap_get_perm_fault_granule(fault->vcpu);
- fault->write_fault = kvm_is_write_fault(fault->vcpu);
- fault->exec_fault = kvm_vcpu_trap_is_exec_fault(fault->vcpu);
- VM_WARN_ON_ONCE(fault->write_fault && fault->exec_fault);
+ VM_WARN_ON_ONCE(fault.write_fault && fault.exec_fault);
/*
* Permission faults just need to update the existing leaf entry,
@@ -1991,9 +1991,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* only exception to this is when dirty logging is enabled at runtime
* and a write fault needs to collapse a block entry into a table.
*/
- fault->topup_memcache = !fault->fault_is_perm ||
- (fault->logging_active && fault->write_fault);
- ret = prepare_mmu_memcache(fault->vcpu, fault->topup_memcache, &memcache);
+ ret = prepare_mmu_memcache(vcpu, fault.topup_memcache, &memcache);
if (ret)
return ret;
@@ -2001,17 +1999,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* Let's check if we will get back a huge fault->page backed by hugetlbfs, or
* get block mapping for device MMIO region.
*/
- ret = kvm_s2_fault_pin_pfn(fault);
+ ret = kvm_s2_fault_pin_pfn(&fault);
if (ret != 1)
return ret;
- ret = kvm_s2_fault_compute_prot(fault);
+ ret = kvm_s2_fault_compute_prot(&fault);
if (ret) {
- kvm_release_page_unused(fault->page);
+ kvm_release_page_unused(fault.page);
return ret;
}
- return kvm_s2_fault_map(fault, memcache);
+ return kvm_s2_fault_map(&fault, memcache);
}
/* Resolve the access fault by making the page young again. */
--
2.53.0.473.g4a7958ca14-goog
More information about the linux-arm-kernel
mailing list