[PATCH v2 13/30] KVM: arm64: Clean up control flow in kvm_s2_fault_map()
Anshuman Khandual
anshuman.khandual at arm.com
Tue Mar 31 00:11:04 PDT 2026
On 27/03/26 5:06 PM, Marc Zyngier wrote:
> From: Fuad Tabba <tabba at google.com>
>
> Clean up the KVM MMU lock retry loop by pre-assigning the error code.
> Add braces around the THP adjustment branches for readability, and
> nest the negative-size error check under the transparent hugepage
> adjustment path, the only branch that can produce it.
>
> Signed-off-by: Fuad Tabba <tabba at google.com>
> Signed-off-by: Marc Zyngier <maz at kernel.org>
Reviewed-by: Anshuman Khandual <anshuman.khandual at arm.com>
> ---
> arch/arm64/kvm/mmu.c | 20 +++++++++++---------
> 1 file changed, 11 insertions(+), 9 deletions(-)
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index ee2a548999b1b..c6cd6ce5254be 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1897,10 +1897,9 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
>
> kvm_fault_lock(kvm);
> pgt = fault->vcpu->arch.hw_mmu->pgt;
> - if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
> - ret = -EAGAIN;
> + ret = -EAGAIN;
> + if (mmu_invalidate_retry(kvm, fault->mmu_seq))
> goto out_unlock;
> - }
>
> /*
> * If we are not forced to use page mapping, check if we are
> @@ -1908,16 +1907,17 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
> */
> if (fault->vma_pagesize == PAGE_SIZE &&
> !(fault->force_pte || fault->s2_force_noncacheable)) {
> - if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE)
> + if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE) {
> fault->vma_pagesize = fault->fault_granule;
> - else
> + } else {
> fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
> fault->hva, &fault->pfn,
> &fault->fault_ipa);
>
> - if (fault->vma_pagesize < 0) {
> - ret = fault->vma_pagesize;
> - goto out_unlock;
> + if (fault->vma_pagesize < 0) {
> + ret = fault->vma_pagesize;
> + goto out_unlock;
> + }
> }
> }
>
> @@ -1951,7 +1951,9 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
> if (fault->writable && !ret)
> mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
>
> - return ret != -EAGAIN ? ret : 0;
> + if (ret != -EAGAIN)
> + return ret;
> + return 0;
> }
>
> static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
More information about the linux-arm-kernel
mailing list