[PATCH v2 21/30] KVM: arm64: Kill topup_memcache from kvm_s2_fault
Fuad Tabba
tabba at google.com
Sun Mar 29 06:41:22 PDT 2026
Hi Marc,
On Fri, 27 Mar 2026 at 14:49, Marc Zyngier <maz at kernel.org> wrote:
>
> On Fri, 27 Mar 2026 11:36:09 +0000,
> Marc Zyngier <maz at kernel.org> wrote:
> >
> > The topup_memcache field can be easily replaced by the equivalent
> > conditions, and the resulting code is not much worse.
I think this is easier to reason about, so (not sure whether the tag
applies to fold-ins, but fwiw):
Reviewed-by: Fuad Tabba <tabba at google.com>
Thanks,
/fuad
> >
> > Tested-by: Fuad Tabba <tabba at google.com>
> > Reviewed-by: Fuad Tabba <tabba at google.com>
> > Reviewed-by: Suzuki K Poulose <suzuki.poulose at arm.com>
> > Signed-off-by: Marc Zyngier <maz at kernel.org>
> > ---
> >  arch/arm64/kvm/mmu.c | 12 ++++++------
> >  1 file changed, 6 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> > index e8bda71e862b2..5b05caecdbd92 100644
> > --- a/arch/arm64/kvm/mmu.c
> > +++ b/arch/arm64/kvm/mmu.c
> > @@ -1712,7 +1712,6 @@ static short kvm_s2_resolve_vma_size(const struct kvm_s2_fault_desc *s2fd,
> >
> >  struct kvm_s2_fault {
> >  	bool writable;
> > -	bool topup_memcache;
> >  	bool mte_allowed;
> >  	bool is_vma_cacheable;
> >  	bool s2_force_noncacheable;
> > @@ -1983,9 +1982,8 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
> >  		.logging_active = logging_active,
> >  		.force_pte = logging_active,
> >  		.prot = KVM_PGTABLE_PROT_R,
> > -		.topup_memcache = !perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu)),
> >  	};
> > -	void *memcache;
> > +	void *memcache = NULL;
> >  	int ret;
> >
> >  	/*
> > @@ -1994,9 +1992,11 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
> >  	 * only exception to this is when dirty logging is enabled at runtime
> >  	 * and a write fault needs to collapse a block entry into a table.
> >  	 */
> > -	ret = prepare_mmu_memcache(s2fd->vcpu, fault.topup_memcache, &memcache);
> > -	if (ret)
> > -		return ret;
> > +	if (!perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu))) {
> > +		ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
> > +		if (ret)
> > +			return ret;
> > +	}
> >
> >  	/*
> >  	 * Let's check if we will get back a huge page backed by hugetlbfs, or
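For anyone skimming: the open-coded check is the same predicate the
removed field used to carry. A minimal restatement as a standalone
helper (the name is hypothetical, not part of the patch):

static bool need_stage2_topup(bool perm_fault, bool logging_active,
			      bool write_fault)
{
	/*
	 * Table pages may be needed for anything but a pure permission
	 * fault, and also when dirty logging forces a write fault to
	 * collapse a block mapping into a table.
	 */
	return !perm_fault || (logging_active && write_fault);
}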
>
> Sashiko has spotted [1] an interesting corner case here, which is that the
> original code always initialises memcache to its correct value, while
> we now only do it in a limited number of cases.
>
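To make that concrete, here is the bad path under the v2 hunk above, as
a sketch: on a permission fault with logging disabled the condition is
false, so prepare_mmu_memcache() never runs and memcache keeps its NULL
initialiser instead of pointing at the vcpu's cache:

	void *memcache = NULL;	/* v2: only assigned inside the conditional */

	/* perm_fault && !logging_active: the whole block is skipped ... */
	if (!perm_fault || (logging_active && kvm_is_write_fault(s2fd->vcpu))) {
		ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
		if (ret)
			return ret;
	}

	/* ... so any later consumer of memcache now sees NULL */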
> I'm proposing to restore the original behaviour by folding the
> following change into this patch, splitting the retrieval of the
> memcache pointer from the top-up and avoiding the ugly pointer
> indirection:
>
> diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
> index 1fe7182be45ac..03e1f389339c7 100644
> --- a/arch/arm64/kvm/mmu.c
> +++ b/arch/arm64/kvm/mmu.c
> @@ -1513,25 +1513,22 @@ static bool kvm_vma_is_cacheable(struct vm_area_struct *vma)
>  	}
>  }
>
> -static int prepare_mmu_memcache(struct kvm_vcpu *vcpu, bool topup_memcache,
> -				void **memcache)
> +static void *get_mmu_memcache(struct kvm_vcpu *vcpu)
>  {
> -	int min_pages;
> -
>  	if (!is_protected_kvm_enabled())
> -		*memcache = &vcpu->arch.mmu_page_cache;
> +		return &vcpu->arch.mmu_page_cache;
>  	else
> -		*memcache = &vcpu->arch.pkvm_memcache;
> -
> -	if (!topup_memcache)
> -		return 0;
> +		return &vcpu->arch.pkvm_memcache;
> +}
>
> -	min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
> +static int topup_mmu_memcache(struct kvm_vcpu *vcpu, void *memcache)
> +{
> +	int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
>
>  	if (!is_protected_kvm_enabled())
> -		return kvm_mmu_topup_memory_cache(*memcache, min_pages);
> +		return kvm_mmu_topup_memory_cache(memcache, min_pages);
>
> -	return topup_hyp_memcache(*memcache, min_pages);
> +	return topup_hyp_memcache(memcache, min_pages);
>  }
>
>  /*
> @@ -1589,7 +1586,8 @@ static int gmem_abort(const struct kvm_s2_fault_desc *s2fd)
>  	gfn_t gfn;
>  	int ret;
>
> -	ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
> +	memcache = get_mmu_memcache(s2fd->vcpu);
> +	ret = topup_mmu_memcache(s2fd->vcpu, memcache);
>  	if (ret)
>  		return ret;
>
> @@ -1993,7 +1991,7 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
>  	bool perm_fault = kvm_vcpu_trap_is_permission_fault(s2fd->vcpu);
>  	struct kvm_s2_fault_vma_info s2vi = {};
>  	enum kvm_pgtable_prot prot;
> -	void *memcache = NULL;
> +	void *memcache;
>  	int ret;
>
>  	/*
> @@ -2002,9 +2000,10 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
>  	 * only exception to this is when dirty logging is enabled at runtime
>  	 * and a write fault needs to collapse a block entry into a table.
>  	 */
> +	memcache = get_mmu_memcache(s2fd->vcpu);
>  	if (!perm_fault || (memslot_is_logging(s2fd->memslot) &&
>  			    kvm_is_write_fault(s2fd->vcpu))) {
> -		ret = prepare_mmu_memcache(s2fd->vcpu, true, &memcache);
> +		ret = topup_mmu_memcache(s2fd->vcpu, memcache);
>  		if (ret)
>  			return ret;
>  	}
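Put together, the folded-in flow initialises the pointer
unconditionally and only gates the top-up, which restores the old
invariant. Roughly, as a sketch assembled from the hunks above rather
than the final tree:

	memcache = get_mmu_memcache(s2fd->vcpu);	/* always valid again */

	if (!perm_fault || (memslot_is_logging(s2fd->memslot) &&
			    kvm_is_write_fault(s2fd->vcpu))) {
		/* top up only when table pages may actually be needed */
		ret = topup_mmu_memcache(s2fd->vcpu, memcache);
		if (ret)
			return ret;
	}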
>
> The bot has also pointed out a couple of cases where memcache and
> permission faults interact badly. I'll look into them separately, as
> they predate this rework.
>
> Thanks,
>
> M.
>
> [1] https://sashiko.dev/#/patchset/20260327113618.4051534-1-maz%40kernel.org?patch=12134
>
> --
> Without deviation from the norm, progress is not possible.