[PATCH 05/17] KVM: arm64: Constrain fault_granule to kvm_s2_fault_map()
Marc Zyngier
maz at kernel.org
Mon Mar 16 10:54:38 PDT 2026
The notion of fault_granule is specific to kvm_s2_fault_map(), and
is not used anywhere else.
Move this variable locally, removing it from kvm_s2_fault.
Signed-off-by: Marc Zyngier <maz at kernel.org>
---
arch/arm64/kvm/mmu.c | 17 +++++++++--------
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 1b32f2e6c3e61..12c2f0aeaae4c 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1724,7 +1724,6 @@ struct kvm_s2_fault {
bool logging_active;
bool force_pte;
long vma_pagesize;
- long fault_granule;
enum kvm_pgtable_prot prot;
struct page *page;
vm_flags_t vm_flags;
@@ -1908,9 +1907,9 @@ static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
struct kvm_s2_fault *fault, void *memcache)
{
- bool fault_is_perm = kvm_s2_fault_is_perm(s2fd);
struct kvm *kvm = s2fd->vcpu->kvm;
struct kvm_pgtable *pgt;
+ long perm_fault_granule;
int ret;
enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
@@ -1920,14 +1919,17 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
if (mmu_invalidate_retry(kvm, fault->mmu_seq))
goto out_unlock;
+ perm_fault_granule = (kvm_s2_fault_is_perm(s2fd) ?
+ kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0);
+
/*
* If we are not forced to use fault->page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
if (fault->vma_pagesize == PAGE_SIZE &&
!(fault->force_pte || fault->s2_force_noncacheable)) {
- if (fault_is_perm && fault->fault_granule > PAGE_SIZE) {
- fault->vma_pagesize = fault->fault_granule;
+ if (perm_fault_granule > PAGE_SIZE) {
+ fault->vma_pagesize = perm_fault_granule;
} else {
fault->vma_pagesize = transparent_hugepage_adjust(kvm, s2fd->memslot,
s2fd->hva, &fault->pfn,
@@ -1940,15 +1942,15 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
}
}
- if (!fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+ if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
/*
* Under the premise of getting a FSC_PERM fault, we just need to relax
- * permissions only if fault->vma_pagesize equals fault->fault_granule. Otherwise,
+ * permissions only if vma_pagesize equals perm_fault_granule. Otherwise,
* kvm_pgtable_stage2_map() should be called to change block size.
*/
- if (fault_is_perm && fault->vma_pagesize == fault->fault_granule) {
+ if (fault->vma_pagesize == perm_fault_granule) {
/*
* Drop the SW bits in favour of those stored in the
* PTE, which will be preserved.
@@ -1984,7 +1986,6 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
.logging_active = logging_active,
.force_pte = logging_active,
.prot = KVM_PGTABLE_PROT_R,
- .fault_granule = perm_fault ? kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0,
.write_fault = write_fault,
.exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu),
.topup_memcache = !perm_fault || (logging_active && write_fault),
--
2.47.3
More information about the linux-arm-kernel
mailing list