[PATCH v2 4/5] KVM: arm64: Kill KVM_PGTABLE_S2_NOFWB
Marc Zyngier
maz@kernel.org
Fri Jan 23 11:16:36 PST 2026
Nobody is using this flag anymore, so remove it. This allows
some cleanup by removing stage2_has_fwb(), which can be replaced
by a direct check on the capability.
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
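Note (not part of the patch): with the KVM_PGTABLE_S2_NOFWB override gone,
the old helper would have reduced to a bare capability check with an unused
pgt argument, roughly:

static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
	/* No flag is left that can override the capability */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return false;

	return true;
}

so each caller can simply test cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)
directly, as done below.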
arch/arm64/include/asm/kvm_pgtable.h | 7 ++-----
arch/arm64/kvm/hyp/pgtable.c | 21 ++++++---------------
2 files changed, 8 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 9ce51a637da0a..2198b62428832 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -229,15 +229,12 @@ struct kvm_pgtable_mm_ops {
/**
* enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
- * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
- * ARM64_HAS_STAGE2_FWB.
* @KVM_PGTABLE_S2_IDMAP: Only use identity mappings.
* @KVM_PGTABLE_S2_AS_S1: Final memory attributes are that of Stage-1.
*/
enum kvm_pgtable_stage2_flags {
- KVM_PGTABLE_S2_NOFWB = BIT(0),
- KVM_PGTABLE_S2_IDMAP = BIT(1),
- KVM_PGTABLE_S2_AS_S1 = BIT(2),
+ KVM_PGTABLE_S2_IDMAP = BIT(0),
+ KVM_PGTABLE_S2_AS_S1 = BIT(1),
};
/**
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index c52a24c15ff28..00e33a16494bd 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -631,14 +631,6 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
return vtcr;
}
-static bool stage2_has_fwb(struct kvm_pgtable *pgt)
-{
- if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
- return false;
-
- return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
-}
-
void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
phys_addr_t addr, size_t size)
{
@@ -661,14 +653,13 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
#define KVM_S2_MEMATTR(pgt, attr) \
({ \
+ bool __fwb = cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); \
kvm_pte_t __attr; \
\
if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1) \
- __attr = PAGE_S2_MEMATTR(AS_S1, \
- stage2_has_fwb(pgt)); \
+ __attr = PAGE_S2_MEMATTR(AS_S1, __fwb); \
else \
- __attr = PAGE_S2_MEMATTR(attr, \
- stage2_has_fwb(pgt)); \
+ __attr = PAGE_S2_MEMATTR(attr, __fwb); \
\
__attr; \
})
@@ -880,7 +871,7 @@ static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
* system supporting FWB as the optimization is entirely
* pointless when the unmap walker needs to perform CMOs.
*/
- return system_supports_tlb_range() && stage2_has_fwb(pgt);
+ return system_supports_tlb_range() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
}
static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
@@ -1160,7 +1151,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (mm_ops->page_count(childp) != 1)
return 0;
} else if (stage2_pte_cacheable(pgt, ctx->old)) {
- need_flush = !stage2_has_fwb(pgt);
+ need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB);
}
/*
@@ -1390,7 +1381,7 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
.arg = pgt,
};
- if (stage2_has_fwb(pgt))
+ if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
return 0;
return kvm_pgtable_walk(pgt, addr, size, &walker);
--
2.47.3