[PATCH v1 4/8] KVM: arm64: Refactor vcpu_set_hcrx() to reduce indentation

Fuad Tabba <tabba at google.com>
Tue Nov 4 04:59:02 PST 2025


Invert the main conditional check in vcpu_set_hcrx() to return
immediately if the CPU does not support FEAT_HCX
(ARM64_HAS_HCX).

Using an early return avoids wrapping the entire function body in an
'if' block, reducing indentation and improving readability as the
function continues to grow.

No functional change intended.

Signed-off-by: Fuad Tabba <tabba at google.com>
---
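For reviewers, the general shape of the transformation, as a minimal
sketch with made-up setup_foo()/cap_supported()/has_feat_b() helpers
rather than the actual KVM code:

	/* Before: the whole body is nested under the capability check. */
	static void setup_foo(struct foo *f)
	{
		if (cap_supported()) {
			f->a = FOO_A;
			if (has_feat_b())
				f->b = FOO_B;
		}
	}

	/* After: bail out early, keeping the body at one indent level. */
	static void setup_foo(struct foo *f)
	{
		if (!cap_supported())
			return;

		f->a = FOO_A;
		if (has_feat_b())
			f->b = FOO_B;
	}

Each subsequent feature check is then added at the top level of the
function instead of one level deeper.
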
 arch/arm64/include/asm/kvm_emulate.h | 54 ++++++++++++++--------------
 1 file changed, 27 insertions(+), 27 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 7880e8290a20..034e1b39de6c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -674,40 +674,40 @@ static inline void vcpu_set_hcrx(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
 
-	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
-		/*
-		 * In general, all HCRX_EL2 bits are gated by a feature.
-		 * The only reason we can set SMPME without checking any
-		 * feature is that its effects are not directly observable
-		 * from the guest.
-		 */
-		vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
+	if (!cpus_have_final_cap(ARM64_HAS_HCX))
+		return;
 
-		if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_MSCEn;
-		else
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_MCE2;
+	/*
+	 * In general, all HCRX_EL2 bits are gated by a feature.
+	 * The only reason we can set SMPME without checking any feature is that
+	 * its effects are not directly observable from the guest.
+	 */
+	vcpu->arch.hcrx_el2 = HCRX_EL2_SMPME;
 
-		if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_TALLINT;
+	if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_MSCEn;
+	else
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_MCE2;
 
-		if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnASR;
+	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_TALLINT;
 
-		if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnALS;
+	if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_EnASR;
 
-		if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnAS0;
+	if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_EnALS;
 
-		if (kvm_has_tcr2(kvm))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+	if (kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_EnAS0;
 
-		if (kvm_has_fpmr(kvm))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+	if (kvm_has_tcr2(kvm))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
 
-		if (kvm_has_sctlr2(kvm))
-			vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
-	}
+	if (kvm_has_fpmr(kvm))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+
+	if (kvm_has_sctlr2(kvm))
+		vcpu->arch.hcrx_el2 |= HCRX_EL2_SCTLR2En;
 }
 #endif /* __ARM64_KVM_EMULATE_H__ */
-- 
2.51.2.997.g839fc31de9-goog