[PATCH 13/20] KVM: arm64: Move RESx into individual register descriptors
Fuad Tabba
tabba at google.com
Thu Jan 29 08:29:39 PST 2026
Hi Marc,
On Mon, 26 Jan 2026 at 12:17, Marc Zyngier <maz at kernel.org> wrote:
>
> Instead of hacking the RES1 bits at runtime, move them into the
> register descriptors. This makes it significantly nicer.
>
> Signed-off-by: Marc Zyngier <maz at kernel.org>
> ---
> arch/arm64/kvm/config.c | 36 +++++++++++++++++++++++++++++-------
> 1 file changed, 29 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
> index 7063fffc22799..d5871758f1fcc 100644
> --- a/arch/arm64/kvm/config.c
> +++ b/arch/arm64/kvm/config.c
> @@ -30,6 +30,7 @@ struct reg_bits_to_feat_map {
> #define RES0_WHEN_E2H1 BIT(7) /* RES0 when E2H=1 and not supported */
> #define RES1_WHEN_E2H0 BIT(8) /* RES1 when E2H=0 and not supported */
> #define RES1_WHEN_E2H1 BIT(9) /* RES1 when E2H=1 and not supported */
> +#define FORCE_RESx BIT(10) /* Unconditional RESx */
>
> unsigned long flags;
>
> @@ -107,6 +108,11 @@ struct reg_feat_map_desc {
> */
> #define NEEDS_FEAT(m, ...) NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__)
>
> +/* Declare fixed RESx bits */
> +#define FORCE_RES0(m) NEEDS_FEAT_FLAG(m, FORCE_RESx, enforce_resx)
> +#define FORCE_RES1(m) NEEDS_FEAT_FLAG(m, FORCE_RESx | AS_RES1, \
> + enforce_resx)
> +
> /*
> * Declare the dependency between a non-FGT register, a set of
> * feature, and the set of individual bits it contains. This generates
nit: features
> @@ -230,6 +236,15 @@ struct reg_feat_map_desc {
> #define FEAT_HCX ID_AA64MMFR1_EL1, HCX, IMP
> #define FEAT_S2PIE ID_AA64MMFR3_EL1, S2PIE, IMP
>
> +static bool enforce_resx(struct kvm *kvm)
> +{
> + /*
> + * Returning false here means that the RESx bits will be always
> + * addded to the fixed set bit. Yes, this is counter-intuitive.
nit: added
> + */
> + return false;
> +}
I see what you're doing here, but it took me a while to get it and
convince myself that there aren't any bugs (I couldn't find any
myself, but I wouldn't trust my own review that much). You already
introduce a new flag, FORCE_RESx. Why not just check that directly in
the compute_resx_bits() loop, before the check for CALL_FUNC?
+ if (map[i].flags & FORCE_RESx)
+ match = false;
+ else if (map[i].flags & CALL_FUNC)
...
The way it is now, to understand FORCE_RES0, you must trace a flag, a
macro expansion, and a function pointer, just to set a boolean to
false.
Cheers,
/fuad
> +
> static bool not_feat_aa64el3(struct kvm *kvm)
> {
> return !kvm_has_feat(kvm, FEAT_AA64EL3);
> @@ -1009,6 +1024,8 @@ static const struct reg_bits_to_feat_map hcr_feat_map[] = {
> HCR_EL2_TWEDEn,
> FEAT_TWED),
> NEEDS_FEAT_FIXED(HCR_EL2_E2H, compute_hcr_e2h),
> + FORCE_RES0(HCR_EL2_RES0),
> + FORCE_RES1(HCR_EL2_RES1),
> };
>
> static const DECLARE_FEAT_MAP(hcr_desc, HCR_EL2,
> @@ -1029,6 +1046,8 @@ static const struct reg_bits_to_feat_map sctlr2_feat_map[] = {
> SCTLR2_EL1_CPTM |
> SCTLR2_EL1_CPTM0,
> FEAT_CPA2),
> + FORCE_RES0(SCTLR2_EL1_RES0),
> + FORCE_RES1(SCTLR2_EL1_RES1),
> };
>
> static const DECLARE_FEAT_MAP(sctlr2_desc, SCTLR2_EL1,
> @@ -1054,6 +1073,8 @@ static const struct reg_bits_to_feat_map tcr2_el2_feat_map[] = {
> TCR2_EL2_E0POE,
> FEAT_S1POE),
> NEEDS_FEAT(TCR2_EL2_PIE, FEAT_S1PIE),
> + FORCE_RES0(TCR2_EL2_RES0),
> + FORCE_RES1(TCR2_EL2_RES1),
> };
>
> static const DECLARE_FEAT_MAP(tcr2_el2_desc, TCR2_EL2,
> @@ -1131,6 +1152,8 @@ static const struct reg_bits_to_feat_map sctlr_el1_feat_map[] = {
> SCTLR_EL1_A |
> SCTLR_EL1_M,
> FEAT_AA64EL1),
> + FORCE_RES0(SCTLR_EL1_RES0),
> + FORCE_RES1(SCTLR_EL1_RES1),
> };
>
> static const DECLARE_FEAT_MAP(sctlr_el1_desc, SCTLR_EL1,
> @@ -1165,6 +1188,8 @@ static const struct reg_bits_to_feat_map mdcr_el2_feat_map[] = {
> MDCR_EL2_TDE |
> MDCR_EL2_TDRA,
> FEAT_AA64EL1),
> + FORCE_RES0(MDCR_EL2_RES0),
> + FORCE_RES1(MDCR_EL2_RES1),
> };
>
> static const DECLARE_FEAT_MAP(mdcr_el2_desc, MDCR_EL2,
> @@ -1203,6 +1228,8 @@ static const struct reg_bits_to_feat_map vtcr_el2_feat_map[] = {
> VTCR_EL2_SL0 |
> VTCR_EL2_T0SZ,
> FEAT_AA64EL1),
> + FORCE_RES0(VTCR_EL2_RES0),
> + FORCE_RES1(VTCR_EL2_RES1),
> };
>
> static const DECLARE_FEAT_MAP(vtcr_el2_desc, VTCR_EL2,
> @@ -1214,7 +1241,8 @@ static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
> u64 mask = 0;
>
> for (int i = 0; i < map_size; i++)
> - mask |= map[i].bits;
> + if (!(map[i].flags & FORCE_RESx))
> + mask |= map[i].bits;
>
> if (mask != ~resx)
> kvm_err("Undefined %s behaviour, bits %016llx\n",
> @@ -1447,28 +1475,22 @@ struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg)
> break;
> case HCR_EL2:
> resx = compute_reg_resx_bits(kvm, &hcr_desc, 0, 0);
> - resx.res1 |= HCR_EL2_RES1;
> break;
> case SCTLR2_EL1:
> case SCTLR2_EL2:
> resx = compute_reg_resx_bits(kvm, &sctlr2_desc, 0, 0);
> - resx.res1 |= SCTLR2_EL1_RES1;
> break;
> case TCR2_EL2:
> resx = compute_reg_resx_bits(kvm, &tcr2_el2_desc, 0, 0);
> - resx.res1 |= TCR2_EL2_RES1;
> break;
> case SCTLR_EL1:
> resx = compute_reg_resx_bits(kvm, &sctlr_el1_desc, 0, 0);
> - resx.res1 |= SCTLR_EL1_RES1;
> break;
> case MDCR_EL2:
> resx = compute_reg_resx_bits(kvm, &mdcr_el2_desc, 0, 0);
> - resx.res1 |= MDCR_EL2_RES1;
> break;
> case VTCR_EL2:
> resx = compute_reg_resx_bits(kvm, &vtcr_el2_desc, 0, 0);
> - resx.res1 |= VTCR_EL2_RES1;
> break;
> default:
> WARN_ON_ONCE(1);
> --
> 2.47.3
>
More information about the linux-arm-kernel
mailing list