[PATCH 13/20] KVM: arm64: Move RESx into individual register descriptors
Marc Zyngier
maz at kernel.org
Thu Jan 29 10:07:46 PST 2026
On Thu, 29 Jan 2026 17:19:55 +0000,
Marc Zyngier <maz at kernel.org> wrote:
>
> On Thu, 29 Jan 2026 16:29:39 +0000,
> Fuad Tabba <tabba at google.com> wrote:
> >
> > Hi Marc,
> >
> > On Mon, 26 Jan 2026 at 12:17, Marc Zyngier <maz at kernel.org> wrote:
> > >
> > > Instead of hacking the RES1 bits at runtime, move them into the
> > > register descriptors. This makes it significantly nicer.
> > >
> > > Signed-off-by: Marc Zyngier <maz at kernel.org>
> > > ---
> > > arch/arm64/kvm/config.c | 36 +++++++++++++++++++++++++++++-------
> > > 1 file changed, 29 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
> > > index 7063fffc22799..d5871758f1fcc 100644
> > > --- a/arch/arm64/kvm/config.c
> > > +++ b/arch/arm64/kvm/config.c
> > > @@ -30,6 +30,7 @@ struct reg_bits_to_feat_map {
> > > #define RES0_WHEN_E2H1 BIT(7) /* RES0 when E2H=1 and not supported */
> > > #define RES1_WHEN_E2H0 BIT(8) /* RES1 when E2H=0 and not supported */
> > > #define RES1_WHEN_E2H1 BIT(9) /* RES1 when E2H=1 and not supported */
> > > +#define FORCE_RESx BIT(10) /* Unconditional RESx */
> > >
> > > unsigned long flags;
> > >
> > > @@ -107,6 +108,11 @@ struct reg_feat_map_desc {
> > > */
> > > #define NEEDS_FEAT(m, ...) NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__)
> > >
> > > +/* Declare fixed RESx bits */
> > > +#define FORCE_RES0(m) NEEDS_FEAT_FLAG(m, FORCE_RESx, enforce_resx)
> > > +#define FORCE_RES1(m) NEEDS_FEAT_FLAG(m, FORCE_RESx | AS_RES1, \
> > > + enforce_resx)
> > > +
> > > /*
> > > * Declare the dependency between a non-FGT register, a set of
> > > * feature, and the set of individual bits it contains. This generates
> >
> > nit: features
> >
> > > @@ -230,6 +236,15 @@ struct reg_feat_map_desc {
> > > #define FEAT_HCX ID_AA64MMFR1_EL1, HCX, IMP
> > > #define FEAT_S2PIE ID_AA64MMFR3_EL1, S2PIE, IMP
> > >
> > > +static bool enforce_resx(struct kvm *kvm)
> > > +{
> > > + /*
> > > + * Returning false here means that the RESx bits will be always
> > > + * addded to the fixed set bit. Yes, this is counter-intuitive.
> >
> > nit: added
> >
> > > + */
> > > + return false;
> > > +}
> >
> > I see what you're doing here, but it took me a while to get it and
> > convince myself that there aren't any bugs (my self couldn't find any
> > bugs, but I wouldn't trust him that much). You already introduce a new
> > flag, FORCE_RESx. Why not just check that directly in the
> > compute_resx_bits() loop, before the check for CALL_FUNC?
> >
> > + if (map[i].flags & FORCE_RESx)
> > + match = false;
> > + else if (map[i].flags & CALL_FUNC)
> > ...
> >
> > The way it is now, to understand FORCE_RES0, you must trace a flag, a
> > macro expansion, and a function pointer, just to set a boolean to
> > false.
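> >
> > In context, that would read something like this (loop shape
> > assumed, with a false match meaning the bits end up in the fixed
> > RESx set):
> >
> > 	for (int i = 0; i < map_size; i++) {
> > 		bool match;
> >
> > 		if (map[i].flags & FORCE_RESx)
> > 			match = false;	/* unconditionally RESx */
> > 		else if (map[i].flags & CALL_FUNC)
> > 			match = map[i].match(kvm);
> > 		else
> > 			match = kvm_has_feat(kvm, ...);
> >
> > 		if (!match)
> > 			resx |= map[i].bits;
> > 	}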
>
> With that scheme, you'd write something like:
>
> +#define FORCE_RES0(m) NEEDS_FEAT_FLAG(m, FORCE_RESx)
>
> This construct would need a new __NEEDS_FEAT_0() macro that doesn't
> take any argument other than flags. Something like below (untested).
>
> M.
>
> diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
> index 9485e1f2dc0b7..364bdd1e5be51 100644
> --- a/arch/arm64/kvm/config.c
> +++ b/arch/arm64/kvm/config.c
> @@ -79,6 +79,12 @@ struct reg_feat_map_desc {
> .match = (fun), \
> }
>
> +#define __NEEDS_FEAT_0(m, f, w, ...) \
> + { \
> + .w = (m), \
> + .flags = (f), \
> + }
> +
> #define __NEEDS_FEAT_FLAG(m, f, w, ...) \
> CONCATENATE(__NEEDS_FEAT_, COUNT_ARGS(__VA_ARGS__))(m, f, w, __VA_ARGS__)
>
> @@ -95,9 +101,8 @@ struct reg_feat_map_desc {
> #define NEEDS_FEAT(m, ...) NEEDS_FEAT_FLAG(m, 0, __VA_ARGS__)
>
> /* Declare fixed RESx bits */
> -#define FORCE_RES0(m) NEEDS_FEAT_FLAG(m, FORCE_RESx, enforce_resx)
> -#define FORCE_RES1(m) NEEDS_FEAT_FLAG(m, FORCE_RESx | AS_RES1, \
> - enforce_resx)
> +#define FORCE_RES0(m) NEEDS_FEAT_FLAG(m, FORCE_RESx)
> +#define FORCE_RES1(m) NEEDS_FEAT_FLAG(m, FORCE_RESx | AS_RES1)
>
> /*
> * Declare the dependency between a non-FGT register, a set of
> @@ -221,15 +226,6 @@ struct reg_feat_map_desc {
> #define FEAT_HCX ID_AA64MMFR1_EL1, HCX, IMP
> #define FEAT_S2PIE ID_AA64MMFR3_EL1, S2PIE, IMP
>
> -static bool enforce_resx(struct kvm *kvm)
> -{
> - /*
> - * Returning false here means that the RESx bits will be always
> - * addded to the fixed set bit. Yes, this is counter-intuitive.
> - */
> - return false;
> -}
> -
> static bool not_feat_aa64el3(struct kvm *kvm)
> {
> return !kvm_has_feat(kvm, FEAT_AA64EL3);
> @@ -996,7 +992,7 @@ static const struct reg_bits_to_feat_map hcr_feat_map[] = {
> NEEDS_FEAT(HCR_EL2_TWEDEL |
> HCR_EL2_TWEDEn,
> FEAT_TWED),
> - NEEDS_FEAT_FLAG(HCR_EL2_E2H, RES1_WHEN_E2H1, enforce_resx),
> + NEEDS_FEAT_FLAG(HCR_EL2_E2H, RES1_WHEN_E2H1 | FORCE_RESx),
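(For completeness: COUNT_ARGS() evaluates to 0 when no match function
is given, so the CONCATENATE() glue picks the new zero-argument
variant. Sketching the expansion, with the field name assumed to be
'bits':

	FORCE_RES0(HCR_EL2_E2H)
	=> NEEDS_FEAT_FLAG(HCR_EL2_E2H, FORCE_RESx)
	=> __NEEDS_FEAT_FLAG(HCR_EL2_E2H, FORCE_RESx, bits)
	=> CONCATENATE(__NEEDS_FEAT_, 0)(HCR_EL2_E2H, FORCE_RESx, bits)
	=> { .bits = (HCR_EL2_E2H), .flags = (FORCE_RESx), }
)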
Actually, this interacts badly with check_feat_map(), which tries to
find whether we have fully populated the registers, excluding the RESx
bits. But since we now consider E2H to be a reserved bit, we end up
with:

[ 0.141317] kvm [1]: Undefined HCR_EL2 behaviour, bits 0000000400000000

(bit 34 being, of course, E2H itself).
With my approach, it was possible to distinguish the architecturally
RESx bits (defined as RES0 or RES1), as they were the only ones with
the FORCE_RESx attribute.
I can work around it with
diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
index 364bdd1e5be51..398458f4a6b7b 100644
--- a/arch/arm64/kvm/config.c
+++ b/arch/arm64/kvm/config.c
@@ -1283,7 +1283,7 @@ static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
u64 mask = 0;
for (int i = 0; i < map_size; i++)
- if (!(map[i].flags & FORCE_RESx))
+ if (!(map[i].flags & FORCE_RESx) || !(map[i].bits & resx))
mask |= map[i].bits;
if (mask != ~resx)
but it becomes a bit awkward...
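Spelled out with the workaround applied (surrounding code assumed),
the extra clause is what lets E2H count towards the coverage mask
despite carrying FORCE_RESx:

	u64 mask = 0;

	for (int i = 0; i < map_size; i++)
		/*
		 * Only skip an entry if it is flagged FORCE_RESx *and*
		 * covered by the architectural RESx mask; a bit such
		 * as HCR_EL2.E2H, FORCE_RESx but not architecturally
		 * reserved, must still count towards the check.
		 */
		if (!(map[i].flags & FORCE_RESx) || !(map[i].bits & resx))
			mask |= map[i].bits;

	/* any bit still missing from mask is reported as undefined */
	if (mask != ~resx)
		...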
Thanks,
M.
--
Without deviation from the norm, progress is not possible.