[PATCH 10/20] KVM: arm64: Simplify FIXED_VALUE handling
Fuad Tabba
tabba at google.com
Tue Jan 27 10:20:47 PST 2026
On Mon, 26 Jan 2026 at 12:17, Marc Zyngier <maz at kernel.org> wrote:
>
> The FIXED_VALUE qualifier (mostly used for HCR_EL2) is pointlessly
> complicated, as it tries to piggy-back on the previous RES0 handling
> while being done in a different phase, on different data.
>
> Instead, make it an integral part of the RESx computation, and allow
> it to directly set RESx bits. This is much easier to understand.
>
> It also paves the way for some additional changes that will allow
> the full removal of the FIXED_VALUE handling.
>
> Signed-off-by: Marc Zyngier <maz at kernel.org>
The new code preserves the existing logic and is easier to understand.
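
To make the shape of the change obvious at a glance, here is a
stand-alone userspace sketch of the new flow (not the kernel code:
the single-entry map, the e2h0 flag and main() are made-up stand-ins
for kvm_has_feat() and the real tables; only the
CALL_FUNC | FIXED_VALUE dispatch mirrors the patch):

/*
 * Simplified sketch: a FIXED_VALUE callback now ORs into the RESx
 * masks itself, instead of being reconciled in a separate pass.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CALL_FUNC	(1UL << 0)
#define FIXED_VALUE	(1UL << 1)
#define AS_RES1		(1UL << 2)

#define HCR_EL2_E2H	(1ULL << 34)	/* HCR_EL2.E2H, bit 34 */

struct resx {
	uint64_t res0;
	uint64_t res1;
};

struct kvm {
	bool e2h0;	/* stand-in for kvm_has_feat(kvm, FEAT_E2H0) */
};

struct map_entry {
	uint64_t bits;
	unsigned long flags;
	bool (*match)(struct kvm *);
	bool (*fval)(struct kvm *, struct resx *);
};

/* FIXED_VALUE callback writes straight into the RESx masks. */
static bool compute_hcr_e2h(struct kvm *kvm, struct resx *bits)
{
	if (kvm->e2h0)
		bits->res0 |= HCR_EL2_E2H;
	else
		bits->res1 |= HCR_EL2_E2H;

	return true;
}

static struct resx compute_resx_bits(struct kvm *kvm,
				     const struct map_entry *map, int n)
{
	struct resx resx = {};

	for (int i = 0; i < n; i++) {
		bool match;

		switch (map[i].flags & (CALL_FUNC | FIXED_VALUE)) {
		case CALL_FUNC | FIXED_VALUE:
			map[i].fval(kvm, &resx);	/* sets RESx directly */
			continue;
		case CALL_FUNC:
			match = map[i].match(kvm);
			break;
		default:
			match = false;	/* idreg matching elided in this sketch */
		}

		if (!match) {
			if (map[i].flags & AS_RES1)
				resx.res1 |= map[i].bits;
			else
				resx.res0 |= map[i].bits;
		}
	}

	return resx;
}

int main(void)
{
	struct kvm kvm = { .e2h0 = true };
	const struct map_entry map[] = {
		{ .flags = CALL_FUNC | FIXED_VALUE, .fval = compute_hcr_e2h },
	};
	struct resx r = compute_resx_bits(&kvm, map, 1);

	printf("res0=%#llx res1=%#llx\n",
	       (unsigned long long)r.res0, (unsigned long long)r.res1);
	return 0;
}

The point is that the fixed bits land in resx.res0/res1 as part of the
same walk, so there is no separate fixed_bits result to merge back in
afterwards.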
Reviewed-by: Fuad Tabba <tabba at google.com>
Cheers,
/fuad
> ---
> arch/arm64/kvm/config.c | 67 ++++++++++++++---------------------------
> 1 file changed, 22 insertions(+), 45 deletions(-)
>
> diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
> index 39487182057a3..4fac04d3132c0 100644
> --- a/arch/arm64/kvm/config.c
> +++ b/arch/arm64/kvm/config.c
> @@ -37,7 +37,7 @@ struct reg_bits_to_feat_map {
> s8 lo_lim;
> };
> bool (*match)(struct kvm *);
> - bool (*fval)(struct kvm *, u64 *);
> + bool (*fval)(struct kvm *, struct resx *);
> };
> };
>
> @@ -389,14 +389,12 @@ static bool feat_vmid16(struct kvm *kvm)
> return kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16);
> }
>
> -static bool compute_hcr_e2h(struct kvm *kvm, u64 *bits)
> +static bool compute_hcr_e2h(struct kvm *kvm, struct resx *bits)
> {
> - if (bits) {
> - if (kvm_has_feat(kvm, FEAT_E2H0))
> - *bits &= ~HCR_EL2_E2H;
> - else
> - *bits |= HCR_EL2_E2H;
> - }
> + if (kvm_has_feat(kvm, FEAT_E2H0))
> + bits->res0 |= HCR_EL2_E2H;
> + else
> + bits->res1 |= HCR_EL2_E2H;
>
> return true;
> }
> @@ -1281,12 +1279,11 @@ static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map
> }
>
> static
> -struct resx __compute_fixed_bits(struct kvm *kvm,
> - const struct reg_bits_to_feat_map *map,
> - int map_size,
> - u64 *fixed_bits,
> - unsigned long require,
> - unsigned long exclude)
> +struct resx compute_resx_bits(struct kvm *kvm,
> + const struct reg_bits_to_feat_map *map,
> + int map_size,
> + unsigned long require,
> + unsigned long exclude)
> {
> struct resx resx = {};
>
> @@ -1299,14 +1296,18 @@ struct resx __compute_fixed_bits(struct kvm *kvm,
> if (map[i].flags & exclude)
> continue;
>
> - if (map[i].flags & CALL_FUNC)
> - match = (map[i].flags & FIXED_VALUE) ?
> - map[i].fval(kvm, fixed_bits) :
> - map[i].match(kvm);
> - else
> + switch (map[i].flags & (CALL_FUNC | FIXED_VALUE)) {
> + case CALL_FUNC | FIXED_VALUE:
> + map[i].fval(kvm, &resx);
> + continue;
> + case CALL_FUNC:
> + match = map[i].match(kvm);
> + break;
> + default:
> match = idreg_feat_match(kvm, &map[i]);
> + }
>
> - if (!match || (map[i].flags & FIXED_VALUE)) {
> + if (!match) {
> if (map[i].flags & AS_RES1)
> resx.res1 |= reg_feat_map_bits(&map[i]);
> else
> @@ -1317,17 +1318,6 @@ struct resx __compute_fixed_bits(struct kvm *kvm,
> return resx;
> }
>
> -static
> -struct resx compute_resx_bits(struct kvm *kvm,
> - const struct reg_bits_to_feat_map *map,
> - int map_size,
> - unsigned long require,
> - unsigned long exclude)
> -{
> - return __compute_fixed_bits(kvm, map, map_size, NULL,
> - require, exclude | FIXED_VALUE);
> -}
> -
> static
> struct resx compute_reg_resx_bits(struct kvm *kvm,
> const struct reg_feat_map_desc *r,
> @@ -1368,16 +1358,6 @@ static u64 compute_fgu_bits(struct kvm *kvm, const struct reg_feat_map_desc *r)
> return resx.res0 | resx.res1;
> }
>
> -static
> -struct resx compute_reg_fixed_bits(struct kvm *kvm,
> - const struct reg_feat_map_desc *r,
> - u64 *fixed_bits, unsigned long require,
> - unsigned long exclude)
> -{
> - return __compute_fixed_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
> - fixed_bits, require | FIXED_VALUE, exclude);
> -}
> -
> void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
> {
> u64 val = 0;
> @@ -1417,7 +1397,6 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
>
> struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg)
> {
> - u64 fixed = 0, mask;
> struct resx resx;
>
> switch (reg) {
> @@ -1459,10 +1438,8 @@ struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg)
> resx.res1 |= __HCRX_EL2_RES1;
> break;
> case HCR_EL2:
> - mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0).res0;
> resx = compute_reg_resx_bits(kvm, &hcr_desc, 0, 0);
> - resx.res0 |= (mask & ~fixed);
> - resx.res1 |= HCR_EL2_RES1 | (mask & fixed);
> + resx.res1 |= HCR_EL2_RES1;
> break;
> case SCTLR2_EL1:
> case SCTLR2_EL2:
> --
> 2.47.3
>