[PATCH 03/20] KVM: arm64: Introduce standalone FGU computing primitive
Fuad Tabba
tabba at google.com
Mon Jan 26 10:35:00 PST 2026
On Mon, 26 Jan 2026 at 12:17, Marc Zyngier <maz at kernel.org> wrote:
>
> Computing the FGU bits is made oddly complicated, as we use the RES0
> helper instead of using a specific abstraction.
>
> Introduce such an abstraction, which is going to make things significantly
> simpler in the future.
>
> Signed-off-by: Marc Zyngier <maz at kernel.org>
The old way mixed "bits that don't exist" with "bits we need to
trap"; here the distinction is clear.
Reviewed-by: Fuad Tabba <tabba at google.com>
Cheers,
/fuad
> ---
> arch/arm64/kvm/config.c | 57 ++++++++++++++++++-----------------------
> 1 file changed, 25 insertions(+), 32 deletions(-)
>
> diff --git a/arch/arm64/kvm/config.c b/arch/arm64/kvm/config.c
> index 0bcdb39885734..2122599f7cbbd 100644
> --- a/arch/arm64/kvm/config.c
> +++ b/arch/arm64/kvm/config.c
> @@ -1335,26 +1335,30 @@ static u64 compute_res0_bits(struct kvm *kvm,
> static u64 compute_reg_res0_bits(struct kvm *kvm,
> const struct reg_feat_map_desc *r,
> unsigned long require, unsigned long exclude)
> -
> {
> u64 res0;
>
> res0 = compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
> require, exclude);
>
> - /*
> - * If computing FGUs, don't take RES0 or register existence
> - * into account -- we're not computing bits for the register
> - * itself.
> - */
> - if (!(exclude & NEVER_FGU)) {
> - res0 |= compute_res0_bits(kvm, &r->feat_map, 1, require, exclude);
> - res0 |= ~reg_feat_map_bits(&r->feat_map);
> - }
> + res0 |= compute_res0_bits(kvm, &r->feat_map, 1, require, exclude);
> + res0 |= ~reg_feat_map_bits(&r->feat_map);
>
> return res0;
> }
>
> +static u64 compute_fgu_bits(struct kvm *kvm, const struct reg_feat_map_desc *r)
> +{
> + /*
> + * If computing FGUs, we collect the unsupported feature bits as
> + * RES0 bits, but don't take the actual RES0 bits or register
> + * existence into account -- we're not computing bits for the
> + * register itself.
> + */
> + return compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz,
> + 0, NEVER_FGU);
> +}
> +
> static u64 compute_reg_fixed_bits(struct kvm *kvm,
> const struct reg_feat_map_desc *r,
> u64 *fixed_bits, unsigned long require,
> @@ -1370,40 +1374,29 @@ void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt)
>
> switch (fgt) {
> case HFGRTR_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hfgrtr_desc,
> - 0, NEVER_FGU);
> - val |= compute_reg_res0_bits(kvm, &hfgwtr_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hfgrtr_desc);
> + val |= compute_fgu_bits(kvm, &hfgwtr_desc);
> break;
> case HFGITR_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hfgitr_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hfgitr_desc);
> break;
> case HDFGRTR_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hdfgrtr_desc,
> - 0, NEVER_FGU);
> - val |= compute_reg_res0_bits(kvm, &hdfgwtr_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hdfgrtr_desc);
> + val |= compute_fgu_bits(kvm, &hdfgwtr_desc);
> break;
> case HAFGRTR_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hafgrtr_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hafgrtr_desc);
> break;
> case HFGRTR2_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hfgrtr2_desc,
> - 0, NEVER_FGU);
> - val |= compute_reg_res0_bits(kvm, &hfgwtr2_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hfgrtr2_desc);
> + val |= compute_fgu_bits(kvm, &hfgwtr2_desc);
> break;
> case HFGITR2_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hfgitr2_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hfgitr2_desc);
> break;
> case HDFGRTR2_GROUP:
> - val |= compute_reg_res0_bits(kvm, &hdfgrtr2_desc,
> - 0, NEVER_FGU);
> - val |= compute_reg_res0_bits(kvm, &hdfgwtr2_desc,
> - 0, NEVER_FGU);
> + val |= compute_fgu_bits(kvm, &hdfgrtr2_desc);
> + val |= compute_fgu_bits(kvm, &hdfgwtr2_desc);
> break;
> default:
> BUG();
> --
> 2.47.3
>
More information about the linux-arm-kernel
mailing list