[PATCH v6 13/25] KVM: arm64: Add consistency checking for frac fields of ID registers
Reiji Watanabe
reijiw at google.com
Thu Mar 10 20:47:59 PST 2022
The fractional field of a feature in an ID register cannot simply be
validated at KVM_SET_ONE_REG time, because its validity depends on the
value of its (main) feature field, which may live in a different ID
register (and might be set later).
Validate fractional fields at the first KVM_RUN instead.
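For example, a hypothetical userspace sequence along these lines
(illustration only: the set_bad_ras_frac() helper, the literal field
shift, and the error handling below are not part of this patch) would
observe the deferred check:

  #include <errno.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Illustration (VM/vCPU setup elided): leave the main feature field
   * (ID_AA64PFR0_EL1.RAS) at the host limit and set an unsupported
   * RAS_frac value.  KVM_SET_ONE_REG accepts it, but the first KVM_RUN
   * now fails with EPERM.
   */
  int set_bad_ras_frac(int vcpu_fd)
  {
          __u64 val;
          struct kvm_one_reg reg = {
                  .id   = ARM64_SYS_REG(3, 0, 0, 4, 1),  /* ID_AA64PFR1_EL1 */
                  .addr = (__u64)&val,
          };

          if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                  return -1;

          val |= 0xfULL << 12;            /* RAS_frac, bits [15:12] */
          if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg))
                  return -1;              /* not rejected here ... */

          if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno == EPERM)
                  return 0;               /* ... but at the first KVM_RUN */

          return -1;
  }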
Signed-off-by: Reiji Watanabe <reijiw at google.com>
---
arch/arm64/include/asm/kvm_host.h | 1 +
arch/arm64/kvm/arm.c | 3 +
arch/arm64/kvm/sys_regs.c | 112 ++++++++++++++++++++++++++++++
3 files changed, 116 insertions(+)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 9ffe6604a58a..5e53102a1ac1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -748,6 +748,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
void set_default_id_regs(struct kvm *kvm);
int kvm_set_id_reg_feature(struct kvm *kvm, u32 id, u8 field_shift, u8 fval);
+int kvm_id_regs_check_frac_fields(const struct kvm_vcpu *vcpu);
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 91110d996ed6..e7dcc7704302 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -599,6 +599,9 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
if (likely(vcpu_has_run_once(vcpu)))
return 0;
+ if (!kvm_vm_is_protected(kvm) && kvm_id_regs_check_frac_fields(vcpu))
+ return -EPERM;
+
kvm_arm_vcpu_init_debug(vcpu);
if (likely(irqchip_in_kernel(kvm))) {
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index ba851de6486d..3805b69ed23e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -3397,6 +3397,102 @@ void kvm_sys_reg_table_init(void)
id_reg_desc_init_all();
}
+/* ID register's fractional field information with its feature field. */
+struct feature_frac {
+ u32 id;
+ u32 shift;
+ u32 frac_id;
+ u32 frac_shift;
+};
+
+static struct feature_frac feature_frac_table[] = {
+ {
+ .frac_id = SYS_ID_AA64PFR1_EL1,
+ .frac_shift = ID_AA64PFR1_RASFRAC_SHIFT,
+ .id = SYS_ID_AA64PFR0_EL1,
+ .shift = ID_AA64PFR0_RAS_SHIFT,
+ },
+ {
+ .frac_id = SYS_ID_AA64PFR1_EL1,
+ .frac_shift = ID_AA64PFR1_MPAMFRAC_SHIFT,
+ .id = SYS_ID_AA64PFR0_EL1,
+ .shift = ID_AA64PFR0_MPAM_SHIFT,
+ },
+ {
+ .frac_id = SYS_ID_AA64PFR1_EL1,
+ .frac_shift = ID_AA64PFR1_CSV2FRAC_SHIFT,
+ .id = SYS_ID_AA64PFR0_EL1,
+ .shift = ID_AA64PFR0_CSV2_SHIFT,
+ },
+};
+
+/*
+ * Return non-zero if the feature/fractional field pair is not
+ * supported. Return zero otherwise.
+ * This function validates only the fractional feature field,
+ * and relies on the (main) feature field having already been
+ * validated through arm64_check_features_kvm.
+ */
+static int vcpu_id_reg_feature_frac_check(const struct kvm_vcpu *vcpu,
+ const struct feature_frac *ftr_frac)
+{
+ const struct id_reg_desc *id_reg;
+ u32 id;
+ u64 val, lim, mask;
+
+ /* Check if the feature field value is the same as the limit */
+ id = ftr_frac->id;
+ id_reg = get_id_reg_desc(id);
+
+ mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->shift;
+ val = __read_id_reg(vcpu, id_reg) & mask;
+ lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
+ lim &= mask;
+
+ if (val != lim)
+ /*
+ * The feature level is lower than the limit.
+ * Any fractional version should be fine.
+ */
+ return 0;
+
+ /* Check the fractional feature field */
+ id = ftr_frac->frac_id;
+ id_reg = get_id_reg_desc(id);
+
+ mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->frac_shift;
+ val = __read_id_reg(vcpu, id_reg) & mask;
+ lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
+ lim &= mask;
+
+ if (val == lim)
+ /*
+ * Both the feature and fractional fields are the same
+ * as the limit.
+ */
+ return 0;
+
+ return arm64_check_features_kvm(id, val, lim);
+}
+
+int kvm_id_regs_check_frac_fields(const struct kvm_vcpu *vcpu)
+{
+ int i, err;
+ const struct feature_frac *frac;
+
+ /*
+ * Check ID registers' fractional fields, which aren't checked
+ * at KVM_SET_ONE_REG.
+ */
+ for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
+ frac = &feature_frac_table[i];
+ err = vcpu_id_reg_feature_frac_check(vcpu, frac);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
/*
* Update the ID register's field with @fval for the guest.
* The caller is expected to hold the kvm->lock.
@@ -3600,6 +3696,8 @@ static void id_reg_desc_init_all(void)
{
int i;
struct id_reg_desc *id_reg;
+ struct feature_frac *frac;
+ u64 ftr_mask = ARM64_FEATURE_FIELD_MASK;
for (i = 0; i < ARRAY_SIZE(id_reg_desc_table); i++) {
id_reg = (struct id_reg_desc *)id_reg_desc_table[i];
@@ -3608,6 +3706,20 @@ static void id_reg_desc_init_all(void)
id_reg_desc_init(id_reg);
}
+
+ /*
+ * Update the ignore_mask of ID registers based on fractional field
+ * information. Any ID register that has fractional fields is
+ * expected to have its own id_reg_desc.
+ */
+ for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
+ frac = &feature_frac_table[i];
+ id_reg = get_id_reg_desc(frac->frac_id);
+ if (WARN_ON_ONCE(!id_reg))
+ continue;
+
+ id_reg->ignore_mask |= ftr_mask << frac->frac_shift;
+ }
}
/*
--
2.35.1.723.g4982287a31-goog