[PATCH v2 08/14] arm64/cpufeature: Detect PE support for FEAT_NMI

Marc Zyngier maz at kernel.org
Mon Dec 5 10:03:07 PST 2022


On Sat, 12 Nov 2022 15:17:02 +0000,
Mark Brown <broonie at kernel.org> wrote:
> 
> Use of FEAT_NMI requires that all the PEs in the system and the GIC have NMI
> support. This patch implements the PE part of that detection.
> 
> In order to avoid problematic interactions between real and pseudo NMIs
> we disable the architected feature if the user has enabled pseudo NMIs
> on the command line. If this is done on a system where support for the
> architected feature is detected then a warning is printed during boot in
> order to help users spot what is likely to be a misconfiguration.
> 
> Signed-off-by: Mark Brown <broonie at kernel.org>
> ---
>  arch/arm64/include/asm/cpufeature.h |  6 ++++
>  arch/arm64/kernel/cpufeature.c      | 55 ++++++++++++++++++++++++++++-
>  arch/arm64/tools/cpucaps            |  1 +
>  3 files changed, 61 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index f73f11b55042..85eeb331a0ef 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -809,6 +809,12 @@ static __always_inline bool system_uses_irq_prio_masking(void)
>  	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
>  }
>  
> +static __always_inline bool system_uses_nmi(void)
> +{
> +	return IS_ENABLED(CONFIG_ARM64_NMI) &&
> +		cpus_have_const_cap(ARM64_HAS_NMI);
> +}
> +
>  static inline bool system_supports_mte(void)
>  {
>  	return IS_ENABLED(CONFIG_ARM64_MTE) &&
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 6062454a9067..18ab50b76f50 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -84,6 +84,7 @@
>  #include <asm/kvm_host.h>
>  #include <asm/mmu_context.h>
>  #include <asm/mte.h>
> +#include <asm/nmi.h>
>  #include <asm/processor.h>
>  #include <asm/smp.h>
>  #include <asm/sysreg.h>
> @@ -243,6 +244,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
>  };
>  
>  static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
> +	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_NMI_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
>  		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
> @@ -2008,9 +2010,11 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
>  }
>  #endif /* CONFIG_ARM64_E0PD */
>  
> -#ifdef CONFIG_ARM64_PSEUDO_NMI
> +#if IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) || IS_ENABLED(CONFIG_ARM64_NMI)
>  static bool enable_pseudo_nmi;
> +#endif
>  
> +#ifdef CONFIG_ARM64_PSEUDO_NMI
>  static int __init early_enable_pseudo_nmi(char *p)
>  {
>  	return strtobool(p, &enable_pseudo_nmi);
> @@ -2024,6 +2028,41 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
>  }
>  #endif
>  
> +#ifdef CONFIG_ARM64_NMI
> +static bool has_nmi(const struct arm64_cpu_capabilities *entry, int scope)
> +{
> +	if (!has_cpuid_feature(entry, scope))
> +		return false;
> +
> +	/*
> +	 * Having both real and pseudo NMIs enabled simultaneously is
> +	 * likely to cause confusion.  Since pseudo NMIs must be
> +	 * enabled with an explicit command line option, if the user
> +	 * has set that option on a system with real NMIs for some
> +	 * reason assume they know what they're doing.
> +	 */
> +	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && enable_pseudo_nmi) {
> +		pr_info("Pseudo NMI enabled, not using architected NMI\n");
> +		return false;
> +	}
> +
> +	return true;
> +}
> +
> +static void nmi_enable(const struct arm64_cpu_capabilities *__unused)
> +{
> +	/*
> +	 * Enable use of NMIs controlled by ALLINT, SPINTMASK should
> +	 * be clear by default but make it explicit that we are using
> +	 * this mode.  Ensure that ALLINT is clear first in order to
> +	 * avoid leaving things masked.
> +	 */
> +	_allint_clear();
> +	sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPINTMASK, SCTLR_EL1_NMI);
> +	isb();
> +}
> +#endif
> +
>  #ifdef CONFIG_ARM64_BTI
>  static void bti_enable(const struct arm64_cpu_capabilities *__unused)
>  {
> @@ -2640,6 +2679,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.matches = has_cpuid_feature,
>  		.cpu_enable = cpu_trap_el0_impdef,
>  	},
> +#ifdef CONFIG_ARM64_NMI
> +	{
> +		.desc = "Non-maskable Interrupts",
> +		.capability = ARM64_HAS_NMI,
> +		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,

PSEUDO_NMI uses ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE. What is the
rationale for using a different policy here?

> +		.sys_reg = SYS_ID_AA64PFR1_EL1,
> +		.sign = FTR_UNSIGNED,
> +		.field_pos = ID_AA64PFR1_EL1_NMI_SHIFT,
> +		.field_width = 4,
> +		.min_field_value = ID_AA64PFR1_EL1_NMI_IMP,
> +		.matches = has_nmi,
> +		.cpu_enable = nmi_enable,
> +	},
> +#endif
>  	{},
>  };

The whole thing is way too restrictive: KVM definitely needs to know
that the feature exists, even if there is no use for it in the host
kernel. There is no reason why guests shouldn't be able to use this
even if the host doesn't care about it.

Which means you need two properties: one that advertises the
availability of the feature, and one that makes use of it in the
kernel.

Thanks,

	M.

-- 
Without deviation from the norm, progress is not possible.



More information about the linux-arm-kernel mailing list