[PATCH] arm64: mm: fix warning in arch_faults_on_old_pte()

Suzuki K Poulose suzuki.poulose at arm.com
Tue Jun 15 03:40:15 PDT 2021


On 13/06/2021 22:47, Yu Zhao wrote:
> cow_user_page() doesn't disable preemption, and it triggers the
> warning in arch_faults_on_old_pte() when CONFIG_PREEMPT_COUNT=y.
> 
> Convert the Access flag support to a system-wide feature to avoid
> reading ID_AA64MMFR1_EL1 on local CPUs when determining the h/w cap.
> 
> Note that though the Access flag support is a non-conflicting feature,
> we require all late CPUs to have it if the boot CPU does. Otherwise
> the feature won't be enabled regardless of the capabilities of late
> CPUs.
> 
> If there are h/w implementations that break this rule, they will have
> to add errata, unless they can provide justification to switch to the
> less strict ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE.
> 
> Signed-off-by: Yu Zhao <yuzhao at google.com>
> ---
>   arch/arm64/include/asm/cpufeature.h | 20 +++++++-------------
>   arch/arm64/include/asm/pgtable.h    |  4 +---
>   arch/arm64/kernel/cpufeature.c      | 19 +++++++++++++++++++
>   arch/arm64/mm/proc.S                | 12 ------------
>   arch/arm64/tools/cpucaps            |  1 +
>   5 files changed, 28 insertions(+), 28 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index 338840c00e8e..c4336a374920 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -763,6 +763,13 @@ static inline bool system_supports_tlb_range(void)
>   		cpus_have_const_cap(ARM64_HAS_TLB_RANGE);
>   }
>   
> +/* Check whether hardware update of the Access flag is supported. */
> +static inline bool system_has_hw_af(void)
> +{
> +	return IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
> +		cpus_have_const_cap(ARM64_HW_AF);
> +}
> +
>   extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
>   
>   static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
> @@ -786,19 +793,6 @@ static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
>   	}
>   }
>   
> -/* Check whether hardware update of the Access flag is supported */
> -static inline bool cpu_has_hw_af(void)
> -{
> -	u64 mmfr1;
> -
> -	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
> -		return false;
> -
> -	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
> -	return cpuid_feature_extract_unsigned_field(mmfr1,
> -						ID_AA64MMFR1_HADBS_SHIFT);
> -}
> -
>   static inline bool cpu_has_pan(void)
>   {
>   	u64 mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 0b10204e72fc..864a2fdeb559 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -982,9 +982,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
>    */
>   static inline bool arch_faults_on_old_pte(void)
>   {
> -	WARN_ON(preemptible());
> -
> -	return !cpu_has_hw_af();
> +	return !system_has_hw_af();
>   }
>   #define arch_faults_on_old_pte		arch_faults_on_old_pte
>   
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index efed2830d141..afdb6e0336ed 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -1566,6 +1566,14 @@ static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
>   	return true;
>   }
>   
> +static void cpu_enable_hw_af(struct arm64_cpu_capabilities const *cap)
> +{
> +	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU)) {

You don't need this explicit check here. Since the cap is already
ARM64_CPUCAP_SYSTEM_FEATURE, it is guaranteed that all CPUs have
the capability; otherwise this wouldn't get called at all for any
CPU.

Suzuki



More information about the linux-arm-kernel mailing list