[PATCH] arm64/mm: remove now-superfluous ISBs from TTBR writes

Anshuman Khandual anshuman.khandual at arm.com
Wed Jun 14 03:05:03 PDT 2023



On 6/13/23 19:49, Jamie Iles wrote:
> At the time of authoring 7655abb95386 ("arm64: mm: Move ASID from TTBR0
> to TTBR1"), the Arm ARM did not specify any ordering guarantees for
> direct writes to TTBR0_ELx and TTBR1_ELx and so an ISB was required
> after each write to ensure TLBs would only be populated from the
> expected (or reserved) tables.
> 
> In a recent update to the Arm ARM, the requirements have been relaxed to
> reflect the implementation of current CPUs and to require implementations
> of future CPUs to read as follows (rule RDYDPX in D8.2.3, Translation
> table base address registers):

But what about the existing CPUs that might still require an ISB after
each individual write into TTBR0/1_EL1? Would they be impacted if the
ISBs get dropped?

> 
>   Direct writes to TTBR0_ELx and TTBR1_ELx occur in program order
>   relative to one another, without the need for explicit
>   synchronization. For any one translation, all indirect reads of
>   TTBR0_ELx and TTBR1_ELx that are made as part of the translation
>   observe only one point in that order of direct writes.
> 
> Remove the superfluous ISBs to optimize uaccess helpers and context
> switch.
> 
> Cc: Catalin Marinas <catalin.marinas at arm.com>
> Cc: Will Deacon <will at kernel.org>
> Cc: Mark Rutland <mark.rutland at arm.com>
> Signed-off-by: Jamie Iles <quic_jiles at quicinc.com>
> ---
>  arch/arm64/include/asm/asm-uaccess.h | 2 --
>  arch/arm64/include/asm/mmu_context.h | 9 +++++++--
>  arch/arm64/include/asm/uaccess.h     | 2 --
>  arch/arm64/mm/context.c              | 1 -
>  4 files changed, 7 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
> index 75b211c98dea..5b6efe8abeeb 100644
> --- a/arch/arm64/include/asm/asm-uaccess.h
> +++ b/arch/arm64/include/asm/asm-uaccess.h
> @@ -18,7 +18,6 @@
>  	bic	\tmp1, \tmp1, #TTBR_ASID_MASK
>  	sub	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET	// reserved_pg_dir
>  	msr	ttbr0_el1, \tmp1			// set reserved TTBR0_EL1
> -	isb
>  	add	\tmp1, \tmp1, #RESERVED_SWAPPER_OFFSET
>  	msr	ttbr1_el1, \tmp1		// set reserved ASID
>  	isb
> @@ -31,7 +30,6 @@
>  	extr    \tmp2, \tmp2, \tmp1, #48
>  	ror     \tmp2, \tmp2, #16
>  	msr	ttbr1_el1, \tmp2		// set the active ASID
> -	isb
>  	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
>  	isb
>  	.endm
> diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
> index 56911691bef0..a80285defe81 100644
> --- a/arch/arm64/include/asm/mmu_context.h
> +++ b/arch/arm64/include/asm/mmu_context.h
> @@ -39,11 +39,16 @@ static inline void contextidr_thread_switch(struct task_struct *next)
>  /*
>   * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
>   */
> -static inline void cpu_set_reserved_ttbr0(void)
> +static inline void __cpu_set_reserved_ttbr0(void)
>  {
>  	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
>  
>  	write_sysreg(ttbr, ttbr0_el1);
> +}
> +
> +static inline void cpu_set_reserved_ttbr0(void)
> +{
> +	__cpu_set_reserved_ttbr0();
>  	isb();
>  }
>  
> @@ -52,7 +57,7 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
>  static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
>  {
>  	BUG_ON(pgd == swapper_pg_dir);
> -	cpu_set_reserved_ttbr0();
> +	__cpu_set_reserved_ttbr0();
>  	cpu_do_switch_mm(virt_to_phys(pgd),mm);
>  }
>  
> diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
> index 05f4fc265428..14be5000c5a0 100644
> --- a/arch/arm64/include/asm/uaccess.h
> +++ b/arch/arm64/include/asm/uaccess.h
> @@ -65,7 +65,6 @@ static inline void __uaccess_ttbr0_disable(void)
>  	ttbr &= ~TTBR_ASID_MASK;
>  	/* reserved_pg_dir placed before swapper_pg_dir */
>  	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
> -	isb();
>  	/* Set reserved ASID */
>  	write_sysreg(ttbr, ttbr1_el1);
>  	isb();
> @@ -89,7 +88,6 @@ static inline void __uaccess_ttbr0_enable(void)
>  	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
>  	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
>  	write_sysreg(ttbr1, ttbr1_el1);
> -	isb();
>  
>  	/* Restore user page table */
>  	write_sysreg(ttbr0, ttbr0_el1);
> diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
> index e1e0dca01839..cf4ba575342e 100644
> --- a/arch/arm64/mm/context.c
> +++ b/arch/arm64/mm/context.c
> @@ -365,7 +365,6 @@ void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
>  	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);
>  
>  	write_sysreg(ttbr1, ttbr1_el1);
> -	isb();
>  	write_sysreg(ttbr0, ttbr0_el1);
>  	isb();
>  	post_ttbr_update_workaround();



More information about the linux-arm-kernel mailing list