[PATCH v1 01/13] arm64: Do not enable uaccess for flush_icache_range

Robin Murphy robin.murphy at arm.com
Tue May 11 09:53:02 PDT 2021


On 2021-05-11 15:42, Fuad Tabba wrote:
> __flush_icache_range works on the kernel linear map and doesn't
> need uaccess. That it currently enables uaccess anyway is a
> side-effect of its implementation falling through into
> __flush_cache_user_range.
> 
> Instead of sharing the code via fallthrough, implement both
> functions with a common macro that lets the caller specify whether
> user-space access is needed.
> 
> No functional change intended.
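
(Aside, since the whole series leans on this mechanism: GAS evaluates
.if on a macro parameter at assembly time, so each expansion emits
only the selected path and the other one costs nothing at runtime.
A toy illustration, with made-up names:

	.macro	step reg, trace
	.if	\trace
	nop				// emitted only when trace=1
	.endif
	add	\reg, \reg, #1
	.endm

	step	x0, 0			// expands to: add x0, x0, #1
	step	x0, 1			// expands to: nop; add x0, x0, #1

No runtime branch, no dead code in the needs_uaccess=0 flavour.)
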
> 
> Reported-by: Catalin Marinas <catalin.marinas at arm.com>
> Reported-by: Will Deacon <will at kernel.org>
> Link: https://lore.kernel.org/linux-arch/20200511110014.lb9PEahJ4hVOYrbwIb_qUHXyNy9KQzNFdb_I3YlzY6A@z/
> Signed-off-by: Fuad Tabba <tabba at google.com>
> ---
>   arch/arm64/include/asm/assembler.h | 13 ++++--
>   arch/arm64/mm/cache.S              | 64 +++++++++++++++++++++---------
>   2 files changed, 54 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
> index 8418c1bd8f04..6ff7a3a3b238 100644
> --- a/arch/arm64/include/asm/assembler.h
> +++ b/arch/arm64/include/asm/assembler.h
> @@ -426,16 +426,21 @@ alternative_endif
>    * Macro to perform an instruction cache maintenance for the interval
>    * [start, end)
>    *
> - * 	start, end:	virtual addresses describing the region
> - *	label:		A label to branch to on user fault.
> - * 	Corrupts:	tmp1, tmp2
> + *	start, end:	virtual addresses describing the region
> + *	needs_uaccess:	might access user space memory
> + *	label:		label to branch to on user fault (if needs_uaccess)
> + *	Corrupts:	tmp1, tmp2
>    */
> -	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
> +	.macro invalidate_icache_by_line start, end, tmp1, tmp2, needs_uaccess, label
>   	icache_line_size \tmp1, \tmp2
>   	sub	\tmp2, \tmp1, #1
>   	bic	\tmp2, \start, \tmp2
>   9997:
> +	.if	\needs_uaccess
>   USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
> +	.else
> +	ic	ivau, \tmp2
> +	.endif
>   	add	\tmp2, \tmp2, \tmp1
>   	cmp	\tmp2, \end
>   	b.lo	9997b
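
FWIW I hand-expanded the needs_uaccess=0 case to convince myself: the
USER() wrapper, and with it the exception-table entry, disappears
completely, leaving roughly this (modulo the icache_line_size innards
and the trailing maintenance barriers):

	icache_line_size x2, x3		// x2 = I-cache line size
	sub	x3, x2, #1
	bic	x3, x0, x3		// align start down to line size
9997:	ic	ivau, x3		// plain op, no fixup recorded
	add	x3, x3, x2
	cmp	x3, x1
	b.lo	9997b

which is just what the kernel-linear-map callers want.
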
> diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
> index 2d881f34dd9d..092f73acdf9a 100644
> --- a/arch/arm64/mm/cache.S
> +++ b/arch/arm64/mm/cache.S
> @@ -15,30 +15,20 @@
>   #include <asm/asm-uaccess.h>
>   
>   /*
> - *	flush_icache_range(start,end)
> + *	__flush_cache_range(start,end) [needs_uaccess]
>    *
>    *	Ensure that the I and D caches are coherent within specified region.
>    *	This is typically used when code has been written to a memory region,
>    *	and will be executed.
>    *
> - *	- start   - virtual start address of region
> - *	- end     - virtual end address of region
> + *	- start   	- virtual start address of region
> + *	- end     	- virtual end address of region
> + *	- needs_uaccess - (macro parameter) might access user space memory
>    */
> -SYM_FUNC_START(__flush_icache_range)
> -	/* FALLTHROUGH */
> -
> -/*
> - *	__flush_cache_user_range(start,end)
> - *
> - *	Ensure that the I and D caches are coherent within specified region.
> - *	This is typically used when code has been written to a memory region,
> - *	and will be executed.
> - *
> - *	- start   - virtual start address of region
> - *	- end     - virtual end address of region
> - */
> -SYM_FUNC_START(__flush_cache_user_range)
> +.macro	__flush_cache_range, needs_uaccess
> +	.if 	\needs_uaccess
>   	uaccess_ttbr0_enable x2, x3, x4
> +	.endif

Nit: the uaccess_ttbr0_enable feels like it belongs directly in
__flush_cache_user_range() rather than being hidden in the macro, since
enabling TTBR0 access isn't really an integral part of the cache
maintenance operation itself.
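
i.e. something like the below - a rough, untested sketch, where
"fault_label" is only an illustrative parameter name and the macro
would have to be reworked to fall through with x0 == 0 on success and
branch to \fault_label on a fault, instead of toggling TTBR0 and
ret'ing internally:

SYM_FUNC_START(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
	__flush_cache_range needs_uaccess=1, fault_label=2f
1:	uaccess_ttbr0_disable x1, x2
	ret
2:	mov	x0, #-EFAULT		// fault fixup lands here
	b	1b
SYM_FUNC_END(__flush_cache_user_range)

That keeps the TTBR0 dance visible at the one callsite that needs it.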

Robin.

>   alternative_if ARM64_HAS_CACHE_IDC
>   	dsb	ishst
>   	b	7f
> @@ -47,7 +37,11 @@ alternative_else_nop_endif
>   	sub	x3, x2, #1
>   	bic	x4, x0, x3
>   1:
> +	.if 	\needs_uaccess
>   user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
> +	.else
> +alternative_insn "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
> +	.endif
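
(For reference, IIRC user_alt is just alternative_insn plus an
exception-table entry hooked to the patched instruction, roughly:

	.macro	user_alt, label, oldinstr, newinstr, cond
9999:	alternative_insn "\oldinstr", "\newinstr", \cond
	_asm_extable	9999b, \label
	.endm

so the .else branch falling back to plain alternative_insn is exactly
the same thing minus the fixup - which is the point of the patch.)
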
>   	add	x4, x4, x2
>   	cmp	x4, x1
>   	b.lo	1b
> @@ -58,15 +52,47 @@ alternative_if ARM64_HAS_CACHE_DIC
>   	isb
>   	b	8f
>   alternative_else_nop_endif
> -	invalidate_icache_by_line x0, x1, x2, x3, 9f
> +	invalidate_icache_by_line x0, x1, x2, x3, \needs_uaccess, 9f
>   8:	mov	x0, #0
>   1:
> +	.if	\needs_uaccess
>   	uaccess_ttbr0_disable x1, x2
> +	.endif
>   	ret
> +
> +	.if 	\needs_uaccess
>   9:
>   	mov	x0, #-EFAULT
>   	b	1b
> +	.endif
> +.endm
> +
> +/*
> + *	flush_icache_range(start,end)
> + *
> + *	Ensure that the I and D caches are coherent within specified region.
> + *	This is typically used when code has been written to a memory region,
> + *	and will be executed.
> + *
> + *	- start   - virtual start address of region
> + *	- end     - virtual end address of region
> + */
> +SYM_FUNC_START(__flush_icache_range)
> +	__flush_cache_range needs_uaccess=0
>   SYM_FUNC_END(__flush_icache_range)
> +
> +/*
> + *	__flush_cache_user_range(start,end)
> + *
> + *	Ensure that the I and D caches are coherent within specified region.
> + *	This is typically used when code has been written to a memory region,
> + *	and will be executed.
> + *
> + *	- start   - virtual start address of region
> + *	- end     - virtual end address of region
> + */
> +SYM_FUNC_START(__flush_cache_user_range)
> +	__flush_cache_range needs_uaccess=1
>   SYM_FUNC_END(__flush_cache_user_range)
>   
>   /*
> @@ -86,7 +112,7 @@ alternative_else_nop_endif
>   
>   	uaccess_ttbr0_enable x2, x3, x4
>   
> -	invalidate_icache_by_line x0, x1, x2, x3, 2f
> +	invalidate_icache_by_line x0, x1, x2, x3, 1, 2f
>   	mov	x0, xzr
>   1:
>   	uaccess_ttbr0_disable x1, x2
> 


