[BOOT-WRAPPER v3 06/10] aarch32: Always enter kernel via exception return

Andre Przywara andre.przywara at arm.com
Thu Aug 22 09:04:03 PDT 2024


On Thu, 22 Aug 2024 11:14:37 +0100
Mark Rutland <mark.rutland at arm.com> wrote:

> When the boot-wrapper is entered at Secure PL1 it will enter the kernel
> via an exception return, and when entered at Hyp it will branch to the
> kernel directly. This is an artifact of the way the boot-wrapper was
> originally written in assembly, and it would be preferable to always
> enter the kernel via an exception return so that PSTATE is always
> initialized to a known-good value.
> 
> Rework jump_kernel() to always enter the kernel via an exception return,
> matching the style of the AArch64 version of jump_kernel().
> 
> Signed-off-by: Mark Rutland <mark.rutland at arm.com>
> Acked-by: Marc Zyngier <maz at kernel.org>
> Cc: Akos Denke <akos.denke at arm.com>
> Cc: Andre Przywara <andre.przywara at arm.com>
> Cc: Luca Fancellu <luca.fancellu at arm.com>

Thanks for the changes; this looks good to me now!

Reviewed-by: Andre Przywara <andre.przywara at arm.com>

Cheers,
Andre

> ---
>  arch/aarch32/boot.S | 48 +++++++++++++++++++++++----------------------
>  1 file changed, 25 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/aarch32/boot.S b/arch/aarch32/boot.S
> index f21f89a..e79aa06 100644
> --- a/arch/aarch32/boot.S
> +++ b/arch/aarch32/boot.S
> @@ -76,10 +76,6 @@ reset_at_hyp:
>  
>  	bl	setup_stack
>  
> -	mov	r0, #1
> -	ldr	r1, =flag_no_el3
> -	str	r0, [r1]
> -
>  	bl	cpu_init_bootwrapper
>  
>  	bl	cpu_init_arch
> @@ -96,9 +92,10 @@ err_invalid_id:
>  	 * r1-r3, sp[0]: kernel arguments
>  	 */
>  ASM_FUNC(jump_kernel)
> -	sub	sp, #4				@ Ignore fourth argument
> -	push	{r0 - r3}
> -	mov	r5, sp
> +	mov	r4, r0
> +	mov	r5, r1
> +	mov	r6, r2
> +	mov	r7, r3
>  
>  	ldr	r0, =HSCTLR_KERNEL
>  	mcr	p15, 4, r0, c1, c0, 0		@ HSCTLR
> @@ -111,23 +108,28 @@ ASM_FUNC(jump_kernel)
>  	bl	find_logical_id
>  	bl	setup_stack
>  
> -	ldr	lr, [r5], #4
> -	ldm	r5, {r0 - r2}
> -
> -	ldr	r4, =flag_no_el3
> -	ldr	r4, [r4]
> -	cmp	r4, #1
> -	bxeq	lr				@ no EL3
> +	mov	r0, r5
> +	mov	r1, r6
> +	mov	r2, r7
> +	ldr	r3, =SPSR_KERNEL
>  
> -	ldr	r4, =SPSR_KERNEL
>  	/* Return in thumb2 mode when bit 0 of address is 1 */
> -	tst	lr, #1
> -	orrne	r4, #PSR_T
> +	tst	r4, #1
> +	orrne	r3, #PSR_T
> +
> +	mrs	r5, cpsr
> +	and	r5, #PSR_MODE_MASK
> +	cmp	r5, #PSR_MON
> +	beq	eret_at_mon
> +	cmp	r5, #PSR_HYP
> +	beq	eret_at_hyp
> +	b	.
>  
> -	msr	spsr_cxf, r4
> +eret_at_mon:
> +	mov	lr, r4
> +	msr	spsr_mon, r3
>  	movs	pc, lr
> -
> -	.section .data
> -	.align 2
> -flag_no_el3:
> -	.long 0
> +eret_at_hyp:
> +	msr	elr_hyp, r4
> +	msr	spsr_hyp, r3
> +	eret



