[patch v3 34/36] x86/smpboot: Implement a bit spinlock to protect the realmode stack

Peter Zijlstra peterz at infradead.org
Tue May 9 06:13:40 PDT 2023


On Mon, May 08, 2023 at 09:44:22PM +0200, Thomas Gleixner wrote:

> @@ -252,6 +252,17 @@ SYM_INNER_LABEL(secondary_startup_64_no_
>  	movq	TASK_threadsp(%rax), %rsp
>  
>  	/*
> +	 * Now that this CPU is running on its own stack, drop the realmode
> +	 * protection. For the boot CPU the pointer is NULL!
> +	 */
> +	movq	trampoline_lock(%rip), %rax
> +	testq	%rax, %rax
> +	jz	.Lsetup_gdt
> +	movl	$0, (%rax)
> +
> +.Lsetup_gdt:
> +	/*
>  	 * We must switch to a new descriptor in kernel space for the GDT
>  	 * because soon the kernel won't have access anymore to the userspace
>  	 * addresses where we're currently running on. We have to do that here

> --- a/arch/x86/realmode/rm/trampoline_64.S
> +++ b/arch/x86/realmode/rm/trampoline_64.S
> @@ -37,6 +37,24 @@
>  	.text
>  	.code16
>  
> +.macro LOAD_REALMODE_ESP
> +	/*
> +	 * Make sure only one CPU fiddles with the realmode stack
> +	 */
> +.Llock_rm\@:
> +	btl	$0, tr_lock
> +	jnc	2f
> +	pause
> +	jmp	.Llock_rm\@
> +2:
> +	lock
> +	btsl	$0, tr_lock
> +	jc	.Llock_rm\@

Do we really care about performance here, or should we pick the simpler
form? Also, 'lock' is a prefix, not an instruction.

.Llock_rm\@:
	lock btsl	$0, tr_lock
	jnc		2f
	pause
	jmp		.Llock_rm\@
2:

> +
> +	# Setup stack
> +	movl	$rm_stack_end, %esp
> +.endm
> +
>  	.balign	PAGE_SIZE



More information about the linux-riscv mailing list