[patch v3 31/36] x86/apic: Provide cpu_primary_thread mask
Thomas Gleixner
tglx@linutronix.de
Mon May 29 12:27:13 PDT 2023
On Mon, May 29 2023 at 05:39, Kirill A. Shutemov wrote:
> On Sat, May 27, 2023 at 03:40:02PM +0200, Thomas Gleixner wrote:
> But it gets broken again on "x86/smpboot: Implement a bit spinlock to
> protect the realmode stack" with
>
> [ 0.554079] .... node #0, CPUs: #1 #2
> [ 0.738071] Callback from call_rcu_tasks() invoked.
> [ 10.562065] CPU2 failed to report alive state
> [ 10.566337] #3
> [ 20.570066] CPU3 failed to report alive state
> [ 20.574268] #4
> ...
>
> Notably, CPU1 is missing from the "failed to report" list. So CPU1 takes
> the lock fine, but apparently never unlocks it.
>
> Maybe trampoline_lock(%rip) in head_64.S somehow is not the same as
> &tr_lock in trampoline_64.S. I donno.
It's definitely the same in the regular startup (16-bit mode), but TDX
starts up via:

    trampoline_start64
      trampoline_compat
        LOAD_REALMODE_ESP   <- lock

That path cannot work with the bare tr_lock reference in the
LOAD_REALMODE_ESP macro: the compat entry runs with 32-bit linear
addressing, so it needs the relocated pa_tr_lock alias, while plain
tr_lock is only the link-time offset inside the realmode blob. The
untested delta below should cure it.
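With the lock symbol passed in as a macro argument, the compat entry ends up
spinning on the relocated alias. Roughly this expansion (a sketch only, label
names are illustrative and the ESP load at the end of the macro is unchanged):

.Llock_rm_compat:
	lock btsl	$0, pa_tr_lock		# pa_ alias is fixed up to the linear address at runtime
	jnc		2f			# bit 0 was clear: this CPU owns the realmode stack
	pause					# lock held by another CPU, spin politely
	jmp		.Llock_rm_compat
2:
	# remainder of LOAD_REALMODE_ESP (the actual ESP load) as before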
Thanks,
tglx
---
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -37,12 +37,12 @@
.text
.code16
-.macro LOAD_REALMODE_ESP
+.macro LOAD_REALMODE_ESP lock:req
/*
* Make sure only one CPU fiddles with the realmode stack
*/
.Llock_rm\@:
- lock btsl $0, tr_lock
+ lock btsl $0, \lock
jnc 2f
pause
jmp .Llock_rm\@
@@ -63,7 +63,7 @@ SYM_CODE_START(trampoline_start)
mov %ax, %es
mov %ax, %ss
- LOAD_REALMODE_ESP
+ LOAD_REALMODE_ESP tr_lock
call verify_cpu # Verify the cpu supports long mode
testl %eax, %eax # Check for return code
@@ -106,7 +106,7 @@ SYM_CODE_START(sev_es_trampoline_start)
mov %ax, %es
mov %ax, %ss
- LOAD_REALMODE_ESP
+ LOAD_REALMODE_ESP tr_lock
jmp .Lswitch_to_protected
SYM_CODE_END(sev_es_trampoline_start)
@@ -189,7 +189,7 @@ SYM_CODE_START(pa_trampoline_compat)
* In compatibility mode. Prep ESP and DX for startup_32, then disable
* paging and complete the switch to legacy 32-bit mode.
*/
- LOAD_REALMODE_ESP
+ LOAD_REALMODE_ESP pa_tr_lock
movw $__KERNEL_DS, %dx
movl $(CR0_STATE & ~X86_CR0_PG), %eax
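For completeness, the release side of this bit lock lives on the 64-bit side:
once an AP runs on its own stack it clears the bit again through the pointer
stored in trampoline_lock. Schematically it looks like this (a sketch with
assumed label names, not the actual head_64.S hunk from the series):

	/*
	 * Drop the realmode stack protection once this CPU is on its own
	 * stack. trampoline_lock holds the physical address of tr_lock
	 * and is NULL on the boot CPU. On x86 a plain store is enough
	 * to release the bit lock.
	 */
	movq	trampoline_lock(%rip), %rax
	testq	%rax, %rax		# boot CPU: nothing to release
	jz	.Llock_released
	movl	$0, (%rax)		# clear the lock word, next AP may take the realmode stack
.Llock_released: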