[PATCHv4 14/14] x86/acpi: Add support for CPU offlining for ACPI MADT wakeup method
Huang, Kai
kai.huang at intel.com
Tue Dec 5 15:36:55 PST 2023
> +
> +static void acpi_mp_stop_other_cpus(int wait)
> +{
> + smp_shutdown_nonboot_cpus(smp_processor_id());
> +}
Are this function and ...
+ smp_ops.stop_other_cpus = acpi_mp_stop_other_cpus;
... this assignment below still needed?
I think the current native_stop_other_cpus() should just work, given that
you have set up ...
+ smp_ops.crash_play_dead = crash_acpi_mp_play_dead;
... for TDX guests?
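Just to illustrate what I mean (a minimal sketch, not from your patch --
and note smp_ops.play_dead below is my guess at the other hook this series
installs):

	/*
	 * Sketch: keep native_stop_other_cpus() and only override the
	 * play_dead paths for the ACPI MADT wakeup method.
	 */
	smp_ops.play_dead	= acpi_mp_play_dead;	/* guessed name */
	smp_ops.crash_play_dead	= crash_acpi_mp_play_dead;
	/* smp_ops.stop_other_cpus left as native_stop_other_cpus() */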
> +
> +/* The argument is required to match type of x86_mapping_info::alloc_pgt_page */
> +static void __init *alloc_pgt_page(void *dummy)
> +{
> + return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
> +}
> +
> +/*
> + * Make sure asm_acpi_mp_play_dead() is present in the identity mapping at
> + * the same place as in the kernel page tables. asm_acpi_mp_play_dead() switches
> + * to the identity mapping and the function has to be present at the same spot in
> + * the virtual address space before and after switching page tables.
> + */
> +static int __init init_transition_pgtable(pgd_t *pgd)
> +{
> + pgprot_t prot = PAGE_KERNEL_EXEC_NOENC;
> + unsigned long vaddr, paddr;
> + p4d_t *p4d;
> + pud_t *pud;
> + pmd_t *pmd;
> + pte_t *pte;
> +
> + vaddr = (unsigned long)asm_acpi_mp_play_dead;
> + pgd += pgd_index(vaddr);
> + if (!pgd_present(*pgd)) {
> + p4d = (p4d_t *)alloc_pgt_page(NULL);
> + if (!p4d)
> + return -ENOMEM;
> + set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
> + }
> + p4d = p4d_offset(pgd, vaddr);
> + if (!p4d_present(*p4d)) {
> + pud = (pud_t *)alloc_pgt_page(NULL);
> + if (!pud)
> + return -ENOMEM;
> + set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
> + }
> + pud = pud_offset(p4d, vaddr);
> + if (!pud_present(*pud)) {
> + pmd = (pmd_t *)alloc_pgt_page(NULL);
> + if (!pmd)
> + return -ENOMEM;
> + set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
> + }
> + pmd = pmd_offset(pud, vaddr);
> + if (!pmd_present(*pmd)) {
> + pte = (pte_t *)alloc_pgt_page(NULL);
> + if (!pte)
> + return -ENOMEM;
> + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
> + }
> + pte = pte_offset_kernel(pmd, vaddr);
> +
> + paddr = __pa(vaddr);
> + set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
> +
> + return 0;
> +}
Sorry for raising this late. I think we could also use
kernel_ident_mapping_init() to implement init_transition_pgtable(), by
setting struct x86_mapping_info::offset to __PAGE_OFFSET?
It looks like set_up_temporary_mappings() in arch/x86/power/hibernate_64.c
uses the same trick.
Anyway, I am not sure how many LoC (assuming it can be done) would be
saved, so it's up to you.
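Something like the below is what I have in mind (completely untested; note
the caveat that with .offset = __PAGE_OFFSET the function gets mapped at
its direct-map alias rather than at its kernel-text address, so the code
that jumps to it would have to use that alias):

	static int __init init_transition_pgtable(pgd_t *pgd)
	{
		struct x86_mapping_info info = {
			.alloc_pgt_page	= alloc_pgt_page,
			.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
			.offset		= __PAGE_OFFSET,
		};
		unsigned long mstart, mend;

		mstart = PAGE_ALIGN_DOWN(__pa(asm_acpi_mp_play_dead));
		mend = mstart + PAGE_SIZE;

		return kernel_ident_mapping_init(&info, pgd, mstart, mend);
	}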
> +
> +static void __init free_pte(pmd_t *pmd)
> +{
> + pte_t *pte = pte_offset_kernel(pmd, 0);
> +
> + memblock_free(pte, PAGE_SIZE);
> +}
> +
> +static void __init free_pmd(pud_t *pud)
> +{
> + pmd_t *pmd = pmd_offset(pud, 0);
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PMD; i++) {
> + if (!pmd_present(pmd[i]))
> + continue;
> +
> + if (pmd_leaf(pmd[i]))
> + continue;
> +
> + free_pte(&pmd[i]);
> + }
> +
> + memblock_free(pmd, PAGE_SIZE);
> +}
> +
> +static void __init free_pud(p4d_t *p4d)
> +{
> + pud_t *pud = pud_offset(p4d, 0);
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PUD; i++) {
> + if (!pud_present(pud[i]))
> + continue;
> +
> + if (pud_leaf(pud[i]))
> + continue;
> +
> + free_pmd(&pud[i]);
> + }
> +
> + memblock_free(pud, PAGE_SIZE);
> +}
> +
> +static void __init free_p4d(pgd_t *pgd)
> +{
> + p4d_t *p4d = p4d_offset(pgd, 0);
> + int i;
> +
> + for (i = 0; i < PTRS_PER_P4D; i++) {
> + if (!p4d_present(p4d[i]))
> + continue;
> +
> + free_pud(&p4d[i]);
> + }
> +
> + if (pgtable_l5_enabled())
> + memblock_free(p4d, PAGE_SIZE);
> +}
> +
> +static void __init free_pgd(pgd_t *pgd)
> +{
> + int i;
> +
> + for (i = 0; i < PTRS_PER_PGD; i++) {
> + if (!pgd_present(pgd[i]))
> + continue;
> +
> + free_p4d(&pgd[i]);
> + }
> +
> + memblock_free(pgd, PAGE_SIZE);
> +}
It's a little bit sad such cleanup code isn't in common code, e.g., with a
	void (*free_pgt_page)(void *);
to allow the user to specify how to free the page table pages.
But this can be a future job if needed.
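E.g., something like this (hypothetical -- no such field exists in struct
x86_mapping_info today) next to the existing alloc_pgt_page hook:

	struct x86_mapping_info {
		void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
		void (*free_pgt_page)(void *);	 /* hypothetical: free one back */
		void *context;			 /* context for alloc_pgt_page */
		unsigned long page_flag;	 /* page flag for PMD or PUD entry */
		unsigned long offset;		 /* ident mapping offset */
		bool direct_gbpages;		 /* PUD level 1GB page support */
		unsigned long kernpg_flag;	 /* kernel pagetable flag override */
	};

	/* For this series the hook would just be a memblock wrapper: */
	static void __init free_pgt_page(void *pgt)
	{
		memblock_free(pgt, PAGE_SIZE);
	}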
[...]
> int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
> const unsigned long end)
> {
> struct acpi_madt_multiproc_wakeup *mp_wake;
>
> mp_wake = (struct acpi_madt_multiproc_wakeup *)header;
> - if (BAD_MADT_ENTRY(mp_wake, end))
> +
> + /*
> + * Cannot use the standard BAD_MADT_ENTRY() to sanity check the @mp_wake
> + * entry. 'sizeof (struct acpi_madt_multiproc_wakeup)' can be larger
> + * than the actual size of the MP wakeup entry in ACPI table because the
> + * 'reset_vector' is only available in the V1 MP wakeup structure.
> + */
Space/tab issue in the comment above.
> + if (!mp_wake)
> + return -EINVAL;
> + if (end - (unsigned long)mp_wake < ACPI_MADT_MP_WAKEUP_SIZE_V0)
> + return -EINVAL;
> + if (mp_wake->header.length < ACPI_MADT_MP_WAKEUP_SIZE_V0)
> return -EINVAL;
>
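FWIW, the entry layout that makes the versioned size checks necessary, as
I read the spec (please double-check the exact field names and sizes):

	/*
	 * The V0 structure ends right after the mailbox address (16 bytes);
	 * only V1 appends the 8-byte reset vector, so using
	 * sizeof(struct acpi_madt_multiproc_wakeup) over-estimates the
	 * size of a V0 entry:
	 *
	 *	subtable header (type + length)	 2 bytes
	 *	mailbox version			 2 bytes
	 *	reserved			 4 bytes
	 *	mailbox address			 8 bytes => ACPI_MADT_MP_WAKEUP_SIZE_V0 (16)
	 *	reset vector (V1 only)		 8 bytes => ACPI_MADT_MP_WAKEUP_SIZE_V1 (24)
	 */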