[patch 19/37] x86/smpboot: Switch to hotplug core state synchronization
Brian Gerst
brgerst at gmail.com
Sat Apr 15 05:58:30 PDT 2023
On Fri, Apr 14, 2023 at 7:44 PM Thomas Gleixner <tglx at linutronix.de> wrote:
>
> The new AP state tracking and synchronization mechanism in the CPU hotplug
> core code allows quite a bit of x86-specific code to be removed:
>
> 1) The AP alive synchronization based on cpumasks
>
> 2) The decision whether an AP can be brought up again
>
> Signed-off-by: Thomas Gleixner <tglx at linutronix.de>
> Cc: Juergen Gross <jgross at suse.com>
> Cc: Boris Ostrovsky <boris.ostrovsky at oracle.com>
> Cc: xen-devel at lists.xenproject.org
> ---
> arch/x86/Kconfig | 1
> arch/x86/include/asm/smp.h | 7 +
> arch/x86/kernel/smp.c | 1
> arch/x86/kernel/smpboot.c | 159 ++++++++++-----------------------------------
> arch/x86/xen/smp_hvm.c | 16 +---
> arch/x86/xen/smp_pv.c | 39 ++++++-----
> 6 files changed, 72 insertions(+), 151 deletions(-)
>
> --- a/arch/x86/Kconfig
> +++ b/arch/x86/Kconfig
> @@ -272,6 +272,7 @@ config X86
> select HAVE_UNSTABLE_SCHED_CLOCK
> select HAVE_USER_RETURN_NOTIFIER
> select HAVE_GENERIC_VDSO
> + select HOTPLUG_CORE_SYNC_FULL if SMP
> select HOTPLUG_SMT if SMP
> select IRQ_FORCED_THREADING
> select NEED_PER_CPU_EMBED_FIRST_CHUNK
> --- a/arch/x86/include/asm/smp.h
> +++ b/arch/x86/include/asm/smp.h
> @@ -38,6 +38,8 @@ struct smp_ops {
> void (*crash_stop_other_cpus)(void);
> void (*smp_send_reschedule)(int cpu);
>
> + void (*cleanup_dead_cpu)(unsigned cpu);
> + void (*poll_sync_state)(void);
> int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
> int (*cpu_disable)(void);
> void (*cpu_die)(unsigned int cpu);
> @@ -90,7 +92,8 @@ static inline int __cpu_disable(void)
>
> static inline void __cpu_die(unsigned int cpu)
> {
> - smp_ops.cpu_die(cpu);
> + if (smp_ops.cpu_die)
> + smp_ops.cpu_die(cpu);
> }
>
> static inline void play_dead(void)
> @@ -122,8 +125,6 @@ void native_smp_cpus_done(unsigned int m
> int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
> int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
> int native_cpu_disable(void);
> -int common_cpu_die(unsigned int cpu);
> -void native_cpu_die(unsigned int cpu);
> void hlt_play_dead(void);
> void native_play_dead(void);
> void play_dead_common(void);
> --- a/arch/x86/kernel/smp.c
> +++ b/arch/x86/kernel/smp.c
> @@ -269,7 +269,6 @@ struct smp_ops smp_ops = {
> .smp_send_reschedule = native_smp_send_reschedule,
>
> .cpu_up = native_cpu_up,
> - .cpu_die = native_cpu_die,
> .cpu_disable = native_cpu_disable,
> .play_dead = native_play_dead,
>
> --- a/arch/x86/kernel/smpboot.c
> +++ b/arch/x86/kernel/smpboot.c
> @@ -57,6 +57,7 @@
> #include <linux/pgtable.h>
> #include <linux/overflow.h>
> #include <linux/stackprotector.h>
> +#include <linux/cpuhotplug.h>
>
> #include <asm/acpi.h>
> #include <asm/cacheinfo.h>
> @@ -101,9 +102,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_die_map);
> DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
> EXPORT_PER_CPU_SYMBOL(cpu_info);
>
> -/* All of these masks are initialized in setup_cpu_local_masks() */
> -static cpumask_var_t cpu_initialized_mask;
> -static cpumask_var_t cpu_callout_mask;
> /* Representing CPUs for which sibling maps can be computed */
> static cpumask_var_t cpu_sibling_setup_mask;
>
> @@ -169,8 +167,8 @@ static void smp_callin(void)
> int cpuid = smp_processor_id();
>
> /*
> - * If waken up by an INIT in an 82489DX configuration
> - * cpu_callout_mask guarantees we don't get here before an
> + * If woken up by an INIT in an 82489DX configuration the alive
> + * synchronization guarantees we don't get here before an
> * INIT_deassert IPI reaches our local APIC, so it is now safe to
> * touch our local APIC.
> *
> @@ -216,17 +214,6 @@ static void ap_calibrate_delay(void)
> cpu_data(smp_processor_id()).loops_per_jiffy = loops_per_jiffy;
> }
>
> -static void wait_for_master_cpu(int cpu)
> -{
> - /*
> - * Wait for release by control CPU before continuing with AP
> - * initialization.
> - */
> - WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
> - while (!cpumask_test_cpu(cpu, cpu_callout_mask))
> - cpu_relax();
> -}
> -
> /*
> * Activate a secondary processor.
> */
> @@ -247,11 +234,10 @@ static void notrace start_secondary(void
> cpu_init_exception_handling();
>
> /*
> - * Sync point with wait_cpu_initialized(). Sets AP in
> - * cpu_initialized_mask and then waits for the control CPU
> - * to release it.
> + * Sync point with the hotplug core. Sets the sync state to ALIVE
> + * and waits for the control CPU to release it.
> */
> - wait_for_master_cpu(raw_smp_processor_id());
> + cpuhp_ap_sync_alive();
>
> cpu_init();
> rcu_cpu_starting(raw_smp_processor_id());
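For anyone following along: my mental model of the handshake that replaces
the cpu_initialized_mask/cpu_callout_mask pair is a single per-CPU sync
state owned by the core, roughly like this (names and states illustrative,
not the actual core implementation, which presumably also has timeouts and
more states):

    enum { STATE_DEAD, STATE_KICKED, STATE_ALIVE, STATE_RELEASED };

    /* AP side, standing in for cpuhp_ap_sync_alive(): announce that we
     * are alive, then spin until the control CPU releases us. Same
     * rendezvous the removed wait_for_master_cpu() implemented with
     * two cpumasks. */
    static void ap_sync_alive(atomic_t *st)
    {
            atomic_set(st, STATE_ALIVE);
            while (atomic_read(st) != STATE_RELEASED)
                    cpu_relax();
    }

    /* Control-CPU side: wait for the AP to show up, then release it. */
    static void wait_and_release_ap(atomic_t *st)
    {
            while (atomic_read(st) != STATE_ALIVE)
                    cpu_relax();
            atomic_set(st, STATE_RELEASED);
    }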
> @@ -285,7 +271,6 @@ static void notrace start_secondary(void
> set_cpu_online(smp_processor_id(), true);
> lapic_online();
> unlock_vector_lock();
> - cpu_set_state_online(smp_processor_id());
> x86_platform.nmi_init();
>
> /* enable local interrupts */
> @@ -736,9 +721,10 @@ static void impress_friends(void)
> * Allow the user to impress friends.
> */
> pr_debug("Before bogomips\n");
> - for_each_possible_cpu(cpu)
> - if (cpumask_test_cpu(cpu, cpu_callout_mask))
> + for_each_possible_cpu(cpu) {
> + if (cpumask_test_cpu(cpu, cpu_online_mask))
> bogosum += cpu_data(cpu).loops_per_jiffy;
This should be the same as for_each_online_cpu().
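That is, the possible-CPUs walk plus the online check collapses to
(untested, assuming nothing in the snipped part of the loop body still
needs the new braces):

    for_each_online_cpu(cpu)
            bogosum += cpu_data(cpu).loops_per_jiffy;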
--
Brian Gerst