[PATCH 1/1] sched/core: Fix migrate_swap() vs. hotplug

Kuyo Chang kuyo.chang at mediatek.com
Thu Jun 5 20:46:57 PDT 2025


On Thu, 2025-06-05 at 12:00 +0200, Peter Zijlstra wrote:
> 
> On Mon, Jun 02, 2025 at 03:22:13PM +0800, Kuyo Chang wrote:
> 
> How easy can you reproduce this?
> 

The probability of reproducing this is very low; it occurs roughly once
every 1~2 weeks.
I think this issue can only occur if all of the races below happen:
1. stop_two_cpus() vs. hotplug
2. cpu1 schedule()
3. ttwu queue IPI latency

So my initial intention was to fix this by adding
cpus_read_lock()/cpus_read_unlock() around stop_two_cpus() (race 1
above), as sketched below.
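
Something like the following in migrate_swap() (untested sketch; the
exact call site and arguments may differ slightly):

	/*
	 * Pin the hotplug state across stop_two_cpus() so that
	 * sched_cpu_deactivate()/balance_push_set() cannot race with
	 * queueing the two stopper works.
	 */
	cpus_read_lock();
	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
	cpus_read_unlock();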

> > So, the potential race scenario is:
> > 
> >     CPU0                                    CPU1
> >     // doing migrate_swap(cpu0/cpu1)
> >     stop_two_cpus()
> >                                             ...
> >                                             // doing _cpu_down()
> >                                             sched_cpu_deactivate()
> >                                               set_cpu_active(cpu, false);
> >                                               balance_push_set(cpu, true);
> >     cpu_stop_queue_two_works
> >         __cpu_stop_queue_work(stopper1,...);
> >         __cpu_stop_queue_work(stopper2,..);
> >     stop_cpus_in_progress -> true
> >         preempt_enable();
> >                                             ...
> >                                             1st balance_push
> >                                               stop_one_cpu_nowait
> >                                                 cpu_stop_queue_work
> >                                                   __cpu_stop_queue_work
> >                                                     list_add_tail -> 1st add of push_work
> >                                                   wake_up_q(&wakeq); -> wakeq is empty:
> >                                                     the stopper is already on the wakeq
> >                                                     of migrate_swap
> >     preempt_disable
> >     wake_up_q(&wakeq);
> >         wake_up_process // wakeup migrate/0
> >             try_to_wake_up
> >                 ttwu_queue
> >                     ttwu_queue_cond -> meets the case below:
> >                         if (cpu == smp_processor_id())
> >                                 return false;
> >                 ttwu_do_activate
> >                 // migrate/0 wakeup done
> >         wake_up_process // wakeup migrate/1
> >             try_to_wake_up
> >                 ttwu_queue
> >                     ttwu_queue_cond
> >                     ttwu_queue_wakelist
> >                         __ttwu_queue_wakelist
> >                             __smp_call_single_queue
> >     preempt_enable();
> > 
> >                                             2nd balance_push
> >                                               stop_one_cpu_nowait
> >                                                 cpu_stop_queue_work
> >                                                   __cpu_stop_queue_work
> >                                                     list_add_tail -> 2nd add of push_work,
> >                                                       so the double list add is detected
> >                                             ...
> >                                             ...
> >                                             cpu1 gets the IPI, runs sched_ttwu_pending,
> >                                             and wakes up migrate/1
> > 
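
(For reference: the 1st balance_push's wake_up_q(&wakeq) above finds its
local wakeq empty because wake_q_add() refuses to queue a task that is
already on another wake queue -- here, the wakeq built by
stop_two_cpus(). Simplified sketch of the helper from
kernel/sched/core.c:)

	static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
	{
		struct wake_q_node *node = &task->wake_q;

		/*
		 * If ->wake_q.next is already non-NULL, the task is queued
		 * on some other wake_q (here: migrate_swap's) and that
		 * queuer is responsible for the wakeup; the cmpxchg fails
		 * and our local wakeq stays empty.
		 */
		smp_mb__before_atomic();
		if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
			return false;

		/* The head is context local, there can be no concurrency. */
		*head->lastp = node;
		head->lastp = &node->next;
		return true;
	}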
> 
> So this balance_push() is part of schedule(), and schedule() is
> supposed to switch to the stopper task, but because of this race
> condition the stopper task is stuck in the WAKING state and not
> actually visible to be picked.
> 
> Therefore CPU1 can do another schedule() and end up doing another
> balance_push() even though the last one hasn't completed yet.
> 
> So how about we do something like this? Does this help?
> 

Thank you for your patch.
I believe this patch also effectively addresses this race condition.
I will queue it in our test pool for testing.

> ---
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 62b3416f5e43..c37b80bd53e6 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3939,6 +3939,11 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
>         if (!scx_allow_ttwu_queue(p))
>                 return false;
> 
> +#ifdef CONFIG_SMP
> +       if (p->sched_class == &stop_sched_class)
> +               return false;
> +#endif
> +
>         /*
>          * Do not complicate things with the async wake_list while the CPU is
>          * in hotplug state.
> diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
> index 5d2d0562115b..8855a50cc216 100644
> --- a/kernel/stop_machine.c
> +++ b/kernel/stop_machine.c
> @@ -82,18 +82,15 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
>  }
> 
>  static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
> -                                       struct cpu_stop_work *work,
> -                                       struct wake_q_head *wakeq)
> +                                 struct cpu_stop_work *work)
>  {
>         list_add_tail(&work->list, &stopper->works);
> -       wake_q_add(wakeq, stopper->thread);
>  }
> 
>  /* queue @work to @stopper.  if offline, @work is completed immediately */
>  static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
>  {
>         struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
> -       DEFINE_WAKE_Q(wakeq);
>         unsigned long flags;
>         bool enabled;
> 
> @@ -101,12 +98,12 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
>         raw_spin_lock_irqsave(&stopper->lock, flags);
>         enabled = stopper->enabled;
>         if (enabled)
> -               __cpu_stop_queue_work(stopper, work, &wakeq);
> +               __cpu_stop_queue_work(stopper, work);
>         else if (work->done)
>                 cpu_stop_signal_done(work->done);
>         raw_spin_unlock_irqrestore(&stopper->lock, flags);
> 
> -       wake_up_q(&wakeq);
> +       wake_up_process(stopper->thread);

BTW, should we add an enabled check here?

	if (enabled)
		wake_up_process(stopper->thread);
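
i.e., in cpu_stop_queue_work() on top of your patch (untested):

	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	/* only wake the stopper if we actually queued work for it */
	if (enabled)
		wake_up_process(stopper->thread);
	preempt_enable();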

>         preempt_enable();
> 
>         return enabled;
> @@ -264,7 +261,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
>  {
>         struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
>         struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
> -       DEFINE_WAKE_Q(wakeq);
>         int err;
> 
>  retry:
> @@ -300,8 +296,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
>         }
> 
>         err = 0;
> -       __cpu_stop_queue_work(stopper1, work1, &wakeq);
> -       __cpu_stop_queue_work(stopper2, work2, &wakeq);
> +       __cpu_stop_queue_work(stopper1, work1);
> +       __cpu_stop_queue_work(stopper2, work2);
> 
>  unlock:
>         raw_spin_unlock(&stopper2->lock);
> @@ -316,7 +312,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
>                 goto retry;
>         }
> 
> -       wake_up_q(&wakeq);
> +       wake_up_process(stopper1->thread);
> +       wake_up_process(stopper2->thread);
>         preempt_enable();
> 
>         return err;


