[PATCH 30/33] kthread: Honour kthreads preferred affinity after cpuset changes
Waiman Long
llong at redhat.com
Fri Dec 26 15:59:34 PST 2025
On 12/24/25 8:45 AM, Frederic Weisbecker wrote:
> When cpuset isolated partitions get updated, unbound kthreads get
> indiscriminately affined to all non-isolated CPUs, regardless of their
> individual affinity preferences.
>
> For example, kswapd is a per-node kthread that prefers to be affine to
> the node it serves. Whenever an isolated partition is created, updated
> or deleted, kswapd's node affinity is broken if any CPU in that node is
> not isolated, because kswapd then gets affined globally.
>
> Fix this by letting the consolidated kthread affinity management code
> perform the affinity update on behalf of cpuset.
>
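(Aside for other readers: per-node or per-driver kthreads opt into this
machinery via kthread_affine_preferred(), declared in <linux/kthread.h>
as visible in the hunk below. A rough sketch of a caller follows; the
"mydrv" names are hypothetical, only the kthread_*() calls are real, and
note that the preference must be registered before the first wakeup:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/topology.h>

    /* Hypothetical kthread that prefers the CPUs of a given NUMA node. */
    static int mydrv_thread_fn(void *data)
    {
            while (!kthread_should_stop())
                    schedule_timeout_interruptible(HZ);
            return 0;
    }

    static struct task_struct *mydrv_start(int nid)
    {
            struct task_struct *t;

            t = kthread_create(mydrv_thread_fn, NULL, "mydrv");
            if (IS_ERR(t))
                    return t;

            /*
             * Record the preferred mask before first wakeup; the kthread
             * core re-applies it across CPU hotplug and, with this patch,
             * across cpuset isolation updates as well.
             */
            WARN_ON_ONCE(kthread_affine_preferred(t, cpumask_of_node(nid)));

            wake_up_process(t);
            return t;
    }

If I remember right, kswapd takes the implicit path instead: created on
its node, its preference comes from k->node rather than an explicit mask.)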
> Signed-off-by: Frederic Weisbecker <frederic at kernel.org>
> ---
> include/linux/kthread.h | 1 +
> kernel/cgroup/cpuset.c | 5 ++---
> kernel/kthread.c | 41 ++++++++++++++++++++++++++++++----------
> kernel/sched/isolation.c | 3 +++
> 4 files changed, 37 insertions(+), 13 deletions(-)
>
> diff --git a/include/linux/kthread.h b/include/linux/kthread.h
> index 8d27403888ce..c92c1149ee6e 100644
> --- a/include/linux/kthread.h
> +++ b/include/linux/kthread.h
> @@ -100,6 +100,7 @@ void kthread_unpark(struct task_struct *k);
> void kthread_parkme(void);
> void kthread_exit(long result) __noreturn;
> void kthread_complete_and_exit(struct completion *, long) __noreturn;
> +int kthreads_update_housekeeping(void);
>
> int kthreadd(void *unused);
> extern struct task_struct *kthreadd_task;
> diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
> index 1cc83a3c25f6..c8cfaf5cd4a1 100644
> --- a/kernel/cgroup/cpuset.c
> +++ b/kernel/cgroup/cpuset.c
> @@ -1208,11 +1208,10 @@ void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
>
> if (top_cs) {
> /*
> + * PF_KTHREAD tasks are handled by housekeeping.
> * PF_NO_SETAFFINITY tasks are ignored.
> - * All per cpu kthreads should have PF_NO_SETAFFINITY
> - * flag set, see kthread_set_per_cpu().
> */
> - if (task->flags & PF_NO_SETAFFINITY)
> + if (task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY))
> continue;
> cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
> } else {
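The flag check looks right to me: per-CPU kthreads are still skipped since
they carry PF_NO_SETAFFINITY anyway, and unbound kthreads are now left to
the kthread core instead of being blindly spread over all non-isolated
CPUs. For review purposes, the new classification boils down to something
like this (illustrative helper, not code this patch adds):

    /* Hypothetical, for illustration: should cpuset skip this task? */
    static bool cpuset_skip_affinity_update(struct task_struct *task)
    {
            /*
             * Kthreads are re-affined by the kthread core through
             * kthreads_update_housekeeping(); tasks that forbid
             * affinity changes must not be touched at all.
             */
            return task->flags & (PF_KTHREAD | PF_NO_SETAFFINITY);
    }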
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 968fa5868d21..03008154249c 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -891,14 +891,7 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
> }
> EXPORT_SYMBOL_GPL(kthread_affine_preferred);
>
> -/*
> - * Re-affine kthreads according to their preferences
> - * and the newly online CPU. The CPU down part is handled
> - * by select_fallback_rq() which default re-affines to
> - * housekeepers from other nodes in case the preferred
> - * affinity doesn't apply anymore.
> - */
> -static int kthreads_online_cpu(unsigned int cpu)
> +static int kthreads_update_affinity(bool force)
> {
> cpumask_var_t affinity;
> struct kthread *k;
> @@ -924,7 +917,8 @@ static int kthreads_online_cpu(unsigned int cpu)
> /*
> * Unbound kthreads without preferred affinity are already affine
> * to housekeeping, whether those CPUs are online or not. So no need
> - * to handle newly online CPUs for them.
> + * to handle newly online CPUs for them. However, housekeeping changes
> + * have to be applied.
> *
> * But kthreads with a preferred affinity or node are different:
> * if none of their preferred CPUs are online and part of
> @@ -932,7 +926,7 @@ static int kthreads_online_cpu(unsigned int cpu)
> * But as soon as one of their preferred CPUs becomes online, they must
> * be affine to them.
> */
> - if (k->preferred_affinity || k->node != NUMA_NO_NODE) {
> + if (force || k->preferred_affinity || k->node != NUMA_NO_NODE) {
> kthread_fetch_affinity(k, affinity);
> set_cpus_allowed_ptr(k->task, affinity);
> }
> @@ -943,6 +937,33 @@ static int kthreads_online_cpu(unsigned int cpu)
> return ret;
> }
>
> +/**
> + * kthreads_update_housekeeping - Update kthreads affinity on cpuset change
> + *
> + * When cpuset changes a partition type to/from "isolated" or updates related
> + * cpumasks, propagate the housekeeping cpumask change to the kthreads'
> + * preferred affinity.
> + *
> + * Returns 0 on success, -ENOMEM if the temporary mask couldn't
> + * be allocated, or -EINVAL in case of an internal error.
> + */
> +int kthreads_update_housekeeping(void)
> +{
> + return kthreads_update_affinity(true);
> +}
> +
> +/*
> + * Re-affine kthreads according to their preferences
> + * and the newly online CPU. The CPU down part is handled
> + * by select_fallback_rq() which default re-affines to
> + * housekeepers from other nodes in case the preferred
> + * affinity doesn't apply anymore.
> + */
> +static int kthreads_online_cpu(unsigned int cpu)
> +{
> + return kthreads_update_affinity(false);
> +}
> +
> static int kthreads_init(void)
> {
> return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
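For other readers: with force=true every tracked kthread now goes through
kthread_fetch_affinity(), so the new housekeeping mask gets folded into
the effective affinity. Conceptually it computes something like the
following (a simplification, assuming the helper still works the way the
earlier preferred-affinity patches set it up, not its exact code):

    /*
     * preferred = the explicit preferred mask if one was registered,
     * else the CPUs of k->node, else all possible CPUs.
     */
    cpumask_and(affinity, preferred, housekeeping_cpumask(HK_TYPE_KTHREAD));
    if (cpumask_empty(affinity))
            cpumask_copy(affinity, housekeeping_cpumask(HK_TYPE_KTHREAD));
    set_cpus_allowed_ptr(k->task, affinity);

That preserves kswapd's node preference whenever at least one CPU of its
node remains a housekeeping CPU, which is exactly the reported problem.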
> diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
> index 84a257d05918..c499474866b8 100644
> --- a/kernel/sched/isolation.c
> +++ b/kernel/sched/isolation.c
> @@ -157,6 +157,9 @@ int housekeeping_update(struct cpumask *isol_mask, enum hk_type type)
> err = tmigr_isolated_exclude_cpumask(isol_mask);
> WARN_ON_ONCE(err < 0);
>
> + err = kthreads_update_housekeeping();
> + WARN_ON_ONCE(err < 0);
> +
> kfree(old);
>
> return err;
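So the update path on an isolation change becomes, as far as I can tell:

    housekeeping_update(isol_mask, type)
        -> tmigr_isolated_exclude_cpumask(isol_mask)
        -> kthreads_update_housekeeping()
            -> kthreads_update_affinity(true)
                -> per kthread: kthread_fetch_affinity() +
                                set_cpus_allowed_ptr()

while cpuset_update_tasks_cpumask() now leaves kthreads alone entirely.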
Reviewed-by: Waiman Long <longman at redhat.com>