[PATCH 24/33] kthread: Refine naming of affinity related fields
Waiman Long
llong at redhat.com
Fri Dec 26 13:37:31 PST 2025
On 12/24/25 8:45 AM, Frederic Weisbecker wrote:
> The kthreads' preferred-affinity fields use "hotplug" as the base of
> their naming because affinity management was initially expected to
> deal only with CPU hotplug.
>
> The scope of this role is now broadening to also deal with cpuset
> isolated partition updates.
>
> Switch the naming accordingly.
>
> Signed-off-by: Frederic Weisbecker <frederic at kernel.org>
> ---
> kernel/kthread.c | 38 +++++++++++++++++++-------------------
> 1 file changed, 19 insertions(+), 19 deletions(-)
>
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 99a3808d086f..f1e4f1f35cae 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -35,8 +35,8 @@ static DEFINE_SPINLOCK(kthread_create_lock);
> static LIST_HEAD(kthread_create_list);
> struct task_struct *kthreadd_task;
>
> -static LIST_HEAD(kthreads_hotplug);
> -static DEFINE_MUTEX(kthreads_hotplug_lock);
> +static LIST_HEAD(kthread_affinity_list);
> +static DEFINE_MUTEX(kthread_affinity_lock);
>
> struct kthread_create_info
> {
> @@ -69,7 +69,7 @@ struct kthread {
> /* To store the full name if task comm is truncated. */
> char *full_name;
> struct task_struct *task;
> - struct list_head hotplug_node;
> + struct list_head affinity_node;
> struct cpumask *preferred_affinity;
> };
>
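
Just to double check my reading of the renamed fields: each kthread's
affinity_node links into the global kthread_affinity_list under
kthread_affinity_lock. A minimal sketch of the registration pattern as
it ends up after this patch (the helper name is made up for
illustration; the locking and list calls are taken from the hunks
below):

	/* Hypothetical helper; mirrors kthread_affine_node() below. */
	static void kthread_affinity_register(struct kthread *kthread)
	{
		mutex_lock(&kthread_affinity_lock);
		/* A kthread must not be registered twice. */
		WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
		list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
		mutex_unlock(&kthread_affinity_lock);
	}

kthread_exit() undoes this with list_del() under the same mutex, so the
new lock name now describes both the hotplug path and the upcoming
cpuset path.
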
> @@ -128,7 +128,7 @@ bool set_kthread_struct(struct task_struct *p)
>
> init_completion(&kthread->exited);
> init_completion(&kthread->parked);
> - INIT_LIST_HEAD(&kthread->hotplug_node);
> + INIT_LIST_HEAD(&kthread->affinity_node);
> p->vfork_done = &kthread->exited;
>
> kthread->task = p;
> @@ -323,10 +323,10 @@ void __noreturn kthread_exit(long result)
> {
> struct kthread *kthread = to_kthread(current);
> kthread->result = result;
> - if (!list_empty(&kthread->hotplug_node)) {
> - mutex_lock(&kthreads_hotplug_lock);
> - list_del(&kthread->hotplug_node);
> - mutex_unlock(&kthreads_hotplug_lock);
> + if (!list_empty(&kthread->affinity_node)) {
> + mutex_lock(&kthread_affinity_lock);
> + list_del(&kthread->affinity_node);
> + mutex_unlock(&kthread_affinity_lock);
>
> if (kthread->preferred_affinity) {
> kfree(kthread->preferred_affinity);
> @@ -390,9 +390,9 @@ static void kthread_affine_node(void)
> return;
> }
>
> - mutex_lock(&kthreads_hotplug_lock);
> - WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
> - list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
> + mutex_lock(&kthread_affinity_lock);
> + WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
> + list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
> /*
> * The node cpumask is racy when read from kthread() but:
> * - a racing CPU going down will either fail on the subsequent
> @@ -402,7 +402,7 @@ static void kthread_affine_node(void)
> */
> kthread_fetch_affinity(kthread, affinity);
> set_cpus_allowed_ptr(current, affinity);
> - mutex_unlock(&kthreads_hotplug_lock);
> + mutex_unlock(&kthread_affinity_lock);
>
> free_cpumask_var(affinity);
> }
> @@ -873,16 +873,16 @@ int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
> goto out;
> }
>
> - mutex_lock(&kthreads_hotplug_lock);
> + mutex_lock(&kthread_affinity_lock);
> cpumask_copy(kthread->preferred_affinity, mask);
> - WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
> - list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
> + WARN_ON_ONCE(!list_empty(&kthread->affinity_node));
> + list_add_tail(&kthread->affinity_node, &kthread_affinity_list);
> kthread_fetch_affinity(kthread, affinity);
>
> scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
> set_cpus_allowed_force(p, affinity);
>
> - mutex_unlock(&kthreads_hotplug_lock);
> + mutex_unlock(&kthread_affinity_lock);
> out:
> free_cpumask_var(affinity);
>
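
FWIW the new naming also reads better from the caller's side. A caller
typically registers a preferred mask right after creating the kthread
and before waking it, along these lines (illustrative only; fn, data,
and the node-0 mask are placeholders, not from this patch):

	struct task_struct *t = kthread_create(fn, data, "example");

	if (!IS_ERR(t)) {
		/* Prefer the CPUs of NUMA node 0 for this kthread. */
		kthread_affine_preferred(t, cpumask_of_node(0));
		wake_up_process(t);
	}
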
> @@ -903,9 +903,9 @@ static int kthreads_online_cpu(unsigned int cpu)
> struct kthread *k;
> int ret;
>
> - guard(mutex)(&kthreads_hotplug_lock);
> + guard(mutex)(&kthread_affinity_lock);
>
> - if (list_empty(&kthreads_hotplug))
> + if (list_empty(&kthread_affinity_list))
> return 0;
>
> if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
> @@ -913,7 +913,7 @@ static int kthreads_online_cpu(unsigned int cpu)
>
> ret = 0;
>
> - list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
> + list_for_each_entry(k, &kthread_affinity_list, affinity_node) {
> if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
> kthread_is_per_cpu(k->task))) {
> ret = -EINVAL;
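
The CPU-online side keeps working unchanged under the new names:
kthreads_online_cpu() walks kthread_affinity_list and recomputes each
registered kthread's effective mask. For reference, such a callback
would be hooked into the hotplug state machine roughly like this
(illustrative only; the actual hotplug state and init path used by
kthread.c may differ):

	static int __init kthread_affinity_hotplug_init(void)
	{
		/* Hypothetical registration of the online callback. */
		return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "kthreads:online",
					 kthreads_online_cpu, NULL);
	}

Anyway, the rename itself is a straightforward s/hotplug/affinity/ and
everything still pairs up correctly.
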
Acked-by: Waiman Long <longman at redhat.com>