[PATCH v11 2/3] sched/fair: Scan cluster before scanning LLC in wake-up path

Peter Zijlstra <peterz at infradead.org>
Fri Oct 20 06:41:30 PDT 2023


On Thu, Oct 19, 2023 at 11:33:22AM +0800, Yicong Yang wrote:

> @@ -7349,8 +7373,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
>  	 */
>  	if (prev != target && cpus_share_cache(prev, target) &&
>  	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
> -	    asym_fits_cpu(task_util, util_min, util_max, prev))
> -		return prev;
> +	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
> +		if (!static_branch_unlikely(&sched_cluster_active))
> +			return prev;
> +
> +		if (cpus_share_resources(prev, target))
> +			return prev;
> +	}
>  
>  	/*
>  	 * Allow a per-cpu kthread to stack with the wakee if the
> @@ -7377,7 +7406,11 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
>  	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
>  	    cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
>  	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
> -		return recent_used_cpu;
> +		if (!static_branch_unlikely(&sched_cluster_active))
> +			return recent_used_cpu;
> +
> +		if (cpus_share_resources(recent_used_cpu, target))
> +			return recent_used_cpu;
>  	}
>  
>  	/*
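
For context: with these hunks the prev / recent_used_cpu fast paths only
short-circuit when the candidate CPU also shares cluster resources with
target; otherwise we fall through to the scan below, which can then prefer
an idle CPU inside target's cluster before trying the rest of the LLC. The
hunks lean on two helpers introduced earlier in the series and not quoted
in this mail; roughly, as a sketch (with sd_share_id standing in for a
per-CPU id of the lowest shared-cache level, assigned at domain build time,
so details may differ from the actual patches):

/* false by default; enabled once a cluster domain level exists */
DEFINE_STATIC_KEY_FALSE(sched_cluster_active);

/*
 * Whether the two CPUs share the lowest cache level (e.g. an L2
 * cluster), as opposed to merely sharing the LLC.
 */
bool cpus_share_resources(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
}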

I've changed those like so:


--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7420,10 +7420,9 @@ static int select_idle_sibling(struct ta
 	if (prev != target && cpus_share_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
 	    asym_fits_cpu(task_util, util_min, util_max, prev)) {
-		if (!static_branch_unlikely(&sched_cluster_active))
-			return prev;
 
-		if (cpus_share_resources(prev, target))
+		if (!static_branch_unlikely(&sched_cluster_active) ||
+		    cpus_share_resources(prev, target))
 			return prev;
 	}
 
@@ -7452,11 +7451,11 @@ static int select_idle_sibling(struct ta
 	    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 	    cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) &&
 	    asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) {
-		if (!static_branch_unlikely(&sched_cluster_active))
-			return recent_used_cpu;
 
-		if (cpus_share_resources(recent_used_cpu, target))
+		if (!static_branch_unlikely(&sched_cluster_active) ||
+		    cpus_share_resources(recent_used_cpu, target))
 			return recent_used_cpu;
+
 	}
 
 	/*
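
The fold preserves behavior: || short-circuits, so with the static key
disabled static_branch_unlikely() evaluates to false, the negation makes
the condition immediately true, and cpus_share_resources() is never
called; we return prev / recent_used_cpu exactly as before the series.
For completeness, a sketch of how the key could be flipped when the sched
domains are rebuilt (set_cluster_active() is a hypothetical name; the real
series may well use the inc/dec or cpuslocked static key variants):

#include <linux/jump_label.h>

/* @has_cluster: computed while (re)building the sched domains */
static void set_cluster_active(bool has_cluster)
{
	if (has_cluster)
		static_branch_enable(&sched_cluster_active);
	else
		static_branch_disable(&sched_cluster_active);
}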


