[PATCH v3 2/2] sched/fair: Scan cluster before scanning LLC in wake-up path
kernel test robot
lkp@intel.com
Thu Jun 9 02:54:11 PDT 2022
Hi Yicong,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on tip/sched/core]
[also build test WARNING on linus/master v5.19-rc1 next-20220609]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/intel-lab-lkp/linux/commits/Yicong-Yang/sched-fair-Wake-task-within-the-cluster-when-possible/20220608-181847
base: https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 991d8d8142cad94f9c5c05db25e67fa83d6f772a
config: x86_64-randconfig-a005 (https://download.01.org/0day-ci/archive/20220609/202206091721.rhB7mm5c-lkp@intel.com/config)
compiler: clang version 15.0.0 (https://github.com/llvm/llvm-project b92436efcb7813fc481b30f2593a4907568d917a)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/intel-lab-lkp/linux/commit/f2b15e8641f351783c1d47bc654ace164300b7f1
git remote add linux-review https://github.com/intel-lab-lkp/linux
git fetch --no-tags linux-review Yicong-Yang/sched-fair-Wake-task-within-the-cluster-when-possible/20220608-181847
git checkout f2b15e8641f351783c1d47bc654ace164300b7f1
# save the config file
mkdir build_dir && cp config build_dir/.config
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=x86_64 SHELL=/bin/bash kernel/sched/
If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
kernel/sched/fair.c:5512:6: warning: no previous prototype for function 'init_cfs_bandwidth' [-Wmissing-prototypes]
void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
^
kernel/sched/fair.c:5512:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
^
static
>> kernel/sched/fair.c:6381:29: warning: incompatible pointer to integer conversion passing 'struct cpumask *' to parameter of type 'int' [-Wint-conversion]
idle_cpu = scan_cluster(p, cpus, target, &nr);
^~~~
kernel/sched/fair.c:6327:59: note: passing argument to parameter 'prev_cpu' here
static inline int scan_cluster(struct task_struct *p, int prev_cpu, int target, int *nr)
^
kernel/sched/fair.c:11734:6: warning: no previous prototype for function 'free_fair_sched_group' [-Wmissing-prototypes]
void free_fair_sched_group(struct task_group *tg) { }
^
kernel/sched/fair.c:11734:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
void free_fair_sched_group(struct task_group *tg) { }
^
static
kernel/sched/fair.c:11736:5: warning: no previous prototype for function 'alloc_fair_sched_group' [-Wmissing-prototypes]
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
^
kernel/sched/fair.c:11736:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
^
static
kernel/sched/fair.c:11741:6: warning: no previous prototype for function 'online_fair_sched_group' [-Wmissing-prototypes]
void online_fair_sched_group(struct task_group *tg) { }
^
kernel/sched/fair.c:11741:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
void online_fair_sched_group(struct task_group *tg) { }
^
static
kernel/sched/fair.c:11743:6: warning: no previous prototype for function 'unregister_fair_sched_group' [-Wmissing-prototypes]
void unregister_fair_sched_group(struct task_group *tg) { }
^
kernel/sched/fair.c:11743:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
void unregister_fair_sched_group(struct task_group *tg) { }
^
static
kernel/sched/fair.c:489:20: warning: unused function 'list_del_leaf_cfs_rq' [-Wunused-function]
static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
^
kernel/sched/fair.c:510:19: warning: unused function 'tg_is_idle' [-Wunused-function]
static inline int tg_is_idle(struct task_group *tg)
^
kernel/sched/fair.c:5493:20: warning: unused function 'sync_throttle' [-Wunused-function]
static inline void sync_throttle(struct task_group *tg, int cpu) {}
^
kernel/sched/fair.c:5518:37: warning: unused function 'tg_cfs_bandwidth' [-Wunused-function]
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
^
kernel/sched/fair.c:5522:20: warning: unused function 'destroy_cfs_bandwidth' [-Wunused-function]
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
^
11 warnings generated.
vim +6381 kernel/sched/fair.c
6332
6333 /*
6334 * Scan the LLC domain for idle CPUs; this is dynamically regulated by
6335 * comparing the average scan cost (tracked in sd->avg_scan_cost) against the
6336 * average idle time for this rq (as found in rq->avg_idle).
6337 */
6338 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool has_idle_core, int target)
6339 {
6340 struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
6341 int i, cpu, idle_cpu = -1, nr = INT_MAX;
6342 struct rq *this_rq = this_rq();
6343 int this = smp_processor_id();
6344 struct sched_domain *this_sd;
6345 u64 time = 0;
6346
6347 this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
6348 if (!this_sd)
6349 return -1;
6350
6351 cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
6352
6353 if (sched_feat(SIS_PROP) && !has_idle_core) {
6354 u64 avg_cost, avg_idle, span_avg;
6355 unsigned long now = jiffies;
6356
6357 /*
6358 * If we're busy, the assumption that the last idle period
6359 * predicts the future is flawed; age away the remaining
6360 * predicted idle time.
6361 */
6362 if (unlikely(this_rq->wake_stamp < now)) {
6363 while (this_rq->wake_stamp < now && this_rq->wake_avg_idle) {
6364 this_rq->wake_stamp++;
6365 this_rq->wake_avg_idle >>= 1;
6366 }
6367 }
6368
6369 avg_idle = this_rq->wake_avg_idle;
6370 avg_cost = this_sd->avg_scan_cost + 1;
6371
6372 span_avg = sd->span_weight * avg_idle;
6373 if (span_avg > 4*avg_cost)
6374 nr = div_u64(span_avg, avg_cost);
6375 else
6376 nr = 4;
6377
6378 time = cpu_clock(this);
6379 }
6380
> 6381 idle_cpu = scan_cluster(p, cpus, target, &nr);
6382 if ((unsigned int)idle_cpu < nr_cpumask_bits)
6383 return idle_cpu;
6384
6385 for_each_cpu_wrap(cpu, cpus, target + 1) {
6386 if (has_idle_core) {
6387 i = select_idle_core(p, cpu, cpus, &idle_cpu);
6388 if ((unsigned int)i < nr_cpumask_bits)
6389 return i;
6390
6391 } else {
6392 if (--nr <= 0)
6393 return -1;
6394 idle_cpu = __select_idle_cpu(cpu, p);
6395 if ((unsigned int)idle_cpu < nr_cpumask_bits)
6396 break;
6397 }
6398 }
6399
6400 if (has_idle_core)
6401 set_idle_cores(target, false);
6402
6403 if (sched_feat(SIS_PROP) && !has_idle_core) {
6404 time = cpu_clock(this) - time;
6405
6406 /*
6407 * Account for the scan cost of wakeups against the average
6408 * idle time.
6409 */
6410 this_rq->wake_avg_idle -= min(this_rq->wake_avg_idle, time);
6411
6412 update_avg(&this_sd->avg_scan_cost, time);
6413 }
6414
6415 return idle_cpu;
6416 }
6417
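The >> warning above is the one introduced by this series: on this randconfig CONFIG_SCHED_CLUSTER appears to be disabled, so select_idle_cpu() ends up calling the !CONFIG_SCHED_CLUSTER stub of scan_cluster(), whose second parameter is still declared as 'int prev_cpu' while the call site now passes the 'cpus' mask. Below is a minimal sketch of one possible fix, assuming the CONFIG_SCHED_CLUSTER variant already takes the cpumask in that position (the parameter name 'cpus' is only illustrative, not taken from the patch); aligning the stub's prototype with the call site should silence -Wint-conversion in both configurations:

/*
 * Sketch only, not the author's patch: align the !CONFIG_SCHED_CLUSTER stub
 * with the call in select_idle_cpu(), which now passes the candidate mask.
 * The reported stub (kernel/sched/fair.c:6327) declares its second parameter
 * as 'int prev_cpu'; switching it to the cpumask matches the call site.
 */
#ifndef CONFIG_SCHED_CLUSTER
static inline int scan_cluster(struct task_struct *p, struct cpumask *cpus,
			       int target, int *nr)
{
	/* No cluster level in the topology: report no idle CPU found. */
	return -1;
}
#endif

With the stub aligned, the call "idle_cpu = scan_cluster(p, cpus, target, &nr);" compiles cleanly whether or not CONFIG_SCHED_CLUSTER is set.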
--
0-DAY CI Kernel Test Service
https://01.org/lkp