[PATCH v3 08/12] sched: move cfs task on a CPU with higher capacity
Peter Zijlstra
peterz at infradead.org
Thu Jul 10 04:31:20 PDT 2014
On Mon, Jun 30, 2014 at 06:05:39PM +0200, Vincent Guittot wrote:
You 'forgot' to update the comment that goes with nohz_kick_needed().
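E.g. a new bullet in the heuristic list above the function, restating the
condition this patch adds (wording is only a suggestion):

 *   - This rq has at least one CFS task and the capacity of the CPU is
 *     significantly reduced because of RT tasks or IRQs.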
> @@ -7233,9 +7253,10 @@ static inline int nohz_kick_needed(struct rq *rq)
>  	struct sched_domain *sd;
>  	struct sched_group_capacity *sgc;
>  	int nr_busy, cpu = rq->cpu;
> +	bool kick = false;
>
>  	if (unlikely(rq->idle_balance))
> -		return 0;
> +		return false;
>
>  	/*
>  	 * We may be recently in ticked or tickless idle mode. At the first
> @@ -7249,38 +7270,41 @@ static inline int nohz_kick_needed(struct rq *rq)
>  	 * balancing.
>  	 */
>  	if (likely(!atomic_read(&nohz.nr_cpus)))
> -		return 0;
> +		return false;
>
>  	if (time_before(now, nohz.next_balance))
> -		return 0;
> +		return false;
>
>  	if (rq->nr_running >= 2)
> -		goto need_kick;
> +		return true;
>
>  	rcu_read_lock();
>  	sd = rcu_dereference(per_cpu(sd_busy, cpu));
>  	if (sd) {
>  		sgc = sd->groups->sgc;
>  		nr_busy = atomic_read(&sgc->nr_busy_cpus);
>
> -		if (nr_busy > 1)
> -			goto need_kick_unlock;
> +		if (nr_busy > 1) {
> +			kick = true;
> +			goto unlock;
> +		}
> +
> +		if ((rq->cfs.h_nr_running >= 1)
> +		 && ((rq->cpu_capacity * sd->imbalance_pct) <
> +				(rq->cpu_capacity_orig * 100))) {
> +			kick = true;
> +			goto unlock;
> +		}
Again, why only for shared caches?
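FWIW, with the generic default of imbalance_pct == 125 this new test only
fires once cpu_capacity has dropped below 80% of cpu_capacity_orig, since
capacity * 125 < orig * 100 is equivalent to capacity < 4/5 * orig.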
>  	}
>
>  	sd = rcu_dereference(per_cpu(sd_asym, cpu));
>  	if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
>  				  sched_domain_span(sd)) < cpu))
> -		goto need_kick_unlock;
> +		kick = true;
>
> +unlock:
>  	rcu_read_unlock();
> -	return 0;
> -
> -need_kick_unlock:
> -	rcu_read_unlock();
> -need_kick:
> -	return 1;
> +	return kick;
>  }
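If the capacity check is not inherently about shared caches, it could hang
off the base domain instead, so that it also fires on topologies without an
LLC level. Completely untested sketch, same condition but against rq->sd:

	/* untested: capacity check on the base domain, not sd_busy */
	sd = rcu_dereference(rq->sd);
	if (sd && (rq->cfs.h_nr_running >= 1) &&
	    ((rq->cpu_capacity * sd->imbalance_pct) <
	     (rq->cpu_capacity_orig * 100))) {
		kick = true;
		goto unlock;
	}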