sched: Ditch per cgroup task lists for load-balancing
Per cgroup load-balance has numerous problems, chief amongst them that
there is no real sane order in them. So stop pretending it makes sense
and enqueue all tasks on a single list.

This also allows us to more easily fix the fwd progress issue uncovered
by the lock-break stuff. Rotate the list on failure to migrate and
limit the total iterations to nr_running (which with releasing the lock
isn't strictly accurate but close enough).

Also add a filter that skips very light tasks on the first attempt
around the list; this attempts to avoid shooting whole cgroups around
without affecting overall balance.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: pjt@google.com
Link: http://lkml.kernel.org/n/tip-tx8yqydc7eimgq7i4rkc3a4g@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 367456c756
parent ddcdf6e7d9
committed by Ingo Molnar
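For orientation, here is a minimal sketch of the loop the commit message describes: walk the single per-rq task list, cap the number of iterations at nr_running, skip very light tasks on the first pass, and rotate tasks that cannot be moved to the tail of the list. This is an illustration only; the struct lb_env layout and the helpers task_is_very_light(), can_migrate() and detach_task() are hypothetical stand-ins, not the actual identifiers in kernel/sched/fair.c.

	/*
	 * Sketch only: field and helper names below are illustrative,
	 * not the kernel's exact API.
	 */
	#include <linux/list.h>
	#include <linux/sched.h>

	struct lb_env {
		struct list_head	*tasks;		/* the per-rq cfs_tasks list */
		unsigned int		loop;		/* iterations done so far */
		unsigned int		loop_max;	/* capped at nr_running */
		bool			first_pass;	/* skip very light tasks once */
	};

	/* Hypothetical helpers standing in for the real fair.c internals. */
	static bool task_is_very_light(struct task_struct *p);
	static bool can_migrate(struct task_struct *p, struct lb_env *env);
	static void detach_task(struct task_struct *p, struct lb_env *env);

	static int move_some_tasks(struct lb_env *env)
	{
		struct task_struct *p, *n;
		int moved = 0;

		list_for_each_entry_safe(p, n, env->tasks, se.group_node) {
			/* nr_running bound: not exact once the lock has been
			 * dropped, but close enough to guarantee progress. */
			if (++env->loop > env->loop_max)
				break;

			/* First time around, leave very light tasks alone so
			 * whole cgroups of tiny tasks aren't shot around. */
			if (env->first_pass && task_is_very_light(p))
				goto rotate;

			if (!can_migrate(p, env))
				goto rotate;

			detach_task(p, env);
			moved++;
			continue;
rotate:
			/* Rotate the failed task to the tail so a retry after
			 * a lock break starts with different candidates. */
			list_move_tail(&p->se.group_node, env->tasks);
		}

		return moved;
	}

Rotating failed candidates to the tail, rather than restarting the walk from the head, is what gives forward progress when the runqueue lock is released and the scan resumes.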
@@ -212,9 +212,6 @@ struct cfs_rq {
 	struct rb_root tasks_timeline;
 	struct rb_node *rb_leftmost;
 
-	struct list_head tasks;
-	struct list_head *balance_iterator;
-
 	/*
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
@@ -241,11 +238,6 @@ struct cfs_rq {
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
 #ifdef CONFIG_SMP
-	/*
-	 * the part of load.weight contributed by tasks
-	 */
-	unsigned long task_weight;
-
 	/*
 	 * h_load = weight * f(tg)
 	 *
@@ -420,6 +412,8 @@ struct rq {
 	int cpu;
 	int online;
 
+	struct list_head cfs_tasks;
+
 	u64 rt_avg;
 	u64 age_stamp;
 	u64 idle_stamp;
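The struct changes above only pay off if every runnable CFS task is linked onto rq->cfs_tasks when it is enqueued and unlinked when it is dequeued. A hedged sketch of that bookkeeping follows; the real commit does this in the existing CFS accounting paths, and link_cfs_task()/unlink_cfs_task() are made-up names used purely for illustration.

	/*
	 * Sketch only: keep rq->cfs_tasks in sync with enqueue/dequeue.
	 * Helper names are hypothetical.
	 */
	static void link_cfs_task(struct rq *rq, struct task_struct *p)
	{
		/* Link the task's group_node onto the single per-rq list. */
		list_add(&p->se.group_node, &rq->cfs_tasks);
	}

	static void unlink_cfs_task(struct task_struct *p)
	{
		/* list_del_init() leaves the node safely reusable. */
		list_del_init(&p->se.group_node);
	}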