sched: Remove the sched_class load_balance methods
Take out the sched_class methods for load-balancing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 3d45fd804a (committed by Ingo Molnar)
parent 1e3c88bdeb
@@ -1087,14 +1087,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -1390,32 +1390,6 @@ static const u32 prio_to_wmult[40] = {
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
-
-/*
- * runqueue iterator, to support SMP load-balancing between different
- * scheduling classes, without having to expose their internal data
- * structures to the load-balancing proper:
- */
-struct rq_iterator {
-	void *arg;
-	struct task_struct *(*start)(void *);
-	struct task_struct *(*next)(void *);
-};
-
-#ifdef CONFIG_SMP
-static unsigned long
-balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
-	      unsigned long max_load_move, struct sched_domain *sd,
-	      enum cpu_idle_type idle, int *all_pinned,
-	      int *this_best_prio, struct rq_iterator *iterator);
-
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle,
-		   struct rq_iterator *iterator);
-#endif
-
 /* Time spent by the tasks of the cpu accounting group executing in ... */
 enum cpuacct_stat_index {
 	CPUACCT_STAT_USER,	/* ... user mode */
@@ -1851,6 +1851,24 @@ static struct task_struct *load_balance_next_fair(void *arg)
 	return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
 }
 
+/*
+ * runqueue iterator, to support SMP load-balancing between different
+ * scheduling classes, without having to expose their internal data
+ * structures to the load-balancing proper:
+ */
+struct rq_iterator {
+	void *arg;
+	struct task_struct *(*start)(void *);
+	struct task_struct *(*next)(void *);
+};
+
+static unsigned long
+balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
+	      unsigned long max_load_move, struct sched_domain *sd,
+	      enum cpu_idle_type idle, int *all_pinned,
+	      int *this_best_prio, struct rq_iterator *iterator);
+
+
 static unsigned long
 __load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		    unsigned long max_load_move, struct sched_domain *sd,
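The struct rq_iterator added above is only a cursor: balance_tasks() and iter_move_one_task() call start()/next() with the opaque arg and never look inside the runqueue they are walking. A minimal illustrative walker, assuming nothing beyond the declarations in this hunk (walk_candidates and its nr counter are made up for the example, they are not part of the patch):

/* Illustration only: how a struct rq_iterator is meant to be driven. */
static unsigned long walk_candidates(struct rq_iterator *iterator)
{
	struct task_struct *p;
	unsigned long nr = 0;

	for (p = iterator->start(iterator->arg); p;
	     p = iterator->next(iterator->arg))
		nr++;	/* balance_tasks() would consider pulling p here */

	return nr;
}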
@@ -1929,7 +1947,19 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 #endif
 
 static int
-move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
+iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+		   struct sched_domain *sd, enum cpu_idle_type idle,
+		   struct rq_iterator *iterator);
+
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		   struct sched_domain *sd, enum cpu_idle_type idle)
 {
 	struct cfs_rq *busy_cfs_rq;
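In this hunk move_one_task_fair() effectively becomes move_one_task(): with the class hook gone, the fair-class version is the only implementation left and keeps its role in active balancing. Its body is outside the hunk; judging from the declarations visible here it works roughly like the sketch below (a hedged reconstruction, assuming the usual cfs_rq iterator helpers such as load_balance_start_fair and the for_each_leaf_cfs_rq walk; not verbatim from the patch):

static int
move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/* point the iterator at this cfs_rq and try to pull one task */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}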
@@ -2094,16 +2124,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
-	const struct sched_class *class = sched_class_highest;
-	unsigned long total_load_moved = 0;
+	unsigned long total_load_moved = 0, load_moved;
 	int this_best_prio = this_rq->curr->prio;
 
 	do {
-		total_load_moved +=
-			class->load_balance(this_rq, this_cpu, busiest,
+		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
 				max_load_move - total_load_moved,
 				sd, idle, all_pinned, &this_best_prio);
-		class = class->next;
+
+		total_load_moved += load_moved;
 
 #ifdef CONFIG_PREEMPT
 		/*
@@ -2114,7 +2143,7 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
 			break;
 #endif
-	} while (class && max_load_move > total_load_moved);
+	} while (load_moved && max_load_move > total_load_moved);
 
 	return total_load_moved > 0;
 }
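Read together, the two move_tasks() hunks above replace the walk over each class's load_balance() hook with a direct call into the fair class; since only load_balance_fair() can move load now, the loop simply retries while it keeps making progress. Roughly, the function ends up reading as follows (a sketch stitched from the hunks; the max_load_move parameter line is not visible in the context and the CONFIG_PREEMPT comment is paraphrased):

static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
		      unsigned long max_load_move,
		      struct sched_domain *sd, enum cpu_idle_type idle,
		      int *all_pinned)
{
	unsigned long total_load_moved = 0, load_moved;
	int this_best_prio = this_rq->curr->prio;

	do {
		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
				max_load_move - total_load_moved,
				sd, idle, all_pinned, &this_best_prio);

		total_load_moved += load_moved;

#ifdef CONFIG_PREEMPT
		/* a newly idle CPU stops early once it has something to run */
		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
			break;
#endif
	} while (load_moved && max_load_move > total_load_moved);

	return total_load_moved > 0;
}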
@@ -2145,25 +2174,6 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	return 0;
 }
 
-/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
- * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
-			 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	const struct sched_class *class;
-
-	for_each_class(class) {
-		if (class->move_one_task(this_rq, this_cpu, busiest, sd, idle))
-			return 1;
-	}
-
-	return 0;
-}
 /********** Helpers for find_busiest_group ************************/
 /*
  * sd_lb_stats - Structure to store the statistics of a sched_domain
@@ -3873,8 +3883,6 @@ static const struct sched_class fair_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
 
-	.load_balance		= load_balance_fair,
-	.move_one_task		= move_one_task_fair,
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
@@ -44,24 +44,6 @@ static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
-#ifdef CONFIG_SMP
-static unsigned long
-load_balance_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		  unsigned long max_load_move,
-		  struct sched_domain *sd, enum cpu_idle_type idle,
-		  int *all_pinned, int *this_best_prio)
-{
-	return 0;
-}
-
-static int
-move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		   struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	return 0;
-}
-#endif
-
 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
@@ -119,9 +101,6 @@ static const struct sched_class idle_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_idle,
-
-	.load_balance		= load_balance_idle,
-	.move_one_task		= move_one_task_idle,
 #endif
 
 	.set_curr_task          = set_curr_task_idle,
@@ -1481,24 +1481,6 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 		push_rt_tasks(rq);
 }
 
-static unsigned long
-load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		unsigned long max_load_move,
-		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, int *this_best_prio)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
-static int
-move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
-		 struct sched_domain *sd, enum cpu_idle_type idle)
-{
-	/* don't touch RT tasks */
-	return 0;
-}
-
 static void set_cpus_allowed_rt(struct task_struct *p,
 				const struct cpumask *new_mask)
 {
@@ -1746,8 +1728,6 @@ static const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_rt,
 
-	.load_balance		= load_balance_rt,
-	.move_one_task		= move_one_task_rt,
 	.set_cpus_allowed       = set_cpus_allowed_rt,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
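The idletask and RT hunks show the other side of the cleanup: classes that never participated in load balancing no longer have to carry do-nothing load_balance/move_one_task stubs just to fill the ops table. After this patch the SMP part of such a class initializer can be as small as the following sketch (the example_* names are hypothetical, not from the patch):

/* Hypothetical minimal class after this patch: no balancing stubs required. */
static const struct sched_class example_sched_class = {
	/* ... enqueue/dequeue/pick_next etc. ... */
#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_example,
	/* .load_balance and .move_one_task no longer exist in sched_class */
#endif
	.set_curr_task		= set_curr_task_example,
	/* ... */
};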