Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: don't allow rt_runtime_us to be zero for groups having rt tasks
  sched: rt-group: fixup schedulability constraints calculation
  sched: fix the wrong time slice value for SCHED_FIFO tasks
  sched: export task_nice
  sched: balance RT task resched only on runqueue
  sched: retain vruntime
diff --git a/include/linux/sched.h b/include/linux/sched.h
@@ -899,6 +899,10 @@ struct sched_class {
 			     int running);
 	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
 			     int oldprio, int running);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	void (*moved_group) (struct task_struct *p);
+#endif
 };
 
 struct load_weight {
diff --git a/kernel/sched.c b/kernel/sched.c
@@ -4422,7 +4422,7 @@ int task_nice(const struct task_struct *p)
 {
 	return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
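Dropping the `_GPL` suffix lets non-GPL modules resolve task_nice() again. A minimal sketch of the kind of out-of-tree caller this re-enables; the module name and license string are made up for illustration:

```c
/* Hypothetical module: this only links against task_nice() when it is
 * plain EXPORT_SYMBOL, because a non-GPL MODULE_LICENSE blocks
 * GPL-only exports. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>

static int __init nice_demo_init(void)
{
	/* task_nice() maps a task's static_prio back to its nice value */
	printk(KERN_INFO "insmod'ing task nice: %d\n", task_nice(current));
	return 0;
}

static void __exit nice_demo_exit(void)
{
}

module_init(nice_demo_init);
module_exit(nice_demo_exit);
MODULE_LICENSE("Proprietary");
```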
@@ -5100,7 +5100,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	time_slice = 0;
 	if (p->policy == SCHED_RR) {
 		time_slice = DEF_TIMESLICE;
-	} else {
+	} else if (p->policy != SCHED_FIFO) {
 		struct sched_entity *se = &p->se;
 		unsigned long flags;
 		struct rq *rq;
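A SCHED_FIFO task has no timeslice: it runs until it blocks, yields, or is preempted by a higher-priority task. The old `else` branch computed a CFS slice for it anyway; with the `p->policy != SCHED_FIFO` guard it now falls through with `time_slice = 0`. A small userspace sketch of the visible behavior (needs root to switch policy):

```c
/* After this fix, a SCHED_FIFO task reports a zero interval, while a
 * SCHED_RR task reports the default RR timeslice. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };
	struct timespec ts;

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler (needs root)");
		return 1;
	}
	if (sched_rr_get_interval(0, &ts) == -1) {
		perror("sched_rr_get_interval");
		return 1;
	}
	/* Expect 0s 0ns: SCHED_FIFO runs until it blocks or yields. */
	printf("timeslice: %lds %ldns\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
```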
@@ -7625,6 +7625,11 @@ void sched_move_task(struct task_struct *tsk)
 
 	set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	if (tsk->sched_class->moved_group)
+		tsk->sched_class->moved_group(tsk);
+#endif
+
 	if (on_rq) {
 		if (unlikely(running))
 			tsk->sched_class->set_curr_task(rq);
@@ -7721,9 +7726,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	runtime *= (1ULL << 16);
-	div64_64(runtime, period);
-	return runtime;
+	return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
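The replaced body was doubly broken: it scaled `runtime` by 2^16 but then threw away the quotient from div64_64() and returned the scaled runtime itself. The fix performs the whole 16.16 fixed-point division in one expression. A userspace sketch of the intended math; the function name mirrors the kernel's but everything here is illustrative:

```c
/* The runtime/period ratio is scaled by 2^16 so that integer division
 * keeps 16 bits of fraction (16.16 fixed point). */
#include <stdint.h>
#include <stdio.h>

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	return (runtime << 16) / period;   /* div64_64() in the kernel */
}

int main(void)
{
	/* 950ms runtime in a 1s period -> ~0.95 in 16.16 fixed point */
	uint64_t r = to_ratio(1000000000ULL, 950000000ULL);

	printf("ratio = %llu (~%.2f)\n",
	       (unsigned long long)r, r / 65536.0);
	return 0;
}
```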
@@ -7747,25 +7750,40 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
 	return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+	struct task_struct *g, *p;
+	do_each_thread(g, p) {
+		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+			return 1;
+	} while_each_thread(g, p);
+	return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
 	u64 rt_runtime, rt_period;
 	int err = 0;
 
-	rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+	rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
 	if (rt_runtime_us == -1)
-		rt_runtime = rt_period;
+		rt_runtime = RUNTIME_INF;
 
 	mutex_lock(&rt_constraints_mutex);
+	read_lock(&tasklist_lock);
+	if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+		err = -EBUSY;
+		goto unlock;
+	}
 	if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
 		err = -EINVAL;
 		goto unlock;
 	}
-	if (rt_runtime_us == -1)
-		rt_runtime = RUNTIME_INF;
 	tg->rt_runtime = rt_runtime;
 unlock:
+	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
 	return err;
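Zeroing a group's runtime would starve realtime tasks already inside it, so the new tg_has_rt_tasks() walk (done under tasklist_lock, nested inside the existing rt_constraints_mutex) makes that case fail with -EBUSY. A userspace sketch of what the failure looks like through the cgroup interface; the mount point and group name are assumptions:

```c
/* Attempt to set a group's rt_runtime_us to zero; with this change the
 * write is rejected with EBUSY while the group still has rt tasks. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/cgroup/mygroup/cpu.rt_runtime_us", O_WRONLY);

	if (fd == -1) {
		perror("open");
		return 1;
	}
	if (write(fd, "0", 1) == -1)
		fprintf(stderr, "write: %s%s\n", strerror(errno),
			errno == EBUSY ? " (group still has rt tasks)" : "");
	close(fd);
	return 0;
}
```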
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
@@ -1353,6 +1353,16 @@ static void set_curr_task_fair(struct rq *rq)
 		set_next_entity(cfs_rq_of(se), se);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void moved_group_fair(struct task_struct *p)
+{
+	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+	update_curr(cfs_rq);
+	place_entity(cfs_rq, &p->se, 1);
+}
+#endif
+
 /*
  * All the scheduling class methods:
 */
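moved_group_fair() is what the "retain vruntime" summary line refers to: update_curr() brings the source queue's clock up to date, then place_entity() re-bases the entity's vruntime on the destination queue's min_vruntime, so the task keeps its relative fairness position instead of arriving with a large credit or debt. A standalone toy model of that re-basing (simplified: the real place_entity() also handles sleeper credit and the start debit):

```c
/* Conceptual model, not kernel code: a moved task's vruntime is
 * re-expressed relative to the new queue's min_vruntime. */
#include <stdint.h>
#include <stdio.h>

struct cfs_rq_model { uint64_t min_vruntime; };
struct se_model { uint64_t vruntime; };

static void move_group(struct se_model *se,
		       struct cfs_rq_model *from, struct cfs_rq_model *to)
{
	uint64_t lag = se->vruntime - from->min_vruntime; /* relative position */

	se->vruntime = to->min_vruntime + lag;            /* retain it */
}

int main(void)
{
	struct cfs_rq_model a = { .min_vruntime = 1000 };
	struct cfs_rq_model b = { .min_vruntime = 50000 };
	struct se_model se = { .vruntime = 1200 };

	move_group(&se, &a, &b);
	printf("vruntime after move: %llu\n",
	       (unsigned long long)se.vruntime); /* 50200, not 1200 */
	return 0;
}
```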
@@ -1381,6 +1391,10 @@ static const struct sched_class fair_sched_class = {
 
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	.moved_group		= moved_group_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
@@ -1107,9 +1107,11 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 			pull_rt_task(rq);
 		/*
 		 * If there's a higher priority task waiting to run
-		 * then reschedule.
+		 * then reschedule. Note, the above pull_rt_task
+		 * can release the rq lock and p could migrate.
+		 * Only reschedule if p is still on the same runqueue.
 		 */
-		if (p->prio > rq->rt.highest_prio)
+		if (p->prio > rq->rt.highest_prio && rq->curr == p)
 			resched_task(p);
 #else
 		/* For UP simply resched on drop of prio */
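The added `rq->curr == p` test closes the race the new comment describes: pull_rt_task() may release the rq lock, and while it is dropped p can migrate to another runqueue, so the state checked earlier is stale. An illustrative userspace analogue of the general rule that invariants must be rechecked after a lock has been dropped and retaken; every name in it is invented for the sketch:

```c
/* Whenever a helper may drop and retake a lock, conditions observed
 * before the call must be re-validated after it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static int p_is_on_this_rq = 1;	/* may flip while the lock is dropped */

static void pull_rt_task_model(void)
{
	pthread_mutex_unlock(&rq_lock);
	/* ...another CPU could migrate p away right here... */
	pthread_mutex_lock(&rq_lock);
}

int main(void)
{
	pthread_mutex_lock(&rq_lock);
	pull_rt_task_model();
	if (p_is_on_this_rq)	/* mirrors the added rq->curr == p check */
		printf("p still current here: safe to resched_task(p)\n");
	pthread_mutex_unlock(&rq_lock);
	return 0;
}
```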