Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: add arch_update_cpu_topology hook.
  sched: add exported arch_reinit_sched_domains() to header file.
  sched: remove double unlikely from schedule()
  sched: cleanup old and rarely used 'debug' features.
@@ -790,6 +790,7 @@ struct sched_domain {
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern int arch_reinit_sched_domains(void);
 
 #endif /* CONFIG_SMP */
 
@@ -50,6 +50,8 @@
         for_each_online_node(node)                      \
                 if (nr_cpus_node(node))
 
+void arch_update_cpu_topology(void);
+
 /* Conform to ACPI 2.0 SLIT distance definitions */
 #define LOCAL_DISTANCE          10
 #define REMOTE_DISTANCE         20
@@ -594,18 +594,14 @@ enum {
         SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
         SCHED_FEAT_WAKEUP_PREEMPT       = 2,
         SCHED_FEAT_START_DEBIT          = 4,
-        SCHED_FEAT_TREE_AVG             = 8,
-        SCHED_FEAT_APPROX_AVG           = 16,
-        SCHED_FEAT_HRTICK               = 32,
-        SCHED_FEAT_DOUBLE_TICK          = 64,
+        SCHED_FEAT_HRTICK               = 8,
+        SCHED_FEAT_DOUBLE_TICK          = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
                 SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
                 SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
                 SCHED_FEAT_START_DEBIT          * 1 |
-                SCHED_FEAT_TREE_AVG             * 0 |
-                SCHED_FEAT_APPROX_AVG           * 0 |
                 SCHED_FEAT_HRTICK               * 1 |
                 SCHED_FEAT_DOUBLE_TICK          * 0;
 
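For context on what these flags feed into: in kernel/sched.c of this era the mask is consumed through a token-pasting sched_feat() macro, so dropping TREE_AVG/APPROX_AVG and renumbering the remaining bits keeps every sched_feat(X) test working unchanged. The following is a minimal, self-contained sketch of that scheme; the macro shape matches the kernel's, but the surrounding demo is illustrative only:

    #include <stdio.h>

    /* Feature bits as in the new enum above. */
    enum {
            SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
            SCHED_FEAT_WAKEUP_PREEMPT    = 2,
            SCHED_FEAT_START_DEBIT       = 4,
            SCHED_FEAT_HRTICK            = 8,
            SCHED_FEAT_DOUBLE_TICK       = 16,
    };

    /* Same default mask the hunk builds: everything on except DOUBLE_TICK. */
    static unsigned int sysctl_sched_features =
            SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
            SCHED_FEAT_WAKEUP_PREEMPT    * 1 |
            SCHED_FEAT_START_DEBIT       * 1 |
            SCHED_FEAT_HRTICK            * 1 |
            SCHED_FEAT_DOUBLE_TICK       * 0;

    /* kernel/sched.c tests a feature by pasting the flag name onto the prefix. */
    #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

    int main(void)
    {
            printf("HRTICK=%d DOUBLE_TICK=%d\n",
                   !!sched_feat(HRTICK), !!sched_feat(DOUBLE_TICK));  /* 1 0 */
            return 0;
    }
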
@@ -3886,7 +3882,7 @@ need_resched_nonpreemptible:
 
         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-                                unlikely(signal_pending(prev)))) {
+                                signal_pending(prev))) {
                         prev->state = TASK_RUNNING;
                 } else {
                         deactivate_task(rq, prev, 1);
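The "remove double unlikely" change works because unlikely() is just a branch-prediction hint wrapped around the whole condition; nesting a second unlikely() around signal_pending() inside an expression that is already marked unlikely adds nothing. A small stand-alone sketch, assuming the usual definition of unlikely() from include/linux/compiler.h; the values and helper name are stand-ins:

    #include <stdio.h>

    /* The kernel's hint macro boils down to this. */
    #define unlikely(x) __builtin_expect(!!(x), 0)

    #define TASK_INTERRUPTIBLE 1    /* stand-in value for the sketch */

    /* Shape of the schedule() test after the cleanup: one hint around the
     * combined condition is enough for the compiler. */
    static const char *pick(long state, int sig_pending)
    {
            if (unlikely((state & TASK_INTERRUPTIBLE) && sig_pending))
                    return "make runnable again";   /* prev->state = TASK_RUNNING */
            return "deactivate";                    /* deactivate_task(rq, prev, 1) */
    }

    int main(void)
    {
            printf("%s / %s\n", pick(1, 1), pick(1, 0));
            return 0;
    }
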
@@ -6811,6 +6807,10 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
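The __attribute__((weak)) definition gives every architecture a no-op default for the new hook; an architecture whose CPU topology can change at runtime overrides it simply by providing a strong definition with the same prototype, which the linker prefers. A two-file sketch of the pattern; the arch-side file name and body are hypothetical:

    /* kernel/sched.c -- generic, empty default (as added in the hunk above). */
    void __attribute__((weak)) arch_update_cpu_topology(void)
    {
    }

    /* arch/<something>/kernel/topology.c (hypothetical) -- a normal, strong
     * definition here replaces the weak stub at link time, so the scheduler
     * picks up refreshed CPU maps when it rebuilds its domains. */
    void arch_update_cpu_topology(void)
    {
            /* re-read the hardware topology and update the arch's cpu maps */
    }
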
@@ -6820,6 +6820,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
         int err;
 
+        arch_update_cpu_topology();
         ndoms_cur = 1;
         doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
         if (!doms_cur)
@@ -6924,7 +6925,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
         int err;
 
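Dropping static here, together with the new extern declaration in the sched.h hunk at the top, is what "add exported arch_reinit_sched_domains() to header file" refers to: code outside kernel/sched.c can now ask for the scheduler domains to be rebuilt. A hedged sketch of such a caller, assuming (as in kernel/sched.c of this period) that arch_reinit_sched_domains() rebuilds the domains via arch_init_sched_domains(), which per the earlier hunk now calls arch_update_cpu_topology() first; the handler name is hypothetical:

    #include <linux/sched.h>    /* now declares: extern int arch_reinit_sched_domains(void); */
    #include <linux/kernel.h>

    /* Hypothetical arch-side reaction to a topology-change event. */
    static void handle_topology_change(void)
    {
            /* Tearing down and re-initialising the domains goes through
             * arch_init_sched_domains(), so the refreshed topology reported
             * by arch_update_cpu_topology() is picked up along the way. */
            if (arch_reinit_sched_domains())
                    printk(KERN_WARNING "sched domain rebuild failed\n");
    }
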
@@ -302,11 +302,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
         return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-        return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
         return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -504,15 +499,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
         } else
                 vruntime = cfs_rq->min_vruntime;
 
-        if (sched_feat(TREE_AVG)) {
-                struct sched_entity *last = __pick_last_entity(cfs_rq);
-                if (last) {
-                        vruntime += last->vruntime;
-                        vruntime >>= 1;
-                }
-        } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-                vruntime += sched_vslice(cfs_rq)/2;
-
         /*
          * The 'current' period is already promised to the current tasks,
          * however the extra weight of the new task will slow them down a