Merge branch 'linus' into sched/core

Merge reason: pick up the latest scheduler fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/sched.c
@@ -292,8 +292,8 @@ static DEFINE_SPINLOCK(task_group_lock);
  * (The default weight is 1024 - so there's no practical
  *  limitation from this.)
  */
-#define MIN_SHARES    2
-#define MAX_SHARES    (1UL << 18)
+#define MIN_SHARES    (1UL << 1)
+#define MAX_SHARES    (1UL << (18 + SCHED_LOAD_RESOLUTION))
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 }
 
 #ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
 {
     struct rq *rq = this_rq();
-    struct task_struct *list = xchg(&rq->wake_list, NULL);
-
-    if (!list)
-        return;
 
     raw_spin_lock(&rq->lock);
 
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
     raw_spin_unlock(&rq->lock);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+    struct rq *rq = this_rq();
+    struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+    if (!list)
+        return;
+
+    sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
 void scheduler_ipi(void)
 {
-    sched_ttwu_pending();
+    struct rq *rq = this_rq();
+    struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+    if (!list)
+        return;
+
+    /*
+     * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+     * traditionally all their work was done from the interrupt return
+     * path. Now that we actually do some work, we need to make sure
+     * we do call them.
+     *
+     * Some archs already do call them, luckily irq_enter/exit nest
+     * properly.
+     *
+     * Arguably we should visit all archs and update all handlers,
+     * however a fair share of IPIs are still resched only so this would
+     * somewhat pessimize the simple resched case.
+     */
+    irq_enter();
+    sched_ttwu_do_pending(list);
+    irq_exit();
 }
 
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
@@ -6550,7 +6582,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
             break;
         }
 
-        if (!group->cpu_power) {
+        if (!group->sgp->power) {
             printk(KERN_CONT "\n");
             printk(KERN_ERR "ERROR: domain->cpu_power not "
                     "set\n");
@@ -6574,9 +6606,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
         cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
         printk(KERN_CONT " %s", str);
-        if (group->cpu_power != SCHED_POWER_SCALE) {
+        if (group->sgp->power != SCHED_POWER_SCALE) {
             printk(KERN_CONT " (cpu_power = %d)",
-                group->cpu_power);
+                group->sgp->power);
         }
 
         group = group->next;
@@ -6767,11 +6799,39 @@ static struct root_domain *alloc_rootdomain(void)
     return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+    struct sched_group *tmp, *first;
+
+    if (!sg)
+        return;
+
+    first = sg;
+    do {
+        tmp = sg->next;
+
+        if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+            kfree(sg->sgp);
+
+        kfree(sg);
+        sg = tmp;
+    } while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
     struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-    if (atomic_dec_and_test(&sd->groups->ref))
+
+    /*
+     * If its an overlapping domain it has private groups, iterate and
+     * nuke them all.
+     */
+    if (sd->flags & SD_OVERLAP) {
+        free_sched_groups(sd->groups, 1);
+    } else if (atomic_dec_and_test(&sd->groups->ref)) {
+        kfree(sd->groups->sgp);
         kfree(sd->groups);
+    }
     kfree(sd);
 }
 
@@ -6938,6 +6998,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
     struct sched_domain **__percpu sd;
     struct sched_group **__percpu sg;
+    struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6957,15 +7018,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP    0x01
+
 struct sched_domain_topology_level {
     sched_domain_init_f init;
     sched_domain_mask_f mask;
+    int                 flags;
     struct sd_data      data;
 };
 
+/*
+ * Assumes the sched_domain tree is fully constructed
+ */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+    struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+    const struct cpumask *span = sched_domain_span(sd);
+    struct cpumask *covered = sched_domains_tmpmask;
+    struct sd_data *sdd = sd->private;
+    struct sched_domain *child;
+    int i;
+
+    cpumask_clear(covered);
+
+    for_each_cpu(i, span) {
+        struct cpumask *sg_span;
+
+        if (cpumask_test_cpu(i, covered))
+            continue;
+
+        sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+                GFP_KERNEL, cpu_to_node(i));
+
+        if (!sg)
+            goto fail;
+
+        sg_span = sched_group_cpus(sg);
+
+        child = *per_cpu_ptr(sdd->sd, i);
+        if (child->child) {
+            child = child->child;
+            cpumask_copy(sg_span, sched_domain_span(child));
+        } else
+            cpumask_set_cpu(i, sg_span);
+
+        cpumask_or(covered, covered, sg_span);
+
+        sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+        atomic_inc(&sg->sgp->ref);
+
+        if (cpumask_test_cpu(cpu, sg_span))
+            groups = sg;
+
+        if (!first)
+            first = sg;
+        if (last)
+            last->next = sg;
+        last = sg;
+        last->next = first;
+    }
+    sd->groups = groups;
+
+    return 0;
+
+fail:
+    free_sched_groups(first, 0);
+
+    return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
     struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6974,24 +7093,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
     if (child)
         cpu = cpumask_first(sched_domain_span(child));
 
-    if (sg)
+    if (sg) {
         *sg = *per_cpu_ptr(sdd->sg, cpu);
+        (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+        atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+    }
 
     return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group (along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
     struct sched_group *first = NULL, *last = NULL;
     struct sd_data *sdd = sd->private;
@@ -6999,6 +7118,12 @@ build_sched_groups(struct sched_domain *sd)
     struct cpumask *covered;
     int i;
 
+    get_group(cpu, sdd, &sd->groups);
+    atomic_inc(&sd->groups->ref);
+
+    if (cpu != cpumask_first(sched_domain_span(sd)))
+        return 0;
+
     lockdep_assert_held(&sched_domains_mutex);
     covered = sched_domains_tmpmask;
 
@@ -7013,7 +7138,7 @@ build_sched_groups(struct sched_domain *sd)
             continue;
 
         cpumask_clear(sched_group_cpus(sg));
-        sg->cpu_power = 0;
+        sg->sgp->power = 0;
 
         for_each_cpu(j, span) {
             if (get_group(j, sdd, NULL) != group)
@@ -7030,6 +7155,8 @@ build_sched_groups(struct sched_domain *sd)
         last = sg;
     }
     last->next = first;
+
+    return 0;
 }
 
 /*
@@ -7044,13 +7171,18 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-    WARN_ON(!sd || !sd->groups);
+    struct sched_group *sg = sd->groups;
 
-    if (cpu != group_first_cpu(sd->groups))
+    WARN_ON(!sd || !sg);
+
+    do {
+        sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+        sg = sg->next;
+    } while (sg != sd->groups);
+
+    if (cpu != group_first_cpu(sg))
         return;
 
-    sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
-
     update_group_power(sd, cpu);
 }
 
@@ -7170,15 +7302,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
     struct sd_data *sdd = sd->private;
-    struct sched_group *sg = sd->groups;
 
     WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
     *per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-    if (cpu == cpumask_first(sched_group_cpus(sg))) {
-        WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+    if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
         *per_cpu_ptr(sdd->sg, cpu) = NULL;
-    }
+
+    if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+        *per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7203,7 +7335,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
     { sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-    { sd_init_NODE, cpu_node_mask, },
+    { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
     { sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
     { NULL, },
@@ -7227,9 +7359,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
         if (!sdd->sg)
             return -ENOMEM;
 
+        sdd->sgp = alloc_percpu(struct sched_group_power *);
+        if (!sdd->sgp)
+            return -ENOMEM;
+
         for_each_cpu(j, cpu_map) {
             struct sched_domain *sd;
             struct sched_group *sg;
+            struct sched_group_power *sgp;
 
             sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
                     GFP_KERNEL, cpu_to_node(j));
@@ -7244,6 +7381,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                 return -ENOMEM;
 
             *per_cpu_ptr(sdd->sg, j) = sg;
+
+            sgp = kzalloc_node(sizeof(struct sched_group_power),
+                    GFP_KERNEL, cpu_to_node(j));
+            if (!sgp)
+                return -ENOMEM;
+
+            *per_cpu_ptr(sdd->sgp, j) = sgp;
         }
     }
 
@@ -7259,11 +7403,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
         struct sd_data *sdd = &tl->data;
 
         for_each_cpu(j, cpu_map) {
-            kfree(*per_cpu_ptr(sdd->sd, j));
+            struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+            if (sd && (sd->flags & SD_OVERLAP))
+                free_sched_groups(sd->groups, 0);
             kfree(*per_cpu_ptr(sdd->sg, j));
+            kfree(*per_cpu_ptr(sdd->sgp, j));
         }
         free_percpu(sdd->sd);
         free_percpu(sdd->sg);
+        free_percpu(sdd->sgp);
     }
 }
 
@@ -7309,8 +7457,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
         struct sched_domain_topology_level *tl;
 
         sd = NULL;
-        for (tl = sched_domain_topology; tl->init; tl++)
+        for (tl = sched_domain_topology; tl->init; tl++) {
             sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+            if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+                sd->flags |= SD_OVERLAP;
+            if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+                break;
+        }
 
         while (sd->child)
             sd = sd->child;
@@ -7322,13 +7475,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
     for_each_cpu(i, cpu_map) {
         for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
             sd->span_weight = cpumask_weight(sched_domain_span(sd));
-            get_group(i, sd->private, &sd->groups);
-            atomic_inc(&sd->groups->ref);
-
-            if (i != cpumask_first(sched_domain_span(sd)))
-                continue;
-
-            build_sched_groups(sd);
+            if (sd->flags & SD_OVERLAP) {
+                if (build_overlap_sched_groups(sd, i))
+                    goto error;
+            } else {
+                if (build_sched_groups(sd, i))
+                    goto error;
+            }
         }
     }
 
@@ -7750,6 +7903,9 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 #endif
 #endif
     cfs_rq->min_vruntime = (u64)(-(1LL << 20));
+#ifndef CONFIG_64BIT
+    cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
@@ -8441,10 +8597,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
     if (!tg->se[0])
         return -EINVAL;
 
-    if (shares < MIN_SHARES)
-        shares = MIN_SHARES;
-    else if (shares > MAX_SHARES)
-        shares = MAX_SHARES;
+    shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
 
     mutex_lock(&shares_mutex);
     if (tg->shares == shares)