Merge branch 'sched/core' into cpus4096

Conflicts:
	include/linux/ftrace.h
	kernel/sched.c
@@ -267,6 +267,10 @@ struct task_group {
 	struct cgroup_subsys_state css;
 #endif
 
+#ifdef CONFIG_USER_SCHED
+	uid_t uid;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -292,6 +296,12 @@ struct task_group {
 
 #ifdef CONFIG_USER_SCHED
 
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+	user->tg->uid = user->uid;
+}
+
 /*
  * Root task group.
  * Every UID task group (including init_task_group aka UID-0) will
@@ -1587,6 +1597,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
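Note on the hunk above: double_lock_balance() needs two runqueue locks while already holding one, and it dodges the classic ABBA deadlock by first trying the second lock without blocking and, on contention, dropping its own lock and reacquiring both in one fixed global order (lowest address first). The snippet below is a minimal user-space sketch of that ordering trick, assuming pthread mutexes; struct rq_like and lock_pair() are invented for the illustration and are not kernel interfaces. Builds with: gcc -pthread lock_pair.c

#include <pthread.h>
#include <stdio.h>

struct rq_like {
	pthread_mutex_t lock;
};

/*
 * Caller already holds this_rq->lock.  On return both locks are held.
 * Returns 1 if this_rq->lock had to be dropped and retaken, so any
 * state the caller computed under it may be stale; 0 otherwise.
 */
static int lock_pair(struct rq_like *this_rq, struct rq_like *busiest)
{
	int ret = 0;

	if (pthread_mutex_trylock(&busiest->lock) != 0) {
		/*
		 * Contended: take both locks in a fixed global order
		 * (lowest address first), which rules out an ABBA
		 * deadlock against a thread doing the mirror-image
		 * acquisition.
		 */
		if (busiest < this_rq) {
			pthread_mutex_unlock(&this_rq->lock);
			pthread_mutex_lock(&busiest->lock);
			pthread_mutex_lock(&this_rq->lock);
			ret = 1;
		} else {
			pthread_mutex_lock(&busiest->lock);
		}
	}
	return ret;
}

int main(void)
{
	struct rq_like a = { PTHREAD_MUTEX_INITIALIZER };
	struct rq_like b = { PTHREAD_MUTEX_INITIALIZER };
	int dropped;

	pthread_mutex_lock(&a.lock);		/* our own "runqueue" */
	dropped = lock_pair(&a, &b);		/* now both are held */
	printf("had to drop and retake: %d\n", dropped);

	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}

The return value mirrors the kernel function: 1 means the first lock was released for a moment, so the caller must revalidate anything it derived while holding it.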
@@ -2783,40 +2826,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
-/*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3676,7 +3685,7 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = -1;
+	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 	cpumask_var_t tmpmask;
 
@@ -6577,7 +6586,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			req = list_entry(rq->migration_queue.next,
 					 struct migration_req, list);
 			list_del_init(&req->list);
+			spin_unlock_irq(&rq->lock);
 			complete(&req->done);
+			spin_lock_irq(&rq->lock);
 		}
 		spin_unlock_irq(&rq->lock);
 		break;
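Note on the hunk above: the two added lines drop rq->lock around complete(). Waking the task that waits on the request can re-enter scheduler paths that themselves need runqueue locks, so signalling with rq->lock held risks a deadlock; the loop therefore unlinks the request under the lock, releases the lock for the wake-up, and retakes it before touching the queue again. Below is a minimal user-space sketch of that drain pattern, assuming pthreads; struct work, drain_queue() and say_hi() are made-up names, not kernel interfaces. Builds with: gcc -pthread drain.c

#include <pthread.h>
#include <stdio.h>

struct work {
	struct work *next;
	void (*fn)(void);
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *queue_head;

/*
 * Pop each item under the lock, but run its callback with the lock
 * dropped, because the callback may itself need queue_lock.
 */
static void drain_queue(void)
{
	pthread_mutex_lock(&queue_lock);
	while (queue_head) {
		struct work *w = queue_head;

		queue_head = w->next;		/* unlink under the lock */
		pthread_mutex_unlock(&queue_lock);
		w->fn();			/* analogue of complete() */
		pthread_mutex_lock(&queue_lock);
	}
	pthread_mutex_unlock(&queue_lock);
}

static void say_hi(void)
{
	printf("work item ran\n");
}

int main(void)
{
	struct work w = { .next = NULL, .fn = say_hi };

	queue_head = &w;
	drain_queue();
	return 0;
}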
@@ -6781,6 +6792,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 			 SD_BALANCE_EXEC |
 			 SD_SHARE_CPUPOWER |
 			 SD_SHARE_PKG_RESOURCES);
+		if (nr_node_ids == 1)
+			pflags &= ~SD_SERIALIZE;
 	}
 	if (~cflags & pflags)
 		return 0;
@@ -7716,8 +7729,14 @@ static struct sched_domain_attr *dattr_cur;
  */
 static cpumask_var_t fallback_doms;
 
-void __attribute__((weak)) arch_update_cpu_topology(void)
+/*
+ * arch_update_cpu_topology lets virtualized architectures update the
+ * cpu core maps. It is supposed to return 1 if the topology changed
+ * or 0 if it stayed the same.
+ */
+int __attribute__((weak)) arch_update_cpu_topology(void)
 {
+	return 0;
 }
 
 /*
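Note on the hunk above: arch_update_cpu_topology() becomes a weak default that reports "no change", so an architecture (for example a virtualized platform) can ship a strong definition that wins at link time, without any #ifdef in the generic scheduler code. A standalone sketch of that weak-symbol pattern, assuming GCC/ELF semantics; the printf harness is only for the demo:

#include <stdio.h>

/*
 * Generic fallback, mirroring the hunk above: report "topology
 * unchanged".  Any other object file that provides a non-weak
 * definition of arch_update_cpu_topology() replaces this one at
 * link time, which is how an architecture plugs in its own version.
 */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
	return 0;
}

int main(void)
{
	/* With only the weak default linked in, this prints 0. */
	printf("topology changed: %d\n", arch_update_cpu_topology());
	return 0;
}

Compiled on its own this prints 0; linking in another object that defines a non-weak arch_update_cpu_topology() changes the answer without editing this file.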
@@ -7811,17 +7830,21 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
+	int new_topology;
 
 	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
+	/* Let architecture update cpu core mappings. */
+	new_topology = arch_update_cpu_topology();
+
 	n = doms_new ? ndoms_new : 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
-		for (j = 0; j < n; j++) {
+		for (j = 0; j < n && !new_topology; j++) {
 			if (cpumask_equal(&doms_cur[i], &doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
@@ -7841,7 +7864,7 @@ match1:
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < ndoms_cur; j++) {
+		for (j = 0; j < ndoms_cur && !new_topology; j++) {
 			if (cpumask_equal(&doms_new[i], &doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
 
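Note on the last two hunks: the new_topology result is threaded into both matching loops, so when the architecture reports a changed topology the inner searches are skipped, nothing is treated as "unchanged", and every scheduling domain is destroyed and rebuilt. A toy model of that control flow, assuming plain ints in place of cpumasks and ignoring the attribute comparison; reconcile() and everything around it is invented for the illustration:

#include <stdio.h>

static void reconcile(const int *cur, int ncur,
		      const int *new, int nnew, int force_rebuild)
{
	int i, j;

	/* Destroy current entries that have no identical counterpart. */
	for (i = 0; i < ncur; i++) {
		for (j = 0; j < nnew && !force_rebuild; j++) {
			if (cur[i] == new[j])
				goto match1;
		}
		printf("destroy %d\n", cur[i]);
match1:
		;
	}

	/* Build new entries that were not already present. */
	for (i = 0; i < nnew; i++) {
		for (j = 0; j < ncur && !force_rebuild; j++) {
			if (new[i] == cur[j])
				goto match2;
		}
		printf("build %d\n", new[i]);
match2:
		;
	}
}

int main(void)
{
	int cur[] = { 1, 2, 3 };
	int new[] = { 2, 3, 4 };

	reconcile(cur, 3, new, 3, 0);	/* reuses the overlapping entries */
	printf("-- forced rebuild --\n");
	reconcile(cur, 3, new, 3, 1);	/* tears down and rebuilds everything */
	return 0;
}

The first call reuses the overlapping entries 2 and 3 (destroying 1 and building 4); the forced call destroys all three current entries and builds all three new ones.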