cpu-hotplug: replace per-subsystem mutexes with get_online_cpus()
This patch converts the known per-subsystem mutexes to get_online_cpus()/put_online_cpus(). It also eliminates the CPU_LOCK_ACQUIRE and CPU_LOCK_RELEASE hotplug notification events.

Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
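For context, a minimal sketch (not part of the patch) of the locking pattern the conversion establishes: read-side code that only needs the set of online CPUs to stay stable brackets its critical section with get_online_cpus()/put_online_cpus() instead of taking a subsystem-private mutex from the CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE notifier events. The helper name example_walk_online_cpus() below is hypothetical.

/*
 * Sketch only; assumes a hypothetical subsystem helper, not code from
 * this patch.  get_online_cpus() acts as the read side of the hotplug
 * "lock": no CPU can come or go while the section runs.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_walk_online_cpus(void)
{
        int cpu;

        get_online_cpus();              /* pin the current set of online CPUs */
        for_each_online_cpu(cpu)        /* safe: no CPU can be hot-unplugged here */
                printk(KERN_INFO "cpu %d is online\n", cpu);
        put_online_cpus();              /* allow hotplug operations again */
}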
Committed by: Ingo Molnar
Parent: 86ef5c9a8e
Commit: 95402b3829
@@ -439,7 +439,6 @@ struct rq {
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-static DEFINE_MUTEX(sched_hotcpu_mutex);
 
 static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
 {
@@ -4546,13 +4545,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
         struct task_struct *p;
         int retval;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         read_lock(&tasklist_lock);
 
         p = find_process_by_pid(pid);
         if (!p) {
                 read_unlock(&tasklist_lock);
-                mutex_unlock(&sched_hotcpu_mutex);
+                put_online_cpus();
                 return -ESRCH;
         }
 
@@ -4592,7 +4591,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
         }
 out_unlock:
         put_task_struct(p);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
         return retval;
 }
 
@@ -4649,7 +4648,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
         struct task_struct *p;
         int retval;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         read_lock(&tasklist_lock);
 
         retval = -ESRCH;
@@ -4665,7 +4664,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask)
 
 out_unlock:
         read_unlock(&tasklist_lock);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
 
         return retval;
 }
@@ -5625,9 +5624,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
         struct rq *rq;
 
         switch (action) {
-        case CPU_LOCK_ACQUIRE:
-                mutex_lock(&sched_hotcpu_mutex);
-                break;
 
         case CPU_UP_PREPARE:
         case CPU_UP_PREPARE_FROZEN:
@@ -5697,9 +5693,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 spin_unlock_irq(&rq->lock);
                 break;
 #endif
-        case CPU_LOCK_RELEASE:
-                mutex_unlock(&sched_hotcpu_mutex);
-                break;
         }
         return NOTIFY_OK;
 }
@@ -6655,10 +6648,10 @@ static int arch_reinit_sched_domains(void)
 {
         int err;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         detach_destroy_domains(&cpu_online_map);
         err = arch_init_sched_domains(&cpu_online_map);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
 
         return err;
 }
@@ -6769,12 +6762,12 @@ void __init sched_init_smp(void)
 {
         cpumask_t non_isolated_cpus;
 
-        mutex_lock(&sched_hotcpu_mutex);
+        get_online_cpus();
         arch_init_sched_domains(&cpu_online_map);
         cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
         if (cpus_empty(non_isolated_cpus))
                 cpu_set(smp_processor_id(), non_isolated_cpus);
-        mutex_unlock(&sched_hotcpu_mutex);
+        put_online_cpus();
         /* XXX: Theoretical race here - CPU may be hotplugged now */
         hotcpu_notifier(update_sched_domains, 0);
 