Merge branches 'x86/cleanups', 'x86/kexec', 'x86/mce2' and 'linus' into x86/core

Ingo Molnar committed 2009-03-11 10:49:15 +01:00
320 changed files with 6539 additions and 3665 deletions

kernel/fork.c

@@ -1184,10 +1184,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 	clear_all_latency_tracing(p);
 
-	/* Our parent execution domain becomes current domain
-	   These must match for thread signalling to apply */
-	p->parent_exec_id = p->self_exec_id;
-
 	/* ok, now we should be set up.. */
 	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
 	p->pdeath_signal = 0;
@@ -1225,10 +1221,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		set_task_cpu(p, smp_processor_id());
 
 	/* CLONE_PARENT re-uses the old parent */
-	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
+	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
 		p->real_parent = current->real_parent;
-	else
+		p->parent_exec_id = current->parent_exec_id;
+	} else {
 		p->real_parent = current;
+		p->parent_exec_id = current->self_exec_id;
+	}
 
 	spin_lock(&current->sighand->siglock);

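Taken together, the two hunks above move the parent_exec_id assignment into the branch that selects p->real_parent, so a CLONE_PARENT child records the exec generation of the process that will actually receive its exit signal, not current's. The consumer of these fields is the exit path; a simplified sketch of the check in do_notify_parent() (kernel/exit.c, surrounding guard conditions trimmed):

	/* If the parent has execed since this thread was forked, the exec
	 * generations no longer match: demote whatever exit signal was
	 * requested to a plain SIGCHLD. */
	if (sig != SIGCHLD && !task_detached(tsk) &&
	    tsk->parent_exec_id != tsk->parent->self_exec_id)
		sig = SIGCHLD;

With the old unconditional assignment, a CLONE_PARENT child could carry an exec_id that did not correspond to its real parent, defeating this demotion.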
kernel/module.c

@@ -51,6 +51,7 @@
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/percpu.h>
 
 #if 0
 #define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
 }
 
 #ifdef CONFIG_SMP
+
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+static void *percpu_modalloc(unsigned long size, unsigned long align,
+			     const char *name)
+{
+	void *ptr;
+
+	if (align > PAGE_SIZE) {
+		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+		       name, align, PAGE_SIZE);
+		align = PAGE_SIZE;
+	}
+
+	ptr = __alloc_reserved_percpu(size, align);
+	if (!ptr)
+		printk(KERN_WARNING
+		       "Could not allocate %lu bytes percpu data\n", size);
+	return ptr;
+}
+
+static void percpu_modfree(void *freeme)
+{
+	free_percpu(freeme);
+}
+
+#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
 
 /* Size of each block. -ve means used. */
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme)
 	}
 }
 
-static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-				 Elf_Shdr *sechdrs,
-				 const char *secstrings)
-{
-	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
-}
-
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
-}
-
 static int percpu_modinit(void)
 {
 	pcpu_num_used = 2;
@@ -513,7 +527,26 @@ static int percpu_modinit(void)
 	return 0;
 }
 __initcall(percpu_modinit);
+
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs,
+				 const char *secstrings)
+{
+	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+}
+
+static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+}
+
 #else /* ... !CONFIG_SMP */
+
 static inline void *percpu_modalloc(unsigned long size, unsigned long align,
				    const char *name)
 {
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src,
 	/* pcpusec should be 0, and size of that section should be 0. */
 	BUG_ON(size != 0);
 }
+#endif /* CONFIG_SMP */
 
 #define MODINFO_ATTR(field)	\

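Under CONFIG_HAVE_DYNAMIC_PER_CPU_AREA, a module's static per-cpu variables are now carved out of the reserved region of the first per-cpu chunk via __alloc_reserved_percpu(), keeping them within the offset range that module relocations can reach; the legacy block allocator after the #else remains the fallback. For illustration, this is the kind of module code the machinery serves (hypothetical; the variable and function names are made up):

	/* A DEFINE_PER_CPU in module code lands in the .data.percpu section
	 * that find_pcpusec() locates; percpu_modalloc() provides backing
	 * store at load time and percpu_modcopy() replicates the
	 * initializer to every possible CPU. */
	static DEFINE_PER_CPU(unsigned long, hit_count);

	static void note_hit(void)
	{
		get_cpu_var(hit_count)++;	/* disables preemption */
		put_cpu_var(hit_count);
	}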
kernel/rcuclassic.c

@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	    (idle_cpu(cpu) && rcu_scheduler_active &&
+	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user

kernel/rcupdate.c

@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/kernel_stat.h>
 
 enum rcu_barrier {
 	RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
 void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
 	__rcu_init();
 }
+
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}

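rcu_scheduler_active exists because early in boot the sole running task occupies what will later be the idle loop, so treating an "idle" CPU as quiescent before the scheduler starts would let grace periods end prematurely; rcu_scheduler_starting() flips the flag exactly once, while the machine is still single-threaded. The rcu_blocking_is_gp() shortcut used in synchronize_rcu() amounts to the single-CPU test; a sketch of the helper (the era's definition lives in include/linux/rcupdate.h):

	static inline int rcu_blocking_is_gp(void)
	{
		/* If only one CPU is online, the mere ability to block
		 * means every other context has already quiesced. */
		return num_online_cpus() == 1;
	}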
kernel/rcupreempt.c

@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
 {
 	struct rcu_synchronize rcu;
 
+	if (num_online_cpus() == 1)
+		return;  /* blocking is gp if only one CPU! */
+
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_sched(&rcu.head, wakeme_after_rcu);

kernel/rcutree.c

@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	    (idle_cpu(cpu) && rcu_scheduler_active &&
+	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user

kernel/sched.c

@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
 	ktime_t now;
 
-	if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
 	if (hrtimer_active(&rt_b->rt_period_timer))
@@ -9219,6 +9219,16 @@ static int sched_rt_global_constraints(void)
 	return ret;
 }
 
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+	/* Don't accept realtime tasks when there is no way for them to run */
+	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+		return 0;
+
+	return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9312,8 +9322,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
 		return -EINVAL;
 #else
 	/* We don't support RT-tasks being in separate groups */
@@ -9476,7 +9485,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 	u64 data;
 
 #ifndef CONFIG_64BIT
@@ -9495,7 +9504,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 
 #ifndef CONFIG_64BIT
 	/*
@@ -9591,7 +9600,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 	ca = task_ca(tsk);
 	for (; ca; ca = ca->parent) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }

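Three independent fixes meet in this file: the start_rt_bandwidth() guard is corrected to bail out when bandwidth control is disabled or the runtime is unlimited; sched_rt_can_attach() centralizes the "no RT bandwidth means no RT tasks" test so that kernel/user.c can reuse it for setuid (see below); and the cpuacct accessors move to the per_cpu_ptr() spelling that pairs with the dynamic per-cpu allocator. A minimal sketch of that last pattern (names are illustrative; cpuacct_create() in this file performs the real allocation):

	u64 *cpuusage = alloc_percpu(u64);	/* one u64 slot per CPU */

	/* each hunk above then addresses a single CPU's slot: */
	u64 *slot = per_cpu_ptr(cpuusage, cpu);
	*slot += delta;				/* 'delta' is illustrative */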
kernel/softirq.c

@@ -626,6 +626,7 @@ static int ksoftirqd(void * __bind_cpu)
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
+			rcu_qsctr_inc((long)__bind_cpu);
 		}
 		preempt_enable();
 		set_current_state(TASK_INTERRUPTIBLE);

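A CPU that stays busy in ksoftirqd passes through neither user mode nor the idle loop, so the tick-driven rcu_check_callbacks() path above never sees it quiesce and grace periods can stall under softirq load. The point just after cond_resched() is a safe one, so a quiescent state is reported by hand. For classic RCU the helper boils down to marking the per-CPU state; a sketch (the actual definition of this era is in include/linux/rcuclassic.h):

	static inline void rcu_qsctr_inc(int cpu)
	{
		struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

		rdp->passed_quiesc = 1;	/* this CPU passed a quiescent state */
	}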
kernel/stop_machine.c

@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 	 * doesn't hit this CPU until we're ready. */
 	get_cpu();
 	for_each_online_cpu(i) {
-		sm_work = percpu_ptr(stop_machine_work, i);
+		sm_work = per_cpu_ptr(stop_machine_work, i);
 		INIT_WORK(sm_work, stop_cpu);
 		queue_work_on(i, stop_machine_wq, sm_work);
 	}

kernel/sys.c

@@ -559,7 +559,7 @@ error:
 	abort_creds(new);
 	return retval;
 }
-
+
 /*
  * change the user struct in a credentials set to match the new UID
  */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
 	if (!new_user)
 		return -EAGAIN;
 
+	if (!task_can_switch_user(new_user, current)) {
+		free_uid(new_user);
+		return -EINVAL;
+	}
+
 	if (atomic_read(&new_user->processes) >=
 				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
 			new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 			goto error;
 	}
 
-	retval = -EAGAIN;
-	if (new->uid != old->uid && set_user(new) < 0)
-		goto error;
-
+	if (new->uid != old->uid) {
+		retval = set_user(new);
+		if (retval < 0)
+			goto error;
+	}
 	if (ruid != (uid_t) -1 ||
 	    (euid != (uid_t) -1 && euid != old->uid))
 		new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
 	retval = -EPERM;
 	if (capable(CAP_SETUID)) {
 		new->suid = new->uid = uid;
-		if (uid != old->uid && set_user(new) < 0) {
-			retval = -EAGAIN;
-			goto error;
+		if (uid != old->uid) {
+			retval = set_user(new);
+			if (retval < 0)
+				goto error;
 		}
 	} else if (uid != old->uid && uid != new->suid) {
 		goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 		goto error;
 	}
 
-	retval = -EAGAIN;
 	if (ruid != (uid_t) -1) {
 		new->uid = ruid;
-		if (ruid != old->uid && set_user(new) < 0)
-			goto error;
+		if (ruid != old->uid) {
+			retval = set_user(new);
+			if (retval < 0)
+				goto error;
+		}
 	}
 	if (euid != (uid_t) -1)
 		new->euid = euid;

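set_user() can now fail in two distinct ways, -EAGAIN for the RLIMIT_NPROC limit and -EINVAL when the switch itself is not allowed, so every caller forwards its return value instead of hardcoding -EAGAIN. The predicate it relies on is declared along these lines (both definitions appear in kernel/user.c at the end of this commit):

	/* include/linux/sched.h: nonzero if 'tsk' may switch to user 'up';
	 * with RT group scheduling this requires the target user's task
	 * group to have RT bandwidth for an RT task to run in. */
	extern int task_can_switch_user(struct user_struct *up,
					struct task_struct *tsk);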
kernel/tsacct.c

@@ -122,8 +122,10 @@ void acct_update_integrals(struct task_struct *tsk)
 	if (likely(tsk->mm)) {
 		cputime_t time, dtime;
 		struct timeval value;
+		unsigned long flags;
 		u64 delta;
 
+		local_irq_save(flags);
 		time = tsk->stime + tsk->utime;
 		dtime = cputime_sub(time, tsk->acct_timexpd);
 		jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
@@ -131,10 +133,12 @@ void acct_update_integrals(struct task_struct *tsk)
 		delta = delta * USEC_PER_SEC + value.tv_usec;
 
 		if (delta == 0)
-			return;
+			goto out;
 
 		tsk->acct_timexpd = time;
 		tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
 		tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
+	out:
+		local_irq_restore(flags);
 	}
 }

kernel/user.c

@@ -286,14 +286,12 @@ int __init uids_sysfs_init(void)
 /* work function to remove sysfs directory for a user and free up
  * corresponding structures.
  */
-static void remove_user_sysfs_dir(struct work_struct *w)
+static void cleanup_user_struct(struct work_struct *w)
 {
 	struct user_struct *up = container_of(w, struct user_struct, work);
 	unsigned long flags;
 	int remove_user = 0;
 
-	if (up->user_ns != &init_user_ns)
-		return;
 	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
 	 * atomic.
 	 */
@@ -312,9 +310,11 @@ static void remove_user_sysfs_dir(struct work_struct *w)
 	if (!remove_user)
 		goto done;
 
-	kobject_uevent(&up->kobj, KOBJ_REMOVE);
-	kobject_del(&up->kobj);
-	kobject_put(&up->kobj);
+	if (up->user_ns == &init_user_ns) {
+		kobject_uevent(&up->kobj, KOBJ_REMOVE);
+		kobject_del(&up->kobj);
+		kobject_put(&up->kobj);
+	}
 
 	sched_destroy_user(up);
 	key_put(up->uid_keyring);
@@ -335,7 +335,7 @@ static void free_user(struct user_struct *up, unsigned long flags)
 	atomic_inc(&up->__count);
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 
-	INIT_WORK(&up->work, remove_user_sysfs_dir);
+	INIT_WORK(&up->work, cleanup_user_struct);
 	schedule_work(&up->work);
 }
@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
 #endif
 
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+	return sched_rt_can_attach(up->tg, tsk);
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+	return 1;
+}
+#endif
+
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().