Merge branch 'linus' into perfcounters/core
Conflicts:
        fs/exec.c
        include/linux/init_task.h

Simple context conflicts.
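Most of what the diff below touches in kernel/fork.c is the credentials rework arriving from the 'linus' side: copy_process() now charges the per-user process limit through p->real_cred->user, the old p->user / group_info bookkeeping is replaced by copy_creds(), and the teardown paths drop references with put_cred(). As a rough, hypothetical userspace sketch of that refcounted-credentials pattern (illustrative only, not the kernel implementation):

/* Toy model of refcounted credentials, loosely mirroring the
 * get_cred()/put_cred() usage visible in the diff below.
 * Plain userspace C, illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct cred {
        int refcount;
        unsigned int uid;
};

struct task {
        struct cred *real_cred;         /* objective identity */
        struct cred *cred;              /* effective identity */
};

static struct cred *get_cred(struct cred *c)
{
        c->refcount++;                  /* take a reference */
        return c;
}

static void put_cred(struct cred *c)
{
        if (--c->refcount == 0)         /* last reference frees the object */
                free(c);
}

int main(void)
{
        struct cred *c = malloc(sizeof(*c));
        struct task child;

        if (!c)
                return 1;
        c->refcount = 1;
        c->uid = 1000;

        /* "fork": the child shares the parent's credentials */
        child.real_cred = get_cred(c);
        child.cred = get_cred(c);
        put_cred(c);                    /* parent's original reference */

        printf("uid=%u refs=%d\n", child.real_cred->uid, child.real_cred->refcount);

        /* exit/error path, as in __put_task_struct() and the fork error labels:
         * drop both references */
        put_cred(child.real_cred);
        put_cred(child.cred);
        return 0;
}

The kernel's real struct cred is additionally immutable once committed and RCU-freed, but the take/drop discipline in the error paths follows the same pattern.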
@@ -47,6 +47,7 @@
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
@@ -80,6 +81,8 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */

DEFINE_TRACE(sched_process_fork);

int nr_processes(void)
{
        int cpu;
@@ -137,6 +140,7 @@ void free_task(struct task_struct *tsk)
        prop_local_destroy_single(&tsk->dirties);
        free_thread_info(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@@ -147,9 +151,8 @@ void __put_task_struct(struct task_struct *tsk)
        WARN_ON(atomic_read(&tsk->usage));
        WARN_ON(tsk == current);

        security_task_free(tsk);
        free_uid(tsk->user);
        put_group_info(tsk->group_info);
        put_cred(tsk->real_cred);
        put_cred(tsk->cred);
        delayacct_tsk_free(tsk);

        if (!profile_handoff_task(tsk))
@@ -818,12 +821,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        if (!sig)
                return -ENOMEM;

        ret = copy_thread_group_keys(tsk);
        if (ret < 0) {
                kmem_cache_free(signal_cachep, sig);
                return ret;
        }

        atomic_set(&sig->count, 1);
        atomic_set(&sig->live, 1);
        init_waitqueue_head(&sig->wait_chldexit);
@@ -868,7 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
void __cleanup_signal(struct signal_struct *sig)
{
        thread_group_cputime_free(sig);
        exit_thread_group_keys(sig);
        tty_kref_put(sig->tty);
        kmem_cache_free(signal_cachep, sig);
}
@@ -985,16 +981,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
        retval = -EAGAIN;
        if (atomic_read(&p->user->processes) >=
        if (atomic_read(&p->real_cred->user->processes) >=
                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                    p->user != current->nsproxy->user_ns->root_user)
                    p->real_cred->user != INIT_USER)
                        goto bad_fork_free;
        }

        atomic_inc(&p->user->__count);
        atomic_inc(&p->user->processes);
        get_group_info(p->group_info);
        retval = copy_creds(p, clone_flags);
        if (retval < 0)
                goto bad_fork_free;

        /*
         * If multiple threads are within copy_process(), then this check
@@ -1049,10 +1045,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        do_posix_clock_monotonic_gettime(&p->start_time);
        p->real_start_time = p->start_time;
        monotonic_to_bootbased(&p->real_start_time);
#ifdef CONFIG_SECURITY
        p->security = NULL;
#endif
        p->cap_bset = current->cap_bset;
        p->io_context = NULL;
        p->audit_context = NULL;
        cgroup_fork(p);
@@ -1093,14 +1085,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_DEBUG_MUTEXES
        p->blocked_on = NULL; /* not blocked yet */
#endif
        if (unlikely(ptrace_reparented(current)))
                ptrace_fork(p, clone_flags);

        /* Perform scheduler related setup. Assign this task to a CPU. */
        sched_fork(p, clone_flags);

        if ((retval = security_task_alloc(p)))
                goto bad_fork_cleanup_policy;
        if ((retval = audit_alloc(p)))
                goto bad_fork_cleanup_security;
                goto bad_fork_cleanup_policy;
        /* copy all the process information */
        if ((retval = copy_semundo(clone_flags, p)))
                goto bad_fork_cleanup_audit;
@@ -1114,10 +1106,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                goto bad_fork_cleanup_sighand;
        if ((retval = copy_mm(clone_flags, p)))
                goto bad_fork_cleanup_signal;
        if ((retval = copy_keys(clone_flags, p)))
                goto bad_fork_cleanup_mm;
        if ((retval = copy_namespaces(clone_flags, p)))
                goto bad_fork_cleanup_keys;
                goto bad_fork_cleanup_mm;
        if ((retval = copy_io(clone_flags, p)))
                goto bad_fork_cleanup_namespaces;
        retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
@@ -1137,6 +1127,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                }
        }

        ftrace_graph_init_task(p);

        p->pid = pid_nr(pid);
        p->tgid = p->pid;
        if (clone_flags & CLONE_THREAD)
@@ -1145,7 +1137,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (current->nsproxy != p->nsproxy) {
                retval = ns_cgroup_clone(p, pid);
                if (retval)
                        goto bad_fork_free_pid;
                        goto bad_fork_free_graph;
        }

        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1238,7 +1230,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                spin_unlock(&current->sighand->siglock);
                write_unlock_irq(&tasklist_lock);
                retval = -ERESTARTNOINTR;
                goto bad_fork_free_pid;
                goto bad_fork_free_graph;
        }

        if (clone_flags & CLONE_THREAD) {
@@ -1275,6 +1267,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        cgroup_post_fork(p);
        return p;

bad_fork_free_graph:
        ftrace_graph_exit_task(p);
bad_fork_free_pid:
        if (pid != &init_struct_pid)
                free_pid(pid);
@@ -1282,8 +1276,6 @@ bad_fork_cleanup_io:
        put_io_context(p->io_context);
bad_fork_cleanup_namespaces:
        exit_task_namespaces(p);
bad_fork_cleanup_keys:
        exit_keys(p);
bad_fork_cleanup_mm:
        if (p->mm)
                mmput(p->mm);
@@ -1299,8 +1291,6 @@ bad_fork_cleanup_semundo:
        exit_sem(p);
bad_fork_cleanup_audit:
        audit_free(p);
bad_fork_cleanup_security:
        security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
        mpol_put(p->mempolicy);
@@ -1313,9 +1303,9 @@ bad_fork_cleanup_cgroup:
bad_fork_cleanup_put_domain:
        module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
        put_group_info(p->group_info);
        atomic_dec(&p->user->processes);
        free_uid(p->user);
        atomic_dec(&p->cred->user->processes);
        put_cred(p->real_cred);
        put_cred(p->cred);
bad_fork_free:
        free_task(p);
fork_out:
@@ -1358,6 +1348,21 @@ long do_fork(unsigned long clone_flags,
        int trace = 0;
        long nr;

        /*
         * Do some preliminary argument and permissions checking before we
         * actually start allocating stuff
         */
        if (clone_flags & CLONE_NEWUSER) {
                if (clone_flags & CLONE_THREAD)
                        return -EINVAL;
                /* hopefully this check will go away when userns support is
                 * complete
                 */
                if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
                                !capable(CAP_SETGID))
                        return -EPERM;
        }

        /*
         * We hope to recycle these flags after 2.6.26
         */
@@ -1606,8 +1611,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
        err = -EINVAL;
        if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
                                CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
                                CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
                                CLONE_NEWNET))
                                CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
                goto bad_unshare_out;

        /*