Merge branch 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc

* 'task_killable' of git://git.kernel.org/pub/scm/linux/kernel/git/willy/misc: (22 commits)
  Remove commented-out code copied from NFS
  NFS: Switch from intr mount option to TASK_KILLABLE
  Add wait_for_completion_killable
  Add wait_event_killable
  Add schedule_timeout_killable
  Use mutex_lock_killable in vfs_readdir
  Add mutex_lock_killable
  Use lock_page_killable
  Add lock_page_killable
  Add fatal_signal_pending
  Add TASK_WAKEKILL
  exit: Use task_is_*
  signal: Use task_is_*
  sched: Use task_contributes_to_load, TASK_ALL and TASK_NORMAL
  ptrace: Use task_is_*
  power: Use task_is_*
  wait: Use TASK_NORMAL
  proc/base.c: Use task_is_*
  proc/array.c: Use TASK_REPORT
  perfmon: Use task_is_*
  ...

Fixed up conflicts in NFS/sunrpc manually.
Author: Linus Torvalds
Date:   2008-02-01 11:45:47 +11:00

38 changed files with 282 additions and 252 deletions
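
The hunks below lean on new task-state constants from include/linux/sched.h, which is among the 38 files changed but not excerpted here. For orientation, after this series the relevant definitions look roughly like the following sketch (reconstructed from the series, not quoted from this page):

	#define TASK_INTERRUPTIBLE	1
	#define TASK_UNINTERRUPTIBLE	2
	#define __TASK_STOPPED		4
	#define __TASK_TRACED		8
	#define TASK_WAKEKILL		128	/* wake on signals that are deadly */

	/* A killable sleep is an uninterruptible sleep that SIGKILL may still
	 * end; stopped and traced tasks are likewise wakeable by fatal
	 * signals. */
	#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
	#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
	#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

	/* Wakeup masks used by the scheduler and wait code below. */
	#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
	#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)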

diff --git a/kernel/exit.c b/kernel/exit.c
--- a/kernel/exit.c
+++ b/kernel/exit.c

@@ -249,7 +249,7 @@ static int has_stopped_jobs(struct pid *pgrp)
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p->state != TASK_STOPPED)
+		if (!task_is_stopped(p))
 			continue;
 		retval = 1;
 		break;
@@ -614,7 +614,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 		p->parent = p->real_parent;
 		add_parent(p);
 
-		if (p->state == TASK_TRACED) {
+		if (task_is_traced(p)) {
 			/*
 			 * If it was at a trace stop, turn it into
 			 * a normal stop since it's no longer being
@@ -1563,60 +1563,51 @@ repeat:
 		}
 		allowed = 1;
 
-		switch (p->state) {
-		case TASK_TRACED:
-			/*
-			 * When we hit the race with PTRACE_ATTACH,
-			 * we will not report this child.  But the
-			 * race means it has not yet been moved to
-			 * our ptrace_children list, so we need to
-			 * set the flag here to avoid a spurious ECHILD
-			 * when the race happens with the only child.
-			 */
-			flag = 1;
-			if (!my_ptrace_child(p))
-				continue;
-			/*FALLTHROUGH*/
-		case TASK_STOPPED:
+		if (task_is_stopped_or_traced(p)) {
 			/*
 			 * It's stopped now, so it might later
 			 * continue, exit, or stop again.
+			 *
+			 * When we hit the race with PTRACE_ATTACH, we
+			 * will not report this child.  But the race
+			 * means it has not yet been moved to our
+			 * ptrace_children list, so we need to set the
+			 * flag here to avoid a spurious ECHILD when
+			 * the race happens with the only child.
 			 */
 			flag = 1;
-			if (!(options & WUNTRACED) &&
-			    !my_ptrace_child(p))
-				continue;
+
+			if (!my_ptrace_child(p)) {
+				if (task_is_traced(p))
+					continue;
+				if (!(options & WUNTRACED))
+					continue;
+			}
+
 			retval = wait_task_stopped(p, ret == 2,
-						   (options & WNOWAIT),
-						   infop,
-						   stat_addr, ru);
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
 			if (retval == -EAGAIN)
 				goto repeat;
 			if (retval != 0) /* He released the lock. */
 				goto end;
-			break;
-		default:
-		// case EXIT_DEAD:
-			if (p->exit_state == EXIT_DEAD)
-				continue;
-		// case EXIT_ZOMBIE:
-			if (p->exit_state == EXIT_ZOMBIE) {
-				/*
-				 * Eligible but we cannot release
-				 * it yet:
-				 */
-				if (ret == 2)
-					goto check_continued;
-				if (!likely(options & WEXITED))
-					continue;
-				retval = wait_task_zombie(
-					p, (options & WNOWAIT),
-					infop, stat_addr, ru);
-				/* He released the lock.  */
-				if (retval != 0)
-					goto end;
-				break;
-			}
+		} else if (p->exit_state == EXIT_DEAD) {
+			continue;
+		} else if (p->exit_state == EXIT_ZOMBIE) {
+			/*
+			 * Eligible but we cannot release it yet:
+			 */
+			if (ret == 2)
+				goto check_continued;
+			if (!likely(options & WEXITED))
+				continue;
+			retval = wait_task_zombie(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
+			/* He released the lock.  */
+			if (retval != 0)
+				goto end;
+		} else {
 check_continued:
 			/*
 			 * It's running now, so it might later
@@ -1625,12 +1616,11 @@ check_continued:
 			 * exit, stop, or stop and then continue.
 			 */
 			flag = 1;
 			if (!unlikely(options & WCONTINUED))
 				continue;
-			retval = wait_task_continued(
-				p, (options & WNOWAIT),
-				infop, stat_addr, ru);
+			retval = wait_task_continued(p,
+					(options & WNOWAIT), infop,
+					stat_addr, ru);
 			if (retval != 0) /* He released the lock. */
 				goto end;
-			break;
 		}
 	}
 	if (!flag) {
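
The switch on p->state above could not survive TASK_STOPPED and TASK_TRACED becoming compound values, which is what the task_is_*() predicates are for. Per the sched.h sketch near the top of this page, they are bit tests rather than equality comparisons, roughly:

	#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
	#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
	#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
	#define task_contributes_to_load(task)	\
			((task->state & TASK_UNINTERRUPTIBLE) != 0)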

diff --git a/kernel/mutex.c b/kernel/mutex.c
--- a/kernel/mutex.c
+++ b/kernel/mutex.c

@@ -166,9 +166,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * got a signal? (This code gets eliminated in the
 		 * TASK_UNINTERRUPTIBLE case.)
 		 */
-		if (unlikely(state == TASK_INTERRUPTIBLE &&
-					signal_pending(task))) {
-			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+		if (unlikely((state == TASK_INTERRUPTIBLE &&
+					signal_pending(task)) ||
+			     (state == TASK_KILLABLE &&
+					fatal_signal_pending(task)))) {
+			mutex_remove_waiter(lock, &waiter,
+					    task_thread_info(task));
 			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
@@ -210,6 +213,14 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
 
+int __sched
+mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
+{
+	might_sleep();
+	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
+
 int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
@@ -272,6 +283,9 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
  * mutex_lock_interruptible() and mutex_trylock().
  */
+static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count);
+
 static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
@@ -294,6 +308,14 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+int fastcall __sched mutex_lock_killable(struct mutex *lock)
+{
+	might_sleep();
+	return __mutex_fastpath_lock_retval
+			(&lock->count, __mutex_lock_killable_slowpath);
+}
+EXPORT_SYMBOL(mutex_lock_killable);
+
 static void fastcall noinline __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
@@ -303,6 +325,14 @@ __mutex_lock_slowpath(atomic_t *lock_count)
 }
 
+static int fastcall noinline __sched
+__mutex_lock_killable_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
+}
+
 static noinline int fastcall __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
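
mutex_lock_killable() follows the mutex_lock_interruptible() convention: zero on success, nonzero if the wait was broken, except that only a fatal signal can break it. A minimal caller sketch; the function, helper, and error-code choice here are illustrative only (the series' real first user is vfs_readdir):

	int example_op(struct inode *inode)		/* hypothetical caller */
	{
		int err;

		if (mutex_lock_killable(&inode->i_mutex))
			return -EINTR;		/* SIGKILL arrived while waiting */
		err = do_the_work(inode);	/* hypothetical helper */
		mutex_unlock(&inode->i_mutex);
		return err;
	}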

diff --git a/kernel/power/process.c b/kernel/power/process.c
--- a/kernel/power/process.c
+++ b/kernel/power/process.c

@@ -86,9 +86,9 @@ static void fake_signal_wake_up(struct task_struct *p, int resume)
 
 static void send_fake_signal(struct task_struct *p)
 {
-	if (p->state == TASK_STOPPED)
+	if (task_is_stopped(p))
 		force_sig_specific(SIGSTOP, p);
-	fake_signal_wake_up(p, p->state == TASK_STOPPED);
+	fake_signal_wake_up(p, task_is_stopped(p));
 }
 
 static int has_mm(struct task_struct *p)
@@ -182,7 +182,7 @@ static int try_to_freeze_tasks(int freeze_user_space)
 			if (frozen(p) || !freezeable(p))
 				continue;
 
-			if (p->state == TASK_TRACED && frozen(p->parent)) {
+			if (task_is_traced(p) && frozen(p->parent)) {
 				cancel_freezing(p);
 				continue;
 			}

diff --git a/kernel/ptrace.c b/kernel/ptrace.c
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c

@@ -51,7 +51,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 void ptrace_untrace(struct task_struct *child)
 {
 	spin_lock(&child->sighand->siglock);
-	if (child->state == TASK_TRACED) {
+	if (task_is_traced(child)) {
 		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
 			child->state = TASK_STOPPED;
 		} else {
@@ -79,7 +79,7 @@ void __ptrace_unlink(struct task_struct *child)
 		add_parent(child);
 	}
 
-	if (child->state == TASK_TRACED)
+	if (task_is_traced(child))
 		ptrace_untrace(child);
 }
@@ -103,9 +103,9 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	    && child->signal != NULL) {
 		ret = 0;
 		spin_lock_irq(&child->sighand->siglock);
-		if (child->state == TASK_STOPPED) {
+		if (task_is_stopped(child)) {
 			child->state = TASK_TRACED;
-		} else if (child->state != TASK_TRACED && !kill) {
+		} else if (!task_is_traced(child) && !kill) {
 			ret = -ESRCH;
 		}
 		spin_unlock_irq(&child->sighand->siglock);
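
These predicate conversions are not mechanical renames. Once TASK_WAKEKILL is folded into TASK_STOPPED and TASK_TRACED, an open-coded mask test like the old p->state & (TASK_STOPPED | TASK_TRACED) would also match a TASK_KILLABLE sleeper, since all three now share the TASK_WAKEKILL bit. A stand-alone user-space illustration, reusing the values from the sched.h sketch above:

	#include <stdio.h>

	#define TASK_UNINTERRUPTIBLE	2
	#define __TASK_STOPPED		4
	#define __TASK_TRACED		8
	#define TASK_WAKEKILL		128
	#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
	#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
	#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

	int main(void)
	{
		long state = TASK_KILLABLE;	/* killable sleep, not stopped */

		/* old-style mask: shares TASK_WAKEKILL, so wrongly prints 1 */
		printf("%d\n", (state & (TASK_STOPPED | TASK_TRACED)) != 0);
		/* predicate-style mask: stop/trace bits only, correctly 0 */
		printf("%d\n", (state & (__TASK_STOPPED | __TASK_TRACED)) != 0);
		return 0;
	}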

diff --git a/kernel/sched.c b/kernel/sched.c
--- a/kernel/sched.c
+++ b/kernel/sched.c

@@ -1350,7 +1350,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
 	enqueue_task(rq, p, wakeup);
@@ -1362,7 +1362,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (p->state == TASK_UNINTERRUPTIBLE)
+	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
 	dequeue_task(rq, p, sleep);
@@ -1895,8 +1895,7 @@ out:
 int fastcall wake_up_process(struct task_struct *p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
-			      TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
+	return try_to_wake_up(p, TASK_ALL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
@@ -4124,8 +4123,7 @@ void complete(struct completion *x)
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done++;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 1, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -4136,8 +4134,7 @@ void complete_all(struct completion *x)
 	spin_lock_irqsave(&x->wait.lock, flags);
 	x->done += UINT_MAX/2;
-	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-			 0, 0, NULL);
+	__wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
 	spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
@@ -4151,8 +4148,10 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			if (state == TASK_INTERRUPTIBLE &&
-			    signal_pending(current)) {
+			if ((state == TASK_INTERRUPTIBLE &&
+			     signal_pending(current)) ||
+			    (state == TASK_KILLABLE &&
+			     fatal_signal_pending(current))) {
 				__remove_wait_queue(&x->wait, &wait);
 				return -ERESTARTSYS;
 			}
@@ -4212,6 +4211,15 @@ wait_for_completion_interruptible_timeout(struct completion *x,
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
+int __sched wait_for_completion_killable(struct completion *x)
+{
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
+}
+EXPORT_SYMBOL(wait_for_completion_killable);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
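
wait_for_completion_killable() returns 0 once the completion fires and -ERESTARTSYS if a fatal signal ends the wait first. A usage sketch; the work-submission helper is made up for illustration:

	struct completion done;

	init_completion(&done);
	queue_my_async_work(&done);	/* hypothetical: eventually calls complete(&done) */

	if (wait_for_completion_killable(&done))
		return -EINTR;		/* killed while waiting */
	/* the asynchronous work finished normally */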

diff --git a/kernel/signal.c b/kernel/signal.c
--- a/kernel/signal.c
+++ b/kernel/signal.c

@@ -456,15 +456,15 @@ void signal_wake_up(struct task_struct *t, int resume)
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced case.
-	 * We don't check t->state here because there is a race with it
+	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * case. We don't check t->state here because there is a race with it
 	 * executing another processor and just now entering stopped state.
 	 * By using wake_up_state, we ensure the process will wake up and
 	 * handle its death signal.
 	 */
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
-		mask |= TASK_STOPPED | TASK_TRACED;
+		mask |= TASK_WAKEKILL;
 	if (!wake_up_state(t, mask))
 		kick_process(t);
 }
@@ -620,7 +620,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
 			 * Wake up the stopped thread _after_ setting
 			 * TIF_SIGPENDING
 			 */
-			state = TASK_STOPPED;
+			state = __TASK_STOPPED;
 			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
 				set_tsk_thread_flag(t, TIF_SIGPENDING);
 				state |= TASK_INTERRUPTIBLE;
@@ -838,7 +838,7 @@ static inline int wants_signal(int sig, struct task_struct *p)
 		return 0;
 	if (sig == SIGKILL)
 		return 1;
-	if (p->state & (TASK_STOPPED | TASK_TRACED))
+	if (task_is_stopped_or_traced(p))
 		return 0;
 	return task_curr(p) || !signal_pending(p);
 }
@@ -994,6 +994,11 @@ void zap_other_threads(struct task_struct *p)
 	}
 }
 
+int fastcall __fatal_signal_pending(struct task_struct *tsk)
+{
+	return sigismember(&tsk->pending.signal, SIGKILL);
+}
+
 /*
  * Must be called under rcu_read_lock() or with tasklist_lock read-held.
  */
@@ -1441,7 +1446,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
 	BUG_ON(sig == -1);
 
 	/* do_notify_parent_cldstop should have been called instead. */
-	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));
+	BUG_ON(task_is_stopped_or_traced(tsk));
 
 	BUG_ON(!tsk->ptrace &&
 	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));
@@ -1729,7 +1734,7 @@ static int do_signal_stop(int signr)
 			 * so this check has no races.
 			 */
 			if (!t->exit_state &&
-			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
+			    !task_is_stopped_or_traced(t)) {
 				stop_count++;
 				signal_wake_up(t, 0);
 			}
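
__fatal_signal_pending() above is the out-of-line test; the series pairs it with an inline wrapper in include/linux/sched.h, roughly:

	static inline int fatal_signal_pending(struct task_struct *p)
	{
		return signal_pending(p) && __fatal_signal_pending(p);
	}

so fatal_signal_pending() stays cheap in the common no-signal case and only consults the pending set when some signal is actually queued.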

diff --git a/kernel/timer.c b/kernel/timer.c
--- a/kernel/timer.c
+++ b/kernel/timer.c

@@ -1099,6 +1099,13 @@ signed long __sched schedule_timeout_interruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_interruptible);
 
+signed long __sched schedule_timeout_killable(signed long timeout)
+{
+	__set_current_state(TASK_KILLABLE);
+	return schedule_timeout(timeout);
+}
+EXPORT_SYMBOL(schedule_timeout_killable);
+
 signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
 	__set_current_state(TASK_UNINTERRUPTIBLE);
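
schedule_timeout_killable() behaves like schedule_timeout_uninterruptible() except that a fatal signal wakes the task early; as with schedule_timeout(), the return value is the unexpired jiffies (0 on a full timeout). A caller sketch:

	long left = schedule_timeout_killable(HZ);	/* sleep up to one second */

	if (fatal_signal_pending(current))
		return -EINTR;		/* SIGKILL cut the sleep short */
	/* 'left' is nonzero only if something woke us before the timeout */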

diff --git a/kernel/wait.c b/kernel/wait.c
--- a/kernel/wait.c
+++ b/kernel/wait.c

@@ -215,7 +215,7 @@ void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
 
 	if (waitqueue_active(wq))
-		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
+		__wake_up(wq, TASK_NORMAL, 1, &key);
 }
 EXPORT_SYMBOL(__wake_up_bit);