genirq: reimplement exit_irq_thread() hook via task_work_add()
exit_irq_thread() and task->irq_thread are needed to handle the unexpected
(and unlikely) exit of irq-thread.

We can use task_work instead and make this all private to
kernel/irq/manage.c, cleanup plus micro-optimization.

1. rename exit_irq_thread() to irq_thread_dtor(), make it static,
   and move it up before irq_thread().

2. change irq_thread() to do task_work_add(irq_thread_dtor) at the
   start and task_work_cancel() before return.

   tracehook_notify_resume() can never play with kthreads, only
   do_exit()->exit_task_work() can call the callback and this is
   what we want.

3. remove task_struct->irq_thread and the special hook in do_exit().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alexander Gordeev <agordeev@redhat.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Smith <dsmith@redhat.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
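For context, a minimal sketch of the task_work pattern the patch switches to, written against the task_work API as it appears in this series (init_task_work() with a data argument, task_work_cancel() keyed by the callback; the API has since been reworked in later kernels). The example_dtor()/example_thread() names and the idle loop are illustrative only, not part of the patch:

#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/task_work.h>

/* Runs only from do_exit()->exit_task_work() if the kthread dies early. */
static void example_dtor(struct task_work *unused)
{
	pr_err("example: thread \"%s\" (%d) exited unexpectedly\n",
	       current->comm, current->pid);
}

/* Started with kthread_run(example_thread, NULL, "example"). */
static int example_thread(void *data)
{
	struct task_work on_exit_work;

	/* Arm the destructor for the unlikely case of an unexpected exit. */
	init_task_work(&on_exit_work, example_dtor, NULL);
	task_work_add(current, &on_exit_work, false);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();		/* real work would go here */
	}
	__set_current_state(TASK_RUNNING);

	/* Normal termination: make sure the destructor never runs. */
	task_work_cancel(current, example_dtor);
	return 0;
}

Because tracehook_notify_resume() never runs for kthreads, the queued work can only be reached via do_exit()->exit_task_work(); cancelling it on the normal return path keeps the destructor (and its warning) limited to a genuinely unexpected exit.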
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -142,8 +142,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 extern int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
 		   const char *devname, void __percpu *percpu_dev_id);
-
-extern void exit_irq_thread(void);
 #else
 
 extern int __must_check
@@ -177,8 +175,6 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
 {
 	return request_irq(irq, handler, 0, devname, percpu_dev_id);
 }
-
-static inline void exit_irq_thread(void) { }
 #endif
 
 extern void free_irq(unsigned int, void *);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1301,11 +1301,6 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 
-#ifdef CONFIG_GENERIC_HARDIRQS
-	/* IRQ handler threads */
-	unsigned irq_thread:1;
-#endif
-
 	pid_t pid;
 	pid_t tgid;
 
@@ -1313,10 +1308,9 @@ struct task_struct {
 	/* Canary value for the -fstack-protector gcc feature */
 	unsigned long stack_canary;
 #endif
-
-	/*
+	/*
 	 * pointers to (original) parent process, youngest child, younger sibling,
 	 * older sibling, respectively.  (p->father can be replaced with
 	 * p->real_parent->pid)
 	 */
 	struct task_struct __rcu *real_parent; /* real parent process */
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -954,8 +954,6 @@ void do_exit(long code)
 
 	exit_task_work(tsk);
 
-	exit_irq_thread();
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, task_pid_nr(current),
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -773,11 +774,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -793,7 +822,9 @@ static int irq_thread(void *data)
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -815,44 +846,11 @@ static int irq_thread(void *data)
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)