Revert "[PATCH] i386: add idle notifier"

This reverts commit 2ff2d3d747.

Uwe Bugla reports that he cannot mount a floppy drive any more, and Jiri
Slaby bisected it down to this commit.

Benjamin LaHaise also points out that this is a big hot-path, and that
interrupt delivery while idle is very common and should not go through
all these expensive gyrations.
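
Concretely, with the notifier in place every interrupt taken on an idle CPU first had to run exit_idle() before doing any real work: a per-CPU test-and-clear followed by an atomic notifier chain walk. A condensed sketch of that path, pieced together from the exit_idle()/__exit_idle() code removed in the diff below (the entry-point wrapper shown here is illustrative, not the exact kernel code):

/*
 * Sketch of the per-interrupt work being reverted, condensed from the
 * code removed below.  The wrapper function is illustrative only.
 */
static void interrupt_entry_while_idle(void)
{
	/* exit_idle(): only the idle task (pid 0) pays the rest */
	if (current->pid)
		return;

	/* __exit_idle(): per-CPU flag, then a notifier chain walk */
	if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

That work runs for every timer tick and device interrupt delivered while the CPU sits idle, which is what makes it a hot path.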

Fix up conflicts in arch/i386/kernel/apic.c and arch/i386/kernel/irq.c
due to other unrelated irq changes.

Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Uwe Bugla <uwe.bugla@gmx.de>
Cc: Jiri Slaby <jirislaby@gmail.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Linus Torvalds
2007-02-26 09:21:46 -08:00
parent 9654640d0a
commit ea3d5226f5
7 changed files with 1 addition and 85 deletions

arch/i386/kernel/process.c

@@ -49,7 +49,6 @@
 #include <asm/i387.h>
 #include <asm/desc.h>
 #include <asm/vm86.h>
-#include <asm/idle.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
 #endif
@@ -82,42 +81,6 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-
-static DEFINE_PER_CPU(volatile unsigned long, idle_state);
-
-void enter_idle(void)
-{
-	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
-	__set_bit(0, &__get_cpu_var(idle_state));
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
-}
-
-static void __exit_idle(void)
-{
-	/* needs to be atomic w.r.t. interrupts, not against other CPUs */
-	if (__test_and_clear_bit(0, &__get_cpu_var(idle_state)) == 0)
-		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
-}
-
-void exit_idle(void)
-{
-	if (current->pid)
-		return;
-	__exit_idle();
-}
-
 void disable_hlt(void)
 {
 	hlt_counter++;
@@ -168,7 +131,6 @@ EXPORT_SYMBOL(default_idle);
  */
 static void poll_idle (void)
 {
-	local_irq_enable();
 	cpu_relax();
 }
@@ -229,16 +191,7 @@ void cpu_idle(void)
 				play_dead();
 
 			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_irq_disable();
-			enter_idle();
 			idle();
-			__exit_idle();
 		}
 		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
@@ -293,11 +246,7 @@ void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
 		if (!need_resched())
-			__sti_mwait(eax, ecx);
-		else
-			local_irq_enable();
-	} else {
-		local_irq_enable();
+			__mwait(eax, ecx);
 	}
 }
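
For reference, the interface removed here was consumed by registering an ordinary struct notifier_block, whose callback then fires with IDLE_START and IDLE_END. A minimal hypothetical consumer, assuming the <asm/idle.h> API deleted above (the function names and messages in this sketch are illustrative, not from the original patch):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/idle.h>			/* header removed by this revert */

/* Hypothetical consumer: names and printk messages are illustrative. */
static int sample_idle_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	if (action == IDLE_START)
		printk(KERN_DEBUG "CPU entering idle\n");
	else if (action == IDLE_END)
		printk(KERN_DEBUG "CPU leaving idle\n");
	return NOTIFY_OK;
}

static struct notifier_block sample_idle_nb = {
	.notifier_call = sample_idle_event,
};

static int __init sample_idle_init(void)
{
	idle_notifier_register(&sample_idle_nb);	/* API removed above */
	return 0;
}

Every such callback would have been invoked from the idle loop and from interrupt delivery on idle CPUs, which is exactly the overhead the revert eliminates.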