Merge branch 'linus' into timers/nohz
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,15 +56,6 @@ asmlinkage extern void ret_from_fork(void);
 
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
-unsigned long boot_option_idle_override = 0;
-EXPORT_SYMBOL(boot_option_idle_override);
-
-/*
- * Powermanagement idle function, if any..
- */
-void (*pm_idle)(void);
-EXPORT_SYMBOL(pm_idle);
-
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 
 void idle_notifier_register(struct notifier_block *n)
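
Although pm_idle and boot_option_idle_override disappear from this file, they are not deleted: this side of the merge moves them into the shared arch/x86/kernel/process.c. For context, pm_idle is the hook through which a power-management driver replaces the default idle routine. A minimal sketch of such an override (the names my_acpi_idle and my_driver_init are illustrative, not from this commit):

        #include <linux/init.h>
        #include <asm/irqflags.h>

        extern void (*pm_idle)(void);           /* now exported from process.c */

        /* hypothetical driver idle routine */
        static void my_acpi_idle(void)
        {
                safe_halt();                    /* sti; hlt: sleep until an interrupt */
        }

        static int __init my_driver_init(void)
        {
                /* cpu_idle() calls through pm_idle on every pass, so this
                 * takes effect on each CPU's next idle iteration */
                pm_idle = my_acpi_idle;
                return 0;
        }
        device_initcall(my_driver_init);
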
@@ -94,25 +85,6 @@ void exit_idle(void)
 	__exit_idle();
 }
 
-/*
- * We use this if we don't have any better
- * idle routine..
- */
-void default_idle(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
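
default_idle() likewise moves to process.c rather than going away, and its smp_mb() pairs with the scheduler's remote-wakeup path. Roughly, from resched_task() in kernel/sched.c of the same era (excerpted, so p and cpu are the function's locals):

        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);       /* skip the IPI for polling idlers */

Each side publishes its own flag before testing the other's. If either barrier were missing, the idle CPU could halt at the same moment the scheduler decided no IPI was needed, leaving a runnable task stranded until the next unrelated interrupt.
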
@@ -150,12 +122,9 @@ void cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-			void (*idle)(void);
 
 			rmb();
-			idle = pm_idle;
-			if (!idle)
-				idle = default_idle;
+
 			if (cpu_is_offline(smp_processor_id()))
 				play_dead();
 			/*
@@ -165,7 +134,10 @@ void cpu_idle(void)
 			 */
 			local_irq_disable();
 			enter_idle();
-			idle();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			start_critical_timings();
 			/* In many cases the interrupt that ended idle
 			   has already called exit_idle. But some idle
 			   loops can be woken up without interrupt. */
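
The stop_critical_timings()/start_critical_timings() bracket exists for the irqsoff latency tracer: cpu_idle() enters the idle routine with interrupts disabled, and pm_idle() re-enables them itself (typically via sti; hlt), so without the bracket every halt would be reported as one enormous interrupts-off critical section. The merged inner loop therefore reads, with the reasoning spelled out:

        local_irq_disable();
        enter_idle();
        /* Don't trace irqs off for idle */
        stop_critical_timings();        /* suspend irqsoff latency accounting */
        pm_idle();                      /* re-enables interrupts, then halts */
        start_critical_timings();       /* resume accounting once we wake up */
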
@@ -294,6 +266,7 @@ void flush_thread(void)
 	/*
 	 * Forget coprocessor state..
 	 */
+	tsk->fpu_counter = 0;
 	clear_fpu(tsk);
 	clear_used_math();
 }
@@ -365,10 +338,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("mov %%es,%0" : "=m" (p->thread.es));
-	asm("mov %%ds,%0" : "=m" (p->thread.ds));
+	savesegment(gs, p->thread.gsindex);
+	savesegment(fs, p->thread.fsindex);
+	savesegment(es, p->thread.es);
+	savesegment(ds, p->thread.ds);
 
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
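
The savesegment() conversions in this hunk and below are behavior-preserving; the macro (from include/asm-x86/system.h at the time) is roughly:

        #define savesegment(seg, value)                         \
                asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

Besides centralizing the asm, it uses an "=r" output where the open-coded versions forced a direct store with "=m", letting the compiler pick a register and schedule the store, and the memory clobber keeps the read ordered with surrounding accesses.
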
@@ -407,7 +380,9 @@ out:
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-	asm volatile("movl %0, %%fs; movl %0, %%es; movl %0, %%ds" :: "r"(0));
+	loadsegment(fs, 0);
+	loadsegment(es, 0);
+	loadsegment(ds, 0);
 	load_gs_index(0);
 	regs->ip = new_ip;
 	regs->sp = new_sp;
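
loadsegment() is the fault-tolerant counterpart: a segment load can raise #GP if the selector has gone stale (for instance a vanished LDT entry), and the macro recovers by loading the null selector instead of oopsing. From the same header, roughly:

        #define loadsegment(seg, value)                 \
                asm volatile("\n"                       \
                             "1:\t"                     \
                             "movl %k0,%%" #seg "\n"    \
                             "2:\n"                     \
                             ".section .fixup,\"ax\"\n" \
                             "3:\t"                     \
                             "movl %k1, %%" #seg "\n\t" \
                             "jmp 2b\n"                 \
                             ".previous\n"              \
                             _ASM_EXTABLE(1b, 3b)       \
                             : : "r" (value), "r" (0))
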
@@ -566,6 +541,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 				*next = &next_p->thread;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+	unsigned fsindex, gsindex;
 
 	/* we're going to use this soon, after a few expensive things */
 	if (next_p->fpu_counter>5)
@@ -580,22 +556,38 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("mov %%es,%0" : "=m" (prev->es));
+	savesegment(es, prev->es);
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
+	savesegment(ds, prev->ds);
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
+
+	/* We must save %fs and %gs before load_TLS() because
+	 * %fs and %gs may be cleared by load_TLS().
+	 *
+	 * (e.g. xen_load_tls())
+	 */
+	savesegment(fs, fsindex);
+	savesegment(gs, gsindex);
+
 	load_TLS(next, cpu);
 
+	/*
+	 * Leave lazy mode, flushing any hypercalls made here.
+	 * This must be done before restoring TLS segments so
+	 * the GDT and LDT are properly updated, and must be
+	 * done before math_state_restore, so the TS bit is up
+	 * to date.
+	 */
+	arch_leave_lazy_cpu_mode();
+
 	/*
 	 * Switch FS and GS.
 	 */
 	{
-	unsigned fsindex;
-	asm volatile("movl %%fs,%0" : "=r" (fsindex));
 	/* segment register != 0 always requires a reload.
 	   also reload when it has changed.
 	   when prev process used 64bit base always reload
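
The ordering the new comment insists on is easiest to see from what load_TLS() actually does. On bare metal it resolves to native_load_tls() (include/asm-x86/desc.h), roughly:

        static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
        {
                struct desc_struct *gdt = get_cpu_gdt_table(cpu);
                unsigned int i;

                /* rewrite the three per-thread TLS descriptors in this CPU's GDT */
                for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
                        gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
        }

Rewriting a descriptor that %fs or %gs still references is harmless on bare metal, but a paravirt backend such as xen_load_tls() may lazily clear those selectors when their GDT entries change. Hence the selectors are latched with savesegment() before the call, and arch_leave_lazy_cpu_mode() flushes the batched hypercalls before the saved values are reloaded further down.
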
@@ -613,10 +605,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (next->fs)
 		wrmsrl(MSR_FS_BASE, next->fs);
 	prev->fsindex = fsindex;
-	}
-	{
-	unsigned gsindex;
-	asm volatile("movl %%gs,%0" : "=r" (gsindex));
+
 	if (unlikely(gsindex | next->gsindex | prev->gs)) {
 		load_gs_index(next->gsindex);
 		if (gsindex)
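
A note on the surrounding logic, since the merge touches both halves of it:

        /* x86-64 has two ways to set the fs/gs base, and __switch_to() must
         * juggle both:
         *
         *   thread.fsindex != 0: base comes from a GDT/LDT descriptor
         *                        (32-bit base, set via set_32bit_tls(),
         *                        reloaded by load_TLS())
         *   thread.fsindex == 0: base comes from wrmsrl(MSR_FS_BASE,
         *                        thread.fs), the full-64-bit common case
         *
         * The same split applies to gs with MSR_KERNEL_GS_BASE, which
         * becomes the user %gs base after swapgs on return to userspace.
         */
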
@@ -658,8 +647,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
+	 *
+	 * tsk_used_math() checks prevent calling math_state_restore(),
+	 * which can sleep in the case of !tsk_used_math()
 	 */
-	if (next_p->fpu_counter>5)
+	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
 		math_state_restore();
 	return prev_p;
 }
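
The new guard matters because math_state_restore() calls init_fpu() for a task that has never used the FPU, and init_fpu() can sleep allocating the FPU save area, which is not allowed in the middle of a context switch. The check itself is just a flag test (include/linux/sched.h):

        #define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */

        #define tsk_used_math(p)	((p)->flags & PF_USED_MATH)
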
@@ -794,7 +786,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			set_32bit_tls(task, FS_TLS, addr);
 			if (doit) {
 				load_TLS(&task->thread, cpu);
-				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
+				loadsegment(fs, FS_TLS_SEL);
 			}
 			task->thread.fsindex = FS_TLS_SEL;
 			task->thread.fs = 0;
@@ -804,7 +796,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 			if (doit) {
 				/* set the selector to 0 to not confuse
 				   __switch_to */
-				asm volatile("movl %0,%%fs" :: "r" (0));
+				loadsegment(fs, 0);
 				ret = checking_wrmsrl(MSR_FS_BASE, addr);
 			}
 		}
@@ -827,7 +819,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
 		else if (doit) {
-			asm("movl %%gs,%0" : "=r" (gsindex));
+			savesegment(gs, gsindex);
 			if (gsindex)
 				rdmsrl(MSR_KERNEL_GS_BASE, base);
 			else
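
For completeness, the user-visible interface these do_arch_prctl() paths serve. A minimal userspace sketch exercising the set/get paths above, using %gs (which the 64-bit ABI leaves free, unlike %fs, which glibc owns for TLS); arch_prctl(2) has no glibc wrapper in this era, so it goes through syscall(2):

        #include <stdio.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <asm/prctl.h>          /* ARCH_SET_GS, ARCH_GET_GS */

        int main(void)
        {
                static unsigned long area[64];  /* arbitrary per-thread block */
                unsigned long base = 0;

                /* both calls reach do_arch_prctl() with doit == 1 (current task) */
                if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)area))
                        return 1;
                if (syscall(SYS_arch_prctl, ARCH_GET_GS, (unsigned long)&base))
                        return 1;

                printf("gs base = %#lx, expected %p\n", base, (void *)area);
                return 0;
        }
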