Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/kernel/process_64.c

Semantic conflict fixed in:
	arch/x86/kvm/x86.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -57,9 +57,6 @@
 asmlinkage extern void ret_from_fork(void);
 
-DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
-EXPORT_PER_CPU_SYMBOL(current_task);
-
 DEFINE_PER_CPU(unsigned long, old_rsp);
 static DEFINE_PER_CPU(unsigned char, is_idle);
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -399,9 +396,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = &per_cpu(init_tss, cpu);
 	unsigned fsindex, gsindex;
+	bool preload_fpu;
+
+	/*
+	 * If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 
 	/* we're going to use this soon, after a few expensive things */
-	if (next_p->fpu_counter > 5)
+	if (preload_fpu)
 		prefetch(next->xstate);
 
 	/*
@@ -432,6 +437,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	load_TLS(next, cpu);
 
+	/* Must be after DS reload */
+	unlazy_fpu(prev_p);
+
+	/* Make sure cpu is ready for new context */
+	if (preload_fpu)
+		clts();
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
@@ -472,9 +484,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
 	prev->gsindex = gsindex;
 
-	/* Must be after DS reload */
-	unlazy_fpu(prev_p);
-
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
@@ -493,15 +502,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	/* If the task has used fpu the last 5 timeslices, just do a full
-	 * restore of the math state immediately to avoid the trap; the
-	 * chances of needing FPU soon are obviously high now
-	 *
-	 * tsk_used_math() checks prevent calling math_state_restore(),
-	 * which can sleep in the case of !tsk_used_math()
+	/*
+	 * Preload the FPU context, now that we've determined that the
+	 * task is likely to be using it.
 	 */
-	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
-		math_state_restore();
+	if (preload_fpu)
+		__math_state_restore();
 	/*
 	 * There's a problem with moving the arch_install_thread_hw_breakpoint()
 	 * call before current is updated. Suppose a kernel breakpoint is
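The heuristic these hunks reshuffle is compact enough to spell out: the scheduler keeps a per-task fpu_counter, and once a task has used the FPU for more than five consecutive timeslices, __switch_to() restores the FPU state eagerly at switch time instead of waiting for the device-not-available trap. Below is a minimal user-space sketch of just that decision; struct task and should_preload_fpu() are hypothetical stand-ins for task_struct and the open-coded test, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the two task_struct fields the test reads. */
struct task {
	const char *comm;          /* task name, for the demo printout     */
	bool used_math;            /* tsk_used_math(): task owns FPU state */
	unsigned char fpu_counter; /* timeslices in a row with FPU use     */
};

/*
 * Mirrors the diff's test:
 *     preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
 * Eager restore only pays off when a device-not-available trap is
 * all but certain; otherwise lazy restore skips the work entirely.
 */
static bool should_preload_fpu(const struct task *next)
{
	return next->used_math && next->fpu_counter > 5;
}

int main(void)
{
	const struct task tasks[] = {
		{ "fpu-heavy",    true,  7 }, /* preload: avoid the trap    */
		{ "fpu-rare",     true,  2 }, /* lazy: trap if FPU is used  */
		{ "integer-only", false, 0 }, /* nothing to restore         */
	};

	for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
		printf("%-12s -> %s\n", tasks[i].comm,
		       should_preload_fpu(&tasks[i]) ? "preload" : "lazy");
	return 0;
}

The other half of the change follows from that decision: clts() is hoisted into the batch of CPU state updates around the segment reloads, which is why the final hunk can call __math_state_restore() (which assumes TS is already clear) rather than math_state_restore() (which clears it itself).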