x86: use generic register name in the thread and tss structures
This changes size-specific register names (eip/rip, esp/rsp, etc.) to generic names in the thread and tss structures.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit faca62273b, parent 25149b62d3, committed by Ingo Molnar
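For readers skimming the hunks below: the point of the rename is that code which only cares about "the saved stack pointer" or "the saved instruction pointer" no longer has to spell the field differently on 32-bit (esp/eip) and 64-bit (rsp/rip). A minimal before/after sketch, mirroring the get_wchan() hunks in this diff (saved_sp_before()/saved_sp_after() are hypothetical helpers, not part of the patch):

```c
/* Before: common code needed an #ifdef to pick the width-specific name. */
static unsigned long saved_sp_before(struct task_struct *p)
{
#ifdef CONFIG_X86_32
	return p->thread.esp;		/* 32-bit spelling */
#else
	return p->thread.rsp;		/* 64-bit spelling */
#endif
}

/* After: both thread_struct definitions use the generic name, so one
 * line compiles on either architecture. */
static unsigned long saved_sp_after(struct task_struct *p)
{
	return p->thread.sp;
}
```

The same pattern applies to sp0/ip in thread_struct and to sp0/sp1/sp2 in the hardware TSS.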
@@ -101,8 +101,8 @@ void foo(void)
 	OFFSET(pbe_orig_address, pbe, orig_address);
 	OFFSET(pbe_next, pbe, next);
 
-	/* Offset from the sysenter stack to tss.esp0 */
-	DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, x86_tss.esp0) -
+	/* Offset from the sysenter stack to tss.sp0 */
+	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
 		 sizeof(struct tss_struct));
 
 	DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
@@ -691,7 +691,7 @@ void __cpuinit cpu_init(void)
 		BUG();
 	enter_lazy_tlb(&init_mm, curr);
 
-	load_esp0(t, thread);
+	load_sp0(t, thread);
 	set_tss_desc(cpu,t);
 	load_TR_desc();
 	load_LDT(&init_mm.context);
@@ -35,12 +35,13 @@ static void doublefault_fn(void)
 	if (ptr_ok(tss)) {
 		struct i386_hw_tss *t = (struct i386_hw_tss *)tss;
 
-		printk(KERN_EMERG "eip = %08lx, esp = %08lx\n", t->eip, t->esp);
+		printk(KERN_EMERG "eip = %08lx, esp = %08lx\n",
+		       t->ip, t->sp);
 
 		printk(KERN_EMERG "eax = %08lx, ebx = %08lx, ecx = %08lx, edx = %08lx\n",
-			t->eax, t->ebx, t->ecx, t->edx);
+			t->ax, t->bx, t->cx, t->dx);
 		printk(KERN_EMERG "esi = %08lx, edi = %08lx\n",
-			t->esi, t->edi);
+			t->si, t->di);
 	}
 }
 
@@ -50,15 +51,15 @@ static void doublefault_fn(void)
 
 struct tss_struct doublefault_tss __cacheline_aligned = {
 	.x86_tss = {
-		.esp0		= STACK_START,
+		.sp0		= STACK_START,
 		.ss0		= __KERNEL_DS,
 		.ldt		= 0,
 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
 
-		.eip		= (unsigned long) doublefault_fn,
+		.ip		= (unsigned long) doublefault_fn,
 		/* 0x2 bit is always set */
-		.eflags		= X86_EFLAGS_SF | 0x2,
-		.esp		= STACK_START,
+		.flags		= X86_EFLAGS_SF | 0x2,
+		.sp		= STACK_START,
 		.es		= __USER_DS,
 		.cs		= __KERNEL_CS,
 		.ss		= __KERNEL_DS,
@@ -288,7 +288,7 @@ ENTRY(ia32_sysenter_target)
 	CFI_SIGNAL_FRAME
 	CFI_DEF_CFA esp, 0
 	CFI_REGISTER esp, ebp
-	movl TSS_sysenter_esp0(%esp),%esp
+	movl TSS_sysenter_sp0(%esp),%esp
 sysenter_past_esp:
 	/*
 	 * No need to follow this irqs on/off section: the syscall
@@ -743,7 +743,7 @@ END(device_not_available)
  * that sets up the real kernel stack. Check here, since we can't
  * allow the wrong stack to be used.
  *
- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
+ * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
  * already pushed 3 words if it hits on the sysenter instruction:
  * eflags, cs and eip.
  *
@@ -755,7 +755,7 @@ END(device_not_available)
 	cmpw $__KERNEL_CS,4(%esp);		\
 	jne ok;					\
 label:						\
-	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
+	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
 	CFI_DEF_CFA esp, 0;			\
 	CFI_UNDEFINED eip;			\
 	pushfl;					\
@@ -382,7 +382,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.write_ldt_entry = write_dt_entry,
 	.write_gdt_entry = write_dt_entry,
 	.write_idt_entry = write_dt_entry,
-	.load_esp0 = native_load_esp0,
+	.load_sp0 = native_load_sp0,
 
 	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
 	.iret = native_iret,
@@ -75,7 +75,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-	return ((unsigned long *)tsk->thread.esp)[3];
+	return ((unsigned long *)tsk->thread.sp)[3];
 }
 
 /*
@@ -488,10 +488,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	childregs->ax = 0;
 	childregs->sp = sp;
 
-	p->thread.esp = (unsigned long) childregs;
-	p->thread.esp0 = (unsigned long) (childregs+1);
+	p->thread.sp = (unsigned long) childregs;
+	p->thread.sp0 = (unsigned long) (childregs+1);
 
-	p->thread.eip = (unsigned long) ret_from_fork;
+	p->thread.ip = (unsigned long) ret_from_fork;
 
 	savesegment(gs,p->thread.gs);
 
@@ -718,7 +718,7 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	/*
 	 * Reload esp0.
 	 */
-	load_esp0(tss, next);
+	load_sp0(tss, next);
 
 	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
@@ -851,7 +851,7 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!p || p == current || p->state == TASK_RUNNING)
 		return 0;
 	stack_page = (unsigned long)task_stack_page(p);
-	sp = p->thread.esp;
+	sp = p->thread.sp;
 	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
 		return 0;
 	/* include/asm-i386/system.h:switch_to() pushes bp last. */
@@ -493,9 +493,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 	if (sp == ~0UL)
 		childregs->sp = (unsigned long)childregs;
 
-	p->thread.rsp = (unsigned long) childregs;
-	p->thread.rsp0 = (unsigned long) (childregs+1);
-	p->thread.userrsp = me->thread.userrsp;
+	p->thread.sp = (unsigned long) childregs;
+	p->thread.sp0 = (unsigned long) (childregs+1);
+	p->thread.usersp = me->thread.usersp;
 
 	set_tsk_thread_flag(p, TIF_FORK);
 
@@ -607,7 +607,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	tss->rsp0 = next->rsp0;
+	tss->sp0 = next->sp0;
 
 	/*
 	 * Switch DS and ES.
@@ -666,8 +666,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Switch the PDA and FPU contexts.
 	 */
-	prev->userrsp = read_pda(oldrsp);
-	write_pda(oldrsp, next->userrsp);
+	prev->usersp = read_pda(oldrsp);
+	write_pda(oldrsp, next->usersp);
 	write_pda(pcurrent, next_p);
 
 	write_pda(kernelstack,
@@ -769,9 +769,9 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!p || p == current || p->state==TASK_RUNNING)
 		return 0;
 	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
+	if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE)
 		return 0;
-	fp = *(u64 *)(p->thread.rsp);
+	fp = *(u64 *)(p->thread.sp);
 	do {
 		if (fp < (unsigned long)stack ||
 		    fp > (unsigned long)stack+THREAD_SIZE)
@@ -454,7 +454,7 @@ void __devinit initialize_secondary(void)
 		"movl %0,%%esp\n\t"
 		"jmp *%1"
 		:
-		:"m" (current->thread.esp),"m" (current->thread.eip));
+		:"m" (current->thread.sp),"m" (current->thread.ip));
 }
 
 /* Static state in head.S used to set up a CPU */
@@ -753,7 +753,7 @@ static inline struct task_struct * __cpuinit alloc_idle_task(int cpu)
 	/* initialize thread_struct.  we really want to avoid destroy
 	 * idle tread
 	 */
-	idle->thread.esp = (unsigned long)task_pt_regs(idle);
+	idle->thread.sp = (unsigned long)task_pt_regs(idle);
 	init_idle(idle, cpu);
 	return idle;
 }
@@ -798,7 +798,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	per_cpu(current_task, cpu) = idle;
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 
-	idle->thread.eip = (unsigned long) start_secondary;
+	idle->thread.ip = (unsigned long) start_secondary;
 	/* start_eip had better be page-aligned! */
 	start_eip = setup_trampoline();
 
@@ -808,7 +808,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	/* So we see what's up   */
 	printk("Booting processor %d/%d ip %lx\n", cpu, apicid, start_eip);
 	/* Stack for startup_32 can be just as for start_secondary onwards */
-	stack_start.sp = (void *) idle->thread.esp;
+	stack_start.sp = (void *) idle->thread.sp;
 
 	irq_ctx_init(cpu);
 
@@ -577,7 +577,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 	c_idle.idle = get_idle_for_cpu(cpu);
 
 	if (c_idle.idle) {
-		c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
+		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
 			(THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
 		init_idle(c_idle.idle, cpu);
 		goto do_rest;
@@ -613,8 +613,8 @@ do_rest:
 
 	start_rip = setup_trampoline();
 
-	init_rsp = c_idle.idle->thread.rsp;
-	per_cpu(init_tss,cpu).rsp0 = init_rsp;
+	init_rsp = c_idle.idle->thread.sp;
+	per_cpu(init_tss,cpu).sp0 = init_rsp;
 	initial_code = start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 
@@ -163,7 +163,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			unsigned long dummy;
 			stack = &dummy;
 			if (task != current)
-				stack = (unsigned long *)task->thread.esp;
+				stack = (unsigned long *)task->thread.sp;
 		}
 
 #ifdef CONFIG_FRAME_POINTER
@@ -173,7 +173,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			asm ("movl %%ebp, %0" : "=r" (bp) : );
 		} else {
 			/* bp is the last reg pushed by switch_to */
-			bp = *(unsigned long *) task->thread.esp;
+			bp = *(unsigned long *) task->thread.sp;
 		}
 	}
 #endif
@@ -253,7 +253,7 @@ static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
 	if (sp == NULL) {
 		if (task)
-			sp = (unsigned long*)task->thread.esp;
+			sp = (unsigned long*)task->thread.sp;
 		else
 			sp = (unsigned long *)&sp;
 	}
@@ -230,7 +230,7 @@ void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
 			unsigned long dummy;
 			stack = &dummy;
 			if (tsk && tsk != current)
-				stack = (unsigned long *)tsk->thread.rsp;
+				stack = (unsigned long *)tsk->thread.sp;
 		}
 
 	/*
@@ -366,7 +366,7 @@ _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long *sp)
 
 	if (sp == NULL) {
 		if (tsk)
-			sp = (unsigned long *)tsk->thread.rsp;
+			sp = (unsigned long *)tsk->thread.sp;
 		else
 			sp = (unsigned long *)&sp;
 	}
@@ -147,10 +147,10 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 	}
 
 	tss = &per_cpu(init_tss, get_cpu());
-	current->thread.esp0 = current->thread.saved_esp0;
+	current->thread.sp0 = current->thread.saved_sp0;
 	current->thread.sysenter_cs = __KERNEL_CS;
-	load_esp0(tss, &current->thread);
-	current->thread.saved_esp0 = 0;
+	load_sp0(tss, &current->thread);
+	current->thread.saved_sp0 = 0;
 	put_cpu();
 
 	ret = KVM86->regs32;
@@ -207,7 +207,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
 	int tmp, ret = -EPERM;
 
 	tsk = current;
-	if (tsk->thread.saved_esp0)
+	if (tsk->thread.saved_sp0)
 		goto out;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
 				       offsetof(struct kernel_vm86_struct, vm86plus) -
@@ -256,7 +256,7 @@ asmlinkage int sys_vm86(struct pt_regs regs)
 
 	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
 	ret = -EPERM;
-	if (tsk->thread.saved_esp0)
+	if (tsk->thread.saved_sp0)
 		goto out;
 	v86 = (struct vm86plus_struct __user *)regs.cx;
 	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
@@ -318,15 +318,15 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 * Save old state, set default return value (%ax) to 0
 	 */
 	info->regs32->ax = 0;
-	tsk->thread.saved_esp0 = tsk->thread.esp0;
+	tsk->thread.saved_sp0 = tsk->thread.sp0;
 	tsk->thread.saved_fs = info->regs32->fs;
 	savesegment(gs, tsk->thread.saved_gs);
 
 	tss = &per_cpu(init_tss, get_cpu());
-	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
 	if (cpu_has_sep)
 		tsk->thread.sysenter_cs = 0;
-	load_esp0(tss, &tsk->thread);
+	load_sp0(tss, &tsk->thread);
 	put_cpu();
 
 	tsk->thread.screen_bitmap = info->screen_bitmap;
@@ -62,7 +62,7 @@ static struct {
 	void (*cpuid)(void /* non-c */);
 	void (*_set_ldt)(u32 selector);
 	void (*set_tr)(u32 selector);
-	void (*set_kernel_stack)(u32 selector, u32 esp0);
+	void (*set_kernel_stack)(u32 selector, u32 sp0);
 	void (*allocate_page)(u32, u32, u32, u32, u32);
 	void (*release_page)(u32, u32);
 	void (*set_pte)(pte_t, pte_t *, unsigned);
@@ -214,17 +214,17 @@ static void vmi_set_tr(void)
 	vmi_ops.set_tr(GDT_ENTRY_TSS*sizeof(struct desc_struct));
 }
 
-static void vmi_load_esp0(struct tss_struct *tss,
+static void vmi_load_sp0(struct tss_struct *tss,
 			struct thread_struct *thread)
 {
-	tss->x86_tss.esp0 = thread->esp0;
+	tss->x86_tss.sp0 = thread->sp0;
 
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
-	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.esp0);
+	vmi_ops.set_kernel_stack(__KERNEL_DS, tss->x86_tss.sp0);
 }
 
 static void vmi_flush_tlb_user(void)
@@ -793,7 +793,7 @@ static inline int __init activate_vmi(void)
 	para_fill(pv_cpu_ops.write_ldt_entry, WriteLDTEntry);
 	para_fill(pv_cpu_ops.write_gdt_entry, WriteGDTEntry);
 	para_fill(pv_cpu_ops.write_idt_entry, WriteIDTEntry);
-	para_wrap(pv_cpu_ops.load_esp0, vmi_load_esp0, set_kernel_stack, UpdateKernelStack);
+	para_wrap(pv_cpu_ops.load_sp0, vmi_load_sp0, set_kernel_stack, UpdateKernelStack);
 	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
 	para_fill(pv_cpu_ops.io_delay, IODelay);
 
@@ -755,10 +755,10 @@ static void lguest_time_init(void)
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack. */
-static void lguest_load_esp0(struct tss_struct *tss,
+static void lguest_load_sp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
-	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->esp0,
+	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0,
 		   THREAD_SIZE/PAGE_SIZE);
 }
 
@@ -957,7 +957,7 @@ __init void lguest_init(void)
 	pv_cpu_ops.cpuid = lguest_cpuid;
 	pv_cpu_ops.load_idt = lguest_load_idt;
 	pv_cpu_ops.iret = lguest_iret;
-	pv_cpu_ops.load_esp0 = lguest_load_esp0;
+	pv_cpu_ops.load_sp0 = lguest_load_sp0;
 	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
 	pv_cpu_ops.set_ldt = lguest_set_ldt;
 	pv_cpu_ops.load_tls = lguest_load_tls;
@@ -243,9 +243,9 @@ void enable_sep_cpu(void)
 	}
 
 	tss->x86_tss.ss1 = __KERNEL_CS;
-	tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
 	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
-	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
 	put_cpu();
 }
@@ -499,11 +499,11 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 	preempt_enable();
 }
 
-static void xen_load_esp0(struct tss_struct *tss,
+static void xen_load_sp0(struct tss_struct *tss,
 			  struct thread_struct *thread)
 {
 	struct multicall_space mcs = xen_mc_entry(0);
-	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
+	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
@@ -968,7 +968,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.write_ldt_entry = xen_write_ldt_entry,
 	.write_gdt_entry = xen_write_gdt_entry,
 	.write_idt_entry = xen_write_idt_entry,
-	.load_esp0 = xen_load_esp0,
+	.load_sp0 = xen_load_sp0,
 
 	.set_iopl_mask = xen_set_iopl_mask,
 	.io_delay = xen_io_delay,
@@ -239,10 +239,10 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);
 
 	ctxt->user_regs.cs = __KERNEL_CS;
-	ctxt->user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
+	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
 
 	ctxt->kernel_ss = __KERNEL_DS;
-	ctxt->kernel_sp = idle->thread.esp0;
+	ctxt->kernel_sp = idle->thread.sp0;
 
 	ctxt->event_callback_cs = __KERNEL_CS;
 	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
@@ -94,7 +94,7 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
 	/* Set up the two "TSS" members which tell the CPU what stack to use
 	 * for traps which do directly into the Guest (ie. traps at privilege
	 * level 1). */
-	pages->state.guest_tss.esp1 = lg->esp1;
+	pages->state.guest_tss.sp1 = lg->esp1;
 	pages->state.guest_tss.ss1 = lg->ss1;
 
 	/* Copy direct-to-Guest trap entries. */
@@ -416,7 +416,7 @@ void __init lguest_arch_host_init(void)
 	/* We know where we want the stack to be when the Guest enters
 	 * the switcher: in pages->regs.  The stack grows upwards, so
 	 * we start it at the end of that structure. */
-	state->guest_tss.esp0 = (long)(&pages->regs + 1);
+	state->guest_tss.sp0 = (long)(&pages->regs + 1);
 	/* And this is the GDT entry to use for the stack: we keep a
 	 * couple of special LGUEST entries. */
 	state->guest_tss.ss0 = LGUEST_DS;
@@ -101,7 +101,7 @@ struct pv_cpu_ops {
 				int entrynum, u32 low, u32 high);
 	void (*write_idt_entry)(struct desc_struct *,
 				int entrynum, u32 low, u32 high);
-	void (*load_esp0)(struct tss_struct *tss, struct thread_struct *t);
+	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
 
@@ -449,10 +449,10 @@ static inline int paravirt_enabled(void)
 	return pv_info.paravirt_enabled;
 }
 
-static inline void load_esp0(struct tss_struct *tss,
+static inline void load_sp0(struct tss_struct *tss,
 			     struct thread_struct *thread)
 {
-	PVOP_VCALL2(pv_cpu_ops.load_esp0, tss, thread);
+	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
 }
 
 #define ARCH_SETUP	pv_init_ops.arch_setup();
@@ -292,20 +292,17 @@ struct thread_struct;
 /* This is the TSS defined by the hardware. */
 struct i386_hw_tss {
 	unsigned short	back_link,__blh;
-	unsigned long	esp0;
+	unsigned long	sp0;
 	unsigned short	ss0,__ss0h;
-	unsigned long	esp1;
+	unsigned long	sp1;
 	unsigned short	ss1,__ss1h;	/* ss1 is used to cache MSR_IA32_SYSENTER_CS */
-	unsigned long	esp2;
+	unsigned long	sp2;
 	unsigned short	ss2,__ss2h;
 	unsigned long	__cr3;
-	unsigned long	eip;
-	unsigned long	eflags;
-	unsigned long	eax,ecx,edx,ebx;
-	unsigned long	esp;
-	unsigned long	ebp;
-	unsigned long	esi;
-	unsigned long	edi;
+	unsigned long	ip;
+	unsigned long	flags;
+	unsigned long	ax, cx, dx, bx;
+	unsigned long	sp, bp, si, di;
 	unsigned short	es, __esh;
 	unsigned short	cs, __csh;
 	unsigned short	ss, __ssh;
@@ -346,10 +343,10 @@ struct tss_struct {
 struct thread_struct {
 /* cached TLS descriptors. */
 	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-	unsigned long	esp0;
+	unsigned long	sp0;
 	unsigned long	sysenter_cs;
-	unsigned long	eip;
-	unsigned long	esp;
+	unsigned long	ip;
+	unsigned long	sp;
 	unsigned long	fs;
 	unsigned long	gs;
 /* Hardware debugging registers */
@@ -366,7 +363,7 @@ struct thread_struct {
 /* virtual 86 mode info */
 	struct vm86_struct __user * vm86_info;
 	unsigned long	screen_bitmap;
-	unsigned long	v86flags, v86mask, saved_esp0;
+	unsigned long	v86flags, v86mask, saved_sp0;
 	unsigned int	saved_fs, saved_gs;
 /* IO permissions */
 	unsigned long	*io_bitmap_ptr;
@@ -378,7 +375,7 @@ struct thread_struct {
 };
 
 #define INIT_THREAD  {						\
-	.esp0		= sizeof(init_stack) + (long)&init_stack, \
+	.sp0		= sizeof(init_stack) + (long)&init_stack, \
 	.vm86_info	= NULL,					\
 	.sysenter_cs	= __KERNEL_CS,				\
 	.io_bitmap_ptr	= NULL,					\
@@ -393,7 +390,7 @@ struct thread_struct {
  */
 #define INIT_TSS  {						\
 	.x86_tss = {						\
-		.esp0		= sizeof(init_stack) + (long)&init_stack, \
+		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,			\
		.ss1		= __KERNEL_CS,			\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,	\
@@ -503,9 +500,9 @@ static inline void rep_nop(void)
 
 #define cpu_relax()	rep_nop()
 
-static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
-	tss->x86_tss.esp0 = thread->esp0;
+	tss->x86_tss.sp0 = thread->sp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
 	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
 		tss->x86_tss.ss1 = thread->sysenter_cs;
@@ -585,9 +582,9 @@ static inline void native_set_iopl_mask(unsigned mask)
 #define paravirt_enabled() 0
 #define __cpuid native_cpuid
 
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
-	native_load_esp0(tss, thread);
+	native_load_sp0(tss, thread);
 }
 
 /*
@@ -177,9 +177,9 @@ union i387_union {
 
 struct tss_struct {
 	u32 reserved1;
-	u64 rsp0;
-	u64 rsp1;
-	u64 rsp2;
+	u64 sp0;
+	u64 sp1;
+	u64 sp2;
 	u64 reserved2;
 	u64 ist[7];
 	u32 reserved3;
@@ -216,9 +216,9 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);
 #endif
 
 struct thread_struct {
-	unsigned long	rsp0;
-	unsigned long	rsp;
-	unsigned long	userrsp;	/* Copy from PDA */
+	unsigned long	sp0;
+	unsigned long	sp;
+	unsigned long	usersp;	/* Copy from PDA */
 	unsigned long	fs;
 	unsigned long	gs;
 	unsigned short	es, ds, fsindex, gsindex;
@@ -245,11 +245,11 @@ struct thread_struct {
 } __attribute__((aligned(16)));
 
 #define INIT_THREAD  { \
-	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
 #define INIT_TSS  { \
-	.rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
+	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
 }
 
 #define INIT_MMAP \
@@ -293,10 +293,10 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
-#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
 
 extern unsigned long get_wchan(struct task_struct *p);
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->ip)
 #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
 
@@ -28,9 +28,9 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		     "1:\t"					\
 		     "popl %%ebp\n\t"				\
 		     "popfl"					\
-		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
+		     :"=m" (prev->thread.sp),"=m" (prev->thread.ip),	\
 		      "=a" (last),"=S" (esi),"=D" (edi)		\
-		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
+		     :"m" (next->thread.sp),"m" (next->thread.ip),	\
 		      "2" (prev), "d" (next));			\
 } while (0)
 
@@ -40,7 +40,7 @@
 	     RESTORE_CONTEXT					  \
 	     : "=a" (last)					  \
 	     : [next] "S" (next), [prev] "D" (prev),		  \
-	       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
+	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
 	       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
 	       [tif_fork] "i" (TIF_FORK),			  \
 	       [thread_info] "i" (offsetof(struct task_struct, stack)), \