Merge branch 'linus' into tracing/hw-breakpoints
Conflicts:
	arch/x86/kernel/process_64.c

Semantic conflict fixed in:
	arch/x86/kvm/x86.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -404,7 +404,17 @@ extern unsigned long kernel_eflags;
 extern asmlinkage void ignore_sysret(void);
 #else	/* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
-DECLARE_PER_CPU(unsigned long, stack_canary);
+/*
+ * Make sure stack canary segment base is cached-aligned:
+ *   "For Intel Atom processors, avoid non zero segment base address
+ *    that is not aligned to cache line boundary at all cost."
+ * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
+ */
+struct stack_canary {
+	char __pad[20];		/* canary at %gs:20 */
+	unsigned long canary;
+};
+DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
 #endif	/* X86_64 */
 
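The 20-byte pad above is not arbitrary: on 32-bit x86, gcc's -fstack-protector emits reads of the canary from %gs:20, so the canary member must sit at byte offset 20 of the per-cpu segment. A minimal standalone sketch of that invariant (illustrative only, not part of the patch; _Static_assert stands in for the kernel's BUILD_BUG_ON, and a 32-bit build such as gcc -m32 is assumed, since 64-bit alignment of unsigned long would move the offset to 24):

#include <stddef.h>

struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};

/* Fails to compile if the canary ever drifts from the offset gcc uses. */
_Static_assert(offsetof(struct stack_canary, canary) == 20,
	       "32-bit gcc stack protector expects the canary at %gs:20");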
@@ -703,13 +713,23 @@ static inline void cpu_relax(void)
 	rep_nop();
 }
 
-/* Stop speculative execution: */
+/* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
 	int tmp;
 
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
-		     : "ebx", "ecx", "edx", "memory");
+#if defined(CONFIG_M386) || defined(CONFIG_M486)
+	if (boot_cpu_data.x86 < 5)
+		/* There is no speculative execution.
+		 * jmp is a barrier to prefetching. */
+		asm volatile("jmp 1f\n1:\n" ::: "memory");
+	else
+#endif
+		/* cpuid is a barrier to speculative execution.
+		 * Prefetched instructions are automatically
+		 * invalidated when modified. */
+		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
+			     : "ebx", "ecx", "edx", "memory");
 }
 
 static inline void __monitor(const void *eax, unsigned long ecx,
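For context on the reworded comment: sync_core() exists for cross-modifying code, where instruction bytes are rewritten in place and any stale prefetched copy must be discarded before execution. A minimal kernel-flavoured sketch of the call pattern (the helper name patch_and_resync is hypothetical, and it ignores the SMP and page-mapping concerns that the kernel's real text-poking machinery handles):

#include <linux/string.h>	/* memcpy() */
#include <asm/processor.h>	/* sync_core() */

/* Hypothetical helper: rewrite instruction bytes, then make sure this
 * CPU cannot execute a stale, previously prefetched copy of them. */
static void patch_and_resync(void *addr, const void *opcode, size_t len)
{
	memcpy(addr, opcode, len);	/* cross-modify the code */
	sync_core();			/* serializing barrier before running it */
}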