x86/i386: Make sure stack-protector segment base is cache aligned
The Intel Optimization Reference Guide says:

	In Intel Atom microarchitecture, the address generation unit
	assumes that the segment base will be 0 by default. Non-zero
	segment base will cause load and store operations to experience
	a delay.
		- If the segment base isn't aligned to a cache line
		  boundary, the max throughput of memory operations is
		  reduced to one [e]very 9 cycles.
	[...]
	Assembly/Compiler Coding Rule 15. (H impact, ML generality)
	For Intel Atom processors, use segments with base set to 0
	whenever possible; avoid non-zero segment base address that is
	not aligned to cache line boundary at all cost.

We can't avoid having a non-zero base for the stack-protector
segment, but we can make it cache-aligned.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: <stable@kernel.org>
LKML-Reference: <4AA01893.6000507@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 1ea0d14e48
parent 23386d63bb
committed by Ingo Molnar
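For context on how the fix works: GCC's -fstack-protector on i386 reads the canary from the fixed offset %gs:20, so the change wraps the canary in a padded per-cpu struct whose start can serve as the segment base. A minimal sketch of the layout this implies (the actual definition lives in the x86 stackprotector header, which this excerpt of the diff does not show):

	/*
	 * Sketch only: pad the canary out to offset 20 so the %gs
	 * segment base can point at the start of the struct, while
	 * ____cacheline_aligned keeps that base on a cache line
	 * boundary, as the Atom coding rule quoted above demands.
	 */
	struct stack_canary {
		char __pad[20];		/* canary is read as %gs:20 */
		unsigned long canary;
	};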
arch/x86/kernel/cpu/common.c
@@ -1043,7 +1043,7 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist);
 #else	/* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
-DEFINE_PER_CPU(unsigned long, stack_canary);
+DEFINE_PER_CPU(struct stack_canary, stack_canary) ____cacheline_aligned;
 #endif
 
 /* Make sure %fs and %gs are initialized properly in idle threads */
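Since stack_canary is now a struct that pads the canary out to offset 20, the per-cpu symbol's own address can serve directly as the %gs segment base, and ____cacheline_aligned keeps that base on a cache line boundary. That is what lets the head_32.S hunk below drop its subl $20 base fixup.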
arch/x86/kernel/head_32.S
@@ -439,7 +439,6 @@ is386:	movl $2,%ecx		# set MP
 	jne 1f
 	movl $per_cpu__gdt_page,%eax
 	movl $per_cpu__stack_canary,%ecx
-	subl $20, %ecx
 	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
 	shrl $16, %ecx
 	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
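For readers less familiar with GDT layout, the movw/shrl/movb sequence above scatters the new segment base (the address of per_cpu__stack_canary) across the split base fields of the 8-byte stack-canary descriptor. A hypothetical C equivalent (set_desc_base is illustrative, not a kernel API):

	#include <stdint.h>

	/*
	 * Illustrative only: an x86 segment descriptor splits its
	 * 32-bit base across three fields.  The assembly above writes
	 * the low two via movw (+2) and movb (+4); bits 31:24 (byte +7)
	 * fall outside this hunk's context and are presumably set by
	 * the surrounding code.
	 */
	static void set_desc_base(uint8_t desc[8], uint32_t base)
	{
		desc[2] = base & 0xff;		/* base bits  7:0  */
		desc[3] = (base >> 8) & 0xff;	/* base bits 15:8  */
		desc[4] = (base >> 16) & 0xff;	/* base bits 23:16 */
		desc[7] = (base >> 24) & 0xff;	/* base bits 31:24 */
	}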