#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include
#include

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#ifdef __KERNEL__

#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

static inline void native_clts(void)
{
	asm volatile ("clts");
}

/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. Solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}
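
/*
 * Illustrative sketch, not part of the original header: the read/write
 * accessors above are typically paired, e.g. rewriting %cr3 with its
 * current value, which makes the CPU drop its non-global TLB entries.
 * The function name below is hypothetical and only demonstrates usage.
 */
static inline void __example_reload_cr3(void)
{
	/* Re-write the current page-table base; non-global TLB entries
	 * are flushed as a side effect of the mov to %cr3. */
	native_write_cr3(native_read_cr3());
}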
static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. In x86_64, a %cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	/* If the mov faults, the __ex_table fixup resumes at label 2 and
	 * the "0" (0) input constraint leaves val as 0. */
	asm volatile("1: mov %%cr4, %0		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b			\n"
		".previous			\n"
		: "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

/* Set the 'TS' bit in %cr0 */
#define stts() write_cr0(8 | read_cr0())

#endif /* __KERNEL__ */

static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be
 * a nop for these.
 */
#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb()	alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
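
/*
 * Illustrative sketch, not part of the original header: a minimal
 * producer/consumer pairing of the barriers defined above.  The struct
 * and function names are hypothetical.  wmb() on the writer side keeps
 * the payload store ahead of the flag store; rmb() on the reader side
 * keeps the flag load ahead of the payload load.  (For pure CPU-to-CPU
 * ordering, the smp_wmb()/smp_rmb() variants defined further down would
 * normally be used instead.)
 */
struct __example_msg {
	unsigned long	payload;
	int		ready;
};

static inline void __example_publish(struct __example_msg *m, unsigned long v)
{
	m->payload = v;
	wmb();		/* payload must be visible before ready */
	m->ready = 1;
}

static inline int __example_consume(struct __example_msg *m, unsigned long *v)
{
	if (!m->ready)
		return 0;
	rmb();		/* don't read payload before seeing ready */
	*v = m->payload;
	return 1;
}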
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#endif /* _ASM_X86_SYSTEM_H_ */