Merge branch 'x86/urgent' into x86/pat
@@ -23,8 +23,6 @@
  */
 static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 {
-	u16 gs;
-
 	/* changed the size calculations - should hopefully work better. lbt */
 	dump->magic = CMAGIC;
 	dump->start_code = 0;
@@ -57,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
 	dump->regs.ds = (u16)regs->ds;
 	dump->regs.es = (u16)regs->es;
 	dump->regs.fs = (u16)regs->fs;
-	savesegment(gs, gs);
+	savesegment(gs, dump->regs.gs);
 	dump->regs.orig_ax = regs->orig_ax;
 	dump->regs.ip = regs->ip;
 	dump->regs.cs = (u16)regs->cs;
@@ -3,6 +3,9 @@
 
 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
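The bitops hunks above do two things: they mark set_bit()/clear_bit()/test_and_set_bit_lock()/constant_test_bit() __always_inline (per the new header comment, older gcc inlining heuristics could otherwise out-of-line these helpers and defeat the constant folding they rely on), and they keep the IS_IMMEDIATE(nr) dispatch that picks a byte-wide immediate operation when nr is a compile-time constant. Below is a minimal userspace sketch of that dispatch only, not the kernel code (no LOCK prefix, no inline asm; little-endian byte addressing assumed, as on x86):

#include <stdio.h>

#define IS_IMMEDIATE(nr)	(__builtin_constant_p(nr))

static inline __attribute__((always_inline)) void
sketch_set_bit(unsigned int nr, unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		/* constant nr: touch only the byte that contains the bit */
		unsigned char *byte = (unsigned char *)addr + nr / 8;
		*byte |= (unsigned char)(1u << (nr % 8));
	} else {
		/* variable nr: generic word-sized read-modify-write */
		addr[nr / (8 * sizeof(long))] |= 1UL << (nr % (8 * sizeof(long)));
	}
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	unsigned int n = 65;

	sketch_set_bit(3, map);	/* constant nr: byte path */
	sketch_set_bit(n, map);	/* variable nr: word path, nr may exceed BITS_PER_LONG */
	printf("%lx %lx\n", map[0], map[1]);
	return 0;
}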
@@ -93,6 +93,7 @@
 #define X86_FEATURE_XTOPOLOGY	(3*32+22) /* cpu topology enum extensions */
 #define X86_FEATURE_TSC_RELIABLE	(3*32+23) /* TSC is known to be reliable */
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_CLFLUSH_MONITOR	(3*32+25) /* "" clflush reqd with monitor */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
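X86_FEATURE_CLFLUSH_MONITOR, like the other flags here, encodes (word * 32 + bit) into a single number that indexes a per-CPU capability bitmap. A standalone sketch of that indexing (the array layout is assumed, loosely mirroring the kernel's x86_capability[] words):

#include <stdint.h>
#include <stdio.h>

#define X86_FEATURE_CLFLUSH_MONITOR	(3 * 32 + 25)

static int cpu_has(const uint32_t *caps, int feature)
{
	/* word index = feature / 32, bit index = feature % 32 */
	return (caps[feature / 32] >> (feature % 32)) & 1;
}

int main(void)
{
	uint32_t caps[8] = { 0 };

	caps[3] |= 1u << 25;	/* pretend the synthetic flag was set at boot */
	printf("clflush-before-monitor workaround: %d\n",
	       cpu_has(caps, X86_FEATURE_CLFLUSH_MONITOR));
	return 0;
}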
@@ -2,8 +2,8 @@
 #define _ASM_X86_DMA_MAPPING_H
 
 /*
- * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
- * documentation.
+ * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
+ * Documentation/DMA-API.txt for documentation.
  */
 
 #include <linux/scatterlist.h>
@@ -49,6 +49,7 @@
 #define E820_RESERVED_KERN	128
 
 #ifndef __ASSEMBLY__
+#include <linux/types.h>
 struct e820entry {
 	__u64 addr;	/* start of memory segment */
 	__u64 size;	/* size of memory segment */
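The new #include <linux/types.h> supplies the __u64/__u32 fixed-width types that struct e820entry relies on when this exported header is built from userspace. The stand-in below uses the <stdint.h> equivalents purely to show the packed layout; the type field is not part of the hunk above and is included only for illustration:

#include <stdint.h>
#include <stdio.h>

struct e820entry_like {
	uint64_t addr;	/* start of memory segment */
	uint64_t size;	/* size of memory segment */
	uint32_t type;	/* type of memory segment (illustrative) */
} __attribute__((packed));

int main(void)
{
	/* packed layout: 8 + 8 + 4 = 20 bytes, no padding */
	printf("entry size = %zu bytes\n", sizeof(struct e820entry_like));
	return 0;
}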
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
  * A boot-time mapping is currently limited to at most 16 pages.
  */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
@@ -23,6 +23,12 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+int
+reserve_io_memtype_wc(u64 base, unsigned long size, pgprot_t *prot);
+
+void
+free_io_memtype(u64 base, unsigned long size);
+
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
@@ -6,9 +6,16 @@
  *
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
+/* Select x86 specific features in <linux/kvm.h> */
+#define __KVM_HAVE_PIT
+#define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_DEVICE_ASSIGNMENT
+#define __KVM_HAVE_MSI
+#define __KVM_HAVE_USER_NMI
+
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
 
@@ -1,31 +1,18 @@
 #ifndef _ASM_X86_MATH_EMU_H
 #define _ASM_X86_MATH_EMU_H
 
+#include <asm/ptrace.h>
+#include <asm/vm86.h>
+
 /* This structure matches the layout of the data saved to the stack
    following a device-not-present interrupt, part of it saved
    automatically by the 80386/80486.
    */
-struct info {
+struct math_emu_info {
 	long ___orig_eip;
-	long ___ebx;
-	long ___ecx;
-	long ___edx;
-	long ___esi;
-	long ___edi;
-	long ___ebp;
-	long ___eax;
-	long ___ds;
-	long ___es;
-	long ___fs;
-	long ___orig_eax;
-	long ___eip;
-	long ___cs;
-	long ___eflags;
-	long ___esp;
-	long ___ss;
-	long ___vm86_es; /* This and the following only in vm86 mode */
-	long ___vm86_ds;
-	long ___vm86_fs;
-	long ___vm86_gs;
+	union {
+		struct pt_regs *regs;
+		struct kernel_vm86_regs *vm86;
+	};
 };
 #endif /* _ASM_X86_MATH_EMU_H */
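The old struct info mirrored the whole exception stack frame field by field; the new struct math_emu_info keeps only ___orig_eip plus an anonymous union holding a pointer to the live register frame (struct pt_regs, or struct kernel_vm86_regs in vm86 mode). A small standalone illustration of that anonymous-union access pattern, with made-up register types:

#include <stdio.h>

struct regs_a { int ax; };	/* stand-in for struct pt_regs */
struct regs_b { int bx; };	/* stand-in for struct kernel_vm86_regs */

struct emu_info {
	long orig_ip;
	union {
		struct regs_a *regs;
		struct regs_b *vm86;
	};			/* anonymous union: members are promoted into the struct */
};

int main(void)
{
	struct regs_a a = { .ax = 42 };
	struct emu_info info = { .orig_ip = 0x1234, .regs = &a };

	/* same storage, two typed views of the one frame pointer */
	printf("%lx %d %p\n", info.orig_ip, info.regs->ax, (void *)info.vm86);
	return 0;
}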
@@ -3,8 +3,8 @@
 
 #ifdef __x86_64__
 
+#include <linux/types.h>
 #include <asm/ioctls.h>
-#include <asm/types.h>
 
 /*
  * Machine Check support for x86
@@ -115,8 +115,6 @@ extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
 
-
-
 #ifdef CONFIG_X86_MCE
 extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
@@ -126,5 +124,4 @@ extern void stop_mce(void);
 extern void restart_mce(void);
 
 #endif /* __KERNEL__ */
-
 #endif /* _ASM_X86_MCE_H */
@@ -32,8 +32,6 @@ static inline void get_memcfg_numa(void)
 	get_memcfg_numa_flat();
 }
 
-extern int early_pfn_to_nid(unsigned long pfn);
-
 extern void resume_map_numa_kva(pgd_t *pgd);
 
 #else /* !CONFIG_NUMA */
@@ -40,8 +40,6 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr)
 #define node_end_pfn(nid)	(NODE_DATA(nid)->node_start_pfn + \
 				 NODE_DATA(nid)->node_spanned_pages)
 
-extern int early_pfn_to_nid(unsigned long pfn);
-
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE	(64 * 1024 * 1024)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
@@ -60,6 +60,7 @@ extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
 				   u32 gsi);
 extern void mp_config_acpi_legacy_irqs(void);
 extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
+extern int acpi_probe_gsi(void);
 #ifdef CONFIG_X86_IO_APIC
 extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 			      u32 gsi, int triggering, int polarity);
@@ -71,6 +72,11 @@ mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 	return 0;
 }
 #endif
+#else /* !CONFIG_ACPI: */
+static inline int acpi_probe_gsi(void)
+{
+	return 0;
+}
 #endif /* CONFIG_ACPI */
 
 #define PHYSID_ARRAY_SIZE	BITS_TO_LONGS(MAX_APICS)
@@ -202,6 +202,35 @@
 #define MSR_IA32_THERM_STATUS		0x0000019c
 #define MSR_IA32_MISC_ENABLE		0x000001a0
 
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
+#define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
+#define MSR_IA32_MISC_ENABLE_EMON		(1ULL << 7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL	(1ULL << 11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << 16)
+#define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << 18)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << 22)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << 23)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE		(1ULL << 34)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT		(1ULL << 2)
+#define MSR_IA32_MISC_ENABLE_TM1		(1ULL << 3)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE	(1ULL << 4)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE	(1ULL << 6)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK	(1ULL << 8)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << 9)
+#define MSR_IA32_MISC_ENABLE_FERR		(1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX	(1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_TM2		(1ULL << 13)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE	(1ULL << 19)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK	(1ULL << 20)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT	(1ULL << 24)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE	(1ULL << 37)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
+
 /* Intel Model 6 */
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
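These MSR_IA32_MISC_ENABLE_* masks are plain bit tests against a 64-bit MSR value. As a hedged userspace sketch (not part of this patch), the same bit can be inspected through the msr driver's /dev/cpu/0/msr interface, where the read offset is the MSR number (needs the msr module loaded and root):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MISC_ENABLE			0x000001a0
#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* pread() at offset == MSR number returns the 8-byte MSR value */
	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_MISC_ENABLE) != sizeof(val)) {
		perror("msr read");
		return 1;
	}
	printf("fast string ops: %s\n",
	       (val & MSR_IA32_MISC_ENABLE_FAST_STRING) ? "enabled" : "disabled");
	close(fd);
	return 0;
}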
@@ -23,6 +23,7 @@
 #ifndef _ASM_X86_MTRR_H
 #define _ASM_X86_MTRR_H
 
+#include <linux/types.h>
 #include <linux/ioctl.h>
 #include <linux/errno.h>
 
@@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
 typedef struct { pgprotval_t pgprot; } pgprot_t;
 
 extern int page_is_ram(unsigned long pagenr);
-extern int pagerange_is_ram(unsigned long start, unsigned long end);
 extern int devmem_is_allowed(unsigned long pagenr);
 extern void map_devmem(unsigned long pfn, unsigned long size,
 		       pgprot_t vma_prot);
@@ -1352,14 +1352,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
 	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_cpu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-}
-
+void arch_flush_lazy_cpu_mode(void);
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
@@ -1372,13 +1365,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
 	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
 }
 
-static inline void arch_flush_lazy_mmu_mode(void)
-{
-	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
-		arch_leave_lazy_mmu_mode();
-		arch_enter_lazy_mmu_mode();
-	}
-}
+void arch_flush_lazy_mmu_mode(void);
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 				unsigned long phys, pgprot_t flags)
@@ -1402,6 +1389,7 @@ static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 {
@@ -19,4 +19,7 @@ extern int free_memtype(u64 start, u64 end);
 
 extern void pat_disable(char *reason);
 
+extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+				unsigned long flag);
+
 #endif /* _ASM_X86_PAT_H */
@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+	pgtable_page_dtor(pte);
 	__free_page(pte);
 }
 
@@ -302,16 +302,30 @@ static inline pte_t pte_mkspecial(pte_t pte)
 
 extern pteval_t __supported_pte_mask;
 
+/*
+ * Mask out unsupported bits in a present pgprot. Non-present pgprots
+ * can use those bits for other purposes, so leave them be.
+ */
+static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
+{
+	pgprotval_t protval = pgprot_val(pgprot);
+
+	if (protval & _PAGE_PRESENT)
+		protval &= __supported_pte_mask;
+
+	return protval;
+}
+
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
 }
 
 static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 {
-	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+		     massage_pgprot(pgprot));
 }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
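massage_pgprot() trims a present pgprot to __supported_pte_mask while leaving non-present pgprots untouched, so bits that non-present ptes reuse for other purposes (the header comment's point) are not clobbered when pfn_pte()/pfn_pmd()/pte_modify()/canon_pgprot() are applied. A minimal standalone model of that behaviour (types and mask values are made up, not the kernel's):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pgprotval_t;

#define _PAGE_PRESENT	0x001ULL
#define _PAGE_NX	(1ULL << 63)	/* example of a possibly unsupported bit */

static pgprotval_t supported_pte_mask = ~_PAGE_NX;	/* pretend NX is unsupported */

static pgprotval_t massage_pgprot(pgprotval_t protval)
{
	/* Only present mappings are trimmed to the supported bits;
	 * non-present pgprots keep all bits for other uses. */
	if (protval & _PAGE_PRESENT)
		protval &= supported_pte_mask;
	return protval;
}

int main(void)
{
	/* present + NX on "hardware" without NX: NX is dropped */
	assert(massage_pgprot(_PAGE_PRESENT | _PAGE_NX) == _PAGE_PRESENT);
	/* not present: bits are left alone */
	assert(massage_pgprot(_PAGE_NX) == _PAGE_NX);
	return 0;
}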
@@ -323,7 +337,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	 * the newprot (if present):
 	 */
 	val &= _PAGE_CHG_MASK;
-	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
+	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
 
 	return __pte(val);
 }
@@ -339,7 +353,7 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
 
-#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
+#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
 static inline int is_new_memtype_allowed(unsigned long flags,
 					 unsigned long new_flags)
@@ -353,7 +353,7 @@ struct i387_soft_struct {
 	u8			no_update;
 	u8			rm;
 	u8			alimit;
-	struct info		*info;
+	struct math_emu_info	*info;
 	u32			entry_eip;
 };
 
@@ -83,7 +83,7 @@
 #ifdef CONFIG_X86_PTRACE_BTS
 
 #ifndef __ASSEMBLY__
-#include <asm/types.h>
+#include <linux/types.h>
 
 /* configuration/status structure used in PTRACE_BTS_CONFIG and
    PTRACE_BTS_STATUS commands.
@@ -2,7 +2,7 @@
 #define _ASM_X86_SIGCONTEXT_H
 
 #include <linux/compiler.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define FP_XSTATE_MAGIC1	0x46505853U
 #define FP_XSTATE_MAGIC2	0x46505845U
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SIGCONTEXT32_H
 #define _ASM_X86_SIGCONTEXT32_H
 
+#include <linux/types.h>
+
 /* signal context for 32bit programs. */
 
 #define X86_FXSR_MAGIC	0x0000
@@ -245,6 +245,7 @@ static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
+#define __raw_spin_is_contended	__raw_spin_is_contended
 
 static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
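The added #define __raw_spin_is_contended __raw_spin_is_contended (here and in the paravirt hunk earlier) is the usual self-referential marker: generic code can probe with #ifdef whether the architecture provides the function, while the name still expands to the real implementation. A toy illustration of the pattern:

#include <stdio.h>

static int my_hook(void) { return 1; }
#define my_hook my_hook		/* marker: this "arch" provides my_hook */

int main(void)
{
#ifdef my_hook
	printf("arch hook present: %d\n", my_hook());	/* still calls the function */
#else
	printf("using generic fallback\n");
#endif
	return 0;
}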
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_SWAB_H
 #define _ASM_X86_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
@@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
 			     struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(struct pt_regs);
+asmlinkage int sys_rt_sigreturn(unsigned long);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
@@ -1,18 +1,13 @@
-/* x86 architecture timex specifications */
 #ifndef _ASM_X86_TIMEX_H
 #define _ASM_X86_TIMEX_H
 
 #include <asm/processor.h>
 #include <asm/tsc.h>
 
-#ifdef CONFIG_X86_ELAN
-#  define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
-#elif defined(CONFIG_X86_RDC321X)
-#  define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
-#else
-#  define PIT_TICK_RATE 1193182 /* Underlying HZ */
-#endif
-#define CLOCK_TICK_RATE	PIT_TICK_RATE
+/* The PIT ticks at this frequency (in HZ): */
+#define PIT_TICK_RATE		1193182
+
+#define CLOCK_TICK_RATE		PIT_TICK_RATE
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
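With the Elan/RDC special cases gone, PIT_TICK_RATE is unconditionally 1193182 Hz and CLOCK_TICK_RATE simply aliases it. For reference, a worked example of how a timer divisor is derived from it, in the style of LATCH from include/linux/jiffies.h (the HZ value here is an assumed config choice, not part of this patch):

#include <stdio.h>

#define PIT_TICK_RATE	1193182
#define HZ		250				/* assumed kernel config value */
#define LATCH		((PIT_TICK_RATE + HZ / 2) / HZ)	/* rounded i8254 divisor */

int main(void)
{
	/* (1193182 + 125) / 250 = 4773 */
	printf("LATCH = %d\n", LATCH);
	return 0;
}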
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
 dotraplinkage void do_overflow(struct pt_regs *, long);
 dotraplinkage void do_bounds(struct pt_regs *, long);
 dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs *, long);
+dotraplinkage void do_device_not_available(struct pt_regs);
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
 dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
 dotraplinkage void do_segment_not_present(struct pt_regs *, long);
@@ -77,7 +77,7 @@ extern int panic_on_unrecovered_nmi;
 extern int kstack_depth_to_print;
 
 void math_error(void __user *);
-asmlinkage void math_emulate(long);
+void math_emulate(struct math_emu_info *);
 #ifdef CONFIG_X86_32
 unsigned long patch_espfix_desc(unsigned long, unsigned long);
 #else
@@ -137,7 +137,7 @@ static inline pte_t mfn_pte(unsigned long page_nr, pgprot_t pgprot)
 	pte_t pte;
 
 	pte.pte = ((phys_addr_t)page_nr << PAGE_SHIFT) |
-			(pgprot_val(pgprot) & __supported_pte_mask);
+			massage_pgprot(pgprot);
 
 	return pte;
 }