Merge branch 'x86-stage-3-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-stage-3-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (190 commits)
  Revert "cpuacct: reduce one NULL check in fast-path"
  Revert "x86: don't compile vsmp_64 for 32bit"
  x86: Correct behaviour of irq affinity
  x86: early_ioremap_init(), use __fix_to_virt(), because we are sure it's safe
  x86: use default_cpu_mask_to_apicid for 64bit
  x86: fix set_extra_move_desc calling
  x86, PAT, PCI: Change vma prot in pci_mmap to reflect inherited prot
  x86/dmi: fix dmi_alloc() section mismatches
  x86: e820 fix various signedness issues in setup.c and e820.c
  x86: apic/io_apic.c define msi_ir_chip and ir_ioapic_chip all the time
  x86: irq.c keep CONFIG_X86_LOCAL_APIC interrupts together
  x86: irq.c use same path for show_interrupts
  x86: cpu/cpu.h cleanup
  x86: Fix a couple of sparse warnings in arch/x86/kernel/apic/io_apic.c
  Revert "x86: create a non-zero sized bm_pte only when needed"
  x86: pci-nommu.c cleanup
  x86: io_delay.c cleanup
  x86: rtc.c cleanup
  x86: i8253 cleanup
  x86: kdebugfs.c cleanup
  ...
@@ -75,7 +75,7 @@ static inline void default_inquire_remote_apic(int apicid)
#define setup_secondary_clock setup_secondary_APIC_clock
#endif

#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_X86_64
extern int is_vsmp_box(void);
#else
static inline int is_vsmp_box(void)
@@ -108,6 +108,16 @@ extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);

#ifdef CONFIG_X86_X2APIC
/*
* Make previous memory operations globally visible before
* sending the IPI through x2apic wrmsr. We need a serializing instruction or
* mfence for this.
*/
static inline void x2apic_wrmsr_fence(void)
{
asm volatile("mfence" : : : "memory");
}

static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
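Note: a typical caller is expected to issue this fence immediately before the ICR write that actually triggers the IPI. A minimal sketch of that pattern (the wrapper function below is illustrative and not part of this patch):

    static void send_ipi_x2apic(u32 apicid, u32 vector)
    {
            /* order earlier stores before the wrmsr-based ICR write */
            x2apic_wrmsr_fence();
            native_x2apic_icr_write(APIC_DM_FIXED | vector, apicid);
    }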
@@ -184,6 +194,9 @@ static inline int x2apic_enabled(void)
{
return 0;
}

#define x2apic 0

#endif

extern int get_physical_broadcast(void);
@@ -379,6 +392,7 @@ static inline u32 safe_apic_wait_icr_idle(void)

static inline void ack_APIC_irq(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
@@ -386,6 +400,7 @@ static inline void ack_APIC_irq(void)

/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
#endif
}

static inline unsigned default_get_apic_id(unsigned long x)
@@ -474,10 +489,19 @@ static inline int default_apic_id_registered(void)
return physid_isset(read_apic_id(), phys_cpu_present_map);
}

static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}

extern int default_apicid_to_node(int logical_apicid);

#endif

static inline unsigned int
default_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
return cpumask_bits(cpumask)[0];
return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
}

static inline unsigned int
@@ -491,15 +515,6 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
return (unsigned int)(mask1 & mask2 & mask3);
}

static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}

extern int default_apicid_to_node(int logical_apicid);

#endif

static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
{
return physid_isset(apicid, bitmap);

@@ -53,6 +53,7 @@
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_LVTCMCI 0x2f0
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000

@@ -1,10 +1,6 @@
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H

/* Don't touch these, unless you really know what you're doing. */
#define DEF_SYSSEG 0x1000
#define DEF_SYSSIZE 0x7F00

/* Internal svga startup constants */
#define NORMAL_VGA 0xffff /* 80x25 mode */
#define EXTENDED_VGA 0xfffe /* 80x50 mode */

@@ -90,6 +90,9 @@ int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
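Note: the new *_array variants batch the attribute change over a whole set of pages in one call. A hedged usage sketch (the page array and its size are made up):

    struct page *pages[16];

    /* ... fill pages[] ... */
    if (set_pages_array_uc(pages, 16))
            return -EIO;            /* attribute change failed */
    /* ... device uses the pages uncached ... */
    set_pages_array_wb(pages, 16);  /* restore write-back */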
arch/x86/include/asm/cpu_debug.h (new file, 226 lines)
@@ -0,0 +1,226 @@
#ifndef _ASM_X86_CPU_DEBUG_H
#define _ASM_X86_CPU_DEBUG_H

/*
* CPU x86 architecture debug
*
* Copyright(C) 2009 Jaswinder Singh Rajput
*/

/* Register flags */
enum cpu_debug_bit {
/* Model Specific Registers (MSRs) */
CPU_MC_BIT, /* Machine Check */
CPU_MONITOR_BIT, /* Monitor */
CPU_TIME_BIT, /* Time */
CPU_PMC_BIT, /* Performance Monitor */
CPU_PLATFORM_BIT, /* Platform */
CPU_APIC_BIT, /* APIC */
CPU_POWERON_BIT, /* Power-on */
CPU_CONTROL_BIT, /* Control */
CPU_FEATURES_BIT, /* Features control */
CPU_LBRANCH_BIT, /* Last Branch */
CPU_BIOS_BIT, /* BIOS */
CPU_FREQ_BIT, /* Frequency */
CPU_MTTR_BIT, /* MTRR */
CPU_PERF_BIT, /* Performance */
CPU_CACHE_BIT, /* Cache */
CPU_SYSENTER_BIT, /* Sysenter */
CPU_THERM_BIT, /* Thermal */
CPU_MISC_BIT, /* Miscellaneous */
CPU_DEBUG_BIT, /* Debug */
CPU_PAT_BIT, /* PAT */
CPU_VMX_BIT, /* VMX */
CPU_CALL_BIT, /* System Call */
CPU_BASE_BIT, /* BASE Address */
CPU_VER_BIT, /* Version ID */
CPU_CONF_BIT, /* Configuration */
CPU_SMM_BIT, /* System mgmt mode */
CPU_SVM_BIT, /*Secure Virtual Machine*/
CPU_OSVM_BIT, /* OS-Visible Workaround*/
/* Standard Registers */
CPU_TSS_BIT, /* Task Stack Segment */
CPU_CR_BIT, /* Control Registers */
CPU_DT_BIT, /* Descriptor Table */
/* End of Registers flags */
CPU_REG_ALL_BIT, /* Select all Registers */
};

#define CPU_REG_ALL (~0) /* Select all Registers */

#define CPU_MC (1 << CPU_MC_BIT)
#define CPU_MONITOR (1 << CPU_MONITOR_BIT)
#define CPU_TIME (1 << CPU_TIME_BIT)
#define CPU_PMC (1 << CPU_PMC_BIT)
#define CPU_PLATFORM (1 << CPU_PLATFORM_BIT)
#define CPU_APIC (1 << CPU_APIC_BIT)
#define CPU_POWERON (1 << CPU_POWERON_BIT)
#define CPU_CONTROL (1 << CPU_CONTROL_BIT)
#define CPU_FEATURES (1 << CPU_FEATURES_BIT)
#define CPU_LBRANCH (1 << CPU_LBRANCH_BIT)
#define CPU_BIOS (1 << CPU_BIOS_BIT)
#define CPU_FREQ (1 << CPU_FREQ_BIT)
#define CPU_MTRR (1 << CPU_MTTR_BIT)
#define CPU_PERF (1 << CPU_PERF_BIT)
#define CPU_CACHE (1 << CPU_CACHE_BIT)
#define CPU_SYSENTER (1 << CPU_SYSENTER_BIT)
#define CPU_THERM (1 << CPU_THERM_BIT)
#define CPU_MISC (1 << CPU_MISC_BIT)
#define CPU_DEBUG (1 << CPU_DEBUG_BIT)
#define CPU_PAT (1 << CPU_PAT_BIT)
#define CPU_VMX (1 << CPU_VMX_BIT)
#define CPU_CALL (1 << CPU_CALL_BIT)
#define CPU_BASE (1 << CPU_BASE_BIT)
#define CPU_VER (1 << CPU_VER_BIT)
#define CPU_CONF (1 << CPU_CONF_BIT)
#define CPU_SMM (1 << CPU_SMM_BIT)
#define CPU_SVM (1 << CPU_SVM_BIT)
#define CPU_OSVM (1 << CPU_OSVM_BIT)
#define CPU_TSS (1 << CPU_TSS_BIT)
#define CPU_CR (1 << CPU_CR_BIT)
#define CPU_DT (1 << CPU_DT_BIT)

/* Register file flags */
enum cpu_file_bit {
CPU_INDEX_BIT, /* index */
CPU_VALUE_BIT, /* value */
};

#define CPU_FILE_VALUE (1 << CPU_VALUE_BIT)

/*
* DisplayFamily_DisplayModel Processor Families/Processor Number Series
* -------------------------- ------------------------------------------
* 05_01, 05_02, 05_04 Pentium, Pentium with MMX
*
* 06_01 Pentium Pro
* 06_03, 06_05 Pentium II Xeon, Pentium II
* 06_07, 06_08, 06_0A, 06_0B Pentium III Xeon, Pentum III
*
* 06_09, 060D Pentium M
*
* 06_0E Core Duo, Core Solo
*
* 06_0F Xeon 3000, 3200, 5100, 5300, 7300 series,
* Core 2 Quad, Core 2 Extreme, Core 2 Duo,
* Pentium dual-core
* 06_17 Xeon 5200, 5400 series, Core 2 Quad Q9650
*
* 06_1C Atom
*
* 0F_00, 0F_01, 0F_02 Xeon, Xeon MP, Pentium 4
* 0F_03, 0F_04 Xeon, Xeon MP, Pentium 4, Pentium D
*
* 0F_06 Xeon 7100, 5000 Series, Xeon MP,
* Pentium 4, Pentium D
*/

/* Register processors bits */
enum cpu_processor_bit {
CPU_NONE,
/* Intel */
CPU_INTEL_PENTIUM_BIT,
CPU_INTEL_P6_BIT,
CPU_INTEL_PENTIUM_M_BIT,
CPU_INTEL_CORE_BIT,
CPU_INTEL_CORE2_BIT,
CPU_INTEL_ATOM_BIT,
CPU_INTEL_XEON_P4_BIT,
CPU_INTEL_XEON_MP_BIT,
/* AMD */
CPU_AMD_K6_BIT,
CPU_AMD_K7_BIT,
CPU_AMD_K8_BIT,
CPU_AMD_0F_BIT,
CPU_AMD_10_BIT,
CPU_AMD_11_BIT,
};

#define CPU_INTEL_PENTIUM (1 << CPU_INTEL_PENTIUM_BIT)
#define CPU_INTEL_P6 (1 << CPU_INTEL_P6_BIT)
#define CPU_INTEL_PENTIUM_M (1 << CPU_INTEL_PENTIUM_M_BIT)
#define CPU_INTEL_CORE (1 << CPU_INTEL_CORE_BIT)
#define CPU_INTEL_CORE2 (1 << CPU_INTEL_CORE2_BIT)
#define CPU_INTEL_ATOM (1 << CPU_INTEL_ATOM_BIT)
#define CPU_INTEL_XEON_P4 (1 << CPU_INTEL_XEON_P4_BIT)
#define CPU_INTEL_XEON_MP (1 << CPU_INTEL_XEON_MP_BIT)

#define CPU_INTEL_PX (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
#define CPU_INTEL_COREX (CPU_INTEL_CORE | CPU_INTEL_CORE2)
#define CPU_INTEL_XEON (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
#define CPU_CO_AT (CPU_INTEL_CORE | CPU_INTEL_ATOM)
#define CPU_C2_AT (CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
#define CPU_CX_AT (CPU_INTEL_COREX | CPU_INTEL_ATOM)
#define CPU_CX_XE (CPU_INTEL_COREX | CPU_INTEL_XEON)
#define CPU_P6_XE (CPU_INTEL_P6 | CPU_INTEL_XEON)
#define CPU_PM_CO_AT (CPU_INTEL_PENTIUM_M | CPU_CO_AT)
#define CPU_C2_AT_XE (CPU_C2_AT | CPU_INTEL_XEON)
#define CPU_CX_AT_XE (CPU_CX_AT | CPU_INTEL_XEON)
#define CPU_P6_CX_AT (CPU_INTEL_P6 | CPU_CX_AT)
#define CPU_P6_CX_XE (CPU_P6_XE | CPU_INTEL_COREX)
#define CPU_P6_CX_AT_XE (CPU_INTEL_P6 | CPU_CX_AT_XE)
#define CPU_PM_CX_AT_XE (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
#define CPU_PM_CX_AT (CPU_INTEL_PENTIUM_M | CPU_CX_AT)
#define CPU_PM_CX_XE (CPU_INTEL_PENTIUM_M | CPU_CX_XE)
#define CPU_PX_CX_AT (CPU_INTEL_PX | CPU_CX_AT)
#define CPU_PX_CX_AT_XE (CPU_INTEL_PX | CPU_CX_AT_XE)

/* Select all supported Intel CPUs */
#define CPU_INTEL_ALL (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)

#define CPU_AMD_K6 (1 << CPU_AMD_K6_BIT)
#define CPU_AMD_K7 (1 << CPU_AMD_K7_BIT)
#define CPU_AMD_K8 (1 << CPU_AMD_K8_BIT)
#define CPU_AMD_0F (1 << CPU_AMD_0F_BIT)
#define CPU_AMD_10 (1 << CPU_AMD_10_BIT)
#define CPU_AMD_11 (1 << CPU_AMD_11_BIT)

#define CPU_K10_PLUS (CPU_AMD_10 | CPU_AMD_11)
#define CPU_K0F_PLUS (CPU_AMD_0F | CPU_K10_PLUS)
#define CPU_K8_PLUS (CPU_AMD_K8 | CPU_K0F_PLUS)
#define CPU_K7_PLUS (CPU_AMD_K7 | CPU_K8_PLUS)

/* Select all supported AMD CPUs */
#define CPU_AMD_ALL (CPU_AMD_K6 | CPU_K7_PLUS)

/* Select all supported CPUs */
#define CPU_ALL (CPU_INTEL_ALL | CPU_AMD_ALL)

#define MAX_CPU_FILES 512

struct cpu_private {
unsigned cpu;
unsigned type;
unsigned reg;
unsigned file;
};

struct cpu_debug_base {
char *name; /* Register name */
unsigned flag; /* Register flag */
unsigned write; /* Register write flag */
};

/*
* Currently it looks similar to cpu_debug_base but once we add more files
* cpu_file_base will go in different direction
*/
struct cpu_file_base {
char *name; /* Register file name */
unsigned flag; /* Register file flag */
unsigned write; /* Register write flag */
};

struct cpu_cpuX_base {
struct dentry *dentry; /* Register dentry */
int init; /* Register index file */
};

struct cpu_debug_range {
unsigned min; /* Register range min */
unsigned max; /* Register range max */
unsigned flag; /* Supported flags */
unsigned model; /* Supported models */
};

#endif /* _ASM_X86_CPU_DEBUG_H */
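Note: the debugfs side is expected to describe groups of MSRs with cpu_debug_range entries built from the flag and model masks above. An illustrative, made-up table entry (the MSR numbers are the SYSENTER MSRs; the table name is hypothetical):

    /* hypothetical range table entry: expose MSRs 0x174-0x176 (SYSENTER
     * CS/ESP/EIP) for all supported Intel CPUs */
    static struct cpu_debug_range sample_range[] = {
            { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_INTEL_ALL },
    };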
@@ -91,7 +91,6 @@ static inline int desc_empty(const void *ptr)
#define store_gdt(dtr) native_store_gdt(dtr)
#define store_idt(dtr) native_store_idt(dtr)
#define store_tr(tr) (tr = native_store_tr())
#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))

#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt
@@ -112,6 +111,8 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
}
#endif /* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

static inline void native_write_idt_entry(gate_desc *idt, int entry,
const gate_desc *gate)
{

@@ -1,22 +1,15 @@
#ifndef _ASM_X86_DMI_H
#define _ASM_X86_DMI_H

#include <linux/compiler.h>
#include <linux/init.h>

#include <asm/io.h>
#include <asm/setup.h>

#define DMI_MAX_DATA 2048

extern int dmi_alloc_index;
extern char dmi_alloc_data[DMI_MAX_DATA];

/* This is so early that there is no good way to allocate dynamic memory.
Allocate data in an BSS array. */
static inline void *dmi_alloc(unsigned len)
static __always_inline __init void *dmi_alloc(unsigned len)
{
int idx = dmi_alloc_index;
if ((dmi_alloc_index + len) > DMI_MAX_DATA)
return NULL;
dmi_alloc_index += len;
return dmi_alloc_data + idx;
return extend_brk(len, sizeof(int));
}

/* Use early IO mappings for DMI because it's initialized early */
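Note: dmi_alloc() now hands out memory from the early brk region instead of a fixed BSS array, so some boot-time code has to reserve brk space up front with RESERVE_BRK() (defined further down, in setup.h). A hedged sketch of that pairing; the 64 KB figure and the placement are assumptions, not taken from this diff:

    /* in early-boot code, at file scope: set aside brk space for DMI data */
    RESERVE_BRK(dmi_alloc, 65536);

    /* later, while parsing DMI tables */
    char *copy = dmi_alloc(len);    /* brk memory via extend_brk() */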
@@ -72,7 +72,7 @@ extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
extern int
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map);
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, u32 *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,

@@ -33,6 +33,8 @@ BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
smp_invalidate_interrupt)
#endif

BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)

/*
* every pentium local APIC has two 'local interrupts', with a
* soft-definable vector attached to both interrupts, one of

@@ -12,6 +12,7 @@ typedef struct {
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq_spurious_count;
#endif
unsigned int generic_irqs; /* arch dependent */
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;

@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
struct page *kmap_atomic_to_page(void *ptr);

#ifndef CONFIG_PARAVIRT

@@ -27,6 +27,7 @@

/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void generic_interrupt(void);
extern void error_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);

arch/x86/include/asm/init.h (new file, 18 lines)
@@ -0,0 +1,18 @@
#ifndef _ASM_X86_INIT_32_H
#define _ASM_X86_INIT_32_H

#ifdef CONFIG_X86_32
extern void __init early_ioremap_page_table_range_init(void);
#endif

extern unsigned long __init
kernel_physical_mapping_init(unsigned long start,
unsigned long end,
unsigned long page_size_mask);


extern unsigned long __initdata e820_table_start;
extern unsigned long __meminitdata e820_table_end;
extern unsigned long __meminitdata e820_table_top;

#endif /* _ASM_X86_INIT_32_H */

@@ -162,7 +162,8 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);

#ifdef CONFIG_X86_64
extern int save_mask_IO_APIC_setup(void);
extern int save_IO_APIC_setup(void);
extern void mask_IO_APIC_setup(void);
extern void restore_IO_APIC_setup(void);
extern void reinit_intr_remapped_IO_APIC(int);
#endif
@@ -172,7 +173,7 @@ extern void probe_nr_irqs_gsi(void);
extern int setup_ioapic_entry(int apic, int irq,
struct IO_APIC_route_entry *entry,
unsigned int destination, int trigger,
int polarity, int vector);
int polarity, int vector, int pin);
extern void ioapic_write_entry(int apic, int pin,
struct IO_APIC_route_entry e);
#else /* !CONFIG_X86_IO_APIC */

@@ -36,6 +36,7 @@ static inline int irq_canonicalize(int irq)
extern void fixup_irqs(void);
#endif

extern void (*generic_interrupt_extension)(void);
extern void init_IRQ(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);
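Note: the new generic_interrupt vector is dispatched through the generic_interrupt_extension function pointer, so platform code only has to install a handler. A hedged sketch of that hook-up (handler and init names are made up):

    static void my_platform_event_handler(void)
    {
            /* platform-specific acknowledgement / dispatch */
    }

    static int __init my_platform_setup(void)
    {
            generic_interrupt_extension = my_platform_event_handler;
            return 0;
    }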
@@ -1,8 +1,6 @@
#ifndef _ASM_X86_IRQ_REMAPPING_H
#define _ASM_X86_IRQ_REMAPPING_H

extern int x2apic;

#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)

#endif /* _ASM_X86_IRQ_REMAPPING_H */

@@ -111,6 +111,11 @@
*/
#define LOCAL_PERF_VECTOR 0xee

/*
* Generic system vector for platform specific use
*/
#define GENERIC_INTERRUPT_VECTOR 0xed

/*
* First APIC vector available to drivers: (vectors 0x30-0xee) we
* start at 0x31(0x41) to spread out vectors evenly between priority

@@ -9,13 +9,13 @@
# define PAGES_NR 4
#else
# define PA_CONTROL_PAGE 0
# define PA_TABLE_PAGE 1
# define PAGES_NR 2
# define VA_CONTROL_PAGE 1
# define PA_TABLE_PAGE 2
# define PA_SWAP_PAGE 3
# define PAGES_NR 4
#endif

#ifdef CONFIG_X86_32
# define KEXEC_CONTROL_CODE_MAX_SIZE 2048
#endif

#ifndef __ASSEMBLY__

@@ -136,10 +136,11 @@ relocate_kernel(unsigned long indirection_page,
unsigned int has_pae,
unsigned int preserve_context);
#else
NORET_TYPE void
unsigned long
relocate_kernel(unsigned long indirection_page,
unsigned long page_list,
unsigned long start_address) ATTRIB_NORET;
unsigned long start_address,
unsigned int preserve_context);
#endif

#define ARCH_HAS_KIMAGE_ARCH

@@ -1,14 +1,11 @@
#ifndef _ASM_X86_LINKAGE_H
#define _ASM_X86_LINKAGE_H

#include <linux/stringify.h>

#undef notrace
#define notrace __attribute__((no_instrument_function))

#ifdef CONFIG_X86_64
#define __ALIGN .p2align 4,,15
#define __ALIGN_STR ".p2align 4,,15"
#endif

#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
/*
@@ -50,16 +47,20 @@
__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
"g" (arg4), "g" (arg5), "g" (arg6))

#endif
#endif /* CONFIG_X86_32 */

#ifdef __ASSEMBLY__

#define GLOBAL(name) \
.globl name; \
name:

#ifdef CONFIG_X86_ALIGNMENT_16
#define __ALIGN .align 16,0x90
#define __ALIGN_STR ".align 16,0x90"
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
#define __ALIGN .p2align 4, 0x90
#define __ALIGN_STR __stringify(__ALIGN)
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_LINKAGE_H */

@@ -11,6 +11,8 @@
*/

#define MCG_CTL_P (1UL<<8) /* MCG_CAP register available */
#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */

#define MCG_STATUS_RIPV (1UL<<0) /* restart ip valid */
#define MCG_STATUS_EIPV (1UL<<1) /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;

#include <asm/atomic.h>

void mce_setup(struct mce *m);
void mce_log(struct mce *m);
DECLARE_PER_CPU(struct sys_device, device_mce);
extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

/*
* To support more than 128 would need to escape the predefined
* Linux defined extended banks first.
*/
#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)

#ifdef CONFIG_X86_MCE_INTEL
void mce_intel_feature_init(struct cpuinfo_x86 *c);
void cmci_clear(void);
void cmci_reenable(void);
void cmci_rediscover(int dying);
void cmci_recheck(void);
#else
static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
static inline void cmci_clear(void) {}
static inline void cmci_reenable(void) {}
static inline void cmci_rediscover(int dying) {}
static inline void cmci_recheck(void) {}
#endif

#ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
#endif

void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
extern int mce_available(struct cpuinfo_x86 *c);

void mce_log_therm_throt_event(__u64 status);

extern atomic_t mce_entry;

extern void do_machine_check(struct pt_regs *, long);

typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);

enum mcp_flags {
MCP_TIMESTAMP = (1 << 0), /* log time stamp */
MCP_UC = (1 << 1), /* log uncorrected errors */
};
extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

extern int mce_notify_user(void);

#endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
#else
#define mcheck_init(c) do { } while (0)
#endif
extern void stop_mce(void);
extern void restart_mce(void);

extern void (*mce_threshold_vector)(void);

#endif /* __KERNEL__ */
#endif /* _ASM_X86_MCE_H */
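Note: mce_banks_t is just a bitmap with one bit per MCE bank, so a poller only walks the banks whose bits are set. A simplified sketch of that loop (not the actual machine_check_poll() body; the status read is only indicated in a comment):

    mce_banks_t *b = &__get_cpu_var(mce_poll_banks);
    int i;

    for (i = 0; i < MAX_NR_BANKS; i++) {
            if (!test_bit(i, *b))
                    continue;
            /* rdmsrl(MSR_IA32_MC0_STATUS + 4*i, ...) and log if valid */
    }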
@@ -47,6 +47,7 @@
#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
MSI_ADDR_DEST_ID_MASK)
#define MSI_ADDR_EXT_DEST_ID(dest) ((dest) & 0xffffff00)

#define MSI_ADDR_IR_EXT_INT (1 << 4)
#define MSI_ADDR_IR_SHV (1 << 3)

@@ -81,6 +81,11 @@
#define MSR_IA32_MC0_ADDR 0x00000402
#define MSR_IA32_MC0_MISC 0x00000403

/* These are consecutive and not in the normal 4er MCE bank block */
#define MSR_IA32_MC0_CTL2 0x00000280
#define CMCI_EN (1ULL << 30)
#define CMCI_THRESHOLD_MASK 0xffffULL

#define MSR_P6_PERFCTR0 0x000000c1
#define MSR_P6_PERFCTR1 0x000000c2
#define MSR_P6_EVNTSEL0 0x00000186

@@ -39,6 +39,11 @@
#define __VIRTUAL_MASK_SHIFT 32
#endif /* CONFIG_X86_PAE */

/*
* Kernel image size is limited to 512 MB (see in arch/x86/kernel/head_32.S)
*/
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)

#ifndef __ASSEMBLY__

/*

@@ -40,14 +40,8 @@

#ifndef __ASSEMBLY__

struct pgprot;

extern int page_is_ram(unsigned long pagenr);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);

extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

@@ -317,8 +317,6 @@ struct pv_mmu_ops {
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
void (*pmd_clear)(pmd_t *pmdp);
@@ -389,7 +387,7 @@ extern struct pv_lock_ops pv_lock_ops;

#define paravirt_type(op) \
[paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
[paravirt_opptr] "m" (op)
[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber)

@@ -443,7 +441,7 @@ int paravirt_disable_iospace(void);
* offset into the paravirt_patch_template structure, and can therefore be
* freely converted back into a structure offset.
*/
#define PARAVIRT_CALL "call *%[paravirt_opptr];"
#define PARAVIRT_CALL "call *%c[paravirt_opptr];"

/*
* These macros are intended to wrap calls through one of the paravirt
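Note on the PARAVIRT_CALL change above: with the operand switched from "m" (op) to "i" (&(op)), the %c output modifier prints the bare address of the ops slot, so the emitted instruction takes a fixed form such as "call *pv_mmu_ops+<offset>" instead of whatever memory-operand form the compiler happens to choose. A standalone sketch of the same operand trick, using a hypothetical ops table rather than the kernel's pv_*_ops:

    struct my_ops { void (*fn)(void); };
    extern struct my_ops my_ops;

    static inline void call_fn(void)
    {
            /* %c prints the constant operand without the usual '$' prefix */
            asm volatile("call *%c[opptr]"
                         : : [opptr] "i" (&my_ops.fn) : "memory");
    }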
@@ -1365,13 +1363,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
/* 5 arg words */
pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
@@ -1388,12 +1379,6 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{

@@ -2,6 +2,7 @@
#define _ASM_X86_PAT_H

#include <linux/types.h>
#include <asm/pgtable_types.h>

#ifdef CONFIG_X86_PAT
extern int pat_enabled;
@@ -17,5 +18,9 @@ extern int free_memtype(u64 start, u64 end);

extern int kernel_map_sync_memtype(u64 base, unsigned long size,
unsigned long flag);
extern void map_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);
extern void unmap_devmem(unsigned long pfn, unsigned long size,
struct pgprot vma_prot);

#endif /* _ASM_X86_PAT_H */

@@ -26,13 +26,6 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
native_set_pte(ptep, pte);
}

static inline void native_set_pte_present(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
native_set_pte(ptep, pte);
}

static inline void native_pmd_clear(pmd_t *pmdp)
{
native_set_pmd(pmdp, __pmd(0));

@@ -31,23 +31,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
ptep->pte_low = pte.pte_low;
}

/*
* Since this is only called on user PTEs, and the page fault handler
* must handle the already racy situation of simultaneous page faults,
* we are justified in merely clearing the PTE present bit, followed
* by a set. The ordering here is important.
*/
static inline void native_set_pte_present(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, pte_t pte)
{
ptep->pte_low = 0;
smp_wmb();
ptep->pte_high = pte.pte_high;
smp_wmb();
ptep->pte_low = pte.pte_low;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
set_64bit((unsigned long long *)(ptep), native_pte_val(pte));

@@ -31,8 +31,6 @@ extern struct list_head pgd_list;
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte) \
native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)

@@ -42,9 +42,6 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
*/
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else

@@ -25,6 +25,11 @@
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)

#ifndef __ASSEMBLER__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif

#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512

@@ -273,6 +273,7 @@ typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern int nx_enabled;
extern void set_nx(void);

#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

@@ -75,9 +75,9 @@ struct cpuinfo_x86 {
#else
/* Number of 4K pages in DTLB/ITLB combined(in pages): */
int x86_tlbsize;
#endif
__u8 x86_virt_bits;
__u8 x86_phys_bits;
#endif
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
/* Max extended CPUID function supported: */
@@ -391,6 +391,9 @@ DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else /* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
DECLARE_PER_CPU(unsigned long, stack_canary);

@@ -1 +1,8 @@
#ifndef _ASM_X86_SECTIONS_H
#define _ASM_X86_SECTIONS_H

#include <asm-generic/sections.h>

extern char __brk_base[], __brk_limit[];

#endif /* _ASM_X86_SECTIONS_H */

@@ -64,7 +64,7 @@ extern void x86_quirk_time_init(void);
#include <asm/bootparam.h>

/* Interrupt control for vSMPowered x86_64 systems */
#ifdef CONFIG_X86_VSMP
#ifdef CONFIG_X86_64
void vsmp_init(void);
#else
static inline void vsmp_init(void) { }
@@ -100,20 +100,51 @@ extern struct boot_params boot_params;
*/
#define LOWMEMSIZE() (0x9f000)

/* exceedingly early brk-like allocator */
extern unsigned long _brk_end;
void *extend_brk(size_t size, size_t align);

/*
* Reserve space in the brk section. The name must be unique within
* the file, and somewhat descriptive. The size is in bytes. Must be
* used at file scope.
*
* (This uses a temp function to wrap the asm so we can pass it the
* size parameter; otherwise we wouldn't be able to. We can't use a
* "section" attribute on a normal variable because it always ends up
* being @progbits, which ends up allocating space in the vmlinux
* executable.)
*/
#define RESERVE_BRK(name,sz) \
static void __section(.discard) __used \
__brk_reservation_fn_##name##__(void) { \
asm volatile ( \
".pushsection .brk_reservation,\"aw\",@nobits;" \
".brk." #name ":" \
" 1:.skip %c0;" \
" .size .brk." #name ", . - 1b;" \
" .popsection" \
: : "i" (sz)); \
}

#ifdef __i386__

void __init i386_start_kernel(void);
extern void probe_roms(void);

extern unsigned long init_pg_tables_start;
extern unsigned long init_pg_tables_end;

#else
void __init x86_64_start_kernel(char *real_mode);
void __init x86_64_start_reservations(char *real_mode_data);

#endif /* __i386__ */
#endif /* _SETUP */
#else
#define RESERVE_BRK(name,sz) \
.pushsection .brk_reservation,"aw",@nobits; \
.brk.name: \
1: .skip sz; \
.size .brk.name,.-1b; \
.popsection
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
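Note: RESERVE_BRK() only reserves room in the .brk_reservation section; the actual handing-out of that memory is done by extend_brk(), which behaves like a simple bump allocator between the __brk_base/_brk_end cursor and __brk_limit. A hedged sketch of that behaviour (simplified, not the exact setup.c implementation):

    void * __init extend_brk(size_t size, size_t align)
    {
            size_t mask = align - 1;
            void *ret;

            _brk_end = (_brk_end + mask) & ~mask;   /* align the cursor */
            BUG_ON((char *)(_brk_end + size) > __brk_limit);

            ret = (void *)_brk_end;
            _brk_end += size;
            memset(ret, 0, size);

            return ret;
    }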
@@ -199,6 +199,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
#define SCIR_CPU_ACTIVITY 0x02 /* not idle */
#define SCIR_CPU_HB_INTERVAL (HZ) /* once per second */

/* Loop through all installed blades */
#define for_each_possible_blade(bid) \
for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)

/*
* Macros for converting between kernel virtual addresses, socket local physical
* addresses, and UV global physical addresses.
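Note: a quick usage sketch for the new blade iterator (the printk is only illustrative):

    int bid;

    for_each_possible_blade(bid)
            printk(KERN_DEBUG "uv: blade %d possible\n", bid);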
@@ -296,6 +296,8 @@ HYPERVISOR_get_debugreg(int reg)
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
if (sizeof(u64) == sizeof(long))
return _hypercall2(int, update_descriptor, ma, desc);
return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}