Merge branch 'x86/um-header' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86/um-header' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  x86: canonicalize remaining header guards
  x86: drop double underscores from header guards
  x86: Fix ASM_X86__ header guards
  x86, um: get rid of uml-config.h
  x86, um: get rid of arch/um/Kconfig.arch
  x86, um: get rid of arch/um/os symlink
  x86, um: get rid of excessive includes of uml-config.h
  x86, um: get rid of header symlinks
  x86, um: merge Kconfig.i386 and Kconfig.x86_64
  x86, um: get rid of sysdep symlink
  x86, um: trim the junk from uml ptrace-*.h
  x86, um: take vm-flags.h to sysdep
  x86, um: get rid of uml asm/arch
  x86, um: get rid of uml highmem.h
  x86, um: get rid of uml unistd.h
  x86, um: get rid of system.h -> system.h include
  x86, um: uml atomic.h is not needed anymore
  x86, um: untangle uml ldt.h
  x86, um: get rid of more uml asm/arch uses
  x86, um: remove dead header (uml module-generic.h; never used these days)
  ...
Author: Linus Torvalds
Date: 2008-10-23 10:22:01 -07:00
593 changed files with 1338 additions and 1762 deletions

arch/x86/include/asm/Kbuild (new file, 24 lines)

@@ -0,0 +1,24 @@
include include/asm-generic/Kbuild.asm
header-y += boot.h
header-y += bootparam.h
header-y += debugreg.h
header-y += ldt.h
header-y += msr-index.h
header-y += prctl.h
header-y += ptrace-abi.h
header-y += sigcontext32.h
header-y += ucontext.h
header-y += processor-flags.h
unifdef-y += e820.h
unifdef-y += ist.h
unifdef-y += mce.h
unifdef-y += msr.h
unifdef-y += mtrr.h
unifdef-y += posix_types_32.h
unifdef-y += posix_types_64.h
unifdef-y += unistd_32.h
unifdef-y += unistd_64.h
unifdef-y += vm86.h
unifdef-y += vsyscall.h

arch/x86/include/asm/a.out-core.h (new file, 73 lines)

@@ -0,0 +1,73 @@
/* a.out coredump register dumper
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#ifndef _ASM_X86_A_OUT_CORE_H
#define _ASM_X86_A_OUT_CORE_H
#ifdef __KERNEL__
#ifdef CONFIG_X86_32
#include <linux/user.h>
#include <linux/elfcore.h>
/*
* fill in the user structure for an a.out core dump
*/
static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
{
u16 gs;
/* changed the size calculations - should hopefully work better. lbt */
dump->magic = CMAGIC;
dump->start_code = 0;
dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
dump->u_tsize = ((unsigned long)current->mm->end_code) >> PAGE_SHIFT;
dump->u_dsize = ((unsigned long)(current->mm->brk + (PAGE_SIZE - 1)))
>> PAGE_SHIFT;
dump->u_dsize -= dump->u_tsize;
dump->u_ssize = 0;
dump->u_debugreg[0] = current->thread.debugreg0;
dump->u_debugreg[1] = current->thread.debugreg1;
dump->u_debugreg[2] = current->thread.debugreg2;
dump->u_debugreg[3] = current->thread.debugreg3;
dump->u_debugreg[4] = 0;
dump->u_debugreg[5] = 0;
dump->u_debugreg[6] = current->thread.debugreg6;
dump->u_debugreg[7] = current->thread.debugreg7;
if (dump->start_stack < TASK_SIZE)
dump->u_ssize = ((unsigned long)(TASK_SIZE - dump->start_stack))
>> PAGE_SHIFT;
dump->regs.bx = regs->bx;
dump->regs.cx = regs->cx;
dump->regs.dx = regs->dx;
dump->regs.si = regs->si;
dump->regs.di = regs->di;
dump->regs.bp = regs->bp;
dump->regs.ax = regs->ax;
dump->regs.ds = (u16)regs->ds;
dump->regs.es = (u16)regs->es;
dump->regs.fs = (u16)regs->fs;
savesegment(gs, gs);
dump->regs.orig_ax = regs->orig_ax;
dump->regs.ip = regs->ip;
dump->regs.cs = (u16)regs->cs;
dump->regs.flags = regs->flags;
dump->regs.sp = regs->sp;
dump->regs.ss = (u16)regs->ss;
dump->u_fpvalid = dump_fpu(regs, &dump->i387);
}
#endif /* CONFIG_X86_32 */
#endif /* __KERNEL__ */
#endif /* _ASM_X86_A_OUT_CORE_H */

arch/x86/include/asm/a.out.h (new file, 20 lines)

@@ -0,0 +1,20 @@
#ifndef _ASM_X86_A_OUT_H
#define _ASM_X86_A_OUT_H
struct exec
{
unsigned int a_info; /* Use macros N_MAGIC, etc for access */
unsigned a_text; /* length of text, in bytes */
unsigned a_data; /* length of data, in bytes */
unsigned a_bss; /* length of uninitialized data area for file, in bytes */
unsigned a_syms; /* length of symbol table data in file, in bytes */
unsigned a_entry; /* start address */
unsigned a_trsize; /* length of relocation info for text, in bytes */
unsigned a_drsize; /* length of relocation info for data, in bytes */
};
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#endif /* _ASM_X86_A_OUT_H */

arch/x86/include/asm/acpi.h (new file, 178 lines)

@@ -0,0 +1,178 @@
#ifndef _ASM_X86_ACPI_H
#define _ASM_X86_ACPI_H
/*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <acpi/pdc_intel.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
#define COMPILER_DEPENDENT_INT64 long long
#define COMPILER_DEPENDENT_UINT64 unsigned long long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE() wbinvd()
int __acpi_acquire_global_lock(unsigned int *lock);
int __acpi_release_global_lock(unsigned int *lock);
#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \
((Acq) = __acpi_acquire_global_lock(&facs->global_lock))
#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \
((Acq) = __acpi_release_global_lock(&facs->global_lock))
/*
* Math helper asm macros
*/
#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
asm("divl %2;" \
: "=a"(q32), "=d"(r32) \
: "r"(d32), \
"0"(n_lo), "1"(n_hi))
#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
asm("shrl $1,%2 ;" \
"rcrl $1,%3;" \
: "=r"(n_hi), "=r"(n_lo) \
: "0"(n_hi), "1"(n_lo))
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_ht;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
static inline void disable_acpi(void)
{
acpi_disabled = 1;
acpi_ht = 0;
acpi_pci_disabled = 1;
acpi_noirq = 1;
}
/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
#define FIX_ACPI_PAGES 4
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
static inline void acpi_disable_pci(void)
{
acpi_pci_disabled = 1;
acpi_noirq_set();
}
extern int acpi_irq_balance_set(char *str);
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);
extern unsigned long acpi_wakeup_address;
/* early initialization routine */
extern void acpi_reserve_bootmem(void);
/*
* Check if the CPU can handle C2 and deeper
*/
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
/*
* Early models (<=5) of AMD Opterons are not supposed to go into
* C2 state.
*
* Steppings 0x0A and later are good
*/
if (boot_cpu_data.x86 == 0x0F &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_mask < 0x0A)
return 1;
else if (boot_cpu_has(X86_FEATURE_AMDC1E))
return 1;
else
return max_cstate;
}
#else /* !CONFIG_ACPI */
#define acpi_lapic 0
#define acpi_ioapic 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 1
struct bootnode;
#ifdef CONFIG_ACPI_NUMA
extern int acpi_numa;
extern int acpi_scan_nodes(unsigned long start, unsigned long end);
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
int num_nodes);
#else
static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
int num_nodes)
{
}
#endif
#define acpi_unlazy_tlb(x) leave_mm(x)
#endif /* _ASM_X86_ACPI_H */

arch/x86/include/asm/agp.h (new file, 35 lines)

@@ -0,0 +1,35 @@
#ifndef _ASM_X86_AGP_H
#define _ASM_X86_AGP_H
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
/*
* Functions to keep the agpgart mappings coherent with the MMU. The
* GART gives the CPU a physical alias of pages in memory. The alias
* region is mapped uncacheable. Make sure there are no conflicting
* mappings with different cacheability attributes for the same
* page. This avoids data corruption on some CPUs.
*/
#define map_page_into_agp(page) set_pages_uc(page, 1)
#define unmap_page_from_agp(page) set_pages_wb(page, 1)
/*
* Could use CLFLUSH here if the cpu supports it. But then it would
* need to be called for each cacheline of the whole page so it may
* not be worth it. Would need a page for it.
*/
#define flush_agp_cache() wbinvd()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
#define gart_to_phys(x) (x)
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
#define alloc_gatt_pages(order) \
((char *)__get_free_pages(GFP_KERNEL, (order)))
#define free_gatt_pages(table, order) \
free_pages((unsigned long)(table), (order))
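/*
 * A driver-side usage sketch under stated assumptions (the GATT
 * programming step is only indicated in a comment; linux/mm.h is
 * assumed for struct page): keep CPU caching coherent with the
 * GART alias, as described above.
 */
static inline void example_agp_map(struct page *page)
{
	map_page_into_agp(page);	/* remap as uncacheable */
	/* ... store phys_to_gart(page_to_phys(page)) in a GATT entry ... */
}

static inline void example_agp_unmap(struct page *page)
{
	unmap_page_from_agp(page);	/* restore write-back caching */
	flush_agp_cache();		/* wbinvd: flush stale lines */
}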
#endif /* _ASM_X86_AGP_H */

arch/x86/include/asm/alternative-asm.h (new file, 22 lines)

@@ -0,0 +1,22 @@
#ifdef __ASSEMBLY__
#ifdef CONFIG_X86_32
# define X86_ALIGN .long
#else
# define X86_ALIGN .quad
#endif
#ifdef CONFIG_SMP
.macro LOCK_PREFIX
1: lock
.section .smp_locks,"a"
.align 4
X86_ALIGN 1b
.previous
.endm
#else
.macro LOCK_PREFIX
.endm
#endif
#endif /* __ASSEMBLY__ */

arch/x86/include/asm/alternative.h (new file, 183 lines)

@@ -0,0 +1,183 @@
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/asm.h>
/*
* Alternative inline assembly for SMP.
*
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
* SMP alternatives use the same data structures as the other
* alternatives and the X86_FEATURE_UP flag to indicate the case of a
* UP system running an SMP kernel. The existing apply_alternatives()
* works fine for patching a SMP kernel for UP.
*
* The SMP alternative tables can be kept after boot and contain both
* UP and SMP versions of the instructions to allow switching back to
* SMP at runtime, when hotplugging in a new CPU, which is especially
* useful in virtualized environments.
*
* The very common lock prefix is handled as a special case in a
* separate table which is a pure address list without replacement ptr
* and size information. That keeps the table sizes small.
*/
#ifdef CONFIG_SMP
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661f\n" /* address */ \
".previous\n" \
"661:\n\tlock; "
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX ""
#endif
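/*
 * A minimal sketch of how LOCK_PREFIX is consumed (the same pattern
 * appears verbatim in atomic_32.h later in this series): the prefix is
 * emitted at label 661 and its address recorded in .smp_locks, so a UP
 * boot can patch the prefix back out.
 */
static inline void example_locked_inc(int *p)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (*p));
}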
/* This must be included *after* the definition of LOCK_PREFIX */
#include <asm/cpufeature.h>
struct alt_instr {
u8 *instr; /* original instruction */
u8 *replacement;
u8 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction, <= instrlen */
u8 pad1;
#ifdef CONFIG_X86_64
u32 pad2;
#endif
};
extern void alternative_instructions(void);
extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
struct module;
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end);
extern void alternatives_smp_module_del(struct module *mod);
extern void alternatives_smp_switch(int smp);
#else
static inline void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,
void *text, void *text_end) {}
static inline void alternatives_smp_module_del(struct module *mod) {}
static inline void alternatives_smp_switch(int smp) {}
#endif /* CONFIG_SMP */
const unsigned char *const *find_nop_table(void);
/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows the use of optimized instructions even on generic binary
* kernels.
*
* The length of oldinstr must be greater than or equal to the length
* of newinstr; it can be padded with nops as needed.
*
* For non-barrier-like inlines, please define new variants
* without volatile and memory clobber.
*/
#define alternative(oldinstr, newinstr, feature) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661b\n" /* label */ \
_ASM_PTR "663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature) : "memory")
/*
* Alternative inline assembly with input.
*
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* It is best to use constraints that are fixed size (like "r", as in (%1)).
* If you use variable-sized constraints like "m" or "g" in the
* replacement, make sure to pad to the worst-case length.
*/
#define alternative_input(oldinstr, newinstr, feature, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661b\n" /* label */ \
_ASM_PTR "663f\n" /* new instruction */ \
" .byte %c0\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" :: "i" (feature), ##input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
asm volatile ("661:\n\t" oldinstr "\n662:\n" \
".section .altinstructions,\"a\"\n" \
_ASM_ALIGN "\n" \
_ASM_PTR "661b\n" /* label */ \
_ASM_PTR "663f\n" /* new instruction */ \
" .byte %c[feat]\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
".previous\n" \
".section .altinstr_replacement,\"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
".previous" : output : [feat] "i" (feature), ##input)
/*
* Use this macro if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a, b) a, b
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end);
#else
static inline void apply_paravirt(struct paravirt_patch_site *start,
struct paravirt_patch_site *end)
{}
#define __parainstructions NULL
#define __parainstructions_end NULL
#endif
extern void add_nops(void *insns, unsigned int len);
/*
* Clear and restore the kernel write-protection flag on the local CPU.
* Allows the kernel to edit read-only pages.
* Side-effect: any interrupt handler running between save and restore will have
* the ability to write to read-only pages.
*
* Warning:
* Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
* no thread can be preempted in the instructions being modified (no iret to an
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
* More care must be taken when modifying code in the SMP case because of
* Intel's errata.
* On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
* The _early version expects the memory to already be RW.
*/
extern void *text_poke(void *addr, const void *opcode, size_t len);
extern void *text_poke_early(void *addr, const void *opcode, size_t len);
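/*
 * A sketch only (breakpoint insertion is one classic caller): poke a
 * single int3 opcode over an instruction, subject to the warning above.
 */
static inline void example_poke_int3(void *addr)
{
	static const unsigned char int3 = 0xcc;

	text_poke(addr, &int3, 1);
}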
#endif /* _ASM_X86_ALTERNATIVE_H */

arch/x86/include/asm/amd_iommu.h (new file, 35 lines)

@@ -0,0 +1,35 @@
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_AMD_IOMMU_H
#define _ASM_X86_AMD_IOMMU_H
#include <linux/irqreturn.h>
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
extern void amd_iommu_detect(void);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
#endif
#endif /* _ASM_X86_AMD_IOMMU_H */

arch/x86/include/asm/amd_iommu_types.h (new file, 404 lines)

@@ -0,0 +1,404 @@
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
/*
* some size calculation constants
*/
#define DEV_TABLE_ENTRY_SIZE 32
#define ALIAS_TABLE_ENTRY_SIZE 2
#define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
/* Length of the MMIO region for the AMD IOMMU */
#define MMIO_REGION_LENGTH 0x4000
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
#define MMIO_RANGE_OFFSET 0x0c
#define MMIO_MISC_OFFSET 0x10
/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK 0xff000000
#define MMIO_RANGE_FD_MASK 0x00ff0000
#define MMIO_RANGE_BUS_MASK 0x0000ff00
#define MMIO_RANGE_LD_SHIFT 24
#define MMIO_RANGE_FD_SHIFT 16
#define MMIO_RANGE_BUS_SHIFT 8
#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x) ((x) & 0x1f)
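/*
 * A sketch modeled on the driver's init path (struct amd_iommu and
 * calc_devid() are defined further down in this file; linux/pci.h is
 * assumed): the range capability lives in PCI config space at
 * cap_ptr + MMIO_RANGE_OFFSET and bounds the devices this IOMMU handles.
 */
static inline void example_read_range(struct amd_iommu *iommu)
{
	u32 range;

	pci_read_config_dword(iommu->dev,
			      iommu->cap_ptr + MMIO_RANGE_OFFSET, &range);
	iommu->first_device = calc_devid(MMIO_GET_BUS(range),
					 MMIO_GET_FD(range));
	iommu->last_device = calc_devid(MMIO_GET_BUS(range),
					MMIO_GET_LD(range));
}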
/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
#define MMIO_EXCL_ALLOW_MASK 0x02ULL
/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET 0x0000
#define MMIO_CMD_BUF_OFFSET 0x0008
#define MMIO_EVT_BUF_OFFSET 0x0010
#define MMIO_CONTROL_OFFSET 0x0018
#define MMIO_EXCL_BASE_OFFSET 0x0020
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
/* MMIO status bits */
#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
#define EVENT_TYPE_SHIFT 28
#define EVENT_TYPE_MASK 0xf
#define EVENT_TYPE_ILL_DEV 0x1
#define EVENT_TYPE_IO_FAULT 0x2
#define EVENT_TYPE_DEV_TAB_ERR 0x3
#define EVENT_TYPE_PAGE_TAB_ERR 0x4
#define EVENT_TYPE_ILL_CMD 0x5
#define EVENT_TYPE_CMD_HARD_ERR 0x6
#define EVENT_TYPE_IOTLB_INV_TO 0x7
#define EVENT_TYPE_INV_DEV_REQ 0x8
#define EVENT_DEVID_MASK 0xffff
#define EVENT_DEVID_SHIFT 0
#define EVENT_DOMID_MASK 0xffff
#define EVENT_DOMID_SHIFT 0
#define EVENT_FLAGS_MASK 0xfff
#define EVENT_FLAGS_SHIFT 0x10
/* feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
#define CONTROL_HT_TUN_EN 0x01ULL
#define CONTROL_EVT_LOG_EN 0x02ULL
#define CONTROL_EVT_INT_EN 0x03ULL
#define CONTROL_COMWAIT_EN 0x04ULL
#define CONTROL_PASSPW_EN 0x08ULL
#define CONTROL_RESPASSPW_EN 0x09ULL
#define CONTROL_COHERENT_EN 0x0aULL
#define CONTROL_ISOC_EN 0x0bULL
#define CONTROL_CMDBUF_EN 0x0cULL
#define CONTROL_PPFLOG_EN 0x0dULL
#define CONTROL_PPFINT_EN 0x0eULL
/* command specific defines */
#define CMD_COMPL_WAIT 0x01
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_COMPL_WAIT_STORE_MASK 0x01
#define CMD_COMPL_WAIT_INT_MASK 0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID 0x00
#define DEV_ENTRY_TRANSLATION 0x01
#define DEV_ENTRY_IR 0x3d
#define DEV_ENTRY_IW 0x3e
#define DEV_ENTRY_NO_PAGE_FAULT 0x62
#define DEV_ENTRY_EX 0x67
#define DEV_ENTRY_SYSMGT1 0x68
#define DEV_ENTRY_SYSMGT2 0x69
#define DEV_ENTRY_INIT_PASS 0xb8
#define DEV_ENTRY_EINT_PASS 0xb9
#define DEV_ENTRY_NMI_PASS 0xba
#define DEV_ENTRY_LINT0_PASS 0xbe
#define DEV_ENTRY_LINT1_PASS 0xbf
#define DEV_ENTRY_MODE_MASK 0x07
#define DEV_ENTRY_MODE_SHIFT 0x09
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
/* constants for event buffer handling */
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
#define IOMMU_PDE_NL_0 0x000ULL
#define IOMMU_PDE_NL_1 0x200ULL
#define IOMMU_PDE_NL_2 0x400ULL
#define IOMMU_PDE_NL_3 0x600ULL
#define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL)
#define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL)
#define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL)
#define IOMMU_MAP_SIZE_L1 (1ULL << 21)
#define IOMMU_MAP_SIZE_L2 (1ULL << 30)
#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
#define IOMMU_L1_PDE(address) \
((address) | IOMMU_PDE_NL_1 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define IOMMU_L2_PDE(address) \
((address) | IOMMU_PDE_NL_2 | IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
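/*
 * A sketch of one page-table walk step using the helpers above
 * (function and variable names are illustrative; phys_to_virt() needs
 * asm/io.h, assumed here): follow the level-2 entry for an I/O virtual
 * address down to the next-level table.
 */
static inline u64 *example_walk_l2(u64 *pt_root, unsigned long iova)
{
	u64 pte = pt_root[IOMMU_PTE_L2_INDEX(iova)];

	if (!IOMMU_PTE_PRESENT(pte))
		return NULL;
	return IOMMU_PTE_PAGE(pte);	/* kernel-virtual next-level table */
}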
#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB 24
#define IOMMU_CAP_NPCACHE 26
#define MAX_DOMAIN_ID 65536
/* FIXME: move this macro to <linux/pci.h> */
#define PCI_BUS(x) (((x) >> 8) & 0xff)
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
*/
struct protection_domain {
spinlock_t lock; /* mostly used to lock the page table */
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
void *priv; /* private data */
};
/*
* Data container for a dma_ops specific protection domain
*/
struct dma_ops_domain {
struct list_head list;
/* generic protection domain information */
struct protection_domain domain;
/* size of the aperture for the mappings */
unsigned long aperture_size;
/* address we start to search for free addresses */
unsigned long next_bit;
/* address allocation bitmap */
unsigned long *bitmap;
/*
* Array of PTE pages for the aperture. In this array we save all the
* leaf pages of the domain page table used for the aperture. This way
* we don't need to walk the page table to find a specific PTE. We can
* just calculate its address in constant time.
*/
u64 **pte_pages;
/* This will be set to true when TLB needs to be flushed */
bool need_flush;
/*
* if this is a preallocated domain, keep the device for which it was
* preallocated in this variable
*/
u16 target_dev;
};
/*
* Structure where we save information about one hardware AMD IOMMU in the
* system.
*/
struct amd_iommu {
struct list_head list;
/* locks the accesses to the hardware */
spinlock_t lock;
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there is more than one AMD IOMMU capability
* pointer.
*/
u16 cap_ptr;
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
u8 *mmio_base;
/* capabilities of that IOMMU read from ACPI */
u32 cap;
/* pci domain of this IOMMU */
u16 pci_seg;
/* first device this IOMMU handles. read from PCI */
u16 first_device;
/* last device this IOMMU handles. read from PCI */
u16 last_device;
/* start of exclusion range of that IOMMU */
u64 exclusion_start;
/* length of exclusion range of that IOMMU */
u64 exclusion_length;
/* command buffer virtual address */
u8 *cmd_buf;
/* size of command buffer */
u32 cmd_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
/* size of event buffer */
u32 evt_buf_size;
/* MSI number for event interrupt */
u16 evt_msi_num;
/* if one, we need to send a completion wait command */
int need_sync;
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
};
/*
* List with all IOMMUs in the system. This list is not locked because it is
* only written and read at driver initialization or suspend time
*/
extern struct list_head amd_iommu_list;
/*
* Structure defining one entry in the device table
*/
struct dev_table_entry {
u32 data[8];
};
/*
* One entry for unity mappings parsed out of the ACPI table.
*/
struct unity_map_entry {
struct list_head list;
/* starting device id this entry is used for (including) */
u16 devid_start;
/* end device id this entry is used for (including) */
u16 devid_end;
/* start address to unity map (including) */
u64 address_start;
/* end address to unity map (including) */
u64 address_end;
/* required protection */
int prot;
};
/*
* List of all unity mappings. It is not locked because at runtime it is only
* read. It is created at ACPI table parsing time.
*/
extern struct list_head amd_iommu_unity_map;
/*
* Data structures for device handling
*/
/*
* Device table used by hardware. Read and write accesses by software are
* locked with the amd_iommu_pd_table lock.
*/
extern struct dev_table_entry *amd_iommu_dev_table;
/*
* Alias table to map requestor IDs to device IDs. Not locked because it
* is only read at runtime.
*/
extern u16 *amd_iommu_alias_table;
/*
* Reverse lookup table to find the IOMMU which translates a specific device.
*/
extern struct amd_iommu **amd_iommu_rlookup_table;
/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;
/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;
/* data structures for protection domain handling */
extern struct protection_domain **amd_iommu_pd_table;
/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;
/* will be 1 if device isolation is enabled */
extern int amd_iommu_isolate;
/*
* If true, the addresses will be flushed at unmap time, not when
* they are reused
*/
extern bool amd_iommu_unmap_flush;
/* takes a PCI device id and prints it out in a readable form */
static inline void print_devid(u16 devid, int nl)
{
int bus = devid >> 8;
int dev = devid >> 3 & 0x1f;
int fn = devid & 0x07;
printk("%02x:%02x.%x", bus, dev, fn);
if (nl)
printk("\n");
}
/* takes bus and device/function and returns the device id
* FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
{
return (((u16)bus) << 8) | devfn;
}
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */

arch/x86/include/asm/apic.h (new file, 199 lines)

@@ -0,0 +1,199 @@
#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H
#include <linux/pm.h>
#include <linux/delay.h>
#include <asm/alternative.h>
#include <asm/fixmap.h>
#include <asm/apicdef.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
/*
* Debugging macros
*/
#define APIC_QUIET 0
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
/*
* Define the default level of output to be very little
* This can be turned up by using apic=verbose for more
* information and apic=debug for _lots_ of information.
* apic_verbosity is defined in apic.c
*/
#define apic_printk(v, s, a...) do { \
if ((v) <= apic_verbosity) \
printk(s, ##a); \
} while (0)
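/*
 * A usage sketch (the message is illustrative): an APIC_VERBOSE message
 * is printed only once apic=verbose or apic=debug has raised
 * apic_verbosity.
 */
#define example_report_id(id) \
	apic_printk(APIC_VERBOSE, "local APIC ID: %d\n", (id))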
extern void generic_apic_probe(void);
#ifdef CONFIG_X86_LOCAL_APIC
extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
/*
* Basic functions accessing APICs.
*/
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define setup_boot_clock setup_boot_APIC_clock
#define setup_secondary_clock setup_secondary_APIC_clock
#endif
extern int is_vsmp_box(void);
extern void xapic_wait_icr_idle(void);
extern u32 safe_xapic_wait_icr_idle(void);
extern u64 xapic_icr_read(void);
extern void xapic_icr_write(u32, u32);
extern int setup_profiling_timer(unsigned int);
static inline void native_apic_mem_write(u32 reg, u32 v)
{
volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);
alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP,
ASM_OUTPUT2("=r" (v), "=m" (*addr)),
ASM_OUTPUT2("0" (v), "m" (*addr)));
}
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
}
static inline void native_apic_msr_write(u32 reg, u32 v)
{
if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
reg == APIC_LVR)
return;
wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}
static inline u32 native_apic_msr_read(u32 reg)
{
u32 low, high;
if (reg == APIC_DFR)
return -1;
rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
return low;
}
#ifndef CONFIG_X86_32
extern int x2apic, x2apic_preenabled;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
int msr, msr2;
if (!cpu_has_x2apic)
return 0;
rdmsr(MSR_IA32_APICBASE, msr, msr2);
if (msr & X2APIC_ENABLE)
return 1;
return 0;
}
#else
#define x2apic_enabled() 0
#endif
struct apic_ops {
u32 (*read)(u32 reg);
void (*write)(u32 reg, u32 v);
u64 (*icr_read)(void);
void (*icr_write)(u32 low, u32 high);
void (*wait_icr_idle)(void);
u32 (*safe_wait_icr_idle)(void);
};
extern struct apic_ops *apic_ops;
#define apic_read (apic_ops->read)
#define apic_write (apic_ops->write)
#define apic_icr_read (apic_ops->icr_read)
#define apic_icr_write (apic_ops->icr_write)
#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
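/*
 * A sketch of how an implementation populates apic_ops (modeled on the
 * xAPIC case; the variable name is illustrative), wiring up the
 * functions declared above:
 */
static struct apic_ops example_xapic_ops = {
	.read = native_apic_mem_read,
	.write = native_apic_mem_write,
	.icr_read = xapic_icr_read,
	.icr_write = xapic_icr_write,
	.wait_icr_idle = xapic_wait_icr_idle,
	.safe_wait_icr_idle = safe_xapic_wait_icr_idle,
};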
extern int get_physical_broadcast(void);
#ifdef CONFIG_X86_64
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */
native_apic_msr_write(APIC_EOI, 0);
}
#endif
static inline void ack_APIC_irq(void)
{
/*
* ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
/* Docs say use 0 for future compatibility */
apic_write(APIC_EOI, 0);
}
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern int verify_local_APIC(void);
extern void cache_APIC_registers(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
extern void end_local_APIC_setup(void);
extern void init_apic_mappings(void);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern int APIC_init_uniprocessor(void);
extern void enable_NMI_through_LVT0(void);
/*
* On 32-bit this is mach-xxx local
*/
#ifdef CONFIG_X86_64
extern void early_init_lapic_mapping(void);
extern int apic_is_clustered_box(void);
#else
static inline int apic_is_clustered_box(void)
{
return 0;
}
#endif
extern u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask);
extern u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask);
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */
#endif /* _ASM_X86_APIC_H */

arch/x86/include/asm/apicdef.h (new file, 417 lines)

@@ -0,0 +1,417 @@
#ifndef _ASM_X86_APICDEF_H
#define _ASM_X86_APICDEF_H
/*
* Constants for various Intel APICs. (local APIC, IOAPIC, etc.)
*
* Alan Cox <Alan.Cox@linux.org>, 1995.
* Ingo Molnar <mingo@redhat.com>, 1999, 2000
*/
#define APIC_DEFAULT_PHYS_BASE 0xfee00000
#define APIC_ID 0x20
#define APIC_LVR 0x30
#define APIC_LVR_MASK 0xFF00FF
#define GET_APIC_VERSION(x) ((x) & 0xFFu)
#define GET_APIC_MAXLVT(x) (((x) >> 16) & 0xFFu)
#ifdef CONFIG_X86_32
# define APIC_INTEGRATED(x) ((x) & 0xF0u)
#else
# define APIC_INTEGRATED(x) (1)
#endif
#define APIC_XAPIC(x) ((x) >= 0x14)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
#define APIC_ARBPRI_MASK 0xFFu
#define APIC_PROCPRI 0xA0
#define APIC_EOI 0xB0
#define APIC_EIO_ACK 0x0
#define APIC_RRR 0xC0
#define APIC_LDR 0xD0
#define APIC_LDR_MASK (0xFFu << 24)
#define GET_APIC_LOGICAL_ID(x) (((x) >> 24) & 0xFFu)
#define SET_APIC_LOGICAL_ID(x) (((x) << 24))
#define APIC_ALL_CPUS 0xFFu
#define APIC_DFR 0xE0
#define APIC_DFR_CLUSTER 0x0FFFFFFFul
#define APIC_DFR_FLAT 0xFFFFFFFFul
#define APIC_SPIV 0xF0
#define APIC_SPIV_FOCUS_DISABLED (1 << 9)
#define APIC_SPIV_APIC_ENABLED (1 << 8)
#define APIC_ISR 0x100
#define APIC_ISR_NR 0x8 /* Number of 32 bit ISR registers. */
#define APIC_TMR 0x180
#define APIC_IRR 0x200
#define APIC_ESR 0x280
#define APIC_ESR_SEND_CS 0x00001
#define APIC_ESR_RECV_CS 0x00002
#define APIC_ESR_SEND_ACC 0x00004
#define APIC_ESR_RECV_ACC 0x00008
#define APIC_ESR_SENDILL 0x00020
#define APIC_ESR_RECVILL 0x00040
#define APIC_ESR_ILLREGA 0x00080
#define APIC_ICR 0x300
#define APIC_DEST_SELF 0x40000
#define APIC_DEST_ALLINC 0x80000
#define APIC_DEST_ALLBUT 0xC0000
#define APIC_ICR_RR_MASK 0x30000
#define APIC_ICR_RR_INVALID 0x00000
#define APIC_ICR_RR_INPROG 0x10000
#define APIC_ICR_RR_VALID 0x20000
#define APIC_INT_LEVELTRIG 0x08000
#define APIC_INT_ASSERT 0x04000
#define APIC_ICR_BUSY 0x01000
#define APIC_DEST_LOGICAL 0x00800
#define APIC_DEST_PHYSICAL 0x00000
#define APIC_DM_FIXED 0x00000
#define APIC_DM_LOWEST 0x00100
#define APIC_DM_SMI 0x00200
#define APIC_DM_REMRD 0x00300
#define APIC_DM_NMI 0x00400
#define APIC_DM_INIT 0x00500
#define APIC_DM_STARTUP 0x00600
#define APIC_DM_EXTINT 0x00700
#define APIC_VECTOR_MASK 0x000FF
#define APIC_ICR2 0x310
#define GET_APIC_DEST_FIELD(x) (((x) >> 24) & 0xFF)
#define SET_APIC_DEST_FIELD(x) ((x) << 24)
#define APIC_LVTT 0x320
#define APIC_LVTTHMR 0x330
#define APIC_LVTPC 0x340
#define APIC_LVT0 0x350
#define APIC_LVT_TIMER_BASE_MASK (0x3 << 18)
#define GET_APIC_TIMER_BASE(x) (((x) >> 18) & 0x3)
#define SET_APIC_TIMER_BASE(x) (((x) << 18))
#define APIC_TIMER_BASE_CLKIN 0x0
#define APIC_TIMER_BASE_TMBASE 0x1
#define APIC_TIMER_BASE_DIV 0x2
#define APIC_LVT_TIMER_PERIODIC (1 << 17)
#define APIC_LVT_MASKED (1 << 16)
#define APIC_LVT_LEVEL_TRIGGER (1 << 15)
#define APIC_LVT_REMOTE_IRR (1 << 14)
#define APIC_INPUT_POLARITY (1 << 13)
#define APIC_SEND_PENDING (1 << 12)
#define APIC_MODE_MASK 0x700
#define GET_APIC_DELIVERY_MODE(x) (((x) >> 8) & 0x7)
#define SET_APIC_DELIVERY_MODE(x, y) (((x) & ~0x700) | ((y) << 8))
#define APIC_MODE_FIXED 0x0
#define APIC_MODE_NMI 0x4
#define APIC_MODE_EXTINT 0x7
#define APIC_LVT1 0x360
#define APIC_LVTERR 0x370
#define APIC_TMICT 0x380
#define APIC_TMCCT 0x390
#define APIC_TDCR 0x3E0
#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1 << 2)
#define APIC_TDR_DIV_1 0xB
#define APIC_TDR_DIV_2 0x0
#define APIC_TDR_DIV_4 0x1
#define APIC_TDR_DIV_8 0x2
#define APIC_TDR_DIV_16 0x3
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_EILVT0 0x500
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
#define APIC_EILVT_MSG_FIX 0x0
#define APIC_EILVT_MSG_SMI 0x2
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
#define APIC_EILVT_MASKED (1 << 16)
#define APIC_EILVT1 0x510
#define APIC_EILVT2 0x520
#define APIC_EILVT3 0x530
#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800
#define X2APIC_ENABLE (1UL << 10)
#ifdef CONFIG_X86_32
# define MAX_IO_APICS 64
#else
# define MAX_IO_APICS 128
# define MAX_LOCAL_APIC 32768
#endif
/*
* All x86-64 systems are xAPIC compatible.
* In the following, "apicid" is a physical APIC ID.
*/
#define XAPIC_DEST_CPUS_SHIFT 4
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
#define APIC_CLUSTER(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
#define APIC_CLUSTERID(apicid) (APIC_CLUSTER(apicid) >> XAPIC_DEST_CPUS_SHIFT)
#define APIC_CPUID(apicid) ((apicid) & XAPIC_DEST_CPUS_MASK)
#define NUM_APIC_CLUSTERS ((BAD_APICID + 1) >> XAPIC_DEST_CPUS_SHIFT)
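/*
 * A worked sketch (u8 from linux/types.h assumed): with
 * XAPIC_DEST_CPUS_SHIFT == 4, physical APIC ID 0x23 splits into
 * cluster 0x2 and CPU 0x3 within that cluster.
 */
static inline void example_split_apicid(u8 apicid, u8 *cluster, u8 *cpu)
{
	*cluster = APIC_CLUSTERID(apicid);	/* 0x23 -> 0x2 */
	*cpu = APIC_CPUID(apicid);		/* 0x23 -> 0x3 */
}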
/*
* the local APIC register structure, memory mapped. Not terribly well
* tested, but we might eventually use this one in the future - the
* reason we cannot use it right now is the P5 APIC: it has an erratum
* that forbids 8-bit reads and writes, allowing only 32-bit ones ...
*/
#define u32 unsigned int
struct local_apic {
/*000*/ struct { u32 __reserved[4]; } __reserved_01;
/*010*/ struct { u32 __reserved[4]; } __reserved_02;
/*020*/ struct { /* APIC ID Register */
u32 __reserved_1 : 24,
phys_apic_id : 4,
__reserved_2 : 4;
u32 __reserved[3];
} id;
/*030*/ const
struct { /* APIC Version Register */
u32 version : 8,
__reserved_1 : 8,
max_lvt : 8,
__reserved_2 : 8;
u32 __reserved[3];
} version;
/*040*/ struct { u32 __reserved[4]; } __reserved_03;
/*050*/ struct { u32 __reserved[4]; } __reserved_04;
/*060*/ struct { u32 __reserved[4]; } __reserved_05;
/*070*/ struct { u32 __reserved[4]; } __reserved_06;
/*080*/ struct { /* Task Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} tpr;
/*090*/ const
struct { /* Arbitration Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} apr;
/*0A0*/ const
struct { /* Processor Priority Register */
u32 priority : 8,
__reserved_1 : 24;
u32 __reserved_2[3];
} ppr;
/*0B0*/ struct { /* End Of Interrupt Register */
u32 eoi;
u32 __reserved[3];
} eoi;
/*0C0*/ struct { u32 __reserved[4]; } __reserved_07;
/*0D0*/ struct { /* Logical Destination Register */
u32 __reserved_1 : 24,
logical_dest : 8;
u32 __reserved_2[3];
} ldr;
/*0E0*/ struct { /* Destination Format Register */
u32 __reserved_1 : 28,
model : 4;
u32 __reserved_2[3];
} dfr;
/*0F0*/ struct { /* Spurious Interrupt Vector Register */
u32 spurious_vector : 8,
apic_enabled : 1,
focus_cpu : 1,
__reserved_2 : 22;
u32 __reserved_3[3];
} svr;
/*100*/ struct { /* In Service Register */
/*170*/ u32 bitfield;
u32 __reserved[3];
} isr [8];
/*180*/ struct { /* Trigger Mode Register */
/*1F0*/ u32 bitfield;
u32 __reserved[3];
} tmr [8];
/*200*/ struct { /* Interrupt Request Register */
/*270*/ u32 bitfield;
u32 __reserved[3];
} irr [8];
/*280*/ union { /* Error Status Register */
struct {
u32 send_cs_error : 1,
receive_cs_error : 1,
send_accept_error : 1,
receive_accept_error : 1,
__reserved_1 : 1,
send_illegal_vector : 1,
receive_illegal_vector : 1,
illegal_register_address : 1,
__reserved_2 : 24;
u32 __reserved_3[3];
} error_bits;
struct {
u32 errors;
u32 __reserved_3[3];
} all_errors;
} esr;
/*290*/ struct { u32 __reserved[4]; } __reserved_08;
/*2A0*/ struct { u32 __reserved[4]; } __reserved_09;
/*2B0*/ struct { u32 __reserved[4]; } __reserved_10;
/*2C0*/ struct { u32 __reserved[4]; } __reserved_11;
/*2D0*/ struct { u32 __reserved[4]; } __reserved_12;
/*2E0*/ struct { u32 __reserved[4]; } __reserved_13;
/*2F0*/ struct { u32 __reserved[4]; } __reserved_14;
/*300*/ struct { /* Interrupt Command Register 1 */
u32 vector : 8,
delivery_mode : 3,
destination_mode : 1,
delivery_status : 1,
__reserved_1 : 1,
level : 1,
trigger : 1,
__reserved_2 : 2,
shorthand : 2,
__reserved_3 : 12;
u32 __reserved_4[3];
} icr1;
/*310*/ struct { /* Interrupt Command Register 2 */
union {
u32 __reserved_1 : 24,
phys_dest : 4,
__reserved_2 : 4;
u32 __reserved_3 : 24,
logical_dest : 8;
} dest;
u32 __reserved_4[3];
} icr2;
/*320*/ struct { /* LVT - Timer */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
timer_mode : 1,
__reserved_3 : 14;
u32 __reserved_4[3];
} lvt_timer;
/*330*/ struct { /* LVT - Thermal Sensor */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_thermal;
/*340*/ struct { /* LVT - Performance Counter */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_pc;
/*350*/ struct { /* LVT - LINT0 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint0;
/*360*/ struct { /* LVT - LINT1 */
u32 vector : 8,
delivery_mode : 3,
__reserved_1 : 1,
delivery_status : 1,
polarity : 1,
remote_irr : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15;
u32 __reserved_3[3];
} lvt_lint1;
/*370*/ struct { /* LVT - Error */
u32 vector : 8,
__reserved_1 : 4,
delivery_status : 1,
__reserved_2 : 3,
mask : 1,
__reserved_3 : 15;
u32 __reserved_4[3];
} lvt_error;
/*380*/ struct { /* Timer Initial Count Register */
u32 initial_count;
u32 __reserved_2[3];
} timer_icr;
/*390*/ const
struct { /* Timer Current Count Register */
u32 curr_count;
u32 __reserved_2[3];
} timer_ccr;
/*3A0*/ struct { u32 __reserved[4]; } __reserved_16;
/*3B0*/ struct { u32 __reserved[4]; } __reserved_17;
/*3C0*/ struct { u32 __reserved[4]; } __reserved_18;
/*3D0*/ struct { u32 __reserved[4]; } __reserved_19;
/*3E0*/ struct { /* Timer Divide Configuration Register */
u32 divisor : 4,
__reserved_1 : 28;
u32 __reserved_2[3];
} timer_dcr;
/*3F0*/ struct { u32 __reserved[4]; } __reserved_20;
} __attribute__ ((packed));
#undef u32
#ifdef CONFIG_X86_32
#define BAD_APICID 0xFFu
#else
#define BAD_APICID 0xFFFFu
#endif
#endif /* _ASM_X86_APICDEF_H */

arch/x86/include/asm/arch_hooks.h (new file, 26 lines)

@@ -0,0 +1,26 @@
#ifndef _ASM_X86_ARCH_HOOKS_H
#define _ASM_X86_ARCH_HOOKS_H
#include <linux/interrupt.h>
/*
* linux/include/asm/arch_hooks.h
*
* define the architecture specific hooks
*/
/* these aren't arch hooks, they are generic routines
* that can be used by the hooks */
extern void init_ISA_irqs(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id);
/* these are the defined hooks */
extern void intr_init_hook(void);
extern void pre_intr_init_hook(void);
extern void pre_setup_arch_hook(void);
extern void trap_init_hook(void);
extern void pre_time_init_hook(void);
extern void time_init_hook(void);
extern void mca_nmi_hook(void);
#endif /* _ASM_X86_ARCH_HOOKS_H */

arch/x86/include/asm/asm.h (new file, 47 lines)

@@ -0,0 +1,47 @@
#ifndef _ASM_X86_ASM_H
#define _ASM_X86_ASM_H
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
# define __ASM_EX_SEC .section __ex_table
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_EX_SEC " .section __ex_table,\"a\"\n"
#endif
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
#endif
#define __ASM_SIZE(inst) __ASM_SEL(inst##l, inst##q)
#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
#define _ASM_SP __ASM_REG(sp)
#define _ASM_BP __ASM_REG(bp)
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
# define _ASM_EXTABLE(from,to) \
__ASM_EX_SEC \
_ASM_ALIGN "\n" \
_ASM_PTR #from "," #to "\n" \
" .previous\n"
#endif /* _ASM_X86_ASM_H */

arch/x86/include/asm/atomic.h (new file, 5 lines)

@@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "atomic_32.h"
#else
# include "atomic_64.h"
#endif

arch/x86/include/asm/atomic_32.h (new file, 259 lines)

@@ -0,0 +1,259 @@
#ifndef _ASM_X86_ATOMIC_32_H
#define _ASM_X86_ATOMIC_32_H
#include <linux/compiler.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
typedef struct {
int counter;
} atomic_t;
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
: "ir" (i));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "incl %0; sete %1"
: "+m" (v->counter), "=qm" (c)
: : "memory");
return c != 0;
}
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
: "+m" (v->counter), "=qm" (c)
: "ir" (i) : "memory");
return c;
}
/**
* atomic_add_return - add integer and return
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
int __i;
#ifdef CONFIG_M386
unsigned long flags;
if (unlikely(boot_cpu_data.x86 <= 3))
goto no_xadd;
#endif
/* Modern 486+ processor */
__i = i;
asm volatile(LOCK_PREFIX "xaddl %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
return i + __i;
#ifdef CONFIG_M386
no_xadd: /* Legacy 386 processor */
local_irq_save(flags);
__i = atomic_read(v);
atomic_set(v, i + __i);
local_irq_restore(flags);
return i + __i;
#endif
}
/**
* atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
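/*
 * A usage sketch for atomic_add_unless()/atomic_inc_not_zero() (the
 * refcount naming is illustrative): take a reference only while the
 * object is still live, i.e. its count has not already dropped to zero.
 */
static inline int example_tryget(atomic_t *refcnt)
{
	return atomic_inc_not_zero(refcnt);	/* 0 means already dying */
}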
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" (mask), "m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* _ASM_X86_ATOMIC_32_H */

arch/x86/include/asm/atomic_64.h (new file, 473 lines)

@@ -0,0 +1,473 @@
#ifndef _ASM_X86_ATOMIC_64_H
#define _ASM_X86_ATOMIC_64_H
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
/* atomic_t should be a 32-bit signed type */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
typedef struct {
int counter;
} atomic_t;
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) (((v)->counter) = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static inline void atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "=m" (v->counter)
: "ir" (i), "m" (v->counter));
}
/**
* atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "=m" (v->counter)
: "ir" (i), "m" (v->counter));
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
: "=m" (v->counter), "=qm" (c)
: "ir" (i), "m" (v->counter) : "memory");
return c;
}
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static inline void atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "decl %0; sete %1"
: "=m" (v->counter), "=qm" (c)
: "m" (v->counter) : "memory");
return c != 0;
}
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic_inc_and_test(atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "incl %0; sete %1"
: "=m" (v->counter), "=qm" (c)
: "m" (v->counter) : "memory");
return c != 0;
}
/**
* atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic_add_negative(int i, atomic_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
: "=m" (v->counter), "=qm" (c)
: "ir" (i), "m" (v->counter) : "memory");
return c;
}
/**
* atomic_add_return - add and return
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline int atomic_add_return(int i, atomic_t *v)
{
int __i = i;
asm volatile(LOCK_PREFIX "xaddl %0, %1"
: "+r" (i), "+m" (v->counter)
: : "memory");
return i + __i;
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
}
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
/* A 64-bit atomic type */
typedef struct {
long counter;
} atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
/**
* atomic64_read - read atomic64 variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
#define atomic64_read(v) ((v)->counter)
/**
* atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic64_set(v, i) (((v)->counter) = (i))
/**
* atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static inline void atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
}
/**
* atomic64_sub - subtract integer from atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
: "er" (i), "m" (v->counter));
}
/**
* atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_sub_and_test(long i, atomic64_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
: "=m" (v->counter), "=qm" (c)
: "er" (i), "m" (v->counter) : "memory");
return c;
}
/**
* atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static inline void atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static inline void atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
/**
* atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline int atomic64_dec_and_test(atomic64_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "decq %0; sete %1"
: "=m" (v->counter), "=qm" (c)
: "m" (v->counter) : "memory");
return c != 0;
}
/**
* atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline int atomic64_inc_and_test(atomic64_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "incq %0; sete %1"
: "=m" (v->counter), "=qm" (c)
: "m" (v->counter) : "memory");
return c != 0;
}
/**
* atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline int atomic64_add_negative(long i, atomic64_t *v)
{
unsigned char c;
asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
: "=m" (v->counter), "=qm" (c)
: "er" (i), "m" (v->counter) : "memory");
return c;
}
/**
* atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static inline long atomic64_add_return(long i, atomic64_t *v)
{
long __i = i;
asm volatile(LOCK_PREFIX "xaddq %0, %1;"
: "+r" (i), "+m" (v->counter)
: : "memory");
return i + __i;
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
{
return atomic64_add_return(-i, v);
}
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
/**
* atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int c, old;
c = atomic_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
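/*
 * Usage sketch (editorial addition, not part of the original header):
 * the classic use of atomic_add_unless()/atomic_inc_not_zero() is
 * taking a reference only while the count is still non-zero, so a
 * racing free-at-zero path cannot resurrect the object.  'struct obj'
 * and obj_get() are hypothetical names.
 */
#if 0	/* example only */
struct obj { atomic_t refcnt; };

static inline int obj_get(struct obj *o)
{
	/* returns 0 if refcnt had already dropped to zero */
	return atomic_inc_not_zero(&o->refcnt);
}
#endif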
/**
* atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
long c, old;
c = atomic64_read(v);
for (;;) {
if (unlikely(c == (u)))
break;
old = atomic64_cmpxchg((v), c, c + (a));
if (likely(old == c))
break;
c = old;
}
return c != (u);
}
/**
* atomic_inc_short - increment of a short integer
 * @v: pointer to type short int
 *
 * Atomically adds 1 to @v.
 * Returns the new value of @v.
*/
static inline short int atomic_inc_short(short int *v)
{
asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
return *v;
}
/**
 * atomic_or_long - OR a value into a long integer
 * @v1: pointer to type unsigned long
 * @v2: value of type unsigned long
 *
 * Atomically ORs @v2 into *@v1.
 * Does not return a value.
*/
static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
{
asm(LOCK_PREFIX "orq %1, %0" : "+m" (*v1) : "r" (v2));
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
/* These are x86-specific, used by some header files */
#define atomic_clear_mask(mask, addr) \
asm volatile(LOCK_PREFIX "andl %0,%1" \
: : "r" (~(mask)), "m" (*(addr)) : "memory")
#define atomic_set_mask(mask, addr) \
asm volatile(LOCK_PREFIX "orl %0,%1" \
: : "r" ((unsigned)(mask)), "m" (*(addr)) \
: "memory")
/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* _ASM_X86_ATOMIC_64_H */


@@ -0,0 +1,12 @@
#ifndef _ASM_X86_AUXVEC_H
#define _ASM_X86_AUXVEC_H
/*
* Architecture-neutral AT_ values in 0-17, leave some room
* for more of them, start the x86-specific ones at 32.
*/
#ifdef __i386__
#define AT_SYSINFO 32
#endif
#define AT_SYSINFO_EHDR 33
#endif /* _ASM_X86_AUXVEC_H */


@@ -0,0 +1,139 @@
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
#define esr_disable (1)
static inline int apic_id_registered(void)
{
return (1);
}
static inline cpumask_t target_cpus(void)
{
#ifdef CONFIG_SMP
return cpu_online_map;
#else
return cpumask_of_cpu(0);
#endif
}
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target proc */
#define NO_BALANCE_IRQ (0)
#define WAKE_SECONDARY_VIA_INIT
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return (0);
}
static inline unsigned long check_apicid_present(int bit)
{
return (1);
}
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long val, id;
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
id = xapic_phys_to_log_apicid(cpu);
val |= SET_APIC_LOGICAL_ID(id);
return val;
}
/*
* Set up the logical destination ID.
*
 * Intel recommends setting DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
static inline void init_apic_ldr(void)
{
unsigned long val;
int cpu = smp_processor_id();
apic_write(APIC_DFR, APIC_DFR_VALUE);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}
static inline void setup_apic_routing(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Physflat", nr_ioapics);
}
static inline int multi_timer_check(int apic, int irq)
{
return (0);
}
static inline int apicid_to_node(int logical_apicid)
{
return apicid_2_node[hard_smp_processor_id()];
}
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (mps_cpu < NR_CPUS)
return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
return physid_mask_of_physid(phys_apicid);
}
extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
if (cpu >= NR_CPUS)
return BAD_APICID;
return cpu_physical_id(cpu);
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
return physids_promote(0xFFL);
}
static inline void setup_portio_remap(void)
{
}
static inline void enable_apic_mode(void)
{
}
static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
{
return (1);
}
/* As we are using single CPU as destination, pick only one CPU here */
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
{
int cpu;
int apicid;
cpu = first_cpu(cpumask);
apicid = cpu_to_logical_apicid(cpu);
return apicid;
}
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif /* __ASM_MACH_APIC_H */


@@ -0,0 +1,13 @@
#ifndef __ASM_MACH_APICDEF_H
#define __ASM_MACH_APICDEF_H
#define APIC_ID_MASK (0xFF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
return (((x)>>24)&0xFF);
}
#define GET_APIC_ID(x) get_apic_id(x)
#endif


@@ -0,0 +1,25 @@
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
void send_IPI_mask_sequence(cpumask_t mask, int vector);
static inline void send_IPI_mask(cpumask_t mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
send_IPI_mask(mask, vector);
}
static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_map, vector);
}
#endif /* __ASM_MACH_IPI_H */


@@ -0,0 +1,36 @@
#ifndef _ASM_X86_BIOS_EBDA_H
#define _ASM_X86_BIOS_EBDA_H
#include <asm/io.h>
/*
* there is a real-mode segmented pointer pointing to the
* 4K EBDA area at 0x40E.
*/
static inline unsigned int get_bios_ebda(void)
{
unsigned int address = *(unsigned short *)phys_to_virt(0x40E);
address <<= 4;
return address; /* 0 means none */
}
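/*
 * Usage sketch (editorial addition): callers treat a zero return as
 * "no EBDA present":
 *
 *	unsigned int ebda = get_bios_ebda();
 *	if (ebda)
 *		...the EBDA starts at physical address 'ebda'...
 */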
void reserve_ebda_region(void);
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
/*
* This is obviously not a great place for this, but we want to be
* able to scatter it around anywhere in the kernel.
*/
void check_for_bios_corruption(void);
void start_periodic_check_for_corruption(void);
#else
static inline void check_for_bios_corruption(void)
{
}
static inline void start_periodic_check_for_corruption(void)
{
}
#endif
#endif /* _ASM_X86_BIOS_EBDA_H */


@@ -0,0 +1,451 @@
#ifndef _ASM_X86_BITOPS_H
#define _ASM_X86_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*/
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/alternative.h>
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
* was cleared before the operation and != 0 if it was not.
*
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 1)
/* Technically wrong, but this avoids compilation errors on some gcc
versions. */
#define BITOP_ADDR(x) "=m" (*(volatile long *) (x))
#else
#define BITOP_ADDR(x) "+m" (*(volatile long *) (x))
#endif
#define ADDR BITOP_ADDR(addr)
/*
* We do the locked ops that don't return the old value as
* a mask operation on a byte.
*/
#define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
#define CONST_MASK(nr) (1 << ((nr) & 7))
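/*
 * Worked example (editorial note): for a constant nr = 13,
 * CONST_MASK_ADDR() addresses the byte at offset 13 >> 3 = 1 and
 * CONST_MASK() yields 1 << (13 & 7) = 0x20, so set_bit() below can
 * emit a single "orb $0x20" on that byte instead of a bts.
 */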
/**
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* This function is atomic and may not be reordered. See __set_bit()
* if you do not require the atomic guarantees.
*
* Note: there are no guarantees that this function will not be reordered
* on non x86 architectures, so if you are writing portable code,
* make sure not to rely on its reordering guarantees.
*
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)CONST_MASK(nr))
: "memory");
} else {
asm volatile(LOCK_PREFIX "bts %1,%0"
: BITOP_ADDR(addr) : "Ir" (nr) : "memory");
}
}
/**
* __set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
* Unlike set_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
* clear_bit() is atomic and may not be reordered. However, it does
* not contain a memory barrier, so if it is used for locking purposes,
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
: CONST_MASK_ADDR(nr, addr)
: "iq" ((u8)~CONST_MASK(nr)));
} else {
asm volatile(LOCK_PREFIX "btr %1,%0"
: BITOP_ADDR(addr)
: "Ir" (nr));
}
}
/*
* clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
 * clear_bit_unlock() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
/*
* __clear_bit_unlock - Clears a bit in memory
* @nr: Bit to clear
* @addr: Address to start counting from
*
 * __clear_bit_unlock() is non-atomic and implies release semantics before the memory
* operation. It can be used for an unlock if no other CPUs can concurrently
* modify other bits in the word.
*
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
/**
* __change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
* Unlike change_bit(), this function is non-atomic and may be reordered.
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
/**
* change_bit - Toggle a bit in memory
* @nr: Bit to change
* @addr: Address to start counting from
*
* change_bit() is atomic and may not be reordered.
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
static inline void change_bit(int nr, volatile unsigned long *addr)
{
asm volatile(LOCK_PREFIX "btc %1,%0" : ADDR : "Ir" (nr));
}
/**
* test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "bts %2,%1\n\t"
"sbb %0,%0" : "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
/**
* test_and_set_bit_lock - Set a bit and return its old value for lock
* @nr: Bit to set
* @addr: Address to count from
*
* This is the same as test_and_set_bit on x86.
*/
static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
/**
* __test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm("bts %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/**
* test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
/**
* __test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
* This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btr %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr));
return oldbit;
}
/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile("btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR
: "Ir" (nr) : "memory");
return oldbit;
}
/**
* test_and_change_bit - Change a bit and return its old value
* @nr: Bit to change
* @addr: Address to count from
*
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
int oldbit;
asm volatile(LOCK_PREFIX "btc %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit), ADDR : "Ir" (nr) : "memory");
return oldbit;
}
static inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
}
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
{
int oldbit;
asm volatile("bt %2,%1\n\t"
"sbb %0,%0"
: "=r" (oldbit)
: "m" (*(unsigned long *)addr), "Ir" (nr));
return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
* test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
#define test_bit(nr, addr) \
(__builtin_constant_p((nr)) \
? constant_test_bit((nr), (addr)) \
: variable_test_bit((nr), (addr)))
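/*
 * Dispatch sketch (editorial addition): with a compile-time constant
 * bit number test_bit() resolves to constant_test_bit(), which folds
 * to a load and mask; a runtime value takes the "bt"-based
 * variable_test_bit() path.  The names below are hypothetical.
 */
#if 0	/* example only */
#define INIT_DONE	0			/* constant_test_bit() path */

static inline int chan_busy(const unsigned long *flags, int chan)
{
	return test_bit(chan, flags);		/* variable_test_bit() path */
}
#endif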
/**
* __ffs - find first set bit in word
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static inline unsigned long __ffs(unsigned long word)
{
asm("bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
/**
* ffz - find first zero bit in word
* @word: The word to search
*
* Undefined if no zero exists, so code should check against ~0UL first.
*/
static inline unsigned long ffz(unsigned long word)
{
asm("bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
}
/*
 * __fls - find last set bit in word
* @word: The word to search
*
* Undefined if no set bit exists, so code should check against 0 first.
*/
static inline unsigned long __fls(unsigned long word)
{
asm("bsr %1,%0"
: "=r" (word)
: "rm" (word));
return word;
}
#ifdef __KERNEL__
/**
* ffs - find first set bit in word
* @x: the word to search
*
* This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the other bitops.
*
* ffs(value) returns 0 if value is 0 or the position of the first
* set bit if value is nonzero. The first (least significant) bit
* is at position 1.
*/
static inline int ffs(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
/**
* fls - find last set bit in word
* @x: the word to search
*
 * This is defined in a similar way to the libc and compiler builtin
* ffs, but returns the position of the most significant set bit.
*
* fls(value) returns 0 if value is 0 or the position of the last
* set bit if value is nonzero. The last (most significant) bit is
* at position 32.
*/
static inline int fls(int x)
{
int r;
#ifdef CONFIG_X86_CMOV
asm("bsrl %1,%0\n\t"
"cmovzl %2,%0"
: "=&r" (r) : "rm" (x), "rm" (-1));
#else
asm("bsrl %1,%0\n\t"
"jnz 1f\n\t"
"movl $-1,%0\n"
"1:" : "=r" (r) : "rm" (x));
#endif
return r + 1;
}
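/*
 * Sample values (editorial note): ffs(0) == 0, ffs(0x8) == 4,
 * fls(0) == 0, fls(0x8) == 4, fls(0x80000000) == 32.  For a word with
 * a single set bit, ffs() and fls() agree.
 */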
#endif /* __KERNEL__ */
#undef ADDR
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
#define ARCH_HAS_FAST_MULTIPLIER 1
#include <asm-generic/bitops/hweight.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__
#include <asm-generic/bitops/ext2-non-atomic.h>
#define ext2_set_bit_atomic(lock, nr, addr) \
test_and_set_bit((nr), (unsigned long *)(addr))
#define ext2_clear_bit_atomic(lock, nr, addr) \
test_and_clear_bit((nr), (unsigned long *)(addr))
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */


@@ -0,0 +1,26 @@
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H
/* Don't touch these, unless you really know what you're doing. */
#define DEF_SYSSEG 0x1000
#define DEF_SYSSIZE 0x7F00
/* Internal svga startup constants */
#define NORMAL_VGA 0xffff /* 80x25 mode */
#define EXTENDED_VGA 0xfffe /* 80x50 mode */
#define ASK_VGA 0xfffd /* ask for it at bootup */
/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
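/*
 * Worked example (editorial note): with CONFIG_PHYSICAL_START=0x100000
 * and CONFIG_PHYSICAL_ALIGN=0x200000, LOAD_PHYSICAL_ADDR rounds up to
 * (0x100000 + 0x1fffff) & ~0x1fffff = 0x200000.
 */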
#ifdef CONFIG_X86_64
#define BOOT_HEAP_SIZE 0x7000
#define BOOT_STACK_SIZE 0x4000
#else
#define BOOT_HEAP_SIZE 0x4000
#define BOOT_STACK_SIZE 0x1000
#endif
#endif /* _ASM_X86_BOOT_H */


@@ -0,0 +1,111 @@
#ifndef _ASM_X86_BOOTPARAM_H
#define _ASM_X86_BOOTPARAM_H
#include <linux/types.h>
#include <linux/screen_info.h>
#include <linux/apm_bios.h>
#include <linux/edd.h>
#include <asm/e820.h>
#include <asm/ist.h>
#include <video/edid.h>
/* setup data types */
#define SETUP_NONE 0
#define SETUP_E820_EXT 1
/* extensible setup data list node */
struct setup_data {
__u64 next;
__u32 type;
__u32 len;
__u8 data[0];
};
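/*
 * Usage sketch (editorial addition): setup_data entries form a singly
 * linked list of physical addresses hanging off boot_params.hdr.setup_data,
 * walked roughly like this (the mapping of each physical address into
 * virtual space is elided for brevity):
 *
 *	u64 pa = boot_params.hdr.setup_data;
 *	while (pa) {
 *		struct setup_data *sd = ...map pa...;
 *		...inspect sd->type, sd->len, sd->data...
 *		pa = sd->next;
 *	}
 */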
struct setup_header {
__u8 setup_sects;
__u16 root_flags;
__u32 syssize;
__u16 ram_size;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
__u16 vid_mode;
__u16 root_dev;
__u16 boot_flag;
__u16 jump;
__u32 header;
__u16 version;
__u32 realmode_swtch;
__u16 start_sys;
__u16 kernel_version;
__u8 type_of_loader;
__u8 loadflags;
#define LOADED_HIGH (1<<0)
#define QUIET_FLAG (1<<5)
#define KEEP_SEGMENTS (1<<6)
#define CAN_USE_HEAP (1<<7)
__u16 setup_move_size;
__u32 code32_start;
__u32 ramdisk_image;
__u32 ramdisk_size;
__u32 bootsect_kludge;
__u16 heap_end_ptr;
__u16 _pad1;
__u32 cmd_line_ptr;
__u32 initrd_addr_max;
__u32 kernel_alignment;
__u8 relocatable_kernel;
__u8 _pad2[3];
__u32 cmdline_size;
__u32 hardware_subarch;
__u64 hardware_subarch_data;
__u32 payload_offset;
__u32 payload_length;
__u64 setup_data;
} __attribute__((packed));
struct sys_desc_table {
__u16 length;
__u8 table[14];
};
struct efi_info {
__u32 efi_loader_signature;
__u32 efi_systab;
__u32 efi_memdesc_size;
__u32 efi_memdesc_version;
__u32 efi_memmap;
__u32 efi_memmap_size;
__u32 efi_systab_hi;
__u32 efi_memmap_hi;
};
/* The so-called "zeropage" */
struct boot_params {
struct screen_info screen_info; /* 0x000 */
struct apm_bios_info apm_bios_info; /* 0x040 */
__u8 _pad2[12]; /* 0x054 */
struct ist_info ist_info; /* 0x060 */
__u8 _pad3[16]; /* 0x070 */
__u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
__u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
struct sys_desc_table sys_desc_table; /* 0x0a0 */
__u8 _pad4[144]; /* 0x0b0 */
struct edid_info edid_info; /* 0x140 */
struct efi_info efi_info; /* 0x1c0 */
__u32 alt_mem_k; /* 0x1e0 */
__u32 scratch; /* Scratch field! */ /* 0x1e4 */
__u8 e820_entries; /* 0x1e8 */
__u8 eddbuf_entries; /* 0x1e9 */
__u8 edd_mbr_sig_buf_entries; /* 0x1ea */
__u8 _pad6[6]; /* 0x1eb */
struct setup_header hdr; /* setup header */ /* 0x1f1 */
__u8 _pad7[0x290-0x1f1-sizeof(struct setup_header)];
__u32 edd_mbr_sig_buffer[EDD_MBR_SIG_MAX]; /* 0x290 */
struct e820entry e820_map[E820MAX]; /* 0x2d0 */
__u8 _pad8[48]; /* 0xcd0 */
struct edd_info eddbuf[EDDMAXNR]; /* 0xd00 */
__u8 _pad9[276]; /* 0xeec */
} __attribute__((packed));
#endif /* _ASM_X86_BOOTPARAM_H */


@@ -0,0 +1,39 @@
#ifndef _ASM_X86_BUG_H
#define _ASM_X86_BUG_H
#ifdef CONFIG_BUG
#define HAVE_ARCH_BUG
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifdef CONFIG_X86_32
# define __BUG_C0 "2:\t.long 1b, %c0\n"
#else
# define __BUG_C0 "2:\t.quad 1b, %c0\n"
#endif
#define BUG() \
do { \
asm volatile("1:\tud2\n" \
".pushsection __bug_table,\"a\"\n" \
__BUG_C0 \
"\t.word %c1, 0\n" \
"\t.org 2b+%c2\n" \
".popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (sizeof(struct bug_entry))); \
for (;;) ; \
} while (0)
#else
#define BUG() \
do { \
asm volatile("ud2"); \
for (;;) ; \
} while (0)
#endif
#endif /* !CONFIG_BUG */
#include <asm-generic/bug.h>
#endif /* _ASM_X86_BUG_H */


@@ -0,0 +1,12 @@
#ifndef _ASM_X86_BUGS_H
#define _ASM_X86_BUGS_H
extern void check_bugs(void);
#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
int ppro_with_ram_bug(void);
#else
static inline int ppro_with_ram_bug(void) { return 0; }
#endif
#endif /* _ASM_X86_BUGS_H */


@@ -0,0 +1,81 @@
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H
#include <asm/types.h>
#include <linux/compiler.h>
#ifdef __GNUC__
#ifdef __i386__
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
#ifdef CONFIG_X86_BSWAP
asm("bswap %0" : "=r" (x) : "0" (x));
#else
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
"rorl $16,%0\n\t" /* swap words */
"xchgb %b0,%h0" /* swap higher bytes */
: "=q" (x)
: "0" (x));
#endif
return x;
}
static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
{
union {
struct {
__u32 a;
__u32 b;
} s;
__u64 u;
} v;
v.u = val;
#ifdef CONFIG_X86_BSWAP
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
#else
v.s.a = ___arch__swab32(v.s.a);
v.s.b = ___arch__swab32(v.s.b);
asm("xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b));
#endif
return v.u;
}
#else /* __i386__ */
static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
asm("bswapq %0"
: "=r" (x)
: "0" (x));
return x;
}
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
asm("bswapl %0"
: "=r" (x)
: "0" (x));
return x;
}
#endif
/* Do not define swab16.  Gcc is smart enough to recognize the "C" version
   and convert it into a rotation or exchange. */
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)
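/*
 * Sample values (editorial note): ___arch__swab32(0x12345678) yields
 * 0x78563412, and ___arch__swab64(0x0123456789abcdefULL) yields
 * 0xefcdab8967452301ULL.
 */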
#define __BYTEORDER_HAS_U64__
#endif /* __GNUC__ */
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_X86_BYTEORDER_H */


@@ -0,0 +1,20 @@
#ifndef _ASM_X86_CACHE_H
#define _ASM_X86_CACHE_H
/* L1 cache line size */
#define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
#define __read_mostly __attribute__((__section__(".data.read_mostly")))
#ifdef CONFIG_X86_VSMP
/* vSMP Internode cacheline shift */
#define INTERNODE_CACHE_SHIFT (12)
#ifdef CONFIG_SMP
#define __cacheline_aligned_in_smp \
__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) \
__attribute__((__section__(".data.page_aligned")))
#endif
#endif
#endif /* _ASM_X86_CACHE_H */


@@ -0,0 +1,118 @@
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/* Caches aren't brain-dead on Intel CPUs. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma, pg) do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
/*
* The set_memory_* API can be used to change various attributes of a virtual
* address range. The attributes include:
 * Cacheability : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
*
 * Within a category, the attributes are mutually exclusive.
*
* The implementation of this API will take care of various aspects that
* are associated with changing such attributes, such as:
* - Flushing TLBs
* - Flushing CPU caches
* - Making sure aliases of the memory behind the mapping don't violate
* coherency rules as defined by the CPU in the system.
*
* What this API does not do:
* - Provide exclusion between various callers - including callers that
 * operate on other mappings of the same physical page
* - Restore default attributes when a page is freed
* - Guarantee that mappings other than the requested one are
* in any state, other than that these do not violate rules for
* the CPU you have. Do not depend on any effects on other mappings,
* CPUs other than the one you have may have more relaxed rules.
* The caller is required to take care of these.
*/
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
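/*
 * Usage sketch (editorial addition): write-protecting a page-aligned
 * kernel buffer and undoing it later.  'buf' and 'nr_pages' are
 * hypothetical; the address must lie in the kernel 1:1 mapping.
 *
 *	set_memory_ro((unsigned long)buf, nr_pages);
 *	...
 *	set_memory_rw((unsigned long)buf, nr_pages);
 */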
/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
* These functions operate ONLY on the 1:1 kernel mapping of the
* memory that the struct page represents, and internally just
* call the set_memory_* function. See the description of the
* set_memory_* function for more details on conventions.
*
* These APIs should be considered *deprecated* and are likely going to
* be removed in the future.
* The reason for this is the implicit operation on the 1:1 mapping only,
* making this not a generally useful API.
*
* Specifically, many users of the old APIs had a virtual address,
* called virt_to_page() or vmalloc_to_page() on that address to
* get a struct page* that the old API required.
* To convert these cases, use set_memory_*() on the original
* virtual address, do not use these functions.
*/
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
void clflush_cache_range(void *addr, unsigned int size);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
#endif
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
return 0;
}
#endif
#endif /* _ASM_X86_CACHEFLUSH_H */


@@ -0,0 +1,72 @@
/*
* Derived from include/asm-powerpc/iommu.h
*
* Copyright IBM Corporation, 2006-2007
*
* Author: Jon Mason <jdmason@us.ibm.com>
* Author: Muli Ben-Yehuda <muli@il.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_X86_CALGARY_H
#define _ASM_X86_CALGARY_H
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <asm/types.h>
struct iommu_table {
struct cal_chipset_ops *chip_ops; /* chipset specific funcs */
unsigned long it_base; /* mapped address of tce table */
unsigned long it_hint; /* Hint for next alloc */
unsigned long *it_map; /* A simple allocation bitmap for now */
void __iomem *bbar; /* Bridge BAR */
u64 tar_val; /* Table Address Register */
struct timer_list watchdog_timer;
spinlock_t it_lock; /* Protects it_map */
unsigned int it_size; /* Size of iommu table in entries */
unsigned char it_busno; /* Bus number this table belongs to */
};
struct cal_chipset_ops {
void (*handle_quirks)(struct iommu_table *tbl, struct pci_dev *dev);
void (*tce_cache_blast)(struct iommu_table *tbl);
void (*dump_error_regs)(struct iommu_table *tbl);
};
#define TCE_TABLE_SIZE_UNSPECIFIED ~0
#define TCE_TABLE_SIZE_64K 0
#define TCE_TABLE_SIZE_128K 1
#define TCE_TABLE_SIZE_256K 2
#define TCE_TABLE_SIZE_512K 3
#define TCE_TABLE_SIZE_1M 4
#define TCE_TABLE_SIZE_2M 5
#define TCE_TABLE_SIZE_4M 6
#define TCE_TABLE_SIZE_8M 7
extern int use_calgary;
#ifdef CONFIG_CALGARY_IOMMU
extern int calgary_iommu_init(void);
extern void detect_calgary(void);
#else
static inline int calgary_iommu_init(void) { return 1; }
static inline void detect_calgary(void) { return; }
#endif
#endif /* _ASM_X86_CALGARY_H */


@@ -0,0 +1,170 @@
/*
* Some macros to handle stack frames in assembly.
*/
#define R15 0
#define R14 8
#define R13 16
#define R12 24
#define RBP 32
#define RBX 40
/* arguments: interrupts/non-tracing syscalls only save up to here */
#define R11 48
#define R10 56
#define R9 64
#define R8 72
#define RAX 80
#define RCX 88
#define RDX 96
#define RSI 104
#define RDI 112
#define ORIG_RAX 120 /* + error_code */
/* end of arguments */
/* cpu exception frame or undefined in case of fast syscall. */
#define RIP 128
#define CS 136
#define EFLAGS 144
#define RSP 152
#define SS 160
#define ARGOFFSET R11
#define SWFRAME ORIG_RAX
.macro SAVE_ARGS addskip=0, norcx=0, nor891011=0
subq $9*8+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET 9*8+\addskip
movq %rdi, 8*8(%rsp)
CFI_REL_OFFSET rdi, 8*8
movq %rsi, 7*8(%rsp)
CFI_REL_OFFSET rsi, 7*8
movq %rdx, 6*8(%rsp)
CFI_REL_OFFSET rdx, 6*8
.if \norcx
.else
movq %rcx, 5*8(%rsp)
CFI_REL_OFFSET rcx, 5*8
.endif
movq %rax, 4*8(%rsp)
CFI_REL_OFFSET rax, 4*8
.if \nor891011
.else
movq %r8, 3*8(%rsp)
CFI_REL_OFFSET r8, 3*8
movq %r9, 2*8(%rsp)
CFI_REL_OFFSET r9, 2*8
movq %r10, 1*8(%rsp)
CFI_REL_OFFSET r10, 1*8
movq %r11, (%rsp)
CFI_REL_OFFSET r11, 0*8
.endif
.endm
#define ARG_SKIP 9*8
.macro RESTORE_ARGS skiprax=0, addskip=0, skiprcx=0, skipr11=0, \
skipr8910=0, skiprdx=0
.if \skipr11
.else
movq (%rsp), %r11
CFI_RESTORE r11
.endif
.if \skipr8910
.else
movq 1*8(%rsp), %r10
CFI_RESTORE r10
movq 2*8(%rsp), %r9
CFI_RESTORE r9
movq 3*8(%rsp), %r8
CFI_RESTORE r8
.endif
.if \skiprax
.else
movq 4*8(%rsp), %rax
CFI_RESTORE rax
.endif
.if \skiprcx
.else
movq 5*8(%rsp), %rcx
CFI_RESTORE rcx
.endif
.if \skiprdx
.else
movq 6*8(%rsp), %rdx
CFI_RESTORE rdx
.endif
movq 7*8(%rsp), %rsi
CFI_RESTORE rsi
movq 8*8(%rsp), %rdi
CFI_RESTORE rdi
.if ARG_SKIP+\addskip > 0
addq $ARG_SKIP+\addskip, %rsp
CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
.endif
.endm
.macro LOAD_ARGS offset, skiprax=0
movq \offset(%rsp), %r11
movq \offset+8(%rsp), %r10
movq \offset+16(%rsp), %r9
movq \offset+24(%rsp), %r8
movq \offset+40(%rsp), %rcx
movq \offset+48(%rsp), %rdx
movq \offset+56(%rsp), %rsi
movq \offset+64(%rsp), %rdi
.if \skiprax
.else
movq \offset+72(%rsp), %rax
.endif
.endm
#define REST_SKIP 6*8
.macro SAVE_REST
subq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rbx, 5*8(%rsp)
CFI_REL_OFFSET rbx, 5*8
movq %rbp, 4*8(%rsp)
CFI_REL_OFFSET rbp, 4*8
movq %r12, 3*8(%rsp)
CFI_REL_OFFSET r12, 3*8
movq %r13, 2*8(%rsp)
CFI_REL_OFFSET r13, 2*8
movq %r14, 1*8(%rsp)
CFI_REL_OFFSET r14, 1*8
movq %r15, (%rsp)
CFI_REL_OFFSET r15, 0*8
.endm
.macro RESTORE_REST
movq (%rsp), %r15
CFI_RESTORE r15
movq 1*8(%rsp), %r14
CFI_RESTORE r14
movq 2*8(%rsp), %r13
CFI_RESTORE r13
movq 3*8(%rsp), %r12
CFI_RESTORE r12
movq 4*8(%rsp), %rbp
CFI_RESTORE rbp
movq 5*8(%rsp), %rbx
CFI_RESTORE rbx
addq $REST_SKIP, %rsp
CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
.endm
.macro SAVE_ALL
SAVE_ARGS
SAVE_REST
.endm
.macro RESTORE_ALL addskip=0
RESTORE_REST
RESTORE_ARGS 0, \addskip
.endm
.macro icebp
.byte 0xf1
.endm


@@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "checksum_32.h"
#else
# include "checksum_64.h"
#endif


@@ -0,0 +1,189 @@
#ifndef _ASM_X86_CHECKSUM_32_H
#define _ASM_X86_CHECKSUM_32_H
#include <linux/in6.h>
#include <asm/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
 * here it is even more important to align src and dst on a 32-bit (or,
 * even better, a 64-bit) boundary
*/
asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* access_ok().
*/
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum)
{
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
}
static inline __wsum csum_partial_copy_from_user(const void __user *src,
void *dst,
int len, __wsum sum,
int *err_ptr)
{
might_sleep();
return csum_partial_copy_generic((__force void *)src, dst,
len, sum, err_ptr, NULL);
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
asm volatile("movl (%1), %0 ;\n"
"subl $4, %2 ;\n"
"jbe 2f ;\n"
"addl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0;\n"
"1: adcl 16(%1), %0 ;\n"
"lea 4(%1), %1 ;\n"
"decl %2 ;\n"
"jne 1b ;\n"
"adcl $0, %0 ;\n"
"movl %0, %2 ;\n"
"shrl $16, %0 ;\n"
"addw %w2, %w0 ;\n"
"adcl $0, %0 ;\n"
"notl %0 ;\n"
"2: ;\n"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
/*
* Fold a partial checksum
*/
static inline __sum16 csum_fold(__wsum sum)
{
asm("addl %1, %0 ;\n"
"adcl $0xffff, %0 ;\n"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
asm("addl %1, %0 ;\n"
"adcl %2, %0 ;\n"
"adcl %3, %0 ;\n"
"adcl $0, %0 ;\n"
: "=r" (sum)
: "g" (daddr), "g"(saddr),
"g" ((len + proto) << 8), "0" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto,
__wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));
}
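/*
 * Usage sketch (editorial addition): an ICMP-style checksum is the
 * folded sum over the whole message ('icmph' is hypothetical):
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 *
 * csum_partial() plus csum_fold() are the building blocks when the
 * data arrives in several fragments.
 */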
#define _HAVE_ARCH_IPV6_CSUM
static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
asm("addl 0(%1), %0 ;\n"
"adcl 4(%1), %0 ;\n"
"adcl 8(%1), %0 ;\n"
"adcl 12(%1), %0 ;\n"
"adcl 0(%2), %0 ;\n"
"adcl 4(%2), %0 ;\n"
"adcl 8(%2), %0 ;\n"
"adcl 12(%2), %0 ;\n"
"adcl %3, %0 ;\n"
"adcl %4, %0 ;\n"
"adcl $0, %0 ;\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static inline __wsum csum_and_copy_to_user(const void *src,
void __user *dst,
int len, __wsum sum,
int *err_ptr)
{
might_sleep();
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic(src, (__force void *)dst,
len, sum, NULL, err_ptr);
if (len)
*err_ptr = -EFAULT;
return (__force __wsum)-1; /* invalid checksum */
}
#endif /* _ASM_X86_CHECKSUM_32_H */


@@ -0,0 +1,191 @@
#ifndef _ASM_X86_CHECKSUM_64_H
#define _ASM_X86_CHECKSUM_64_H
/*
* Checksums for x86-64
* Copyright 2002 by Andi Kleen, SuSE Labs
* with some code from asm-x86/checksum.h
*/
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
/**
* csum_fold - Fold and invert a 32bit checksum.
 * @sum: 32bit unfolded sum
*
* Fold a 32bit running checksum to 16bit and invert it. This is usually
* the last step before putting a checksum into a packet.
* Make sure not to mix with 64bit checksums.
*/
static inline __sum16 csum_fold(__wsum sum)
{
asm(" addl %1,%0\n"
" adcl $0xffff,%0"
: "=r" (sum)
: "r" ((__force u32)sum << 16),
"0" ((__force u32)sum & 0xffff0000));
return (__force __sum16)(~(__force u32)sum >> 16);
}
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*
* By Jorge Cwik <jorge@laser.satlink.net>, adapted for linux by
* Arnt Gulbrandsen.
*/
/**
* ip_fast_csum - Compute the IPv4 header checksum efficiently.
 * @iph: ipv4 header
 * @ihl: length of header / 4
*/
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
unsigned int sum;
asm(" movl (%1), %0\n"
" subl $4, %2\n"
" jbe 2f\n"
" addl 4(%1), %0\n"
" adcl 8(%1), %0\n"
" adcl 12(%1), %0\n"
"1: adcl 16(%1), %0\n"
" lea 4(%1), %1\n"
" decl %2\n"
" jne 1b\n"
" adcl $0, %0\n"
" movl %0, %2\n"
" shrl $16, %0\n"
" addw %w2, %w0\n"
" adcl $0, %0\n"
" notl %0\n"
"2:"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "memory");
return (__force __sum16)sum;
}
/**
 * csum_tcpudp_nofold - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
 * Returns the pseudo header checksum of the input data. Result is
* 32bit unfolded.
*/
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
unsigned short proto, __wsum sum)
{
asm(" addl %1, %0\n"
" adcl %2, %0\n"
" adcl %3, %0\n"
" adcl $0, %0\n"
: "=r" (sum)
: "g" (daddr), "g" (saddr),
"g" ((len + proto)<<8), "0" (sum));
return sum;
}
/**
 * csum_tcpudp_magic - Compute an IPv4 pseudo header checksum.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: ip protocol of packet
* @sum: initial sum to be added in (32bit unfolded)
*
 * Returns the 16bit pseudo header checksum of the input data, already
* complemented and ready to be filled in.
*/
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned short len,
unsigned short proto, __wsum sum)
{
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/**
* csum_partial - Compute an internet checksum.
* @buff: buffer to be checksummed
* @len: length of buffer.
* @sum: initial sum to be added in (32bit unfolded)
*
* Returns the 32bit unfolded internet checksum of the buffer.
* Before filling it in it needs to be csum_fold()'ed.
* buff should be aligned to a 64bit boundary if possible.
*/
extern __wsum csum_partial(const void *buff, int len, __wsum sum);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
#define HAVE_CSUM_COPY_USER 1
/* Do not call this directly. Use the wrappers below */
extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, __wsum sum);
/* Old names. To be removed. */
#define csum_and_copy_to_user csum_partial_copy_to_user
#define csum_and_copy_from_user csum_partial_copy_from_user
/**
 * ip_compute_csum - Compute a 16bit IP checksum.
* @buff: buffer address.
* @len: length of buffer.
*
* Returns the 16bit folded/inverted checksum of the passed buffer.
* Ready to fill in.
*/
extern __sum16 ip_compute_csum(const void *buff, int len);
/**
* csum_ipv6_magic - Compute checksum of an IPv6 pseudo header.
* @saddr: source address
* @daddr: destination address
* @len: length of packet
* @proto: protocol of packet
* @sum: initial sum (32bit unfolded) to be added in
*
 * Computes an IPv6 pseudo header checksum. This sum is added to the
 * checksum of UDP/TCP packets and contains some link layer information.
* Returns the unfolded 32bit checksum.
*/
struct in6_addr;
#define _HAVE_ARCH_IPV6_CSUM 1
extern __sum16
csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
__u32 len, unsigned short proto, __wsum sum);
static inline unsigned add32_with_carry(unsigned a, unsigned b)
{
asm("addl %2,%0\n\t"
"adcl $0,%0"
: "=r" (a)
: "0" (a), "r" (b));
return a;
}
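/*
 * Sample value (editorial note): the carry is folded back in, so
 * add32_with_carry(0xffffffff, 1) == 1 rather than 0.
 */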
#endif /* _ASM_X86_CHECKSUM_64_H */


@@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "cmpxchg_32.h"
#else
# include "cmpxchg_64.h"
#endif


@@ -0,0 +1,344 @@
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H
#include <linux/bitops.h> /* for LOCK_PREFIX */
/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 * you need to test for the feature in boot_cpu_data.
*/
#define xchg(ptr, v) \
((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
struct __xchg_dummy {
unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around 38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
*
* cmpxchg8b must be used with the lock prefix here to allow
* the instruction to be executed atomically, see page 3-102
* of the instruction set reference 24319102.pdf. We need
* the reader side to see the coherent 64bit value.
*/
static inline void __set_64bit(unsigned long long *ptr,
unsigned int low, unsigned int high)
{
asm volatile("\n1:\t"
"movl (%0), %%eax\n\t"
"movl 4(%0), %%edx\n\t"
LOCK_PREFIX "cmpxchg8b (%0)\n\t"
"jnz 1b"
: /* no outputs */
: "D"(ptr),
"b"(low),
"c"(high)
: "ax", "dx", "memory");
}
static inline void __set_64bit_constant(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}
#define ll_low(x) *(((unsigned int *)&(x)) + 0)
#define ll_high(x) *(((unsigned int *)&(x)) + 1)
static inline void __set_64bit_var(unsigned long long *ptr,
unsigned long long value)
{
__set_64bit(ptr, ll_low(value), ll_high(value));
}
#define set_64bit(ptr, value) \
(__builtin_constant_p((value)) \
? __set_64bit_constant((ptr), (value)) \
: __set_64bit_var((ptr), (value)))
#define _set_64bit(ptr, value) \
(__builtin_constant_p(value) \
? __set_64bit(ptr, (unsigned int)(value), \
(unsigned int)((value) >> 32)) \
: __set_64bit(ptr, ll_low((value)), ll_high((value))))
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
: "=q" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
}
return x;
}
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#endif
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#endif
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
/*
* Always use locked operations when touching memory shared with a
* hypervisor, since the system may be SMP even if the guest kernel
* isn't.
*/
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile("lock; cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile("lock; cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile("lock; cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile("cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile("cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile("cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile(LOCK_PREFIX "cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
unsigned long long old,
unsigned long long new)
{
unsigned long long prev;
asm volatile("cmpxchg8b %3"
: "=A"(prev)
: "b"((unsigned long)new),
"c"((unsigned long)(new >> 32)),
"m"(*__xg(ptr)),
"0"(old)
: "memory");
return prev;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary to
* simulate the cmpxchg on the 80386 CPU. For that purpose we define
* a function for each of the sizes we support.
*/
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
switch (size) {
case 1:
return cmpxchg_386_u8(ptr, old, new);
case 2:
return cmpxchg_386_u16(ptr, old, new);
case 4:
return cmpxchg_386_u32(ptr, old, new);
}
return old;
}
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 3)) \
__ret = (__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#define cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 3)) \
__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr), \
(unsigned long)(o), (unsigned long)(n), \
sizeof(*(ptr))); \
__ret; \
})
#endif
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486. It may be
 * necessary to simulate cmpxchg8b on those CPUs.
*/
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#define cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#endif
#endif /* _ASM_X86_CMPXCHG_32_H */


@@ -0,0 +1,185 @@
#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
(ptr), sizeof(*(ptr))))
#define __xg(x) ((volatile long *)(x))
static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
*ptr = val;
}
#define _set_64bit set_64bit
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
: "=q" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %k0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 8:
asm volatile("xchgq %0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
}
return x;
}
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 8:
asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
/*
* Always use locked operations when touching memory shared with a
* hypervisor, since the system may be SMP even if the guest kernel
* isn't.
*/
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile("lock; cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile("lock; cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile("lock; cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile("cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile("cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile("cmpxchgl %k1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 8:
asm volatile("cmpxchgq %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#endif /* _ASM_X86_CMPXCHG_64_H */
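
A usage sketch, not how the kernel takes locks: because xchg() is implicitly locked on x86, a toy test-and-set lock needs no explicit LOCK prefix. The type and functions below are hypothetical; cpu_relax() is assumed from <asm/processor.h>:

struct toy_lock {
	unsigned long locked;		/* 0 = free, 1 = held */
};

static void toy_lock_acquire(struct toy_lock *l)
{
	while (xchg(&l->locked, 1UL) != 0UL)
		cpu_relax();		/* spin; xchg is a full barrier */
}

static void toy_lock_release(struct toy_lock *l)
{
	xchg(&l->locked, 0UL);		/* locked op orders the release */
}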

View File

@@ -0,0 +1,218 @@
#ifndef _ASM_X86_COMPAT_H
#define _ASM_X86_COMPAT_H
/*
* Architecture specific compatibility types
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/user32.h>
#define COMPAT_USER_HZ 100
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
typedef s32 compat_time_t;
typedef s32 compat_clock_t;
typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
typedef s32 compat_off_t;
typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
typedef s32 compat_timer_t;
typedef s32 compat_key_t;
typedef s32 compat_int_t;
typedef s32 compat_long_t;
typedef s64 __attribute__((aligned(4))) compat_s64;
typedef u32 compat_uint_t;
typedef u32 compat_ulong_t;
typedef u64 __attribute__((aligned(4))) compat_u64;
struct compat_timespec {
compat_time_t tv_sec;
s32 tv_nsec;
};
struct compat_timeval {
compat_time_t tv_sec;
s32 tv_usec;
};
struct compat_stat {
compat_dev_t st_dev;
u16 __pad1;
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_nlink_t st_nlink;
__compat_uid_t st_uid;
__compat_gid_t st_gid;
compat_dev_t st_rdev;
u16 __pad2;
u32 st_size;
u32 st_blksize;
u32 st_blocks;
u32 st_atime;
u32 st_atime_nsec;
u32 st_mtime;
u32 st_mtime_nsec;
u32 st_ctime;
u32 st_ctime_nsec;
u32 __unused4;
u32 __unused5;
};
struct compat_flock {
short l_type;
short l_whence;
compat_off_t l_start;
compat_off_t l_len;
compat_pid_t l_pid;
};
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
/*
* IA32 uses 4 byte alignment for 64 bit quantities,
* so we need to pack this structure.
*/
struct compat_flock64 {
short l_type;
short l_whence;
compat_loff_t l_start;
compat_loff_t l_len;
compat_pid_t l_pid;
} __attribute__((packed));
struct compat_statfs {
int f_type;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
compat_fsid_t f_fsid;
int f_namelen; /* SunOS ignores this field. */
int f_frsize;
int f_spare[5];
};
#define COMPAT_RLIM_OLD_INFINITY 0x7fffffff
#define COMPAT_RLIM_INFINITY 0xffffffff
typedef u32 compat_old_sigset_t; /* at least 32 bits */
#define _COMPAT_NSIG 64
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
struct compat_ipc64_perm {
compat_key_t key;
__compat_uid32_t uid;
__compat_gid32_t gid;
__compat_uid32_t cuid;
__compat_gid32_t cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
compat_ulong_t unused1;
compat_ulong_t unused2;
};
struct compat_semid64_ds {
struct compat_ipc64_perm sem_perm;
compat_time_t sem_otime;
compat_ulong_t __unused1;
compat_time_t sem_ctime;
compat_ulong_t __unused2;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct compat_msqid64_ds {
struct compat_ipc64_perm msg_perm;
compat_time_t msg_stime;
compat_ulong_t __unused1;
compat_time_t msg_rtime;
compat_ulong_t __unused2;
compat_time_t msg_ctime;
compat_ulong_t __unused3;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct compat_shmid64_ds {
struct compat_ipc64_perm shm_perm;
compat_size_t shm_segsz;
compat_time_t shm_atime;
compat_ulong_t __unused1;
compat_time_t shm_dtime;
compat_ulong_t __unused2;
compat_time_t shm_ctime;
compat_ulong_t __unused3;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
compat_ulong_t shm_nattch;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
/*
* The type of struct elf_prstatus.pr_reg in compatible core dumps.
*/
typedef struct user_regs_struct32 compat_elf_gregset_t;
/*
* A pointer passed in from user mode. This should not
* be used for syscall parameters; just declare them
* as pointers, because the syscall entry code will have
* converted them appropriately already.
*/
typedef u32 compat_uptr_t;
static inline void __user *compat_ptr(compat_uptr_t uptr)
{
return (void __user *)(unsigned long)uptr;
}
static inline compat_uptr_t ptr_to_compat(void __user *uptr)
{
return (u32)(unsigned long)uptr;
}
static inline void __user *compat_alloc_user_space(long len)
{
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
}
static inline int is_compat_task(void)
{
return current_thread_info()->status & TS_COMPAT;
}
#endif /* _ASM_X86_COMPAT_H */
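
The compat_ptr()/ptr_to_compat() pair is what lets a 64-bit handler consume 32-bit user pointers safely. A hypothetical compat ioctl fragment; the request structure, the function name, and the PAGE_SIZE-sized kbuf are assumptions, and <linux/uaccess.h> and <linux/errno.h> are needed alongside this header:

struct foo_req32 {
	compat_uptr_t buf;		/* 32-bit user pointer */
	compat_size_t len;
};

static long foo_fetch32(struct foo_req32 __user *ureq, char *kbuf)
{
	struct foo_req32 req;

	if (copy_from_user(&req, ureq, sizeof(req)))
		return -EFAULT;
	if (req.len > PAGE_SIZE)
		return -EINVAL;
	/* widen the 32-bit pointer before dereferencing it */
	return copy_from_user(kbuf, compat_ptr(req.buf), req.len) ? -EFAULT : 0;
}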

View File

@@ -0,0 +1,20 @@
#ifndef _ASM_X86_CPU_H
#define _ASM_X86_CPU_H
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
struct x86_cpu {
struct cpu cpu;
};
#ifdef CONFIG_HOTPLUG_CPU
extern int arch_register_cpu(int num);
extern void arch_unregister_cpu(int);
#endif
DECLARE_PER_CPU(int, cpu_state);
#endif /* _ASM_X86_CPU_H */

View File

@@ -0,0 +1,271 @@
/*
* Defines x86 CPU feature bits
*/
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
#include <asm/required-features.h>
#define NCAPINTS 9 /* N 32-bit words worth of info */
/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
*/
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */
/* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */
#define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM (0*32+25) /* "sse" */
#define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID (4*32+10) /* Context ID */
#define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
#define X86_FEATURE_AES (4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */
/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
/* Virtualization flags: Linux defined */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <linux/bitops.h>
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
(((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
test_cpu_cap(c, bit))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cleared_cpu_caps); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
} while (0)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
#define cpu_has_vme boot_cpu_has(X86_FEATURE_VME)
#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pae boot_cpu_has(X86_FEATURE_PAE)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
#define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR)
#define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR)
#define cpu_has_centaur_mcr boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH)
#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
#else
# define cpu_has_invlpg (boot_cpu_data.x86 > 3)
#endif
#ifdef CONFIG_X86_64
#undef cpu_has_vme
#define cpu_has_vme 0
#undef cpu_has_pae
#define cpu_has_pae ___BUG___
#undef cpu_has_mp
#define cpu_has_mp 1
#undef cpu_has_k6_mtrr
#define cpu_has_k6_mtrr 0
#undef cpu_has_cyrix_arr
#define cpu_has_cyrix_arr 0
#undef cpu_has_centaur_mcr
#define cpu_has_centaur_mcr 0
#endif /* CONFIG_X86_64 */
#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
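
The point of the cpu_has() construction is that the test folds to the constant 1 whenever the bit is guaranteed by the REQUIRED_MASK* words, so the compiler drops the fallback branch entirely. A sketch of the pattern; clear_page_sse2() is a hypothetical fast path, not a function defined in this tree:

static void clear_page_best(void *page)
{
	if (boot_cpu_has(X86_FEATURE_XMM2))
		clear_page_sse2(page);		/* hypothetical SSE2 path */
	else
		memset(page, 0, PAGE_SIZE);	/* portable fallback */
}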

View File

@@ -0,0 +1 @@
#include <asm-generic/cputime.h>

View File

@@ -0,0 +1,39 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>
struct task_struct;
DECLARE_PER_CPU(struct task_struct *, current_task);
static __always_inline struct task_struct *get_current(void)
{
return x86_read_percpu(current_task);
}
#else /* X86_32 */
#ifndef __ASSEMBLY__
#include <asm/pda.h>
struct task_struct;
static __always_inline struct task_struct *get_current(void)
{
return read_pda(pcurrent);
}
#else /* __ASSEMBLY__ */
#include <asm/asm-offsets.h>
#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
#endif /* __ASSEMBLY__ */
#endif /* X86_32 */
#define current get_current()
#endif /* _ASM_X86_CURRENT_H */
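
Either way, current costs a single per-CPU load (x86_read_percpu() on 32-bit, read_pda() on 64-bit), so it is cheap to use inline. A trivial, hypothetical illustration:

static void report_caller(void)
{
	printk(KERN_DEBUG "called by %s (pid %d)\n",
	       current->comm, current->pid);
}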

View File

@@ -0,0 +1,70 @@
#ifndef _ASM_X86_DEBUGREG_H
#define _ASM_X86_DEBUGREG_H
/* Indicate the register numbers for a number of the specific
debug registers. Registers 0-3 contain the addresses we wish to trap on */
#define DR_FIRSTADDR 0 /* u_debugreg[DR_FIRSTADDR] */
#define DR_LASTADDR 3 /* u_debugreg[DR_LASTADDR] */
#define DR_STATUS 6 /* u_debugreg[DR_STATUS] */
#define DR_CONTROL 7 /* u_debugreg[DR_CONTROL] */
/* Define a few things for the status register. We can use this to determine
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
#define DR_TRAP0 (0x1) /* db0 */
#define DR_TRAP1 (0x2) /* db1 */
#define DR_TRAP2 (0x4) /* db2 */
#define DR_TRAP3 (0x8) /* db3 */
#define DR_STEP (0x4000) /* single-step */
#define DR_SWITCH (0x8000) /* task switch */
/* Now define a bunch of things for manipulating the control register.
The top two bytes of the control register consist of 4 fields of 4
bits - each field corresponds to one of the four debug registers,
and indicates what types of access we trap on, and how large the data
field is that we are looking at */
#define DR_CONTROL_SHIFT 16 /* Skip this many bits in ctl register */
#define DR_CONTROL_SIZE 4 /* 4 control bits per register */
#define DR_RW_EXECUTE (0x0) /* Settings for the access types to trap on */
#define DR_RW_WRITE (0x1)
#define DR_RW_READ (0x3)
#define DR_LEN_1 (0x0) /* Settings for data length to trap on */
#define DR_LEN_2 (0x4)
#define DR_LEN_4 (0xC)
#define DR_LEN_8 (0x8)
/* The low byte of the control register determines which registers are
enabled. There are 4 fields of two bits each. One bit is "local", meaning
that the processor will reset it after a task switch, and the other is
"global", meaning that we have to reset it explicitly. With Linux,
you can use either one, since we explicitly zero the register when we enter
kernel mode. */
#define DR_LOCAL_ENABLE_SHIFT 0 /* Extra shift to the local enable bit */
#define DR_GLOBAL_ENABLE_SHIFT 1 /* Extra shift to the global enable bit */
#define DR_ENABLE_SIZE 2 /* 2 enable bits per register */
#define DR_LOCAL_ENABLE_MASK (0x55) /* Set local bits for all 4 regs */
#define DR_GLOBAL_ENABLE_MASK (0xAA) /* Set global bits for all 4 regs */
/* The second byte of the control register has a few special things.
We can slow the instruction pipeline for instructions coming via the
gdt or the ldt if we want to. I am not sure why this is an advantage. */
#ifdef __i386__
#define DR_CONTROL_RESERVED (0xFC00) /* Reserved by Intel */
#else
#define DR_CONTROL_RESERVED (0xFFFFFFFF0000FC00UL) /* Reserved */
#endif
#define DR_LOCAL_SLOWDOWN (0x100) /* Local slow the pipeline */
#define DR_GLOBAL_SLOWDOWN (0x200) /* Global slow the pipeline */
#endif /* _ASM_X86_DEBUGREG_H */
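
Putting the DR7 constants together: a watchpoint that traps 4-byte writes to the address held in DR0 needs the RW/LEN nibble for register 0 placed at DR_CONTROL_SHIFT, plus that register's local-enable bit. A sketch; the helper is hypothetical and nothing here actually writes the register:

static unsigned long dr7_write_watch_dr0(void)
{
	unsigned long dr7 = 0;

	/* RW/LEN nibble for debug register N sits at
	   DR_CONTROL_SHIFT + N * DR_CONTROL_SIZE */
	dr7 |= (unsigned long)(DR_RW_WRITE | DR_LEN_4)
		<< (DR_CONTROL_SHIFT + 0 * DR_CONTROL_SIZE);

	/* local-enable bit for register N is at N * DR_ENABLE_SIZE */
	dr7 |= 1UL << (0 * DR_ENABLE_SIZE + DR_LOCAL_ENABLE_SHIFT);

	return dr7;			/* yields 0x000d0001 */
}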

View File

@@ -0,0 +1,31 @@
#ifndef _ASM_X86_DELAY_H
#define _ASM_X86_DELAY_H
/*
* Copyright (C) 1993 Linus Torvalds
*
* Delay routines calling functions in arch/x86/lib/delay.c
*/
/* Undefined functions to get compile-time errors */
extern void __bad_udelay(void);
extern void __bad_ndelay(void);
extern void __udelay(unsigned long usecs);
extern void __ndelay(unsigned long nsecs);
extern void __const_udelay(unsigned long xloops);
extern void __delay(unsigned long loops);
/* 0x10c7 is 2**32 / 1000000 (rounded up) */
#define udelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \
__udelay(n))
/* 0x5 is 2**32 / 1000000000 (rounded up) */
#define ndelay(n) (__builtin_constant_p(n) ? \
((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
__ndelay(n))
void use_tsc_delay(void);
#endif /* _ASM_X86_DELAY_H */
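
The magic multiplier deserves a worked example: 0x10c7 is 4295, the rounded-up value of 2^32 / 10^6, so a constant microsecond count becomes a 32-bit fixed-point fraction of a second that __const_udelay() scales by the calibrated loops_per_jiffy. A hypothetical illustration:

static void settle_hardware(void)
{
	/* constant argument: folds to __const_udelay(100 * 0x10c7) */
	udelay(100);

	/* udelay(50000) would trip __bad_udelay() at link time;
	   delays above 20000us belong in mdelay() instead */
}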

arch/x86/include/asm/desc.h Normal file
View File

@@ -0,0 +1,409 @@
#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H
#ifndef __ASSEMBLY__
#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <linux/smp.h>
static inline void fill_ldt(struct desc_struct *desc,
const struct user_desc *info)
{
desc->limit0 = info->limit & 0x0ffff;
desc->base0 = info->base_addr & 0x0000ffff;
desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
desc->type = (info->read_exec_only ^ 1) << 1;
desc->type |= info->contents << 2;
desc->s = 1;
desc->dpl = 0x3;
desc->p = info->seg_not_present ^ 1;
desc->limit = (info->limit & 0xf0000) >> 16;
desc->avl = info->useable;
desc->d = info->seg_32bit;
desc->g = info->limit_in_pages;
desc->base2 = (info->base_addr & 0xff000000) >> 24;
/*
* Don't allow setting of the lm bit. It is useless anyway
* because 64bit system calls require __USER_CS:
*/
desc->l = 0;
}
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
struct gdt_page {
struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));
DECLARE_PER_CPU(struct gdt_page, gdt_page);
static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
return per_cpu(gdt_page, cpu).gdt;
}
#ifdef CONFIG_X86_64
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
unsigned dpl, unsigned ist, unsigned seg)
{
gate->offset_low = PTR_LOW(func);
gate->segment = __KERNEL_CS;
gate->ist = ist;
gate->p = 1;
gate->dpl = dpl;
gate->zero0 = 0;
gate->zero1 = 0;
gate->type = type;
gate->offset_middle = PTR_MIDDLE(func);
gate->offset_high = PTR_HIGH(func);
}
#else
static inline void pack_gate(gate_desc *gate, unsigned char type,
unsigned long base, unsigned dpl, unsigned flags,
unsigned short seg)
{
gate->a = (seg << 16) | (base & 0xffff);
gate->b = (base & 0xffff0000) |
(((0x80 | type | (dpl << 5)) & 0xff) << 8);
}
#endif
static inline int desc_empty(const void *ptr)
{
const u32 *desc = ptr;
return !(desc[0] | desc[1]);
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc() native_load_tr_desc()
#define load_gdt(dtr) native_load_gdt(dtr)
#define load_idt(dtr) native_load_idt(dtr)
#define load_tr(tr) asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
#define store_gdt(dtr) native_store_gdt(dtr)
#define store_idt(dtr) native_store_idt(dtr)
#define store_tr(tr) (tr = native_store_tr())
#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))
#define load_TLS(t, cpu) native_load_tls(t, cpu)
#define set_ldt native_set_ldt
#define write_ldt_entry(dt, entry, desc) \
native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type) \
native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g) \
native_write_idt_entry(dt, entry, g)
static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}
static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif /* CONFIG_PARAVIRT */
static inline void native_write_idt_entry(gate_desc *idt, int entry,
const gate_desc *gate)
{
memcpy(&idt[entry], gate, sizeof(*gate));
}
static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
const void *desc)
{
memcpy(&ldt[entry], desc, 8);
}
static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
const void *desc, int type)
{
unsigned int size;
switch (type) {
case DESC_TSS:
size = sizeof(tss_desc);
break;
case DESC_LDT:
size = sizeof(ldt_desc);
break;
default:
size = sizeof(struct desc_struct);
break;
}
memcpy(&gdt[entry], desc, size);
}
static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
unsigned long limit, unsigned char type,
unsigned char flags)
{
desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
(limit & 0x000f0000) | ((type & 0xff) << 8) |
((flags & 0xf) << 20);
desc->p = 1;
}
static inline void set_tssldt_descriptor(void *d, unsigned long addr,
unsigned type, unsigned size)
{
#ifdef CONFIG_X86_64
struct ldttss_desc64 *desc = d;
memset(desc, 0, sizeof(*desc));
desc->limit0 = size & 0xFFFF;
desc->base0 = PTR_LOW(addr);
desc->base1 = PTR_MIDDLE(addr) & 0xFF;
desc->type = type;
desc->p = 1;
desc->limit1 = (size >> 16) & 0xF;
desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF;
desc->base3 = PTR_HIGH(addr);
#else
pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
#endif
}
static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
struct desc_struct *d = get_cpu_gdt_table(cpu);
tss_desc tss;
/*
* sizeof(unsigned long) coming from an extra "long" at the end
* of the iobitmap. See tss_struct definition in processor.h
*
* -1? seg base+limit should be pointing to the address of the
* last valid byte
*/
set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
sizeof(unsigned long) - 1);
write_gdt_entry(d, entry, &tss, DESC_TSS);
}
#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
if (likely(entries == 0))
asm volatile("lldt %w0"::"q" (0));
else {
unsigned cpu = smp_processor_id();
ldt_desc ldt;
set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
entries * LDT_ENTRY_SIZE - 1);
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
&ldt, DESC_LDT);
asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
}
}
static inline void native_load_tr_desc(void)
{
asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
static inline void native_load_gdt(const struct desc_ptr *dtr)
{
asm volatile("lgdt %0"::"m" (*dtr));
}
static inline void native_load_idt(const struct desc_ptr *dtr)
{
asm volatile("lidt %0"::"m" (*dtr));
}
static inline void native_store_gdt(struct desc_ptr *dtr)
{
asm volatile("sgdt %0":"=m" (*dtr));
}
static inline void native_store_idt(struct desc_ptr *dtr)
{
asm volatile("sidt %0":"=m" (*dtr));
}
static inline unsigned long native_store_tr(void)
{
unsigned long tr;
asm volatile("str %0":"=r" (tr));
return tr;
}
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
unsigned int i;
struct desc_struct *gdt = get_cpu_gdt_table(cpu);
for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
#define _LDT_empty(info) \
((info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
(info)->read_exec_only == 1 && \
(info)->seg_32bit == 0 && \
(info)->limit_in_pages == 0 && \
(info)->seg_not_present == 1 && \
(info)->useable == 0)
#ifdef CONFIG_X86_64
#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
#else
#define LDT_empty(info) (_LDT_empty(info))
#endif
static inline void clear_LDT(void)
{
set_ldt(NULL, 0);
}
/*
* load one particular LDT into the current CPU
*/
static inline void load_LDT_nolock(mm_context_t *pc)
{
set_ldt(pc->ldt, pc->size);
}
static inline void load_LDT(mm_context_t *pc)
{
preempt_disable();
load_LDT_nolock(pc);
preempt_enable();
}
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
}
static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
return desc->limit0 | (desc->limit << 16);
}
static inline void _set_gate(int gate, unsigned type, void *addr,
unsigned dpl, unsigned ist, unsigned seg)
{
gate_desc s;
pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
/*
* does not need to be atomic because it is only done once at
* setup time
*/
write_idt_entry(idt_table, gate, &s);
}
/*
* This needs to use 'idt_table' rather than 'idt', and
* thus use the _nonmapped_ version of the IDT, as the
* Pentium F0 0F bugfix can have resulted in the mapped
* IDT being write-protected.
*/
static inline void set_intr_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
}
#define SYS_VECTOR_FREE 0
#define SYS_VECTOR_ALLOCED 1
extern int first_system_vector;
extern char system_vectors[];
static inline void alloc_system_vector(int vector)
{
if (system_vectors[vector] == SYS_VECTOR_FREE) {
system_vectors[vector] = SYS_VECTOR_ALLOCED;
if (first_system_vector > vector)
first_system_vector = vector;
} else
BUG();
}
static inline void alloc_intr_gate(unsigned int n, void *addr)
{
alloc_system_vector(n);
set_intr_gate(n, addr);
}
/*
* This routine sets up an interrupt gate at descriptor privilege level 3.
*/
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}
static inline void set_system_trap_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
}
static inline void set_trap_gate(unsigned int n, void *addr)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
}
static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}
static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}
static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
BUG_ON((unsigned)n > 0xFF);
_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}
#else
/*
* GET_DESC_BASE reads the descriptor base of the specified segment.
*
* Args:
* idx - descriptor index
* gdt - GDT pointer
* base - 32bit register to which the base will be written
* lo_w - lo word of the "base" register
* lo_b - lo byte of the "base" register
* hi_b - hi byte of the low word of the "base" register
*
* Example:
* GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
* Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax.
*/
#define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \
movb idx * 8 + 4(gdt), lo_b; \
movb idx * 8 + 7(gdt), hi_b; \
shll $16, base; \
movw idx * 8 + 2(gdt), lo_w;
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_DESC_H */
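
A usage sketch for the gate helpers: a driver-defined vector is claimed and wired at init time. The vector number and the assembly entry stub are hypothetical, and alloc_intr_gate() BUG()s if the vector is already taken:

extern void my_ipi_entry(void);		/* hypothetical asm entry stub */

static void __init setup_my_vector(void)
{
	/* reserves the vector in system_vectors[], then set_intr_gate() */
	alloc_intr_gate(0xf4, my_ipi_entry);
}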

View File

@@ -0,0 +1,95 @@
/* Written 2000 by Andi Kleen */
#ifndef _ASM_X86_DESC_DEFS_H
#define _ASM_X86_DESC_DEFS_H
/*
* Segment descriptor structure definitions, usable from both x86_64 and i386
* archs.
*/
#ifndef __ASSEMBLY__
#include <linux/types.h>
/*
* FIXME: Accessing the desc_struct through its fields is more elegant,
* and should be the one valid thing to do. However, a lot of open code
* still touches the a and b accessors, and doing this allows us to do it
* incrementally. We keep the signature as a struct, rather than a union,
* so we can get rid of it transparently in the future -- glommer
*/
/* 8 byte segment descriptor */
struct desc_struct {
union {
struct {
unsigned int a;
unsigned int b;
};
struct {
u16 limit0;
u16 base0;
unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
};
};
} __attribute__((packed));
enum {
GATE_INTERRUPT = 0xE,
GATE_TRAP = 0xF,
GATE_CALL = 0xC,
GATE_TASK = 0x5,
};
/* 16byte gate */
struct gate_struct64 {
u16 offset_low;
u16 segment;
unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
u16 offset_middle;
u32 offset_high;
u32 zero1;
} __attribute__((packed));
#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)
enum {
DESC_TSS = 0x9,
DESC_LDT = 0x2,
DESCTYPE_S = 0x10, /* !system */
};
/* LDT or TSS descriptor in the GDT. 16 bytes. */
struct ldttss_desc64 {
u16 limit0;
u16 base0;
unsigned base1 : 8, type : 5, dpl : 2, p : 1;
unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
u32 base3;
u32 zero1;
} __attribute__((packed));
#ifdef CONFIG_X86_64
typedef struct gate_struct64 gate_desc;
typedef struct ldttss_desc64 ldt_desc;
typedef struct ldttss_desc64 tss_desc;
#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
#define gate_segment(g) ((g).segment)
#else
typedef struct desc_struct gate_desc;
typedef struct desc_struct ldt_desc;
typedef struct desc_struct tss_desc;
#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
#define gate_segment(g) ((g).a >> 16)
#endif
struct desc_ptr {
unsigned short size;
unsigned long address;
} __attribute__((packed)) ;
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_DESC_DEFS_H */

View File

@@ -0,0 +1,16 @@
#ifndef _ASM_X86_DEVICE_H
#define _ASM_X86_DEVICE_H
struct dev_archdata {
#ifdef CONFIG_ACPI
void *acpi_handle;
#endif
#ifdef CONFIG_X86_64
struct dma_mapping_ops *dma_ops;
#endif
#ifdef CONFIG_DMAR
void *iommu; /* hook for IOMMU specific extension */
#endif
};
#endif /* _ASM_X86_DEVICE_H */

View File

@@ -0,0 +1,60 @@
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H
#ifdef CONFIG_X86_32
#include <linux/types.h>
/*
* do_div() is NOT a C function. It wants to return
* two values (the quotient and the remainder), but
* since that doesn't work very well in C, what it
* does is:
*
* - modifies the 64-bit dividend _in_place_
* - returns the 32-bit remainder
*
* This ends up being the most efficient "calling
* convention" on x86.
*/
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
union {
u64 v64;
u32 v32[2];
} d = { dividend };
u32 upper;
upper = d.v32[1];
d.v32[1] = 0;
if (upper >= divisor) {
d.v32[1] = upper / divisor;
upper %= divisor;
}
asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
"rm" (divisor), "0" (d.v32[0]), "1" (upper));
return d.v64;
}
#define div_u64_rem div_u64_rem
#else
# include <asm-generic/div64.h>
#endif /* CONFIG_X86_32 */
#endif /* _ASM_X86_DIV64_H */
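
Because do_div() evaluates to the remainder while rewriting its first argument, one call yields both halves of a 64/32 division. A small sketch with a hypothetical helper:

static void split_ns(u64 ns, u64 *secs, u32 *rem_ns)
{
	u32 rem = do_div(ns, 1000000000);	/* ns now holds the quotient */

	*secs = ns;
	*rem_ns = rem;
}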

View File

@@ -0,0 +1,308 @@
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H
/*
* IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
* documentation.
*/
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
struct dma_mapping_ops {
int (*mapping_error)(struct device *dev,
dma_addr_t dma_addr);
void* (*alloc_coherent)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
size_t size, int direction);
void (*unmap_single)(struct device *dev, dma_addr_t addr,
size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, size_t size,
int direction);
void (*sync_single_for_device)(struct device *hwdev,
dma_addr_t dma_handle, size_t size,
int direction);
void (*sync_single_range_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, unsigned long offset,
size_t size, int direction);
void (*sync_single_range_for_device)(struct device *hwdev,
dma_addr_t dma_handle, unsigned long offset,
size_t size, int direction);
void (*sync_sg_for_cpu)(struct device *hwdev,
struct scatterlist *sg, int nelems,
int direction);
void (*sync_sg_for_device)(struct device *hwdev,
struct scatterlist *sg, int nelems,
int direction);
int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
int nents, int direction);
void (*unmap_sg)(struct device *hwdev,
struct scatterlist *sg, int nents,
int direction);
int (*dma_supported)(struct device *hwdev, u64 mask);
int is_phys;
};
extern struct dma_mapping_ops *dma_ops;
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
return dma_ops;
#else
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
else
return dev->archdata.dma_ops;
#endif /* CONFIG_X86_32 */
}
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
return 0;
#else
struct dma_mapping_ops *ops = get_dma_ops(dev);
if (ops->mapping_error)
return ops->mapping_error(dev, dma_addr);
return (dma_addr == bad_dma_address);
#endif
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag);
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(direction));
if (ops->unmap_single)
ops->unmap_single(dev, addr, size, direction);
}
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
return ops->map_sg(hwdev, sg, nents, direction);
}
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->unmap_sg)
ops->unmap_sg(hwdev, sg, nents, direction);
}
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->sync_single_for_cpu)
ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->sync_single_for_device)
ops->sync_single_for_device(hwdev, dma_handle, size, direction);
flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size, int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->sync_single_range_for_cpu)
ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
size, direction);
flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->sync_single_range_for_device)
ops->sync_single_range_for_device(hwdev, dma_handle,
offset, size, direction);
flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->sync_sg_for_cpu)
ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
if (ops->sync_sg_for_device)
ops->sync_sg_for_device(hwdev, sg, nelems, direction);
flush_write_buffers();
}
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction)
{
struct dma_mapping_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(direction));
return ops->map_single(dev, page_to_phys(page) + offset,
size, direction);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
size_t size, int direction)
{
dma_unmap_single(dev, addr, size, direction);
}
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
/* no easy way to get cache size on all x86, so return the
* maximum possible, to be safe */
return boot_cpu_data.x86_clflush_size;
}
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
gfp_t gfp)
{
unsigned long dma_mask = 0;
dma_mask = dev->coherent_dma_mask;
if (!dma_mask)
dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
return dma_mask;
}
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
#ifdef CONFIG_X86_64
unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
gfp |= GFP_DMA32;
#endif
return gfp;
}
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp)
{
struct dma_mapping_ops *ops = get_dma_ops(dev);
void *memory;
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
if (!dev) {
dev = &x86_dma_fallback_dev;
gfp |= GFP_DMA;
}
if (!is_device_dma_capable(dev))
return NULL;
if (!ops->alloc_coherent)
return NULL;
return ops->alloc_coherent(dev, size, dma_handle,
dma_alloc_coherent_gfp_flags(dev, gfp));
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus)
{
struct dma_mapping_ops *ops = get_dma_ops(dev);
WARN_ON(irqs_disabled()); /* for portability */
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
if (ops->free_coherent)
ops->free_coherent(dev, size, vaddr, bus);
}
#endif /* _ASM_X86_DMA_MAPPING_H */
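
All of the wrappers above assume the same streaming-DMA discipline from callers; a sketch of the canonical pattern, with a hypothetical device, buffer, and length (DMA_TO_DEVICE comes from the generic DMA headers):

static int send_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -EIO;	/* never hand a bad handle to hardware */

	/* ... point the device at handle and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}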

arch/x86/include/asm/dma.h Normal file
View File

@@ -0,0 +1,318 @@
/*
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
*/
#ifndef _ASM_X86_DMA_H
#define _ASM_X86_DMA_H
#include <linux/spinlock.h> /* And spinlocks */
#include <asm/io.h> /* need byte IO */
#include <linux/delay.h>
#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
#define dma_outb outb_p
#else
#define dma_outb outb
#endif
#define dma_inb inb
/*
* NOTES about DMA transfers:
*
* controller 1: channels 0-3, byte operations, ports 00-1F
* controller 2: channels 4-7, word operations, ports C0-DF
*
* - ALL registers are 8 bits only, regardless of transfer size
* - channel 4 is not used - cascades 1 into 2.
* - channels 0-3 are byte - addresses/counts are for physical bytes
* - channels 5-7 are word - addresses/counts are for physical words
* - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries
* - transfer count loaded to registers is 1 less than actual count
* - controller 2 offsets are all even (2x offsets for controller 1)
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
* DMA transfers are limited to the lower 16MB of _physical_ memory.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
*
* Address mapping for channels 0-3:
*
* A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* | ... | | ... | | ... |
* P7 ... P0 A7 ... A0 A7 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
* A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
* | ... | \ \ ... \ \ \ ... \ \
* | ... | \ \ ... \ \ \ ... \ (not used)
* | ... | \ \ ... \ \ \ ... \
* P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
* | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
* the hardware level, so odd-byte transfers aren't possible).
*
* Transfer count (_not # bytes_) is limited to 64K, represented as actual
* count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more,
* and up to 128K bytes may be transferred on channels 5-7 in one operation.
*
*/
#define MAX_DMA_CHANNELS 8
#ifdef CONFIG_X86_32
/* The maximum address that we can perform a DMA transfer to on this platform */
#define MAX_DMA_ADDRESS (PAGE_OFFSET + 0x1000000)
#else
/* 16MB ISA DMA zone */
#define MAX_DMA_PFN ((16 * 1024 * 1024) >> PAGE_SHIFT)
/* 4GB broken PCI/AGP hardware bus master zone */
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
/* Compat define for old dma zone */
#define MAX_DMA_ADDRESS ((unsigned long)__va(MAX_DMA_PFN << PAGE_SHIFT))
#endif
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA_ADDR_0 0x00 /* DMA address registers */
#define DMA_ADDR_1 0x02
#define DMA_ADDR_2 0x04
#define DMA_ADDR_3 0x06
#define DMA_ADDR_4 0xC0
#define DMA_ADDR_5 0xC4
#define DMA_ADDR_6 0xC8
#define DMA_ADDR_7 0xCC
#define DMA_CNT_0 0x01 /* DMA count registers */
#define DMA_CNT_1 0x03
#define DMA_CNT_2 0x05
#define DMA_CNT_3 0x07
#define DMA_CNT_4 0xC2
#define DMA_CNT_5 0xC6
#define DMA_CNT_6 0xCA
#define DMA_CNT_7 0xCE
#define DMA_PAGE_0 0x87 /* DMA page registers */
#define DMA_PAGE_1 0x83
#define DMA_PAGE_2 0x81
#define DMA_PAGE_3 0x82
#define DMA_PAGE_5 0x8B
#define DMA_PAGE_6 0x89
#define DMA_PAGE_7 0x8A
/* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_READ 0x44
/* memory to I/O, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48
/* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_MODE_CASCADE 0xC0
#define DMA_AUTOINIT 0x10
extern spinlock_t dma_spin_lock;
static inline unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static inline void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* enable/disable a specific DMA channel */
static inline void enable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
static inline void disable_dma(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static inline void clear_dma_ff(unsigned int dmanr)
{
if (dmanr <= 3)
dma_outb(0, DMA1_CLEAR_FF_REG);
else
dma_outb(0, DMA2_CLEAR_FF_REG);
}
/* set mode (above) for a specific DMA channel */
static inline void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr <= 3)
dma_outb(mode | dmanr, DMA1_MODE_REG);
else
dma_outb(mode | (dmanr & 3), DMA2_MODE_REG);
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static inline void set_dma_page(unsigned int dmanr, char pagenr)
{
switch (dmanr) {
case 0:
dma_outb(pagenr, DMA_PAGE_0);
break;
case 1:
dma_outb(pagenr, DMA_PAGE_1);
break;
case 2:
dma_outb(pagenr, DMA_PAGE_2);
break;
case 3:
dma_outb(pagenr, DMA_PAGE_3);
break;
case 5:
dma_outb(pagenr & 0xfe, DMA_PAGE_5);
break;
case 6:
dma_outb(pagenr & 0xfe, DMA_PAGE_6);
break;
case 7:
dma_outb(pagenr & 0xfe, DMA_PAGE_7);
break;
}
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static inline void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
if (dmanr <= 3) {
dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE);
} else {
dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE);
}
}
/* Set transfer size (max 64k for DMA0..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static inline void set_dma_count(unsigned int dmanr, unsigned int count)
{
count--;
if (dmanr <= 3) {
dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
dma_outb((count >> 8) & 0xff,
((dmanr & 3) << 1) + 1 + IO_DMA1_BASE);
} else {
dma_outb((count >> 1) & 0xff,
((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
dma_outb((count >> 9) & 0xff,
((dmanr & 3) << 2) + 2 + IO_DMA2_BASE);
}
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static inline int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port;
/* using short to get 16-bit wrap around */
unsigned short count;
io_port = (dmanr <= 3) ? ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE
: ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr <= 3) ? count : (count << 1);
}
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char *device_id);
extern void free_dma(unsigned int dmanr);
/* From PCI */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_X86_DMA_H */
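
The helpers above are meant to run in a fixed sequence under the DMA spinlock. A sketch of the classic ISA setup for a byte channel; the channel, address, and count are hypothetical, the caller is assumed to have done request_dma() already, and addr must be a physical address below 16MB that does not cross a 64K boundary:

static void start_isa_dma_read(unsigned int chan, unsigned int addr,
			       unsigned int count)
{
	unsigned long flags = claim_dma_lock();

	disable_dma(chan);
	clear_dma_ff(chan);	/* put the byte flip-flop in a known state */
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, addr);		/* physical address */
	set_dma_count(chan, count);		/* in bytes */
	enable_dma(chan);

	release_dma_lock(flags);
}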

View File

@@ -0,0 +1,26 @@
#ifndef _ASM_X86_DMI_H
#define _ASM_X86_DMI_H
#include <asm/io.h>
#define DMI_MAX_DATA 2048
extern int dmi_alloc_index;
extern char dmi_alloc_data[DMI_MAX_DATA];
/* This is so early that there is no good way to allocate dynamic memory.
Allocate data in a BSS array. */
static inline void *dmi_alloc(unsigned len)
{
int idx = dmi_alloc_index;
if ((dmi_alloc_index + len) > DMI_MAX_DATA)
return NULL;
dmi_alloc_index += len;
return dmi_alloc_data + idx;
}
/* Use early IO mappings for DMI because it's initialized early */
#define dmi_ioremap early_ioremap
#define dmi_iounmap early_iounmap
#endif /* _ASM_X86_DMI_H */
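
dmi_alloc() carves pieces off the fixed BSS pool above because it runs before
the normal allocators exist. A minimal usage sketch; 'src' and 'len' are
hypothetical:

    /* copy a DMI string into the early pool; NULL once the pool is spent */
    char *early_copy = dmi_alloc(len);
    if (early_copy)
        memcpy(early_copy, src, len);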

arch/x86/include/asm/ds.h

@@ -0,0 +1,238 @@
/*
* Debug Store (DS) support
*
* This provides a low-level interface to the hardware's Debug Store
* feature that is used for branch trace store (BTS) and
* precise-event based sampling (PEBS).
*
* It manages:
* - per-thread and per-cpu allocation of BTS and PEBS
* - buffer memory allocation (optional)
* - buffer overflow handling
* - buffer access
*
* It assumes:
* - get_task_struct on all parameter tasks
* - current is allowed to trace parameter tasks
*
*
* Copyright (C) 2007-2008 Intel Corporation.
* Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
*/
#ifndef _ASM_X86_DS_H
#define _ASM_X86_DS_H
#ifdef CONFIG_X86_DS
#include <linux/types.h>
#include <linux/init.h>
struct task_struct;
/*
* Request BTS or PEBS
*
* Due to alignment constraints, the actual buffer may be slightly
* smaller than the requested or provided buffer.
*
* Returns 0 on success; -Eerrno otherwise
*
* task: the task to request recording for;
* NULL for per-cpu recording on the current cpu
* base: the base pointer for the (non-pageable) buffer;
* NULL if buffer allocation requested
* size: the size of the requested or provided buffer
* ovfl: pointer to a function to be called on buffer overflow;
* NULL if cyclic buffer requested
*/
typedef void (*ds_ovfl_callback_t)(struct task_struct *);
extern int ds_request_bts(struct task_struct *task, void *base, size_t size,
ds_ovfl_callback_t ovfl);
extern int ds_request_pebs(struct task_struct *task, void *base, size_t size,
ds_ovfl_callback_t ovfl);
/*
* Release BTS or PEBS resources
*
* Frees buffers allocated on ds_request.
*
* Returns 0 on success; -Eerrno otherwise
*
* task: the task to release resources for;
* NULL to release resources for the current cpu
*/
extern int ds_release_bts(struct task_struct *task);
extern int ds_release_pebs(struct task_struct *task);
/*
* Return the (array) index of the write pointer.
* (assuming an array of BTS/PEBS records)
*
* Returns -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
* pos (out): if not NULL, will hold the result
*/
extern int ds_get_bts_index(struct task_struct *task, size_t *pos);
extern int ds_get_pebs_index(struct task_struct *task, size_t *pos);
/*
* Return the (array) index one record beyond the end of the array.
* (assuming an array of BTS/PEBS records)
*
* Returns -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
* pos (out): if not NULL, will hold the result
*/
extern int ds_get_bts_end(struct task_struct *task, size_t *pos);
extern int ds_get_pebs_end(struct task_struct *task, size_t *pos);
/*
* Provide a pointer to the BTS/PEBS record at parameter index.
* (assuming an array of BTS/PEBS records)
*
* The pointer points directly into the buffer. The user is
* responsible for copying the record.
*
* Returns the size of a single record on success; -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
* index: the index of the requested record
* record (out): pointer to the requested record
*/
extern int ds_access_bts(struct task_struct *task,
size_t index, const void **record);
extern int ds_access_pebs(struct task_struct *task,
size_t index, const void **record);
/*
* Write one or more BTS/PEBS records at the write pointer index and
* advance the write pointer.
*
* If size is not a multiple of the record size, trailing bytes are
* zeroed out.
*
* May result in one or more overflow notifications.
*
* If called during overflow handling, that is, with index >=
* interrupt threshold, the write will wrap around.
*
* An overflow notification is given if and when the interrupt
* threshold is reached during or after the write.
*
* Returns the number of bytes written or -Eerrno.
*
* task: the task to access;
* NULL to access the current cpu
* buffer: the buffer to write
* size: the size of the buffer
*/
extern int ds_write_bts(struct task_struct *task,
const void *buffer, size_t size);
extern int ds_write_pebs(struct task_struct *task,
const void *buffer, size_t size);
/*
* Same as ds_write_bts/pebs, but omit ownership checks.
*
* This is needed so that a task other than the owner of the
* BTS/PEBS buffer, or the parameter task itself, can write into the
* respective buffer.
*/
extern int ds_unchecked_write_bts(struct task_struct *task,
const void *buffer, size_t size);
extern int ds_unchecked_write_pebs(struct task_struct *task,
const void *buffer, size_t size);
/*
* Reset the write pointer of the BTS/PEBS buffer.
*
* Returns 0 on success; -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
*/
extern int ds_reset_bts(struct task_struct *task);
extern int ds_reset_pebs(struct task_struct *task);
/*
* Clear the BTS/PEBS buffer and reset the write pointer.
* The entire buffer will be zeroed out.
*
* Returns 0 on success; -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
*/
extern int ds_clear_bts(struct task_struct *task);
extern int ds_clear_pebs(struct task_struct *task);
/*
* Provide the PEBS counter reset value.
*
* Returns 0 on success; -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
* value (out): the counter reset value
*/
extern int ds_get_pebs_reset(struct task_struct *task, u64 *value);
/*
* Set the PEBS counter reset value.
*
* Returns 0 on success; -Eerrno on error
*
* task: the task to access;
* NULL to access the current cpu
* value: the new counter reset value
*/
extern int ds_set_pebs_reset(struct task_struct *task, u64 value);
/*
* Initialization
*/
struct cpuinfo_x86;
extern void __cpuinit ds_init_intel(struct cpuinfo_x86 *);
/*
* The DS context - part of struct thread_struct.
*/
struct ds_context {
/* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
unsigned char *ds;
/* the owner of the BTS and PEBS configuration, respectively */
struct task_struct *owner[2];
/* buffer overflow notification function for BTS and PEBS */
ds_ovfl_callback_t callback[2];
/* the original buffer address */
void *buffer[2];
/* the number of allocated pages for on-request allocated buffers */
unsigned int pages[2];
/* use count */
unsigned long count;
/* a pointer to the context location inside the thread_struct
* or the per_cpu context array */
struct ds_context **this;
/* a pointer to the task owning this context, or NULL, if the
* context is owned by a cpu */
struct task_struct *task;
};
/* called by exit_thread() to free leftover contexts */
extern void ds_free(struct ds_context *context);
#else /* CONFIG_X86_DS */
#define ds_init_intel(config) do {} while (0)
#endif /* CONFIG_X86_DS */
#endif /* _ASM_X86_DS_H */
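
The comments above define a request/access/release contract. A minimal sketch
of per-cpu BTS use under that contract (NULL task for the current cpu,
layer-allocated buffer); bts_overflow and bts_snapshot_current_cpu are
illustrative names:

static void bts_overflow(struct task_struct *task)
{
    /* hypothetical notification, see ds_ovfl_callback_t above */
}

static int bts_snapshot_current_cpu(void)
{
    const void *rec;
    size_t idx, i;
    int err;

    err = ds_request_bts(NULL, NULL, PAGE_SIZE, bts_overflow);
    if (err)
        return err;
    if (!ds_get_bts_index(NULL, &idx)) {
        for (i = 0; i < idx; i++) {
            if (ds_access_bts(NULL, i, &rec) < 0)
                break;
            /* copy the record out of 'rec' here */
        }
    }
    return ds_release_bts(NULL);
}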


@@ -0,0 +1,61 @@
#ifndef _ASM_X86_DWARF2_H
#define _ASM_X86_DWARF2_H
#ifndef __ASSEMBLY__
#warning "asm/dwarf2.h should be only included in pure assembly files"
#endif
/*
Macros for dwarf2 CFI unwind table entries.
See "as.info" for details on these pseudo ops. Unfortunately
they are only supported in very new binutils, so define them
away for older versions.
*/
#ifdef CONFIG_AS_CFI
#define CFI_STARTPROC .cfi_startproc
#define CFI_ENDPROC .cfi_endproc
#define CFI_DEF_CFA .cfi_def_cfa
#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
#define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset
#define CFI_OFFSET .cfi_offset
#define CFI_REL_OFFSET .cfi_rel_offset
#define CFI_REGISTER .cfi_register
#define CFI_RESTORE .cfi_restore
#define CFI_REMEMBER_STATE .cfi_remember_state
#define CFI_RESTORE_STATE .cfi_restore_state
#define CFI_UNDEFINED .cfi_undefined
#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
#define CFI_SIGNAL_FRAME .cfi_signal_frame
#else
#define CFI_SIGNAL_FRAME
#endif
#else
/* Due to the structure of pre-existing code, don't use the assembler line
comment character # to ignore the arguments. Instead, use a dummy macro. */
.macro cfi_ignore a=0, b=0, c=0, d=0
.endm
#define CFI_STARTPROC cfi_ignore
#define CFI_ENDPROC cfi_ignore
#define CFI_DEF_CFA cfi_ignore
#define CFI_DEF_CFA_REGISTER cfi_ignore
#define CFI_DEF_CFA_OFFSET cfi_ignore
#define CFI_ADJUST_CFA_OFFSET cfi_ignore
#define CFI_OFFSET cfi_ignore
#define CFI_REL_OFFSET cfi_ignore
#define CFI_REGISTER cfi_ignore
#define CFI_RESTORE cfi_ignore
#define CFI_REMEMBER_STATE cfi_ignore
#define CFI_RESTORE_STATE cfi_ignore
#define CFI_UNDEFINED cfi_ignore
#define CFI_SIGNAL_FRAME cfi_ignore
#endif
#endif /* _ASM_X86_DWARF2_H */

arch/x86/include/asm/e820.h

@@ -0,0 +1,146 @@
#ifndef _ASM_X86_E820_H
#define _ASM_X86_E820_H
#define E820MAP 0x2d0 /* our map */
#define E820MAX 128 /* number of entries in E820MAP */
/*
* Legacy E820 BIOS limits us to 128 (E820MAX) nodes due to the
* constrained space in the zeropage. If we have more nodes than
* that, and if we've booted off EFI firmware, then the EFI tables
* passed us from the EFI firmware can list more nodes. Size our
* internal memory map tables to have room for these additional
* nodes, based on up to three entries per node for which the
* kernel was built: MAX_NUMNODES == (1 << CONFIG_NODES_SHIFT),
* plus E820MAX, allowing space for the possible duplicate E820
* entries that might need room in the same arrays, prior to the
* call to sanitize_e820_map() to remove duplicates. The allowance
* of three memory map entries per node is "enough" entries for
* the initial hardware platform motivating this mechanism to make
* use of additional EFI map entries. Future platforms may want
* to allow more than three entries per node or otherwise refine
* this size.
*/
/*
* Odd: 'make headers_check' complains about numa.h if I try
* to collapse the next two #ifdef lines to a single line:
* #if defined(__KERNEL__) && defined(CONFIG_EFI)
*/
#ifdef __KERNEL__
#ifdef CONFIG_EFI
#include <linux/numa.h>
#define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES)
#else /* ! CONFIG_EFI */
#define E820_X_MAX E820MAX
#endif
#else /* ! __KERNEL__ */
#define E820_X_MAX E820MAX
#endif
#define E820NR 0x1e8 /* # entries in E820MAP */
#define E820_RAM 1
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
/* reserved RAM used by kernel itself */
#define E820_RESERVED_KERN 128
#ifndef __ASSEMBLY__
struct e820entry {
__u64 addr; /* start of memory segment */
__u64 size; /* size of memory segment */
__u32 type; /* type of memory segment */
} __attribute__((packed));
struct e820map {
__u32 nr_map;
struct e820entry map[E820_X_MAX];
};
#ifdef __KERNEL__
/* see comment in arch/x86/kernel/e820.c */
extern struct e820map e820;
extern struct e820map e820_saved;
extern unsigned long pci_mem_start;
extern int e820_any_mapped(u64 start, u64 end, unsigned type);
extern int e820_all_mapped(u64 start, u64 end, unsigned type);
extern void e820_add_region(u64 start, u64 size, int type);
extern void e820_print_map(char *who);
extern int
sanitize_e820_map(struct e820entry *biosmap, int max_nr_map, int *pnr_map);
extern u64 e820_update_range(u64 start, u64 size, unsigned old_type,
unsigned new_type);
extern u64 e820_remove_range(u64 start, u64 size, unsigned old_type,
int checktype);
extern void update_e820(void);
extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
extern void parse_e820_ext(struct setup_data *data, unsigned long pa_data);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
extern void e820_mark_nosave_regions(unsigned long limit_pfn);
#else
static inline void e820_mark_nosave_regions(unsigned long limit_pfn)
{
}
#endif
#ifdef CONFIG_MEMTEST
extern void early_memtest(unsigned long start, unsigned long end);
#else
static inline void early_memtest(unsigned long start, unsigned long end)
{
}
#endif
extern unsigned long end_user_pfn;
extern u64 find_e820_area(u64 start, u64 end, u64 size, u64 align);
extern u64 find_e820_area_size(u64 start, u64 *sizep, u64 align);
extern void reserve_early(u64 start, u64 end, char *name);
extern void reserve_early_overlap_ok(u64 start, u64 end, char *name);
extern void free_early(u64 start, u64 end);
extern void early_res_to_bootmem(u64 start, u64 end);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern int e820_find_active_region(const struct e820entry *ei,
unsigned long start_pfn,
unsigned long last_pfn,
unsigned long *ei_startpfn,
unsigned long *ei_endpfn);
extern void e820_register_active_regions(int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern u64 e820_hole_size(u64 start, u64 end);
extern void finish_e820_parsing(void);
extern void e820_reserve_resources(void);
extern void e820_reserve_resources_late(void);
extern void setup_memory_map(void);
extern char *default_machine_specific_memory_setup(void);
extern char *machine_specific_memory_setup(void);
extern char *memory_setup(void);
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000
#define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
#define BIOS_BEGIN 0x000a0000
#define BIOS_END 0x00100000
#ifdef __KERNEL__
#include <linux/ioport.h>
#define HIGH_MEMORY (1024*1024)
#endif /* __KERNEL__ */
#endif /* _ASM_X86_E820_H */
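
The nr_map/map[] layout and type constants above are all a consumer needs to
walk the firmware map, much as e820_print_map() and the e820_*_mapped()
helpers do. A minimal sketch; the function name is illustrative:

static u64 e820_total_ram(void)
{
    u64 total = 0;
    __u32 i;

    for (i = 0; i < e820.nr_map; i++) {
        struct e820entry *ei = &e820.map[i];

        if (ei->type == E820_RAM)
            total += ei->size;
    }
    return total;
}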


@@ -0,0 +1,18 @@
#ifndef _ASM_X86_EDAC_H
#define _ASM_X86_EDAC_H
/* ECC atomic, DMA, SMP and interrupt safe scrub function */
static inline void atomic_scrub(void *va, u32 size)
{
u32 i, *virt_addr = va;
/*
* Very carefully read and write to memory atomically so we
* are interrupt, DMA and SMP safe.
*/
for (i = 0; i < size / 4; i++, virt_addr++)
asm volatile("lock; addl $0, %0"::"m" (*virt_addr));
}
#endif /* _ASM_X86_EDAC_H */

arch/x86/include/asm/efi.h

@@ -0,0 +1,110 @@
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
#ifdef CONFIG_X86_32
extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define efi_call_phys0(f) efi_call_phys(f)
#define efi_call_phys1(f, a1) efi_call_phys(f, a1)
#define efi_call_phys2(f, a1, a2) efi_call_phys(f, a1, a2)
#define efi_call_phys3(f, a1, a2, a3) efi_call_phys(f, a1, a2, a3)
#define efi_call_phys4(f, a1, a2, a3, a4) \
efi_call_phys(f, a1, a2, a3, a4)
#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
efi_call_phys(f, a1, a2, a3, a4, a5)
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
efi_call_phys(f, a1, a2, a3, a4, a5, a6)
/*
* Wrap all the virtual calls in a way that forces the parameters on the stack.
*/
#define efi_call_virt(f, args...) \
((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
#define efi_call_virt0(f) efi_call_virt(f)
#define efi_call_virt1(f, a1) efi_call_virt(f, a1)
#define efi_call_virt2(f, a1, a2) efi_call_virt(f, a1, a2)
#define efi_call_virt3(f, a1, a2, a3) efi_call_virt(f, a1, a2, a3)
#define efi_call_virt4(f, a1, a2, a3, a4) \
efi_call_virt(f, a1, a2, a3, a4)
#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
efi_call_virt(f, a1, a2, a3, a4, a5)
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
#define efi_ioremap(addr, size) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
#define MAX_EFI_IO_PAGES 100
extern u64 efi_call0(void *fp);
extern u64 efi_call1(void *fp, u64 arg1);
extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
u64 arg4, u64 arg5);
extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
u64 arg4, u64 arg5, u64 arg6);
#define efi_call_phys0(f) \
efi_call0((void *)(f))
#define efi_call_phys1(f, a1) \
efi_call1((void *)(f), (u64)(a1))
#define efi_call_phys2(f, a1, a2) \
efi_call2((void *)(f), (u64)(a1), (u64)(a2))
#define efi_call_phys3(f, a1, a2, a3) \
efi_call3((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3))
#define efi_call_phys4(f, a1, a2, a3, a4) \
efi_call4((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4))
#define efi_call_phys5(f, a1, a2, a3, a4, a5) \
efi_call5((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4), (u64)(a5))
#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6) \
efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4), (u64)(a5), (u64)(a6))
#define efi_call_virt0(f) \
efi_call0((void *)(efi.systab->runtime->f))
#define efi_call_virt1(f, a1) \
efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
#define efi_call_virt2(f, a1, a2) \
efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
#define efi_call_virt3(f, a1, a2, a3) \
efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3))
#define efi_call_virt4(f, a1, a2, a3, a4) \
efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4))
#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5))
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size);
#endif /* CONFIG_X86_32 */
extern void efi_reserve_early(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
#ifndef CONFIG_EFI
/*
* If EFI is not configured, have the EFI calls return -ENOSYS.
*/
#define efi_call0(_f) (-ENOSYS)
#define efi_call1(_f, _a1) (-ENOSYS)
#define efi_call2(_f, _a1, _a2) (-ENOSYS)
#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
#endif /* CONFIG_EFI */
#endif /* _ASM_X86_EFI_H */
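
Call sites look the same on both variants even though the 32-bit wrappers are
regparm(0) thunks and the 64-bit ones marshal arguments through u64. A sketch
of a two-argument runtime-service call, assuming efi_time_t, efi_time_cap_t
and EFI_SUCCESS from <linux/efi.h>:

    efi_time_t tm;
    efi_time_cap_t cap;
    efi_status_t status;

    /* expands to a call through efi.systab->runtime->get_time */
    status = efi_call_virt2(get_time, &tm, &cap);
    if (status != EFI_SUCCESS)
        printk(KERN_WARNING "efi: get_time failed\n");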

arch/x86/include/asm/elf.h

@@ -0,0 +1,336 @@
#ifndef _ASM_X86_ELF_H
#define _ASM_X86_ELF_H
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/auxvec.h>
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
#ifdef __i386__
typedef struct user_fxsr_struct elf_fpxregset_t;
#define R_386_NONE 0
#define R_386_32 1
#define R_386_PC32 2
#define R_386_GOT32 3
#define R_386_PLT32 4
#define R_386_COPY 5
#define R_386_GLOB_DAT 6
#define R_386_JMP_SLOT 7
#define R_386_RELATIVE 8
#define R_386_GOTOFF 9
#define R_386_GOTPC 10
#define R_386_NUM 11
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
#else
/* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
#define R_X86_64_64 1 /* Direct 64 bit */
#define R_X86_64_PC32 2 /* PC relative 32 bit signed */
#define R_X86_64_GOT32 3 /* 32 bit GOT entry */
#define R_X86_64_PLT32 4 /* 32 bit PLT address */
#define R_X86_64_COPY 5 /* Copy symbol at runtime */
#define R_X86_64_GLOB_DAT 6 /* Create GOT entry */
#define R_X86_64_JUMP_SLOT 7 /* Create PLT entry */
#define R_X86_64_RELATIVE 8 /* Adjust by program base */
#define R_X86_64_GOTPCREL 9 /* 32 bit signed pc relative
offset to GOT */
#define R_X86_64_32 10 /* Direct 32 bit zero extended */
#define R_X86_64_32S 11 /* Direct 32 bit sign extended */
#define R_X86_64_16 12 /* Direct 16 bit zero extended */
#define R_X86_64_PC16 13 /* 16 bit sign extended pc relative */
#define R_X86_64_8 14 /* Direct 8 bit sign extended */
#define R_X86_64_PC8 15 /* 8 bit sign extended pc relative */
#define R_X86_64_NUM 16
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_X86_64
#endif
#include <asm/vdso.h>
extern unsigned int vdso_enabled;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch_ia32(x) \
(((x)->e_machine == EM_386) || ((x)->e_machine == EM_486))
#include <asm/processor.h>
#include <asm/system.h>
#ifdef CONFIG_X86_32
#include <asm/desc.h>
#define elf_check_arch(x) elf_check_arch_ia32(x)
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
contains a pointer to a function which might be registered using `atexit'.
This provides a means for the dynamic linker to call DT_FINI functions for
shared libraries that have been loaded before the code runs.
A value of 0 tells us we have no such handler.
We might as well make sure everything else is cleared too (except for %esp),
just to make things more deterministic.
*/
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
_r->bx = 0; _r->cx = 0; _r->dx = 0; \
_r->si = 0; _r->di = 0; _r->bp = 0; \
_r->ax = 0; \
} while (0)
/*
* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
* now struct user_regs_struct; the two layouts differ)
*/
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
pr_reg[0] = regs->bx; \
pr_reg[1] = regs->cx; \
pr_reg[2] = regs->dx; \
pr_reg[3] = regs->si; \
pr_reg[4] = regs->di; \
pr_reg[5] = regs->bp; \
pr_reg[6] = regs->ax; \
pr_reg[7] = regs->ds & 0xffff; \
pr_reg[8] = regs->es & 0xffff; \
pr_reg[9] = regs->fs & 0xffff; \
savesegment(gs, pr_reg[10]); \
pr_reg[11] = regs->orig_ax; \
pr_reg[12] = regs->ip; \
pr_reg[13] = regs->cs & 0xffff; \
pr_reg[14] = regs->flags; \
pr_reg[15] = regs->sp; \
pr_reg[16] = regs->ss & 0xffff; \
} while (0);
#define ELF_PLATFORM (utsname()->machine)
#define set_personality_64bit() do { } while (0)
#else /* CONFIG_X86_32 */
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) \
((x)->e_machine == EM_X86_64)
#define compat_elf_check_arch(x) elf_check_arch_ia32(x)
static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
{
loadsegment(fs, 0);
loadsegment(ds, __USER32_DS);
loadsegment(es, __USER32_DS);
load_gs_index(0);
regs->ip = ip;
regs->sp = sp;
regs->flags = X86_EFLAGS_IF;
regs->cs = __USER32_CS;
regs->ss = __USER32_DS;
}
static inline void elf_common_init(struct thread_struct *t,
struct pt_regs *regs, const u16 ds)
{
regs->ax = regs->bx = regs->cx = regs->dx = 0;
regs->si = regs->di = regs->bp = 0;
regs->r8 = regs->r9 = regs->r10 = regs->r11 = 0;
regs->r12 = regs->r13 = regs->r14 = regs->r15 = 0;
t->fs = t->gs = 0;
t->fsindex = t->gsindex = 0;
t->ds = t->es = ds;
}
#define ELF_PLAT_INIT(_r, load_addr) \
do { \
elf_common_init(&current->thread, _r, 0); \
clear_thread_flag(TIF_IA32); \
} while (0)
#define COMPAT_ELF_PLAT_INIT(regs, load_addr) \
elf_common_init(&current->thread, regs, __USER_DS)
#define compat_start_thread(regs, ip, sp) \
do { \
start_ia32_thread(regs, ip, sp); \
set_fs(USER_DS); \
} while (0)
#define COMPAT_SET_PERSONALITY(ex) \
do { \
if (test_thread_flag(TIF_IA32)) \
clear_thread_flag(TIF_ABI_PENDING); \
else \
set_thread_flag(TIF_ABI_PENDING); \
current->personality |= force_personality32; \
} while (0)
#define COMPAT_ELF_PLATFORM ("i686")
/*
* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
* now struct user_regs_struct; the two layouts differ). Assumes
* current is the process getting dumped.
*/
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
(pr_reg)[2] = (regs)->r13; \
(pr_reg)[3] = (regs)->r12; \
(pr_reg)[4] = (regs)->bp; \
(pr_reg)[5] = (regs)->bx; \
(pr_reg)[6] = (regs)->r11; \
(pr_reg)[7] = (regs)->r10; \
(pr_reg)[8] = (regs)->r9; \
(pr_reg)[9] = (regs)->r8; \
(pr_reg)[10] = (regs)->ax; \
(pr_reg)[11] = (regs)->cx; \
(pr_reg)[12] = (regs)->dx; \
(pr_reg)[13] = (regs)->si; \
(pr_reg)[14] = (regs)->di; \
(pr_reg)[15] = (regs)->orig_ax; \
(pr_reg)[16] = (regs)->ip; \
(pr_reg)[17] = (regs)->cs; \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
(pr_reg)[21] = current->thread.fs; \
(pr_reg)[22] = current->thread.gs; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \
asm("movl %%gs,%0" : "=r" (v)); (pr_reg)[26] = v; \
} while (0);
/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM ("x86_64")
extern void set_personality_64bit(void);
extern unsigned int sysctl_vsyscall32;
extern int force_personality32;
#endif /* !CONFIG_X86_32 */
#define CORE_DUMP_USE_REGSET
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define SET_PERSONALITY(ex) set_personality_64bit()
/*
* An executable for which elf_read_implies_exec() returns TRUE will
* have the READ_IMPLIES_EXEC personality flag set automatically.
*/
#define elf_read_implies_exec(ex, executable_stack) \
(executable_stack != EXSTACK_DISABLE_X)
struct task_struct;
#define ARCH_DLINFO_IA32(vdso_enabled) \
do { \
if (vdso_enabled) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
} while (0)
#ifdef CONFIG_X86_32
#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
#define ARCH_DLINFO ARCH_DLINFO_IA32(vdso_enabled)
/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#else /* CONFIG_X86_32 */
#define VDSO_HIGH_BASE 0xffffe000U /* CONFIG_COMPAT_VDSO address */
/* 1GB for 64bit, 8MB for 32bit */
#define STACK_RND_MASK (test_thread_flag(TIF_IA32) ? 0x7ff : 0x3fffff)
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso); \
} while (0)
#define AT_SYSINFO 32
#define COMPAT_ARCH_DLINFO ARCH_DLINFO_IA32(sysctl_vsyscall32)
#define COMPAT_ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
#endif /* !CONFIG_X86_32 */
#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
#define VDSO_ENTRY \
((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int executable_stack);
extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
#define compat_arch_setup_additional_pages syscall32_setup_pages
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#endif /* _ASM_X86_ELF_H */
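
elf_check_arch() is the gate binfmt_elf applies to every image it loads. A
minimal sketch of the check; 'hdr' stands in for a header read from the file:

    struct elfhdr hdr;    /* elf32_hdr or elf64_hdr, per build */

    if (!elf_check_arch(&hdr))
        return -ENOEXEC;  /* wrong e_machine for this kernel */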


@@ -0,0 +1,18 @@
#ifndef _ASM_X86_EMERGENCY_RESTART_H
#define _ASM_X86_EMERGENCY_RESTART_H
enum reboot_type {
BOOT_TRIPLE = 't',
BOOT_KBD = 'k',
#ifdef CONFIG_X86_32
BOOT_BIOS = 'b',
#endif
BOOT_ACPI = 'a',
BOOT_EFI = 'e'
};
extern enum reboot_type reboot_type;
extern void machine_emergency_restart(void);
#endif /* _ASM_X86_EMERGENCY_RESTART_H */


@@ -0,0 +1 @@
#include <asm-generic/errno.h>


@@ -0,0 +1,193 @@
#ifndef __ASM_ES7000_APIC_H
#define __ASM_ES7000_APIC_H
#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
#define esr_disable (1)
static inline int apic_id_registered(void)
{
return (1);
}
static inline cpumask_t target_cpus(void)
{
#if defined CONFIG_ES7000_CLUSTERED_APIC
return CPU_MASK_ALL;
#else
return cpumask_of_cpu(smp_processor_id());
#endif
}
#if defined CONFIG_ES7000_CLUSTERED_APIC
#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
#define INT_DELIVERY_MODE (dest_LowestPrio)
#define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */
#define NO_BALANCE_IRQ (1)
#undef WAKE_SECONDARY_VIA_INIT
#define WAKE_SECONDARY_VIA_MIP
#else
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#define INT_DELIVERY_MODE (dest_Fixed)
#define INT_DEST_MODE (0) /* phys delivery to target procs */
#define NO_BALANCE_IRQ (0)
#undef APIC_DEST_LOGICAL
#define APIC_DEST_LOGICAL 0x0
#define WAKE_SECONDARY_VIA_INIT
#endif
static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
{
return 0;
}
static inline unsigned long check_apicid_present(int bit)
{
return physid_isset(bit, phys_cpu_present_map);
}
#define apicid_cluster(apicid) (apicid & 0xF0)
static inline unsigned long calculate_ldr(int cpu)
{
unsigned long id;
id = xapic_phys_to_log_apicid(cpu);
return (SET_APIC_LOGICAL_ID(id));
}
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LdR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
static inline void init_apic_ldr(void)
{
unsigned long val;
int cpu = smp_processor_id();
apic_write(APIC_DFR, APIC_DFR_VALUE);
val = calculate_ldr(cpu);
apic_write(APIC_LDR, val);
}
#ifndef CONFIG_X86_GENERICARCH
extern void enable_apic_mode(void);
#endif
extern int apic_version[MAX_APICS];
static inline void setup_apic_routing(void)
{
int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
(apic_version[apic] == 0x14) ?
"Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
}
static inline int multi_timer_check(int apic, int irq)
{
return 0;
}
static inline int apicid_to_node(int logical_apicid)
{
return 0;
}
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (!mps_cpu)
return boot_cpu_physical_apicid;
else if (mps_cpu < NR_CPUS)
return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
else
return BAD_APICID;
}
static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
{
static int id = 0;
physid_mask_t mask;
mask = physid_mask_of_physid(id);
++id;
return mask;
}
extern u8 cpu_2_logical_apicid[];
/* Mapping from cpu number to logical apicid */
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
if (cpu >= NR_CPUS)
return BAD_APICID;
return (int)cpu_2_logical_apicid[cpu];
#else
return logical_smp_processor_id();
#endif
}
static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
return physids_promote(0xff);
}
static inline void setup_portio_remap(void)
{
}
extern unsigned int boot_cpu_physical_apicid;
static inline int check_phys_apicid_present(int cpu_physical_apicid)
{
boot_cpu_physical_apicid = read_apic_id();
return (1);
}
static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
{
int num_bits_set;
int cpus_found = 0;
int cpu;
int apicid;
num_bits_set = cpus_weight(cpumask);
/* Return id to all */
if (num_bits_set == NR_CPUS)
#if defined CONFIG_ES7000_CLUSTERED_APIC
return 0xFF;
#else
return cpu_to_logical_apicid(0);
#endif
/*
* The cpus in the mask must all be on the same APIC cluster. If they
* are not on the same apicid cluster, return the default value of
* TARGET_CPUS.
*/
cpu = first_cpu(cpumask);
apicid = cpu_to_logical_apicid(cpu);
while (cpus_found < num_bits_set) {
if (cpu_isset(cpu, cpumask)) {
int new_apicid = cpu_to_logical_apicid(cpu);
if (apicid_cluster(apicid) !=
apicid_cluster(new_apicid)){
printk ("%s: Not a valid mask!\n", __func__);
#if defined CONFIG_ES7000_CLUSTERED_APIC
return 0xFF;
#else
return cpu_to_logical_apicid(0);
#endif
}
apicid = new_apicid;
cpus_found++;
}
cpu++;
}
return apicid;
}
static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
}
#endif /* __ASM_ES7000_APIC_H */


@@ -0,0 +1,13 @@
#ifndef __ASM_ES7000_APICDEF_H
#define __ASM_ES7000_APICDEF_H
#define APIC_ID_MASK (0xFF<<24)
static inline unsigned get_apic_id(unsigned long x)
{
return (((x)>>24)&0xFF);
}
#define GET_APIC_ID(x) get_apic_id(x)
#endif


@@ -0,0 +1,24 @@
#ifndef __ASM_ES7000_IPI_H
#define __ASM_ES7000_IPI_H
void send_IPI_mask_sequence(cpumask_t mask, int vector);
static inline void send_IPI_mask(cpumask_t mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
send_IPI_mask(mask, vector);
}
static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_map, vector);
}
#endif /* __ASM_ES7000_IPI_H */


@@ -0,0 +1,30 @@
#ifndef __ASM_ES7000_MPPARSE_H
#define __ASM_ES7000_MPPARSE_H
#include <linux/acpi.h>
extern int parse_unisys_oem(char *oemptr);
extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
extern void setup_unisys(void);
#ifndef CONFIG_X86_GENERICARCH
extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid);
#endif
#ifdef CONFIG_ACPI
static inline int es7000_check_dsdt(void)
{
struct acpi_table_header header;
if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
!strncmp(header.oem_id, "UNISYS", 6))
return 1;
return 0;
}
#endif
#endif /* __ASM_MACH_MPPARSE_H */


@@ -0,0 +1,59 @@
#ifndef __ASM_ES7000_WAKECPU_H
#define __ASM_ES7000_WAKECPU_H
/*
* This file copes with machines that wake up secondary CPUs by the
* INIT, INIT, STARTUP sequence.
*/
#ifdef CONFIG_ES7000_CLUSTERED_APIC
#define WAKE_SECONDARY_VIA_MIP
#else
#define WAKE_SECONDARY_VIA_INIT
#endif
#ifdef WAKE_SECONDARY_VIA_MIP
extern int es7000_start_cpu(int cpu, unsigned long eip);
static inline int
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
int boot_error = 0;
boot_error = es7000_start_cpu(phys_apicid, start_eip);
return boot_error;
}
#endif
#define TRAMPOLINE_LOW phys_to_virt(0x467)
#define TRAMPOLINE_HIGH phys_to_virt(0x469)
#define boot_cpu_apicid boot_cpu_physical_apicid
static inline void wait_for_init_deassert(atomic_t *deassert)
{
#ifdef WAKE_SECONDARY_VIA_INIT
while (!atomic_read(deassert))
cpu_relax();
#endif
return;
}
/* Nothing to do for most platforms, since cleared by the INIT cycle */
static inline void smp_callin_clear_local_apic(void)
{
}
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}
#if APIC_DEBUG
#define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
#else
#define inquire_remote_apic(apicid) {}
#endif
#endif /* __ASM_MACH_WAKECPU_H */

arch/x86/include/asm/fb.h

@@ -0,0 +1,21 @@
#ifndef _ASM_X86_FB_H
#define _ASM_X86_FB_H
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
if (boot_cpu_data.x86 > 3)
pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
}
#ifdef CONFIG_X86_32
extern int fb_is_primary_device(struct fb_info *info);
#else
static inline int fb_is_primary_device(struct fb_info *info) { return 0; }
#endif
#endif /* _ASM_X86_FB_H */


@@ -0,0 +1 @@
#include <asm-generic/fcntl.h>


@@ -0,0 +1,68 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
#ifdef CONFIG_X86_32
# include "fixmap_32.h"
#else
# include "fixmap_64.h"
#endif
extern int fixmaps_set;
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags);
#ifndef CONFIG_PARAVIRT
static inline void __set_fixmap(enum fixed_addresses idx,
unsigned long phys, pgprot_t flags)
{
native_set_fixmap(idx, phys, flags);
}
#endif
#define set_fixmap(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL)
/*
* Some hardware wants to get fixmapped without caching.
*/
#define set_fixmap_nocache(idx, phys) \
__set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
#define clear_fixmap(idx) \
__set_fixmap(idx, 0, __pgprot(0))
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
extern void __this_fixmap_does_not_exist(void);
/*
* 'index to address' translation. If anyone tries to use the idx
* directly without translation, we catch the bug with a NULL-deference
* kernel oops. Illegal ranges of incoming indices are caught too.
*/
static __always_inline unsigned long fix_to_virt(const unsigned int idx)
{
/*
* this branch gets completely eliminated after inlining,
* except when someone tries to use fixaddr indices in an
* illegal way. (such as mixing up address types or using
* out-of-range indices).
*
* If it doesn't get removed, the linker will complain
* loudly with a reasonably clear error message..
*/
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
{
BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
return __virt_to_fix(vaddr);
}
#endif /* _ASM_X86_FIXMAP_H */
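
set_fixmap()/fix_to_virt() are used as a pair: install a physical page at a
compile-time index, then translate that index back to its fixed virtual
address. A sketch; FIX_EXAMPLE and phys_addr are hypothetical, and a real
user would add the index to enum fixed_addresses (see fixmap_32.h and
fixmap_64.h below):

    void __iomem *regbase;

    set_fixmap_nocache(FIX_EXAMPLE, phys_addr & PAGE_MASK);
    regbase = (void __iomem *)fix_to_virt(FIX_EXAMPLE);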


@@ -0,0 +1,123 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
#ifndef _ASM_X86_FIXMAP_32_H
#define _ASM_X86_FIXMAP_32_H
/* used by vmalloc.c, vsyscall.lds.S.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap.
*/
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process. We allocate these special addresses
* from the end of virtual memory (0xfffff000) backwards.
* This also lets us do fail-safe vmalloc(): we
* can guarantee that these special addresses and
* vmalloc()-ed addresses never overlap.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx, phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
FIX_HOLE,
FIX_VDSO,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
#endif
#ifdef CONFIG_X86_VISWS_APIC
FIX_CO_CPU, /* Cobalt timer */
FIX_CO_APIC, /* Cobalt APIC Redirection Table */
FIX_LI_PCIA, /* Lithium PCI Bridge A */
FIX_LI_PCIB, /* Lithium PCI Bridge B */
#endif
#ifdef CONFIG_X86_F00F_BUG
FIX_F00F_IDT, /* Virtual mapping for IDT */
#endif
#ifdef CONFIG_X86_CYCLONE_TIMER
FIX_CYCLONE_TIMER, /* cyclone timer register */
#endif
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
#ifdef CONFIG_PCI_MMCONFIG
FIX_PCIE_MCFG,
#endif
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*
* We round it up to the next 256-page boundary so that we
* can have a single pgd entry and a single pte table:
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_SLOTS 4
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
FIX_WP_TEST,
#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
__end_of_fixed_addresses
};
extern void reserve_top_address(unsigned long reserve);
#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_32_H */


@@ -0,0 +1,83 @@
/*
* fixmap.h: compile-time virtual memory allocation
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Ingo Molnar
*/
#ifndef _ASM_X86_FIXMAP_64_H
#define _ASM_X86_FIXMAP_64_H
#include <linux/kernel.h>
#include <asm/acpi.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <asm/vsyscall.h>
#include <asm/efi.h>
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
* compile time, but to set the physical address only
* in the boot process.
*
* These 'compile-time allocated' memory buffers are
* fixed-size 4k pages (or larger if used with an increment
* higher than 1). Use set_fixmap(idx,phys) to associate
* physical memory with fixmap indices.
*
* TLB entries of such buffers will not be flushed across
* task switches.
*/
enum fixed_addresses {
VSYSCALL_LAST_PAGE,
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+ ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
VSYSCALL_HPET,
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
FIX_IO_APIC_BASE_0,
FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
FIX_EFI_IO_MAP_LAST_PAGE,
FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
+ MAX_EFI_IO_PAGES - 1,
#ifdef CONFIG_PARAVIRT
FIX_PARAVIRT_BOOTMAP,
#endif
__end_of_permanent_fixed_addresses,
#ifdef CONFIG_ACPI
FIX_ACPI_BEGIN,
FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
#endif
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
FIX_OHCI1394_BASE,
#endif
/*
* 256 temporary boot-time mappings, used by early_ioremap(),
* before ioremap() is functional.
*
* We round it up to the next 256-page boundary so that we
* can have a single pgd entry and a single pte table:
*/
#define NR_FIX_BTMAPS 64
#define FIX_BTMAPS_SLOTS 4
FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
(__end_of_permanent_fixed_addresses & 255),
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
__end_of_fixed_addresses
};
#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
#endif /* _ASM_X86_FIXMAP_64_H */


@@ -0,0 +1,281 @@
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef _ASM_X86_FLOPPY_H
#define _ASM_X86_FLOPPY_H
#include <linux/vmalloc.h>
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a, s, vdma) \
(!(vdma) && \
((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a, s) _CROSS_64KB(a, s, use_virtual_dma & 1)
#define SW fd_routine[use_virtual_dma & 1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) inb_p(port)
#define fd_outb(value, port) outb_p(value, port)
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA, "floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count;
static int virtual_dma_residue;
static char *virtual_dma_addr;
static int virtual_dma_mode;
static int doing_pdma;
static irqreturn_t floppy_hardint(int irq, void *dev_id)
{
unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
static int calls;
static int bytes;
static int dma_wait;
#endif
if (!doing_pdma)
return floppy_interrupt(irq, dev_id);
#ifdef TRACE_FLPY_INT
if (!calls)
bytes = virtual_dma_count;
#endif
{
int lcount;
char *lptr;
st = 1;
for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
lcount; lcount--, lptr++) {
st = inb(virtual_dma_port + 4) & 0xa0;
if (st != 0xa0)
break;
if (virtual_dma_mode)
outb_p(*lptr, virtual_dma_port + 5);
else
*lptr = inb_p(virtual_dma_port + 5);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
st = inb(virtual_dma_port + 4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
if (st == 0x20)
return IRQ_HANDLED;
if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
dma_wait = 0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id);
return IRQ_HANDLED;
}
#ifdef TRACE_FLPY_INT
if (!virtual_dma_count)
dma_wait++;
#endif
return IRQ_HANDLED;
}
static void fd_disable_dma(void)
{
if (!(can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
}
static int vdma_request_dma(unsigned int dmanr, const char *device_id)
{
return 0;
}
static void vdma_nop(unsigned int dummy)
{
}
static int vdma_get_dma_residue(unsigned int dummy)
{
return virtual_dma_count + virtual_dma_residue;
}
static int fd_request_irq(void)
{
if (can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,
IRQF_DISABLED, "floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
IRQF_DISABLED, "floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
{
return __get_dma_pages(GFP_KERNEL|__GFP_NORETRY, get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long)vmalloc(size);
}
#define nodma_mem_alloc(size) vdma_mem_alloc(size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
if ((unsigned long)addr >= (unsigned long)high_memory)
vfree((void *)addr);
else
free_pages(addr, get_order(size));
}
#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if (can_use_virtual_dma == 2) {
if ((unsigned long)addr >= (unsigned long)high_memory ||
isa_virt_to_bus(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
} else {
use_virtual_dma = can_use_virtual_dma & 1;
}
}
#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
return 0;
}
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
if (CROSS_64KB(addr, size)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
#endif
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA, mode);
set_dma_addr(FLOPPY_DMA, isa_virt_to_bus(addr));
set_dma_count(FLOPPY_DMA, size);
enable_dma(FLOPPY_DMA);
return 0;
}
static struct fd_routine_l {
int (*_request_dma)(unsigned int dmanr, const char *device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
unsigned long (*_dma_mem_alloc)(unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
request_dma,
free_dma,
get_dma_residue,
dma_mem_alloc,
hard_dma_setup
},
{
vdma_request_dma,
vdma_nop,
vdma_get_dma_residue,
vdma_mem_alloc,
vdma_dma_setup
}
};
static int FDC1 = 0x3f0;
static int FDC2 = -1;
/*
* Floppy types are stored in the rtc's CMOS RAM and so rtc_lock
* is needed to prevent corrupted CMOS RAM in case "insmod floppy"
* coincides with another rtc CMOS user. Paul G.
*/
#define FLOPPY0_TYPE \
({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
val = (CMOS_READ(0x10) >> 4) & 15; \
spin_unlock_irqrestore(&rtc_lock, flags); \
val; \
})
#define FLOPPY1_TYPE \
({ \
unsigned long flags; \
unsigned char val; \
spin_lock_irqsave(&rtc_lock, flags); \
val = CMOS_READ(0x10) & 15; \
spin_unlock_irqrestore(&rtc_lock, flags); \
val; \
})
#define N_FDC 2
#define N_DRIVE 8
#define EXTRA_FLOPPY_PARAMS
#endif /* _ASM_X86_FLOPPY_H */


@@ -0,0 +1,27 @@
#ifdef __ASSEMBLY__
#include <asm/dwarf2.h>
/* The annotation hides the frame from the unwinder and makes it look
like an ordinary ebp save/restore. This avoids some special cases for
the frame pointer later. */
#ifdef CONFIG_FRAME_POINTER
.macro FRAME
pushl %ebp
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET ebp,0
movl %esp,%ebp
.endm
.macro ENDFRAME
popl %ebp
CFI_ADJUST_CFA_OFFSET -4
CFI_RESTORE ebp
.endm
#else
.macro FRAME
.endm
.macro ENDFRAME
.endm
#endif
#endif /* __ASSEMBLY__ */


@@ -0,0 +1,24 @@
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H
#ifdef CONFIG_FTRACE
#define MCOUNT_ADDR ((long)(mcount))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
#ifndef __ASSEMBLY__
extern void mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
/*
* call mcount is "e8 <4 byte offset>"
* The addr points to the 4 byte offset and the caller of this
* function wants the pointer to e8. Simply subtract one.
*/
return addr - 1;
}
#endif
#endif /* CONFIG_FTRACE */
#endif /* _ASM_X86_FTRACE_H */
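
A worked example of the adjustment, with hypothetical addresses: if the
five-byte "call mcount" starts at 0x1000, the e8 opcode sits at 0x1000 and
the 4-byte offset at 0x1001; the caller of ftrace_call_adjust() sees 0x1001,
so:

    unsigned long ip = ftrace_call_adjust(0x1001UL);    /* == 0x1000 */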


@@ -0,0 +1,140 @@
#ifndef _ASM_X86_FUTEX_H
#define _ASM_X86_FUTEX_H
#ifdef __KERNEL__
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/system.h>
#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
asm volatile("1:\t" insn "\n" \
"2:\t.section .fixup,\"ax\"\n" \
"3:\tmov\t%3, %1\n" \
"\tjmp\t2b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
: "i" (-EFAULT), "0" (oparg), "1" (0))
#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
asm volatile("1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
"2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t1b\n" \
"3:\t.section .fixup,\"ax\"\n" \
"4:\tmov\t%5, %1\n" \
"\tjmp\t3b\n" \
"\t.previous\n" \
_ASM_EXTABLE(1b, 4b) \
_ASM_EXTABLE(2b, 4b) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
/* Real i386 machines can only support FUTEX_OP_SET */
if (op != FUTEX_OP_SET && boot_cpu_data.x86 == 3)
return -ENOSYS;
#endif
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
}
pagefault_enable();
if (!ret) {
switch (cmp) {
case FUTEX_OP_CMP_EQ:
ret = (oldval == cmparg);
break;
case FUTEX_OP_CMP_NE:
ret = (oldval != cmparg);
break;
case FUTEX_OP_CMP_LT:
ret = (oldval < cmparg);
break;
case FUTEX_OP_CMP_GE:
ret = (oldval >= cmparg);
break;
case FUTEX_OP_CMP_LE:
ret = (oldval <= cmparg);
break;
case FUTEX_OP_CMP_GT:
ret = (oldval > cmparg);
break;
default:
ret = -ENOSYS;
}
}
return ret;
}
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
int newval)
{
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
/* Real i386 machines have no cmpxchg instruction */
if (boot_cpu_data.x86 == 3)
return -ENOSYS;
#endif
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
"2:\t.section .fixup, \"ax\"\n"
"3:\tmov %2, %0\n"
"\tjmp 2b\n"
"\t.previous\n"
_ASM_EXTABLE(1b, 3b)
: "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval)
: "memory"
);
return oldval;
}
#endif
#endif /* _ASM_X86_FUTEX_H */
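
The decode above fixes the encoding: op in bits 31-28, cmp in 27-24, oparg in
23-12, cmparg in 11-0, which is what the FUTEX_OP() macro in <linux/futex.h>
packs. A sketch; 'uaddr' is an illustrative user pointer:

    /* atomically *uaddr += 1, then test whether the old value was > 0 */
    int encoded = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
    int ret = futex_atomic_op_inuser(encoded, uaddr);
    /* ret: 1 if oldval > 0, 0 if not, negative errno on failure */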


@@ -0,0 +1,73 @@
#ifndef _ASM_X86_GART_H
#define _ASM_X86_GART_H
#include <asm/e820.h>
extern void set_up_gart_resume(u32, u32);
extern int fallback_aper_order;
extern int fallback_aper_force;
extern int fix_aperture;
/* PTE bits. */
#define GPTE_VALID 1
#define GPTE_COHERENT 2
/* Aperture control register bits. */
#define GARTEN (1<<0)
#define DISGARTCPU (1<<4)
#define DISGARTIO (1<<5)
/* GART cache control register bits. */
#define INVGART (1<<0)
#define GARTPTEERR (1<<1)
/* K8 On-cpu GART registers */
#define AMD64_GARTAPERTURECTL 0x90
#define AMD64_GARTAPERTUREBASE 0x94
#define AMD64_GARTTABLEBASE 0x98
#define AMD64_GARTCACHECTL 0x9c
#define AMD64_GARTEN (1<<0)
extern int agp_amd64_init(void);
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
/* address of the mappings table */
addr >>= 12;
tmp = (u32) addr<<4;
tmp &= ~0xf;
pci_write_config_dword(dev, AMD64_GARTTABLEBASE, tmp);
/* Enable GART translation for this hammer. */
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);
ctl |= GARTEN;
ctl &= ~(DISGARTCPU | DISGARTIO);
pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
}
static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
{
if (!aper_base)
return 0;
if (aper_base + aper_size > 0x100000000ULL) {
printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
if (aper_size < min_size) {
printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
aper_size>>20, min_size>>20);
return 0;
}
return 1;
}
#endif /* _ASM_X86_GART_H */
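
enable_gart_translation() packs the table's physical address into
AMD64_GARTTABLEBASE by dropping the low 12 bits and shifting the result into
register bits 31-4. A worked example with a hypothetical address:

    /*
     * Table at phys 0x12345000: 0x12345000 >> 12 = 0x12345, then
     * 0x12345 << 4 = 0x123450; with the low nibble cleared, 0x123450
     * is what lands in AMD64_GARTTABLEBASE.
     */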


@@ -0,0 +1,5 @@
#ifdef CONFIG_X86_32
# include "genapic_32.h"
#else
# include "genapic_64.h"
#endif


@@ -0,0 +1,126 @@
#ifndef _ASM_X86_GENAPIC_32_H
#define _ASM_X86_GENAPIC_32_H
#include <asm/mpspec.h>
/*
* Generic APIC driver interface.
*
* A straightforward mapping of the APIC-related parts of the
* x86 subarchitecture interface to a dynamic object.
*
* This is used by the "generic" x86 subarchitecture.
*
* Copyright 2003 Andi Kleen, SuSE Labs.
*/
struct mpc_config_bus;
struct mp_config_table;
struct mpc_config_processor;
struct genapic {
char *name;
int (*probe)(void);
int (*apic_id_registered)(void);
cpumask_t (*target_cpus)(void);
int int_delivery_mode;
int int_dest_mode;
int ESR_DISABLE;
int apic_destination_logical;
unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
unsigned long (*check_apicid_present)(int apicid);
int no_balance_irq;
int no_ioapic_check;
void (*init_apic_ldr)(void);
physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
void (*setup_apic_routing)(void);
int (*multi_timer_check)(int apic, int irq);
int (*apicid_to_node)(int logical_apicid);
int (*cpu_to_logical_apicid)(int cpu);
int (*cpu_present_to_apicid)(int mps_cpu);
physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
void (*setup_portio_remap)(void);
int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
void (*enable_apic_mode)(void);
u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
/* mpparse */
/* When one of the next two hooks returns 1 the genapic
is switched to this. Essentially they are additional probe
functions. */
int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
char *productid);
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
unsigned (*get_apic_id)(unsigned long x);
unsigned long apic_id_mask;
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
cpumask_t (*vector_allocation_domain)(int cpu);
#ifdef CONFIG_SMP
/* ipi */
void (*send_IPI_mask)(cpumask_t mask, int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
#endif
};
#define APICFUNC(x) .x = x,
/* More functions could probably be marked IPIFUNC and save some space
in UP GENERICARCH kernels, but I don't have the nerve right now
to untangle this mess. -AK */
#ifdef CONFIG_SMP
#define IPIFUNC(x) APICFUNC(x)
#else
#define IPIFUNC(x)
#endif
#define APIC_INIT(aname, aprobe) \
{ \
.name = aname, \
.probe = aprobe, \
.int_delivery_mode = INT_DELIVERY_MODE, \
.int_dest_mode = INT_DEST_MODE, \
.no_balance_irq = NO_BALANCE_IRQ, \
.ESR_DISABLE = esr_disable, \
.apic_destination_logical = APIC_DEST_LOGICAL, \
APICFUNC(apic_id_registered) \
APICFUNC(target_cpus) \
APICFUNC(check_apicid_used) \
APICFUNC(check_apicid_present) \
APICFUNC(init_apic_ldr) \
APICFUNC(ioapic_phys_id_map) \
APICFUNC(setup_apic_routing) \
APICFUNC(multi_timer_check) \
APICFUNC(apicid_to_node) \
APICFUNC(cpu_to_logical_apicid) \
APICFUNC(cpu_present_to_apicid) \
APICFUNC(apicid_to_cpu_present) \
APICFUNC(setup_portio_remap) \
APICFUNC(check_phys_apicid_present) \
APICFUNC(mps_oem_check) \
APICFUNC(get_apic_id) \
.apic_id_mask = APIC_ID_MASK, \
APICFUNC(cpu_mask_to_apicid) \
APICFUNC(vector_allocation_domain) \
APICFUNC(acpi_madt_oem_check) \
IPIFUNC(send_IPI_mask) \
IPIFUNC(send_IPI_allbutself) \
IPIFUNC(send_IPI_all) \
APICFUNC(enable_apic_mode) \
APICFUNC(phys_pkg_id) \
}
extern struct genapic *genapic;
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
#define get_uv_system_type() UV_NONE
#define is_uv_system() 0
#define uv_wakeup_secondary(a, b) 1
#define uv_system_init() do {} while (0)
#endif /* _ASM_X86_GENAPIC_32_H */
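
APICFUNC(x) relies on designated initializers: it expands to `.x = x,`, so APIC_INIT wires in whichever like-named functions a driver defines. A toy standalone version of the same macro trick (the struct and names here are invented for the example, not taken from the header):

#include <stdio.h>

struct ops {
	const char *name;
	int (*probe)(void);
};

#define OPSFUNC(x) .x = x,	/* same shape as APICFUNC above */

static int probe(void)
{
	return 1;
}

static struct ops toy_ops = {
	.name = "toy",
	OPSFUNC(probe)		/* expands to: .probe = probe, */
};

int main(void)
{
	printf("%s -> %d\n", toy_ops.name, toy_ops.probe());
	return 0;
}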


@@ -0,0 +1,58 @@
#ifndef _ASM_X86_GENAPIC_64_H
#define _ASM_X86_GENAPIC_64_H
/*
* Copyright 2004 James Cleverdon, IBM.
* Subject to the GNU Public License, v.2
*
* Generic APIC sub-arch data struct.
*
* Hacked for x86-64 by James Cleverdon from i386 architecture code by
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
struct genapic {
char *name;
int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
u32 int_delivery_mode;
u32 int_dest_mode;
int (*apic_id_registered)(void);
cpumask_t (*target_cpus)(void);
cpumask_t (*vector_allocation_domain)(int cpu);
void (*init_apic_ldr)(void);
/* ipi */
void (*send_IPI_mask)(cpumask_t mask, int vector);
void (*send_IPI_allbutself)(int vector);
void (*send_IPI_all)(int vector);
void (*send_IPI_self)(int vector);
/* */
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
unsigned int (*phys_pkg_id)(int index_msb);
unsigned int (*get_apic_id)(unsigned long x);
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
};
extern struct genapic *genapic;
extern struct genapic apic_flat;
extern struct genapic apic_physflat;
extern struct genapic apic_x2apic_cluster;
extern struct genapic apic_x2apic_phys;
extern int acpi_madt_oem_check(char *, char *);
extern void apic_send_IPI_self(int vector);
enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern struct genapic apic_x2apic_uv_x;
DECLARE_PER_CPU(int, x2apic_extra_bits);
extern void uv_cpu_init(void);
extern void uv_system_init(void);
extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
extern void setup_apic_routing(void);
#endif /* _ASM_X86_GENAPIC_64_H */


@@ -0,0 +1,253 @@
/*
* AMD Geode definitions
* Copyright (C) 2006, Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#ifndef _ASM_X86_GEODE_H
#define _ASM_X86_GEODE_H
#include <asm/processor.h>
#include <linux/io.h>
/* Generic southbridge functions */
#define GEODE_DEV_PMS 0
#define GEODE_DEV_ACPI 1
#define GEODE_DEV_GPIO 2
#define GEODE_DEV_MFGPT 3
extern int geode_get_dev_base(unsigned int dev);
/* Useful macros */
#define geode_pms_base() geode_get_dev_base(GEODE_DEV_PMS)
#define geode_acpi_base() geode_get_dev_base(GEODE_DEV_ACPI)
#define geode_gpio_base() geode_get_dev_base(GEODE_DEV_GPIO)
#define geode_mfgpt_base() geode_get_dev_base(GEODE_DEV_MFGPT)
/* MSRS */
#define MSR_GLIU_P2D_RO0 0x10000029
#define MSR_LX_GLD_MSR_CONFIG 0x48002001
#define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data
* sheet has the wrong value */
#define MSR_GLCP_SYS_RSTPLL 0x4C000014
#define MSR_GLCP_DOTPLL 0x4C000015
#define MSR_LBAR_SMB 0x5140000B
#define MSR_LBAR_GPIO 0x5140000C
#define MSR_LBAR_MFGPT 0x5140000D
#define MSR_LBAR_ACPI 0x5140000E
#define MSR_LBAR_PMS 0x5140000F
#define MSR_DIVIL_SOFT_RESET 0x51400017
#define MSR_PIC_YSEL_LOW 0x51400020
#define MSR_PIC_YSEL_HIGH 0x51400021
#define MSR_PIC_ZSEL_LOW 0x51400022
#define MSR_PIC_ZSEL_HIGH 0x51400023
#define MSR_PIC_IRQM_LPC 0x51400025
#define MSR_MFGPT_IRQ 0x51400028
#define MSR_MFGPT_NR 0x51400029
#define MSR_MFGPT_SETUP 0x5140002B
#define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */
#define MSR_GX_GLD_MSR_CONFIG 0xC0002001
#define MSR_GX_MSR_PADSEL 0xC0002011
/* Resource Sizes */
#define LBAR_GPIO_SIZE 0xFF
#define LBAR_MFGPT_SIZE 0x40
#define LBAR_ACPI_SIZE 0x40
#define LBAR_PMS_SIZE 0x80
/* ACPI registers (PMS block) */
/*
* PM1_EN is only valid when VSA is enabled for 16 bit reads.
* When VSA is not enabled, *always* read both PM1_STS and PM1_EN
* with a 32 bit read at offset 0x0
*/
#define PM1_STS 0x00
#define PM1_EN 0x02
#define PM1_CNT 0x08
#define PM2_CNT 0x0C
#define PM_TMR 0x10
#define PM_GPE0_STS 0x18
#define PM_GPE0_EN 0x1C
/* PMC registers (PMS block) */
#define PM_SSD 0x00
#define PM_SCXA 0x04
#define PM_SCYA 0x08
#define PM_OUT_SLPCTL 0x0C
#define PM_SCLK 0x10
#define PM_SED 0x14
#define PM_SCXD 0x18
#define PM_SCYD 0x1C
#define PM_IN_SLPCTL 0x20
#define PM_WKD 0x30
#define PM_WKXD 0x34
#define PM_RD 0x38
#define PM_WKXA 0x3C
#define PM_FSD 0x40
#define PM_TSD 0x44
#define PM_PSD 0x48
#define PM_NWKD 0x4C
#define PM_AWKD 0x50
#define PM_SSC 0x54
/* VSA2 magic values */
#define VSA_VRC_INDEX 0xAC1C
#define VSA_VRC_DATA 0xAC1E
#define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */
#define VSA_VR_SIGNATURE 0x0003
#define VSA_VR_MEM_SIZE 0x0200
#define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */
#define GSW_VSA_SIG 0x534d /* General Software signature */
/* GPIO */
#define GPIO_OUTPUT_VAL 0x00
#define GPIO_OUTPUT_ENABLE 0x04
#define GPIO_OUTPUT_OPEN_DRAIN 0x08
#define GPIO_OUTPUT_INVERT 0x0C
#define GPIO_OUTPUT_AUX1 0x10
#define GPIO_OUTPUT_AUX2 0x14
#define GPIO_PULL_UP 0x18
#define GPIO_PULL_DOWN 0x1C
#define GPIO_INPUT_ENABLE 0x20
#define GPIO_INPUT_INVERT 0x24
#define GPIO_INPUT_FILTER 0x28
#define GPIO_INPUT_EVENT_COUNT 0x2C
#define GPIO_READ_BACK 0x30
#define GPIO_INPUT_AUX1 0x34
#define GPIO_EVENTS_ENABLE 0x38
#define GPIO_LOCK_ENABLE 0x3C
#define GPIO_POSITIVE_EDGE_EN 0x40
#define GPIO_NEGATIVE_EDGE_EN 0x44
#define GPIO_POSITIVE_EDGE_STS 0x48
#define GPIO_NEGATIVE_EDGE_STS 0x4C
#define GPIO_MAP_X 0xE0
#define GPIO_MAP_Y 0xE4
#define GPIO_MAP_Z 0xE8
#define GPIO_MAP_W 0xEC
static inline u32 geode_gpio(unsigned int nr)
{
BUG_ON(nr > 28);
return 1 << nr;
}
extern void geode_gpio_set(u32, unsigned int);
extern void geode_gpio_clear(u32, unsigned int);
extern int geode_gpio_isset(u32, unsigned int);
extern void geode_gpio_setup_event(unsigned int, int, int);
extern void geode_gpio_set_irq(unsigned int, unsigned int);
static inline void geode_gpio_event_irq(unsigned int gpio, int pair)
{
geode_gpio_setup_event(gpio, pair, 0);
}
static inline void geode_gpio_event_pme(unsigned int gpio, int pair)
{
geode_gpio_setup_event(gpio, pair, 1);
}
/* Specific geode tests */
static inline int is_geode_gx(void)
{
return ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC) &&
(boot_cpu_data.x86 == 5) &&
(boot_cpu_data.x86_model == 5));
}
static inline int is_geode_lx(void)
{
return ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
(boot_cpu_data.x86 == 5) &&
(boot_cpu_data.x86_model == 10));
}
static inline int is_geode(void)
{
return (is_geode_gx() || is_geode_lx());
}
#ifdef CONFIG_MGEODE_LX
extern int geode_has_vsa2(void);
#else
static inline int geode_has_vsa2(void)
{
return 0;
}
#endif
/* MFGPTs */
#define MFGPT_MAX_TIMERS 8
#define MFGPT_TIMER_ANY (-1)
#define MFGPT_DOMAIN_WORKING 1
#define MFGPT_DOMAIN_STANDBY 2
#define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
#define MFGPT_CMP1 0
#define MFGPT_CMP2 1
#define MFGPT_EVENT_IRQ 0
#define MFGPT_EVENT_NMI 1
#define MFGPT_EVENT_RESET 3
#define MFGPT_REG_CMP1 0
#define MFGPT_REG_CMP2 2
#define MFGPT_REG_COUNTER 4
#define MFGPT_REG_SETUP 6
#define MFGPT_SETUP_CNTEN (1 << 15)
#define MFGPT_SETUP_CMP2 (1 << 14)
#define MFGPT_SETUP_CMP1 (1 << 13)
#define MFGPT_SETUP_SETUP (1 << 12)
#define MFGPT_SETUP_STOPEN (1 << 11)
#define MFGPT_SETUP_EXTEN (1 << 10)
#define MFGPT_SETUP_REVEN (1 << 5)
#define MFGPT_SETUP_CLKSEL (1 << 4)
static inline void geode_mfgpt_write(int timer, u16 reg, u16 value)
{
u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
outw(value, base + reg + (timer * 8));
}
static inline u16 geode_mfgpt_read(int timer, u16 reg)
{
u32 base = geode_get_dev_base(GEODE_DEV_MFGPT);
return inw(base + reg + (timer * 8));
}
extern int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable);
extern int geode_mfgpt_set_irq(int timer, int cmp, int *irq, int enable);
extern int geode_mfgpt_alloc_timer(int timer, int domain);
#define geode_mfgpt_setup_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 1)
#define geode_mfgpt_release_irq(t, c, i) geode_mfgpt_set_irq((t), (c), (i), 0)
#ifdef CONFIG_GEODE_MFGPT_TIMER
extern int __init mfgpt_timer_setup(void);
#else
static inline int mfgpt_timer_setup(void) { return 0; }
#endif
#endif /* _ASM_X86_GEODE_H */
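
geode_mfgpt_read()/geode_mfgpt_write() address each timer through an 8-byte I/O window, with the 16-bit registers at fixed offsets inside it. The port arithmetic in isolation (the 0x6400 base is invented; the real one comes from geode_get_dev_base()):

#include <stdio.h>

#define MFGPT_REG_COUNTER 4
#define MFGPT_REG_SETUP   6

/* Each MFGPT timer owns an 8-byte window of I/O ports. */
static unsigned int mfgpt_port(unsigned int base, int timer, unsigned int reg)
{
	return base + reg + timer * 8;
}

int main(void)
{
	unsigned int base = 0x6400;	/* hypothetical LBAR_MFGPT value */

	printf("timer 2 counter at 0x%x\n", mfgpt_port(base, 2, MFGPT_REG_COUNTER)); /* 0x6414 */
	printf("timer 2 setup   at 0x%x\n", mfgpt_port(base, 2, MFGPT_REG_SETUP));   /* 0x6416 */
	return 0;
}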


@@ -0,0 +1,56 @@
/*
* Generic GPIO API implementation for x86.
*
* Derived from the generic GPIO API for powerpc:
*
* Copyright (c) 2007-2008 MontaVista Software, Inc.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _ASM_X86_GPIO_H
#define _ASM_X86_GPIO_H
#include <asm-generic/gpio.h>
#ifdef CONFIG_GPIOLIB
/*
* Just call gpiolib.
*/
static inline int gpio_get_value(unsigned int gpio)
{
return __gpio_get_value(gpio);
}
static inline void gpio_set_value(unsigned int gpio, int value)
{
__gpio_set_value(gpio, value);
}
static inline int gpio_cansleep(unsigned int gpio)
{
return __gpio_cansleep(gpio);
}
/*
* Not implemented, yet.
*/
static inline int gpio_to_irq(unsigned int gpio)
{
return -ENOSYS;
}
static inline int irq_to_gpio(unsigned int irq)
{
return -EINVAL;
}
#endif /* CONFIG_GPIOLIB */
#endif /* _ASM_X86_GPIO_H */


@@ -0,0 +1,11 @@
#ifdef CONFIG_X86_32
# include "hardirq_32.h"
#else
# include "hardirq_64.h"
#endif
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu arch_irq_stat_cpu
extern u64 arch_irq_stat(void);
#define arch_irq_stat arch_irq_stat


@@ -0,0 +1,28 @@
#ifndef _ASM_X86_HARDIRQ_32_H
#define _ASM_X86_HARDIRQ_32_H
#include <linux/threads.h>
#include <linux/irq.h>
typedef struct {
unsigned int __softirq_pending;
unsigned long idle_timestamp;
unsigned int __nmi_count; /* arch dependent */
unsigned int apic_timer_irqs; /* arch dependent */
unsigned int irq0_irqs;
unsigned int irq_resched_count;
unsigned int irq_call_count;
unsigned int irq_tlb_count;
unsigned int irq_thermal_count;
unsigned int irq_spurious_count;
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h>
#endif /* _ASM_X86_HARDIRQ_32_H */


@@ -0,0 +1,23 @@
#ifndef _ASM_X86_HARDIRQ_64_H
#define _ASM_X86_HARDIRQ_64_H
#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
#define __ARCH_IRQ_STAT 1
#define local_softirq_pending() read_pda(__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING 1
#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
#endif /* _ASM_X86_HARDIRQ_64_H */


@@ -0,0 +1,82 @@
/*
* highmem.h: virtual kernel memory mappings for high memory
*
* Used in CONFIG_HIGHMEM systems for memory pages which
* are not addressable by direct kernel virtual addresses.
*
* Copyright (C) 1999 Gerhard Wichert, Siemens AG
* Gerhard.Wichert@pdb.siemens.de
*
*
* Redesigned the x86 32-bit VM architecture to deal with
* up to 16 Terabyte physical memory. With current x86 CPUs
* we now support up to 64 Gigabytes physical RAM.
*
* Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
*/
#ifndef _ASM_X86_HIGHMEM_H
#define _ASM_X86_HIGHMEM_H
#ifdef __KERNEL__
#include <linux/interrupt.h>
#include <linux/threads.h>
#include <asm/kmap_types.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
* Right now we initialize only a single pte table. It can be extended
* easily; subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
/*
* Ordering is:
*
* FIXADDR_TOP
* fixed_addresses
* FIXADDR_START
* temp fixed addresses
* FIXADDR_BOOT_START
* Persistent kmap area
* PKMAP_BASE
* VMALLOC_END
* Vmalloc area
* VMALLOC_START
* high_memory
*/
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern void *kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
void *kmap(struct page *page);
void kunmap(struct page *page);
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
void *kmap_atomic(struct page *page, enum km_type type);
void kunmap_atomic(void *kvaddr, enum km_type type);
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
struct page *kmap_atomic_to_page(void *ptr);
#ifndef CONFIG_PARAVIRT
#define kmap_atomic_pte(page, type) kmap_atomic(page, type)
#endif
#define flush_cache_kmaps() do { } while (0)
extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
unsigned long end_pfn);
#endif /* __KERNEL__ */
#endif /* _ASM_X86_HIGHMEM_H */
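
PKMAP_NR() and PKMAP_ADDR() are inverse maps between a pkmap slot index and its virtual address. A standalone round trip, with PKMAP_BASE and PAGE_SHIFT given example values (both are configuration-dependent in the kernel):

#include <stdio.h>

#define PAGE_SHIFT 12			/* example value */
#define PKMAP_BASE 0xff800000UL		/* example value; config-dependent */
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((unsigned long)(nr) << PAGE_SHIFT))

int main(void)
{
	unsigned long va = PKMAP_ADDR(5);

	printf("slot 5 -> %#lx -> slot %lu\n", va, PKMAP_NR(va));	/* 0xff805000, 5 */
	return 0;
}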

arch/x86/include/asm/hpet.h

@@ -0,0 +1,114 @@
#ifndef _ASM_X86_HPET_H
#define _ASM_X86_HPET_H
#include <linux/msi.h>
#ifdef CONFIG_HPET_TIMER
#define HPET_MMAP_SIZE 1024
#define HPET_ID 0x000
#define HPET_PERIOD 0x004
#define HPET_CFG 0x010
#define HPET_STATUS 0x020
#define HPET_COUNTER 0x0f0
#define HPET_Tn_CFG(n) (0x100 + 0x20 * (n))
#define HPET_Tn_CMP(n) (0x108 + 0x20 * (n))
#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * (n))
#define HPET_T0_CFG 0x100
#define HPET_T0_CMP 0x108
#define HPET_T0_ROUTE 0x110
#define HPET_T1_CFG 0x120
#define HPET_T1_CMP 0x128
#define HPET_T1_ROUTE 0x130
#define HPET_T2_CFG 0x140
#define HPET_T2_CMP 0x148
#define HPET_T2_ROUTE 0x150
#define HPET_ID_REV 0x000000ff
#define HPET_ID_NUMBER 0x00001f00
#define HPET_ID_64BIT 0x00002000
#define HPET_ID_LEGSUP 0x00008000
#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_NUMBER_SHIFT 8
#define HPET_ID_VENDOR_SHIFT 16
#define HPET_ID_VENDOR_8086 0x8086
#define HPET_CFG_ENABLE 0x001
#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
#define HPET_TN_LEVEL 0x0002
#define HPET_TN_ENABLE 0x0004
#define HPET_TN_PERIODIC 0x0008
#define HPET_TN_PERIODIC_CAP 0x0010
#define HPET_TN_64BIT_CAP 0x0020
#define HPET_TN_SETVAL 0x0040
#define HPET_TN_32BIT 0x0100
#define HPET_TN_ROUTE 0x3e00
#define HPET_TN_FSB 0x4000
#define HPET_TN_FSB_CAP 0x8000
#define HPET_TN_ROUTE_SHIFT 9
/* Max HPET Period is 10^8 femto sec as in HPET spec */
#define HPET_MAX_PERIOD 100000000UL
/*
* Min HPET period is 10^5 femto sec just for safety. If it is less than this,
* then the 32-bit HPET counter wraps around in less than 0.5 sec.
*/
#define HPET_MIN_PERIOD 100000UL
/* hpet memory map physical address */
extern unsigned long hpet_address;
extern unsigned long force_hpet_address;
extern int hpet_force_user;
extern int is_hpet_enabled(void);
extern int hpet_enable(void);
extern void hpet_disable(void);
extern unsigned long hpet_readl(unsigned long a);
extern void force_hpet_resume(void);
extern void hpet_msi_unmask(unsigned int irq);
extern void hpet_msi_mask(unsigned int irq);
extern void hpet_msi_write(unsigned int irq, struct msi_msg *msg);
extern void hpet_msi_read(unsigned int irq, struct msi_msg *msg);
#ifdef CONFIG_PCI_MSI
extern int arch_setup_hpet_msi(unsigned int irq);
#else
static inline int arch_setup_hpet_msi(unsigned int irq)
{
return -EINVAL;
}
#endif
#ifdef CONFIG_HPET_EMULATE_RTC
#include <linux/interrupt.h>
typedef irqreturn_t (*rtc_irq_handler)(int interrupt, void *cookie);
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
extern int hpet_register_irq_handler(rtc_irq_handler handler);
extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
#endif /* CONFIG_HPET_EMULATE_RTC */
#else /* CONFIG_HPET_TIMER */
static inline int hpet_enable(void) { return 0; }
static inline int is_hpet_enabled(void) { return 0; }
#define hpet_readl(a) 0
#endif
#endif /* _ASM_X86_HPET_H */
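
The HPET_ID masks above decode the capability register; the timer-count field stores the index of the last comparator, so one is added to get the count. A standalone decode of an invented raw value (0x8086a201 is not real hardware output):

#include <stdio.h>

#define HPET_ID_REV		0x000000ff
#define HPET_ID_NUMBER		0x00001f00
#define HPET_ID_VENDOR		0xffff0000
#define HPET_ID_NUMBER_SHIFT	8
#define HPET_ID_VENDOR_SHIFT	16

int main(void)
{
	unsigned int id = 0x8086a201;	/* hypothetical HPET_ID readout */

	printf("rev %u, %u comparators, vendor 0x%04x\n",
	       id & HPET_ID_REV,
	       ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1,
	       (id & HPET_ID_VENDOR) >> HPET_ID_VENDOR_SHIFT);
	/* prints: rev 1, 3 comparators, vendor 0x8086 */
	return 0;
}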


@@ -0,0 +1,93 @@
#ifndef _ASM_X86_HUGETLB_H
#define _ASM_X86_HUGETLB_H
#include <asm/page.h>
static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len) {
return 0;
}
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
}
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor,
unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
}
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
return *ptep;
}
static inline int arch_prepare_hugepage(struct page *page)
{
return 0;
}
static inline void arch_release_hugepage(struct page *page)
{
}
#endif /* _ASM_X86_HUGETLB_H */
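
prepare_hugepage_range() only checks that both the start address and the length are multiples of the huge page size. The same test restated for a fixed 2 MiB page (the kernel derives the mask from the per-file hstate instead):

#include <stdio.h>

#define HPAGE_SIZE (2UL << 20)		/* fixed 2 MiB page for the example */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

static int range_ok(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)		/* length not a multiple of the page size */
		return -22;		/* -EINVAL */
	if (addr & ~HPAGE_MASK)		/* start not huge-page aligned */
		return -22;
	return 0;
}

int main(void)
{
	printf("%d %d\n",
	       range_ok(0x40000000UL, 4UL << 20),	/* aligned: 0 */
	       range_ok(0x40001000UL, 4UL << 20));	/* misaligned start: -22 */
	return 0;
}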


@@ -0,0 +1,131 @@
#ifndef _ASM_X86_HW_IRQ_H
#define _ASM_X86_HW_IRQ_H
/*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar
*
* moved some of the old arch/i386/kernel/irq.h to here. VY
*
* IRQ/IPI changes taken from work by Thomas Radke
* <tomsoft@informatik.tu-chemnitz.de>
*
* hacked by Andi Kleen for x86-64.
* unified by tglx
*/
#include <asm/irq_vectors.h>
#ifndef __ASSEMBLY__
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/sections.h>
#define platform_legacy_irq(irq) ((irq) < 16)
/* Interrupt handlers registered during init_IRQ */
extern void apic_timer_interrupt(void);
extern void error_interrupt(void);
extern void spurious_interrupt(void);
extern void thermal_interrupt(void);
extern void reschedule_interrupt(void);
extern void invalidate_interrupt(void);
extern void invalidate_interrupt0(void);
extern void invalidate_interrupt1(void);
extern void invalidate_interrupt2(void);
extern void invalidate_interrupt3(void);
extern void invalidate_interrupt4(void);
extern void invalidate_interrupt5(void);
extern void invalidate_interrupt6(void);
extern void invalidate_interrupt7(void);
extern void irq_move_cleanup_interrupt(void);
extern void threshold_interrupt(void);
extern void call_function_interrupt(void);
extern void call_function_single_interrupt(void);
/* PIC specific functions */
extern void disable_8259A_irq(unsigned int irq);
extern void enable_8259A_irq(unsigned int irq);
extern int i8259A_irq_pending(unsigned int irq);
extern void make_8259A_irq(unsigned int irq);
extern void init_8259A(int aeoi);
/* IOAPIC */
#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
extern unsigned long io_apic_irqs;
extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
extern void setup_ioapic_dest(void);
#ifdef CONFIG_X86_64
extern void enable_IO_APIC(void);
#endif
/* IPI functions */
#ifdef CONFIG_X86_32
extern void send_IPI_self(int vector);
#endif
extern void send_IPI(int dest, int vector);
/* Statistics */
extern atomic_t irq_err_count;
extern atomic_t irq_mis_count;
/* EISA */
extern void eisa_set_level_irq(unsigned int irq);
/* Voyager functions */
extern asmlinkage void vic_cpi_interrupt(void);
extern asmlinkage void vic_sys_interrupt(void);
extern asmlinkage void vic_cmn_interrupt(void);
extern asmlinkage void qic_timer_interrupt(void);
extern asmlinkage void qic_invalidate_interrupt(void);
extern asmlinkage void qic_reschedule_interrupt(void);
extern asmlinkage void qic_enable_irq_interrupt(void);
extern asmlinkage void qic_call_function_interrupt(void);
/* SMP */
extern void smp_apic_timer_interrupt(struct pt_regs *);
extern void smp_spurious_interrupt(struct pt_regs *);
extern void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_SMP
extern void smp_reschedule_interrupt(struct pt_regs *);
extern void smp_call_function_interrupt(struct pt_regs *);
extern void smp_call_function_single_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_32
extern void smp_invalidate_interrupt(struct pt_regs *);
#else
extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
#endif
#endif
#ifdef CONFIG_X86_32
extern void (*const interrupt[NR_VECTORS])(void);
#endif
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
#ifdef CONFIG_X86_IO_APIC
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
extern void __setup_vector_irq(int cpu);
#else
static inline void lock_vector_lock(void) {}
static inline void unlock_vector_lock(void) {}
static inline void __setup_vector_irq(int cpu) {}
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_HW_IRQ_H */


@@ -0,0 +1,45 @@
#ifndef _ASM_X86_HYPERTRANSPORT_H
#define _ASM_X86_HYPERTRANSPORT_H
/*
* Constants for x86 Hypertransport Interrupts.
*/
#define HT_IRQ_LOW_BASE 0xf8000000
#define HT_IRQ_LOW_VECTOR_SHIFT 16
#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
#define HT_IRQ_LOW_VECTOR(v) \
(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
#define HT_IRQ_LOW_DEST_ID_SHIFT 8
#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
#define HT_IRQ_LOW_DEST_ID(v) \
(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
#define HT_IRQ_LOW_MT_FIXED 0x0000000
#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
#define HT_IRQ_LOW_MT_SMI 0x0000008
#define HT_IRQ_LOW_MT_NMI 0x000000c
#define HT_IRQ_LOW_MT_INIT 0x0000010
#define HT_IRQ_LOW_MT_STARTUP 0x0000014
#define HT_IRQ_LOW_MT_EXTINT 0x0000018
#define HT_IRQ_LOW_MT_LINT1 0x000008c
#define HT_IRQ_LOW_MT_LINT0 0x0000098
#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
#define HT_IRQ_HIGH_DEST_ID(v) \
((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
#endif /* _ASM_X86_HYPERTRANSPORT_H */
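
The HT_IRQ_LOW_* encode macros shift a field into place and mask off anything that would overflow it. Packing an arbitrary vector/destination pair (the values are invented):

#include <stdio.h>

#define HT_IRQ_LOW_BASE		0xf8000000u
#define HT_IRQ_LOW_VECTOR_SHIFT	16
#define HT_IRQ_LOW_VECTOR_MASK	0x00ff0000
#define HT_IRQ_LOW_VECTOR(v) \
	(((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
#define HT_IRQ_LOW_DEST_ID_SHIFT 8
#define HT_IRQ_LOW_DEST_ID_MASK	0x0000ff00
#define HT_IRQ_LOW_DEST_ID(v) \
	(((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)

int main(void)
{
	unsigned int low = HT_IRQ_LOW_BASE
			 | HT_IRQ_LOW_VECTOR(0x31)	/* interrupt vector */
			 | HT_IRQ_LOW_DEST_ID(0x05);	/* APIC destination */

	printf("low word = 0x%08x\n", low);	/* 0xf8310500 */
	return 0;
}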

arch/x86/include/asm/i387.h

@@ -0,0 +1,400 @@
/*
* Copyright (C) 1994 Linus Torvalds
*
* Pentium III FXSR, SSE support
* General FPU state handling cleanups
* Gareth Hughes <gareth@valinux.com>, May 2000
* x86-64 work by Andi Kleen 2002
*/
#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <asm/asm.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void init_thread_xstate(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set;
extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif
#define X87_FSW_ES (1 << 7) /* Exception Summary */
#ifdef CONFIG_X86_64
/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
asm volatile("1: fwait\n"
"2:\n"
_ASM_EXTABLE(1b, 2b));
}
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
int err;
asm volatile("1: rex64/fxrstor (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: [err] "=r" (err)
#if 0 /* See comment in __save_init_fpu() below. */
: [fx] "r" (fx), "m" (*fx), "0" (0));
#else
: [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
return err;
}
static inline int restore_fpu_checking(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_XSAVE)
return xrstor_checking(&tsk->thread.xstate->xsave);
else
return fxrstor_checking(&tsk->thread.xstate->fxsave);
}
/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. The kernel data segment can sometimes be 0 and sometimes the
new user value. Both should be ok.
Use the PDA as safe address because it should be already in L1. */
static inline void clear_fpu_state(struct task_struct *tsk)
{
struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
/*
* xsave header may indicate the init state of the FP.
*/
if ((task_thread_info(tsk)->status & TS_XSAVE) &&
!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
return;
if (unlikely(fx->swd & X87_FSW_ES))
asm volatile("fnclex");
alternative_input(ASM_NOP8 ASM_NOP2,
" emms\n" /* clear stack tags */
" fildl %%gs:0", /* load to clear state */
X86_FEATURE_FXSAVE_LEAK);
}
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
int err;
asm volatile("1: rex64/fxsave (%[fx])\n\t"
"2:\n"
".section .fixup,\"ax\"\n"
"3: movl $-1,%[err]\n"
" jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
: [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in __fxsave_clear() below. */
: [fx] "r" (fx), "0" (0));
#else
: [fx] "cdaSDb" (fx), "0" (0));
#endif
if (unlikely(err) &&
__clear_user(fx, sizeof(struct i387_fxsave_struct)))
err = -EFAULT;
/* No need to clear here because the caller clears USED_MATH */
return err;
}
static inline void fxsave(struct task_struct *tsk)
{
/* Using "rex64; fxsave %0" is broken because, if the memory operand
uses any extended registers for addressing, a second REX prefix
will be generated (to the assembler, rex64 followed by semicolon
is a separate instruction), and hence the 64-bitness is lost. */
#if 0
/* Using "fxsaveq %0" would be the ideal choice, but is only supported
starting with gas 2.16. */
__asm__ __volatile__("fxsaveq %0"
: "=m" (tsk->thread.xstate->fxsave));
#elif 0
/* Using, as a workaround, the properly prefixed form below isn't
accepted by any binutils version so far released, complaining that
the same type of prefix is used twice if an extended register is
needed for addressing (fix submitted to mainline 2005-11-21). */
__asm__ __volatile__("rex64/fxsave %0"
: "=m" (tsk->thread.xstate->fxsave));
#else
/* This, however, we can work around by forcing the compiler to select
an addressing mode that doesn't require extended registers. */
__asm__ __volatile__("rex64/fxsave (%1)"
: "=m" (tsk->thread.xstate->fxsave)
: "cdaSDb" (&tsk->thread.xstate->fxsave));
#endif
}
static inline void __save_init_fpu(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_XSAVE)
xsave(tsk);
else
fxsave(tsk);
clear_fpu_state(tsk);
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#else /* CONFIG_X86_32 */
extern void finit(void);
static inline void tolerant_fwait(void)
{
asm volatile("fnclex ; fwait");
}
static inline void restore_fpu(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_XSAVE) {
xrstor_checking(&tsk->thread.xstate->xsave);
return;
}
/*
* The "nop" is needed to make the instructions the same
* length.
*/
alternative_input(
"nop ; frstor %1",
"fxrstor %1",
X86_FEATURE_FXSR,
"m" (tsk->thread.xstate->fxsave));
}
/* We need a safe address that is cheap to find and that is already
in L1 during context switch. The best choices are unfortunately
different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif
/*
* These must be called with preempt disabled
*/
static inline void __save_init_fpu(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_XSAVE) {
struct xsave_struct *xstate = &tsk->thread.xstate->xsave;
struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave;
xsave(tsk);
/*
* xsave header may indicate the init state of the FP.
*/
if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
goto end;
if (unlikely(fx->swd & X87_FSW_ES))
asm volatile("fnclex");
/*
* we can do a simple return here or be paranoid :)
*/
goto clear_state;
}
/* Use more nops than strictly needed in case the compiler
varies code */
alternative_input(
"fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
"fxsave %[fx]\n"
"bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
X86_FEATURE_FXSR,
[fx] "m" (tsk->thread.xstate->fxsave),
[fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory");
clear_state:
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. safe_address is a random variable that should be in L1 */
alternative_input(
GENERIC_NOP8 GENERIC_NOP2,
"emms\n\t" /* clear stack tags */
"fildl %[addr]", /* set F?P to defined value */
X86_FEATURE_FXSAVE_LEAK,
[addr] "m" (safe_address));
end:
task_thread_info(tsk)->status &= ~TS_USEDFPU;
}
#endif /* CONFIG_X86_64 */
/*
* Signal frame handlers...
*/
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);
static inline void __unlazy_fpu(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_USEDFPU) {
__save_init_fpu(tsk);
stts();
} else
tsk->fpu_counter = 0;
}
static inline void __clear_fpu(struct task_struct *tsk)
{
if (task_thread_info(tsk)->status & TS_USEDFPU) {
tolerant_fwait();
task_thread_info(tsk)->status &= ~TS_USEDFPU;
stts();
}
}
static inline void kernel_fpu_begin(void)
{
struct thread_info *me = current_thread_info();
preempt_disable();
if (me->status & TS_USEDFPU)
__save_init_fpu(me->task);
else
clts();
}
static inline void kernel_fpu_end(void)
{
stts();
preempt_enable();
}
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
* get used from interrupt context as well. To prevent these kernel
* instructions in interrupt context from interacting wrongly with other
* user/kernel FPU usage, we should use them only inside irq_ts_save()/irq_ts_restore().
*/
static inline int irq_ts_save(void)
{
/*
* If we are in process context, we are ok to take a spurious DNA fault.
* Otherwise, doing clts() in process context requires pre-emption to
* be disabled or some heavy lifting like kernel_fpu_begin()
*/
if (!in_interrupt())
return 0;
if (read_cr0() & X86_CR0_TS) {
clts();
return 1;
}
return 0;
}
static inline void irq_ts_restore(int TS_state)
{
if (TS_state)
stts();
}
#ifdef CONFIG_X86_64
static inline void save_init_fpu(struct task_struct *tsk)
{
__save_init_fpu(tsk);
stts();
}
#define unlazy_fpu __unlazy_fpu
#define clear_fpu __clear_fpu
#else /* CONFIG_X86_32 */
/*
* These disable preemption on their own and are safe
*/
static inline void save_init_fpu(struct task_struct *tsk)
{
preempt_disable();
__save_init_fpu(tsk);
stts();
preempt_enable();
}
static inline void unlazy_fpu(struct task_struct *tsk)
{
preempt_disable();
__unlazy_fpu(tsk);
preempt_enable();
}
static inline void clear_fpu(struct task_struct *tsk)
{
preempt_disable();
__clear_fpu(tsk);
preempt_enable();
}
#endif /* CONFIG_X86_64 */
/*
* i387 state interaction
*/
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
return tsk->thread.xstate->fxsave.cwd;
} else {
return (unsigned short)tsk->thread.xstate->fsave.cwd;
}
}
static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
if (cpu_has_fxsr) {
return tsk->thread.xstate->fxsave.swd;
} else {
return (unsigned short)tsk->thread.xstate->fsave.swd;
}
}
static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
if (cpu_has_xmm) {
return tsk->thread.xstate->fxsave.mxcsr;
} else {
return MXCSR_DEFAULT;
}
}
#endif /* _ASM_X86_I387_H */
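
irq_ts_save()/irq_ts_restore() form a bracket: save reports whether it had to clear CR0.TS, and restore re-sets the bit only in that case. The pairing contract, modelled with a plain flag so it runs in userspace (the real functions touch CR0 and check in_interrupt(); the fake_* names are for the example only):

#include <stdio.h>

static int cr0_ts = 1;			/* stand-in for the real CR0.TS bit */

static int fake_irq_ts_save(void)	/* returns 1 iff it cleared TS */
{
	if (cr0_ts) {
		cr0_ts = 0;
		return 1;
	}
	return 0;
}

static void fake_irq_ts_restore(int ts)
{
	if (ts)
		cr0_ts = 1;
}

int main(void)
{
	int ts = fake_irq_ts_save();

	/* ... the SSE-touching instruction would run here ... */
	fake_irq_ts_restore(ts);
	printf("TS restored: %d\n", cr0_ts);	/* 1 */
	return 0;
}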


@@ -0,0 +1,18 @@
#ifndef _ASM_X86_I8253_H
#define _ASM_X86_I8253_H
/* i8253A PIT registers */
#define PIT_MODE 0x43
#define PIT_CH0 0x40
#define PIT_CH2 0x42
extern spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
extern void setup_pit_timer(void);
#define inb_pit inb_p
#define outb_pit outb_p
#endif /* _ASM_X86_I8253_H */


@@ -0,0 +1,63 @@
#ifndef _ASM_X86_I8259_H
#define _ASM_X86_I8259_H
#include <linux/delay.h>
extern unsigned int cached_irq_mask;
#define __byte(x, y) (((unsigned char *)&(y))[x])
#define cached_master_mask (__byte(0, cached_irq_mask))
#define cached_slave_mask (__byte(1, cached_irq_mask))
/* i8259A PIC registers */
#define PIC_MASTER_CMD 0x20
#define PIC_MASTER_IMR 0x21
#define PIC_MASTER_ISR PIC_MASTER_CMD
#define PIC_MASTER_POLL PIC_MASTER_ISR
#define PIC_MASTER_OCW3 PIC_MASTER_ISR
#define PIC_SLAVE_CMD 0xa0
#define PIC_SLAVE_IMR 0xa1
/* i8259A PIC related value */
#define PIC_CASCADE_IR 2
#define MASTER_ICW4_DEFAULT 0x01
#define SLAVE_ICW4_DEFAULT 0x01
#define PIC_ICW4_AEOI 2
extern spinlock_t i8259A_lock;
extern void init_8259A(int auto_eoi);
extern void enable_8259A_irq(unsigned int irq);
extern void disable_8259A_irq(unsigned int irq);
extern unsigned int startup_8259A_irq(unsigned int irq);
/* the PIC may need a careful delay on some platforms, hence specific calls */
static inline unsigned char inb_pic(unsigned int port)
{
unsigned char value = inb(port);
/*
* delay for some accesses to PIC on motherboard or in chipset
* must be at least one microsecond, so be safe here:
*/
udelay(2);
return value;
}
static inline void outb_pic(unsigned char value, unsigned int port)
{
outb(value, port);
/*
* delay for some accesses to PIC on motherboard or in chipset
* must be at least one microsecond, so be safe here:
*/
udelay(2);
}
extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);
#endif /* _ASM_X86_I8259_H */
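
cached_master_mask and cached_slave_mask alias the two bytes of cached_irq_mask via __byte(); on little-endian x86, byte 0 is the master 8259's mask. A standalone demonstration (assumes a little-endian host, as the header does):

#include <stdio.h>

#define __byte(x, y) (((unsigned char *)&(y))[x])

int main(void)
{
	unsigned int cached_irq_mask = 0xffff;		/* all 16 IRQs masked */

	__byte(0, cached_irq_mask) &= ~(1 << 2);	/* unmask IRQ2 (cascade) on the master */
	printf("mask now 0x%04x\n", cached_irq_mask);	/* 0xfffb */
	return 0;
}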

arch/x86/include/asm/ia32.h

@@ -0,0 +1,170 @@
#ifndef _ASM_X86_IA32_H
#define _ASM_X86_IA32_H
#ifdef CONFIG_IA32_EMULATION
#include <linux/compat.h>
/*
* 32 bit structures for IA32 support.
*/
#include <asm/sigcontext32.h>
/* signal.h */
struct sigaction32 {
unsigned int sa_handler; /* Really a pointer, but need to deal
with 32 bits */
unsigned int sa_flags;
unsigned int sa_restorer; /* Another 32 bit pointer */
compat_sigset_t sa_mask; /* A 32 bit mask */
};
struct old_sigaction32 {
unsigned int sa_handler; /* Really a pointer, but need to deal
with 32 bits */
compat_old_sigset_t sa_mask; /* A 32 bit mask */
unsigned int sa_flags;
unsigned int sa_restorer; /* Another 32 bit pointer */
};
typedef struct sigaltstack_ia32 {
unsigned int ss_sp;
int ss_flags;
unsigned int ss_size;
} stack_ia32_t;
struct ucontext_ia32 {
unsigned int uc_flags;
unsigned int uc_link;
stack_ia32_t uc_stack;
struct sigcontext_ia32 uc_mcontext;
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
/* This matches struct stat64 in glibc2.2, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned long long st_dev;
unsigned char __pad0[4];
#define STAT64_HAS_BROKEN_ST_INO 1
unsigned int __st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned char __pad3[4];
long long st_size;
unsigned int st_blksize;
long long st_blocks;/* Number 512-byte blocks allocated */
unsigned st_atime;
unsigned st_atime_nsec;
unsigned st_mtime;
unsigned st_mtime_nsec;
unsigned st_ctime;
unsigned st_ctime_nsec;
unsigned long long st_ino;
} __attribute__((packed));
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[((128 / sizeof(int)) - 3)];
/* kill() */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
int _overrun_incr; /* amount to add to overrun */
} _timer;
/* POSIX.1b signals */
struct {
unsigned int _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
unsigned int _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
struct {
unsigned int _addr; /* faulting insn/memory ref. */
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} compat_siginfo_t;
struct sigframe32 {
u32 pretcode;
int sig;
struct sigcontext_ia32 sc;
struct _fpstate_ia32 fpstate;
unsigned int extramask[_COMPAT_NSIG_WORDS-1];
};
struct rt_sigframe32 {
u32 pretcode;
int sig;
u32 pinfo;
u32 puc;
compat_siginfo_t info;
struct ucontext_ia32 uc;
struct _fpstate_ia32 fpstate;
};
struct ustat32 {
__u32 f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__
struct linux_binprm;
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
struct mm_struct;
extern void ia32_pick_mmap_layout(struct mm_struct *mm);
#endif
#endif /* CONFIG_IA32_EMULATION */
#endif /* _ASM_X86_IA32_H */


@@ -0,0 +1,18 @@
#ifndef _ASM_X86_IA32_UNISTD_H
#define _ASM_X86_IA32_UNISTD_H
/*
* This file contains the system call numbers of the ia32 port,
* this is for the kernel only.
* Only add syscalls here where some part of the kernel needs to know
* the number. This should be otherwise in sync with asm-x86/unistd_32.h. -AK
*/
#define __NR_ia32_restart_syscall 0
#define __NR_ia32_exit 1
#define __NR_ia32_read 3
#define __NR_ia32_write 4
#define __NR_ia32_sigreturn 119
#define __NR_ia32_rt_sigreturn 173
#endif /* _ASM_X86_IA32_UNISTD_H */


@@ -0,0 +1,16 @@
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
#define IDLE_START 1
#define IDLE_END 2
struct notifier_block;
void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void enter_idle(void);
void exit_idle(void);
void c1e_remove_cpu(int cpu);
#endif /* _ASM_X86_IDLE_H */


@@ -0,0 +1,31 @@
#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H
#define _ASM_X86_INTEL_ARCH_PERFMON_H
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
#define MSR_ARCH_PERFMON_PERFCTR1 0xc2
#define MSR_ARCH_PERFMON_EVENTSEL0 0x186
#define MSR_ARCH_PERFMON_EVENTSEL1 0x187
#define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT (1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS (1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR (1 << 16)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
union cpuid10_eax {
struct {
unsigned int version_id:8;
unsigned int num_counters:8;
unsigned int bit_width:8;
unsigned int mask_length:8;
} split;
unsigned int full;
};
#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */
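
union cpuid10_eax overlays bit-fields on the raw EAX returned by CPUID leaf 0xA; version_id occupies the low byte. Splitting an invented raw value (0x07300403 is not real CPU output):

#include <stdio.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid10_eax eax = { .full = 0x07300403 };	/* hypothetical leaf 0xA EAX */

	printf("v%u: %u counters, %u bits wide\n",
	       eax.split.version_id, eax.split.num_counters,
	       eax.split.bit_width);	/* v3: 4 counters, 48 bits wide */
	return 0;
}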

arch/x86/include/asm/io.h

@@ -0,0 +1,91 @@
#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H
#define ARCH_HAS_IOREMAP_WC
#include <linux/compiler.h>
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")
build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
#define mmiowb() barrier()
#ifdef CONFIG_X86_64
build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_read(__readq, "q", unsigned long, "=r", )
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
build_mmio_write(__writeq, "q", unsigned long, "r", )
#define readq_relaxed(a) __readq(a)
#define __raw_readq __readq
#define __raw_writeq writeq
/* Let people know we have them */
#define readq readq
#define writeq writeq
#endif
extern int iommu_bio_merge;
#ifdef CONFIG_X86_32
# include "io_32.h"
#else
# include "io_64.h"
#endif
extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
unsigned long prot_val);
extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
/*
* early_ioremap() and early_iounmap() are for temporary early boot-time
* mappings, before the real ioremap() is functional.
* A boot-time mapping is currently limited to at most 16 pages.
*/
extern void early_ioremap_init(void);
extern void early_ioremap_clear(void);
extern void early_ioremap_reset(void);
extern void *early_ioremap(unsigned long offset, unsigned long size);
extern void *early_memremap(unsigned long offset, unsigned long size);
extern void early_iounmap(void *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
#endif /* _ASM_X86_IO_H */
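
For readability, here is build_mmio_read(readl, "l", unsigned int, "=r", :"memory") expanded by hand, with the sparse-only __iomem/__force annotations dropped so it compiles standalone with gcc/clang on x86 (reading an ordinary variable through it in main is just a smoke test, not an MMIO access):

#include <stdio.h>

static inline unsigned int readl_expanded(const volatile void *addr)
{
	unsigned int ret;

	asm volatile("movl %1,%0"
		     : "=r" (ret)
		     : "m" (*(const volatile unsigned int *)addr)
		     : "memory");
	return ret;
}

int main(void)
{
	unsigned int word = 0xdeadbeef;

	printf("0x%x\n", readl_expanded(&word));	/* 0xdeadbeef */
	return 0;
}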


@@ -0,0 +1,284 @@
#ifndef _ASM_X86_IO_32_H
#define _ASM_X86_IO_32_H
#include <linux/string.h>
#include <linux/compiler.h>
/*
* This file contains the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl and the "string versions" of the same
* (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
* versions of the single-IO instructions (inb_p/inw_p/..).
*
* This file is not meant to be obfuscating: it's just complicated
* to (a) handle it all in a way that makes gcc able to optimize it
* as well as possible and (b) trying to avoid writing the same thing
* over and over again with slight variations and possibly making a
* mistake somewhere.
*/
/*
* Thanks to James van Artsdalen for a better timing-fix than
* the two short jumps: using outb's to a nonexistent port seems
* to guarantee better timings even on fast machines.
*
* On the other hand, I'd like to be sure of a non-existent port:
* I feel a bit unsafe about using 0x80 (should be safe, though)
*
* Linus
*/
/*
* Bit simplified and optimized by Jan Hubicka
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
*
* isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
* isa_read[wl] and isa_write[wl] fixed
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
#define IO_SPACE_LIMIT 0xffff
#define XQUAD_PORTIO_BASE 0xfe400000
#define XQUAD_PORTIO_QUAD 0x40000 /* 256k per quad. */
#ifdef __KERNEL__
#include <asm-generic/iomap.h>
#include <linux/vmalloc.h>
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
*
* The returned physical address is the physical (CPU) mapping for
* the memory address given. It is only valid to use this function on
* addresses directly mapped or allocated via kmalloc.
*
* This function does not give bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
/**
* phys_to_virt - map physical address to virtual
* @address: address to remap
*
* The returned virtual address is a current CPU mapping for
* the memory address given. It is only valid to use this function on
* addresses that have a kernel mapping
*
* This function does not handle bus mappings for DMA transfers. In
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
/**
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
*
* ioremap performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
*
* If the area you are trying to map is a PCI BAR you should have a
* look at pci_iomap().
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However PCI ones are not necessarily 1:1 and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
static inline void
memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
memset((void __force *)addr, val, count);
}
static inline void
memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
__memcpy(dst, (const void __force *)src, count);
}
static inline void
memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
__memcpy((void __force *)dst, src, count);
}
/*
* ISA space is 'always mapped' on a typical x86 system, no need to
* explicitly ioremap() it. The fact that the ISA IO space is mapped
* to PAGE_OFFSET is pure coincidence - it does not mean ISA values
* are physical addresses. The following constant pointer can be
* used as the IO-area pointer (it can be iounmapped as well, so the
* analogy with PCI is quite large):
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
/*
* Cache management
*
* This is needed for two cases:
* 1. Out of order aware processors
* 2. Accidentally out of order processors (PPro errata #51)
*/
#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
static inline void flush_write_buffers(void)
{
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
}
#else
#define flush_write_buffers() do { } while (0)
#endif
#endif /* __KERNEL__ */
extern void native_io_delay(void);
extern int io_delay_type;
extern void io_delay_init(void);
#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else
static inline void slow_down_io(void)
{
native_io_delay();
#ifdef REALLY_SLOW_IO
native_io_delay();
native_io_delay();
native_io_delay();
#endif
}
#endif
#define __BUILDIO(bwl, bw, type) \
static inline void out##bwl(unsigned type value, int port) \
{ \
out##bwl##_local(value, port); \
} \
\
static inline unsigned type in##bwl(int port) \
{ \
return in##bwl##_local(port); \
}
#define BUILDIO(bwl, bw, type) \
static inline void out##bwl##_local(unsigned type value, int port) \
{ \
asm volatile("out" #bwl " %" #bw "0, %w1" \
: : "a"(value), "Nd"(port)); \
} \
\
static inline unsigned type in##bwl##_local(int port) \
{ \
unsigned type value; \
asm volatile("in" #bwl " %w1, %" #bw "0" \
: "=a"(value) : "Nd"(port)); \
return value; \
} \
\
static inline void out##bwl##_local_p(unsigned type value, int port) \
{ \
out##bwl##_local(value, port); \
slow_down_io(); \
} \
\
static inline unsigned type in##bwl##_local_p(int port) \
{ \
unsigned type value = in##bwl##_local(port); \
slow_down_io(); \
return value; \
} \
\
__BUILDIO(bwl, bw, type) \
\
static inline void out##bwl##_p(unsigned type value, int port) \
{ \
out##bwl(value, port); \
slow_down_io(); \
} \
\
static inline unsigned type in##bwl##_p(int port) \
{ \
unsigned type value = in##bwl(port); \
slow_down_io(); \
return value; \
} \
\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
: "+S"(addr), "+c"(count) : "d"(port)); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
: "+D"(addr), "+c"(count) : "d"(port)); \
}
BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)
#endif /* _ASM_X86_IO_32_H */
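
BUILDIO's token pasting stamps out the whole in/out family once per access width. A cut-down, runnable version of just the output direction, with the privileged outb instruction stubbed by a printf so it runs without I/O permissions (the hw_out stub and BUILDOUT name are for illustration only):

#include <stdio.h>

static void hw_out(unsigned char value, int port)	/* stub for the real "outb" insn */
{
	printf("outb 0x%02x -> port 0x%x\n", value, port);
}

#define BUILDOUT(bwl, type)						\
static void out##bwl##_local(unsigned type value, int port)		\
{									\
	hw_out((unsigned char)value, port);				\
}									\
									\
static void out##bwl(unsigned type value, int port)			\
{									\
	out##bwl##_local(value, port);					\
}

BUILDOUT(b, char)

int main(void)
{
	outb(0x20, 0x20);	/* the classic PIC EOI write, as a dry run */
	return 0;
}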


@@ -0,0 +1,244 @@
#ifndef _ASM_X86_IO_64_H
#define _ASM_X86_IO_64_H
/*
* This file contains the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl and the "string versions" of the same
* (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
* versions of the single-IO instructions (inb_p/inw_p/..).
*
* This file is not meant to be obfuscating: it's just complicated
* to (a) handle it all in a way that makes gcc able to optimize it
* as well as possible and (b) trying to avoid writing the same thing
* over and over again with slight variations and possibly making a
* mistake somewhere.
*/
/*
* Thanks to James van Artsdalen for a better timing-fix than
* the two short jumps: using outb's to a nonexistent port seems
* to guarantee better timings even on fast machines.
*
* On the other hand, I'd like to be sure of a non-existent port:
* I feel a bit unsafe about using 0x80 (should be safe, though)
*
* Linus
*/
/*
* Bit simplified and optimized by Jan Hubicka
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
*
* isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
* isa_read[wl] and isa_write[wl] fixed
* - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*/
extern void native_io_delay(void);
extern int io_delay_type;
extern void io_delay_init(void);
#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else
static inline void slow_down_io(void)
{
native_io_delay();
#ifdef REALLY_SLOW_IO
native_io_delay();
native_io_delay();
native_io_delay();
#endif
}
#endif
/*
* Talk about misusing macros..
*/
#define __OUT1(s, x) \
static inline void out##s(unsigned x value, unsigned short port) {
#define __OUT2(s, s1, s2) \
asm volatile ("out" #s " %" s1 "0,%" s2 "1"
#ifndef REALLY_SLOW_IO
#define REALLY_SLOW_IO
#define UNSET_REALLY_SLOW_IO
#endif
#define __OUT(s, s1, x) \
__OUT1(s, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
} \
__OUT1(s##_p, x) __OUT2(s, s1, "w") : : "a" (value), "Nd" (port)); \
slow_down_io(); \
}
#define __IN1(s) \
static inline RETURN_TYPE in##s(unsigned short port) \
{ \
RETURN_TYPE _v;
#define __IN2(s, s1, s2) \
asm volatile ("in" #s " %" s2 "1,%" s1 "0"
#define __IN(s, s1, i...) \
__IN1(s) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
return _v; \
} \
__IN1(s##_p) __IN2(s, s1, "w") : "=a" (_v) : "Nd" (port), ##i); \
slow_down_io(); \
return _v; }
#ifdef UNSET_REALLY_SLOW_IO
#undef REALLY_SLOW_IO
#endif
#define __INS(s) \
static inline void ins##s(unsigned short port, void *addr, \
unsigned long count) \
{ \
asm volatile ("rep ; ins" #s \
: "=D" (addr), "=c" (count) \
: "d" (port), "0" (addr), "1" (count)); \
}
#define __OUTS(s) \
static inline void outs##s(unsigned short port, const void *addr, \
unsigned long count) \
{ \
asm volatile ("rep ; outs" #s \
: "=S" (addr), "=c" (count) \
: "d" (port), "0" (addr), "1" (count)); \
}
#define RETURN_TYPE unsigned char
__IN(b, "")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned short
__IN(w, "")
#undef RETURN_TYPE
#define RETURN_TYPE unsigned int
__IN(l, "")
#undef RETURN_TYPE
__OUT(b, "b", char)
__OUT(w, "w", short)
__OUT(l, , int)
__INS(b)
__INS(w)
__INS(l)
__OUTS(b)
__OUTS(w)
__OUTS(l)
#define IO_SPACE_LIMIT 0xffff
#if defined(__KERNEL__) && defined(__x86_64__)
#include <linux/vmalloc.h>
#ifndef __i386__
/*
* Change virtual addresses to physical addresses and vice versa.
* These are pretty trivial
*/
static inline unsigned long virt_to_phys(volatile void *address)
{
return __pa(address);
}
static inline void *phys_to_virt(unsigned long address)
{
return __va(address);
}
#endif
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#include <asm-generic/iomap.h>
/*
* This one maps high address device memory and turns off caching for that area.
* It's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
unsigned long prot_val);
/*
* The default ioremap() behavior is non-cached:
*/
static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
{
return ioremap_nocache(offset, size);
}
extern void iounmap(volatile void __iomem *addr);
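/*
 * Typical ioremap() lifecycle (illustrative sketch; the physical base
 * 0xfebf0000 and the 0x04 status offset are made-up values, and
 * readl() comes from the companion MMIO accessors):
 *
 *	void __iomem *regs = ioremap_nocache(0xfebf0000, 0x1000);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x04);
 *		iounmap(regs);
 *	}
 */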
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
#define isa_virt_to_bus virt_to_phys
#define isa_page_to_bus page_to_phys
#define isa_bus_to_virt phys_to_virt
/*
* However, PCI ones are not necessarily 1:1, and therefore these interfaces
* are forbidden in portable PCI drivers.
*
* Allow them on x86 for legacy drivers, though.
*/
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
void __memcpy_fromio(void *, unsigned long, unsigned);
void __memcpy_toio(unsigned long, const void *, unsigned);
static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
unsigned len)
{
__memcpy_fromio(to, (unsigned long)from, len);
}
static inline void memcpy_toio(volatile void __iomem *to, const void *from,
unsigned len)
{
__memcpy_toio((unsigned long)to, from, len);
}
void memset_io(volatile void __iomem *a, int b, size_t c);
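/*
 * Sketch (assumes "regs" is an ioremap()ed region as above and that
 * fw_image/fw_len/readback describe ordinary kernel buffers):
 *
 *	memset_io(regs, 0, 0x100);		// clear a window
 *	memcpy_toio(regs, fw_image, fw_len);	// push data out
 *	memcpy_fromio(readback, regs, fw_len);	// and read it back
 */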
/*
* ISA space is 'always mapped' on a typical x86 system; there is no need
* to explicitly ioremap() it. The fact that the ISA IO space is mapped
* to PAGE_OFFSET is pure coincidence - it does not mean ISA values
* are physical addresses. The following constant pointer can be
* used as the IO-area pointer (it can be iounmapped as well, so the
* analogy with PCI is quite close):
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
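/*
 * Example (sketch): the VGA text buffer sits at physical 0xb8000 in
 * ISA space, so it is reachable straight through the constant mapping:
 *
 *	u16 __iomem *vga_text = (u16 __iomem *)(__ISA_IO_base + 0xb8000);
 */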
#define flush_write_buffers()
#define BIO_VMERGE_BOUNDARY iommu_bio_merge
/*
* Convert a virtual cached pointer to an uncached pointer
*/
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _ASM_X86_IO_64_H */

View File

@@ -0,0 +1,204 @@
#ifndef _ASM_X86_IO_APIC_H
#define _ASM_X86_IO_APIC_H
#include <linux/types.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/irq_vectors.h>
/*
* Intel IO-APIC support for SMP and UP systems.
*
* Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar
*/
/* I/O Unit Redirection Table */
#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
#define IO_APIC_REDIR_SEND_PENDING (1 << 12)
#define IO_APIC_REDIR_REMOTE_IRR (1 << 14)
#define IO_APIC_REDIR_LEVEL_TRIGGER (1 << 15)
#define IO_APIC_REDIR_MASKED (1 << 16)
/*
* The structure of the IO-APIC:
*/
union IO_APIC_reg_00 {
u32 raw;
struct {
u32 __reserved_2 : 14,
LTS : 1,
delivery_type : 1,
__reserved_1 : 8,
ID : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_01 {
u32 raw;
struct {
u32 version : 8,
__reserved_2 : 7,
PRQ : 1,
entries : 8,
__reserved_1 : 8;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_02 {
u32 raw;
struct {
u32 __reserved_2 : 24,
arbitration : 4,
__reserved_1 : 4;
} __attribute__ ((packed)) bits;
};
union IO_APIC_reg_03 {
u32 raw;
struct {
u32 boot_DT : 1,
__reserved_1 : 31;
} __attribute__ ((packed)) bits;
};
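/*
 * Decoding sketch: a raw 32-bit value read from IO-APIC register 0x00
 * (the read itself is done elsewhere, e.g. by the io_apic_read()
 * helper in the implementation) can be picked apart via the union:
 *
 *	union IO_APIC_reg_00 reg_00;
 *
 *	reg_00.raw = raw_value;
 *	printk(KERN_DEBUG "IO-APIC ID: %u\n", reg_00.bits.ID);
 */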
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_SMI = 2,
dest__reserved_1 = 3,
dest_NMI = 4,
dest_INIT = 5,
dest__reserved_2 = 6,
dest_ExtINT = 7
};
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
* 001: lowest prio
* 111: ExtINT
*/
dest_mode : 1, /* 0: physical, 1: logical */
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1, /* 0: edge, 1: level */
mask : 1, /* 0: enabled, 1: disabled */
__reserved_2 : 15;
__u32 __reserved_3 : 24,
dest : 8;
} __attribute__ ((packed));
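/*
 * Construction sketch: a masked, level-triggered, fixed-delivery
 * redirection entry ("vector" and "apic_id" are placeholders):
 *
 *	struct IO_APIC_route_entry entry;
 *
 *	memset(&entry, 0, sizeof(entry));
 *	entry.vector = vector;
 *	entry.delivery_mode = 0;	// 000: FIXED
 *	entry.dest_mode = 0;		// physical destination
 *	entry.trigger = 1;		// level triggered
 *	entry.mask = 1;			// start out disabled
 *	entry.dest = apic_id;
 */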
struct IR_IO_APIC_route_entry {
__u64 vector : 8,
zero : 3,
index2 : 1,
delivery_status : 1,
polarity : 1,
irr : 1,
trigger : 1,
mask : 1,
reserved : 31,
format : 1,
index : 15;
} __attribute__ ((packed));
#ifdef CONFIG_X86_IO_APIC
/*
* # of IO-APICs and # of IRQ routing registers
*/
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];
/*
* MP-BIOS irq configuration table structures:
*/
#define MP_MAX_IOAPIC_PIN 127
struct mp_config_ioapic {
unsigned long mp_apicaddr;
unsigned int mp_apicid;
unsigned char mp_type;
unsigned char mp_apicver;
unsigned char mp_flags;
};
struct mp_config_intsrc {
unsigned int mp_dstapic;
unsigned char mp_type;
unsigned char mp_irqtype;
unsigned short mp_irqflag;
unsigned char mp_srcbus;
unsigned char mp_srcbusirq;
unsigned char mp_dstirq;
};
/* I/O APIC entries */
extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
/* Older SiS APIC requires that we rewrite the index register */
extern int sis_apic_bug;
/* 1 if "noapic" boot option passed */
extern int skip_ioapic_setup;
/* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
extern int timer_through_8259;
static inline void disable_ioapic_setup(void)
{
skip_ioapic_setup = 1;
}
/*
* If we use the IO-APIC for IRQ routing, disable automatic
* assignment of PCI IRQs.
*/
#define io_apic_assign_pci_irqs \
(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
#ifdef CONFIG_ACPI
extern int io_apic_get_unique_id(int ioapic, int apic_id);
extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);
extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
int edge_level, int active_high_low);
#endif /* CONFIG_ACPI */
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);
#ifdef CONFIG_X86_64
extern int save_mask_IO_APIC_setup(void);
extern void restore_IO_APIC_setup(void);
extern void reinit_intr_remapped_IO_APIC(int);
#endif
extern int probe_nr_irqs(void);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
static const int timer_through_8259 = 0;
static inline void ioapic_init_mappings(void) { }
static inline int probe_nr_irqs(void)
{
return NR_IRQS;
}
#endif
#endif /* _ASM_X86_IO_APIC_H */

View File

@@ -0,0 +1 @@
#include <asm-generic/ioctl.h>

View File

@@ -0,0 +1,94 @@
#ifndef _ASM_X86_IOCTLS_H
#define _ASM_X86_IOCTLS_H
#include <asm/ioctl.h>
/* 0x54 is just a magic number to make these relatively unique ('T') */
#define TCGETS 0x5401
#define TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x5405
#define TCSETA 0x5406
#define TCSETAW 0x5407
#define TCSETAF 0x5408
#define TCSBRK 0x5409
#define TCXONC 0x540A
#define TCFLSH 0x540B
#define TIOCEXCL 0x540C
#define TIOCNXCL 0x540D
#define TIOCSCTTY 0x540E
#define TIOCGPGRP 0x540F
#define TIOCSPGRP 0x5410
#define TIOCOUTQ 0x5411
#define TIOCSTI 0x5412
#define TIOCGWINSZ 0x5413
#define TIOCSWINSZ 0x5414
#define TIOCMGET 0x5415
#define TIOCMBIS 0x5416
#define TIOCMBIC 0x5417
#define TIOCMSET 0x5418
#define TIOCGSOFTCAR 0x5419
#define TIOCSSOFTCAR 0x541A
#define FIONREAD 0x541B
#define TIOCINQ FIONREAD
#define TIOCLINUX 0x541C
#define TIOCCONS 0x541D
#define TIOCGSERIAL 0x541E
#define TIOCSSERIAL 0x541F
#define TIOCPKT 0x5420
#define FIONBIO 0x5421
#define TIOCNOTTY 0x5422
#define TIOCSETD 0x5423
#define TIOCGETD 0x5424
#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
#define TIOCSBRK 0x5427 /* BSD compatibility */
#define TIOCCBRK 0x5428 /* BSD compatibility */
#define TIOCGSID 0x5429 /* Return the session ID of FD */
#define TCGETS2 _IOR('T', 0x2A, struct termios2)
#define TCSETS2 _IOW('T', 0x2B, struct termios2)
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
#define TIOCGRS485 0x542E
#define TIOCSRS485 0x542F
#define TIOCGPTN _IOR('T', 0x30, unsigned int)
/* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
#define TCSETX 0x5433
#define TCSETXF 0x5434
#define TCSETXW 0x5435
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
#define FIOASYNC 0x5452
#define TIOCSERCONFIG 0x5453
#define TIOCSERGWILD 0x5454
#define TIOCSERSWILD 0x5455
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR 0x5459 /* Get line status register */
#define TIOCSERGETMULTI 0x545A /* Get multiport config */
#define TIOCSERSETMULTI 0x545B /* Set multiport config */
#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
#define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
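/*
 * Userspace usage sketch (illustrative): these constants are consumed
 * through ioctl(2); e.g. querying the terminal size with TIOCGWINSZ:
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	struct winsize ws;
 *
 *	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == 0)
 *		printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);
 */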
#endif /* _ASM_X86_IOCTLS_H */

Some files were not shown because too many files have changed in this diff