Merge branch 'core/percpu' into percpu-cpumask-x86-for-linus-2
Conflicts:
	arch/parisc/kernel/irq.c
	arch/x86/include/asm/fixmap_64.h
	arch/x86/include/asm/setup.h
	kernel/irq/handle.c

Semantic merge:
	arch/x86/include/asm/fixmap.h

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);

char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
void __acpi_unmap_table(char *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
int acpi_boot_table_init (void);
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
#define BOOTMEM_DEFAULT		0
#define BOOTMEM_EXCLUSIVE	(1<<0)

extern int reserve_bootmem(unsigned long addr,
			   unsigned long size,
			   int flags);
extern int reserve_bootmem_node(pg_data_t *pgdat,
				unsigned long physaddr,
				unsigned long size,
				int flags);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
#endif
				unsigned long physaddr,
				unsigned long size,
				int flags);

extern void *__alloc_bootmem_nopanic(unsigned long size,
extern void *__alloc_bootmem(unsigned long size,
			     unsigned long align,
			     unsigned long goal);
extern void *__alloc_bootmem(unsigned long size,
extern void *__alloc_bootmem_nopanic(unsigned long size,
				     unsigned long align,
				     unsigned long goal);
extern void *__alloc_bootmem_low(unsigned long size,
				 unsigned long align,
				 unsigned long goal);
extern void *__alloc_bootmem_node(pg_data_t *pgdat,
				  unsigned long size,
				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				  unsigned long size,
				  unsigned long align,
				  unsigned long goal);
extern void *__alloc_bootmem_low(unsigned long size,
				 unsigned long align,
				 unsigned long goal);
extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
				      unsigned long size,
				      unsigned long align,
				      unsigned long goal);
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE

#define alloc_bootmem(x) \
	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_nopanic(x) \
	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_nopanic(x) \
	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_node(pgdat, x) \
	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(pgdat, x) \
	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))

#define alloc_bootmem_low(x) \
	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_low(x, PAGE_SIZE, 0)
#define alloc_bootmem_low_pages_node(pgdat, x) \
	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */

extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
				   int flags);
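
The wrappers above all funnel into __alloc_bootmem*() with a size, an alignment, and a "goal" (preferred starting physical address). A minimal sketch of a hypothetical early-boot caller, using only the interfaces declared above (the function and buffer names are illustrative, not part of this diff):

/* Hypothetical __init caller; runs before the page allocator is up. */
static void __init example_bootmem_usage(void)
{
	/* Cache-line aligned, preferably below MAX_DMA_ADDRESS; panics on failure. */
	void *table = alloc_bootmem(4096);

	/* Page-aligned allocation from low memory (goal 0). */
	void *lowbuf = alloc_bootmem_low_pages(PAGE_SIZE);

	/* The _nopanic variant returns NULL instead of panicking. */
	void *maybe = alloc_bootmem_nopanic(128);

	if (!maybe)
		printk(KERN_WARNING "example: bootmem allocation failed\n");
	(void)table;
	(void)lowbuf;
}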
include/linux/decompress/bunzip2.h  (new file, 10 lines)
@@ -0,0 +1,10 @@
#ifndef DECOMPRESS_BUNZIP2_H
#define DECOMPRESS_BUNZIP2_H

int bunzip2(unsigned char *inbuf, int len,
	int(*fill)(void*, unsigned int),
	int(*flush)(void*, unsigned int),
	unsigned char *output,
	int *pos,
	void(*error)(char *x));
#endif
include/linux/decompress/generic.h  (new file, 33 lines)
@@ -0,0 +1,33 @@
#ifndef DECOMPRESS_GENERIC_H
#define DECOMPRESS_GENERIC_H

/* Minimal chunksize to be read.
 * Bzip2 prefers at least 4096
 * Lzma prefers 0x10000 */
#define COMPR_IOBUF_SIZE 4096

typedef int (*decompress_fn) (unsigned char *inbuf, int len,
	int(*fill)(void*, unsigned int),
	int(*writebb)(void*, unsigned int),
	unsigned char *output,
	int *posp,
	void(*error)(char *x));

/* inbuf   - input buffer
 * len     - length of pre-read data in inbuf
 * fill    - function to fill inbuf if empty
 * writebb - function to write out outbuf
 * posp    - if non-null, input position (number of bytes read) will be
 *           returned here
 *
 * If len != 0, inbuf is initialized (with that much data) and fill
 * should not be called.
 * If len == 0, inbuf is allocated but empty; its size is COMPR_IOBUF_SIZE,
 * and fill should be called (repeatedly) to read data, at most
 * COMPR_IOBUF_SIZE at a time.
 */

/* Utility routine to detect the decompression method */
decompress_fn decompress_method(const unsigned char *inbuf, int len,
	const char **name);

#endif
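
decompress_method() inspects the magic bytes at the start of the buffer and hands back the matching decompress_fn (and its name). A sketch of a hypothetical caller, assuming the whole compressed image is already in memory and an error() handler has been set up as in decompress/mm.h:

/* Hypothetical caller: pick a decompressor by magic bytes, then run it. */
static int example_decompress(unsigned char *inbuf, int len,
			      unsigned char *output)
{
	const char *name;
	decompress_fn decomp = decompress_method(inbuf, len, &name);

	if (!decomp)
		return -1;	/* unknown compression format */

	/* No fill/writebb callbacks: everything is in inbuf already. */
	return decomp(inbuf, len, NULL, NULL, output, NULL, error);
}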
include/linux/decompress/inflate.h  (new file, 13 lines)
@@ -0,0 +1,13 @@
#ifndef INFLATE_H
#define INFLATE_H

/* Other housekeeping constants */
#define INBUFSIZ 4096

int gunzip(unsigned char *inbuf, int len,
	int(*fill)(void*, unsigned int),
	int(*flush)(void*, unsigned int),
	unsigned char *output,
	int *pos,
	void(*error_fn)(char *x));
#endif
include/linux/decompress/mm.h  (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
 * linux/compr_mm.h
 *
 * Memory management for pre-boot and ramdisk uncompressors
 *
 * Authors: Alain Knaff <alain@knaff.lu>
 *
 */

#ifndef DECOMPR_MM_H
#define DECOMPR_MM_H

#ifdef STATIC

/* Code active when included from pre-boot environment: */

/* A trivial malloc implementation, adapted from
 * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
 */
static unsigned long malloc_ptr;
static int malloc_count;

static void *malloc(int size)
{
	void *p;

	if (size < 0)
		error("Malloc error");
	if (!malloc_ptr)
		malloc_ptr = free_mem_ptr;

	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */

	p = (void *)malloc_ptr;
	malloc_ptr += size;

	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
		error("Out of memory");

	malloc_count++;
	return p;
}

static void free(void *where)
{
	malloc_count--;
	if (!malloc_count)
		malloc_ptr = free_mem_ptr;
}

#define large_malloc(a) malloc(a)
#define large_free(a) free(a)

#define set_error_fn(x)

#define INIT

#else /* STATIC */

/* Code active when compiled standalone for use when loading ramdisk: */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Use defines rather than static inline in order to avoid spurious
 * warnings when not needed (indeed large_malloc / large_free are not
 * needed by inflate) */

#define malloc(a) kmalloc(a, GFP_KERNEL)
#define free(a) kfree(a)

#define large_malloc(a) vmalloc(a)
#define large_free(a) vfree(a)

static void(*error)(char *m);
#define set_error_fn(x) error = x;

#define INIT __init
#define STATIC

#include <linux/init.h>

#endif /* STATIC */

#endif /* DECOMPR_MM_H */
include/linux/decompress/unlzma.h  (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef DECOMPRESS_UNLZMA_H
#define DECOMPRESS_UNLZMA_H

int unlzma(unsigned char *, int,
	int(*fill)(void*, unsigned int),
	int(*flush)(void*, unsigned int),
	unsigned char *output,
	int *posp,
	void(*error)(char *x)
	);

#endif
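
bunzip2(), gunzip() and unlzma() all share the callback shape described in decompress/generic.h. A sketch of what a fill() callback looks like (the input source here is hypothetical):

/* Hypothetical input source standing in for a real initrd/device reader. */
extern unsigned int example_read_source(void *dst, unsigned int n);

/* fill() is expected to copy up to 'size' fresh input bytes into 'buf'
 * and return how many it provided; 0 signals end of input. */
static int example_fill(void *buf, unsigned int size)
{
	return (int)example_read_source(buf, size);
}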
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
#endif
}

static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
{
#ifdef ELF_CORE_COPY_KERNEL_REGS
	ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
#else
	elf_core_copy_regs(elfregs, regs);
#endif
}

static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
#ifdef ELF_CORE_COPY_TASK_REGS
@@ -484,6 +484,7 @@ int show_interrupts(struct seq_file *p, void *v);
struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int cpu);

@@ -180,11 +180,11 @@ struct irq_desc {
	unsigned int irqs_unhandled;
	spinlock_t lock;
#ifdef CONFIG_SMP
	cpumask_t affinity;
	cpumask_var_t affinity;
	unsigned int cpu;
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_t pending_mask;
	cpumask_var_t pending_mask;
#endif
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *dir;
@@ -414,4 +414,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#endif /* !CONFIG_S390 */

#ifdef CONFIG_SMP
/**
 * init_alloc_desc_masks - allocate cpumasks for irq_desc
 * @desc: pointer to irq_desc struct
 * @cpu: cpu which will be handling the cpumasks
 * @boot: true if need bootmem
 *
 * Allocates affinity and pending_mask cpumask if required.
 * Returns true if successful (or not required).
 * Side effect: affinity has all bits set, pending_mask has all bits clear.
 */
static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
					 bool boot)
{
	int node;

	if (boot) {
		alloc_bootmem_cpumask_var(&desc->affinity);
		cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
		alloc_bootmem_cpumask_var(&desc->pending_mask);
		cpumask_clear(desc->pending_mask);
#endif
		return true;
	}

	node = cpu_to_node(cpu);

	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
		return false;
	cpumask_setall(desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
		free_cpumask_var(desc->affinity);
		return false;
	}
	cpumask_clear(desc->pending_mask);
#endif
	return true;
}

/**
 * init_copy_desc_masks - copy cpumasks for irq_desc
 * @old_desc: pointer to old irq_desc struct
 * @new_desc: pointer to new irq_desc struct
 *
 * Ensures affinity and pending_masks are copied to new irq_desc.
 * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
 * irq_desc struct so the copy is redundant.
 */

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
#ifdef CONFIG_CPUMASKS_OFFSTACK
	cpumask_copy(new_desc->affinity, old_desc->affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
#endif
#endif
}

#else /* !CONFIG_SMP */

static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
					 bool boot)
{
	return true;
}

static inline void init_copy_desc_masks(struct irq_desc *old_desc,
					struct irq_desc *new_desc)
{
}

#endif /* CONFIG_SMP */

#endif /* _LINUX_IRQ_H */
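
The two helpers above are meant to be used as a pair when an irq_desc is reallocated (for example, moved toward another CPU's node). A hedged sketch of a hypothetical caller:

/* Hypothetical desc-migration step: allocate masks on the target CPU's
 * node, then copy the old contents over. */
static struct irq_desc *example_move_desc(struct irq_desc *old_desc,
					  struct irq_desc *new_desc, int cpu)
{
	if (!init_alloc_desc_masks(new_desc, cpu, false))
		return NULL;	/* keep using old_desc on allocation failure */

	init_copy_desc_masks(old_desc, new_desc);
	return new_desc;
}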
@@ -20,6 +20,7 @@

# define for_each_irq_desc_reverse(irq, desc) \
	for (irq = nr_irqs - 1; irq >= 0; irq--)

#else /* CONFIG_GENERIC_HARDIRQS */

extern int nr_irqs;
@@ -182,6 +182,14 @@ struct kprobe_blackpoint {
DECLARE_PER_CPU(struct kprobe *, current_kprobe);
DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/*
 * For #ifdef avoidance:
 */
static inline int kprobes_built_in(void)
{
	return 1;
}

#ifdef CONFIG_KRETPROBES
extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
				   struct pt_regs *regs);
@@ -271,8 +279,16 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
void kprobe_flush_task(struct task_struct *tk);
void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);

#else /* CONFIG_KPROBES */
#else /* !CONFIG_KPROBES: */

static inline int kprobes_built_in(void)
{
	return 0;
}
static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	return 0;
}
static inline struct kprobe *get_kprobe(void *addr)
{
	return NULL;
@@ -329,5 +345,5 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
static inline void kprobe_flush_task(struct task_struct *tk)
{
}
#endif /* CONFIG_KPROBES */
#endif /* _LINUX_KPROBES_H */
#endif /* CONFIG_KPROBES */
#endif /* _LINUX_KPROBES_H */
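
kprobes_built_in() exists purely for #ifdef avoidance: callers branch on it at runtime and the optimizer drops the dead arm on !CONFIG_KPROBES kernels. A sketch of a hypothetical fault-path caller:

/* Hypothetical fault-path check; compiles to nothing without kprobes. */
static int example_notify_fault(struct pt_regs *regs, int trapnr)
{
	if (kprobes_built_in() && kprobe_fault_handler(regs, trapnr))
		return 1;	/* fault was consumed by a kprobe */
	return 0;
}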
@@ -49,4 +49,5 @@
#define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
#define INOTIFYFS_SUPER_MAGIC	0x2BAD1DEA

#define STACK_END_MAGIC		0x57AC6E9D
#endif /* __LINUX_MAGIC_H__ */
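
STACK_END_MAGIC is a sentinel written at the far end of a task's stack so that an overflow can be detected after the fact. A hedged sketch of the intended pattern, assuming end_of_stack() from <linux/sched.h> (the helper names are illustrative):

/* Arm the guard word at the lowest stack address... */
static inline void example_arm_stack_guard(struct task_struct *tsk)
{
	unsigned long *end = end_of_stack(tsk);

	*end = STACK_END_MAGIC;	/* overwritten only by a stack overflow */
}

/* ...and check it later, e.g. from a fault or scheduler path. */
static inline int example_stack_overflowed(struct task_struct *tsk)
{
	return *end_of_stack(tsk) != STACK_END_MAGIC;
}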
@@ -1,5 +1,5 @@
#ifndef MMIOTRACE_H
#define MMIOTRACE_H
#ifndef _LINUX_MMIOTRACE_H
#define _LINUX_MMIOTRACE_H

#include <linux/types.h>
#include <linux/list.h>
@@ -13,28 +13,34 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
				unsigned long condition, struct pt_regs *);

struct kmmio_probe {
	struct list_head list; /* kmmio internal list */
	unsigned long addr; /* start location of the probe point */
	unsigned long len; /* length of the probe region */
	kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
	kmmio_post_handler_t post_handler; /* Called after addr is executed */
	void *private;
	/* kmmio internal list: */
	struct list_head	list;
	/* start location of the probe point: */
	unsigned long		addr;
	/* length of the probe region: */
	unsigned long		len;
	/* Called before addr is executed: */
	kmmio_pre_handler_t	pre_handler;
	/* Called after addr is executed: */
	kmmio_post_handler_t	post_handler;
	void			*private;
};

/* kmmio is active by some kmmio_probes? */
static inline int is_kmmio_active(void)
{
	extern unsigned int kmmio_count;
	return kmmio_count;
}
extern unsigned int kmmio_count;

extern int register_kmmio_probe(struct kmmio_probe *p);
extern void unregister_kmmio_probe(struct kmmio_probe *p);

#ifdef CONFIG_MMIOTRACE
/* kmmio is active by some kmmio_probes? */
static inline int is_kmmio_active(void)
{
	return kmmio_count;
}

/* Called from page fault handler. */
extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);

#ifdef CONFIG_MMIOTRACE
/* Called from ioremap.c */
extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
			      void __iomem *addr);
@@ -43,7 +49,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr);
/* For anyone to insert markers. Remember trailing newline. */
extern int mmiotrace_printk(const char *fmt, ...)
				__attribute__ ((format (printf, 1, 2)));
#else
#else /* !CONFIG_MMIOTRACE: */
static inline int is_kmmio_active(void)
{
	return 0;
}

static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	return 0;
}

static inline void mmiotrace_ioremap(resource_size_t offset,
				unsigned long size, void __iomem *addr)
{
@@ -63,28 +79,28 @@ static inline int mmiotrace_printk(const char *fmt, ...)
#endif /* CONFIG_MMIOTRACE */

enum mm_io_opcode {
	MMIO_READ = 0x1, /* struct mmiotrace_rw */
	MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
	MMIO_PROBE = 0x3, /* struct mmiotrace_map */
	MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
	MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
	MMIO_READ	= 0x1,	/* struct mmiotrace_rw */
	MMIO_WRITE	= 0x2,	/* struct mmiotrace_rw */
	MMIO_PROBE	= 0x3,	/* struct mmiotrace_map */
	MMIO_UNPROBE	= 0x4,	/* struct mmiotrace_map */
	MMIO_UNKNOWN_OP = 0x5,	/* struct mmiotrace_rw */
};

struct mmiotrace_rw {
	resource_size_t phys; /* PCI address of register */
	unsigned long value;
	unsigned long pc; /* optional program counter */
	int map_id;
	unsigned char opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
	unsigned char width; /* size of register access in bytes */
	resource_size_t	phys;	/* PCI address of register */
	unsigned long	value;
	unsigned long	pc;	/* optional program counter */
	int		map_id;
	unsigned char	opcode;	/* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
	unsigned char	width;	/* size of register access in bytes */
};

struct mmiotrace_map {
	resource_size_t phys; /* base address in PCI space */
	unsigned long virt; /* base virtual address */
	unsigned long len; /* mapping size */
	int map_id;
	unsigned char opcode; /* MMIO_PROBE or MMIO_UNPROBE */
	resource_size_t	phys;	/* base address in PCI space */
	unsigned long	virt;	/* base virtual address */
	unsigned long	len;	/* mapping size */
	int		map_id;
	unsigned char	opcode;	/* MMIO_PROBE or MMIO_UNPROBE */
};

/* in kernel/trace/trace_mmiotrace.c */
@@ -94,4 +110,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
extern void mmio_trace_mapping(struct mmiotrace_map *map);
extern int mmio_trace_printk(const char *fmt, va_list args);

#endif /* MMIOTRACE_H */
#endif /* _LINUX_MMIOTRACE_H */
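
mmiotrace_printk() lets a driver drop ad-hoc markers into the MMIO trace stream; note the comment above about the required trailing newline. A minimal hypothetical marker (safe to call unconditionally, since the !CONFIG_MMIOTRACE stub returns 0):

/* Hypothetical driver-side trace marker. */
static void example_mark_trace(int stage)
{
	mmiotrace_printk("example: entering stage %d\n", stage);
}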
@@ -5,53 +5,66 @@
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define PER_CPU_BASE_SECTION ".data.percpu"
#else
#define PER_CPU_BASE_SECTION ".data"
#endif
#endif

#ifdef CONFIG_SMP
#define DEFINE_PER_CPU(type, name) \
	__attribute__((__section__(".data.percpu"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#ifdef MODULE
#define SHARED_ALIGNED_SECTION ".data.percpu"
#define PER_CPU_SHARED_ALIGNED_SECTION ""
#else
#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
#endif
#define PER_CPU_FIRST_SECTION ".first"

#else

#define PER_CPU_SHARED_ALIGNED_SECTION ""
#define PER_CPU_FIRST_SECTION ""

#endif

#define DEFINE_PER_CPU_SECTION(type, name, section) \
	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name

#define DEFINE_PER_CPU(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, "")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
	____cacheline_aligned_in_smp

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	__attribute__((__section__(".data.percpu.page_aligned"))) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#else
#define DEFINE_PER_CPU(type, name) \
	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")

#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)

#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
	DEFINE_PER_CPU(type, name)
#endif
#define DEFINE_PER_CPU_FIRST(type, name) \
	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)

#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)

/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
#ifndef PERCPU_ENOUGH_ROOM
/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE 8192
#define PERCPU_MODULE_RESERVE (8 << 10)
#else
#define PERCPU_MODULE_RESERVE 0
#define PERCPU_MODULE_RESERVE 0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM \
	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
#endif /* PERCPU_ENOUGH_ROOM */
	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
	 PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue. Since @var must be a simple identifier,
@@ -65,52 +78,94 @@

#ifdef CONFIG_SMP

#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32. More
 * intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
#endif

extern void *pcpu_base_addr;

typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);

extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size,
				void *base_addr,
				pcpu_populate_pte_fn_t populate_pte_fn);

extern ssize_t __init pcpu_embed_first_chunk(
				size_t static_size, size_t reserved_size,
				ssize_t dyn_size, ssize_t unit_size);

/*
 * Use this to get to a cpu's version of the per-cpu object
 * dynamically allocated. Non-atomic access to the current CPU's
 * version should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))

extern void *__alloc_reserved_percpu(size_t size, size_t align);

#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

struct percpu_data {
	void *ptrs[1];
};

#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
/*
 * Use this to get to a cpu's version of the per-cpu object dynamically
 * allocated. Non-atomic access to the current CPU's version should
 * probably be combined with get_cpu()/put_cpu().
 */
#define percpu_ptr(ptr, cpu) \
({ \
	struct percpu_data *__p = __percpu_disguise(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)]; \

#define per_cpu_ptr(ptr, cpu) \
({ \
	struct percpu_data *__p = __percpu_disguise(ptr); \
	(__typeof__(ptr))__p->ptrs[(cpu)]; \
})

extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
extern void percpu_free(void *__pdata);
#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */

extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);

#else /* CONFIG_SMP */

#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
static inline void *__alloc_percpu(size_t size, size_t align)
{
	return kzalloc(size, gfp);
	/*
	 * Can't easily make larger alignment work with kmalloc. WARN
	 * on it. Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return kzalloc(size, GFP_KERNEL);
}

static inline void percpu_free(void *__pdata)
static inline void free_percpu(void *p)
{
	kfree(__pdata);
	kfree(p);
}

#endif /* CONFIG_SMP */

#define percpu_alloc_mask(size, gfp, mask) \
	__percpu_alloc_mask((size), (gfp), &(mask))

#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)

/* (legacy) interface for use without CPU hotplug handling */

#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
						cpu_possible_map)
#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
#define free_percpu(ptr) percpu_free((ptr))
#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
						       __alignof__(type))

#endif /* __LINUX_PERCPU_H */
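
With this change alloc_percpu() passes __alignof__(type) through on both the dynamic-percpu and the UP fallback paths, and per_cpu_ptr() is the single accessor for either representation. A usage sketch (type and function names are illustrative):

struct example_stats {
	unsigned long packets;
	unsigned long bytes;
};

/* Hypothetical user of the unified allocator API above. */
static struct example_stats *example_alloc_stats(void)
{
	struct example_stats *stats = alloc_percpu(struct example_stats);
	int cpu;

	if (!stats)
		return NULL;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->packets = 0;

	return stats;	/* release with free_percpu(stats) */
}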
@@ -1185,10 +1185,9 @@ struct task_struct {
	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
@@ -2107,6 +2106,19 @@ static inline int object_is_on_stack(void *obj)

extern void thread_info_cache_init(void);

#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
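
stack_not_used() walks up from the sentinel at the stack end, counting bytes that were never written; it is only meaningful with CONFIG_DEBUG_STACK_USAGE set. A hypothetical debug report built on it:

/* Hypothetical reporting helper; guarded like the function it calls. */
#ifdef CONFIG_DEBUG_STACK_USAGE
static void example_report_stack(struct task_struct *p)
{
	printk(KERN_DEBUG "%s/%d: %lu bytes of stack never used\n",
	       p->comm, p->pid, stack_not_used(p));
}
#endif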
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
#define put_cpu()		preempt_enable()
#define put_cpu_no_resched()	preempt_enable_no_resched()

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

void smp_setup_processor_id(void);

#endif /* __LINUX_SMP_H */
include/linux/stackprotector.h  (new file, 16 lines)
@@ -0,0 +1,16 @@
#ifndef _LINUX_STACKPROTECTOR_H
#define _LINUX_STACKPROTECTOR_H 1

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/random.h>

#ifdef CONFIG_CC_STACKPROTECTOR
# include <asm/stackprotector.h>
#else
static inline void boot_init_stack_canary(void)
{
}
#endif

#endif
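
The empty boot_init_stack_canary() stub means generic boot code can call it unconditionally; on CONFIG_CC_STACKPROTECTOR builds the arch header supplies the real implementation. A sketch of the intended call pattern (the caller here is illustrative; in this series the actual call site is early in start_kernel()):

/* Hypothetical early-boot caller: anything running before this point
 * has no canary coverage, so call it as early as possible. */
static void __init example_early_boot(void)
{
	boot_init_stack_canary();
}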
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
#ifndef topology_core_siblings
#define topology_core_siblings(cpu)	cpumask_of_cpu(cpu)
#endif
#ifndef topology_thread_cpumask
#define topology_thread_cpumask(cpu)	cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)	cpumask_of(cpu)
#endif

#endif /* _LINUX_TOPOLOGY_H */
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);

extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);

/* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
 */
extern rwlock_t vmlist_lock;
extern struct vm_struct *vmlist;
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#endif /* _LINUX_VMALLOC_H */
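
The _noflush variants above skip the TLB and cache maintenance that their ordinary counterparts do, so a caller that batches many mappings must flush itself. A hedged sketch under that assumption (the address, page array, return-value handling, and flush placement are illustrative):

/* Hypothetical batched mapping; the caller owns the flush. */
static int example_map_pages(unsigned long addr, struct page **pages,
			     int nr_pages)
{
	unsigned long size = (unsigned long)nr_pages << PAGE_SHIFT;
	int ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);

	if (ret < 0)
		return ret;

	flush_cache_vmap(addr, addr + size);	/* arch cache flush, if any */
	return 0;
}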