Merge branch 'cpus4096' into irq/threaded
Conflicts:
	arch/parisc/kernel/irq.c
	kernel/irq/handle.c

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -980,12 +980,9 @@ static void check_stack_usage(void)
 {
 	static DEFINE_SPINLOCK(low_water_lock);
 	static int lowest_to_date = THREAD_SIZE;
-	unsigned long *n = end_of_stack(current);
 	unsigned long free;
 
-	while (*n == 0)
-		n++;
-	free = (unsigned long)n - (unsigned long)end_of_stack(current);
+	free = stack_not_used(current);
 
 	if (free >= lowest_to_date)
 		return;
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <trace/sched.h>
+#include <linux/magic.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -212,6 +213,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	unsigned long *stackend;
+
 	int err;
 
 	prepare_to_copy(orig);
@@ -237,6 +240,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		goto out;
 
 	setup_thread_stack(tsk, orig);
+	stackend = end_of_stack(tsk);
+	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	tsk->stack_canary = get_random_int();
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq	    = -1,
 	.status	    = IRQ_DISABLED,
@@ -76,9 +78,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth      = 1,
 	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -115,6 +114,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -123,7 +126,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -133,14 +136,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth      = 1,
 		.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity   = CPU_MASK_ALL
-#endif
 	}
 };
 
 /* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -150,18 +149,30 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
@@ -169,7 +180,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -178,10 +192,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-			irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -223,9 +236,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
@@ -238,14 +248,16 @@ int __init early_irq_init(void)
 
 	init_irq_default_affinity();
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
+		init_alloc_desc_masks(&desc[i], 0, true);
 		desc[i].kstat_irqs = kstat_irqs_all[i];
 	}
 
 	return arch_early_irq_init();
 }
 
@@ -17,7 +17,14 @@ extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -119,16 +119,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
@@ -33,15 +33,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 	old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 		 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+				"for migration.\n", irq);
+		return false;
+	}
 	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
+	return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -71,12 +78,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+				"for migration.\n", irq);
 		/* still use old one */
 		desc = old_desc;
 		goto out_unlock;
 	}
-	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+		/* still use old one */
+		kfree(desc);
+		desc = old_desc;
+		goto out_unlock;
+	}
 
 	irq_desc_ptrs[irq] = desc;
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');
@@ -1130,7 +1130,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
 		return;
 	memset(&prstatus, 0, sizeof(prstatus));
 	prstatus.pr_pid = current->pid;
-	elf_core_copy_regs(&prstatus.pr_reg, regs);
+	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
 	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
 			      &prstatus, sizeof(prstatus));
 	final_note(buf);
@@ -51,6 +51,7 @@
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/percpu.h>
 
 #if 0
 #define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
 }
 
 #ifdef CONFIG_SMP
+
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+static void *percpu_modalloc(unsigned long size, unsigned long align,
+			     const char *name)
+{
+	void *ptr;
+
+	if (align > PAGE_SIZE) {
+		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+		       name, align, PAGE_SIZE);
+		align = PAGE_SIZE;
+	}
+
+	ptr = __alloc_reserved_percpu(size, align);
+	if (!ptr)
+		printk(KERN_WARNING
+		       "Could not allocate %lu bytes percpu data\n", size);
+	return ptr;
+}
+
+static void percpu_modfree(void *freeme)
+{
+	free_percpu(freeme);
+}
+
+#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
 /* Size of each block. -ve means used. */
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme)
 	}
 }
 
-static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-				 Elf_Shdr *sechdrs,
-				 const char *secstrings)
-{
-	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
-}
-
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
-}
-
 static int percpu_modinit(void)
 {
 	pcpu_num_used = 2;
@@ -513,7 +527,26 @@ static int percpu_modinit(void)
 	return 0;
 }
 __initcall(percpu_modinit);
+
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs,
+				 const char *secstrings)
+{
+	return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+}
+
+static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+}
+
 #else /* ... !CONFIG_SMP */
 
 static inline void *percpu_modalloc(unsigned long size, unsigned long align,
 				    const char *name)
 {
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src,
 	/* pcpusec should be 0, and size of that section should be 0. */
 	BUG_ON(size != 0);
 }
+
 #endif /* CONFIG_SMP */
 
 #define MODINFO_ATTR(field) \
@@ -74,6 +74,9 @@ NORET_TYPE void panic(const char * fmt, ...)
 	vsnprintf(buf, sizeof(buf), fmt, args);
 	va_end(args);
 	printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	dump_stack();
+#endif
 	bust_spinlocks(0);
 
 	/*
@@ -355,15 +358,18 @@ EXPORT_SYMBOL(warn_slowpath);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
+
 /*
  * Called when gcc's -fstack-protector feature is used, and
  * gcc detects corruption of the on-stack canary value
 */
 void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted");
+	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+		__builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
+
 #endif
 
 core_param(panic, panic_timeout, int, 0644);
@@ -3448,19 +3448,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  */
 #define MAX_PINNED_INTERVAL	512
 
+/* Working cpumask for load_balance and load_balance_newidle. */
+static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
 			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *balance, struct cpumask *cpus)
+			int *balance)
 {
 	int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
 	struct sched_group *group;
 	unsigned long imbalance;
 	struct rq *busiest;
 	unsigned long flags;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	cpumask_setall(cpus);
 
@@ -3615,8 +3619,7 @@ out:
 * this_rq is locked.
 */
 static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
-		     struct cpumask *cpus)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 {
 	struct sched_group *group;
 	struct rq *busiest = NULL;
@@ -3624,6 +3627,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
 	int ld_moved = 0;
 	int sd_idle = 0;
 	int all_pinned = 0;
+	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
 
 	cpumask_setall(cpus);
 
@@ -3764,10 +3768,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
-	cpumask_var_t tmpmask;
-
-	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
-		return;
 
 	for_each_domain(this_cpu, sd) {
 		unsigned long interval;
@@ -3778,7 +3778,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		if (sd->flags & SD_BALANCE_NEWIDLE)
 			/* If we've pulled tasks over stop searching: */
 			pulled_task = load_balance_newidle(this_cpu, this_rq,
-							   sd, tmpmask);
+							   sd);
 
 		interval = msecs_to_jiffies(sd->balance_interval);
 		if (time_after(next_balance, sd->last_balance + interval))
@@ -3793,7 +3793,6 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
 		 */
 		this_rq->next_balance = next_balance;
 	}
-	free_cpumask_var(tmpmask);
 }
 
 /*
@@ -3943,11 +3942,6 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 	int need_serialize;
-	cpumask_var_t tmp;
-
-	/* Fails alloc? Rebalancing probably not a priority right now. */
-	if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
-		return;
 
 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3972,7 +3966,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		}
 
 		if (time_after_eq(jiffies, sd->last_balance + interval)) {
-			if (load_balance(cpu, rq, sd, idle, &balance, tmp)) {
+			if (load_balance(cpu, rq, sd, idle, &balance)) {
 				/*
 				 * We've pulled tasks over so either we're no
 				 * longer idle, or one of our SMT siblings is
@@ -4006,8 +4000,6 @@ out:
 	 */
 	if (likely(update_next_balance))
 		rq->next_balance = next_balance;
-
-	free_cpumask_var(tmp);
 }
 
 /*
@@ -5944,12 +5936,7 @@ void sched_show_task(struct task_struct *p)
 	printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
-	{
-		unsigned long *n = end_of_stack(p);
-		while (!*n)
-			n++;
-		free = (unsigned long)n - (unsigned long)end_of_stack(p);
-	}
+	free = stack_not_used(p);
 #endif
 	printk(KERN_CONT "%5lu %5d %6d\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent));
@@ -7254,7 +7241,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 {
 	int group;
 
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
@@ -7283,7 +7270,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #else
 	group = cpu;
@@ -7626,7 +7613,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
 		cpumask_and(sched_domain_span(sd),
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7637,7 +7624,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
 		cpumask_and(this_sibling_map,
-			    &per_cpu(cpu_sibling_map, i), cpu_map);
+			    topology_thread_cpumask(i), cpu_map);
 		if (i != cpumask_first(this_sibling_map))
 			continue;
 
@@ -8308,6 +8295,9 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_USER_SCHED
 	alloc_size *= 2;
 #endif
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	alloc_size += num_possible_cpus() * cpumask_size();
+#endif
 	/*
 	 * As sched_init() is called before page_alloc is setup,
@@ -8346,6 +8336,12 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_RT_GROUP_SCHED */
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	for_each_possible_cpu(i) {
+		per_cpu(load_balance_tmpmask, i) = (void *)ptr;
+		ptr += cpumask_size();
+	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 	}
 
 #ifdef CONFIG_SMP
@@ -9490,7 +9486,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 	u64 data;
 
 #ifndef CONFIG_64BIT
@@ -9509,7 +9505,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 
 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 
 #ifndef CONFIG_64BIT
 	/*
@@ -9605,7 +9601,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 	ca = task_ca(tsk);
 
 	for (; ca; ca = ca->parent) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }
@@ -960,12 +960,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
 	first = cpumask_first(mask);
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
 
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -796,6 +796,11 @@ int __init __weak early_irq_init(void)
 	return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+	return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
 	return 0;
@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 	 * doesn't hit this CPU until we're ready. */
 	get_cpu();
 	for_each_online_cpu(i) {
-		sm_work = percpu_ptr(stop_machine_work, i);
+		sm_work = per_cpu_ptr(stop_machine_work, i);
 		INIT_WORK(sm_work, stop_cpu);
 		queue_work_on(i, stop_machine_wq, sm_work);
 	}