Merge branch 'linus' into sched/urgent
kernel/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FTRACE) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
kernel/cpu.c (41 changes)
@@ -216,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
        int err, nr_calls = 0;
-       struct task_struct *p;
        cpumask_t old_allowed, tmp;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -249,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        cpus_setall(tmp);
        cpu_clear(cpu, tmp);
        set_cpus_allowed_ptr(current, &tmp);
+       tmp = cpumask_of_cpu(cpu);
 
-       p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
-       if (IS_ERR(p) || cpu_online(cpu)) {
+       err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+       if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                            hcpu) == NOTIFY_BAD)
                        BUG();
 
-               if (IS_ERR(p)) {
-                       err = PTR_ERR(p);
-                       goto out_allowed;
-               }
-               goto out_thread;
+               goto out_allowed;
        }
+       BUG_ON(cpu_online(cpu));
 
        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
@@ -279,8 +275,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
        check_for_tasks(cpu);
 
-out_thread:
-       err = kthread_stop(p);
 out_allowed:
        set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
@@ -461,3 +455,28 @@ out:
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 #endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
+ *
+ * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)      [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x)      MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)      MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)      MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+       MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
+       MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+       MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
+       MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
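
For context, the table above is consumed by cpumask_of_cpu(); a sketch of the reader side, reconstructed from include/linux/cpumask.h of the same series (illustrative, not part of this diff):

static inline const cpumask_t *get_cpu_mask(unsigned int cpu)
{
        /* Row 1 + (cpu % BITS_PER_LONG) has the right bit in word 0. */
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

        /* Back up so the set bit lands in word cpu / BITS_PER_LONG; the
         * words we back into (tails of earlier rows, or the empty row 0)
         * are all zero, so the resulting mask has exactly one bit set. */
        p -= cpu / BITS_PER_LONG;
        return (const cpumask_t *)p;
}
#define cpumask_of_cpu(cpu)    (*get_cpu_mask(cpu))

Because the mask now has a constant address, callers elsewhere in this merge can drop their cpumask_of_cpu_ptr() stack temporaries.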
kernel/dma-coherent.c (new file, 154 lines)
@@ -0,0 +1,154 @@
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+       void            *virt_base;
+       u32             device_base;
+       int             size;
+       int             flags;
+       unsigned long   *bitmap;
+};
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+                               dma_addr_t device_addr, size_t size, int flags)
+{
+       void __iomem *mem_base = NULL;
+       int pages = size >> PAGE_SHIFT;
+       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+       if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+               goto out;
+       if (!size)
+               goto out;
+       if (dev->dma_mem)
+               goto out;
+
+       /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+       mem_base = ioremap(bus_addr, size);
+       if (!mem_base)
+               goto out;
+
+       dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+       if (!dev->dma_mem)
+               goto out;
+       dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!dev->dma_mem->bitmap)
+               goto free1_out;
+
+       dev->dma_mem->virt_base = mem_base;
+       dev->dma_mem->device_base = device_addr;
+       dev->dma_mem->size = pages;
+       dev->dma_mem->flags = flags;
+
+       if (flags & DMA_MEMORY_MAP)
+               return DMA_MEMORY_MAP;
+
+       return DMA_MEMORY_IO;
+
+ free1_out:
+       kfree(dev->dma_mem);
+ out:
+       if (mem_base)
+               iounmap(mem_base);
+       return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+
+       if (!mem)
+               return;
+       dev->dma_mem = NULL;
+       iounmap(mem->virt_base);
+       kfree(mem->bitmap);
+       kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                       dma_addr_t device_addr, size_t size)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+       int pos, err;
+       int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);
+
+       pages >>= PAGE_SHIFT;
+
+       if (!mem)
+               return ERR_PTR(-EINVAL);
+
+       pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+       err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
+       if (err != 0)
+               return ERR_PTR(err);
+       return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+/**
+ * Try to allocate memory from the per-device coherent area.
+ *
+ * @dev:       device from which we allocate memory
+ * @size:      size of requested memory area
+ * @dma_handle:        This will be filled with the correct dma handle
+ * @ret:       This pointer will be filled with the virtual address
+ *             to allocated area.
+ *
+ * This function should be only called from per-arch %dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return %ret.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+                                      dma_addr_t *dma_handle, void **ret)
+{
+       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+       int order = get_order(size);
+
+       if (mem) {
+               int page = bitmap_find_free_region(mem->bitmap, mem->size,
+                                                    order);
+               if (page >= 0) {
+                       *dma_handle = mem->device_base + (page << PAGE_SHIFT);
+                       *ret = mem->virt_base + (page << PAGE_SHIFT);
+                       memset(*ret, 0, size);
+               } else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
+                       *ret = NULL;
+       }
+       return (mem != NULL);
+}
+
+/**
+ * Try to free the memory allocated from per-device coherent memory pool.
+ * @dev:       device from which the memory was allocated
+ * @order:     the order of pages allocated
+ * @vaddr:     virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if
+ * %dma_release_coherent() should proceed with releasing memory from
+ * generic pools.
+ */
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+{
+       struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+       if (mem && vaddr >= mem->virt_base && vaddr <
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+               bitmap_release_region(mem->bitmap, page, order);
+               return 1;
+       }
+       return 0;
+}
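
To show where these hooks sit, a hypothetical sketch of an arch-level dma_alloc_coherent() consulting the per-device pool before the generic allocator; the function name and fallback path are illustrative, not taken from this commit:

void *arch_dma_alloc_coherent(struct device *dev, size_t size,
                              dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        /* Nonzero return means the per-device pool owned the request;
         * ret may still be NULL if the pool was exclusive and full. */
        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        /* No pool (or pool declined): fall back to the page allocator. */
        ret = (void *)__get_free_pages(gfp, get_order(size));
        if (ret) {
                memset(ret, 0, size);
                *dma_handle = virt_to_phys(ret);
        }
        return ret;
}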
kernel/fork.c
@@ -27,6 +27,7 @@
 #include <linux/key.h>
 #include <linux/binfmts.h>
 #include <linux/mman.h>
+#include <linux/mmu_notifier.h>
 #include <linux/fs.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
@@ -414,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
+               mmu_notifier_mm_init(mm);
                return mm;
        }
 
@@ -446,6 +448,7 @@ void __mmdrop(struct mm_struct *mm)
        BUG_ON(mm == &init_mm);
        mm_free_pgd(mm);
        destroy_context(mm);
+       mmu_notifier_mm_destroy(mm);
        free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
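
For reference, the two hooks added here are small inlines that bracket the mm lifetime; roughly what they expand to, reconstructed from include/linux/mmu_notifier.h of the same merge (treat as a sketch, not part of this diff):

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
        mm->mmu_notifier_mm = NULL;     /* fresh mm: no notifiers yet */
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
        /* Tear down notifier state only if something registered one. */
        if (mm_has_notifiers(mm))
                __mmu_notifier_mm_destroy(mm);
}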
kernel/module.c
@@ -325,18 +325,6 @@ static unsigned long find_symbol(const char *name,
        return -ENOENT;
 }
 
-/* lookup symbol in given range of kernel_symbols */
-static const struct kernel_symbol *lookup_symbol(const char *name,
-       const struct kernel_symbol *start,
-       const struct kernel_symbol *stop)
-{
-       const struct kernel_symbol *ks = start;
-       for (; ks < stop; ks++)
-               if (strcmp(ks->name, name) == 0)
-                       return ks;
-       return NULL;
-}
-
 /* Search for module by name: must hold module_mutex. */
 static struct module *find_module(const char *name)
 {
@@ -690,7 +678,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
        if (flags & O_NONBLOCK) {
                struct stopref sref = { mod, flags, forced };
 
-               return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
+               return stop_machine(__try_stop_module, &sref, NULL);
        } else {
                /* We don't need to stop the machine for this. */
                mod->state = MODULE_STATE_GOING;
@@ -1428,7 +1416,7 @@ static int __unlink_module(void *_mod)
 static void free_module(struct module *mod)
 {
        /* Delete from various lists */
-       stop_machine_run(__unlink_module, mod, NR_CPUS);
+       stop_machine(__unlink_module, mod, NULL);
        remove_notes_attrs(mod);
        remove_sect_attrs(mod);
        mod_kobject_remove(mod);
@@ -1703,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_KALLSYMS
+
+/* lookup symbol in given range of kernel_symbols */
+static const struct kernel_symbol *lookup_symbol(const char *name,
+       const struct kernel_symbol *start,
+       const struct kernel_symbol *stop)
+{
+       const struct kernel_symbol *ks = start;
+       for (; ks < stop; ks++)
+               if (strcmp(ks->name, name) == 0)
+                       return ks;
+       return NULL;
+}
+
 static int is_exported(const char *name, const struct module *mod)
 {
        if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
@@ -2196,7 +2197,7 @@ static struct module *load_module(void __user *umod,
        /* Now sew it into the lists so we can get lockdep and oops
         * info during argument parsing.  Noone should access us, since
         * strong_try_module_get() will fail. */
-       stop_machine_run(__link_module, mod, NR_CPUS);
+       stop_machine(__link_module, mod, NULL);
 
        /* Size of section 0 is 0, so this works well if no params */
        err = parse_args(mod->name, mod->args,
@@ -2230,7 +2231,7 @@ static struct module *load_module(void __user *umod,
        return mod;
 
  unlink:
-       stop_machine_run(__unlink_module, mod, NR_CPUS);
+       stop_machine(__unlink_module, mod, NULL);
        module_arch_cleanup(mod);
  cleanup:
        kobject_del(&mod->mkobj.kobj);
kernel/rcuclassic.c
@@ -91,8 +91,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
         * rdp->cpu is the current cpu.
         *
         * cpu_online_map is updated by the _cpu_down()
-        * using stop_machine_run(). Since we're in irqs disabled
-        * section, stop_machine_run() is not exectuting, hence
+        * using __stop_machine(). Since we're in irqs disabled
+        * section, __stop_machine() is not exectuting, hence
         * the cpu_online_map is stable.
         *
         * However, a cpu might have been offlined _just_ before
kernel/stop_machine.c
@@ -1,4 +1,4 @@
-/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
+/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
  * GPL v2 and any later version.
  */
 #include <linux/cpu.h>
@@ -13,204 +13,178 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
-/* Since we effect priority and affinity (both of which are visible
- * to, and settable by outside processes) we do indirection via a
- * kthread. */
-
-/* Thread to stop each CPU in user context. */
+/* This controls the threads on each CPU. */
 enum stopmachine_state {
-       STOPMACHINE_WAIT,
+       /* Dummy starting state for thread. */
+       STOPMACHINE_NONE,
+       /* Awaiting everyone to be scheduled. */
        STOPMACHINE_PREPARE,
+       /* Disable interrupts. */
        STOPMACHINE_DISABLE_IRQ,
+       /* Run the function */
+       STOPMACHINE_RUN,
+       /* Exit */
        STOPMACHINE_EXIT,
 };
-
-static enum stopmachine_state stopmachine_state;
-static unsigned int stopmachine_num_threads;
-static atomic_t stopmachine_thread_ack;
-
-static int stopmachine(void *cpu)
-{
-       int irqs_disabled = 0;
-       int prepared = 0;
-       cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
-
-       set_cpus_allowed_ptr(current, cpumask);
-
-       /* Ack: we are alive */
-       smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-       atomic_inc(&stopmachine_thread_ack);
-
-       /* Simple state machine */
-       while (stopmachine_state != STOPMACHINE_EXIT) {
-               if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
-                   && !irqs_disabled) {
-                       local_irq_disable();
-                       hard_irq_disable();
-                       irqs_disabled = 1;
-                       /* Ack: irqs disabled. */
-                       smp_mb(); /* Must read state first. */
-                       atomic_inc(&stopmachine_thread_ack);
-               } else if (stopmachine_state == STOPMACHINE_PREPARE
-                          && !prepared) {
-                       /* Everyone is in place, hold CPU. */
-                       preempt_disable();
-                       prepared = 1;
-                       smp_mb(); /* Must read state first. */
-                       atomic_inc(&stopmachine_thread_ack);
-               }
-               /* Yield in first stage: migration threads need to
-                * help our sisters onto their CPUs. */
-               if (!prepared && !irqs_disabled)
-                       yield();
-               cpu_relax();
-       }
-
-       /* Ack: we are exiting. */
-       smp_mb(); /* Must read state first. */
-       atomic_inc(&stopmachine_thread_ack);
-
-       if (irqs_disabled)
-               local_irq_enable();
-       if (prepared)
-               preempt_enable();
-
-       return 0;
-}
-
-/* Change the thread state */
-static void stopmachine_set_state(enum stopmachine_state state)
-{
-       atomic_set(&stopmachine_thread_ack, 0);
-       smp_wmb();
-       stopmachine_state = state;
-       while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
-               cpu_relax();
-}
-
-static int stop_machine(void)
-{
-       int i, ret = 0;
-
-       atomic_set(&stopmachine_thread_ack, 0);
-       stopmachine_num_threads = 0;
-       stopmachine_state = STOPMACHINE_WAIT;
-
-       for_each_online_cpu(i) {
-               if (i == raw_smp_processor_id())
-                       continue;
-               ret = kernel_thread(stopmachine, (void *)(long)i, CLONE_KERNEL);
-               if (ret < 0)
-                       break;
-               stopmachine_num_threads++;
-       }
-
-       /* Wait for them all to come to life. */
-       while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
-               yield();
-               cpu_relax();
-       }
-
-       /* If some failed, kill them all. */
-       if (ret < 0) {
-               stopmachine_set_state(STOPMACHINE_EXIT);
-               return ret;
-       }
-
-       /* Now they are all started, make them hold the CPUs, ready. */
-       preempt_disable();
-       stopmachine_set_state(STOPMACHINE_PREPARE);
-
-       /* Make them disable irqs. */
-       local_irq_disable();
-       hard_irq_disable();
-       stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
-
-       return 0;
-}
-
-static void restart_machine(void)
-{
-       stopmachine_set_state(STOPMACHINE_EXIT);
-       local_irq_enable();
-       preempt_enable_no_resched();
-}
+static enum stopmachine_state state;
 
 struct stop_machine_data {
        int (*fn)(void *);
        void *data;
-       struct completion done;
        int fnret;
 };
 
-static int do_stop(void *_smdata)
+/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+static unsigned int num_threads;
+static atomic_t thread_ack;
+static struct completion finished;
+static DEFINE_MUTEX(lock);
+
+static void set_state(enum stopmachine_state newstate)
 {
-       struct stop_machine_data *smdata = _smdata;
-       int ret;
-
-       ret = stop_machine();
-       if (ret == 0) {
-               ret = smdata->fn(smdata->data);
-               restart_machine();
-       }
-
-       /* We're done: you can kthread_stop us now */
-       complete(&smdata->done);
-
-       /* Wait for kthread_stop */
-       set_current_state(TASK_INTERRUPTIBLE);
-       while (!kthread_should_stop()) {
-               schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
-       }
-       __set_current_state(TASK_RUNNING);
-       return ret;
+       /* Reset ack counter. */
+       atomic_set(&thread_ack, num_threads);
+       smp_wmb();
+       state = newstate;
 }
 
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-                                      unsigned int cpu)
+/* Last one to ack a state moves to the next state. */
+static void ack_state(void)
 {
-       static DEFINE_MUTEX(stopmachine_mutex);
-       struct stop_machine_data smdata;
-       struct task_struct *p;
+       if (atomic_dec_and_test(&thread_ack)) {
+               /* If we're the last one to ack the EXIT, we're finished. */
+               if (state == STOPMACHINE_EXIT)
+                       complete(&finished);
+               else
+                       set_state(state + 1);
+       }
+}
 
-       smdata.fn = fn;
-       smdata.data = data;
-       init_completion(&smdata.done);
+/* This is the actual thread which stops the CPU.  It exits by itself rather
+ * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
+static int stop_cpu(struct stop_machine_data *smdata)
+{
+       enum stopmachine_state curstate = STOPMACHINE_NONE;
+       int uninitialized_var(ret);
 
-       mutex_lock(&stopmachine_mutex);
+       /* Simple state machine */
+       do {
+               /* Chill out and ensure we re-read stopmachine_state. */
+               cpu_relax();
+               if (state != curstate) {
+                       curstate = state;
+                       switch (curstate) {
+                       case STOPMACHINE_DISABLE_IRQ:
+                               local_irq_disable();
+                               hard_irq_disable();
+                               break;
+                       case STOPMACHINE_RUN:
+                               /* |= allows error detection if functions on
+                                * multiple CPUs. */
+                               smdata->fnret |= smdata->fn(smdata->data);
+                               break;
+                       default:
+                               break;
+                       }
+                       ack_state();
+               }
+       } while (curstate != STOPMACHINE_EXIT);
 
-       /* If they don't care which CPU fn runs on, bind to any online one. */
-       if (cpu == NR_CPUS)
-               cpu = raw_smp_processor_id();
+       local_irq_enable();
+       do_exit(0);
+}
 
-       p = kthread_create(do_stop, &smdata, "kstopmachine");
-       if (!IS_ERR(p)) {
+/* Callback for CPUs which aren't supposed to do anything. */
+static int chill(void *unused)
+{
+       return 0;
+}
+
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
+{
+       int i, err;
+       struct stop_machine_data active, idle;
+       struct task_struct **threads;
+
+       active.fn = fn;
+       active.data = data;
+       active.fnret = 0;
+       idle.fn = chill;
+       idle.data = NULL;
+
+       /* This could be too big for stack on large machines. */
+       threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
+       if (!threads)
+               return -ENOMEM;
+
+       /* Set up initial state. */
+       mutex_lock(&lock);
+       init_completion(&finished);
+       num_threads = num_online_cpus();
+       set_state(STOPMACHINE_PREPARE);
+
+       for_each_online_cpu(i) {
+               struct stop_machine_data *smdata = &idle;
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
-               /* One high-prio thread per cpu.  We'll do this one. */
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-               kthread_bind(p, cpu);
-               wake_up_process(p);
-               wait_for_completion(&smdata.done);
+               if (!cpus) {
+                       if (i == first_cpu(cpu_online_map))
+                               smdata = &active;
+               } else {
+                       if (cpu_isset(i, *cpus))
+                               smdata = &active;
+               }
+
+               threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
+                                           i);
+               if (IS_ERR(threads[i])) {
+                       err = PTR_ERR(threads[i]);
+                       threads[i] = NULL;
+                       goto kill_threads;
+               }
+
+               /* Place it onto correct cpu. */
+               kthread_bind(threads[i], i);
+
+               /* Make it highest prio. */
+               if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
+                       BUG();
        }
-       mutex_unlock(&stopmachine_mutex);
-       return p;
+
+       /* We've created all the threads.  Wake them all: hold this CPU so one
+        * doesn't hit this CPU until we're ready. */
+       get_cpu();
+       for_each_online_cpu(i)
+               wake_up_process(threads[i]);
+
+       /* This will release the thread on our CPU. */
+       put_cpu();
+       wait_for_completion(&finished);
+       mutex_unlock(&lock);
+
+       kfree(threads);
+
+       return active.fnret;
+
+kill_threads:
+       for_each_online_cpu(i)
+               if (threads[i])
+                       kthread_stop(threads[i]);
+       mutex_unlock(&lock);
+
+       kfree(threads);
+       return err;
 }
 
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-       struct task_struct *p;
        int ret;
 
        /* No CPUs can come up or down during this. */
       get_online_cpus();
-       p = __stop_machine_run(fn, data, cpu);
-       if (!IS_ERR(p))
-               ret = kthread_stop(p);
-       else
-               ret = PTR_ERR(p);
+       ret = __stop_machine(fn, data, cpus);
        put_online_cpus();
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
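
A minimal caller sketch of the new interface (names are made up for illustration): stop_machine() runs fn on one CPU while every other online CPU spins in stop_cpu() with interrupts disabled, and returns fn's return value:

#include <linux/stop_machine.h>

static int apply_patch(void *data)
{
        /* Runs with all other CPUs stopped and IRQs off: nothing else
         * can be executing the code we are about to modify. */
        return 0;
}

static int example_caller(void)
{
        /* NULL cpumask: fn runs on just one (the first online) CPU. */
        return stop_machine(apply_patch, NULL, NULL);
}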
kernel/time/tick-common.c
@@ -196,12 +196,10 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        struct tick_device *td;
        int cpu, ret = NOTIFY_OK;
        unsigned long flags;
-       cpumask_of_cpu_ptr_declare(cpumask);
 
        spin_lock_irqsave(&tick_device_lock, flags);
 
        cpu = smp_processor_id();
-       cpumask_of_cpu_ptr_next(cpumask, cpu);
        if (!cpu_isset(cpu, newdev->cpumask))
                goto out_bc;
 
@@ -209,7 +207,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
        curdev = td->evtdev;
 
        /* cpu local device ? */
-       if (!cpus_equal(newdev->cpumask, *cpumask)) {
+       if (!cpus_equal(newdev->cpumask, cpumask_of_cpu(cpu))) {
 
                /*
                 * If the cpu affinity of the device interrupt can not
@@ -222,7 +220,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                 * If we have a cpu local device already, do not replace it
                 * by a non cpu local device
                 */
-               if (curdev && cpus_equal(curdev->cpumask, *cpumask))
+               if (curdev && cpus_equal(curdev->cpumask, cpumask_of_cpu(cpu)))
                        goto out_bc;
        }
 
@@ -254,7 +252,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
                curdev = NULL;
        }
        clockevents_exchange_device(curdev, newdev);
-       tick_setup_device(td, newdev, cpu, cpumask);
+       tick_setup_device(td, newdev, cpu, &cpumask_of_cpu(cpu));
        if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
                tick_oneshot_notify();
kernel/trace/ftrace.c
@@ -587,7 +587,7 @@ static int __ftrace_modify_code(void *data)
 
 static void ftrace_run_update_code(int command)
 {
-       stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
+       stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
 void ftrace_disable_daemon(void)
@@ -787,7 +787,7 @@ static int ftrace_update_code(void)
            !ftrace_enabled || !ftraced_trigger)
                return 0;
 
-       stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
+       stop_machine(__ftrace_update_code, NULL, NULL);
 
        return 1;
 }
@@ -1564,7 +1564,7 @@ static int __init ftrace_dynamic_init(void)
 
        addr = (unsigned long)ftrace_record_ip;
 
-       stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+       stop_machine(ftrace_dyn_arch_init, &addr, NULL);
 
        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
kernel/trace/trace_sysprof.c
@@ -213,9 +213,7 @@ static void start_stack_timers(void)
        int cpu;
 
        for_each_online_cpu(cpu) {
-               cpumask_of_cpu_ptr(new_mask, cpu);
-
-               set_cpus_allowed_ptr(current, new_mask);
+               set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                start_stack_timer(cpu);
        }
        set_cpus_allowed_ptr(current, &saved_mask);