Merge branch 'x86-mtrr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-mtrr-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mtrr: Use pci_dev->revision
  x86, mtrr: use stop_machine APIs for doing MTRR rendezvous
  stop_machine: implement stop_machine_from_inactive_cpu()
  stop_machine: reorganize stop_cpus() implementation
  x86, mtrr: lock stop machine during MTRR rendezvous sequence
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -136,10 +136,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
-int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static void queue_stop_cpus_work(const struct cpumask *cpumask,
+				 cpu_stop_fn_t fn, void *arg,
+				 struct cpu_stop_done *done)
 {
 	struct cpu_stop_work *work;
-	struct cpu_stop_done done;
 	unsigned int cpu;
 
 	/* initialize works and done */
@@ -147,9 +148,8 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 		work = &per_cpu(stop_cpus_work, cpu);
 		work->fn = fn;
 		work->arg = arg;
-		work->done = &done;
+		work->done = done;
 	}
-	cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
 	/*
 	 * Disable preemption while queueing to avoid getting
@@ -161,7 +161,15 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
 				    &per_cpu(stop_cpus_work, cpu));
 	preempt_enable();
+}
 
+static int __stop_cpus(const struct cpumask *cpumask,
+		       cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+
+	cpu_stop_init_done(&done, cpumask_weight(cpumask));
+	queue_stop_cpus_work(cpumask, fn, arg, &done);
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
@@ -431,8 +439,15 @@ static int stop_machine_cpu_stop(void *data)
 	struct stop_machine_data *smdata = data;
 	enum stopmachine_state curstate = STOPMACHINE_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	unsigned long flags;
 	bool is_active;
 
+	/*
+	 * When called from stop_machine_from_inactive_cpu(), irq might
+	 * already be disabled. Save the state and restore it on exit.
+	 */
+	local_save_flags(flags);
+
 	if (!smdata->active_cpus)
 		is_active = cpu == cpumask_first(cpu_online_mask);
 	else
@@ -460,7 +475,7 @@ static int stop_machine_cpu_stop(void *data)
 		}
 	} while (curstate != STOPMACHINE_EXIT);
 
-	local_irq_enable();
+	local_irq_restore(flags);
 	return err;
 }
 
@@ -487,4 +502,57 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 }
 EXPORT_SYMBOL_GPL(stop_machine);
 
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active. The local CPU is in the process of hotplug (so no other
+ * CPU hotplug can start) and not marked active and doesn't have enough
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive. Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non zero return value if any
+ * returned non zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+				   const struct cpumask *cpus)
+{
+	struct stop_machine_data smdata = { .fn = fn, .data = data,
+					    .active_cpus = cpus };
+	struct cpu_stop_done done;
+	int ret;
+
+	/* Local CPU must be inactive and CPU hotplug in progress. */
+	BUG_ON(cpu_active(raw_smp_processor_id()));
+	smdata.num_threads = num_active_cpus() + 1;	/* +1 for local */
+
+	/* No proper task established and can't sleep - busy wait for lock. */
+	while (!mutex_trylock(&stop_cpus_mutex))
+		cpu_relax();
+
+	/* Schedule work on other CPUs and execute directly for local CPU */
+	set_state(&smdata, STOPMACHINE_PREPARE);
+	cpu_stop_init_done(&done, num_active_cpus());
+	queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+			     &done);
+	ret = stop_machine_cpu_stop(&smdata);
+
+	/* Busy wait for completion. */
+	while (!completion_done(&done.completion))
+		cpu_relax();
+
+	mutex_unlock(&stop_cpus_mutex);
+	return ret ?: done.ret;
+}
+
 #endif	/* CONFIG_STOP_MACHINE */
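
Usage note: the kernel-doc block added above describes when stop_machine_from_inactive_cpu() may be called: from a CPU that is online but not yet marked active, with no proper task context, so the function busy-waits instead of sleeping. The C sketch below shows how such a caller could look; it is not taken from this commit, and the identifiers my_sync_data, my_rendezvous_handler and my_sync_from_inactive_cpu are hypothetical stand-ins for the x86 MTRR rendezvous code that this merge converts to the stop_machine APIs.

/*
 * Hypothetical caller sketch: every identifier below except the
 * stop_machine API itself is made up for illustration.
 */
#include <linux/stop_machine.h>

struct my_sync_data {
	unsigned long val;		/* hypothetical payload */
};

/*
 * Rendezvous handler: runs on the CPUs selected by the @cpus argument of
 * stop_machine_from_inactive_cpu() while every CPU is held with
 * interrupts disabled.
 */
static int my_rendezvous_handler(void *info)
{
	struct my_sync_data *data = info;

	/* program hardware state from data->val while the machine is quiescent */
	(void)data;
	return 0;	/* 0 from all executions makes the whole call return 0 */
}

/* Called on a CPU that is online but not yet marked active (early hotplug). */
static void my_sync_from_inactive_cpu(unsigned long val)
{
	struct my_sync_data data = { .val = val };

	/* NULL for @cpus means "any online cpu", per the kernel-doc above. */
	stop_machine_from_inactive_cpu(my_rendezvous_handler, &data, NULL);
}

The busy-wait on stop_cpus_mutex and on the completion inside stop_machine_from_inactive_cpu() is what makes such a call safe from a context that cannot sleep, which is exactly the constraint spelled out in the CONTEXT section of the kernel-doc.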