Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
  rcu: Move end of special early-boot RCU operation earlier
  rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
  rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
  rcu: Remove lockdep annotations from RCU's _notrace() API members
  rcu: Add #ifdef to suppress __rcu_offline_cpu() warning in !HOTPLUG_CPU builds
  rcu: Add CPU-offline processing for single-node configurations
  rcu: Add "notrace" to RCU function headers used by ftrace
  rcu: Remove CONFIG_PREEMPT_RCU
  rcu: Merge preemptable-RCU functionality into hierarchical RCU
  rcu: Simplify rcu_pending()/rcu_check_callbacks() API
  rcu: Use debugfs_remove_recursive() simplify code.
  rcu: Merge per-RCU-flavor initialization into pre-existing macro
  rcu: Fix online/offline indication for rcudata.csv trace file
  rcu: Consolidate sparse and lockdep declarations in include/linux/rcupdate.h
  rcu: Renamings to increase RCU clarity
  rcu: Move private definitions from include/linux/rcutree.h to kernel/rcutree.h
  rcu: Expunge lingering references to CONFIG_CLASSIC_RCU, optimize on !SMP
  rcu: Delay rcu_barrier() wait until beginning of next CPU-hotunplug operation.
  rcu: Fix typo in rcu_irq_exit() comment header
  rcu: Make rcupreempt_trace.c look at offline CPUs
  ...
Author: Linus Torvalds
Date:   2009-09-11 13:20:18 -07:00
37 changed files with 1638 additions and 3669 deletions

kernel/sched.c

@@ -5325,7 +5325,7 @@ need_resched:
         preempt_disable();
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
-        rcu_qsctr_inc(cpu);
+        rcu_sched_qs(cpu);
         prev = rq->curr;
         switch_count = &prev->nivcsw;
@@ -7053,6 +7053,11 @@ fail:
         return ret;
 }
 
+#define RCU_MIGRATION_IDLE 0
+#define RCU_MIGRATION_NEED_QS 1
+#define RCU_MIGRATION_GOT_QS 2
+#define RCU_MIGRATION_MUST_SYNC 3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7060,6 +7065,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+        int badcpu;
         int cpu = (long)data;
         struct rq *rq;
@@ -7094,8 +7100,17 @@ static int migration_thread(void *data)
                 req = list_entry(head->next, struct migration_req, list);
                 list_del_init(head->next);
 
-                spin_unlock(&rq->lock);
-                __migrate_task(req->task, cpu, req->dest_cpu);
+                if (req->task != NULL) {
+                        spin_unlock(&rq->lock);
+                        __migrate_task(req->task, cpu, req->dest_cpu);
+                } else if (likely(cpu == (badcpu = smp_processor_id()))) {
+                        req->dest_cpu = RCU_MIGRATION_GOT_QS;
+                        spin_unlock(&rq->lock);
+                } else {
+                        req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+                        spin_unlock(&rq->lock);
+                        WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+                }
                 local_irq_enable();
 
                 complete(&req->done);
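The branch added above is the responder half of the new expedited-grace-period handshake: synchronize_sched_expedited(), added near the end of this file, queues a struct migration_req with req->task == NULL and dest_cpu == RCU_MIGRATION_NEED_QS on every CPU, and because the migration thread is a kthread, simply getting to run on the expected CPU already constitutes an rcu-sched quiescent state, so the thread answers RCU_MIGRATION_GOT_QS, or RCU_MIGRATION_MUST_SYNC if it finds itself on the wrong CPU. The following is only an illustrative userspace model of that state machine, not kernel code; every name in it (qs_slot, responder, NR_CPUS_DEMO) is invented for the example.

/*
 * Toy model of the RCU_MIGRATION_* handshake shown in the hunk above.
 * Build with: gcc -Wall handshake_demo.c
 */
#include <stdio.h>

enum qs_state {                 /* mirrors the RCU_MIGRATION_* values */
        QS_IDLE,
        QS_NEED_QS,
        QS_GOT_QS,
        QS_MUST_SYNC,
};

#define NR_CPUS_DEMO 4

static enum qs_state qs_slot[NR_CPUS_DEMO];

/* What migration_thread() does for a request whose ->task is NULL. */
static void responder(int expected_cpu, int running_cpu)
{
        if (running_cpu == expected_cpu)
                qs_slot[expected_cpu] = QS_GOT_QS;      /* quiescent state observed */
        else
                qs_slot[expected_cpu] = QS_MUST_SYNC;   /* ask for the slow path */
}

int main(void)
{
        int cpu, need_full_sync = 0;

        /* Requester: post a NEED_QS probe to every CPU. */
        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
                qs_slot[cpu] = QS_NEED_QS;

        /* Pretend CPU 2's responder was migrated and actually ran on CPU 3. */
        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
                responder(cpu, cpu == 2 ? 3 : cpu);

        /* Requester: any MUST_SYNC answer forces a full synchronize_sched(). */
        for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
                if (qs_slot[cpu] == QS_MUST_SYNC)
                        need_full_sync = 1;
                qs_slot[cpu] = QS_IDLE;
        }
        printf("need_full_sync = %d\n", need_full_sync);
        return 0;
}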
@@ -10583,3 +10598,113 @@ struct cgroup_subsys cpuacct_subsys = {
         .subsys_id = cpuacct_subsys_id,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+        return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST -2
+#define RCU_EXPEDITED_STATE_IDLE -1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+        int cnt = 0;
+        int cpu;
+
+        cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+        for_each_online_cpu(cpu) {
+                cnt += sprintf(&page[cnt], " %d:%d",
+                               cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+        }
+        cnt += sprintf(&page[cnt], "\n");
+        return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+        int cpu;
+        unsigned long flags;
+        bool need_full_sync = 0;
+        struct rq *rq;
+        struct migration_req *req;
+        long snap;
+        int trycount = 0;
+
+        smp_mb();  /* ensure prior mod happens before capturing snap. */
+        snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+        get_online_cpus();
+        while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+                put_online_cpus();
+                if (trycount++ < 10)
+                        udelay(trycount * num_online_cpus());
+                else {
+                        synchronize_sched();
+                        return;
+                }
+                if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+                        smp_mb(); /* ensure test happens before caller kfree */
+                        return;
+                }
+                get_online_cpus();
+        }
+        rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+        for_each_online_cpu(cpu) {
+                rq = cpu_rq(cpu);
+                req = &per_cpu(rcu_migration_req, cpu);
+                init_completion(&req->done);
+                req->task = NULL;
+                req->dest_cpu = RCU_MIGRATION_NEED_QS;
+                spin_lock_irqsave(&rq->lock, flags);
+                list_add(&req->list, &rq->migration_queue);
+                spin_unlock_irqrestore(&rq->lock, flags);
+                wake_up_process(rq->migration_thread);
+        }
+        for_each_online_cpu(cpu) {
+                rcu_expedited_state = cpu;
+                req = &per_cpu(rcu_migration_req, cpu);
+                rq = cpu_rq(cpu);
+                wait_for_completion(&req->done);
+                spin_lock_irqsave(&rq->lock, flags);
+                if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+                        need_full_sync = 1;
+                req->dest_cpu = RCU_MIGRATION_IDLE;
+                spin_unlock_irqrestore(&rq->lock, flags);
+        }
+        rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+        mutex_unlock(&rcu_sched_expedited_mutex);
+        put_online_cpus();
+        if (need_full_sync)
+                synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
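As the comment above spells out, synchronize_sched_expedited() is a low-latency but CPU-hungry stand-in for synchronize_sched() on the update side, and it must not be called while holding any lock that a CPU-hotplug notifier acquires. A minimal caller sketch follows, assuming a hypothetical list traversed by rcu-sched readers; the struct, list, and function names (demo_node, demo_list, demo_lookup, demo_remove) are invented for illustration and are not part of this commit.

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {                      /* hypothetical RCU-protected element */
        struct list_head list;
        int key;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);      /* serializes updaters only */

/* Reader: rcu-sched flavor, i.e. protected by disabled preemption. */
static bool demo_lookup(int key)
{
        struct demo_node *p;
        bool found = false;

        rcu_read_lock_sched();
        list_for_each_entry_rcu(p, &demo_list, list) {
                if (p->key == key) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock_sched();
        return found;
}

/* Updater: unlink, wait for an expedited rcu-sched grace period, then free. */
static void demo_remove(struct demo_node *p)
{
        spin_lock(&demo_lock);
        list_del_rcu(&p->list);
        spin_unlock(&demo_lock);

        /* Illegal under any lock taken by a CPU-hotplug notifier. */
        synchronize_sched_expedited();
        kfree(p);
}

Common-case paths should keep using synchronize_sched(); the expedited variant buys shorter grace-period latency by burning time on every online CPU, which is exactly why the comment above warns against it for common-case code.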