Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  locking: Make sparse work with inline spinlocks and rwlocks
  x86/mce: Fix RCU lockdep splats
  rcu: Increase RCU CPU stall timeouts if PROVE_RCU
  ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
  rcu: Suppress RCU lockdep warnings during early boot
  rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
  rcu: Suppress __mpol_dup() false positive from RCU lockdep
  rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
  rcu: Add control variables to lockdep_rcu_dereference() diagnostics
  rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
  rcu: Use wrapper function instead of exporting tasklist_lock
  sched, rcu: Fix rcu_dereference() for RCU-lockdep
  rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
  rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
  x86/gart: Unexport gart_iommu_aperture

Fix trivial conflicts in kernel/trace/ftrace.c
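The common thread in this series is RCU-lockdep: plain rcu_dereference() calls become checked variants whose lockdep expression names every context that legalizes the access, and known false positives (early boot, !PREEMPT, cgroups, __mpol_dup()) are suppressed. As a rough illustration of the pattern the hunks below roll out, here is a minimal sketch; my_lock, my_pointer, my_data and my_read_val are invented names, not code from this merge:

/*
 * Minimal sketch of the rcu_dereference_check() pattern; invented names.
 */
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_data { int val; };

static DEFINE_SPINLOCK(my_lock);
static struct my_data *my_pointer;	/* updated under my_lock */

static int my_read_val(void)
{
	struct my_data *p;

	/* Legal under rcu_read_lock() or with my_lock held; else lockdep splats. */
	p = rcu_dereference_check(my_pointer,
				  rcu_read_lock_held() ||
				  lockdep_is_held(&my_lock));
	return p ? p->val : -1;
}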
@@ -87,7 +87,7 @@ static void __exit_signal(struct task_struct *tsk)
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					rcu_read_lock_held() ||
-					lockdep_is_held(&tasklist_lock));
+					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
 
 	posix_cpu_timers_exit(tsk);
@@ -86,7 +86,14 @@ int max_threads; /* tunable limit on nr_threads */
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
-EXPORT_SYMBOL_GPL(tasklist_lock);
+
+#ifdef CONFIG_PROVE_RCU
+int lockdep_tasklist_lock_is_held(void)
+{
+	return lockdep_is_held(&tasklist_lock);
+}
+EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
+#endif /* #ifdef CONFIG_PROVE_RCU */
 
 int nr_processes(void)
 {
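The wrapper is only useful if callers outside kernel/fork.c can see it. The excerpt does not show the header change, but the callers in kernel/exit.c and kernel/pid.c above imply a shared declaration of roughly this form (an assumption, not part of this diff):

#ifdef CONFIG_PROVE_RCU
/* Presumed companion declaration in a shared header. */
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */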
@@ -3822,6 +3822,7 @@ void lockdep_rcu_dereference(const char *file, const int line)
 	printk("%s:%d invoked rcu_dereference_check() without protection!\n",
 			file, line);
 	printk("\nother info that might help us debug this:\n\n");
+	printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
 	lockdep_print_held_locks(curr);
 	printk("\nstack backtrace:\n");
 	dump_stack();
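Pieced together from the printk()s above, a splat now carries the two new control variables. The file, line, and lock list below are made-up placeholders:

foo.c:123 invoked rcu_dereference_check() without protection!

other info that might help us debug this:

rcu_scheduler_active = 1, debug_locks = 1
<held-lock list from lockdep_print_held_locks()>

stack backtrace:
<dump_stack() output>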
@@ -367,7 +367,9 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 	struct task_struct *result = NULL;
 	if (pid) {
 		struct hlist_node *first;
-		first = rcu_dereference_check(pid->tasks[type].first, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock));
+		first = rcu_dereference_check(pid->tasks[type].first,
+					      rcu_read_lock_held() ||
+					      lockdep_tasklist_lock_is_held());
 		if (first)
 			result = hlist_entry(first, struct task_struct, pids[(type)].node);
 	}
@@ -246,12 +246,21 @@ struct rcu_data {
 
 #define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY		2	   /* Allow other CPUs time */
-						   /*  to take at least one */
-						   /*  scheduling clock irq */
-						   /*  before ratting on them. */
+
+#ifdef CONFIG_PROVE_RCU
+#define RCU_STALL_DELAY_DELTA	       (5 * HZ)
+#else
+#define RCU_STALL_DELAY_DELTA	       0
+#endif
+
+#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ + RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ + RCU_STALL_DELAY_DELTA)
+						/* for rsp->jiffies_stall */
+#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
+						/*  to take at least one */
+						/*  scheduling clock irq */
+						/*  before ratting on them. */
+
 #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
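Concretely: with CONFIG_PROVE_RCU=y the first stall check now fires after 10 * HZ + 5 * HZ = 15 seconds and rechecks after 30 * HZ + 5 * HZ = 35 seconds, independent of the HZ value; without PROVE_RCU the delta is 0 and the original 10- and 30-second timeouts are unchanged.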
@@ -1010,6 +1010,10 @@ int rcu_needs_cpu(int cpu)
 	int c = 0;
 	int thatcpu;
 
+	/* Check for being in the holdoff period. */
+	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
+		return rcu_needs_cpu_quick_check(cpu);
+
 	/* Don't bother unless we are the last non-dyntick-idle CPU. */
 	for_each_cpu_not(thatcpu, nohz_cpu_mask)
 		if (thatcpu != cpu) {
@@ -1041,10 +1045,8 @@ int rcu_needs_cpu(int cpu)
 	}
 
 	/* If RCU callbacks are still pending, RCU still needs this CPU. */
-	if (c) {
+	if (c)
 		raise_softirq(RCU_SOFTIRQ);
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-	}
 	return c;
 }
@@ -3476,7 +3476,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-	return !rcu_dereference(cpu_rq(cpu)->sd);
+	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
 
 /*
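rcu_dereference_sched() is the RCU-sched flavor: lockdep accepts it when preemption is disabled, which is how the scheduler protects cpu_rq(cpu)->sd. A minimal sketch of the read-side pattern it checks for, with invented names (active_cfg, my_read_limit), not code from this merge:

#include <linux/preempt.h>
#include <linux/rcupdate.h>

struct my_cfg { int limit; };
static struct my_cfg *active_cfg;	/* published with rcu_assign_pointer() */

static int my_read_limit(void)
{
	struct my_cfg *c;
	int limit;

	preempt_disable();		/* RCU-sched read-side critical section */
	c = rcu_dereference_sched(active_cfg);
	limit = c ? c->limit : 0;
	preempt_enable();
	return limit;
}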
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
@@ -84,18 +85,22 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
+/*
+ * Traverse the ftrace_list, invoking all entries.  The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism.  The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_ops *op = ftrace_list;
-
-	/* in case someone actually ports this to alpha! */
-	read_barrier_depends();
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
 	while (op != &ftrace_list_end) {
-		/* silly alpha */
-		read_barrier_depends();
 		op->func(ip, parent_ip);
-		op = op->next;
+		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 }
@@ -150,8 +155,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
-	smp_wmb();
-	ftrace_list = ops;
+	rcu_assign_pointer(ftrace_list, ops);
 
	if (ftrace_enabled) {
		ftrace_func_t func;
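The two ftrace hunks form a publish/subscribe pair: rcu_assign_pointer() orders initialization of a new element before its publication (replacing the open-coded smp_wmb()), and rcu_dereference_raw() on the reader supplies the dependency ordering that read_barrier_depends() used to provide. A minimal self-contained sketch of the same pairing, with invented names (my_ops, my_list, my_register), not code from this merge:

#include <linux/rcupdate.h>

struct my_ops {
	struct my_ops *next;
	void (*func)(void);
};

static struct my_ops my_list_end;		/* sentinel, like ftrace_list_end */
static struct my_ops *my_list = &my_list_end;

static void my_register(struct my_ops *ops)
{
	ops->next = my_list;
	rcu_assign_pointer(my_list, ops);	/* publish after init */
}

static void my_call_all(void)
{
	struct my_ops *op = rcu_dereference_raw(my_list);

	while (op != &my_list_end) {
		op->func();
		op = rcu_dereference_raw(op->next);
	}
}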
@@ -138,9 +138,9 @@ __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
 	cpu = smp_processor_id();
 
 	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
+		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 	else
-		trace_buf = rcu_dereference(perf_trace_buf);
+		trace_buf = rcu_dereference_sched(perf_trace_buf);
 
 	if (!trace_buf)
 		goto err;