Merge branch 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'rcu-v28-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (21 commits)
  rcu: RCU-based detection of stalled CPUs for Classic RCU, fix
  rcu: RCU-based detection of stalled CPUs for Classic RCU
  rcu: add rcu_read_lock_sched() / rcu_read_unlock_sched()
  rcu: fix sparse shadowed variable warning
  doc/RCU: fix pseudocode in rcuref.txt
  rcuclassic: fix compiler warning
  rcu: use irq-safe locks
  rcuclassic: fix compilation NG
  rcu: fix locking cleanup fallout
  rcu: remove redundant ACCESS_ONCE definition from rcupreempt.c
  rcu: fix classic RCU locking cleanup lockdep problem
  rcu: trace fix possible mem-leak
  rcu: just rename call_rcu_bh instead of making it a macro
  rcu: remove list_for_each_rcu()
  rcu: fixes to include/linux/rcupreempt.h
  rcu: classic RCU locking and memory-barrier cleanups
  rcu: prevent console flood when one CPU sees another AWOL via RCU
  rcu, debug: detect stalled grace periods, cleanups
  rcu, debug: detect stalled grace periods
  rcu classic: new algorithm for callbacks-processing(v2)
  ...
include/linux/compiler.h

@@ -190,7 +190,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * ACCESS_ONCE() in different C statements.
  *
  * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time.
+ * merging, or refetching absolutely anything at any time.  Its main intended
+ * use is to mediate communication between process-level code and irq/NMI
+ * handlers, all running on the same CPU.
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
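The new comment text is easiest to see in use. Below is a minimal illustrative sketch, not part of this patch, of the process-level/irq-handler pattern the comment describes; the flag and function names are hypothetical.

	#include <linux/compiler.h>

	/* Hypothetical flag shared between an irq handler and a polling loop. */
	static int wakeup_pending;

	/* Runs in irq context on the same CPU. */
	static void note_wakeup(void)
	{
		ACCESS_ONCE(wakeup_pending) = 1;
	}

	/* Runs in process context. */
	static void wait_for_wakeup(void)
	{
		/*
		 * Without ACCESS_ONCE() the compiler may hoist the load out of
		 * the loop and spin on a cached value forever; the volatile
		 * cast forces one real load per iteration.  It orders nothing
		 * against other CPUs, which is why the comment restricts its
		 * use to code racing on a single CPU.
		 */
		while (!ACCESS_ONCE(wakeup_pending))
			cpu_relax();
		ACCESS_ONCE(wakeup_pending) = 0;
	}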
include/linux/rcuclassic.h

@@ -40,12 +40,21 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+#define RCU_SECONDS_TILL_STALL_CHECK	( 3 * HZ) /* for rcp->jiffies_stall */
+#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
 /* Global control variables for rcupdate callback mechanism. */
 struct rcu_ctrlblk {
 	long	cur;		/* Current batch number. */
 	long	completed;	/* Number of the last completed batch */
-	int	next_pending;	/* Is the next batch already waiting? */
+	long	pending;	/* Number of the last pending batch */
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+	unsigned long gp_start;	/* Time at which GP started in jiffies. */
+	unsigned long jiffies_stall;
+				/* Time at which to check for CPU stalls. */
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
 
 	int	signaled;
 
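For orientation, here is a rough sketch of how gp_start and jiffies_stall drive the stall detector. The real logic lives in kernel/rcuclassic.c; the function bodies below are illustrative, not verbatim.

	#include <linux/jiffies.h>

	/* Illustrative sketch; see kernel/rcuclassic.c for the in-tree code. */
	static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
	{
		rcp->gp_start = jiffies;
		rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
	}

	static void check_cpu_stall(struct rcu_ctrlblk *rcp)
	{
		if (time_after(jiffies, rcp->jiffies_stall)) {
			printk(KERN_ERR "RCU grace period stalled for %lu jiffies\n",
			       jiffies - rcp->gp_start);
			/* Back off so one stall does not flood the console. */
			rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
		}
	}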
@@ -66,11 +75,7 @@ static inline int rcu_batch_after(long a, long b)
 	return (a - b) > 0;
 }
 
-/*
- * Per-CPU data for Read-Copy UPdate.
- * nxtlist - new callbacks are added here
- * curlist - current batch for which quiescent cycle started if any
- */
+/* Per-CPU data for Read-Copy UPdate. */
 struct rcu_data {
 	/* 1) quiescent state handling : */
 	long		quiescbatch;     /* Batch # for grace period */
@@ -78,12 +83,24 @@ struct rcu_data {
 	int		qs_pending;	 /* core waits for quiesc state */
 
 	/* 2) batch handling */
-	long		batch;           /* Batch # for current RCU batch */
+	/*
+	 * if nxtlist is not NULL, then:
+	 * batch:
+	 *	The batch # for the last entry of nxtlist
+	 * [*nxttail[1], NULL = *nxttail[2]):
+	 *	Entries that batch # <= batch
+	 * [*nxttail[0], *nxttail[1]):
+	 *	Entries that batch # <= batch - 1
+	 * [nxtlist, *nxttail[0]):
+	 *	Entries that batch # <= batch - 2
+	 *	The grace period for these entries has completed, and
+	 *	the other grace-period-completed entries may be moved
+	 *	here temporarily in rcu_process_callbacks().
+	 */
+	long		batch;
 	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail;
+	struct rcu_head **nxttail[3];
 	long            qlen;		 /* # of queued callbacks */
-	struct rcu_head *curlist;
-	struct rcu_head **curtail;
 	struct rcu_head *donelist;
 	struct rcu_head **donetail;
 	long		blimit;		 /* Upper limit on a processed batch */
 
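The new comment describes one singly linked list carved into three batch segments by the nxttail[] pointers, replacing the separate curlist/curtail pair. A simplified sketch of the bookkeeping, loosely modeled on what rcu_process_callbacks() does in kernel/rcuclassic.c:

	/*
	 * Simplified sketch.  nxttail[i] points at the "next" pointer that
	 * terminates segment i, so nxttail[2] always points at the list's
	 * trailing NULL link.  Segments slide down as grace periods end.
	 */
	static void rcu_advance_batches(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
	{
		if (!rcu_batch_before(rcp->completed, rdp->batch)) {
			/* Grace periods for every queued entry have ended. */
			rdp->nxttail[0] = rdp->nxttail[2];
			rdp->nxttail[1] = rdp->nxttail[2];
		} else if (!rcu_batch_before(rcp->completed, rdp->batch - 1)) {
			/* Only the "batch - 1" segment has also completed. */
			rdp->nxttail[0] = rdp->nxttail[1];
		}
	}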
include/linux/rculist.h

@@ -198,20 +198,6 @@ static inline void list_splice_init_rcu(struct list_head *list,
 	at->prev = last;
 }
 
-/**
- * list_for_each_rcu - iterate over an rcu-protected list
- * @pos:	the &struct list_head to use as a loop cursor.
- * @head:	the head for your list.
- *
- * This list-traversal primitive may safely run concurrently with
- * the _rcu list-mutation primitives such as list_add_rcu()
- * as long as the traversal is guarded by rcu_read_lock().
- */
-#define list_for_each_rcu(pos, head) \
-	for (pos = rcu_dereference((head)->next); \
-		prefetch(pos->next), pos != (head); \
-		pos = rcu_dereference(pos->next))
-
 #define __list_for_each_rcu(pos, head) \
 	for (pos = rcu_dereference((head)->next); \
 		pos != (head); \
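list_for_each_rcu() goes away because in-tree users had moved to list_for_each_entry_rcu(), which iterates over the containing objects directly. A hypothetical reader using the surviving primitive, with struct and field names invented for illustration:

	#include <linux/rculist.h>

	struct item {
		int key;
		struct list_head link;
	};

	/* Reader: safe against concurrent list_add_rcu()/list_del_rcu(). */
	static int item_present(struct list_head *head, int key)
	{
		struct item *it;
		int found = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(it, head, link) {
			if (it->key == key) {
				found = 1;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}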
include/linux/rcupdate.h

@@ -132,6 +132,26 @@ struct rcu_head {
  */
 #define rcu_read_unlock_bh() __rcu_read_unlock_bh()
 
+/**
+ * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
+ *
+ * Should be used with either
+ * - synchronize_sched()
+ * or
+ * - call_rcu_sched() and rcu_barrier_sched()
+ * on the write-side to insure proper synchronization.
+ */
+#define rcu_read_lock_sched() preempt_disable()
+
+/*
+ * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+ *
+ * See rcu_read_lock_sched for more information.
+ */
+#define rcu_read_unlock_sched() preempt_enable()
+
+
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section.  This pointer may later
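A usage sketch of the new primitives (the struct and global pointer are hypothetical, not from this patch): the reader's preempt-disabled section is exactly what synchronize_sched() waits out on the update side.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct cfg {
		int threshold;
	};

	static struct cfg *active_cfg;	/* hypothetical RCU-protected pointer */

	static int read_threshold(void)
	{
		struct cfg *c;
		int t = -1;

		rcu_read_lock_sched();		/* expands to preempt_disable() */
		c = rcu_dereference(active_cfg);
		if (c)
			t = c->threshold;
		rcu_read_unlock_sched();	/* expands to preempt_enable() */
		return t;
	}

	static void update_cfg(struct cfg *newc)
	{
		struct cfg *old = active_cfg;

		rcu_assign_pointer(active_cfg, newc);
		synchronize_sched();	/* all prior sched read-side sections done */
		kfree(old);
	}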
include/linux/rcupreempt.h

@@ -57,7 +57,13 @@ static inline void rcu_qsctr_inc(int cpu)
 	rdssp->sched_qs++;
 }
 #define rcu_bh_qsctr_inc(cpu)
-#define call_rcu_bh(head, rcu) call_rcu(head, rcu)
+
+/*
+ * Someone might want to pass call_rcu_bh as a function pointer.
+ * So this needs to just be a rename and not a macro function.
+ *  (no parentheses)
+ */
+#define call_rcu_bh		call_rcu
 
 /**
  * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
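The comment's point, made concrete: a function-like macro is only expanded when followed by an argument list, so a bare call_rcu_bh (as when taking its address) would fail to build; a plain rename behaves like an ordinary function designator. A hypothetical illustration:

	/* Works only because call_rcu_bh is a plain rename, not a macro function. */
	typedef void (*call_rcu_fn_t)(struct rcu_head *head,
				      void (*func)(struct rcu_head *head));

	static call_rcu_fn_t choose_variant(int bh)
	{
		return bh ? call_rcu_bh : call_rcu;	/* bare use, no parentheses */
	}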
@@ -111,7 +117,6 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
 
 static inline void rcu_enter_nohz(void)
 {
@@ -126,8 +131,8 @@ static inline void rcu_exit_nohz(void)
 {
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
 	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-	WARN_ON(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1));
+	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+			  &rs);
 }
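For reference, the invariant the now-ratelimited warning checks: dynticks is incremented on every nohz transition, so the count is odd while the CPU is outside nohz (and must be waited on by RCU) and even while it idles in nohz. A sketch of that parity test, as rcu_exit_nohz() applies it internally:

	/* Illustrative helper, not part of the patch. */
	static inline int rcu_cpu_is_outside_nohz(void)
	{
		/* Odd count: the CPU has exited nohz and may run crit sects. */
		return __get_cpu_var(rcu_dyntick_sched).dynticks & 0x1;
	}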