rcu: Consistent rcu_is_watching() naming
The old rcu_is_cpu_idle() function is just the inverse of __rcu_is_watching() with preemption disabled. This commit therefore renames rcu_is_cpu_idle() to rcu_is_watching() and inverts the sense of each call site.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
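In effect, the commit settles on a single convention: __rcu_is_watching() is the raw per-CPU test, whose caller must have at least disabled preemption, while rcu_is_watching() is the convenience wrapper that may be called from any context. A minimal sketch of the resulting pairing, mirroring the rcu_nmi_exit() hunk below (the rcu_is_cpu_idle() comparison comment is editorial):

bool __rcu_is_watching(void);	/* raw test; caller must disable preemption */

bool rcu_is_watching(void)	/* wrapper; safe from any context */
{
	int ret;

	preempt_disable();
	ret = __rcu_is_watching();
	preempt_enable();
	return ret;
}

/* The removed name had the opposite sense: rcu_is_cpu_idle() == !rcu_is_watching(). */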
@@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 } while (0)
 
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
-extern int rcu_is_cpu_idle(void);
+extern bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
 /*
@@ -351,7 +351,7 @@ static inline int rcu_read_lock_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -402,7 +402,7 @@ static inline int rcu_read_lock_sched_held(void)
 
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -771,7 +771,7 @@ static inline void rcu_read_lock(void)
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock() used illegally while idle");
 }
 
@@ -792,7 +792,7 @@ static inline void rcu_read_lock(void)
  */
 static inline void rcu_read_unlock(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock() used illegally while idle");
 	rcu_lock_release(&rcu_lock_map);
 	__release(RCU);
@@ -821,7 +821,7 @@ static inline void rcu_read_lock_bh(void)
 	local_bh_disable();
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
 }
 
@@ -832,7 +832,7 @@ static inline void rcu_read_lock_bh(void)
  */
 static inline void rcu_read_unlock_bh(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
@@ -857,7 +857,7 @@ static inline void rcu_read_lock_sched(void)
 	preempt_disable();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_sched() used illegally while idle");
 }
 
@@ -875,7 +875,7 @@ static inline notrace void rcu_read_lock_sched_notrace(void)
  */
 static inline void rcu_read_unlock_sched(void)
 {
-	rcu_lockdep_assert(!rcu_is_cpu_idle(),
+	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
@@ -132,13 +132,21 @@ static inline void rcu_scheduler_starting(void)
 }
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-#ifdef CONFIG_RCU_TRACE
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
 
-static inline bool __rcu_is_watching(void)
+static inline bool rcu_is_watching(void)
 {
-	return !rcu_is_cpu_idle();
+	return __rcu_is_watching();
 }
 
-#endif /* #ifdef CONFIG_RCU_TRACE */
+#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
+static inline bool rcu_is_watching(void)
+{
+	return true;
+}
+
+#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+
 #endif /* __LINUX_RCUTINY_H */
@@ -90,6 +90,6 @@ extern void exit_rcu(void);
 extern void rcu_scheduler_starting(void);
 extern int rcu_scheduler_active __read_mostly;
 
-extern bool __rcu_is_watching(void);
+extern bool rcu_is_watching(void);
 
 #endif /* __LINUX_RCUTREE_H */
@@ -4224,7 +4224,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 	       !rcu_lockdep_current_cpu_online()
 	       ? "RCU used illegally from offline CPU!\n"
-	       : rcu_is_cpu_idle()
+	       : !rcu_is_watching()
 	       ? "RCU used illegally from idle CPU!\n"
 	       : "",
 	       rcu_scheduler_active, debug_locks);
@@ -4247,7 +4247,7 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * So complain bitterly if someone does call rcu_read_lock(),
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		printk("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
@@ -148,7 +148,7 @@ int rcu_read_lock_bh_held(void)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
-	if (rcu_is_cpu_idle())
+	if (!rcu_is_watching())
 		return 0;
 	if (!rcu_lockdep_current_cpu_online())
 		return 0;
@@ -179,11 +179,11 @@ EXPORT_SYMBOL_GPL(rcu_irq_enter);
 /*
  * Test whether RCU thinks that the current CPU is idle.
  */
-int rcu_is_cpu_idle(void)
+bool __rcu_is_watching(void)
 {
-	return !rcu_dynticks_nesting;
+	return rcu_dynticks_nesting;
 }
-EXPORT_SYMBOL(rcu_is_cpu_idle);
+EXPORT_SYMBOL(__rcu_is_watching);
 
 #endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
@@ -654,36 +654,36 @@ void rcu_nmi_exit(void)
 	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 }
 
-/**
- * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
- *
- * If the current CPU is in its idle loop and is neither in an interrupt
- * or NMI handler, return true.
- */
-int rcu_is_cpu_idle(void)
-{
-	int ret;
-
-	preempt_disable();
-	ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
-	preempt_enable();
-	return ret;
-}
-EXPORT_SYMBOL_GPL(rcu_is_cpu_idle);
-
 /**
  * __rcu_is_watching - are RCU read-side critical sections safe?
  *
  * Return true if RCU is watching the running CPU, which means that
  * this CPU can safely enter RCU read-side critical sections.  Unlike
- * rcu_is_cpu_idle(), the caller of __rcu_is_watching() must have at
+ * rcu_is_watching(), the caller of __rcu_is_watching() must have at
  * least disabled preemption.
  */
 bool __rcu_is_watching(void)
 {
-	return !!(atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1);
+	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
 }
 
+/**
+ * rcu_is_watching - see if RCU thinks that the current CPU is idle
+ *
+ * If the current CPU is in its idle loop and is neither in an interrupt
+ * or NMI handler, return true.
+ */
+bool rcu_is_watching(void)
+{
+	int ret;
+
+	preempt_disable();
+	ret = __rcu_is_watching();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rcu_is_watching);
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
@@ -2268,7 +2268,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
 	 */
-	if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+	if (!rcu_is_watching() && cpu_online(smp_processor_id()))
 		invoke_rcu_core();
 
 	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
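As a usage sketch (a hypothetical caller, not part of this patch): code that must not enter an RCU read-side critical section from the idle loop or another extended quiescent state can now guard it directly with the new predicate.

/* Hypothetical caller, for illustration only. */
static void do_trace_event(void)
{
	if (!rcu_is_watching())
		return;		/* idle/EQS: RCU is not watching this CPU */
	rcu_read_lock();
	/* ... safely dereference RCU-protected data ... */
	rcu_read_unlock();
}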