rcu: Add grace-period, quiescent-state, and call_rcu trace events
Add trace events to record grace-period start and end, quiescent states,
CPUs noticing grace-period start and end, grace-period initialization,
call_rcu() invocation, tasks blocking in RCU read-side critical sections,
tasks exiting those same critical sections, force_quiescent_state()
detection of dyntick-idle and offline CPUs, CPUs entering and leaving
dyntick-idle mode (except from NMIs), CPUs coming online and going
offline, and CPUs being kicked for staying in dyntick-idle mode for too
long (as in many weeks, even on 32-bit systems).

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

rcu: Add the rcu flavor to callback trace events

The earlier trace events for registering RCU callbacks and for invoking
them did not include the RCU flavor (rcu_bh, rcu_preempt, or rcu_sched).
This commit adds the RCU flavor to those trace events.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
committed by: Paul E. McKenney
parent: 965a002b4f
commit: d4c08f2ac3
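The hunks below cover only the call sites in kernel/rcutree.c; the event
declarations themselves live in include/trace/events/rcu.h. As a rough,
illustrative sketch of what a three-argument event such as
trace_rcu_grace_period() expands from (field layout and format string are
inferred from the call sites below, not copied verbatim from the header):

/*
 * Illustrative sketch only -- the real declaration lives in
 * include/trace/events/rcu.h under TRACE_SYSTEM rcu.  The three
 * fields mirror the call sites below: flavor name, grace-period
 * number, and a short string naming the event.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rcu

#include <linux/tracepoint.h>

TRACE_EVENT(rcu_grace_period,

	TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),

	TP_ARGS(rcuname, gpnum, gpevent),

	TP_STRUCT__entry(
		__field(char *, rcuname)
		__field(unsigned long, gpnum)
		__field(char *, gpevent)
	),

	TP_fast_assign(
		__entry->rcuname = rcuname;
		__entry->gpnum = gpnum;
		__entry->gpevent = gpevent;
	),

	/* Renders as, e.g., "rcu_sched 42 cpuqs". */
	TP_printk("%s %lu %s",
		  __entry->rcuname, __entry->gpnum, __entry->gpevent)
);

At each call site below, the first argument selects the flavor
("rcu_sched", "rcu_bh", or the flavor's rsp->name), and the final short
string identifies the event ("start", "end", "cpuqs", "cpuofl", and so on).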
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -166,6 +166,8 @@ void rcu_sched_qs(int cpu)
 
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
+	if (rdp->passed_quiesc == 0)
+		trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesc = 1;
 }
 
@@ -175,6 +177,8 @@ void rcu_bh_qs(int cpu)
 
 	rdp->passed_quiesc_completed = rdp->gpnum - 1;
 	barrier();
+	if (rdp->passed_quiesc == 0)
+		trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
 	rdp->passed_quiesc = 1;
 }
 
@@ -319,6 +323,7 @@ static int rcu_implicit_offline_qs(struct rcu_data *rdp)
 	 * trust its state not to change because interrupts are disabled.
 	 */
 	if (cpu_is_offline(rdp->cpu)) {
+		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
 		rdp->offline_fqs++;
 		return 1;
 	}
@@ -359,6 +364,7 @@ void rcu_enter_nohz(void)
 		local_irq_restore(flags);
 		return;
 	}
+	trace_rcu_dyntick("Start");
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
@@ -396,6 +402,7 @@ void rcu_exit_nohz(void)
 	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 	smp_mb__after_atomic_inc();  /* See above. */
 	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+	trace_rcu_dyntick("End");
 	local_irq_restore(flags);
 }
 
@@ -501,6 +508,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	 * of the current RCU grace period.
 	 */
 	if ((curr & 0x1) == 0 || ULONG_CMP_GE(curr, snap + 2)) {
+		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
 		rdp->dynticks_fqs++;
 		return 1;
 	}
@@ -683,6 +691,7 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
 	 * go looking for one.
 	 */
 	rdp->gpnum = rnp->gpnum;
+	trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
 	if (rnp->qsmask & rdp->grpmask) {
 		rdp->qs_pending = 1;
 		rdp->passed_quiesc = 0;
@@ -746,6 +755,7 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat
 
 		/* Remember that we saw this grace-period completion. */
 		rdp->completed = rnp->completed;
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
 
 		/*
 		 * If we were in an extended quiescent state, we may have
@@ -856,6 +866,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
+	trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
 	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
@@ -870,6 +881,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		rcu_start_gp_per_cpu(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
+		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+					    rnp->level, rnp->grplo,
+					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
@@ -906,6 +920,9 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 		if (rnp == rdp->mynode)
 			rcu_start_gp_per_cpu(rsp, rnp, rdp);
 		rcu_preempt_boost_start_gp(rnp);
+		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+					    rnp->level, rnp->grplo,
+					    rnp->grphi, rnp->qsmask);
 		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
@@ -939,6 +956,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	if (gp_duration > rsp->gp_max)
 		rsp->gp_max = gp_duration;
 	rsp->completed = rsp->gpnum;
+	trace_rcu_grace_period(rsp->name, rsp->completed, "end");
 	rsp->signaled = RCU_GP_IDLE;
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
 }
@@ -967,6 +985,10 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			return;
 		}
 		rnp->qsmask &= ~mask;
+		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+						 mask, rnp->qsmask, rnp->level,
+						 rnp->grplo, rnp->grphi,
+						 !!rnp->gp_tasks);
 		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
 			/* Other bits still set at this level, so done. */
@@ -1135,11 +1157,20 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 		if (rnp->qsmaskinit != 0) {
 			if (rnp != rdp->mynode)
 				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			else
+				trace_rcu_grace_period(rsp->name,
+						       rnp->gpnum + 1 -
+						       !!(rnp->qsmask & mask),
+						       "cpuofl");
 			break;
 		}
-		if (rnp == rdp->mynode)
+		if (rnp == rdp->mynode) {
+			trace_rcu_grace_period(rsp->name,
+					       rnp->gpnum + 1 -
+					       !!(rnp->qsmask & mask),
+					       "cpuofl");
 			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-		else
+		} else
 			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 		mask = rnp->grpmask;
 		rnp = rnp->parent;
@@ -1226,7 +1257,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 		next = list->next;
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
-		__rcu_reclaim(list);
+		__rcu_reclaim(rsp->name, list);
 		list = next;
 		if (++count >= bl)
 			break;
@@ -1552,6 +1583,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 	rdp->qlen++;
 
+	if (__is_kfree_rcu_offset((unsigned long)func))
+		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
+					 rdp->qlen);
+	else
+		trace_rcu_callback(rsp->name, head, rdp->qlen);
+
 	/* If interrupts were disabled, don't dive into RCU core. */
 	if (irqs_disabled_flags(flags)) {
 		local_irq_restore(flags);
@@ -1850,6 +1887,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
 #endif /* #ifdef CONFIG_NO_HZ */
 	rdp->cpu = cpu;
+	rdp->rsp = rsp;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
@@ -1898,6 +1936,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 		rdp->gpnum = rnp->completed; /* if GP in progress... */
 		rdp->completed = rnp->completed;
 		rdp->passed_quiesc_completed = rnp->completed - 1;
+		trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
 	}
 	raw_spin_unlock(&rnp->lock);		/* irqs already disabled. */
 	rnp = rnp->parent;
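On a kernel with these events built in, they can be consumed from
userspace through ftrace. A minimal sketch, assuming debugfs is mounted
at /sys/kernel/debug and following the standard
events/<subsystem>/<event>/enable tracefs layout:

/*
 * Minimal userspace sketch: enable the rcu_grace_period event and
 * dump whatever has accumulated in the trace buffer.  Write "0" to
 * the same enable file to turn the event back off.
 */
#include <stdio.h>

#define TRACING "/sys/kernel/debug/tracing"

int main(void)
{
	char line[256];
	FILE *f;

	/* Enable the event. */
	f = fopen(TRACING "/events/rcu/rcu_grace_period/enable", "w");
	if (!f) {
		perror("enable");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);

	/* Read back the trace buffer. */
	f = fopen(TRACING "/trace", "r");
	if (!f) {
		perror("trace");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}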