kmemtrace, rcu: fix rcupreempt.c data structure dependencies
Impact: cleanup

We want to remove percpu.h from rcupreempt.h, but if we do that
the percpu primitives there won't build anymore. Move them to the
.c file instead.

Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Cc: paulmck@linux.vnet.ibm.com
LKML-Reference: <1237898630.25315.83.camel@penberg-laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
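The pattern behind the change: the header keeps only an extern prototype, while the per-CPU data definition and the function body move into the .c file, so the header no longer has to pull in percpu.h. Below is a minimal userspace C sketch of that split, assuming hypothetical names; counter.h/counter.c, struct stats, and the fixed NR_CPUS array are illustrative stand-ins, not kernel code or kernel APIs.

/* counter.h -- after the split: callers see only a prototype, so this
 * header needs no knowledge of the backing data (the analogue of being
 * able to drop percpu.h from rcupreempt.h). */
#ifndef COUNTER_H
#define COUNTER_H

extern void qs_inc(int cpu);	/* previously a static inline touching per-CPU data */

#endif /* COUNTER_H */

/* counter.c -- the data structure and the accessor body live here now;
 * only this file needs the "heavy" definitions. */
#include <stdio.h>
#include "counter.h"

#define NR_CPUS 4			/* illustrative stand-in for per-CPU storage */

struct stats {
	int qs;				/* plays the role of rcu_dyntick_sched.sched_qs */
};

static struct stats cpu_stats[NR_CPUS];	/* stand-in for DEFINE_PER_CPU */

void qs_inc(int cpu)
{
	cpu_stats[cpu].qs++;		/* same bump the kernel helper performs */
}

int main(void)
{
	qs_inc(0);
	qs_inc(0);
	qs_inc(1);
	printf("cpu0=%d cpu1=%d\n", cpu_stats[0].qs, cpu_stats[1].qs);
	return 0;
}

Saved as the two files above, "cc counter.c" builds and runs; callers that only increment the counter include counter.h alone, which is exactly the dependency cut this commit makes for rcu_qsctr_inc().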
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -40,23 +40,8 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 
-struct rcu_dyntick_sched {
-	int dynticks;
-	int dynticks_snap;
-	int sched_qs;
-	int sched_qs_snap;
-	int sched_dynticks_snap;
-};
-
-DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
-
-static inline void rcu_qsctr_inc(int cpu)
-{
-	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
-
-	rdssp->sched_qs++;
-}
-#define rcu_bh_qsctr_inc(cpu)
+extern void rcu_qsctr_inc(int cpu);
+static inline void rcu_bh_qsctr_inc(int cpu) { }
 
 /*
  * Someone might want to pass call_rcu_bh as a function pointer.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
 struct softirq_action;
 
 #ifdef CONFIG_NO_HZ
-
-static inline void rcu_enter_nohz(void)
-{
-	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
-	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
-	__get_cpu_var(rcu_dyntick_sched).dynticks++;
-	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
-}
-
-static inline void rcu_exit_nohz(void)
-{
-	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
-
-	__get_cpu_var(rcu_dyntick_sched).dynticks++;
-	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
-	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
-				&rs);
-}
-
-#else /* CONFIG_NO_HZ */
+extern void rcu_enter_nohz(void);
+extern void rcu_exit_nohz(void);
+#else
 # define rcu_enter_nohz()	do { } while (0)
 # define rcu_exit_nohz()	do { } while (0)
-#endif /* CONFIG_NO_HZ */
+#endif
 
 /*
  * A context switch is a grace period for rcupreempt synchronize_rcu()
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -147,7 +147,51 @@ struct rcu_ctrlblk {
 	wait_queue_head_t sched_wq;	/* Place for rcu_sched to sleep. */
 };
 
+struct rcu_dyntick_sched {
+	int dynticks;
+	int dynticks_snap;
+	int sched_qs;
+	int sched_qs_snap;
+	int sched_dynticks_snap;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
+	.dynticks = 1,
+};
+
+void rcu_qsctr_inc(int cpu)
+{
+	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
+
+	rdssp->sched_qs++;
+}
+
+#ifdef CONFIG_NO_HZ
+
+void rcu_enter_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
+}
+
+void rcu_exit_nohz(void)
+{
+	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
+
+	__get_cpu_var(rcu_dyntick_sched).dynticks++;
+	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
+	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
+				&rs);
+}
+
+#endif /* CONFIG_NO_HZ */
+
 static DEFINE_PER_CPU(struct rcu_data, rcu_data);
+
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock),
 	.completed = 0,
@@ -427,10 +471,6 @@ static void __rcu_advance_callbacks(struct rcu_data *rdp)
 	}
 }
 
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_dyntick_sched, rcu_dyntick_sched) = {
-	.dynticks = 1,
-};
-
 #ifdef CONFIG_NO_HZ
 static DEFINE_PER_CPU(int, rcu_update_flag);
 