Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf, x86: Try to handle unknown nmis with an enabled PMU
  perf, x86: Fix handle_irq return values
  perf, x86: Fix accidentally ack'ing a second event on intel perf counter
  oprofile, x86: fix init_sysfs() function stub
  lockup_detector: Sync touch_*_watchdog back to old semantics
  tracing: Fix a race in function profile
  oprofile, x86: fix init_sysfs error handling
  perf_events: Fix time tracking for events with pid != -1 and cpu != -1
  perf: Initialize callchains roots's childen hits
  oprofile: fix crash when accessing freed task structs
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
 	}
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+	return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 delta;
+	/*
+	 * An event which could not be activated because of
+	 * filter mismatch still needs to have its timings
+	 * maintained, otherwise bogus information is return
+	 * via read() for time_enabled, time_running:
+	 */
+	if (event->state == PERF_EVENT_STATE_INACTIVE
+	    && !event_filter_match(event)) {
+		delta = ctx->time - event->tstamp_stopped;
+		event->tstamp_running += delta;
+		event->tstamp_stopped = ctx->time;
+	}
+
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
 		struct perf_event_context *ctx)
 {
 	struct perf_event *event;
-
-	if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-		return;
+	int state = group_event->state;
 
 	event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
 	list_for_each_entry(event, &group_event->sibling_list, group_entry)
 		event_sched_out(event, cpuctx, ctx);
 
-	if (group_event->attr.exclusive)
+	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
 		cpuctx->exclusive = 0;
 }
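
The two hunks above work together: event_sched_out() now advances tstamp_running/tstamp_stopped even for an INACTIVE event whose CPU filter does not match, so read() reports consistent time_enabled/time_running, and group_sched_out() snapshots the group state up front so cpuctx->exclusive is only cleared for groups that were actually active. A minimal user-space sketch of that bookkeeping follows; the sim_* names and the simplified struct are illustrative stand-ins for struct perf_event, not kernel code.

#include <stdio.h>

struct sim_event {
	int cpu;                           /* -1 means "count on any CPU" */
	unsigned long long tstamp_running;
	unsigned long long tstamp_stopped;
};

static int sim_event_filter_match(const struct sim_event *event, int this_cpu)
{
	return event->cpu == -1 || event->cpu == this_cpu;
}

/* Mirrors the new INACTIVE-with-filter-mismatch branch in event_sched_out(). */
static void sim_sched_out_inactive(struct sim_event *event,
				   unsigned long long ctx_time, int this_cpu)
{
	if (!sim_event_filter_match(event, this_cpu)) {
		unsigned long long delta = ctx_time - event->tstamp_stopped;

		event->tstamp_running += delta;
		event->tstamp_stopped = ctx_time;
	}
}

int main(void)
{
	/* Event bound to CPU 2, last stopped at context time 100. */
	struct sim_event event = { .cpu = 2, .tstamp_stopped = 100 };

	/* Scheduled out on CPU 0 at context time 250: filter mismatch, time still accounted. */
	sim_sched_out_inactive(&event, 250, 0);
	printf("tstamp_running=%llu tstamp_stopped=%llu\n",
	       event.tstamp_running, event.tstamp_stopped);
	return 0;
}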
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
 	struct ftrace_profile *rec = v;
 	char str[KSYM_SYMBOL_LEN];
+	int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	static DEFINE_MUTEX(mutex);
 	static struct trace_seq s;
 	unsigned long long avg;
 	unsigned long long stddev;
 #endif
+	mutex_lock(&ftrace_profile_lock);
+
+	/* we raced with function_profile_reset() */
+	if (unlikely(rec->counter == 0)) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
 		do_div(stddev, (rec->counter - 1) * 1000);
 	}
 
-	mutex_lock(&mutex);
 	trace_seq_init(&s);
 	trace_print_graph_duration(rec->time, &s);
 	trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
 	trace_seq_puts(&s, "    ");
 	trace_print_graph_duration(stddev, &s);
 	trace_print_seq(m, &s);
-	mutex_unlock(&mutex);
 #endif
 	seq_putc(m, '\n');
+out:
+	mutex_unlock(&ftrace_profile_lock);
 
-	return 0;
+	return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
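
The ftrace change above replaces the local static mutex, which only guarded the trace_seq formatting, with ftrace_profile_lock held across the whole function, and bails out with -EBUSY when rec->counter is zero because function_profile_reset() ran concurrently. A user-space sketch of that pattern follows; it uses plain pthreads, and profile_lock, show_rec() and struct profile_rec are illustrative stand-ins, not the kernel API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct profile_rec {
	unsigned long counter;
	unsigned long long time;
};

/* One lock covers both the staleness check and the formatting. */
static pthread_mutex_t profile_lock = PTHREAD_MUTEX_INITIALIZER;

static int show_rec(struct profile_rec *rec)
{
	int ret = 0;

	pthread_mutex_lock(&profile_lock);

	/* A reset that zeroed the record under us: report -EBUSY instead of stale data. */
	if (rec->counter == 0) {
		ret = -EBUSY;
		goto out;
	}

	printf("%10lu hits, %llu ns\n", rec->counter, rec->time);
out:
	pthread_mutex_unlock(&profile_lock);
	return ret;
}

int main(void)
{
	struct profile_rec rec = { .counter = 42, .time = 123456 };

	if (show_rec(&rec) != 0)
		printf("record was reset concurrently\n");
	return 0;
}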
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-	__get_cpu_var(watchdog_touch_ts) = 0;
+	__raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-	__get_cpu_var(watchdog_nmi_touch) = true;
+	if (watchdog_enabled) {
+		unsigned cpu;
+
+		for_each_present_cpu(cpu) {
+			if (per_cpu(watchdog_nmi_touch, cpu) != true)
+				per_cpu(watchdog_nmi_touch, cpu) = true;
+		}
+	}
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
 		wake_up_process(p);
 	}
 
+	/* if any cpu succeeds, watchdog is considered enabled for the system */
+	watchdog_enabled = 1;
+
 	return 0;
 }
 
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
 		per_cpu(softlockup_watchdog, cpu) = NULL;
 		kthread_stop(p);
 	}
-
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
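
The watchdog change restores the old touch_nmi_watchdog() semantics: the per-CPU watchdog_nmi_touch flags are only written when the detector is globally enabled, every present CPU is touched rather than just the current one, and watchdog_enabled is now set on the enable path instead of the disable path. A small stand-alone sketch of that behaviour follows; NR_SIM_CPUS, the arrays and the sim_* names are illustrative stand-ins for the kernel's per-CPU variables, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define NR_SIM_CPUS 4

static int watchdog_enabled;                 /* set by the enable path, as in the patch */
static bool watchdog_nmi_touch[NR_SIM_CPUS]; /* stand-in for the per-CPU flag */

static void sim_touch_nmi_watchdog(void)
{
	/* Only touch the per-CPU flags once the detector is globally enabled. */
	if (watchdog_enabled) {
		for (unsigned cpu = 0; cpu < NR_SIM_CPUS; cpu++)
			watchdog_nmi_touch[cpu] = true;
	}
	/* touch_softlockup_watchdog() would also be called here in the kernel. */
}

int main(void)
{
	sim_touch_nmi_watchdog();   /* no effect: detector not enabled yet */
	watchdog_enabled = 1;       /* watchdog_enable() marks the system enabled */
	sim_touch_nmi_watchdog();
	printf("cpu0 touched: %d\n", watchdog_nmi_touch[0]);
	return 0;
}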