perf evsel: Support event groups

The perf_evsel__open() routines now have an extra boolean argument specifying
whether event grouping is desired.

The first file descriptor created on a CPU becomes the group leader.

Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
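
As an illustration of the new calling convention, a minimal sketch of a call
site passing the extra argument. The helper names (perf_evsel__new,
cpu_map__new, thread_map__new) and the attr/target setup are assumptions for
the example and are not part of this commit's diff:

  /*
   * Sketch only: open one counter on all online CPUs for the given task,
   * asking for the per-thread fds on each CPU to be grouped.
   */
  static int open_grouped_counter(struct perf_event_attr *attr,
				  pid_t target_pid, pid_t target_tid)
  {
	struct perf_evsel *evsel = perf_evsel__new(attr, 0);
	struct cpu_map *cpus = cpu_map__new(NULL);	/* all online CPUs */
	struct thread_map *threads = thread_map__new(target_pid, target_tid);

	if (evsel == NULL || cpus == NULL || threads == NULL)
		return -1;

	/* group = true: the first fd opened on each CPU becomes the group leader */
	return perf_evsel__open(evsel, cpus, threads, true);
  }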
Author: Arnaldo Carvalho de Melo
Date:   2011-01-11 23:42:19 -02:00
Parent: 5c581041cf
Commit: f08199d314

4 changed files with 27 additions and 18 deletions

@@ -128,7 +128,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
 }
 
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-			      struct thread_map *threads)
+			      struct thread_map *threads, bool group)
 {
 	int cpu, thread;
@@ -137,12 +137,18 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
+		int group_fd = -1;
+
 		for (thread = 0; thread < threads->nr; thread++) {
 			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     threads->map[thread],
-								     cpus->map[cpu], -1, 0);
+								     cpus->map[cpu],
+								     group_fd, 0);
 			if (FD(evsel, cpu, thread) < 0)
				goto out_close;
+
+			if (group && group_fd == -1)
+				group_fd = FD(evsel, cpu, thread);
 		}
 	}
@@ -175,10 +181,9 @@ static struct {
 	.threads = { -1, },
 };
 
-int perf_evsel__open(struct perf_evsel *evsel,
-		     struct cpu_map *cpus, struct thread_map *threads)
+int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+		     struct thread_map *threads, bool group)
 {
 	if (cpus == NULL) {
 		/* Work around old compiler warnings about strict aliasing */
 		cpus = &empty_cpu_map.map;
@@ -187,15 +192,17 @@ int perf_evsel__open(struct perf_evsel *evsel,
 	if (threads == NULL)
 		threads = &empty_thread_map.map;
 
-	return __perf_evsel__open(evsel, cpus, threads);
+	return __perf_evsel__open(evsel, cpus, threads, group);
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+			     struct cpu_map *cpus, bool group)
 {
-	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
 }
 
-int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
+int perf_evsel__open_per_thread(struct perf_evsel *evsel,
+				struct thread_map *threads, bool group)
 {
-	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
 }
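
For reference, the group_fd handling threaded through __perf_evsel__open()
follows the perf_event_open(2) syscall convention: passing -1 starts a new
group and the returned fd is the leader, while passing an existing fd attaches
the new counter to that leader so the kernel schedules them together. A
minimal standalone sketch of that convention, independent of the perf tooling
code above (the syscall wrapper is the usual raw-syscall idiom, not a perf
tool helper):

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <sys/types.h>
  #include <unistd.h>
  #include <string.h>

  static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			     int cpu, int group_fd, unsigned long flags)
  {
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
  }

  /* Open cycles + instructions for one task on one CPU as a single group. */
  static int open_grouped_pair(pid_t pid, int cpu)
  {
	struct perf_event_attr attr;
	int leader, member;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* group_fd == -1: start a new group; the returned fd is the leader */
	leader = perf_event_open(&attr, pid, cpu, -1, 0);
	if (leader < 0)
		return -1;

	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	/* group_fd == leader: schedule this counter together with the leader */
	member = perf_event_open(&attr, pid, cpu, leader, 0);
	if (member < 0) {
		close(leader);
		return -1;
	}

	return leader;
  }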