perf: Move some code around
Move all inherit code near each other.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Commit: 97dee4f320
Parent: 108b02cfce
Committed by: Ingo Molnar
@@ -5556,106 +5556,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
 
-/*
- * inherit a event from parent task to child task:
- */
-static struct perf_event *
-inherit_event(struct perf_event *parent_event,
-	      struct task_struct *parent,
-	      struct perf_event_context *parent_ctx,
-	      struct task_struct *child,
-	      struct perf_event *group_leader,
-	      struct perf_event_context *child_ctx)
-{
-	struct perf_event *child_event;
-
-	/*
-	 * Instead of creating recursive hierarchies of events,
-	 * we link inherited events back to the original parent,
-	 * which has a filp for sure, which we use as the reference
-	 * count:
-	 */
-	if (parent_event->parent)
-		parent_event = parent_event->parent;
-
-	child_event = perf_event_alloc(&parent_event->attr,
-					   parent_event->cpu,
-					   group_leader, parent_event,
-					   NULL);
-	if (IS_ERR(child_event))
-		return child_event;
-	get_ctx(child_ctx);
-
-	/*
-	 * Make the child state follow the state of the parent event,
-	 * not its attr.disabled bit. We hold the parent's mutex,
-	 * so we won't race with perf_event_{en, dis}able_family.
-	 */
-	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
-		child_event->state = PERF_EVENT_STATE_INACTIVE;
-	else
-		child_event->state = PERF_EVENT_STATE_OFF;
-
-	if (parent_event->attr.freq) {
-		u64 sample_period = parent_event->hw.sample_period;
-		struct hw_perf_event *hwc = &child_event->hw;
-
-		hwc->sample_period = sample_period;
-		hwc->last_period = sample_period;
-
-		local64_set(&hwc->period_left, sample_period);
-	}
-
-	child_event->ctx = child_ctx;
-	child_event->overflow_handler = parent_event->overflow_handler;
-
-	/*
-	 * Link it up in the child's context:
-	 */
-	add_event_to_ctx(child_event, child_ctx);
-
-	/*
-	 * Get a reference to the parent filp - we will fput it
-	 * when the child event exits. This is safe to do because
-	 * we are in the parent and we know that the filp still
-	 * exists and has a nonzero count:
-	 */
-	atomic_long_inc(&parent_event->filp->f_count);
-
-	/*
-	 * Link this into the parent event's child list
-	 */
-	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-	mutex_lock(&parent_event->child_mutex);
-	list_add_tail(&child_event->child_list, &parent_event->child_list);
-	mutex_unlock(&parent_event->child_mutex);
-
-	return child_event;
-}
-
-static int inherit_group(struct perf_event *parent_event,
-	      struct task_struct *parent,
-	      struct perf_event_context *parent_ctx,
-	      struct task_struct *child,
-	      struct perf_event_context *child_ctx)
-{
-	struct perf_event *leader;
-	struct perf_event *sub;
-	struct perf_event *child_ctr;
-
-	leader = inherit_event(parent_event, parent, parent_ctx,
-				 child, NULL, child_ctx);
-	if (IS_ERR(leader))
-		return PTR_ERR(leader);
-	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
-		child_ctr = inherit_event(sub, parent, parent_ctx,
-					    child, leader, child_ctx);
-		if (IS_ERR(child_ctr))
-			return PTR_ERR(child_ctr);
-	}
-	return 0;
-}
-
 static void sync_child_event(struct perf_event *child_event,
 			       struct task_struct *child)
 {
@@ -5844,6 +5744,106 @@ again:
 	put_ctx(ctx);
 }
 
+/*
+ * inherit a event from parent task to child task:
+ */
+static struct perf_event *
+inherit_event(struct perf_event *parent_event,
+	      struct task_struct *parent,
+	      struct perf_event_context *parent_ctx,
+	      struct task_struct *child,
+	      struct perf_event *group_leader,
+	      struct perf_event_context *child_ctx)
+{
+	struct perf_event *child_event;
+
+	/*
+	 * Instead of creating recursive hierarchies of events,
+	 * we link inherited events back to the original parent,
+	 * which has a filp for sure, which we use as the reference
+	 * count:
+	 */
+	if (parent_event->parent)
+		parent_event = parent_event->parent;
+
+	child_event = perf_event_alloc(&parent_event->attr,
+					   parent_event->cpu,
+					   group_leader, parent_event,
+					   NULL);
+	if (IS_ERR(child_event))
+		return child_event;
+	get_ctx(child_ctx);
+
+	/*
+	 * Make the child state follow the state of the parent event,
+	 * not its attr.disabled bit. We hold the parent's mutex,
+	 * so we won't race with perf_event_{en, dis}able_family.
+	 */
+	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
+		child_event->state = PERF_EVENT_STATE_INACTIVE;
+	else
+		child_event->state = PERF_EVENT_STATE_OFF;
+
+	if (parent_event->attr.freq) {
+		u64 sample_period = parent_event->hw.sample_period;
+		struct hw_perf_event *hwc = &child_event->hw;
+
+		hwc->sample_period = sample_period;
+		hwc->last_period = sample_period;
+
+		local64_set(&hwc->period_left, sample_period);
+	}
+
+	child_event->ctx = child_ctx;
+	child_event->overflow_handler = parent_event->overflow_handler;
+
+	/*
+	 * Link it up in the child's context:
+	 */
+	add_event_to_ctx(child_event, child_ctx);
+
+	/*
+	 * Get a reference to the parent filp - we will fput it
+	 * when the child event exits. This is safe to do because
+	 * we are in the parent and we know that the filp still
+	 * exists and has a nonzero count:
+	 */
+	atomic_long_inc(&parent_event->filp->f_count);
+
+	/*
+	 * Link this into the parent event's child list
+	 */
+	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+	mutex_lock(&parent_event->child_mutex);
+	list_add_tail(&child_event->child_list, &parent_event->child_list);
+	mutex_unlock(&parent_event->child_mutex);
+
+	return child_event;
+}
+
+static int inherit_group(struct perf_event *parent_event,
+	      struct task_struct *parent,
+	      struct perf_event_context *parent_ctx,
+	      struct task_struct *child,
+	      struct perf_event_context *child_ctx)
+{
+	struct perf_event *leader;
+	struct perf_event *sub;
+	struct perf_event *child_ctr;
+
+	leader = inherit_event(parent_event, parent, parent_ctx,
+				 child, NULL, child_ctx);
+	if (IS_ERR(leader))
+		return PTR_ERR(leader);
+	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
+		child_ctr = inherit_event(sub, parent, parent_ctx,
+					    child, leader, child_ctx);
+		if (IS_ERR(child_ctr))
+			return PTR_ERR(child_ctr);
+	}
+	return 0;
+}
+
 static int
 inherit_task_group(struct perf_event *event, struct task_struct *parent,
 		   struct perf_event_context *parent_ctx,
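Editor's note: the key trick in the moved inherit_event() is the early `parent_event = parent_event->parent` step. An inherited event is always linked back to the original top-level event, never to another inherited copy, so the inheritance tree stays one level deep no matter how many generations of tasks fork, and the filp reference count on the original covers every descendant. A minimal user-space sketch of that flattening pattern, with a hypothetical `struct event` and `inherit()` helper (not kernel code), assuming only the parent link and refcount matter:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct perf_event: just the parent link and a refcount. */
struct event {
	struct event *parent;	/* NULL if this is an original (top-level) event */
	long refcount;		/* stands in for the filp f_count reference */
};

/*
 * Flattening step, as in inherit_event(): a child of an inherited event
 * is re-linked to the original parent, never to the inherited copy.
 */
static struct event *inherit(struct event *parent_event)
{
	struct event *child = calloc(1, sizeof(*child));

	if (!child)
		abort();
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child->parent = parent_event;
	parent_event->refcount++;	/* one reference per descendant */
	return child;
}

int main(void)
{
	struct event orig = { .parent = NULL, .refcount = 1 };
	struct event *child = inherit(&orig);		/* first fork */
	struct event *grandchild = inherit(child);	/* fork of the fork */

	/* Both generations point straight at the original event. */
	assert(child->parent == &orig);
	assert(grandchild->parent == &orig);
	printf("orig refcount: %ld\n", orig.refcount);	/* prints 3 */

	free(child);
	free(grandchild);
	return 0;
}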