Merge branch 'perf/urgent' into perf/core

Conflicts:
	arch/x86/kernel/apic/hw_nmi.c

Merge reason: Resolve conflict, queue up dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
 	int ctxn;
 
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
 	for_each_task_context_nr(ctxn)
 		perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
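The rotate_ctx() change above pairs with the rotate_disable sites added to perf_event_init_context() further down: the inheritance code must walk ->flexible_groups without holding ctx->lock, so it flips a flag that tells the timer-driven rotation to stand down. A minimal user-space sketch of that pattern, assuming nothing beyond pthreads; the names (rotation_disabled, rotate_tick, walk_unlocked) are invented for illustration and are not kernel API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* ctx->lock stand-in */
    static int rotation_disabled;  /* ctx->rotate_disable stand-in */
    static int head;               /* position of the flexible_groups head */

    /* Periodic path (an hrtimer in the kernel): rotates unless disabled. */
    static void rotate_tick(void)
    {
            pthread_mutex_lock(&lock);
            if (!rotation_disabled)
                    head++;        /* list_rotate_left() stand-in */
            pthread_mutex_unlock(&lock);
    }

    /* Slow path: needs a stable list while not holding the lock. */
    static void walk_unlocked(void)
    {
            pthread_mutex_lock(&lock);
            rotation_disabled = 1;
            pthread_mutex_unlock(&lock);

            /* ... may sleep or allocate here; the list order stays pinned ... */
            printf("stable walk from head=%d\n", head);

            pthread_mutex_lock(&lock);
            rotation_disabled = 0;
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            rotate_tick();
            walk_unlocked();
            rotate_tick();
            return 0;
    }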
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
 	raw_spin_unlock_irq(&ctx->lock);
 	mutex_unlock(&ctx->mutex);
 
-	mutex_lock(&event->owner->perf_event_mutex);
-	list_del_init(&event->owner_entry);
-	mutex_unlock(&event->owner->perf_event_mutex);
-	put_task_struct(event->owner);
-
 	free_event(event);
 
 	return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
 	struct perf_event *event = file->private_data;
+	struct task_struct *owner;
 
 	file->private_data = NULL;
 
+	rcu_read_lock();
+	owner = ACCESS_ONCE(event->owner);
+	/*
+	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+	 * !owner it means the list deletion is complete and we can indeed
+	 * free this event, otherwise we need to serialize on
+	 * owner->perf_event_mutex.
+	 */
+	smp_read_barrier_depends();
+	if (owner) {
+		/*
+		 * Since delayed_put_task_struct() also drops the last
+		 * task reference we can safely take a new reference
+		 * while holding the rcu_read_lock().
+		 */
+		get_task_struct(owner);
+	}
+	rcu_read_unlock();
+
+	if (owner) {
+		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * We have to re-check the event->owner field, if it is cleared
+		 * we raced with perf_event_exit_task(), acquiring the mutex
+		 * ensured they're done, and we can proceed with freeing the
+		 * event.
+		 */
+		if (event->owner)
+			list_del_init(&event->owner_entry);
+		mutex_unlock(&owner->perf_event_mutex);
+		put_task_struct(owner);
+	}
+
 	return perf_event_release_kernel(event);
 }
 
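The rewritten perf_release() above is the read side of the owner race fix: snapshot event->owner under rcu_read_lock(), pin the task with get_task_struct(), then re-check the field under owner->perf_event_mutex before touching the owner list. A rough user-space rendering of that shape, with a plain refcount standing in for RCU plus the task refcount; struct owner, release_event() and the field names are invented for illustration:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct owner {
            pthread_mutex_t mutex;         /* perf_event_mutex stand-in */
            atomic_int refs;               /* task refcount stand-in */
    };

    struct event {
            struct owner *_Atomic owner;   /* cleared by the exit path */
            int linked;                    /* owner_entry-on-list stand-in */
    };

    static void release_event(struct event *ev)
    {
            /* Snapshot; the kernel does this via ACCESS_ONCE() under rcu_read_lock(). */
            struct owner *o = atomic_load_explicit(&ev->owner, memory_order_acquire);

            if (o) {
                    atomic_fetch_add(&o->refs, 1);       /* get_task_struct() stand-in */

                    pthread_mutex_lock(&o->mutex);
                    /* Re-check: the mutex serializes us against the exit path. */
                    if (atomic_load(&ev->owner))
                            ev->linked = 0;              /* list_del_init() stand-in */
                    pthread_mutex_unlock(&o->mutex);

                    atomic_fetch_sub(&o->refs, 1);       /* put_task_struct() stand-in */
            }
            /* ... now safe to free the event ... */
    }

    int main(void)
    {
            struct owner o = { .mutex = PTHREAD_MUTEX_INITIALIZER };
            struct event ev = { .linked = 1 };

            atomic_store(&ev.owner, &o);
            release_event(&ev);
            printf("linked=%d refs=%d\n", ev.linked, atomic_load(&o.refs));
            return 0;
    }

The second check under the mutex is what closes the window: if the exit path cleared event->owner first, taking the mutex guarantees its list manipulation has already finished.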
@@ -5668,7 +5700,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	mutex_unlock(&ctx->mutex);
 
 	event->owner = current;
-	get_task_struct(current);
+
 	mutex_lock(&current->perf_event_mutex);
 	list_add_tail(&event->owner_entry, &current->perf_event_list);
 	mutex_unlock(&current->perf_event_mutex);
@@ -5736,12 +5768,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	++ctx->generation;
 	mutex_unlock(&ctx->mutex);
 
-	event->owner = current;
-	get_task_struct(current);
-	mutex_lock(&current->perf_event_mutex);
-	list_add_tail(&event->owner_entry, &current->perf_event_list);
-	mutex_unlock(&current->perf_event_mutex);
-
 	return event;
 
 err_free:
@@ -5892,8 +5918,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+	struct perf_event *event, *tmp;
 	int ctxn;
 
+	mutex_lock(&child->perf_event_mutex);
+	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+				 owner_entry) {
+		list_del_init(&event->owner_entry);
+
+		/*
+		 * Ensure the list deletion is visible before we clear
+		 * the owner, closes a race against perf_release() where
+		 * we need to serialize on the owner->perf_event_mutex.
+		 */
+		smp_wmb();
+		event->owner = NULL;
+	}
+	mutex_unlock(&child->perf_event_mutex);
+
 	for_each_task_context_nr(ctxn)
 		perf_event_exit_task_context(child, ctxn);
 }
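The smp_wmb() in this hunk pairs with the ACCESS_ONCE()/smp_read_barrier_depends() sequence in perf_release() above: unlink first, then publish completion by clearing event->owner, so a reader that observes !owner can trust that the unlink is done. A compilable C11 sketch of the same guarantee, using release/acquire in place of the kernel barriers; all names are invented:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct task;                       /* opaque stand-in for task_struct */

    struct event {
            bool on_owner_list;        /* the unlink the reader must observe */
            struct task *_Atomic owner;
    };

    /* Exit side: unlink first, then publish completion by clearing owner. */
    static void exit_side(struct event *ev)
    {
            ev->on_owner_list = false;                   /* list_del_init() stand-in */
            atomic_store_explicit(&ev->owner, NULL,
                                  memory_order_release); /* smp_wmb() + owner = NULL */
    }

    /* Release side: seeing owner == NULL implies the unlink is visible too. */
    static bool unlink_done(struct event *ev)
    {
            struct task *owner = atomic_load_explicit(&ev->owner,
                                                      memory_order_acquire);
            return owner == NULL && !ev->on_owner_list;
    }

    int main(void)
    {
            struct event ev = { .on_owner_list = true };
            exit_side(&ev);
            printf("unlink visible: %d\n", unlink_done(&ev));
            return 0;
    }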
@@ -6113,6 +6155,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp[ctxn] = NULL;
@@ -6153,6 +6196,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx,
 					 child, ctxn, &inherited_all);
@@ -6160,6 +6212,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp[ctxn];
 
 	if (child_ctx && inherited_all) {
@@ -6312,6 +6368,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+	int ret;
+
 	perf_event_init_all_cpus();
 	init_srcu_struct(&pmus_srcu);
 	perf_pmu_register(&perf_swevent);
@@ -6319,4 +6377,7 @@ void __init perf_event_init(void)
 	perf_pmu_register(&perf_task_clock);
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
+
+	ret = init_hw_breakpoint();
+	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }