drivers/oprofile: coding style fixes in buffer_sync.c

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: oprofile-list <oprofile-list@lists.sourceforge.net>
Cc: Barry Kasindorf <barry.kasindorf@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Robert Richter
2008-07-22 21:08:51 +02:00
committed by Ingo Molnar
parent 6657fe4f56
commit 73185e0a5d

View File

@@ -48,7 +48,8 @@ static void process_task_mortuary(void);
  * Can be invoked from softirq via RCU callback due to
  * call_rcu() of the task struct, hence the _irqsave.
  */
-static int task_free_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+task_free_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	unsigned long flags;
 	struct task_struct *task = data;
@@ -62,7 +63,8 @@ static int task_free_notify(struct notifier_block * self, unsigned long val, voi
 /* The task is on its way out. A sync of the buffer means we can catch
  * any remaining samples for this task.
  */
-static int task_exit_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	/* To avoid latency problems, we only process the current CPU,
 	 * hoping that most samples for the task are on this CPU
@@ -77,7 +79,8 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
  * we don't lose any. This does not have to be exact, it's a QoI issue
  * only.
  */
-static int munmap_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+munmap_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 	unsigned long addr = (unsigned long)data;
 	struct mm_struct *mm = current->mm;
@@ -103,7 +106,8 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
 /* We need to be told about new modules so we don't attribute to a previously
  * loaded module, or drop the samples on the floor.
  */
-static int module_load_notify(struct notifier_block * self, unsigned long val, void * data)
+static int
+module_load_notify(struct notifier_block *self, unsigned long val, void *data)
 {
 #ifdef CONFIG_MODULES
 	if (val != MODULE_STATE_COMING)
@@ -235,7 +239,8 @@ out:
  * sure to do this lookup before a mm->mmap modification happens so
  * we don't lose track.
  */
-static unsigned long lookup_dcookie(struct mm_struct * mm, unsigned long addr, off_t * offset)
+static unsigned long
+lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
 {
 	unsigned long cookie = NO_COOKIE;
 	struct vm_area_struct *vma;
@@ -533,15 +538,13 @@ void sync_buffer(int cpu)
 				cookie = get_exec_dcookie(mm);
 				add_user_ctx_switch(new, cookie);
 			}
-		} else {
-			if (state >= sb_bt_start &&
-			    !add_sample(mm, s, in_kernel)) {
-				if (state == sb_bt_start) {
-					state = sb_bt_ignore;
-					atomic_inc(&oprofile_stats.bt_lost_no_mapping);
-				}
+		} else if (state >= sb_bt_start &&
+			   !add_sample(mm, s, in_kernel)) {
+			if (state == sb_bt_start) {
+				state = sb_bt_ignore;
+				atomic_inc(&oprofile_stats.bt_lost_no_mapping);
 			}
-		}
 		increment_tail(cpu_buf);
 	}