linux-kernel-test/kernel/trace/trace_workqueue.c
KOSAKI Motohiro c3ffc7a40b tracing: Don't use tracing_record_cmdline() in workqueue tracer
Impact: improve workqueue tracer output

Currently, /sys/kernel/debug/tracing/trace_stat/workqueues can display
wrong and misleading thread names.

Why?

Currently, ftrace provides the tracing_record_cmdline()/trace_find_cmdline()
convenience functions, which implement a task->comm string cache.

This avoids unnecessary memcpy overhead, and the workqueue tracer uses it.

However, trace statistics features in general shouldn't use
tracing_record_cmdline(), because statistics can refer to very old
processes. The comm cache can then return the wrong string, because a
more recent process has overwritten the cache entry.

Fortunately, the workqueue tracer guarantees that the displayed processes
are alive, so we can look up the comm string from the PID at display time.
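
As an illustration only, a minimal sketch of that display-time lookup
(the helper name wq_thread_comm() is made up here; workqueue_stat_show()
below simply does the same lookup inline):

    /* Sketch: resolve a live thread's name from its pid at display time,
     * instead of going through the ftrace comm cache.
     */
    static const char *wq_thread_comm(pid_t pid)
    {
            struct task_struct *tsk = find_task_by_vpid(pid);

            return tsk ? tsk->comm : "<...>";
    }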

<before>

% cat workqueues
 # CPU  INSERTED  EXECUTED   NAME
 # |      |         |          |

   7 431913     431913       kondemand/7
   7      0          0       tail
   7     21         21       git
   7      0          0       ls
   7      9          9       cat
   7 832632     832632       unix_chkpwd
   7 236292     236292       ls

Note: tail, git, ls, cat and unix_chkpwd are obviously not workqueue threads.

<after>

% cat workqueues
 # CPU  INSERTED  EXECUTED   NAME
 # |      |         |          |

   7    510        510       kondemand/7
   7      0          0       kmpathd/7
   7     15         15       ata/7
   7      0          0       aio/7
   7     11         11       kblockd/7
   7   1063       1063       work_on_cpu/7
   7    167        167       events/7

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-03-09 10:26:13 +01:00

/*
 * Workqueue statistical tracer.
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <trace/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include "trace_stat.h"
#include "trace.h"

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	/* Useful to know if we print the cpu headers */
	bool			first_entry;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, needs to be atomic */
	atomic_t		inserted;
	/*
	 * Doesn't need to be atomic, works are serialized in a single
	 * workqueue thread on a single CPU.
	 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/*
 * Don't need a global lock because allocated before the workqueues,
 * and never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))

/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0 || cpu >= num_possible_cpus());

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_empty(&workqueue_cpu_stat(cpu)->list))
		cws->first_entry = true;
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread only executes on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
				 list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kfree(node);
			goto found;
		}
	}
	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}

static void *workqueue_stat_start(void)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}

	return NULL;
}

static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		for (++cpu; cpu < num_possible_cpus(); cpu++) {
			ret = workqueue_stat_start_cpu(cpu);
			if (ret)
				return ret;
		}
		return NULL;
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
			  list);
}

static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	unsigned long flags;
	int cpu = cws->cpu;
	struct task_struct *tsk = find_task_by_vpid(cws->pid);

	seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
		   atomic_read(&cws->inserted),
		   cws->executed,
		   tsk ? tsk->comm : "<...>");

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
		seq_printf(s, "\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return 0;
}

static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n\n");
	return 0;
}

struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_headers = workqueue_stat_headers
};

int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);

/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");
	return 1;
}
early_initcall(trace_workqueue_early_init);