1 #include <traceevent/event-parse.h>
3 #include "util/color.h"
4 #include "util/evlist.h"
5 #include "util/machine.h"
6 #include "util/thread.h"
7 #include "util/parse-options.h"
8 #include "util/strlist.h"
9 #include "util/thread_map.h"
/*
 * Per-syscall output tweaks, keyed by syscall name.  Looked up with
 * bsearch() from syscall_fmt__find(), so entries MUST stay sorted by .name.
 *   .errmsg  - decode a negative return as "-1 <errno-name> (<strerror>)"
 *   .timeout - a return of 0 means the call timed out
 *   .alias   - tracepoint uses a different name (e.g. "stat" -> "newstat")
 * NOTE(review): the struct's field declarations and the array terminator
 * fall outside the visible chunk.
 */
14 static struct syscall_fmt {
20 { .name = "access", .errmsg = true, },
21 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
22 { .name = "connect", .errmsg = true, },
23 { .name = "fstat", .errmsg = true, .alias = "newfstat", },
24 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
25 { .name = "futex", .errmsg = true, },
26 { .name = "open", .errmsg = true, },
27 { .name = "poll", .errmsg = true, .timeout = true, },
28 { .name = "ppoll", .errmsg = true, .timeout = true, },
29 { .name = "read", .errmsg = true, },
30 { .name = "recvfrom", .errmsg = true, },
31 { .name = "select", .errmsg = true, .timeout = true, },
32 { .name = "socket", .errmsg = true, },
33 { .name = "stat", .errmsg = true, .alias = "newstat", },
36 static int syscall_fmt__cmp(const void *name, const void *fmtp)
38 const struct syscall_fmt *fmt = fmtp;
39 return strcmp(name, fmt->name);
42 static struct syscall_fmt *syscall_fmt__find(const char *name)
44 const int nmemb = ARRAY_SIZE(syscall_fmts);
45 return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
/*
 * struct syscall (fragment): cached state for one syscall id.
 * tp_format is filled from the sys_enter_<name> tracepoint in
 * trace__read_syscall_info(); fmt is the syscall_fmts table entry, or
 * NULL.  NOTE(review): surrounding struct lines are not visible here.
 */
49 struct event_format *tp_format;
52 struct syscall_fmt *fmt;
55 static size_t fprintf_duration(unsigned long t, FILE *fp)
57 double duration = (double)t / NSEC_PER_MSEC;
58 size_t printed = fprintf(fp, "(");
61 printed += color_fprintf(fp, PERF_COLOR_RED, "%6.3f ms", duration);
62 else if (duration >= 0.01)
63 printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
65 printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
66 return printed + fprintf(stdout, "): ");
/*
 * struct thread_trace (fragment): per-thread accounting used by the
 * end-of-run summary.  NOTE(review): the rest of the struct is not
 * visible in this chunk.
 */
73 unsigned long nr_events;
78 static struct thread_trace *thread_trace__new(void)
80 return zalloc(sizeof(struct thread_trace));
/*
 * Return the thread_trace hung off @thread->priv, allocating it on
 * first use; on allocation failure a warning is printed (samples are
 * dropped).  NOTE(review): the error label, returns and braces of this
 * function are on lines outside the visible chunk.
 */
83 static struct thread_trace *thread__trace(struct thread *thread)
85 struct thread_trace *ttrace;
/* First sample from this thread: allocate its state lazily. */
90 if (thread->priv == NULL)
91 thread->priv = thread_trace__new();
/* Still NULL here means thread_trace__new() failed. */
93 if (thread->priv == NULL)
96 ttrace = thread->priv;
/* Out-of-memory path: warn the user, keep tracing. */
101 color_fprintf(stdout, PERF_COLOR_RED,
102 "WARNING: not enough memory, dropping samples!\n");
/*
 * struct trace (fragment): top-level state for a trace session.
 * NOTE(review): the struct's opening/closing and other members are
 * outside the visible chunk.
 */
/* syscall table indexed by id, grown on demand in trace__read_syscall_info() */
110 struct syscall *table;
112 struct perf_record_opts opts;
/* -e/--expr list of syscall names to trace (NULL = trace all) */
115 struct strlist *ev_qualifier;
116 unsigned long nr_events;
/* more than one thread monitored -> print the tid on each event line */
118 bool multiple_threads;
/* --duration N.M: only show events lasting longer than this (ms) */
119 double duration_filter;
123 static bool trace__filter_duration(struct trace *trace, double t)
125 return t < (trace->duration_filter * NSEC_PER_MSEC);
128 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
130 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC;
132 return fprintf(fp, "%10.3f ", ts);
/* Set asynchronously from sig_handler() to end the main event loop. */
135 static bool done = false;
/*
 * SIGINT/SIGCHLD handler.  NOTE(review): the body (presumably setting
 * 'done') is on a line outside the visible chunk — confirm.
 */
137 static void sig_handler(int sig __maybe_unused)
/*
 * Print the common prefix of an event line: timestamp, duration and,
 * when tracing multiple threads, the tid.  NOTE(review): the braces and
 * return statement are on lines outside the visible chunk.
 */
142 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
143 u64 duration, u64 tstamp, FILE *fp)
145 size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
146 printed += fprintf_duration(duration, fp);
/* Disambiguate output when more than one thread is being traced. */
148 if (trace->multiple_threads)
149 printed += fprintf(fp, "%d ", thread->tid);
/*
 * Handle a non-sample event: report PERF_RECORD_LOST in red, hand
 * everything else to the generic machine event processing.
 * NOTE(review): the switch's break/default, braces and return are on
 * lines outside the visible chunk.
 */
154 static int trace__process_event(struct machine *machine, union perf_event *event)
158 switch (event->header.type) {
159 case PERF_RECORD_LOST:
/* The kernel dropped events: tell the user how many. */
160 color_fprintf(stdout, PERF_COLOR_RED,
161 "LOST %" PRIu64 " events!\n", event->lost.lost);
162 ret = machine__process_lost_event(machine, event);
/* presumably the default arm: forward to the common demuxer — confirm */
164 ret = machine__process_event(machine, event);
171 static int trace__tool_process(struct perf_tool *tool __maybe_unused,
172 union perf_event *event,
173 struct perf_sample *sample __maybe_unused,
174 struct machine *machine)
176 return trace__process_event(machine, event);
/*
 * Initialize symbol handling, set up the host machine and synthesize
 * the existing threads so tids can later be resolved to comms.
 * NOTE(review): error checks, braces and the return are on lines
 * outside the visible chunk.
 */
179 static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
181 int err = symbol__init();
186 machine__init(&trace->host, "", HOST_KERNEL_ID);
187 machine__create_kernel_maps(&trace->host);
/* Specific tasks requested: synthesize only those threads... */
189 if (perf_target__has_task(&trace->opts.target)) {
190 err = perf_event__synthesize_thread_map(NULL, evlist->threads,
/* ...otherwise synthesize every thread on the system. */
194 err = perf_event__synthesize_threads(NULL, trace__tool_process,
/*
 * Lazily fill trace->syscalls.table[id]: map the audit syscall id to a
 * name, grow the table on demand and read the sys_enter_<name>
 * tracepoint format, falling back to the table alias (e.g. newstat).
 * Returns 0 on success, -1 when the tracepoint format can't be read.
 * NOTE(review): declarations, early returns and braces are on lines
 * outside the visible chunk.
 */
204 static int trace__read_syscall_info(struct trace *trace, int id)
208 const char *name = audit_syscall_to_name(id, trace->audit_machine);
/* Grow the table to hold this id; realloc preserves existing entries. */
213 if (id > trace->syscalls.max) {
214 struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
216 if (nsyscalls == NULL)
/* Zero only the newly added tail (or everything on first allocation). */
219 if (trace->syscalls.max != -1) {
220 memset(nsyscalls + trace->syscalls.max + 1, 0,
221 (id - trace->syscalls.max) * sizeof(*sc));
223 memset(nsyscalls, 0, (id + 1) * sizeof(*sc));
226 trace->syscalls.table = nsyscalls;
227 trace->syscalls.max = id;
230 sc = trace->syscalls.table + id;
/* -e qualifier given and this syscall isn't listed: skip the format. */
233 if (trace->ev_qualifier && !strlist__find(trace->ev_qualifier, name)) {
236 * No need to do read tracepoint information since this will be
242 sc->fmt = syscall_fmt__find(sc->name);
244 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
245 sc->tp_format = event_format__new("syscalls", tp_name);
/* Some tracepoints live under an aliased name (e.g. stat -> newstat). */
247 if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) {
248 snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias);
249 sc->tp_format = event_format__new("syscalls", tp_name);
252 return sc->tp_format != NULL ? 0 : -1;
/*
 * Format the syscall argument values into @bf.  With tracepoint format
 * info each value is labelled with its field name; otherwise fall back
 * to printing positional values.  Returns the number of chars written.
 * NOTE(review): declarations, loop braces and the return are on lines
 * outside the visible chunk.
 */
255 static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
/* Have format info: walk the fields, ->next skips the common fields. */
261 if (sc->tp_format != NULL) {
262 struct format_field *field;
264 for (field = sc->tp_format->format.fields->next; field; field = field->next) {
265 printed += scnprintf(bf + printed, size - printed,
266 "%s%s: %ld", printed ? ", " : "",
267 field->name, args[i++]);
/* No format info: print the raw argument values positionally. */
271 printed += scnprintf(bf + printed, size - printed,
273 printed ? ", " : "", i, args[i]);
/* Signature shared by the sys_enter/sys_exit/sched sample handlers. */
281 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
282 struct perf_sample *sample);
/*
 * Resolve the sample's "id" field to a struct syscall, reading its
 * info on first use; returns NULL (after a diagnostic) when the id is
 * invalid or its info can't be read.  NOTE(review): the error label,
 * returns and braces are on lines outside the visible chunk.
 */
284 static struct syscall *trace__syscall_info(struct trace *trace,
285 struct perf_evsel *evsel,
286 struct perf_sample *sample)
288 int id = perf_evsel__intval(evsel, sample, "id");
/* Negative ids cannot be valid syscall numbers. */
291 printf("Invalid syscall %d id, skipping...\n", id);
/* Not seen before: read its name and tracepoint format now. */
295 if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) &&
296 trace__read_syscall_info(trace, id))
/* presumably the -e filtered case (entry left unnamed) — confirm */
299 if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL))
302 return &trace->syscalls.table[id];
/* Error path: tell the user which syscall couldn't be resolved. */
305 printf("Problems reading syscall %d", id);
306 if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
307 printf("(%s)", trace->syscalls.table[id].name);
308 puts(" information");
/*
 * raw_syscalls:sys_enter handler: format "name(args" into the thread's
 * entry_str buffer; printed immediately for exit/exit_group (which have
 * no matching sys_exit) or deferred until trace__sys_exit().
 * NOTE(review): declarations, NULL checks, braces and returns are on
 * lines outside the visible chunk.
 */
312 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
313 struct perf_sample *sample)
318 struct thread *thread;
319 struct syscall *sc = trace__syscall_info(trace, evsel, sample);
320 struct thread_trace *ttrace;
328 thread = machine__findnew_thread(&trace->host, sample->tid);
329 ttrace = thread__trace(thread);
333 args = perf_evsel__rawptr(evsel, sample, "args");
335 printf("Problems reading syscall arguments\n");
339 ttrace = thread->priv;
/* Lazily allocate the per-thread line buffer (fixed 1024 bytes). */
341 if (ttrace->entry_str == NULL) {
342 ttrace->entry_str = malloc(1024);
343 if (!ttrace->entry_str)
347 ttrace->entry_time = sample->time;
348 msg = ttrace->entry_str;
349 printed += scnprintf(msg + printed, 1024 - printed, "%s(", sc->name);
351 printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed, args);
/* exit/exit_group never return: print the entry line right away. */
353 if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
354 if (!trace->duration_filter) {
355 trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
356 printf("%-70s\n", ttrace->entry_str);
/* Otherwise defer printing until the matching sys_exit arrives. */
359 ttrace->entry_pending = true;
/*
 * raw_syscalls:sys_exit handler: compute the call duration, print the
 * deferred entry line (or "continued" if it was shown earlier) and
 * decode the return value according to the syscall_fmt flags.
 * NOTE(review): declarations, gotos, braces and returns are on lines
 * outside the visible chunk.
 */
364 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
365 struct perf_sample *sample)
369 struct thread *thread;
370 struct syscall *sc = trace__syscall_info(trace, evsel, sample);
371 struct thread_trace *ttrace;
379 thread = machine__findnew_thread(&trace->host, sample->tid);
380 ttrace = thread__trace(thread);
384 ret = perf_evsel__intval(evsel, sample, "ret");
386 ttrace = thread->priv;
388 ttrace->exit_time = sample->time;
/* Apply --duration filtering only when the matching entry was seen. */
390 if (ttrace->entry_time) {
391 duration = sample->time - ttrace->entry_time;
392 if (trace__filter_duration(trace, duration))
394 } else if (trace->duration_filter)
397 trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);
399 if (ttrace->entry_pending) {
400 printf("%-70s", ttrace->entry_str);
/* Entry line already printed: mark this as the continuation. */
403 color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
404 printf("]: %s()", sc->name);
/* .errmsg syscalls: decode negative returns as an errno. */
407 if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
409 const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
410 *e = audit_errno_to_name(-ret);
412 printf(") = -1 %s %s", e, emsg);
413 } else if (ret == 0 && sc->fmt && sc->fmt->timeout)
414 printf(") = 0 Timeout");
416 printf(") = %d", ret);
420 ttrace->entry_pending = false;
/*
 * sched:sched_stat_runtime handler: accumulate per-thread and global
 * on-cpu runtime (in ms) for the end-of-run summary, and dump the raw
 * tracepoint payload.  NOTE(review): early returns, braces and the
 * final return are on lines outside the visible chunk.
 */
425 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
426 struct perf_sample *sample)
428 u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
429 double runtime_ms = (double)runtime / NSEC_PER_MSEC;
430 struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
431 struct thread_trace *ttrace = thread__trace(thread);
436 ttrace->runtime_ms += runtime_ms;
437 trace->runtime_ms += runtime_ms;
/* Dump of the raw tracepoint payload fields. */
441 printf("%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
443 perf_evsel__strval(evsel, sample, "comm"),
444 (pid_t)perf_evsel__intval(evsel, sample, "pid"),
446 perf_evsel__intval(evsel, sample, "vruntime"));
/*
 * Main event loop: build the evlist (raw_syscalls enter/exit plus,
 * optionally, sched_stat_runtime), start the workload, mmap-read and
 * dispatch samples until done, then tear everything down through the
 * cascading error labels.  NOTE(review): many lines (declarations,
 * if/else arms, braces, labels) are outside the visible chunk.
 */
450 static int trace__run(struct trace *trace, int argc, const char **argv)
452 struct perf_evlist *evlist = perf_evlist__new();
453 struct perf_evsel *evsel;
455 unsigned long before;
456 const bool forks = argc > 0;
458 if (evlist == NULL) {
459 printf("Not enough memory to run!\n");
/* The two tracepoints every trace session needs. */
463 if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
464 perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
465 printf("Couldn't read the raw_syscalls tracepoints information!\n");
466 goto out_delete_evlist;
/* Optional scheduler runtime accounting (the --sched summary). */
470 perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
471 trace__sched_stat_runtime)) {
472 printf("Couldn't read the sched_stat_runtime tracepoint information!\n");
473 goto out_delete_evlist;
476 err = perf_evlist__create_maps(evlist, &trace->opts.target);
478 printf("Problems parsing the target to trace, check your options!\n");
479 goto out_delete_evlist;
482 err = trace__symbols_init(trace, evlist);
484 printf("Problems initializing symbol libraries!\n");
485 goto out_delete_maps;
488 perf_evlist__config(evlist, &trace->opts);
490 signal(SIGCHLD, sig_handler);
491 signal(SIGINT, sig_handler);
/* A command was given: fork the workload, started later. */
494 err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
497 printf("Couldn't run the workload!\n");
498 goto out_delete_maps;
502 err = perf_evlist__open(evlist);
504 printf("Couldn't create the events: %s\n", strerror(errno));
505 goto out_delete_maps;
508 err = perf_evlist__mmap(evlist, UINT_MAX, false);
510 printf("Couldn't mmap the events: %s\n", strerror(errno));
511 goto out_close_evlist;
514 perf_evlist__enable(evlist);
517 perf_evlist__start_workload(evlist);
/* map[0] == -1 means "all threads" (system wide / whole-process). */
519 trace->multiple_threads = evlist->threads->map[0] == -1 || evlist->threads->nr > 1;
521 before = trace->nr_events;
523 for (i = 0; i < evlist->nr_mmaps; i++) {
524 union perf_event *event;
526 while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
527 const u32 type = event->header.type;
528 tracepoint_handler handler;
529 struct perf_sample sample;
533 err = perf_evlist__parse_sample(evlist, event, &sample);
535 printf("Can't parse sample, err = %d, skipping...\n", err);
/* Timestamps are printed relative to the first event seen. */
539 if (trace->base_time == 0)
540 trace->base_time = sample.time;
542 if (type != PERF_RECORD_SAMPLE) {
543 trace__process_event(&trace->host, event);
547 evsel = perf_evlist__id2evsel(evlist, sample.id);
549 printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
553 if (sample.raw_data == NULL) {
554 printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
555 perf_evsel__name(evsel), sample.tid,
556 sample.cpu, sample.raw_size);
/* Dispatch to the handler attached via perf_evlist__add_newtp(). */
560 handler = evsel->handler.func;
561 handler(trace, evsel, &sample);
/* No new events this pass: either finish or block in poll(). */
565 if (trace->nr_events == before) {
567 goto out_unmap_evlist;
569 poll(evlist->pollfd, evlist->nr_fds, -1);
/* Teardown labels, innermost resource released first. */
573 perf_evlist__disable(evlist);
578 perf_evlist__munmap(evlist);
580 perf_evlist__close(evlist);
582 perf_evlist__delete_maps(evlist);
584 perf_evlist__delete(evlist);
/*
 * Print the banner for the --sched end-of-run per-thread summary.
 * NOTE(review): the 'printed' declaration and the return are on lines
 * outside the visible chunk.
 */
589 static size_t trace__fprintf_threads_header(FILE *fp)
593 printed = fprintf(fp, "\n _____________________________________________________________________\n");
594 printed += fprintf(fp," __) Summary of events (__\n\n");
595 printed += fprintf(fp," [ task - pid ] [ events ] [ ratio ] [ runtime ]\n");
596 printed += fprintf(fp," _____________________________________________________________________\n\n");
/*
 * Print one summary line per traced thread: comm, tid, event count,
 * color-coded share of all events and accumulated runtime.
 * NOTE(review): declarations, skip conditions, the red-threshold line
 * and the return are outside the visible chunk.
 */
601 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
603 size_t printed = trace__fprintf_threads_header(fp);
606 for (nd = rb_first(&trace->host.threads); nd; nd = rb_next(nd)) {
607 struct thread *thread = rb_entry(nd, struct thread, rb_node);
608 struct thread_trace *ttrace = thread->priv;
/* This thread's share of all events, in percent. */
615 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
/* Color by share: green > 25%, yellow > 5%; red threshold not visible. */
617 color = PERF_COLOR_NORMAL;
619 color = PERF_COLOR_RED;
620 else if (ratio > 25.0)
621 color = PERF_COLOR_GREEN;
622 else if (ratio > 5.0)
623 color = PERF_COLOR_YELLOW;
625 printed += color_fprintf(fp, color, "%20s", thread->comm);
626 printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
627 printed += color_fprintf(fp, color, "%5.1f%%", ratio);
628 printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
/*
 * --duration option callback: parse the threshold (ms) into
 * trace->duration_filter.  Note atof() silently yields 0.0 for
 * unparsable input.  NOTE(review): the return statement is on a line
 * outside the visible chunk.
 */
634 static int trace__set_duration(const struct option *opt, const char *str,
635 int unset __maybe_unused)
637 struct trace *trace = opt->value;
639 trace->duration_filter = atof(str);
/*
 * Entry point for "perf trace": parse options, validate the target,
 * run the trace loop and optionally print the --sched summary.
 * NOTE(review): several lines (struct initializers, error handling,
 * braces, the final return) are outside the visible chunk.
 */
643 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
645 const char * const trace_usage[] = {
646 "perf trace [<options>] [<command>]",
647 "perf trace [<options>] -- <command> [<options>]",
650 struct trace trace = {
651 .audit_machine = audit_detect_machine(),
660 .user_freq = UINT_MAX,
661 .user_interval = ULLONG_MAX,
666 const char *ev_qualifier_str = NULL;
667 const struct option trace_options[] = {
668 OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
669 "list of events to trace"),
670 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
671 "trace events on existing process id"),
672 OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
673 "trace events on existing thread id"),
674 OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
675 "system-wide collection from all CPUs"),
676 OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
677 "list of cpus to monitor"),
678 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
679 "child tasks do not inherit counters"),
680 OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
681 "number of mmap data pages"),
682 OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
684 OPT_CALLBACK(0, "duration", &trace, "float",
685 "show only events with duration > N.M ms",
686 trace__set_duration),
687 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
693 argc = parse_options(argc, argv, trace_options, trace_usage, 0);
/* Turn the -e expression into a strlist of syscall names. */
695 if (ev_qualifier_str != NULL) {
696 trace.ev_qualifier = strlist__new(true, ev_qualifier_str);
697 if (trace.ev_qualifier == NULL) {
698 puts("Not enough memory to parse event qualifier");
/* Validate and resolve the pid/tid/uid/cpu target options. */
703 err = perf_target__validate(&trace.opts.target);
705 perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
710 err = perf_target__parse_uid(&trace.opts.target);
712 perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
/* No workload and no target given: default to system-wide tracing. */
717 if (!argc && perf_target__none(&trace.opts.target))
718 trace.opts.target.system_wide = true;
720 err = trace__run(&trace, argc, argv);
/* --sched summary only when the run succeeded. */
722 if (trace.sched && !err)
723 trace__fprintf_thread_summary(&trace, stdout);