2 * builtin-timechart.c - make an svg timechart of system activity
4 * (C) Copyright 2009 Intel Corporation
7 * Arjan van de Ven <arjan@linux.intel.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; version 2
15 #include <traceevent/event-parse.h>
19 #include "util/util.h"
21 #include "util/color.h"
22 #include <linux/list.h>
23 #include "util/cache.h"
24 #include "util/evlist.h"
25 #include "util/evsel.h"
26 #include <linux/rbtree.h>
27 #include "util/symbol.h"
28 #include "util/callchain.h"
29 #include "util/strlist.h"
32 #include "util/header.h"
33 #include "util/parse-options.h"
34 #include "util/parse-events.h"
35 #include "util/event.h"
36 #include "util/session.h"
37 #include "util/svghelper.h"
38 #include "util/tool.h"
39 #include "util/data.h"
/* Build-time switch: also accept the pre-2.6.38 power tracepoint names. */
41 #define SUPPORT_OLD_POWER_EVENTS 1
/* Sentinel "state" value on power:cpu_idle marking exit from an idle state. */
42 #define PWR_EVENT_EXIT -1
/* Minimum number of tasks we try to show in the SVG (-n/--proc-num option). */
44 static int proc_num = 15;
/* Highest CPU id observed in the samples; used as a CPU count when drawing. */
46 static unsigned int numcpus;
47 static u64 min_freq; /* Lowest CPU frequency seen */
48 static u64 max_freq; /* Highest CPU frequency seen */
/* Set to max_freq when a frequency of max_freq - 1000 is observed (turbo heuristic). */
49 static u64 turbo_frequency;
/* Timestamps (ns) of the first and last sample processed. */
51 static u64 first_time, last_time;
/* Command-line toggles: -P (power only), -T (tasks only), -g (record callchains). */
53 static bool power_only;
54 static bool tasks_only;
55 static bool with_backtrace;
62 * Data structure layout:
63 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
64 * Each "pid" entry has a list of "comm"s.
65 * This is because we want to track different programs differently, while
66 * exec will reuse the original pid (by design).
67 * Each comm has a list of samples that will be used to draw
/* Fields below presumably belong to struct per_pid (declaration partially
 * outside this view — confirm): list of all comms ever seen for this pid,
 * plus the comm currently executing. */
82 struct per_pidcomm *all;
83 struct per_pidcomm *current;
/* Singly-linked chain of per_pidcomm entries under one pid. */
88 struct per_pidcomm *next;
/* Samples recorded against this comm, drawn later as bars. */
102 struct cpu_sample *samples;
105 struct sample_wrapper {
106 struct sample_wrapper *next;
/* Trailing variable-length payload; [0] is the pre-C99 flexible-array idiom. */
109 unsigned char data[0];
/* Scheduling states recorded in cpu_sample->type / per_pidcomm state. */
113 #define TYPE_RUNNING 1
114 #define TYPE_WAITING 2
115 #define TYPE_BLOCKED 3
/* cpu_sample list linkage and the optional callchain text for a sample. */
118 struct cpu_sample *next;
124 const char *backtrace;
/* Head of the global per-pid list built during event processing. */
127 static struct per_pid *all_data;
/* List linkage for power_event / wake_event records. */
133 struct power_event *next;
142 struct wake_event *next;
146 const char *backtrace;
/* Global lists of C/P-state transitions and wakeups, consumed when drawing. */
149 static struct power_event *power_events;
150 static struct wake_event *wake_events;
/* -p/--process filters: match by pid or by process name. */
152 struct process_filter {
155 struct process_filter *next;
158 static struct process_filter *process_filter;
/* Find the per_pid entry for @pid in the global list, creating (and
 * pushing onto the list head) a zero-initialized one if none exists. */
161 static struct per_pid *find_create_pid(int pid)
/* Linear scan of the global list for an existing entry. */
163 struct per_pid *cursor = all_data;
166 if (cursor->pid == pid)
168 cursor = cursor->next;
/* Not found: allocate a fresh entry; allocation failure is fatal. */
170 cursor = zalloc(sizeof(*cursor));
171 assert(cursor != NULL);
173 cursor->next = all_data;
/* Record the command name for @pid. If a comm entry with the same name
 * already exists it is reused; otherwise a new per_pidcomm is created
 * with a duplicated copy of @comm. */
178 static void pid_set_comm(int pid, char *comm)
181 struct per_pidcomm *c;
182 p = find_create_pid(pid);
/* Reuse an existing comm entry whose name matches exactly. */
185 if (c->comm && strcmp(c->comm, comm) == 0) {
190 c->comm = strdup(comm);
/* No match: allocate a new comm entry for this pid. */
196 c = zalloc(sizeof(*c));
198 c->comm = strdup(comm);
/* Handle a fork: create entries for both child and parent, inherit the
 * parent's current comm name into the child (if the child has none yet),
 * and stamp the child's start time. */
204 static void pid_fork(int pid, int ppid, u64 timestamp)
206 struct per_pid *p, *pp;
207 p = find_create_pid(pid);
208 pp = find_create_pid(ppid);
/* Child starts under the parent's command name until its own exec/comm. */
210 if (pp->current && pp->current->comm && !p->current)
211 pid_set_comm(pid, pp->current->comm);
213 p->start_time = timestamp;
215 p->current->start_time = timestamp;
216 p->current->state_since = timestamp;
/* Handle task exit: stamp the end time on the pid and its current comm. */
220 static void pid_exit(int pid, u64 timestamp)
223 p = find_create_pid(pid);
224 p->end_time = timestamp;
226 p->current->end_time = timestamp;
/* Append a scheduling sample (TYPE_RUNNING/WAITING/BLOCKED) covering
 * [start, end] on @cpu to the current comm of @pid, accounting running
 * time and widening the pid/comm start times as needed.
 * @backtrace: optional callchain text attached to the sample (may be NULL). */
230 pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end,
231 const char *backtrace)
234 struct per_pidcomm *c;
235 struct cpu_sample *sample;
237 p = find_create_pid(pid);
/* Lazily create a comm entry if the pid has none yet. */
240 c = zalloc(sizeof(*c))
247 sample = zalloc(sizeof(*sample));
248 assert(sample != NULL);
249 sample->start_time = start;
250 sample->end_time = end;
/* Push onto the comm's sample list (newest first). */
252 sample->next = c->samples;
254 sample->backtrace = backtrace;
/* Only RUNNING time with a sane, positive interval counts toward totals. */
257 if (sample->type == TYPE_RUNNING && end > start && start > 0) {
258 c->total_time += (end-start);
259 p->total_time += (end-start);
/* Track the earliest start seen for comm and pid (0 means "unset"). */
262 if (c->start_time == 0 || c->start_time > start)
263 c->start_time = start;
264 if (p->start_time == 0 || p->start_time > start)
265 p->start_time = start;
/* Per-CPU trackers: the C/P state each CPU is currently in and when it
 * entered that state. Indexed by cpu id; hard upper bound of 4096 CPUs. */
268 #define MAX_CPUS 4096
270 static u64 cpus_cstate_start_times[MAX_CPUS];
271 static int cpus_cstate_state[MAX_CPUS];
272 static u64 cpus_pstate_start_times[MAX_CPUS];
273 static u64 cpus_pstate_state[MAX_CPUS];
/* perf_tool callback for PERF_RECORD_COMM: record the task's command name. */
275 static int process_comm_event(struct perf_tool *tool __maybe_unused,
276 union perf_event *event,
277 struct perf_sample *sample __maybe_unused,
278 struct machine *machine __maybe_unused)
280 pid_set_comm(event->comm.tid, event->comm.comm);
/* perf_tool callback for PERF_RECORD_FORK: track the new child task. */
284 static int process_fork_event(struct perf_tool *tool __maybe_unused,
285 union perf_event *event,
286 struct perf_sample *sample __maybe_unused,
287 struct machine *machine __maybe_unused)
289 pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
/* perf_tool callback for PERF_RECORD_EXIT: close out the task's timeline. */
293 static int process_exit_event(struct perf_tool *tool __maybe_unused,
294 union perf_event *event,
295 struct perf_sample *sample __maybe_unused,
296 struct machine *machine __maybe_unused)
298 pid_exit(event->fork.pid, event->fork.time);
302 #ifdef SUPPORT_OLD_POWER_EVENTS
/* Set at record/report time when only the pre-2.6.38 power events exist. */
303 static int use_old_power_events;
/* A CPU entered idle state @state at @timestamp: remember both so the
 * interval can be emitted when the CPU leaves idle (c_state_end). */
306 static void c_state_start(int cpu, u64 timestamp, int state)
308 cpus_cstate_start_times[cpu] = timestamp;
309 cpus_cstate_state[cpu] = state;
/* A CPU left its idle state at @timestamp: emit a CSTATE power_event
 * covering the interval recorded by c_state_start. */
312 static void c_state_end(int cpu, u64 timestamp)
314 struct power_event *pwr = zalloc(sizeof(*pwr));
319 pwr->state = cpus_cstate_state[cpu];
320 pwr->start_time = cpus_cstate_start_times[cpu];
321 pwr->end_time = timestamp;
/* Push onto the global power_events list (newest first). */
324 pwr->next = power_events;
/* A CPU changed frequency: close out the previous P-state interval as a
 * PSTATE power_event, then start tracking the new frequency. Also keeps
 * the global min/max/turbo frequency statistics up to date. */
329 static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
331 struct power_event *pwr;
/* Frequencies above 8 GHz (kHz units) are treated as bogus data. */
333 if (new_freq > 8000000) /* detect invalid data */
336 pwr = zalloc(sizeof(*pwr));
340 pwr->state = cpus_pstate_state[cpu];
341 pwr->start_time = cpus_pstate_start_times[cpu];
342 pwr->end_time = timestamp;
345 pwr->next = power_events;
/* First transition seen for this CPU: backdate to the trace start. */
347 if (!pwr->start_time)
348 pwr->start_time = first_time;
352 cpus_pstate_state[cpu] = new_freq;
353 cpus_pstate_start_times[cpu] = timestamp;
355 if ((u64)new_freq > max_freq)
358 if (new_freq < min_freq || min_freq == 0)
/* Heuristic: a frequency exactly 1000 below the max indicates turbo mode. */
361 if (new_freq == max_freq - 1000)
362 turbo_frequency = max_freq;
/* Record a sched_wakeup: queue a wake_event for drawing and move the
 * wakee's current comm into TYPE_WAITING (closing out a BLOCKED sample
 * first, if that is what it was in). Wakeups from IRQ/softirq context
 * are distinguished via the trace flags. */
365 static void sched_wakeup(int cpu, u64 timestamp, int waker, int wakee,
366 u8 flags, const char *backtrace)
369 struct wake_event *we = zalloc(sizeof(*we));
374 we->time = timestamp;
376 we->backtrace = backtrace;
/* Interrupt-context wakeups are drawn differently (svg_interrupt). */
378 if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
382 we->next = wake_events;
384 p = find_create_pid(we->wakee);
/* Fresh task with no state yet: it is now waiting to run. */
386 if (p && p->current && p->current->state == TYPE_NONE) {
387 p->current->state_since = timestamp;
388 p->current->state = TYPE_WAITING;
/* Was blocked: emit the blocked interval, then switch to waiting. */
390 if (p && p->current && p->current->state == TYPE_BLOCKED) {
391 pid_put_sample(p->pid, p->current->state, cpu,
392 p->current->state_since, timestamp, NULL);
393 p->current->state_since = timestamp;
394 p->current->state = TYPE_WAITING;
/* Record a sched_switch: close out the outgoing task's RUNNING sample,
 * close out the incoming task's WAITING/BLOCKED sample and mark it
 * RUNNING, then set the outgoing task's new state from @prev_state
 * (blocked vs. merely preempted/waiting). */
398 static void sched_switch(int cpu, u64 timestamp, int prev_pid, int next_pid,
399 u64 prev_state, const char *backtrace)
401 struct per_pid *p = NULL, *prev_p;
403 prev_p = find_create_pid(prev_pid);
405 p = find_create_pid(next_pid);
/* The task being switched out was running until now. */
407 if (prev_p->current && prev_p->current->state != TYPE_NONE)
408 pid_put_sample(prev_pid, TYPE_RUNNING, cpu,
409 prev_p->current->state_since, timestamp,
/* The task being switched in stops waiting/being blocked and runs. */
411 if (p && p->current) {
412 if (p->current->state != TYPE_NONE)
413 pid_put_sample(next_pid, p->current->state, cpu,
414 p->current->state_since, timestamp,
417 p->current->state_since = timestamp;
418 p->current->state = TYPE_RUNNING;
/* Classify the outgoing task by prev_state: blocked (sleeping) or
 * still runnable (preempted -> waiting). */
421 if (prev_p->current) {
422 prev_p->current->state = TYPE_NONE;
423 prev_p->current->state_since = timestamp;
425 prev_p->current->state = TYPE_BLOCKED;
427 prev_p->current->state = TYPE_WAITING;
/* Render the sample's callchain into a newly allocated, newline-separated
 * text buffer (one "..... <ip> <symbol>" line per frame) using an
 * in-memory stream. Returns the buffer (caller presumably owns/frees it —
 * confirm at call sites) or NULL-ish on failure paths outside this view. */
431 static const char *cat_backtrace(union perf_event *event,
432 struct perf_sample *sample,
433 struct machine *machine)
435 struct addr_location al;
/* Frames default to user mode until a PERF_CONTEXT_* marker says otherwise. */
439 u8 cpumode = PERF_RECORD_MISC_USER;
440 struct addr_location tal;
441 struct ip_callchain *chain = sample->callchain;
442 FILE *f = open_memstream(&p, &p_len);
445 perror("open_memstream error");
/* Resolve the sample itself first; skip the event if that fails. */
452 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
453 fprintf(stderr, "problem processing %d event, skipping it.\n",
458 for (i = 0; i < chain->nr; i++) {
/* Walk the chain in callee order, or reversed for caller order. */
461 if (callchain_param.order == ORDER_CALLEE)
464 ip = chain->ips[chain->nr - i - 1];
/* Values >= PERF_CONTEXT_MAX are context markers, not real IPs:
 * they switch the cpumode used to resolve subsequent frames. */
466 if (ip >= PERF_CONTEXT_MAX) {
468 case PERF_CONTEXT_HV:
469 cpumode = PERF_RECORD_MISC_HYPERVISOR;
471 case PERF_CONTEXT_KERNEL:
472 cpumode = PERF_RECORD_MISC_KERNEL;
474 case PERF_CONTEXT_USER:
475 cpumode = PERF_RECORD_MISC_USER;
478 pr_debug("invalid callchain context: "
479 "%"PRId64"\n", (s64) ip);
482 * It seems the callchain is corrupted.
492 tal.filtered = false;
493 thread__find_addr_location(al.thread, machine, cpumode,
494 MAP__FUNCTION, ip, &tal);
/* Print the symbol name when resolution succeeded, bare address otherwise. */
497 fprintf(f, "..... %016" PRIx64 " %s\n", ip,
500 fprintf(f, "..... %016" PRIx64 "\n", ip);
/* Signature shared by all per-tracepoint sample handlers below; stored in
 * evsel->handler and dispatched from process_sample_event. */
509 typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
510 struct perf_sample *sample,
511 const char *backtrace);
/* perf_tool callback for PERF_RECORD_SAMPLE: maintain the global
 * first/last timestamps and CPU count, then dispatch to the tracepoint
 * handler registered on this evsel (passing the rendered backtrace). */
513 static int process_sample_event(struct perf_tool *tool __maybe_unused,
514 union perf_event *event,
515 struct perf_sample *sample,
516 struct perf_evsel *evsel,
517 struct machine *machine __maybe_unused)
519 if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
520 if (!first_time || first_time > sample->time)
521 first_time = sample->time;
522 if (last_time < sample->time)
523 last_time = sample->time;
/* NOTE(review): numcpus ends up holding the highest cpu *id* seen, not a
 * count — verify the consumers (loops use both <= and < numcpus). */
526 if (sample->cpu > numcpus)
527 numcpus = sample->cpu;
529 if (evsel->handler != NULL) {
530 tracepoint_handler f = evsel->handler;
531 return f(evsel, sample, cat_backtrace(event, sample, machine));
/* power:cpu_idle handler: state == PWR_EVENT_EXIT marks leaving idle,
 * any other value marks entering idle state "state". */
538 process_sample_cpu_idle(struct perf_evsel *evsel,
539 struct perf_sample *sample,
540 const char *backtrace __maybe_unused)
542 u32 state = perf_evsel__intval(evsel, sample, "state");
543 u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
545 if (state == (u32)PWR_EVENT_EXIT)
546 c_state_end(cpu_id, sample->time);
548 c_state_start(cpu_id, sample->time, state);
/* power:cpu_frequency handler: "state" carries the new frequency. */
553 process_sample_cpu_frequency(struct perf_evsel *evsel,
554 struct perf_sample *sample,
555 const char *backtrace __maybe_unused)
557 u32 state = perf_evsel__intval(evsel, sample, "state");
558 u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
560 p_state_change(cpu_id, sample->time, state);
/* sched:sched_wakeup handler: extract waker/wakee pids and IRQ flags. */
565 process_sample_sched_wakeup(struct perf_evsel *evsel,
566 struct perf_sample *sample,
567 const char *backtrace)
569 u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
570 int waker = perf_evsel__intval(evsel, sample, "common_pid");
571 int wakee = perf_evsel__intval(evsel, sample, "pid");
573 sched_wakeup(sample->cpu, sample->time, waker, wakee, flags, backtrace);
/* sched:sched_switch handler: extract outgoing/incoming pids and the
 * outgoing task's state. */
578 process_sample_sched_switch(struct perf_evsel *evsel,
579 struct perf_sample *sample,
580 const char *backtrace)
582 int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
583 int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
584 u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
586 sched_switch(sample->cpu, sample->time, prev_pid, next_pid, prev_state,
591 #ifdef SUPPORT_OLD_POWER_EVENTS
/* Legacy power:power_start handler (pre-2.6.38): enter idle state "value". */
593 process_sample_power_start(struct perf_evsel *evsel,
594 struct perf_sample *sample,
595 const char *backtrace __maybe_unused)
597 u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
598 u64 value = perf_evsel__intval(evsel, sample, "value");
600 c_state_start(cpu_id, sample->time, value);
/* Legacy power:power_end handler: leave idle on the sampling CPU. */
605 process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
606 struct perf_sample *sample,
607 const char *backtrace __maybe_unused)
609 c_state_end(sample->cpu, sample->time);
/* Legacy power:power_frequency handler: frequency change to "value". */
614 process_sample_power_frequency(struct perf_evsel *evsel,
615 struct perf_sample *sample,
616 const char *backtrace __maybe_unused)
618 u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
619 u64 value = perf_evsel__intval(evsel, sample, "value");
621 p_state_change(cpu_id, sample->time, value);
624 #endif /* SUPPORT_OLD_POWER_EVENTS */
627 * After the last sample we need to wrap up the current C/P state
628 * and close out each CPU for these.
630 static void end_sample_processing(void)
633 struct power_event *pwr;
/* NOTE(review): iterates cpu <= numcpus, treating numcpus as the highest
 * cpu id (see process_sample_event) — confirm consistency with drawers. */
635 for (cpu = 0; cpu <= numcpus; cpu++) {
/* Synthesize the final CSTATE interval up to the last timestamp. */
638 pwr = zalloc(sizeof(*pwr));
642 pwr->state = cpus_cstate_state[cpu];
643 pwr->start_time = cpus_cstate_start_times[cpu];
644 pwr->end_time = last_time;
647 pwr->next = power_events;
/* Synthesize the final PSTATE interval up to the last timestamp. */
653 pwr = zalloc(sizeof(*pwr));
657 pwr->state = cpus_pstate_state[cpu];
658 pwr->start_time = cpus_pstate_start_times[cpu];
659 pwr->end_time = last_time;
662 pwr->next = power_events;
/* No transition ever seen: interval spans the whole trace. */
664 if (!pwr->start_time)
665 pwr->start_time = first_time;
/* No frequency known for this CPU: fall back to the lowest seen. */
667 pwr->state = min_freq;
673 * Sort the pid datastructure
675 static void sort_pids(void)
677 struct per_pid *new_list, *p, *cursor, *prev;
678 /* sort by ppid first, then by pid, lowest to highest */
/* Insertion sort: move each entry from all_data into new_list in order. */
687 if (new_list == NULL) {
695 if (cursor->ppid > p->ppid ||
696 (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
697 /* must insert before */
/* Splice p in between prev and cursor. */
699 p->next = prev->next;
712 cursor = cursor->next;
/* Emit all queued C-state and P-state intervals into the SVG. */
721 static void draw_c_p_states(void)
723 struct power_event *pwr;
727 * two pass drawing so that the P state bars are on top of the C state blocks
/* Pass 1: C-state (idle) blocks. */
730 if (pwr->type == CSTATE)
731 svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
/* Pass 2: P-state (frequency) bars; unknown frequency -> lowest seen. */
737 if (pwr->type == PSTATE) {
739 pwr->state = min_freq;
740 svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
/* Draw an arrow (or interrupt marker) for every recorded wakeup,
 * connecting the SVG rows of the waker and the wakee. */
746 static void draw_wakeups(void)
748 struct wake_event *we;
750 struct per_pidcomm *c;
754 int from = 0, to = 0;
755 char *task_from = NULL, *task_to = NULL;
757 /* locate the column of the waker and wakee */
760 if (p->pid == we->waker || p->pid == we->wakee) {
/* Prefer a comm whose drawn time range actually covers the wakeup. */
763 if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
764 if (p->pid == we->waker && !from) {
766 task_from = strdup(c->comm);
768 if (p->pid == we->wakee && !to) {
770 task_to = strdup(c->comm);
/* Fallback: any comm row of the matching pid. */
777 if (p->pid == we->waker && !from) {
779 task_from = strdup(c->comm);
781 if (p->pid == we->wakee && !to) {
783 task_to = strdup(c->comm);
/* No comm found at all: label with the bare pid.
 * NOTE(review): malloc result is used by sprintf without a NULL check. */
792 task_from = malloc(40);
793 sprintf(task_from, "[%i]", we->waker);
796 task_to = malloc(40);
797 sprintf(task_to, "[%i]", we->wakee);
/* Interrupt wakeups get a marker; adjacent rows a plain line; distant
 * rows a partial line labelled with both task names. */
801 svg_interrupt(we->time, to, we->backtrace);
802 else if (from && to && abs(from - to) == 1)
803 svg_wakeline(we->time, from, to, we->backtrace);
805 svg_partial_wakeline(we->time, from, task_from, to,
806 task_to, we->backtrace);
/* Draw every RUNNING sample onto its CPU's row in the SVG. */
814 static void draw_cpu_usage(void)
817 struct per_pidcomm *c;
818 struct cpu_sample *sample;
825 if (sample->type == TYPE_RUNNING)
826 svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
828 sample = sample->next;
/* Draw one row per displayed comm: the process box, then each
 * running/blocked/waiting sample, then a text label with the total
 * runtime (seconds above 5 s, milliseconds otherwise). */
836 static void draw_process_bars(void)
839 struct per_pidcomm *c;
840 struct cpu_sample *sample;
855 svg_box(Y, c->start_time, c->end_time, "process");
858 if (sample->type == TYPE_RUNNING)
859 svg_running(Y, sample->cpu,
863 if (sample->type == TYPE_BLOCKED)
864 svg_blocked(Y, sample->cpu,
868 if (sample->type == TYPE_WAITING)
869 svg_waiting(Y, sample->cpu,
873 sample = sample->next;
/* NOTE(review): sprintf into a fixed "comm" buffer whose size is not
 * visible here — confirm it cannot be overrun by long comm names. */
878 if (c->total_time > 5000000000) /* 5 seconds */
879 sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
881 sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
883 svg_text(Y, c->start_time, comm);
/* Parse a -p/--process argument (numeric pid or process name) and push
 * it onto the global process_filter list.
 * NOTE(review): malloc result is dereferenced without a NULL check. */
893 static void add_process_filter(const char *string)
895 int pid = strtoull(string, NULL, 10);
896 struct process_filter *filt = malloc(sizeof(*filt));
/* Keep the original string too, so name-based matching still works. */
901 filt->name = strdup(string);
903 filt->next = process_filter;
905 process_filter = filt;
/* Return whether (p, c) matches any registered filter, either by pid
 * or by exact comm-name comparison. */
908 static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
910 struct process_filter *filt;
914 filt = process_filter;
916 if (filt->pid && p->pid == filt->pid)
918 if (strcmp(filt->name, c->comm) == 0)
/* Select which tasks/comms to display when -p filters are active:
 * normalize unset start/end times to the trace bounds and mark entries
 * that pass the filter. Returns the number of rows to draw. */
925 static int determine_display_tasks_filtered(void)
928 struct per_pidcomm *c;
/* start_time of 1 appears to be a "not yet known" marker — confirm. */
934 if (p->start_time == 1)
935 p->start_time = first_time;
937 /* no exit marker, task kept running to the end */
938 if (p->end_time == 0)
939 p->end_time = last_time;
946 if (c->start_time == 1)
947 c->start_time = first_time;
949 if (passes_filter(p, c)) {
955 if (c->end_time == 0)
956 c->end_time = last_time;
/* Select which tasks/comms to display: defer to the filtered variant
 * when filters are set, otherwise show entries whose accumulated
 * runtime meets @threshold. Returns the number of rows to draw. */
965 static int determine_display_tasks(u64 threshold)
968 struct per_pidcomm *c;
972 return determine_display_tasks_filtered();
/* start_time of 1 appears to be a "not yet known" marker — confirm. */
977 if (p->start_time == 1)
978 p->start_time = first_time;
980 /* no exit marker, task kept running to the end */
981 if (p->end_time == 0)
982 p->end_time = last_time;
983 if (p->total_time >= threshold)
991 if (c->start_time == 1)
992 c->start_time = first_time;
994 if (c->total_time >= threshold) {
999 if (c->end_time == 0)
1000 c->end_time = last_time;
/* Default visibility threshold: tasks below 10 ms of runtime are hidden. */
1011 #define TIME_THRESH 10000000
/* Produce the final SVG: pick a runtime threshold that yields at least
 * proc_num visible tasks (relaxing it as needed), then draw the CPU
 * boxes, per-CPU state bars and process rows. */
1013 static void write_svg_file(const char *filename)
1017 int thresh = TIME_THRESH;
1024 /* We'd like to show at least proc_num tasks;
1025 * be less picky if we have fewer */
1027 count = determine_display_tasks(thresh);
/* Keep lowering the threshold until enough tasks show (unless filtering). */
1029 } while (!process_filter && thresh && count < proc_num);
1031 open_svg(filename, numcpus, count, first_time, last_time);
1036 for (i = 0; i < numcpus; i++)
1037 svg_cpu_box(i, max_freq, turbo_frequency);
1041 draw_process_bars();
/* Report mode: open the perf.data session, register per-tracepoint
 * handlers, process all events in timestamp order, then render the SVG
 * to @output_name. Returns 0 on success, negative on error. */
1050 static int __cmd_timechart(const char *output_name)
1052 struct perf_tool perf_timechart = {
1053 .comm = process_comm_event,
1054 .fork = process_fork_event,
1055 .exit = process_exit_event,
1056 .sample = process_sample_event,
/* Ordered delivery matters: state machines above assume time order. */
1057 .ordered_samples = true,
1059 const struct perf_evsel_str_handler power_tracepoints[] = {
1060 { "power:cpu_idle", process_sample_cpu_idle },
1061 { "power:cpu_frequency", process_sample_cpu_frequency },
1062 { "sched:sched_wakeup", process_sample_sched_wakeup },
1063 { "sched:sched_switch", process_sample_sched_switch },
1064 #ifdef SUPPORT_OLD_POWER_EVENTS
1065 { "power:power_start", process_sample_power_start },
1066 { "power:power_end", process_sample_power_end },
1067 { "power:power_frequency", process_sample_power_frequency },
1070 struct perf_data_file file = {
1072 .mode = PERF_DATA_MODE_READ,
1075 struct perf_session *session = perf_session__new(&file, false,
1079 if (session == NULL)
/* A timechart needs tracepoint data; bail out early if absent. */
1082 if (!perf_session__has_traces(session, "timechart record"))
1085 if (perf_session__set_tracepoints_handlers(session,
1086 power_tracepoints)) {
1087 pr_err("Initializing session tracepoint handlers failed\n");
1091 ret = perf_session__process_events(session, &perf_timechart);
/* Close out dangling C/P-state intervals before drawing. */
1095 end_sample_processing();
1099 write_svg_file(output_name);
1101 pr_info("Written %2.1f seconds of trace to %s.\n",
1102 (last_time - first_time) / 1000000000.0, output_name);
1104 perf_session__delete(session);
/* Record mode: assemble an argv for "perf record" from the fixed common
 * arguments plus the power/scheduler tracepoints (new- or old-style,
 * whichever the running kernel supports), optional callchain flags, and
 * the user's own extra arguments, then exec cmd_record with it. */
1108 static int __cmd_record(int argc, const char **argv)
1110 unsigned int rec_argc, i, j;
1111 const char **rec_argv;
1113 unsigned int record_elems;
/* System-wide, raw, one event per occurrence. */
1115 const char * const common_args[] = {
1116 "record", "-a", "-R", "-c", "1",
1118 unsigned int common_args_nr = ARRAY_SIZE(common_args);
1120 const char * const backtrace_args[] = {
1123 unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
/* Tracepoints available on 2.6.38+ kernels. */
1125 const char * const power_args[] = {
1126 "-e", "power:cpu_frequency",
1127 "-e", "power:cpu_idle",
1129 unsigned int power_args_nr = ARRAY_SIZE(power_args);
/* Legacy tracepoint names, compiled in only when supported. */
1131 const char * const old_power_args[] = {
1132 #ifdef SUPPORT_OLD_POWER_EVENTS
1133 "-e", "power:power_start",
1134 "-e", "power:power_end",
1135 "-e", "power:power_frequency",
1138 unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1140 const char * const tasks_args[] = {
1141 "-e", "sched:sched_wakeup",
1142 "-e", "sched:sched_switch",
1144 unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
/* Runtime probe: fall back to the old event names when the new ones
 * are missing but the old ones exist. */
1146 #ifdef SUPPORT_OLD_POWER_EVENTS
1147 if (!is_valid_tracepoint("power:cpu_idle") &&
1148 is_valid_tracepoint("power:power_start")) {
1149 use_old_power_events = 1;
1152 old_power_args_nr = 0;
1161 old_power_args_nr = 0;
1164 if (!with_backtrace)
1165 backtrace_args_no = 0;
/* Total argv size: fixed pieces plus the user's passthrough args. */
1167 record_elems = common_args_nr + tasks_args_nr +
1168 power_args_nr + old_power_args_nr + backtrace_args_no;
1170 rec_argc = record_elems + argc;
1171 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1173 if (rec_argv == NULL)
/* Copy each argument group in order into the new argv. */
1177 for (i = 0; i < common_args_nr; i++)
1178 *p++ = strdup(common_args[i]);
1180 for (i = 0; i < backtrace_args_no; i++)
1181 *p++ = strdup(backtrace_args[i]);
1183 for (i = 0; i < tasks_args_nr; i++)
1184 *p++ = strdup(tasks_args[i]);
1186 for (i = 0; i < power_args_nr; i++)
1187 *p++ = strdup(power_args[i]);
1189 for (i = 0; i < old_power_args_nr; i++)
1190 *p++ = strdup(old_power_args[i]);
/* Append the user's remaining arguments (skipping argv[0]). */
1192 for (j = 1; j < (unsigned int)argc; j++)
1195 return cmd_record(rec_argc, rec_argv, NULL);
/* OPT_CALLBACK hook for -p/--process: register one filter per use. */
1199 parse_process(const struct option *opt __maybe_unused, const char *arg,
1200 int __maybe_unused unset)
1203 add_process_filter(arg);
/* Entry point for "perf timechart": parse the common options, reject the
 * mutually exclusive -P/-T combination, then dispatch to record mode
 * ("perf timechart record ...") or to report mode (SVG generation). */
1207 int cmd_timechart(int argc, const char **argv,
1208 const char *prefix __maybe_unused)
1210 const char *output_name = "output.svg";
1211 const struct option timechart_options[] = {
1212 OPT_STRING('i', "input", &input_name, "file", "input file name"),
1213 OPT_STRING('o', "output", &output_name, "file", "output file name"),
1214 OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1215 OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
1216 OPT_BOOLEAN('T', "tasks-only", &tasks_only,
1217 "output processes data only"),
1218 OPT_CALLBACK('p', "process", NULL, "process",
1219 "process selector. Pass a pid or process name.",
1221 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1222 "Look for files with symbols relative to this directory"),
1223 OPT_INTEGER('n', "proc-num", &proc_num,
1224 "min. number of tasks to print"),
1227 const char * const timechart_usage[] = {
1228 "perf timechart [<options>] {record}",
/* Separate option table for the "record" subcommand. */
1232 const struct option record_options[] = {
1233 OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"),
1234 OPT_BOOLEAN('T', "tasks-only", &tasks_only,
1235 "output processes data only"),
1236 OPT_BOOLEAN('g', "callchain", &with_backtrace, "record callchain"),
1239 const char * const record_usage[] = {
1240 "perf timechart record [<options>]",
1243 argc = parse_options(argc, argv, timechart_options, timechart_usage,
1244 PARSE_OPT_STOP_AT_NON_OPTION);
/* -P and -T are mutually exclusive in both modes. */
1246 if (power_only && tasks_only) {
1247 pr_err("-P and -T options cannot be used at the same time.\n");
/* "rec..." prefix selects record mode; re-parse with record options. */
1253 if (argc && !strncmp(argv[0], "rec", 3)) {
1254 argc = parse_options(argc, argv, record_options, record_usage,
1255 PARSE_OPT_STOP_AT_NON_OPTION);
1257 if (power_only && tasks_only) {
1258 pr_err("-P and -T options cannot be used at the same time.\n");
1262 return __cmd_record(argc, argv);
1264 usage_with_options(timechart_usage, timechart_options);
/* Default: report mode, writing the SVG to output_name. */
1268 return __cmd_timechart(output_name);