#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/data_map.h"

#include <sys/types.h>
#include <sys/prctl.h>

#include <semaphore.h>

static char const *input_name = "perf.data";

static unsigned long total_comm = 0;

static struct rb_root threads;
static struct thread *last_match;

static struct perf_header *header;
static u64 sample_type;

static char default_sort_order[] = "avg, max, switch, runtime";
static char *sort_order = default_sort_order;

static int profile_cpu = -1;

#define PR_SET_NAME 15 /* Set process name */

#define BUG_ON(x) assert(!(x))

static u64 run_measurement_overhead;
static u64 sleep_measurement_overhead;

static unsigned long nr_tasks;

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

enum sched_event_type {
	SCHED_EVENT_MIGRATION,

	enum sched_event_type type;
	struct task_desc *wakee;

static struct task_desc *pid_to_task[MAX_PID];

static struct task_desc **tasks;

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64 start_time;

static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long nr_run_events;
static unsigned long nr_sleep_events;
static unsigned long nr_wakeup_events;

static unsigned long nr_sleep_corrections;
static unsigned long nr_run_events_optimized;

static unsigned long targetless_wakeups;
static unsigned long multitarget_wakeups;

static u64 cpu_usage;
static u64 runavg_cpu_usage;
static u64 parent_cpu_usage;
static u64 runavg_parent_cpu_usage;

static unsigned long nr_runs;
static u64 sum_runtime;
static u64 sum_fluct;

static unsigned long replay_repeat = 10;
static unsigned long nr_timestamps;
static unsigned long nr_unordered_timestamps;
static unsigned long nr_state_machine_bugs;
static unsigned long nr_context_switch_bugs;
static unsigned long nr_events;
static unsigned long nr_lost_chunks;
static unsigned long nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

	struct list_head list;
	enum thread_state state;

	struct list_head work_list;
	struct thread *thread;

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root atom_root, sorted_atom_root;

static u64 all_runtime;
static u64 all_count;
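
/*
 * All timing below is based on CLOCK_MONOTONIC, so measurements are
 * immune to wall-clock adjustments; the reading is folded into a
 * single u64 nanosecond value.
 */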
static u64 get_nsecs(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
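
/*
 * Busy-loop for 'nsecs': spin until the monotonic clock has advanced
 * by the requested amount, compensating for the cost of the time
 * measurement itself (run_measurement_overhead).
 */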
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;
	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;
	ts.tv_nsec = nsecs % 1000000000;
	ts.tv_sec = nsecs / 1000000000;
	nanosleep(&ts, NULL);
}
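
/*
 * The two calibration routines below time a minimal operation ten
 * times and keep the smallest observed delta as the fixed measurement
 * cost, so that replayed runtimes are not inflated by instrumentation
 * overhead.
 */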
static void calibrate_run_measurement_overhead(void)
	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {
		min_delta = min(min_delta, delta);
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);

static void calibrate_sleep_measurement_overhead(void)
	u64 T0, T1, delta, min_delta = 1000000000ULL;

	for (i = 0; i < 10; i++) {
		min_delta = min(min_delta, delta);
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
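
/*
 * Allocate a new atom and append it to the task's atom array; the
 * array is grown by one pointer per recorded event via realloc().
 */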
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
	struct sched_atom *event = calloc(1, sizeof(*event));
	unsigned long idx = task->nr_events;

	event->timestamp = timestamp;

	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

static struct sched_atom *last_event(struct task_desc *task)
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
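
/*
 * Record a RUN atom for a task. Consecutive RUN atoms are merged into
 * the previous one, which shortens the replayed event stream without
 * changing the total simulated runtime.
 */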
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * with it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;

	if (wakee_event->wait_sem) {
		multitarget_wakeups++;

	wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;
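
/*
 * Map a traced PID to its task_desc, allocating the descriptor the
 * first time the PID is seen; pid_to_task[] is a direct-mapped lookup
 * table bounded by MAX_PID.
 */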
static struct task_desc *register_pid(unsigned long pid, const char *comm)
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	task = calloc(1, sizeof(*task));
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	tasks = realloc(tasks, nr_tasks * sizeof(struct task_desc *));
	tasks[task->nr] = task;

	printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

static void print_task_traces(void)
	struct task_desc *task;

	for (i = 0; i < nr_tasks; i++) {
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
		       task->nr, task->comm, task->pid, task->nr_events);

static void add_cross_task_wakeups(void)
	struct task_desc *task1, *task2;

	for (i = 0; i < nr_tasks; i++) {
		add_sched_event_wakeup(task1, 0, task2);

process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
	case SCHED_EVENT_SLEEP:
		ret = sem_wait(atom->wait_sem);
	case SCHED_EVENT_WAKEUP:
		ret = sem_post(atom->wait_sem);
	case SCHED_EVENT_MIGRATION:
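
/*
 * CPU usage of the parent is taken from getrusage(RUSAGE_SELF): user
 * plus system time, scaled from seconds and microseconds up to
 * nanoseconds.
 */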
static u64 get_cpu_usage_nsec_parent(void)
	err = getrusage(RUSAGE_SELF, &ru);

	sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
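
/*
 * Per-thread CPU usage is parsed from /proc/<pid>/sched: the kernel
 * prints se.sum_exec_runtime as milliseconds with a six-digit
 * nanosecond remainder, so msecs*1e6 + nsecs reconstructs nanoseconds.
 */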
static u64 get_cpu_usage_nsec_self(void)
	char filename[] = "/proc/1234567890/sched";
	unsigned long msecs, nsecs;

	sprintf(filename, "/proc/%d/sched", getpid());
	file = fopen(filename, "r");

	while ((chars = getline(&line, &len, file)) != -1) {
		ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
			     &msecs, &nsecs);
		total = msecs*1e6 + nsecs;
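
/*
 * Body of every simulated task: announce readiness, block until the
 * parent releases start_work_mutex, replay the recorded atoms in
 * order, account this thread's CPU usage, then post work_done_sem and
 * synchronize on work_done_wait_mutex.
 */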
static void *thread_func(void *ctx)
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);

	ret = sem_post(&this_task->ready_for_work);
	ret = pthread_mutex_lock(&start_work_mutex);
	ret = pthread_mutex_unlock(&start_work_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_self();

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);

	cpu_usage_1 = get_cpu_usage_nsec_self();
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;

	ret = sem_post(&this_task->work_done_sem);
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);

static void create_tasks(void)
	struct task_desc *task;

	err = pthread_attr_init(&attr);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	err = pthread_mutex_lock(&start_work_mutex);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	for (i = 0; i < nr_tasks; i++) {
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
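
/*
 * Parent-side barrier for one replay iteration, in outline: wait for
 * every task to post ready_for_work, drop start_work_mutex to release
 * them all at once, collect the work_done_sem posts and per-task CPU
 * usage, then re-arm the semaphores for the next round.
 */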
static void wait_for_tasks(void)
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();

	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		ret = sem_wait(&task->ready_for_work);
		sem_init(&task->ready_for_work, 0, 0);
	ret = pthread_mutex_lock(&work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		ret = sem_wait(&task->work_done_sem);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;

static void run_one_test(void)
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	sum_runtime += delta;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);

	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
	       nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
	       (double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
	       (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
	       (double)parent_cpu_usage/1e6,
	       (double)runavg_parent_cpu_usage/1e6);

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;

static void test_calibrations(void)
	printf("the run test took %Ld nsecs\n", T1-T0);
	printf("the sleep test took %Ld nsecs\n", T1-T0);

process_comm_event(event_t *event, unsigned long offset, unsigned long head)
	struct thread *thread;

	thread = threads__findnew(event->comm.tid, &threads, &last_match);

	dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
		    (void *)(offset + head),
		    (void *)(long)(event->header.size),
		    event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing perf_event_comm, skipping event.\n");
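
/*
 * Raw tracepoint payloads are decoded by field name: FILL_FIELD() and
 * FILL_ARRAY() below fetch a field via raw_field_value() or
 * raw_field_ptr() and assign it with the destination member's type,
 * e.g. FILL_FIELD(wakeup_event, pid, event, raw->data) reads the
 * tracepoint's "pid" field into wakeup_event.pid.
 */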
struct raw_event_sample {

#define FILL_FIELD(ptr, field, event, data) \
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data) \
do { \
	void *__array = raw_field_ptr(event, #array, data); \
	memcpy(ptr.array, __array, sizeof(ptr.array)); \
} while (0)

#define FILL_COMMON_FIELDS(ptr, event, data) \
do { \
	FILL_FIELD(ptr, common_type, event, data); \
	FILL_FIELD(ptr, common_flags, event, data); \
	FILL_FIELD(ptr, common_preempt_count, event, data); \
	FILL_FIELD(ptr, common_pid, event, data); \
	FILL_FIELD(ptr, common_tgid, event, data); \
} while (0)

struct trace_switch_event {
	u8 common_preempt_count;

struct trace_runtime_event {
	u8 common_preempt_count;

struct trace_wakeup_event {
	u8 common_preempt_count;

struct trace_fork_event {
	u8 common_preempt_count;
	char parent_comm[16];

struct trace_migrate_task_event {
	u8 common_preempt_count;

struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
				   struct thread *thread);

replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    u64 timestamp __used,
		    struct thread *thread __used)
	struct task_desc *waker, *wakee;

	printf("sched_wakeup event %p\n", event);

	printf(" ... pid %d woke up %s/%d\n",
	       wakeup_event->common_pid,

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
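
/*
 * Per-CPU timestamp of the previous context switch; the delta to the
 * current switch is how long the outgoing task ran on that CPU.
 */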
static u64 cpu_last_switched[MAX_CPUS];

replay_switch_event(struct trace_switch_event *switch_event,
		    struct thread *thread __used)
	struct task_desc *prev, *next;

	printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];

	delta = timestamp - timestamp0;

	die("hm, delta: %Ld < 0 ?\n", delta);

	printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
	       switch_event->prev_comm, switch_event->prev_pid,
	       switch_event->next_comm, switch_event->next_pid,

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);

replay_fork_event(struct trace_fork_event *fork_event,
		  u64 timestamp __used,
		  struct thread *thread __used)
	printf("sched_fork event %p\n", event);
	printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
	printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);

	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);

static struct trace_sched_handler replay_ops = {
	.wakeup_event = replay_wakeup_event,
	.switch_event = replay_switch_event,
	.fork_event = replay_fork_event,

struct sort_dimension {
	struct list_head list;

static LIST_HEAD(cmp_pid);
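
/*
 * Walk the configured sort keys in order and let the first key that
 * sees a difference decide: with the default sort order
 * "avg, max, switch, runtime", ties on average latency fall through
 * to maximum latency, then to switch count, then to total runtime.
 */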
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);

static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
		    struct list_head *sort_list)
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

		struct work_atoms *atoms;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
			node = node->rb_left;
			node = node->rb_right;
		BUG_ON(thread != atoms->thread);

__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			struct list_head *sort_list)
	struct rb_node **new = &(root->rb_node), *parent = NULL;

		struct work_atoms *this;

		this = container_of(*new, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, data, this);
			new = &((*new)->rb_left);
			new = &((*new)->rb_right);

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
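
/*
 * First time we see a thread: allocate an empty work_atoms container
 * and link it into the latency tree, ordered by the cmp_pid key list.
 */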
static void thread_atoms_insert(struct thread *thread)
	struct work_atoms *atoms;

	atoms = calloc(1, sizeof(*atoms));

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);

latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
	/* should insert the newcomer */

static char sched_out_state(struct trace_switch_event *switch_event)
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];

add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
	struct work_atom *atom;

	atom = calloc(1, sizeof(*atom));

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;

	list_add_tail(&atom->list, &atoms->work_list);

add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;

add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
	struct work_atom *atom;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat)
		atoms->max_lat = delta;

latency_switch_event(struct trace_switch_event *switch_event,
		     struct event *event __used,
		     struct thread *thread __used)
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;

	delta = timestamp - timestamp0;

	die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * Task came in we have not heard about yet,
		 * add in an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);

latency_runtime_event(struct trace_runtime_event *runtime_event,
		      struct event *event __used,
		      struct thread *this_thread __used)
	struct work_atoms *atoms;
	struct thread *thread;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	thread = threads__findnew(runtime_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);

latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct event *__event __used,
		     struct thread *thread __used)
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;

latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
			   struct event *__event __used,
			   struct thread *thread __used)
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return;

	migrant = threads__findnew(migrate_task_event->pid, &threads, &last_match);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms)
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;

static struct trace_sched_handler lat_ops = {
	.wakeup_event = latency_wakeup_event,
	.switch_event = latency_switch_event,
	.runtime_event = latency_runtime_event,
	.fork_event = latency_fork_event,
	.migrate_task_event = latency_migrate_task_event,

static void output_lat_thread(struct work_atoms *work_list)
	if (!work_list->nb_atoms)
		return;

	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
	       (double)work_list->total_runtime / 1e6,
	       work_list->nb_atoms, (double)avg / 1e6,
	       (double)work_list->max_lat / 1e6);

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

static struct sort_dimension pid_sort_dimension = {

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

static struct sort_dimension avg_sort_dimension = {

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

static struct sort_dimension max_sort_dimension = {

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

static struct sort_dimension switch_sort_dimension = {

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

static struct sort_dimension runtime_sort_dimension = {

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,

#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

static void setup_sorting(void);

static void sort_lat(void)
	struct rb_node *node;

		struct work_atoms *data;
		node = rb_first(&atom_root);

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);

static struct trace_sched_handler *trace_handler;

process_sched_wakeup_event(struct raw_event_sample *raw,
			   struct event *event,
			   u64 timestamp __used,
			   struct thread *thread __used)
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, raw->data);

	FILL_ARRAY(wakeup_event, comm, event, raw->data);
	FILL_FIELD(wakeup_event, pid, event, raw->data);
	FILL_FIELD(wakeup_event, prio, event, raw->data);
	FILL_FIELD(wakeup_event, success, event, raw->data);
	FILL_FIELD(wakeup_event, cpu, event, raw->data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);

/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';
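
/*
 * 'perf sched map' names each thread with a compact two-character ID
 * the first time it is scheduled in: A0, B0, ... Z0, then A1 up to
 * Z9. Every switch prints one line showing the shortname running on
 * each CPU, with '*' marking the CPU where the switch happened.
 */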
map_switch_event(struct trace_switch_event *switch_event,
		 struct event *event __used,
		 struct thread *thread __used)
	struct thread *sched_out, *sched_in;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;

	delta = timestamp - timestamp0;

	die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);

	curr_thread[this_cpu] = sched_in;

	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1 = 'A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2 = '0';

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);

	printf(" %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
		       sched_in->shortname, sched_in->comm, sched_in->pid);

process_sched_switch_event(struct raw_event_sample *raw,
			   struct event *event,
			   u64 timestamp __used,
			   struct thread *thread __used)
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, raw->data);

	FILL_ARRAY(switch_event, prev_comm, event, raw->data);
	FILL_FIELD(switch_event, prev_pid, event, raw->data);
	FILL_FIELD(switch_event, prev_prio, event, raw->data);
	FILL_FIELD(switch_event, prev_state, event, raw->data);
	FILL_ARRAY(switch_event, next_comm, event, raw->data);
	FILL_FIELD(switch_event, next_pid, event, raw->data);
	FILL_FIELD(switch_event, next_prio, event, raw->data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;

	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;

process_sched_runtime_event(struct raw_event_sample *raw,
			    struct event *event,
			    u64 timestamp __used,
			    struct thread *thread __used)
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, raw->data);
	FILL_FIELD(runtime_event, pid, event, raw->data);
	FILL_FIELD(runtime_event, runtime, event, raw->data);
	FILL_FIELD(runtime_event, vruntime, event, raw->data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);

process_sched_fork_event(struct raw_event_sample *raw,
			 struct event *event,
			 u64 timestamp __used,
			 struct thread *thread __used)
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, raw->data);

	FILL_ARRAY(fork_event, parent_comm, event, raw->data);
	FILL_FIELD(fork_event, parent_pid, event, raw->data);
	FILL_ARRAY(fork_event, child_comm, event, raw->data);
	FILL_FIELD(fork_event, child_pid, event, raw->data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);

process_sched_exit_event(struct event *event,
			 u64 timestamp __used,
			 struct thread *thread __used)
	printf("sched_exit event %p\n", event);

process_sched_migrate_task_event(struct raw_event_sample *raw,
				 struct event *event,
				 u64 timestamp __used,
				 struct thread *thread __used)
	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, raw->data);

	FILL_ARRAY(migrate_task_event, comm, event, raw->data);
	FILL_FIELD(migrate_task_event, pid, event, raw->data);
	FILL_FIELD(migrate_task_event, prio, event, raw->data);
	FILL_FIELD(migrate_task_event, cpu, event, raw->data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, event, cpu, timestamp, thread);

process_raw_event(event_t *raw_event __used, void *more_data,
		  int cpu, u64 timestamp, struct thread *thread)
	struct raw_event_sample *raw = more_data;
	struct event *event;

	type = trace_parse_common_type(raw->data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(raw, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(raw, event, cpu, timestamp, thread);
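
/*
 * PERF_RECORD_SAMPLE decoding: the optional fields that follow the IP
 * are laid out in sample_type bit order, so process_sample_event()
 * steps more_data past TIME (u64), then CPU plus a reserved u32, then
 * PERIOD, before handing the raw tracepoint payload to
 * process_raw_event().
 */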
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
	struct thread *thread;
	u64 ip = event->ip.ip;
	void *more_data = event->ip.__more_data;

	if (!(sample_type & PERF_SAMPLE_RAW))
		return 0;

	thread = threads__findnew(event->ip.pid, &threads, &last_match);

	if (sample_type & PERF_SAMPLE_TIME) {
		timestamp = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	if (sample_type & PERF_SAMPLE_CPU) {
		cpu = *(u32 *)more_data;
		more_data += sizeof(u32);
		more_data += sizeof(u32); /* reserved */
	}

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
		    (void *)(offset + head),
		    (void *)(long)(event->header.size),
		    event->ip.pid, event->ip.tid,

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);

	if (profile_cpu != -1 && profile_cpu != (int)cpu)
		return 0;

	process_raw_event(event, more_data, cpu, timestamp, thread);

process_lost_event(event_t *event __used,
		   unsigned long offset __used,
		   unsigned long head __used)
	nr_lost_events += event->lost.lost;

static int sample_type_check(u64 type)
	if (!(sample_type & PERF_SAMPLE_RAW)) {
		fprintf(stderr,
			"No trace sample to read. Did you call perf record "
			"without -R?");

static struct perf_file_handler file_handler = {
	.process_sample_event = process_sample_event,
	.process_comm_event = process_comm_event,
	.process_lost_event = process_lost_event,
	.sample_type_check = sample_type_check,
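
/*
 * read_events() plugs the callbacks above into the generic perf.data
 * reader: every record dispatched from the mmap'ed file ends up in
 * the matching handler, with samples going to process_sample_event().
 */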
static int read_events(void)
	register_idle_thread(&threads, &last_match);
	register_perf_file_handler(&file_handler);

	return mmap_dispatch_perf_file(&header, input_name, 0, 0, &cwdlen, &cwd);

static void print_bad_events(void)
	if (nr_unordered_timestamps && nr_timestamps) {
		printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
		       (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
		       nr_unordered_timestamps, nr_timestamps);
	if (nr_lost_events && nr_events) {
		printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
		       (double)nr_lost_events/(double)nr_events*100.0,
		       nr_lost_events, nr_events, nr_lost_chunks);
	if (nr_state_machine_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
		       (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
		       nr_state_machine_bugs, nr_timestamps);
		printf(" (due to lost events?)");
	if (nr_context_switch_bugs && nr_timestamps) {
		printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
		       (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
		       nr_context_switch_bugs, nr_timestamps);
		printf(" (due to lost events?)");
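
/*
 * 'perf sched latency' output path: once the events are read and the
 * per-thread atoms re-sorted by the configured keys, one row is
 * printed per thread, followed by the totals and the consistency
 * warnings collected by print_bad_events().
 */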
static void __cmd_lat(void)
	struct rb_node *next;

	printf("\n -----------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms |\n");
	printf(" -----------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);

	printf(" -----------------------------------------------------------------------------------------\n");
	printf(" TOTAL:                |%11.3f ms |%9Ld |\n",
	       (double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

static struct trace_sched_handler map_ops = {
	.wakeup_event = NULL,
	.switch_event = map_switch_event,
	.runtime_event = NULL,

static void __cmd_map(void)
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

static void __cmd_replay(void)
	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	printf("nr_run_events: %ld\n", nr_run_events);
	printf("nr_sleep_events: %ld\n", nr_sleep_events);
	printf("nr_wakeup_events: %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups: %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
		       nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",

static const struct option replay_options[] = {
	OPT_INTEGER('r', "repeat", &replay_repeat,
		    "repeat the workload replay N times (-1: infinite)"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),

static void setup_sorting(void)
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);

	sort_dimension__add("pid", &cmp_pid);

static const char *record_args[] = {
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
static int __cmd_record(int argc, const char **argv)
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);

int cmd_sched(int argc, const char **argv, const char *prefix __used)
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		argc = parse_options(argc, argv, latency_options, latency_usage, 0);
		if (argc)
			usage_with_options(latency_usage, latency_options);
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		argc = parse_options(argc, argv, replay_options, replay_usage, 0);
		if (argc)
			usage_with_options(replay_usage, replay_options);
	} else if (!strcmp(argv[0], "trace")) {
		/*
		 * Aliased to 'perf trace' for now:
		 */
		return cmd_trace(argc, argv, prefix);

	usage_with_options(sched_usage, sched_options);