/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#define _FILE_OFFSET_BITS 64

#include "builtin.h"
#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/header.h"
#include "util/event.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/cpumap.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
enum write_mode_t {
	WRITE_FORCE,
	WRITE_APPEND
};

static int *fd[MAX_NR_CPUS][MAX_COUNTERS];

static u64 user_interval = ULLONG_MAX;
static u64 default_interval = 0;
static u64 sample_type;

static int nr_cpus = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static unsigned int user_freq = UINT_MAX;
static int freq = 1000;
static int output;
static int pipe_output = 0;
static const char *output_name = "perf.data";
static int group = 0;
static int group_fd = -1;
static int realtime_prio = 0;
static bool raw_samples = false;
static bool system_wide = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
static pid_t *all_tids = NULL;
static int thread_num = 0;
static pid_t child_pid = -1;
static bool no_inherit = false;
static enum write_mode_t write_mode = WRITE_FORCE;
static bool call_graph = false;
static bool inherit_stat = false;
static bool no_samples = false;
static bool sample_address = false;
static bool no_buildid = false;
static bool no_buildid_cache = false;

static long samples = 0;
static u64 bytes_written = 0;

static struct pollfd *event_array;

static int nr_poll = 0;
static int nr_cpu = 0;

static int file_new = 1;
static off_t post_processing_offset;

static struct perf_session *session;
static const char *cpu_list;

struct mmap_data {
	int counter;
	void *base;
	unsigned int mask;
	unsigned int prev;
};

static struct mmap_data mmap_array[MAX_NR_CPUS];
static unsigned long mmap_read_head(struct mmap_data *md)
{
	struct perf_event_mmap_page *pc = md->base;
	long head;

	head = pc->data_head;
	rmb();

	return head;
}

static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	pc->data_tail = tail;
}
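
/*
 * The mmap'ed ring buffer contract: the kernel advances ->data_head as
 * it writes events, and user space advances ->data_tail once it has
 * consumed them.  A minimal consumer sketch (illustrative only, not
 * part of this file; consume_event() is a hypothetical helper):
 *
 *	u64 head = pc->data_head;
 *	rmb();				// order the head read vs. data reads
 *	while (tail != head)
 *		tail += consume_event(data + (tail & mask));
 *	pc->data_tail = tail;		// let the kernel reuse the space
 */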
static void advance_output(size_t size)
{
	bytes_written += size;
}

static void write_output(void *buf, size_t size)
{
	while (size) {
		int ret = write(output, buf, size);

		if (ret < 0)
			die("failed to write");

		size -= ret;
		buf += ret;

		bytes_written += ret;
	}
}

static int process_synthesized_event(event_t *event,
				     struct sample_data *sample __used,
				     struct perf_session *self __used)
{
	write_output(event, event->header.size);
	return 0;
}
static void mmap_read(struct mmap_data *md)
{
	unsigned int head = mmap_read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int diff;

	/*
	 * If we're further behind than half the buffer, there's a chance
	 * the writer will bite our tail and mess up the samples under us.
	 *
	 * If we somehow ended up ahead of the head, we got messed up.
	 *
	 * In either case, truncate and restart at head.
	 */
	diff = head - old;
	if (diff < 0) {
		fprintf(stderr, "WARNING: failed to keep up with mmap data\n");
		/*
		 * head points to a known good entry, start there.
		 */
		old = head;
	}

	if (old != head)
		samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		write_output(buf, size);
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	write_output(buf, size);

	md->prev = old;
	mmap_write_tail(md, old);
}
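
/*
 * Worked example of the wrap-around split above (illustrative numbers):
 * with mask == 0xffff (16 data pages of 4KiB), old == 0xfff0 and
 * head == 0x10010, the first write_output() flushes the 16 bytes up to
 * the end of the buffer and the second one the 16 bytes that wrapped
 * around to the start.
 */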
static volatile int done = 0;
static volatile int signr = -1;

static void sig_handler(int sig)
{
	done = 1;
	signr = sig;
}

static void sig_atexit(void)
{
	if (child_pid > 0)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
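
/*
 * Re-raising the signal with its default disposition restored makes the
 * shell see perf itself as terminated by that signal, rather than the
 * signal being silently swallowed by the handler above.
 */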
static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
{
	struct perf_header_attr *h_attr;

	if (nr < session->header.attrs) {
		h_attr = session->header.attr[nr];
	} else {
		h_attr = perf_header_attr__new(a);
		if (h_attr != NULL)
			if (perf_header__add_attr(&session->header, h_attr) < 0) {
				perf_header_attr__delete(h_attr);
				h_attr = NULL;
			}
	}

	return h_attr;
}
static void create_counter(int counter, int cpu)
{
	char *filter = filters[counter];
	struct perf_event_attr *attr = attrs + counter;
	struct perf_header_attr *h_attr;
	int track = !counter; /* only the first counter needs these */
	int thread_index;
	int ret;
	struct {
		u64 count;
		u64 time_enabled;
		u64 time_running;
		u64 id;
	} read_data;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING |
			    PERF_FORMAT_ID;

	attr->sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	if (nr_counters > 1)
		attr->sample_type |= PERF_SAMPLE_ID;

	/*
	 * Give events that lack one a default sample period/frequency,
	 * but keep it a weak default that user-supplied values override.
	 */
	if (!attr->sample_period || (user_freq != UINT_MAX &&
				     user_interval != ULLONG_MAX)) {
		if (freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq = 1;
			attr->sample_freq = freq;
		} else {
			attr->sample_period = default_interval;
		}
	}
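
	/*
	 * Note: attr->freq selects between the kernel's two sampling
	 * modes: with it set, sample_freq is a target rate in Hz and
	 * the kernel auto-tunes the period; without it, sample_period
	 * is a fixed event count (e.g. -F 1000 vs. -c 100000 on the
	 * command line).
	 */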
	if (no_samples)
		attr->sample_freq = 0;

	if (inherit_stat)
		attr->inherit_stat = 1;

	if (sample_address) {
		attr->sample_type |= PERF_SAMPLE_ADDR;
		attr->mmap_data = track;
	}

	if (call_graph)
		attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

	if (system_wide)
		attr->sample_type |= PERF_SAMPLE_CPU;

	if (raw_samples) {
		attr->sample_type |= PERF_SAMPLE_TIME;
		attr->sample_type |= PERF_SAMPLE_RAW;
		attr->sample_type |= PERF_SAMPLE_CPU;
	}

	sample_type = attr->sample_type;

	attr->mmap = track;
	attr->comm = track;
	attr->inherit = !no_inherit;
	if (target_pid == -1 && target_tid == -1 && !system_wide) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}
	for (thread_index = 0; thread_index < thread_num; thread_index++) {
try_again:
		fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr,
				all_tids[thread_index], cpu, group_fd, 0);

		if (fd[nr_cpu][counter][thread_index] < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
				    "\t Consider tweaking"
				    " /proc/sys/kernel/perf_event_paranoid.\n");
			else if (err == ENODEV && cpu_list) {
				die("No such device - did you specify"
				    " an out-of-range profile CPU?\n");
			}

			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE
			    && attr->config == PERF_COUNT_HW_CPU_CYCLES) {
				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");
				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
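
			/*
			 * If we get here, either the cpu-clock fallback
			 * (a timer-based software event that needs no
			 * PMU) did not apply, or the reopened event
			 * failed as well; everything below is a fatal
			 * error path.
			 */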
			printf("\n");
			error("sys_perf_event_open() syscall returned with %d (%s).  /bin/dmesg may provide additional information.\n",
			      fd[nr_cpu][counter][thread_index], strerror(err));

#if defined(__i386__) || defined(__x86_64__)
			if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
				die("No hardware sampling interrupt available."
				    " No APIC? If so then you can boot the kernel"
				    " with the \"lapic\" boot parameter to"
				    " force-enable it.\n");
#endif

			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
		}

		h_attr = get_header_attr(attr, counter);
		if (h_attr == NULL)
			die("nomem\n");

		if (!file_new) {
			if (memcmp(&h_attr->attr, attr, sizeof(*attr))) {
				fprintf(stderr, "incompatible append\n");
				exit(-1);
			}
		}

		if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
			perror("Unable to read perf file descriptor");
			exit(-1);
		}

		if (perf_header_attr__add_id(h_attr, read_data.id) < 0) {
			pr_warning("Not enough memory to add id\n");
			exit(-1);
		}

		assert(fd[nr_cpu][counter][thread_index] >= 0);
		fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK);
		/*
		 * First counter acts as the group leader:
		 */
		if (group && group_fd == -1)
			group_fd = fd[nr_cpu][counter][thread_index];

		if (counter || thread_index) {
			ret = ioctl(fd[nr_cpu][counter][thread_index],
				    PERF_EVENT_IOC_SET_OUTPUT,
				    fd[nr_cpu][0][0]);
			if (ret) {
				error("failed to set output: %d (%s)\n", errno,
				      strerror(errno));
				exit(-1);
			}
		} else {
			mmap_array[nr_cpu].counter = counter;
			mmap_array[nr_cpu].prev = 0;
			mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
			mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
					PROT_READ|PROT_WRITE, MAP_SHARED,
					fd[nr_cpu][counter][thread_index], 0);
			if (mmap_array[nr_cpu].base == MAP_FAILED) {
				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
				exit(-1);
			}

			event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
			event_array[nr_poll].events = POLLIN;
			nr_poll++;
		}
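
		/*
		 * Only the first event on each CPU gets an mmap buffer;
		 * all the others are redirected into it via
		 * PERF_EVENT_IOC_SET_OUTPUT above.  The extra page is
		 * the perf_event_mmap_page control page, and the mask
		 * trick assumes mmap_pages is a power of two.
		 */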
		if (filter != NULL) {
			ret = ioctl(fd[nr_cpu][counter][thread_index],
				    PERF_EVENT_IOC_SET_FILTER, filter);
			if (ret) {
				error("failed to set filter with %d (%s)\n", errno,
				      strerror(errno));
				exit(-1);
			}
		}
	}
}
static void open_counters(int cpu)
{
	int counter;

	group_fd = -1;
	for (counter = 0; counter < nr_counters; counter++)
		create_counter(counter, cpu);

	nr_cpu++;
}
static int process_buildids(void)
{
	u64 size = lseek(output, 0, SEEK_CUR);

	if (size == 0)
		return 0;

	session->fd = output;
	return __perf_session__process_events(session, post_processing_offset,
					      size - post_processing_offset,
					      size, &build_id__mark_dso_hit_ops);
}
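
/*
 * perf.data is effectively written in two passes: samples stream into
 * the file while the workload runs, then process_buildids() re-reads
 * the recorded events so that the header lists build-ids only for the
 * DSOs that were actually hit by samples.
 */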
static void atexit_header(void)
{
	if (!pipe_output) {
		session->header.data_size += bytes_written;

		if (!no_buildid)
			process_buildids();
		perf_header__write(&session->header, output, true);
		perf_session__delete(session);
		symbol__exit();
	}
}
static void event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_session *psession = data;

	if (machine__is_host(machine))
		return;

	/*
	 * For guest kernels, when processing the record & report
	 * subcommands, we synthesize the module mmaps before the guest
	 * kernel mmap and trigger a DSO preload, because by default
	 * guest module symbols are loaded from the guest's kallsyms
	 * rather than from /lib/modules/XXX/XXX.  This avoids missing
	 * symbols when the first sampled address falls in a module
	 * rather than in the guest kernel proper.
	 */
	err = event__synthesize_modules(process_synthesized_event,
					psession, machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We fall back to _stext for the guest kernel because the
	 * guest's /proc/kallsyms sometimes has no _text symbol.
	 */
	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    psession, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    psession, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};
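
/*
 * PERF_RECORD_FINISHED_ROUND marks a point where every per-CPU buffer
 * has been drained once, so report-side tooling can safely sort and
 * flush all events older than the previous round, bounding the
 * reordering window across CPUs.
 */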
static void mmap_read_all(void)
{
	int i;

	for (i = 0; i < nr_cpu; i++) {
		if (mmap_array[i].base)
			mmap_read(&mmap_array[i]);
	}

	if (perf_header__has_feat(&session->header, HEADER_TRACE_INFO))
		write_output(&finished_round_event, sizeof(finished_round_event));
}
static int __cmd_record(int argc, const char **argv)
{
	int i, counter;
	struct stat st;
	int flags;
	int err;
	unsigned long waking = 0;
	int child_ready_pipe[2], go_pipe[2];
	const bool forks = argc > 0;
	char buf;
	struct machine *machine;

	page_size = sysconf(_SC_PAGE_SIZE);

	atexit(sig_atexit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);

	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
		perror("failed to create pipes");
		exit(-1);
	}
	if (!strcmp(output_name, "-"))
		pipe_output = 1;
	else if (!stat(output_name, &st) && st.st_size) {
		if (write_mode == WRITE_FORCE) {
			char oldname[PATH_MAX];
			snprintf(oldname, sizeof(oldname), "%s.old",
				 output_name);
			unlink(oldname);
			rename(output_name, oldname);
		}
	} else if (write_mode == WRITE_APPEND) {
		write_mode = WRITE_FORCE;
	}

	flags = O_CREAT|O_RDWR;
	if (write_mode == WRITE_APPEND)
		file_new = 0;
	else
		flags |= O_TRUNC;

	if (pipe_output)
		output = STDOUT_FILENO;
	else
		output = open(output_name, flags, S_IRUSR | S_IWUSR);
	if (output < 0) {
		perror("failed to create output file");
		exit(-1);
	}
	session = perf_session__new(output_name, O_WRONLY,
				    write_mode == WRITE_FORCE, false);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	if (!no_buildid)
		perf_header__set_feat(&session->header, HEADER_BUILD_ID);

	if (!file_new) {
		err = perf_header__read(session, output);
		if (err < 0)
			goto out_delete_session;
	}

	if (have_tracepoints(attrs, nr_counters))
		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);

	/*
	 * perf_session__delete(session) will be called at atexit_header()
	 */
	atexit(atexit_header);
587 perror("failed to fork");
594 close(child_ready_pipe[0]);
596 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
599 * Do a dummy execvp to get the PLT entry resolved,
600 * so we avoid the resolver overhead on the real
603 execvp("", (char **)argv);
606 * Tell the parent we're ready to go
608 close(child_ready_pipe[1]);
611 * Wait until the parent tells us to go.
613 if (read(go_pipe[0], &buf, 1) == -1)
614 perror("unable to read pipe");
616 execvp(argv[0], (char **)argv);
622 if (!system_wide && target_tid == -1 && target_pid == -1)
623 all_tids[0] = child_pid;
625 close(child_ready_pipe[1]);
628 * wait for child to settle
630 if (read(child_ready_pipe[0], &buf, 1) == -1) {
631 perror("unable to read pipe");
634 close(child_ready_pipe[0]);
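
	/*
	 * The child now sits blocked in the read() on go_pipe, with no
	 * counters attached yet; below, the parent opens them on the
	 * child (with enable_on_exec set), writes the file headers, and
	 * only then closes go_pipe[1] to let the child exec the real
	 * workload, so counting starts exactly at the exec.
	 */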
	nr_cpus = read_cpu_map(cpu_list);
	if (nr_cpus < 1) {
		perror("failed to collect number of CPUs");
		return -1;
	}

	if (!system_wide && no_inherit && !cpu_list) {
		open_counters(-1);
	} else {
		for (i = 0; i < nr_cpus; i++)
			open_counters(cpumap[i]);
	}

	perf_session__set_sample_type(session, sample_type);

	if (pipe_output) {
		err = perf_header__write_pipe(output);
		if (err < 0)
			return err;
	} else if (file_new) {
		err = perf_header__write(&session->header, output, false);
		if (err < 0)
			return err;
	}

	post_processing_offset = lseek(output, 0, SEEK_CUR);
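
	/*
	 * In pipe mode there is no seekable header to patch up at exit,
	 * so everything report needs up front (attrs, event types,
	 * tracing data) is synthesized as in-stream events below
	 * instead.
	 */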
	if (pipe_output) {
		err = event__synthesize_attrs(&session->header,
					      process_synthesized_event,
					      session);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}

		err = event__synthesize_event_types(process_synthesized_event,
						    session);
		if (err < 0) {
			pr_err("Couldn't synthesize event_types.\n");
			return err;
		}

		if (have_tracepoints(attrs, nr_counters)) {
			/*
			 * FIXME: err <= 0 here actually means that there
			 * were no tracepoints, so it's not really an
			 * error, just that we don't need to synthesize
			 * anything.  We really have to return this more
			 * properly and also propagate errors that now
			 * are calling die().
			 */
			err = event__synthesize_tracing_data(output, attrs,
							     nr_counters,
							     process_synthesized_event,
							     session);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				return err;
			}
			advance_output(err);
		}
	}
	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("Couldn't find native kernel information.\n");
		return -1;
	}

	err = event__synthesize_kernel_mmap(process_synthesized_event,
					    session, machine, "_text");
	if (err < 0)
		err = event__synthesize_kernel_mmap(process_synthesized_event,
						    session, machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = event__synthesize_modules(process_synthesized_event,
					session, machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest)
		perf_session__process_machines(session,
					       event__synthesize_guest_os);

	if (!system_wide)
		event__synthesize_thread(target_tid, process_synthesized_event,
					 session);
	else
		event__synthesize_threads(process_synthesized_event, session);
	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	/*
	 * Let the child rip.
	 */
	if (forks)
		close(go_pipe[1]);
	for (;;) {
		int hits = samples;
		int thread;

		mmap_read_all();

		if (hits == samples) {
			if (done)
				break;
			err = poll(event_array, nr_poll, -1);
			waking++;
		}

		/*
		 * Once we are done, disable every counter so no more
		 * samples trickle in while the next mmap_read_all()
		 * pass drains the buffers one last time.
		 */
		if (done) {
			for (i = 0; i < nr_cpu; i++) {
				for (counter = 0;
				     counter < nr_counters;
				     counter++) {
					for (thread = 0;
					     thread < thread_num;
					     thread++)
						ioctl(fd[i][counter][thread],
						      PERF_EVENT_IOC_DISABLE);
				}
			}
		}
	}
	if (quiet)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
		(double)bytes_written / 1024.0 / 1024.0,
		output_name,
		bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}
static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
static bool force, append_file;

const struct option record_options[] = {
	OPT_CALLBACK('e', "event", NULL, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_CALLBACK(0, "filter", NULL, "filter",
		     "event filter", parse_filter),
	OPT_INTEGER('p', "pid", &target_pid,
		    "record events on existing process id"),
	OPT_INTEGER('t', "tid", &target_tid,
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('A', "append", &append_file,
		    "append to the output file to do incremental profiling"),
	OPT_STRING('C', "cpu", &cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_BOOLEAN('f', "force", &force,
		    "overwrite existing data file (deprecated)"),
	OPT_U64('c', "count", &user_interval, "event period to sample"),
	OPT_STRING('o', "output", &output_name, "file",
		   "output file name"),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &user_freq, "profile at this frequency"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_BOOLEAN('g', "call-graph", &call_graph,
		    "do call-graph (stack chain/backtrace) recording"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('n', "no-samples", &no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_END()
};
int cmd_record(int argc, const char **argv, const char *prefix __used)
{
	int i, j, err = -ENOMEM;

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target_pid == -1 && target_tid == -1 &&
	    !system_wide && !cpu_list)
		usage_with_options(record_usage, record_options);

	if (force && append_file) {
		fprintf(stderr, "Can't overwrite and append at the same time."
			" You need to choose between -f and -A");
		usage_with_options(record_usage, record_options);
	} else if (append_file) {
		write_mode = WRITE_APPEND;
	} else {
		write_mode = WRITE_FORCE;
	}

	symbol__init();
	if (no_buildid_cache || no_buildid)
		disable_buildid_cache();

	if (!nr_counters) {
		nr_counters = 1;
		attrs[0].type = PERF_TYPE_HARDWARE;
		attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
	}

	if (target_pid != -1) {
		target_tid = target_pid;
		thread_num = find_all_tid(target_pid, &all_tids);
		if (thread_num <= 0) {
			fprintf(stderr, "Can't find all threads of pid %d\n",
				target_pid);
			usage_with_options(record_usage, record_options);
		}
	} else {
		all_tids = malloc(sizeof(pid_t));
		if (!all_tids)
			goto out_symbol_exit;

		all_tids[0] = target_tid;
		thread_num = 1;
	}

	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++) {
			fd[i][j] = malloc(sizeof(int)*thread_num);
			if (!fd[i][j])
				goto out_free_fd;
		}
	}
	event_array = malloc(
		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
	if (!event_array)
		goto out_free_fd;

	if (user_interval != ULLONG_MAX)
		default_interval = user_interval;
	if (user_freq != UINT_MAX)
		freq = user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		freq = 0;
	else if (freq) {
		default_interval = freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		err = -EINVAL;
		goto out_free_event_array;
	}
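
	/*
	 * From here on, freq and default_interval are mutually
	 * exclusive: -c <count> (a fixed period) zeroes freq, otherwise
	 * the -F value (default 1000 Hz) drives frequency-based
	 * sampling; both being zero was rejected above.
	 */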
	err = __cmd_record(argc, argv);

out_free_event_array:
	free(event_array);
out_free_fd:
	for (i = 0; i < MAX_NR_CPUS; i++) {
		for (j = 0; j < MAX_COUNTERS; j++)
			free(fd[i][j]);
	}
	free(all_tids);
	all_tids = NULL;
out_symbol_exit:
	symbol__exit();
	return err;
}