/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
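/*
 * Both helpers index an evsel's per-event state by (cpu, thread): FD()
 * yields the file descriptor sys_perf_event_open() returned for that
 * pair, SID() the struct perf_sample_id slot used to hash event IDs
 * back to their evsel.
 */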
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}
void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
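/*
 * Usage sketch, kept under #if 0 so it stays out of the build: a
 * hypothetical helper showing the expected call order for creating an
 * evlist with the default cycles event. The caller owns cpus/threads.
 */
#if 0
static struct perf_evlist *cycles_evlist__new(struct cpu_map *cpus,
					      struct thread_map *threads)
{
	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);

	if (evlist == NULL)
		return NULL;

	if (perf_evlist__add_default(evlist)) {	/* -ENOMEM on failure */
		perf_evlist__delete(evlist);
		return NULL;
	}
	return evlist;
}
#endif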
int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
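/*
 * Sketch of a batch add through perf_evlist__add_attrs(); the two
 * software counters are just examples, every other attr field keeps
 * its zeroed default.
 */
#if 0
	struct perf_event_attr sw_attrs[] = {
		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
	};

	if (perf_evlist__add_attrs(evlist, sw_attrs, ARRAY_SIZE(sw_attrs)) < 0)
		return -1;	/* the partial list was already deleted */
#endif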
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
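/*
 * E.g. for "sched:sched_switch" the path built above is
 * <tracing_events_path>/sched:sched_switch/id, and the colon rewrite
 * turns it into .../events/sched/sched_switch/id before the open().
 */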
int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);
		if (err < 0)
			goto out_free_attrs;

		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
		attrs[i].config	       = err;
		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}
static struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
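/*
 * Sketch: a handler table as a caller would pass it in. The handler
 * names are hypothetical; they must match whatever type the tool later
 * casts evsel->handler.func to when it delivers samples.
 */
#if 0
	static const struct perf_evsel_str_handler assocs[] = {
		{ "sched:sched_switch", process_sched_switch },
		{ "sched:sched_wakeup", process_sched_wakeup },
	};

	err = perf_evlist__set_tracepoints_handlers(evlist, assocs,
						    ARRAY_SIZE(assocs));
#endif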
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}
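/*
 * Layout of the record read() returns above, per perf_event_open(2)
 * (non-grouped case), which is what the id_idx adjustment walks:
 *
 *	struct read_format {
 *		u64 value;
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		u64 id;			// if PERF_FORMAT_ID
 *	};
 */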
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
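/*
 * Sketch of the consumer side: drain every map after poll() flags
 * activity. With overwrite == false the tail is advanced for us above,
 * so each event is returned exactly once.
 */
#if 0
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* dispatch on event->header.type */
		}
	}
#endif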
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}
/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
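/*
 * Worked example: with 4 kiB pages and pages == 128 (a power of two),
 * mask is 512 kiB - 1 and mmap_len spans 129 pages: one header page
 * for the ring-buffer control data plus the 512 kiB data ring.
 */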
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	evlist->threads = NULL;
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int cpu, thread, err, fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
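/*
 * evsel->filter holds a tracepoint filter expression, e.g.
 * "common_pid != 1234"; the ioctl installs it kernel-side, so records
 * failing the filter are never written to the ring buffer.
 */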
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}
u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}
u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}
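/*
 * Sketch of the usual bring-up order once the evlist is populated;
 * 'opts' stands in for a struct perf_record_opts the caller owns, and
 * the field names here are illustrative.
 */
#if 0
	if (perf_evlist__open(evlist, opts->group) < 0)
		goto out_err;

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0)
		goto out_err;

	perf_evlist__enable(evlist);
#endif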
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
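/*
 * Sketch: how a record-style caller pairs the two workload calls. The
 * forked child sits blocked on the "go" pipe, so nothing the workload
 * does escapes the counters set up in between.
 */
#if 0
	if (perf_evlist__prepare_workload(evlist, opts, argv) < 0)
		return -1;

	/* ... perf_evlist__open(), perf_evlist__mmap(), enable ... */

	perf_evlist__start_workload(evlist);	/* uncork: the child execs argv[0] */
#endif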