/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}
struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
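/*
 * Example usage (illustrative sketch, not part of this file): the minimal
 * create/destroy cycle a caller goes through:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	...
 *	perf_evlist__delete(evlist);
 */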
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}
static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}
void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}
void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}
int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}
struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}
int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
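/*
 * Example usage (illustrative sketch): adding a tracepoint together with its
 * handler; 'trace__sched_switch' is a hypothetical callback, not defined in
 * this file:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   trace__sched_switch))
 *		goto out_error;
 */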
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}
void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}
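/*
 * Illustrative sketch of how these two are commonly paired: the events are
 * typically opened disabled, the workload is started, and only then is the
 * whole list switched on, so that setup cost is not measured:
 *
 *	perf_evlist__start_workload(evlist);
 *	perf_evlist__enable(evlist);
 *	...
 *	perf_evlist__disable(evlist);
 */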
int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}
void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}
static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
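/*
 * Example usage (illustrative sketch): a consumer loop over one mmap,
 * assuming the evlist was mmapped with overwrite == false so that the
 * perf_mmap__write_tail() above acks consumed events back to the kernel;
 * 'deliver' is a hypothetical callback:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		struct perf_sample sample;
 *
 *		if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *			deliver(event, &sample);
 *	}
 */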
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}
static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}
static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
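/*
 * Worked example: with 4 KiB pages, the UINT_MAX default above becomes
 * (512 * 1024) / 4096 = 128 data pages, and the function returns
 * (128 + 1) * 4096 bytes; the extra page in front holds the ring buffer
 * control header (struct perf_event_mmap_page).
 */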
int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int pages, val, *mmap_pages = opt->value;
	size_t size;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -1;

	val = parse_tag_value(str, tags);
	if (val != (unsigned int) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
		if (!is_power_of_2(pages)) {
			pages = next_pow2(pages);
			pr_info("rounding mmap pages size to %u (%u pages)\n",
				pages * page_size, pages);
		}
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0') {
			pr_err("failed to parse --mmap_pages/-m value\n");
			return -1;
		}
	}

	size = perf_evlist__mmap_size(pages);
	if (!size) {
		pr_err("--mmap_pages/-m value must be a power of two.\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}
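/*
 * Examples of accepted values (sketch): "-m 16" is a page count and must be
 * a power of two; "-m 512K" is a size, which gets aligned to the page size
 * and rounded up to the next power-of-two page count if necessary.
 */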
/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mask = evlist->mmap_len - page_size - 1;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
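/*
 * Example usage (illustrative sketch): the usual setup sequence around this
 * function, with error handling elided and 'opts' standing in for a
 * hypothetical tool-options struct:
 *
 *	perf_evlist__create_maps(evlist, &opts->target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 */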
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}
int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}
int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}
bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}
u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}
bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.sample_id_all;
}
void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}
void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}
int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
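/*
 * Illustrative sketch of how the two workload helpers pair up in a
 * record-style tool: the forked child stays corked on the 'go' pipe while
 * the counters are opened and mmapped, then the cork is pulled:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	...
 *	perf_evlist__start_workload(evlist);
 */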
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}
size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}