/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
                       struct thread_map *threads)
{
        int i;

        for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
                INIT_HLIST_HEAD(&evlist->heads[i]);
        INIT_LIST_HEAD(&evlist->entries);
        perf_evlist__set_maps(evlist, cpus, threads);
        evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
        struct perf_evlist *evlist = zalloc(sizeof(*evlist));

        if (evlist != NULL)
                perf_evlist__init(evlist, NULL, NULL);

        return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
        struct perf_evlist *evlist = perf_evlist__new();

        if (evlist && perf_evlist__add_default(evlist)) {
                perf_evlist__delete(evlist);
                evlist = NULL;
        }

        return evlist;
}
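
/*
 * Example (a minimal sketch, not part of this file's API): typical use of
 * the constructor pair above.  Error handling beyond the NULL check is
 * elided.
 *
 *      struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *      if (evlist == NULL)
 *              return -ENOMEM;
 *      // ... use the single "cycles" event added by perf_evlist__add_default()
 *      perf_evlist__delete(evlist);
 */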

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);

        evlist->id_pos = first->id_pos;
        evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node)
                perf_evsel__calc_id_pos(evsel);

        perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
        struct perf_evsel *pos, *n;

        list_for_each_entry_safe(pos, n, &evlist->entries, node) {
                list_del_init(&pos->node);
                perf_evsel__delete(pos);
        }

        evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
        free(evlist->mmap);
        free(evlist->pollfd);
        evlist->mmap = NULL;
        evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
        perf_evlist__purge(evlist);
        perf_evlist__exit(evlist);
        free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
        list_add_tail(&entry->node, &evlist->entries);
        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
                                   struct list_head *list,
                                   int nr_entries)
{
        bool set_id_pos = !evlist->nr_entries;

        list_splice_tail(list, &evlist->entries);
        evlist->nr_entries += nr_entries;
        if (set_id_pos)
                perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
        struct perf_evsel *evsel, *leader;

        leader = list_entry(list->next, struct perf_evsel, node);
        evsel = list_entry(list->prev, struct perf_evsel, node);

        leader->nr_members = evsel->idx - leader->idx + 1;

        list_for_each_entry(evsel, list, node) {
                evsel->leader = leader;
        }
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
        if (evlist->nr_entries) {
                evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
                __perf_evlist__set_leader(&evlist->entries);
        }
}
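
/*
 * Example (sketch): after populating the list, e.g. with parse_events()
 * from parse-events.h, a caller can turn all current entries into a single
 * group before opening it:
 *
 *      perf_evlist__set_leader(evlist);
 *      // the first evsel becomes the leader, nr_members == nr_entries,
 *      // and every evsel->leader now points at it.
 */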

int perf_evlist__add_default(struct perf_evlist *evlist)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_HARDWARE,
                .config = PERF_COUNT_HW_CPU_CYCLES,
        };
        struct perf_evsel *evsel;

        event_attr_init(&attr);

        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL)
                goto error;

        /* use strdup() because free(evsel) assumes name is allocated */
        evsel->name = strdup("cycles");
        if (!evsel->name)
                goto error_free;

        perf_evlist__add(evlist, evsel);
        return 0;
error_free:
        perf_evsel__delete(evsel);
error:
        return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                                  struct perf_event_attr *attrs, size_t nr_attrs)
{
        struct perf_evsel *evsel, *n;
        LIST_HEAD(head);
        size_t i;

        for (i = 0; i < nr_attrs; i++) {
                evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
                if (evsel == NULL)
                        goto out_delete_partial_list;
                list_add_tail(&evsel->node, &head);
        }

        perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

        return 0;

out_delete_partial_list:
        list_for_each_entry_safe(evsel, n, &head, node)
                perf_evsel__delete(evsel);
        return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
                                     struct perf_event_attr *attrs, size_t nr_attrs)
{
        size_t i;

        for (i = 0; i < nr_attrs; i++)
                event_attr_init(attrs + i);

        return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
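
/*
 * Example (sketch, builtin-stat.c style): bulk setup from an attr array.
 * This assumes the perf_evlist__add_default_attrs() wrapper in evlist.h,
 * which passes ARRAY_SIZE(array) as nr_attrs:
 *
 *      struct perf_event_attr attrs[] = {
 *              { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *              { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
 *      };
 *
 *      if (perf_evlist__add_default_attrs(evlist, attrs) < 0)
 *              return -1;
 */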

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
        }

        return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
                                     const char *name)
{
        struct perf_evsel *evsel;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
                    (strcmp(evsel->name, name) == 0))
                        return evsel;
        }

        return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
                           const char *sys, const char *name, void *handler)
{
        struct perf_evsel *evsel;

        evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
        if (evsel == NULL)
                return -1;

        evsel->handler.func = handler;
        perf_evlist__add(evlist, evsel);
        return 0;
}
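
/*
 * Example (sketch): register a tracepoint event together with its callback;
 * the callback lands in evsel->handler.func and the tool's sample loop
 * invokes it for matching events.  process_sched_switch is a hypothetical
 * caller-side handler.
 *
 *      if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *                                 process_sched_switch) < 0)
 *              return -1;
 */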

void perf_evlist__disable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
        int cpu, thread;
        struct perf_evsel *pos;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                list_for_each_entry(pos, &evlist->entries, node) {
                        if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                                continue;
                        for (thread = 0; thread < nr_threads; thread++)
                                ioctl(FD(pos, cpu, thread),
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return 0;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_DISABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
                              struct perf_evsel *evsel)
{
        int cpu, thread, err;

        if (!evsel->fd)
                return -EINVAL;

        for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
                for (thread = 0; thread < evlist->threads->nr; thread++) {
                        err = ioctl(FD(evsel, cpu, thread),
                                    PERF_EVENT_IOC_ENABLE, 0);
                        if (err)
                                return err;
                }
        }
        return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);
        int nfds = nr_cpus * nr_threads * evlist->nr_entries;
        evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
        return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
        fcntl(fd, F_SETFL, O_NONBLOCK);
        evlist->pollfd[evlist->nr_fds].fd = fd;
        evlist->pollfd[evlist->nr_fds].events = POLLIN;
        evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
                                 struct perf_evsel *evsel,
                                 int cpu, int thread, u64 id)
{
        int hash;
        struct perf_sample_id *sid = SID(evsel, cpu, thread);

        sid->id = id;
        sid->evsel = evsel;
        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
        hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
                         int cpu, int thread, u64 id)
{
        perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
        evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
                                  struct perf_evsel *evsel,
                                  int cpu, int thread, int fd)
{
        u64 read_data[4] = { 0, };
        int id_idx = 1; /* The first entry is the counter value */
        u64 id;
        int ret;

        ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
        if (!ret)
                goto add;

        if (errno != ENOTTY)
                return -1;

        /* Legacy way to get event id. All hail to old kernels! */

        /*
         * This way does not work with group format read, so bail
         * out in that case.
         */
        if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
                return -1;

        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
            read(fd, &read_data, sizeof(read_data)) == -1)
                return -1;

        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;

        id = read_data[id_idx];

 add:
        perf_evlist__id_add(evlist, evsel, cpu, thread, id);
        return 0;
}
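
/*
 * For reference (derived from the read_format handling above): the legacy
 * read() fallback works because, without PERF_FORMAT_GROUP, the kernel lays
 * out the read buffer as
 *
 *      u64 value;
 *      u64 time_enabled;  // only if PERF_FORMAT_TOTAL_TIME_ENABLED
 *      u64 time_running;  // only if PERF_FORMAT_TOTAL_TIME_RUNNING
 *      u64 id;            // only if PERF_FORMAT_ID
 *
 * so id_idx starts at 1 and is bumped once per timing field present.
 */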

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node)
                if (sid->id == id)
                        return sid;

        return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
        struct perf_sample_id *sid;

        if (evlist->nr_entries == 1)
                return perf_evlist__first(evlist);

        sid = perf_evlist__id2sid(evlist, id);
        if (sid)
                return sid->evsel;

        if (!perf_evlist__sample_id_all(evlist))
                return perf_evlist__first(evlist);

        return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
                                 union perf_event *event, u64 *id)
{
        const u64 *array = event->sample.array;
        ssize_t n;

        n = (event->header.size - sizeof(event->header)) >> 3;

        if (event->header.type == PERF_RECORD_SAMPLE) {
                if (evlist->id_pos >= n)
                        return -1;
                *id = array[evlist->id_pos];
        } else {
                if (evlist->is_pos > n)
                        return -1;
                n -= evlist->is_pos;
                *id = array[n];
        }
        return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
                                                   union perf_event *event)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct hlist_head *head;
        struct perf_sample_id *sid;
        int hash;
        u64 id;

        if (evlist->nr_entries == 1)
                return first;

        if (!first->attr.sample_id_all &&
            event->header.type != PERF_RECORD_SAMPLE)
                return first;

        if (perf_evlist__event2id(evlist, event, &id))
                return NULL;

        /* Synthesized events have an id of zero */
        if (!id)
                return first;

        hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
        head = &evlist->heads[hash];

        hlist_for_each_entry(sid, head, node) {
                if (sid->id == id)
                        return sid->evsel;
        }
        return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
        struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;

        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
                 * the writer will bite our tail and mess up the samples under us.
                 *
                 * If we somehow ended up ahead of the head, we got messed up.
                 *
                 * In either case, truncate and restart at head.
                 */
                int diff = head - old;
                if (diff > md->mask / 2 || diff < 0) {
                        fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

                        /*
                         * head points to a known good entry, start there.
                         */
                        old = head;
                }
        }

        if (old != head) {
                size_t size;

                event = (union perf_event *)&data[old & md->mask];
                size = event->header.size;

                /*
                 * Event straddles the mmap boundary -- header should always
                 * be inside due to u64 alignment of output.
                 */
                if ((old & md->mask) + size != ((old + size) & md->mask)) {
                        unsigned int offset = old;
                        unsigned int len = min(sizeof(*event), size), cpy;
                        void *dst = &md->event_copy;

                        do {
                                cpy = min(md->mask + 1 - (offset & md->mask), len);
                                memcpy(dst, &data[offset & md->mask], cpy);
                                offset += cpy;
                                dst += cpy;
                                len -= cpy;
                        } while (len);

                        event = &md->event_copy;
                }

                old += size;
        }

        md->prev = old;

        if (!evlist->overwrite)
                perf_mmap__write_tail(md, old);

        return event;
}
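
/*
 * Example (sketch): the canonical consumer loop built on the reader above,
 * as used by the record/test code of this era.  done is a caller-side flag.
 *
 *      while (!done) {
 *              union perf_event *event;
 *              int i;
 *
 *              for (i = 0; i < evlist->nr_mmaps; i++) {
 *                      while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *                              // ... consume, e.g. via perf_evlist__parse_sample()
 *                      }
 *              }
 *              poll(evlist->pollfd, evlist->nr_fds, -1);
 *      }
 */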

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
        }
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
        int i;

        for (i = 0; i < evlist->nr_mmaps; i++)
                __perf_evlist__munmap(evlist, i);

        free(evlist->mmap);
        evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
        evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
                               int idx, int prot, int mask, int fd)
{
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                evlist->mmap[idx].base = NULL;
                return -1;
        }

        perf_evlist__add_pollfd(evlist, fd);
        return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per cpu\n");
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;

                for (thread = 0; thread < nr_threads; thread++) {
                        list_for_each_entry(evsel, &evlist->entries, node) {
                                int fd = FD(evsel, cpu, thread);

                                if (output == -1) {
                                        output = fd;
                                        if (__perf_evlist__mmap(evlist, cpu,
                                                                prot, mask, output) < 0)
                                                goto out_unmap;
                                } else {
                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                                goto out_unmap;
                                }

                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                                    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
                                        goto out_unmap;
                        }
                }
        }

        return 0;

out_unmap:
        for (cpu = 0; cpu < nr_cpus; cpu++)
                __perf_evlist__munmap(evlist, cpu);
        return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
        struct perf_evsel *evsel;
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);

        pr_debug2("perf event ring buffer mmapped per thread\n");
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;

                list_for_each_entry(evsel, &evlist->entries, node) {
                        int fd = FD(evsel, 0, thread);

                        if (output == -1) {
                                output = fd;
                                if (__perf_evlist__mmap(evlist, thread,
                                                        prot, mask, output) < 0)
                                        goto out_unmap;
                        } else {
                                if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
                                        goto out_unmap;
                        }

                        if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                            perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
                                goto out_unmap;
                }
        }

        return 0;

out_unmap:
        for (thread = 0; thread < nr_threads; thread++)
                __perf_evlist__munmap(evlist, thread);
        return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
        /* 512 kiB: default amount of unprivileged mlocked memory */
        if (pages == UINT_MAX)
                pages = (512 * 1024) / page_size;
        else if (!is_power_of_2(pages))
                return 0;

        return (pages + 1) * page_size;
}
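
/*
 * Worked example (assuming a 4 KiB page_size): the UINT_MAX default gives
 * pages = 512 KiB / 4 KiB = 128 data pages, so the returned length is
 * (128 + 1) * 4096 = 528384 bytes -- the extra page holds the
 * perf_event_mmap_page control header that precedes the ring buffer.
 */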

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
                                  int unset __maybe_unused)
{
        unsigned int pages, val, *mmap_pages = opt->value;
        size_t size;
        static struct parse_tag tags[] = {
                { .tag  = 'B', .mult = 1       },
                { .tag  = 'K', .mult = 1 << 10 },
                { .tag  = 'M', .mult = 1 << 20 },
                { .tag  = 'G', .mult = 1 << 30 },
                { .tag  = 0 },
        };

        val = parse_tag_value(str, tags);
        if (val != (unsigned int) -1) {
                /* we got file size value */
                pages = PERF_ALIGN(val, page_size) / page_size;
                if (!is_power_of_2(pages)) {
                        pages = next_pow2(pages);
                        pr_info("rounding mmap pages size to %u (%u pages)\n",
                                pages * page_size, pages);
                }
        } else {
                /* we got pages count value */
                char *eptr;
                pages = strtoul(str, &eptr, 10);
                if (*eptr != '\0') {
                        pr_err("failed to parse --mmap_pages/-m value\n");
                        return -1;
                }
        }

        size = perf_evlist__mmap_size(pages);
        if (!size) {
                pr_err("--mmap_pages/-m value must be a power of two.");
                return -1;
        }

        *mmap_pages = pages;
        return 0;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is false the user needs to signal event consumption using:
 *
 *      struct perf_mmap *m = &evlist->mmap[cpu];
 *      unsigned int head = perf_mmap__read_head(m);
 *
 *      perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite)
{
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
        int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;

        if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
                return -ENOMEM;

        evlist->overwrite = overwrite;
        evlist->mmap_len = perf_evlist__mmap_size(pages);
        pr_debug("mmap size %luB\n", evlist->mmap_len);
        mask = evlist->mmap_len - page_size - 1;

        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
                        return -ENOMEM;
        }

        if (cpu_map__empty(cpus))
                return perf_evlist__mmap_per_thread(evlist, prot, mask);

        return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
                             struct perf_target *target)
{
        evlist->threads = thread_map__new_str(target->pid, target->tid,
                                              target->uid);

        if (evlist->threads == NULL)
                return -1;

        if (perf_target__has_task(target))
                evlist->cpus = cpu_map__dummy_new();
        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(target->cpu_list);

        if (evlist->cpus == NULL)
                goto out_delete_threads;

        return 0;

out_delete_threads:
        thread_map__delete(evlist->threads);
        return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
        cpu_map__delete(evlist->cpus);
        thread_map__delete(evlist->threads);
        evlist->cpus    = NULL;
        evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                if (evsel->filter == NULL)
                        continue;

                err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
                if (err)
                        break;
        }

        return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
        struct perf_evsel *evsel;
        int err = 0;
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
                if (err)
                        break;
        }

        return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *pos;

        if (evlist->nr_entries == 1)
                return true;

        if (evlist->id_pos < 0 || evlist->is_pos < 0)
                return false;

        list_for_each_entry(pos, &evlist->entries, node) {
                if (pos->id_pos != evlist->id_pos ||
                    pos->is_pos != evlist->is_pos)
                        return false;
        }

        return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;

        if (evlist->combined_sample_type)
                return evlist->combined_sample_type;

        list_for_each_entry(evsel, &evlist->entries, node)
                evlist->combined_sample_type |= evsel->attr.sample_type;

        return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
        evlist->combined_sample_type = 0;
        return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
        u64 read_format = first->attr.read_format;
        u64 sample_type = first->attr.sample_type;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (read_format != pos->attr.read_format)
                        return false;
        }

        /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
        if ((sample_type & PERF_SAMPLE_READ) &&
            !(read_format & PERF_FORMAT_ID)) {
                return false;
        }

        return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct perf_sample *data;
        u64 sample_type;
        u16 size = 0;

        if (!first->attr.sample_id_all)
                goto out;

        sample_type = first->attr.sample_type;

        if (sample_type & PERF_SAMPLE_TID)
                size += sizeof(data->tid) * 2;

        if (sample_type & PERF_SAMPLE_TIME)
                size += sizeof(data->time);

        if (sample_type & PERF_SAMPLE_ID)
                size += sizeof(data->id);

        if (sample_type & PERF_SAMPLE_STREAM_ID)
                size += sizeof(data->stream_id);

        if (sample_type & PERF_SAMPLE_CPU)
                size += sizeof(data->cpu) * 2;

        if (sample_type & PERF_SAMPLE_IDENTIFIER)
                size += sizeof(data->id);
out:
        return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

        list_for_each_entry_continue(pos, &evlist->entries, node) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }

        return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
        struct perf_evsel *first = perf_evlist__first(evlist);
        return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel)
{
        evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int ncpus = cpu_map__nr(evlist->cpus);
        int nthreads = thread_map__nr(evlist->threads);

        list_for_each_entry_reverse(evsel, &evlist->entries, node)
                perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
        struct perf_evsel *evsel;
        int err;

        perf_evlist__update_id_pos(evlist);

        list_for_each_entry(evsel, &evlist->entries, node) {
                err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
                if (err < 0)
                        goto out_err;
        }

        return 0;
out_err:
        perf_evlist__close(evlist);
        errno = -err;
        return err;
}
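
/*
 * Example (sketch): the usual bring-up order for the calls above, as seen in
 * the builtin tools.  target and mmap_pages are caller-supplied values and
 * error paths are collapsed into bare returns.
 *
 *      struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *      if (!evlist || perf_evlist__create_maps(evlist, &target) < 0)
 *              return -1;
 *      if (perf_evlist__open(evlist) < 0)
 *              return -1;
 *      if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
 *              return -1;
 *      perf_evlist__enable(evlist);
 *      // ... consume via perf_evlist__mmap_read(), then tear down:
 *      perf_evlist__munmap(evlist);
 *      perf_evlist__close(evlist);
 *      perf_evlist__delete(evlist);
 */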

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                                  struct perf_target *target,
                                  const char *argv[], bool pipe_output,
                                  bool want_signal)
{
        int child_ready_pipe[2], go_pipe[2];
        char bf;

        if (pipe(child_ready_pipe) < 0) {
                perror("failed to create 'ready' pipe");
                return -1;
        }

        if (pipe(go_pipe) < 0) {
                perror("failed to create 'go' pipe");
                goto out_close_ready_pipe;
        }

        evlist->workload.pid = fork();
        if (evlist->workload.pid < 0) {
                perror("failed to fork");
                goto out_close_pipes;
        }

        if (!evlist->workload.pid) {
                if (pipe_output)
                        dup2(2, 1);

                signal(SIGTERM, SIG_DFL);

                close(child_ready_pipe[0]);
                close(go_pipe[1]);
                fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                /*
                 * Tell the parent we're ready to go
                 */
                close(child_ready_pipe[1]);

                /*
                 * Wait until the parent tells us to go.
                 */
                if (read(go_pipe[0], &bf, 1) == -1)
                        perror("unable to read pipe");

                execvp(argv[0], (char **)argv);

                perror(argv[0]);
                if (want_signal)
                        kill(getppid(), SIGUSR1);
                exit(-1);
        }

        if (perf_target__none(target))
                evlist->threads->map[0] = evlist->workload.pid;

        close(child_ready_pipe[1]);
        close(go_pipe[0]);
        /*
         * wait for child to settle
         */
        if (read(child_ready_pipe[0], &bf, 1) == -1) {
                perror("unable to read pipe");
                goto out_close_pipes;
        }

        fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
        evlist->workload.cork_fd = go_pipe[1];
        close(child_ready_pipe[0]);
        return 0;

out_close_pipes:
        close(go_pipe[0]);
        close(go_pipe[1]);
out_close_ready_pipe:
        close(child_ready_pipe[0]);
        close(child_ready_pipe[1]);
        return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
        if (evlist->workload.cork_fd > 0) {
                char bf = 0;
                int ret;
                /*
                 * Remove the cork, let it rip!
                 */
                ret = write(evlist->workload.cork_fd, &bf, 1);
                if (ret < 0)
                        perror("unable to write to pipe");

                close(evlist->workload.cork_fd);
                return ret;
        }

        return 0;
}
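
/*
 * Example (sketch): how the two workload helpers above cooperate in a
 * record-style tool.  The workload is forked early but kept corked, so the
 * counters can be set up before it executes a single instruction.
 *
 *      const char *argv[] = { "sleep", "1", NULL };
 *
 *      if (perf_evlist__prepare_workload(evlist, &target, argv, false, true) < 0)
 *              return -1;
 *      // open + mmap + enable the events here, then:
 *      perf_evlist__start_workload(evlist);  // uncork: the child exec()s now
 */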

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
                              struct perf_sample *sample)
{
        struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

        if (!evsel)
                return -EFAULT;
        return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *evsel;
        size_t printed = 0;

        list_for_each_entry(evsel, &evlist->entries, node) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }

        return printed + fprintf(fp, "\n");
}