1 #define _FILE_OFFSET_BITS 64
8 #include <linux/list.h>
9 #include <linux/kernel.h>
16 #include "trace-event.h"
/* File-scope state shared by the header writer and build-id machinery. */
/* When true, skip populating the on-disk build-id cache (see disable_buildid_cache()). */
21 static bool no_buildid_cache = false;
/* Dynamically grown table of trace event types pushed via perf_header__push_event(). */
23 static int event_count;
24 static struct perf_trace_event_type *events;
/*
 * Append an (id, name) pair to the global 'events' table, growing the
 * array one slot at a time.  Names longer than MAX_EVENT_NAME are
 * truncated (a warning is printed).
 */
26 int perf_header__push_event(u64 id, const char *name)
28 if (strlen(name) > MAX_EVENT_NAME)
29 pr_warning("Event %s will be truncated\n", name);
/* First entry: allocate the initial table. */
32 events = malloc(sizeof(struct perf_trace_event_type))
36 struct perf_trace_event_type *nevents;
/* Grow through a temporary so 'events' is not leaked if realloc() fails. */
38 nevents = realloc(events, (event_count + 1) * sizeof(*events));
/* Zero the new slot so the strncpy()'d name is always NUL-terminated. */
43 memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
44 events[event_count].event_id = id;
45 strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
/*
 * Linear scan of the global 'events' table; returns the name for the
 * given event id (NULL on miss — return path elided from this view).
 */
50 char *perf_header__find_event(u64 id)
53 for (i = 0 ; i < event_count; i++) {
54 if (events[i].event_id == id)
55 return events[i].name;
/* 8-byte magic identifying a perf.data file. */
60 static const char *__perf_magic = "PERFFILE";
/*
 * NOTE(review): reading the string through a u64 * violates strict
 * aliasing and assumes suitable alignment of the literal — works on the
 * targets perf supports, but worth confirming if ported.
 */
62 #define PERF_MAGIC (*(u64 *)__perf_magic)
/* On-disk per-event attribute record: the attr plus where its ids live. */
64 struct perf_file_attr {
65 struct perf_event_attr attr;
66 struct perf_file_section ids;
/* Mark feature 'feat' as present in the header's feature bitmap. */
69 void perf_header__set_feat(struct perf_header *self, int feat)
71 set_bit(feat, self->adds_features);
/* Remove feature 'feat' from the header's feature bitmap. */
74 void perf_header__clear_feat(struct perf_header *self, int feat)
76 clear_bit(feat, self->adds_features);
/* Test whether feature 'feat' is set in the header's feature bitmap. */
79 bool perf_header__has_feat(const struct perf_header *self, int feat)
81 return test_bit(feat, self->adds_features);
/*
 * Write 'size' bytes of 'buf' to fd.  The short-write/retry handling is
 * elided from this view; presumably loops until all bytes are written
 * or write() fails — TODO confirm against the full source.
 */
84 static int do_write(int fd, const void *buf, size_t size)
87 int ret = write(fd, buf, size);
/*
 * Write 'count' bytes of 'bf', then zero-pad the output up to
 * 'count_aligned' bytes so the next record stays NAME_ALIGN-aligned.
 */
101 static int write_padded(int fd, const void *bf, size_t count,
102 size_t count_aligned)
/* Static, so guaranteed zero-initialized — used as the padding source. */
104 static const char zero_buf[NAME_ALIGN];
105 int err = do_write(fd, bf, count);
108 err = do_write(fd, zero_buf, count_aligned - count);
/*
 * Iterate over a dso list, visiting only entries that have a build-id.
 * NOTE(review): the continuation after the '!pos->has_build_id' test is
 * elided here — presumably 'continue; else' so the caller's body runs
 * only for dsos with a build-id; confirm against the full source.
 */
113 #define dsos__for_each_with_build_id(pos, head) \
114 list_for_each_entry(pos, head, node) \
115 if (!pos->has_build_id) \
/*
 * Emit one build_id_event per dso (with a build-id) on 'head':
 * a fixed header followed by the NUL-terminated long name, zero-padded
 * to NAME_ALIGN so records stay aligned.
 */
119 static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
124 dsos__for_each_with_build_id(pos, head) {
126 struct build_id_event b;
/* +1 for the terminating NUL, then round up to the record alignment. */
131 len = pos->long_name_len + 1;
132 len = ALIGN(len, NAME_ALIGN);
133 memset(&b, 0, sizeof(b));
134 memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
/* misc encodes kernel/user and host/guest origin (set by the caller). */
136 b.header.misc = misc;
137 b.header.size = sizeof(b) + len;
138 err = do_write(fd, &b, sizeof(b));
141 err = write_padded(fd, pos->long_name,
142 pos->long_name_len + 1, len);
/*
 * Write the build-id table for one machine: kernel dsos first, then
 * user dsos, tagging records as guest if the machine is not the host.
 */
150 static int machine__write_buildid_table(struct machine *self, int fd)
153 u16 kmisc = PERF_RECORD_MISC_KERNEL,
154 umisc = PERF_RECORD_MISC_USER;
156 if (!machine__is_host(self)) {
157 kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
158 umisc = PERF_RECORD_MISC_GUEST_USER;
161 err = __dsos__write_buildid_table(&self->kernel_dsos, self->pid,
164 err = __dsos__write_buildid_table(&self->user_dsos,
165 self->pid, umisc, fd);
/*
 * Write build-id tables for the host machine and then every guest
 * machine in the session's rbtree.
 */
169 static int dsos__write_buildid_table(struct perf_header *header, int fd)
/* The header is embedded in the session; recover the session from it. */
171 struct perf_session *session = container_of(header,
172 struct perf_session, header);
174 int err = machine__write_buildid_table(&session->host_machine, fd);
179 for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
180 struct machine *pos = rb_entry(nd, struct machine, rb_node);
181 err = machine__write_buildid_table(pos, fd);
188 int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
189 const char *name, bool is_kallsyms)
191 const size_t size = PATH_MAX;
192 char *realname = realpath(name, NULL),
193 *filename = malloc(size),
194 *linkname = malloc(size), *targetname;
197 if (realname == NULL || filename == NULL || linkname == NULL)
200 len = snprintf(filename, size, "%s%s%s",
201 debugdir, is_kallsyms ? "/" : "", realname);
202 if (mkdir_p(filename, 0755))
205 snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
207 if (access(filename, F_OK)) {
209 if (copyfile("/proc/kallsyms", filename))
211 } else if (link(realname, filename) && copyfile(name, filename))
215 len = snprintf(linkname, size, "%s/.build-id/%.2s",
216 debugdir, sbuild_id);
218 if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
221 snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
222 targetname = filename + strlen(debugdir) - 5;
223 memcpy(targetname, "../..", 5);
225 if (symlink(targetname, linkname) == 0)
/*
 * Binary-to-string wrapper: format the raw build-id as hex and hand it
 * to build_id_cache__add_s().
 */
234 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
235 const char *name, const char *debugdir,
/* Two hex chars per byte, plus the terminating NUL. */
238 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
240 build_id__sprintf(build_id, build_id_size, sbuild_id);
242 return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
/*
 * Remove a build-id entry from the cache: unlink the
 * <debugdir>/.build-id/xx/yyyy... symlink, then unlink the file it
 * pointed at (the link target is relative, so it is re-rooted first).
 */
245 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
247 const size_t size = PATH_MAX;
248 char *filename = malloc(size),
249 *linkname = malloc(size);
252 if (filename == NULL || linkname == NULL)
255 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
256 debugdir, sbuild_id, sbuild_id + 2);
258 if (access(linkname, F_OK))
/*
 * NOTE(review): readlink() does not NUL-terminate and 'filename' is
 * not zeroed — the elided lines presumably terminate it; confirm
 * against the full source.
 */
261 if (readlink(linkname, filename, size) < 0)
264 if (unlink(linkname))
268 * Since the link is relative, we must make it absolute:
270 snprintf(linkname, size, "%s/.build-id/%.2s/%s",
271 debugdir, sbuild_id, filename);
273 if (unlink(linkname))
/*
 * Cache one dso's build-id.  A kernel dso whose long name is not an
 * absolute path (e.g. "[kernel.kallsyms]") is treated as kallsyms.
 */
283 static int dso__cache_build_id(struct dso *self, const char *debugdir)
285 bool is_kallsyms = self->kernel && self->long_name[0] != '/';
287 return build_id_cache__add_b(self->build_id, sizeof(self->build_id),
288 self->long_name, debugdir, is_kallsyms);
/* Cache the build-id of every dso on 'head' that has one. */
291 static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
296 dsos__for_each_with_build_id(pos, head)
297 if (dso__cache_build_id(pos, debugdir))
/* Cache build-ids for a machine's kernel and user dsos; OR the results. */
303 static int machine__cache_build_ids(struct machine *self, const char *debugdir)
305 int ret = __dsos__cache_build_ids(&self->kernel_dsos, debugdir);
306 ret |= __dsos__cache_build_ids(&self->user_dsos, debugdir);
/*
 * Cache build-ids for the whole session: ensure the cache directory
 * exists, then walk the host machine and every guest machine.
 */
310 static int perf_session__cache_build_ids(struct perf_session *self)
314 char debugdir[PATH_MAX];
316 snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
/* An already-existing cache directory is fine. */
318 if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
321 ret = machine__cache_build_ids(&self->host_machine, debugdir);
323 for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
324 struct machine *pos = rb_entry(nd, struct machine, rb_node);
325 ret |= machine__cache_build_ids(pos, debugdir);
/*
 * Read build-ids for one machine's kernel and user dsos; true if any
 * were read.  'with_hits' restricts the read to dsos that had samples.
 */
330 static bool machine__read_build_ids(struct machine *self, bool with_hits)
332 bool ret = __dsos__read_build_ids(&self->kernel_dsos, with_hits);
333 ret |= __dsos__read_build_ids(&self->user_dsos, with_hits);
/* Read build-ids for the host machine and every guest machine. */
337 static bool perf_session__read_build_ids(struct perf_session *self, bool with_hits)
340 bool ret = machine__read_build_ids(&self->host_machine, with_hits);
342 for (nd = rb_first(&self->machines); nd; nd = rb_next(nd)) {
343 struct machine *pos = rb_entry(nd, struct machine, rb_node);
344 ret |= machine__read_build_ids(pos, with_hits);
350 static int perf_header__adds_write(struct perf_header *self,
351 struct perf_evlist *evlist, int fd)
354 struct perf_session *session;
355 struct perf_file_section *feat_sec;
360 session = container_of(self, struct perf_session, header);
362 if (perf_header__has_feat(self, HEADER_BUILD_ID &&
363 !perf_session__read_build_ids(session, true)))
364 perf_header__clear_feat(self, HEADER_BUILD_ID);
366 nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
370 feat_sec = calloc(sizeof(*feat_sec), nr_sections);
371 if (feat_sec == NULL)
374 sec_size = sizeof(*feat_sec) * nr_sections;
376 sec_start = self->data_offset + self->data_size;
377 lseek(fd, sec_start + sec_size, SEEK_SET);
379 if (perf_header__has_feat(self, HEADER_TRACE_INFO)) {
380 struct perf_file_section *trace_sec;
382 trace_sec = &feat_sec[idx++];
384 /* Write trace info */
385 trace_sec->offset = lseek(fd, 0, SEEK_CUR);
386 read_tracing_data(fd, &evlist->entries);
387 trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
390 if (perf_header__has_feat(self, HEADER_BUILD_ID)) {
391 struct perf_file_section *buildid_sec;
393 buildid_sec = &feat_sec[idx++];
395 /* Write build-ids */
396 buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
397 err = dsos__write_buildid_table(self, fd);
399 pr_debug("failed to write buildid table\n");
402 buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
404 if (!no_buildid_cache)
405 perf_session__cache_build_ids(session);
408 lseek(fd, sec_start, SEEK_SET);
409 err = do_write(fd, feat_sec, sec_size);
411 pr_debug("failed to write feature section\n");
/*
 * Write the minimal header used in pipe mode: just the magic and the
 * header's own size.
 */
417 int perf_header__write_pipe(int fd)
419 struct perf_pipe_file_header f_header;
422 f_header = (struct perf_pipe_file_header){
424 .size = sizeof(f_header),
427 err = do_write(fd, &f_header, sizeof(f_header));
429 pr_debug("failed to write perf pipe header\n");
/*
 * Write the full perf.data header: per-event id arrays, the attr table,
 * the event-type table, then (at exit) the feature sections and finally
 * the file header itself at offset 0.  When 'evlist' differs from the
 * session's own evlist, the session's ids are merged in pairwise.
 */
436 int perf_session__write_header(struct perf_session *session,
437 struct perf_evlist *evlist,
438 int fd, bool at_exit)
440 struct perf_file_header f_header;
441 struct perf_file_attr f_attr;
442 struct perf_header *self = &session->header;
443 struct perf_evsel *attr, *pair = NULL;
/* Leave room for the file header; it is written last, when offsets are known. */
446 lseek(fd, sizeof(f_header), SEEK_SET);
448 if (session->evlist != evlist)
449 pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);
/* First pass: write each evsel's id array, remembering where it landed. */
451 list_for_each_entry(attr, &evlist->entries, node) {
452 attr->id_offset = lseek(fd, 0, SEEK_CUR);
453 err = do_write(fd, attr->id, attr->ids * sizeof(u64));
456 pr_debug("failed to write perf header\n");
/* Merge the paired session evsel's ids right after this evsel's. */
459 if (session->evlist != evlist) {
460 err = do_write(fd, pair->id, pair->ids * sizeof(u64));
463 attr->ids += pair->ids;
464 pair = list_entry(pair->node.next, struct perf_evsel, node);
468 self->attr_offset = lseek(fd, 0, SEEK_CUR);
/* Second pass: write the attr table referencing the id arrays above. */
470 list_for_each_entry(attr, &evlist->entries, node) {
471 f_attr = (struct perf_file_attr){
474 .offset = attr->id_offset,
475 .size = attr->ids * sizeof(u64),
478 err = do_write(fd, &f_attr, sizeof(f_attr));
480 pr_debug("failed to write perf header attribute\n");
485 self->event_offset = lseek(fd, 0, SEEK_CUR);
486 self->event_size = event_count * sizeof(struct perf_trace_event_type);
488 err = do_write(fd, events, self->event_size);
490 pr_debug("failed to write perf header events\n");
495 self->data_offset = lseek(fd, 0, SEEK_CUR);
/* Only at exit are the data size and feature sections final. */
498 err = perf_header__adds_write(self, evlist, fd);
503 f_header = (struct perf_file_header){
505 .size = sizeof(f_header),
506 .attr_size = sizeof(f_attr),
508 .offset = self->attr_offset,
509 .size = evlist->nr_entries * sizeof(f_attr),
512 .offset = self->data_offset,
513 .size = self->data_size,
516 .offset = self->event_offset,
517 .size = self->event_size,
521 memcpy(&f_header.adds_features, &self->adds_features, sizeof(self->adds_features));
/* Now write the header into the space reserved at the start. */
523 lseek(fd, 0, SEEK_SET);
524 err = do_write(fd, &f_header, sizeof(f_header));
526 pr_debug("failed to write perf header\n");
/* Leave the file offset positioned at the end of the data section. */
529 lseek(fd, self->data_offset + self->data_size, SEEK_SET);
/*
 * Read 'size' bytes into 'buf', byte-swapping as u64s if the file was
 * written on a host with the opposite endianness.
 */
535 static int perf_header__getbuffer64(struct perf_header *self,
536 int fd, void *buf, size_t size)
538 if (readn(fd, buf, size) <= 0)
541 if (self->needs_swap)
542 mem_bswap_64(buf, size);
/*
 * Read the feature-section index that follows the data section and call
 * 'process' once per feature bit that is set, pairing bits with index
 * entries in order.
 */
547 int perf_header__process_sections(struct perf_header *self, int fd,
548 int (*process)(struct perf_file_section *self,
549 struct perf_header *ph,
552 struct perf_file_section *feat_sec;
/* feat starts at 1: presumably feature bit 0 is reserved — confirm. */
556 int err = -1, feat = 1;
558 nr_sections = bitmap_weight(self->adds_features, HEADER_FEAT_BITS);
562 feat_sec = calloc(sizeof(*feat_sec), nr_sections);
566 sec_size = sizeof(*feat_sec) * nr_sections;
/* The index lives immediately after the data section. */
568 lseek(fd, self->data_offset + self->data_size, SEEK_SET);
570 if (perf_header__getbuffer64(self, fd, feat_sec, sec_size))
574 while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
575 if (perf_header__has_feat(self, feat)) {
576 struct perf_file_section *sec = &feat_sec[idx++];
578 err = process(sec, self, feat, fd);
/*
 * Read and validate the perf.data file header: check the magic, detect
 * cross-endian files via attr_size, cope with the older header layout
 * that lacked adds_features, and copy the offsets into 'ph'.
 */
589 int perf_file_header__read(struct perf_file_header *self,
590 struct perf_header *ph, int fd)
592 lseek(fd, 0, SEEK_SET);
594 if (readn(fd, self, sizeof(*self)) <= 0 ||
595 memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
/* attr_size mismatch: try interpreting the file as opposite-endian. */
598 if (self->attr_size != sizeof(struct perf_file_attr)) {
599 u64 attr_size = bswap_64(self->attr_size);
601 if (attr_size != sizeof(struct perf_file_attr))
604 mem_bswap_64(self, offsetof(struct perf_file_header,
606 ph->needs_swap = true;
609 if (self->size != sizeof(*self)) {
610 /* Support the previous format */
611 if (self->size == offsetof(typeof(*self), adds_features))
612 bitmap_zero(self->adds_features, HEADER_FEAT_BITS);
617 memcpy(&ph->adds_features, &self->adds_features,
618 sizeof(ph->adds_features));
620 * FIXME: hack that assumes that if we need swap the perf.data file
621 * may be coming from an arch with a different word-size, ergo different
622 * DEFINE_BITMAP format, investigate more later, but for now its mostly
623 * safe to assume that we have a build-id section. Trace files probably
624 * have several other issues in this realm anyway...
626 if (ph->needs_swap) {
627 memset(&ph->adds_features, 0, sizeof(ph->adds_features));
628 perf_header__set_feat(ph, HEADER_BUILD_ID);
631 ph->event_offset = self->event_types.offset;
632 ph->event_size = self->event_types.size;
633 ph->data_offset = self->data.offset;
634 ph->data_size = self->data.size;
/*
 * Handle one build_id_event: find (or create) the machine it belongs
 * to, pick the kernel or user dso list from the record's misc bits,
 * find (or create) the dso and attach the build-id to it.
 */
638 static int __event_process_build_id(struct build_id_event *bev,
640 struct perf_session *session)
643 struct list_head *head;
644 struct machine *machine;
647 enum dso_kernel_type dso_type;
649 machine = perf_session__findnew_machine(session, bev->pid);
653 misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
656 case PERF_RECORD_MISC_KERNEL:
657 dso_type = DSO_TYPE_KERNEL;
658 head = &machine->kernel_dsos;
660 case PERF_RECORD_MISC_GUEST_KERNEL:
661 dso_type = DSO_TYPE_GUEST_KERNEL;
662 head = &machine->kernel_dsos;
664 case PERF_RECORD_MISC_USER:
665 case PERF_RECORD_MISC_GUEST_USER:
666 dso_type = DSO_TYPE_USER;
667 head = &machine->user_dsos;
673 dso = __dsos__findnew(head, filename);
675 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
677 dso__set_build_id(dso, &bev->build_id);
/* Names like "[kernel.kallsyms]" mark kernel dsos. */
679 if (filename[0] == '[')
680 dso->kernel = dso_type;
682 build_id__sprintf(dso->build_id, sizeof(dso->build_id),
684 pr_debug("build id event received for %s: %s\n",
685 dso->long_name, sbuild_id);
/*
 * Parse the build-id feature section: a sequence of build_id_event
 * records each followed by a padded filename, between 'offset' and
 * 'offset + size'.
 */
693 static int perf_header__read_build_ids(struct perf_header *self,
694 int input, u64 offset, u64 size)
696 struct perf_session *session = container_of(self,
697 struct perf_session, header);
698 struct build_id_event bev;
699 char filename[PATH_MAX];
700 u64 limit = offset + size;
703 while (offset < limit) {
/* NOTE(review): plain read() — a short read aborts rather than retries. */
706 if (read(input, &bev, sizeof(bev)) != sizeof(bev))
709 if (self->needs_swap)
710 perf_event_header__bswap(&bev.header);
/* header.size covers the fixed record plus the padded filename. */
712 len = bev.header.size - sizeof(bev);
713 if (read(input, filename, len) != len)
716 __event_process_build_id(&bev, filename, session);
718 offset += bev.header.size;
/*
 * Default feature-section handler: seek to the section and dispatch on
 * the feature id.  Unknown features are skipped with a debug message,
 * not treated as errors, so newer files remain readable.
 */
725 static int perf_file_section__process(struct perf_file_section *self,
726 struct perf_header *ph,
729 if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
730 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
731 "%d, continuing...\n", self->offset, feat);
736 case HEADER_TRACE_INFO:
737 trace_report(fd, false);
740 case HEADER_BUILD_ID:
741 if (perf_header__read_build_ids(ph, fd, self->offset, self->size))
742 pr_debug("Failed to read buildids, continuing...\n");
745 pr_debug("unknown feature %d, continuing...\n", feat);
/*
 * Read and validate the pipe-mode header; optionally re-emit it on
 * stdout when repiping, and detect cross-endian input via the size
 * field.
 */
751 static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
752 struct perf_header *ph, int fd,
755 if (readn(fd, self, sizeof(*self)) <= 0 ||
756 memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
759 if (repipe && do_write(STDOUT_FILENO, self, sizeof(*self)) < 0)
/* Size mismatch: accept it if it matches after a byte swap. */
762 if (self->size != sizeof(*self)) {
763 u64 size = bswap_64(self->size);
765 if (size != sizeof(*self))
768 ph->needs_swap = true;
/* Session-level wrapper for reading the pipe-mode header. */
774 static int perf_header__read_pipe(struct perf_session *session, int fd)
776 struct perf_header *self = &session->header;
777 struct perf_pipe_file_header f_header;
779 if (perf_file_header__read_pipe(&f_header, self, fd,
780 session->repipe) < 0) {
781 pr_debug("incompatible file format\n");
/*
 * Read a perf.data header into the session: build the evlist from the
 * attr table (one evsel per attr, with its sample ids), load the
 * event-type table, process feature sections, and leave the file offset
 * at the start of the data section.
 */
790 int perf_session__read_header(struct perf_session *session, int fd)
792 struct perf_header *self = &session->header;
793 struct perf_file_header f_header;
794 struct perf_file_attr f_attr;
796 int nr_attrs, nr_ids, i, j;
798 session->evlist = perf_evlist__new(NULL, NULL);
799 if (session->evlist == NULL)
/* Pipe-mode files have a different, minimal header. */
802 if (session->fd_pipe)
803 return perf_header__read_pipe(session, fd);
805 if (perf_file_header__read(&f_header, self, fd) < 0) {
806 pr_debug("incompatible file format\n");
810 nr_attrs = f_header.attrs.size / sizeof(f_attr);
811 lseek(fd, f_header.attrs.offset, SEEK_SET);
813 for (i = 0; i < nr_attrs; i++) {
814 struct perf_evsel *evsel;
817 if (perf_header__getbuffer64(self, fd, &f_attr, sizeof(f_attr)))
/* Remember the attr-table position before jumping to the id array. */
820 tmp = lseek(fd, 0, SEEK_CUR);
821 evsel = perf_evsel__new(&f_attr.attr, i);
824 goto out_delete_evlist;
826 * Do it before so that if perf_evsel__alloc_id fails, this
827 * entry gets purged too at perf_evlist__delete().
829 perf_evlist__add(session->evlist, evsel);
831 nr_ids = f_attr.ids.size / sizeof(u64);
833 * We don't have the cpu and thread maps on the header, so
834 * for allocating the perf_sample_id table we fake 1 cpu and
835 * hattr->ids threads.
837 if (perf_evsel__alloc_id(evsel, 1, nr_ids))
838 goto out_delete_evlist;
840 lseek(fd, f_attr.ids.offset, SEEK_SET);
842 for (j = 0; j < nr_ids; j++) {
843 if (perf_header__getbuffer64(self, fd, &f_id, sizeof(f_id)))
846 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
/* Return to the attr table for the next iteration. */
849 lseek(fd, tmp, SEEK_SET);
852 if (f_header.event_types.size) {
853 lseek(fd, f_header.event_types.offset, SEEK_SET);
854 events = malloc(f_header.event_types.size);
857 if (perf_header__getbuffer64(self, fd, events,
858 f_header.event_types.size))
860 event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
863 perf_header__process_sections(self, fd, perf_file_section__process);
865 lseek(fd, self->data_offset, SEEK_SET);
873 perf_evlist__delete(session->evlist);
874 session->evlist = NULL;
/*
 * Return the sample_type shared by every evsel in the list; dies if the
 * evsels disagree.
 */
878 u64 perf_evlist__sample_type(struct perf_evlist *evlist)
880 struct perf_evsel *pos;
883 list_for_each_entry(pos, &evlist->entries, node) {
885 type = pos->attr.sample_type;
886 else if (type != pos->attr.sample_type)
887 die("non matching sample_type");
/*
 * Return the sample_id_all flag shared by every evsel in the list;
 * dies if the evsels disagree.
 */
893 bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
895 bool value = false, first = true;
896 struct perf_evsel *pos;
898 list_for_each_entry(pos, &evlist->entries, node) {
900 value = pos->attr.sample_id_all;
902 } else if (value != pos->attr.sample_id_all)
903 die("non matching sample_id_all");
/*
 * Build a PERF_RECORD_HEADER_ATTR event carrying one attr plus its id
 * array, and feed it to 'process' (used to transmit attrs over a pipe).
 */
909 int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
910 perf_event__handler_t process,
911 struct perf_session *session)
913 union perf_event *ev;
/* Record layout: attr (u64-aligned) + event header + ids. */
917 size = sizeof(struct perf_event_attr);
918 size = ALIGN(size, sizeof(u64));
919 size += sizeof(struct perf_event_header);
920 size += ids * sizeof(u64);
927 ev->attr.attr = *attr;
928 memcpy(ev->attr.id, id, ids * sizeof(u64));
930 ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
931 ev->attr.header.size = size;
933 err = process(ev, NULL, session);
/* Synthesize one ATTR event per evsel in the session's evlist. */
940 int perf_session__synthesize_attrs(struct perf_session *session,
941 perf_event__handler_t process)
943 struct perf_evsel *attr;
946 list_for_each_entry(attr, &session->evlist->entries, node) {
947 err = perf_event__synthesize_attr(&attr->attr, attr->ids,
948 attr->id, process, session);
950 pr_debug("failed to create perf header attribute\n");
/*
 * Consume a PERF_RECORD_HEADER_ATTR event: create the evsel, register
 * the trailing ids, and refresh the session's sample type.
 */
958 int perf_event__process_attr(union perf_event *event,
959 struct perf_session *session)
961 unsigned int i, ids, n_ids;
962 struct perf_evsel *evsel;
/* Pipe mode may deliver attrs before any evlist exists. */
964 if (session->evlist == NULL) {
965 session->evlist = perf_evlist__new(NULL, NULL);
966 if (session->evlist == NULL)
970 evsel = perf_evsel__new(&event->attr.attr,
971 session->evlist->nr_entries);
975 perf_evlist__add(session->evlist, evsel);
/* The id array is whatever of the record follows the attr field. */
977 ids = event->header.size;
978 ids -= (void *)&event->attr.id - (void *)event;
979 n_ids = ids / sizeof(u64);
981 * We don't have the cpu and thread maps on the header, so
982 * for allocating the perf_sample_id table we fake 1 cpu and
983 * hattr->ids threads.
985 if (perf_evsel__alloc_id(evsel, 1, n_ids))
988 for (i = 0; i < n_ids; i++) {
989 perf_evlist__id_add(session->evlist, evsel, 0, i,
993 perf_session__update_sample_type(session);
/*
 * Build a PERF_RECORD_HEADER_EVENT_TYPE event for one (id, name) pair,
 * shrinking the record so only the u64-aligned used part of the name
 * field is transmitted.
 */
998 int perf_event__synthesize_event_type(u64 event_id, char *name,
999 perf_event__handler_t process,
1000 struct perf_session *session)
1002 union perf_event ev;
1006 memset(&ev, 0, sizeof(ev));
1008 ev.event_type.event_type.event_id = event_id;
1009 memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
1010 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
1012 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
1013 size = strlen(name);
1014 size = ALIGN(size, sizeof(u64));
/* Drop the unused tail of the fixed-size name buffer. */
1015 ev.event_type.header.size = sizeof(ev.event_type) -
1016 (sizeof(ev.event_type.event_type.name) - size);
1018 err = process(&ev, NULL, session);
/* Synthesize one EVENT_TYPE event per entry in the global table. */
1023 int perf_event__synthesize_event_types(perf_event__handler_t process,
1024 struct perf_session *session)
1026 struct perf_trace_event_type *type;
1029 for (i = 0; i < event_count; i++) {
1032 err = perf_event__synthesize_event_type(type->event_id,
1033 type->name, process,
1036 pr_debug("failed to create perf header event type\n");
/*
 * Consume an EVENT_TYPE event by pushing its (id, name) into the global
 * table.
 */
1044 int perf_event__process_event_type(union perf_event *event,
1045 struct perf_session *session __unused)
1047 if (perf_header__push_event(event->event_type.event_type.event_id,
1048 event->event_type.event_type.name) < 0)
/*
 * Emit a TRACING_DATA event header (announcing the u64-aligned payload
 * size), then write the tracing data itself followed by zero padding.
 * Returns the aligned payload size.
 */
1054 int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
1055 perf_event__handler_t process,
1056 struct perf_session *session __unused)
1058 union perf_event ev;
1059 ssize_t size = 0, aligned_size = 0, padding;
1062 memset(&ev, 0, sizeof(ev));
1064 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
/* Query the size first so the header can announce it before the data. */
1065 size = read_tracing_data_size(fd, &evlist->entries);
1068 aligned_size = ALIGN(size, sizeof(u64));
1069 padding = aligned_size - size;
1070 ev.tracing_data.header.size = sizeof(ev.tracing_data);
1071 ev.tracing_data.size = aligned_size;
1073 process(&ev, NULL, session);
1075 err = read_tracing_data(fd, &evlist->entries);
1076 write_padded(fd, NULL, 0, padding);
1078 return aligned_size;
/*
 * Consume a TRACING_DATA event: parse the tracing data in-file with
 * trace_report(), skip (and optionally repipe) the alignment padding,
 * and verify the consumed size matches the announced one.
 */
1081 int perf_event__process_tracing_data(union perf_event *event,
1082 struct perf_session *session)
1084 ssize_t size_read, padding, size = event->tracing_data.size;
1085 off_t offset = lseek(session->fd, 0, SEEK_CUR);
1088 /* setup for reading amidst mmap */
1089 lseek(session->fd, offset + sizeof(struct tracing_data_event),
1092 size_read = trace_report(session->fd, session->repipe);
1094 padding = ALIGN(size_read, sizeof(u64)) - size_read;
1096 if (read(session->fd, buf, padding) < 0)
1097 die("reading input file");
1098 if (session->repipe) {
1099 int retw = write(STDOUT_FILENO, buf, padding);
1100 if (retw <= 0 || retw != padding)
1101 die("repiping tracing data padding");
1104 if (size_read + padding != size)
1105 die("tracing data size mismatch");
1107 return size_read + padding;
/*
 * Build a PERF_RECORD_HEADER_BUILD_ID event for one dso (build-id,
 * origin misc bits, machine pid, padded filename) and feed it to
 * 'process'.
 */
1110 int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
1111 perf_event__handler_t process,
1112 struct machine *machine,
1113 struct perf_session *session)
1115 union perf_event ev;
1122 memset(&ev, 0, sizeof(ev));
/* +1 for the NUL, then round the name up to the record alignment. */
1124 len = pos->long_name_len + 1;
1125 len = ALIGN(len, NAME_ALIGN);
1126 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1127 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1128 ev.build_id.header.misc = misc;
1129 ev.build_id.pid = machine->pid;
1130 ev.build_id.header.size = sizeof(ev.build_id) + len;
1131 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1133 err = process(&ev, NULL, session);
/* Consume a BUILD_ID event by delegating to __event_process_build_id(). */
1138 int perf_event__process_build_id(union perf_event *event,
1139 struct perf_session *session)
1141 __event_process_build_id(&event->build_id,
1142 event->build_id.filename,
/* Disable on-disk build-id caching for the rest of this run. */
1147 void disable_buildid_cache(void)
1149 no_buildid_cache = true;