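/*
 * Test code reading: check that object code read through perf's dso interface
 * (dso__data_read_offset()) matches the same bytes read via objdump, for both
 * user-space and kernel sample addresses.
 */
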
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <inttypes.h>
#include <ctype.h>
#include <string.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#define BUFSZ   1024
#define READLEN 128

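/* Per-test state: start addresses of kcore maps that have already been checked */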
struct state {
        u64 done[1024];
        size_t done_cnt;
};

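/* Convert a single hex digit to its value; callers ensure isxdigit(c) first */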
static unsigned int hex(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        return c - 'A' + 10;
}

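/*
 * Parse one line of objdump disassembly: skip the address up to the colon,
 * then copy the hex-encoded instruction bytes into *buf, advancing *buf and
 * decrementing *len for each byte stored.
 */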
static void read_objdump_line(const char *line, size_t line_len, void **buf,
                              size_t *len)
{
        const char *p;
        size_t i;

        /* Skip to a colon */
        p = strchr(line, ':');
        if (!p)
                return;
        i = p + 1 - line;

        /* Read bytes */
        while (*len) {
                char c1, c2;

                /* Skip spaces */
                for (; i < line_len; i++) {
                        if (!isspace(line[i]))
                                break;
                }
                /* Get 2 hex digits */
                if (i >= line_len || !isxdigit(line[i]))
                        break;
                c1 = line[i++];
                if (i >= line_len || !isxdigit(line[i]))
                        break;
                c2 = line[i++];
                /* Followed by a space */
                if (i < line_len && line[i] && !isspace(line[i]))
                        break;
                /* Store byte */
                *(unsigned char *)*buf = (hex(c1) << 4) | hex(c2);
                *buf += 1;
                *len -= 1;
        }
}

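/* Decode up to *len instruction bytes into *buf from objdump's output */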
static int read_objdump_output(FILE *f, void **buf, size_t *len)
{
        char *line = NULL;
        size_t line_len = 0;
        ssize_t ret;
        int err = 0;

        while (1) {
                ret = getline(&line, &line_len, f);
                if (feof(f))
                        break;
                if (ret < 0) {
                        pr_debug("getline failed\n");
                        err = -1;
                        break;
                }
                read_objdump_line(line, ret, buf, len);
        }

        free(line);

        return err;
}

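/*
 * Read object code by running objdump over [addr, addr + len) of the given
 * file.  Returns 0 on success, the number of bytes that could not be read if
 * objdump produced too little output, or negative on error.
 */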
static int read_via_objdump(const char *filename, u64 addr, void *buf,
                            size_t len)
{
        char cmd[PATH_MAX * 2];
        const char *fmt;
        FILE *f;
        int ret;

        fmt = "%s -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
        ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
                       filename);
        if (ret <= 0 || (size_t)ret >= sizeof(cmd))
                return -1;

        pr_debug("Objdump command is: %s\n", cmd);

        /* Ignore objdump errors */
        strcat(cmd, " 2>/dev/null");

        f = popen(cmd, "r");
        if (!f) {
                pr_debug("popen failed\n");
                return -1;
        }

        ret = read_objdump_output(f, &buf, &len);
        if (len) {
                pr_debug("objdump read too few bytes\n");
                if (!ret)
                        ret = len;
        }

        pclose(f);

        return ret;
}

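/*
 * Read 'len' bytes of object code at 'addr' both through perf's dso reading
 * and through objdump, and check that the two reads are identical.
 */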
static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct machine *machine,
                            struct state *state)
{
        struct addr_location al;
        unsigned char buf1[BUFSZ];
        unsigned char buf2[BUFSZ];
        size_t ret_len;
        u64 objdump_addr;
        int ret;

        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

        thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, addr,
                              &al);
        if (!al.map || !al.map->dso) {
                pr_debug("thread__find_addr_map failed\n");
                return -1;
        }

        pr_debug("File is: %s\n", al.map->dso->long_name);

        if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
            !dso__is_kcore(al.map->dso)) {
                pr_debug("Unexpected kernel address - skipping\n");
                return 0;
        }

        pr_debug("On file address is: %#"PRIx64"\n", al.addr);

        if (len > BUFSZ)
                len = BUFSZ;

        /* Do not go off the map */
        if (addr + len > al.map->end)
                len = al.map->end - addr;

        /* Read the object code using perf */
        ret_len = dso__data_read_offset(al.map->dso, machine, al.addr, buf1,
                                        len);
        if (ret_len != len) {
                pr_debug("dso__data_read_offset failed\n");
                return -1;
        }

        /*
         * Converting addresses for use by objdump requires more information.
         * map__load() does that.  See map__rip_2objdump() for details.
         */
        if (map__load(al.map, NULL))
                return -1;

        /* objdump struggles with kcore - try each map only once */
        if (dso__is_kcore(al.map->dso)) {
                size_t d;

                for (d = 0; d < state->done_cnt; d++) {
                        if (state->done[d] == al.map->start) {
                                pr_debug("kcore map tested already");
                                pr_debug(" - skipping\n");
                                return 0;
                        }
                }
                if (state->done_cnt >= ARRAY_SIZE(state->done)) {
                        pr_debug("Too many kcore maps - skipping\n");
                        return 0;
                }
                state->done[state->done_cnt++] = al.map->start;
        }

        /* Read the object code using objdump */
        objdump_addr = map__rip_2objdump(al.map, al.addr);
        ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
        if (ret > 0) {
                /*
                 * The kernel maps are inaccurate - assume objdump is right in
                 * that case.
                 */
                if (cpumode == PERF_RECORD_MISC_KERNEL ||
                    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
                        len -= ret;
                        if (len) {
                                pr_debug("Reducing len to %zu\n", len);
                        } else if (dso__is_kcore(al.map->dso)) {
                                /*
                                 * objdump cannot handle very large segments
                                 * that may be found in kcore.
                                 */
                                pr_debug("objdump failed for kcore");
                                pr_debug(" - skipping\n");
                                return 0;
                        } else {
                                return -1;
                        }
                }
        }
        if (ret < 0) {
                pr_debug("read_via_objdump failed\n");
                return -1;
        }

        /* The results should be identical */
        if (memcmp(buf1, buf2, len)) {
                pr_debug("Bytes read differ from those read by objdump\n");
                return -1;
        }
        pr_debug("Bytes read match those read by objdump\n");

        return 0;
}

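/* For each sample event, check the object code at the sample IP */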
static int process_sample_event(struct machine *machine,
                                struct perf_evlist *evlist,
                                union perf_event *event, struct state *state)
{
        struct perf_sample sample;
        struct thread *thread;
        u8 cpumode;

        if (perf_evlist__parse_sample(evlist, event, &sample)) {
                pr_debug("perf_evlist__parse_sample failed\n");
                return -1;
        }

        thread = machine__findnew_thread(machine, sample.pid, sample.pid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
                return -1;
        }

        cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

        return read_object_code(sample.ip, READLEN, cpumode, thread, machine,
                                state);
}

static int process_event(struct machine *machine, struct perf_evlist *evlist,
                         union perf_event *event, struct state *state)
{
        if (event->header.type == PERF_RECORD_SAMPLE)
                return process_sample_event(machine, evlist, event, state);

        if (event->header.type == PERF_RECORD_THROTTLE ||
            event->header.type == PERF_RECORD_UNTHROTTLE)
                return 0;

        if (event->header.type < PERF_RECORD_MAX) {
                int ret;

                ret = machine__process_event(machine, event, NULL);
                if (ret < 0)
                        pr_debug("machine__process_event failed, event type %u\n",
                                 event->header.type);
                return ret;
        }

        return 0;
}

static int process_events(struct machine *machine, struct perf_evlist *evlist,
                          struct state *state)
{
        union perf_event *event;
        int i, ret;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        ret = process_event(machine, evlist, event, state);
                        perf_evlist__mmap_consume(evlist, i);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}

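/*
 * Workload: generate user-space (sorting), system call and file system
 * activity so that samples land in both user and kernel code.
 */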
static int comp(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
        int buf[40960], i;

        for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
                buf[i] = ARRAY_SIZE(buf) - i - 1;

        qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

        for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
                if (buf[i] != i) {
                        pr_debug("qsort failed\n");
                        break;
                }
        }
}

static void sort_something(void)
{
        int i;

        for (i = 0; i < 10; i++)
                do_sort_something();
}

static void syscall_something(void)
{
        int pipefd[2];
        int i;

        for (i = 0; i < 1000; i++) {
                if (pipe(pipefd) < 0) {
                        pr_debug("pipe failed\n");
                        break;
                }
                close(pipefd[1]);
                close(pipefd[0]);
        }
}

static void fs_something(void)
{
        const char *test_file_name = "temp-perf-code-reading-test-file--";
        FILE *f;
        int i;

        for (i = 0; i < 1000; i++) {
                f = fopen(test_file_name, "w+");
                if (f) {
                        fclose(f);
                        unlink(test_file_name);
                }
        }
}

static void do_something(void)
{
        fs_something();

        sort_something();

        syscall_something();
}

enum {
        TEST_CODE_READING_OK,
        TEST_CODE_READING_NO_VMLINUX,
        TEST_CODE_READING_NO_KCORE,
        TEST_CODE_READING_NO_ACCESS,
        TEST_CODE_READING_NO_KERNEL_OBJ,
};

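/*
 * Open a 'cycles' event on the current process, run the workload while
 * sampling, then verify the object code for every sample.  When try_kcore is
 * set, force kallsyms so that kcore is used instead of vmlinux.  Falls back
 * to 'cycles:u' if kernel events cannot be used.
 */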
static int do_test_code_reading(bool try_kcore)
{
        struct machines machines;
        struct machine *machine;
        struct thread *thread;
        struct record_opts opts = {
                .mmap_pages          = UINT_MAX,
                .user_freq           = UINT_MAX,
                .user_interval       = ULLONG_MAX,
                .freq                = 4000,
                .target              = {
                        .uses_mmap   = true,
                },
        };
        struct state state = {
                .done_cnt = 0,
        };
        struct thread_map *threads = NULL;
        struct cpu_map *cpus = NULL;
        struct perf_evlist *evlist = NULL;
        struct perf_evsel *evsel = NULL;
        int err = -1, ret;
        pid_t pid;
        struct map *map;
        bool have_vmlinux, have_kcore, excl_kernel = false;

        pid = getpid();

        machines__init(&machines);
        machine = &machines.host;

        ret = machine__create_kernel_maps(machine);
        if (ret < 0) {
                pr_debug("machine__create_kernel_maps failed\n");
                goto out_err;
        }

        /* Force the use of kallsyms instead of vmlinux to try kcore */
        if (try_kcore)
                symbol_conf.kallsyms_name = "/proc/kallsyms";

        /* Load kernel map */
        map = machine->vmlinux_maps[MAP__FUNCTION];
        ret = map__load(map, NULL);
        if (ret < 0) {
                pr_debug("map__load failed\n");
                goto out_err;
        }
        have_vmlinux = dso__is_vmlinux(map->dso);
        have_kcore = dso__is_kcore(map->dso);

        /* 2nd time through we just try kcore */
        if (try_kcore && !have_kcore)
                return TEST_CODE_READING_NO_KCORE;

        /* No point getting kernel events if there is no kernel object */
        if (!have_vmlinux && !have_kcore)
                excl_kernel = true;

        threads = thread_map__new_by_tid(pid);
        if (!threads) {
                pr_debug("thread_map__new_by_tid failed\n");
                goto out_err;
        }

        ret = perf_event__synthesize_thread_map(NULL, threads,
                                                perf_event__process, machine, false);
        if (ret < 0) {
                pr_debug("perf_event__synthesize_thread_map failed\n");
                goto out_err;
        }

        thread = machine__findnew_thread(machine, pid, pid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
                goto out_err;
        }

        cpus = cpu_map__new(NULL);
        if (!cpus) {
                pr_debug("cpu_map__new failed\n");
                goto out_err;
        }

        while (1) {
                const char *str;

                evlist = perf_evlist__new();
                if (!evlist) {
                        pr_debug("perf_evlist__new failed\n");
                        goto out_err;
                }

                perf_evlist__set_maps(evlist, cpus, threads);

                if (excl_kernel)
                        str = "cycles:u";
                else
                        str = "cycles";
                pr_debug("Parsing event '%s'\n", str);
                ret = parse_events(evlist, str);
                if (ret < 0) {
                        pr_debug("parse_events failed\n");
                        goto out_err;
                }

                perf_evlist__config(evlist, &opts);

                evsel = perf_evlist__first(evlist);

                evsel->attr.comm = 1;
                evsel->attr.disabled = 1;
                evsel->attr.enable_on_exec = 0;

                ret = perf_evlist__open(evlist);
                if (ret < 0) {
                        if (!excl_kernel) {
                                excl_kernel = true;
                                perf_evlist__delete(evlist);
                                evlist = NULL;
                                continue;
                        }
                        pr_debug("perf_evlist__open failed\n");
                        goto out_err;
                }
                break;
        }

        ret = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (ret < 0) {
                pr_debug("perf_evlist__mmap failed\n");
                goto out_err;
        }

        perf_evlist__enable(evlist);

        do_something();

        perf_evlist__disable(evlist);

        ret = process_events(machine, evlist, &state);
        if (ret < 0)
                goto out_err;

        if (!have_vmlinux && !have_kcore && !try_kcore)
                err = TEST_CODE_READING_NO_KERNEL_OBJ;
        else if (!have_vmlinux && !try_kcore)
                err = TEST_CODE_READING_NO_VMLINUX;
        else if (excl_kernel)
                err = TEST_CODE_READING_NO_ACCESS;
        else
                err = TEST_CODE_READING_OK;
out_err:
        if (evlist) {
                perf_evlist__munmap(evlist);
                perf_evlist__close(evlist);
                perf_evlist__delete(evlist);
        } else {
                cpu_map__delete(cpus);
                thread_map__delete(threads);
        }
        machines__destroy_kernel_maps(&machines);
        machine__delete_threads(machine);
        machines__exit(&machines);

        return err;
}

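/* Run the test twice: first against the default kernel object (vmlinux), then forcing kcore */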
int test__code_reading(void)
{
        int ret;

        ret = do_test_code_reading(false);
        if (!ret)
                ret = do_test_code_reading(true);

        switch (ret) {
        case TEST_CODE_READING_OK:
                return 0;
        case TEST_CODE_READING_NO_VMLINUX:
                fprintf(stderr, " (no vmlinux)");
                return 0;
        case TEST_CODE_READING_NO_KCORE:
                fprintf(stderr, " (no kcore)");
                return 0;
        case TEST_CODE_READING_NO_ACCESS:
                fprintf(stderr, " (no access)");
                return 0;
        case TEST_CODE_READING_NO_KERNEL_OBJ:
                fprintf(stderr, " (no kernel obj)");
                return 0;
        default:
                return -1;
        }
}