/*
 * tools/perf/util/hist.c
 *
 * perf hists: Add missing period_* fields when collapsing a hist entry
 */

#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE
};

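/*
 * Per-column output width bookkeeping: hists->col_len[] holds the widest
 * value seen so far for each column, so the output code can line fields up.
 */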
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

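/*
 * Grow the column widths to fit one entry: symbol, comm, dso and, when
 * branch stacks are being sorted on, the from/to symbol and dso columns.
 * Unresolved addresses print as raw hex, hence BITS_PER_LONG / 4 hex
 * digits for the unresolved column width.
 */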
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        u16 len;

        if (h->ms.sym)
                hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
        else
                hists__set_unres_dso_col_len(hists, HISTC_DSO);

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->branch_info) {
                int symlen;
                /*
                 * +4 accounts for the '[x] ' priv level info
                 * +2 accounts for the 0x prefix on raw addresses
                 */
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

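/*
 * Bucket a sample's period by the cpumode bits from the event header:
 * kernel, user, guest kernel or guest user. Unknown modes are dropped
 * here; the caller still accounts the period in he->period.
 */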
static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->period_guest_us += period;
                break;
        default:
                break;
        }
}

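/*
 * Exponential decay for live modes (e.g. perf top): each pass multiplies
 * the period and event count by 7/8, so entries that stop getting samples
 * fade out and are eventually reaped by the decay walkers below.
 */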
static void hist_entry__decay(struct hist_entry *he)
{
        he->period = (he->period * 7) / 8;
        he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->period;

        return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
                                   bool zap_kernel, bool threaded)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here
                 * in case it gets new samples; we'll eventually free it when
                 * the user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse || threaded)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
                                   bool zap_user, bool zap_kernel)
{
        return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

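/*
 * Allocate a new hist_entry from a template. When callchains are in use
 * the callchain root lives in a variable-sized area at the end of the
 * entry, so a single allocation covers both and he->callchain points
 * into that trailing space.
 */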
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;
                he->nr_events = 1;
                if (he->ms.map)
                        he->ms.map->referenced = true;
                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);
        }

        return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

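/*
 * Insert an entry into the current input tree (hists->entries_in) under
 * hists->lock, or aggregate its period into an existing entry that
 * compares equal on all active sort keys.
 */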
static struct hist_entry *add_hist_entry(struct hists *hists,
                                      struct hist_entry *entry,
                                      struct addr_location *al,
                                      u64 period)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int cmp;

        pthread_mutex_lock(&hists->lock);

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__cmp(entry, he);

                if (!cmp) {
                        he->period += period;
                        ++he->nr_events;

                        /*
                         * If the map of an existing hist_entry has become
                         * out-of-date due to an exec() or similar, update it.
                         * Otherwise we will mis-adjust symbol addresses when
                         * computing the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                goto out_unlock;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
        pthread_mutex_unlock(&hists->lock);
        return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = bi->to.map,
                        .sym    = bi->to.sym,
                },
                .cpu    = al->cpu,
                .ip     = bi->to.addr,
                .level  = al->level,
                .period = period,
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
        };

        return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .period = period,
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
        };

        return add_hist_entry(self, &entry, al, period);
}

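/*
 * Compare two entries on the configured sort keys, in order; the first
 * key that differs decides. Returns 0 when the entries are equal on all
 * keys and should therefore be aggregated.
 */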
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

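/*
 * Same as hist_entry__cmp(), but a sort key may provide a separate
 * se_collapse comparator for the collapsing stage (e.g. comparing comms
 * by string rather than by thread); se_cmp is the fallback.
 */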
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he);
}

/*
 * collapse the histogram
 */

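/*
 * Move an entry into the collapsed tree, or merge it into an entry that
 * collapses as equal: the period, all period_* breakdowns and the event
 * count are accumulated and the callchains merged, then the duplicate is
 * freed. Returns true if the entry was inserted rather than merged.
 */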
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        iter->period            += he->period;
                        iter->period_sys        += he->period_sys;
                        iter->period_us         += he->period_us;
                        iter->period_guest_sys  += he->period_guest_sys;
                        iter->period_guest_us   += he->period_guest_us;
                        iter->nr_events         += he->nr_events;

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

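/*
 * entries_in_array[] is a two-slot double buffer: grab the tree that has
 * been receiving new entries and flip entries_in to the other slot, so
 * collapsing can proceed while new samples keep arriving.
 */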
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse && !threaded)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

void hists__collapse_resort(struct hists *hists)
{
        return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
        return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

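/*
 * Insert into the output tree ordered by period, highest first; entries
 * with equal periods go right, preserving insertion order among them.
 * Callchains are sorted here too, pruned below min_callchain_hits.
 */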
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (he->period > iter->period)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

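/*
 * Rebuild hists->entries for display: walk the collapsed tree (or the
 * input tree when no collapsing was needed), re-insert each entry in
 * period order, and recompute nr_entries, total_period and the column
 * widths from scratch.
 */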
static void __hists__output_resort(struct hists *hists, bool threaded)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse || threaded)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

void hists__output_resort(struct hists *hists)
{
        return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
        return __hists__output_resort(hists, true);
}

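/*
 * Clear one filter bit on an entry; if no other filter still hides it,
 * add it back into the visible totals and column-width accounting.
 */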
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

        hists__calc_col_len(hists, h);
}

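/*
 * The filter predicates below set the corresponding filter bit and
 * return true when an entry does not match the active filter; the
 * hists__filter_by_*() walkers then reset the visible stats and unhide
 * whatever still passes.
 */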
static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

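/*
 * nr_events[0] doubles as the grand total across all event types, since
 * the PERF_RECORD_* type values start at 1.
 */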
void hists__inc_nr_events(struct hists *hists, u32 type)
{
        ++hists->stats.nr_events[0];
        ++hists->stats.nr_events[type];
}