/* tools/perf/util/hist.c */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};

struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE,
        .key    = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

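/*
 * Widen the column to @len if that is larger than the current width;
 * returns true if the width actually grew.
 */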
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

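/*
 * Unresolved addresses are printed as raw hex, BITS_PER_LONG / 4 hex
 * digits wide.  Reserve that much room for the DSO column, unless the
 * user pinned the column widths, set a field separator or gave a DSO
 * list explicitly.
 */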
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

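/*
 * Update the maximum width of every column needed to display @h, so
 * that the output code can align all entries in a single pass.
 */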
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

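/*
 * Attribute @period to the kernel/user/guest sub-count that matches
 * the cpumode of the sample being added to this entry.
 */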
static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

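/*
 * Exponentially decay an entry: each round keeps 7/8 of the period and
 * event count, so entries that stop getting samples gradually fade
 * away (this is how e.g. 'perf top' ages its histogram between
 * refreshes).
 */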
static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when the
                 * user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

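/*
 * Allocate a new hist_entry as a copy of @template.  When callchains
 * are in use, the callchain root lives in the same allocation, right
 * past the end of the hist_entry itself.
 */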
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) allocated from
                         * machine__resolve_bstack() and will be freed after
                         * adding new entries.  So we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

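/*
 * Add @entry to the input tree, or, if an entry with the same sort
 * keys already exists there, just fold @period and @weight into it.
 */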
static struct hist_entry *add_hist_entry(struct hists *hists,
                                      struct hist_entry *entry,
                                      struct addr_location *al,
                                      u64 period,
                                      u64 weight)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from machine__resolve_mem
                         * and will not be used anymore.
                         */
                        free(entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        hists->nr_entries++;
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al, period, weight);
}

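/*
 * Compare two entries with each configured sort key in turn, stopping
 * at the first key on which they differ.
 */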
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

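/*
 * Like hist_entry__cmp(), but prefer a key's se_collapse() method when
 * it provides one; this ordering is the one used by the collapsed
 * tree.
 */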
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he->mem_info);
        free_srcline(he->srcline);
        free(he);
}

/*
 * collapse the histogram
 */

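/*
 * Insert @he into @root; if an entry with an equal collapsed key is
 * already there, fold @he's stats (and its callchain, when enabled)
 * into that entry, free @he and return false.
 */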
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

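/*
 * The input trees are double buffered: hand out the tree that has been
 * filling up and flip entries_in to the other element of
 * entries_in_array, under hists->lock, so new entries can keep
 * arriving while the previous batch is being collapsed.
 */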
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

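/*
 * Drain the current input tree into entries_collapsed, merging entries
 * that share a collapsed key and applying the active filters to those
 * that end up newly inserted.
 */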
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

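/*
 * Order entries by period.  With event groups (symbol_conf.event_group),
 * entries whose leader periods tie are further compared member by
 * member, using the periods collected from their pair entries.
 */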
static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;

        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}

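/*
 * Link @he into the output tree, ordered on period (biggest first);
 * sort its callchain before insertion when callchains are in use.
 */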
static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

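/*
 * Rebuild hists->entries, the tree the UIs walk for display, from the
 * collapsed (or input) tree, recomputing the entry count, total period
 * and column widths from scratch.
 */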
void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

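/*
 * Clear @filter for @h and, if no other filter still hides it, add the
 * entry back into the visible totals and column width calculation.
 */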
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

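/*
 * Add an entry with zeroed stats that matches @pair's sort keys, so a
 * pair from another hists has something to hang off of in @hists; used
 * below by hists__link() when comparing two sets of hists.
 */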
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

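/*
 * Binary search the (collapsed) tree of @hists for an entry that has
 * the same sort keys as @he.
 */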
static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}