static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

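/*
 * Update the per-column width bookkeeping so that this entry's symbol,
 * comm, dso and (when present) branch columns fit when rendered.
 */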
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

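/*
 * Credit the sample period to the privilege-level bucket (kernel/user,
 * host/guest) matching the sample's cpumode.
 */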
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}

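/*
 * Exponential decay: each pass keeps 7/8 of the accumulated period, so
 * entries that stop getting samples fade out over time.
 */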
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}

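/*
 * Decay every entry in the output tree; entries that decay to zero or
 * match the zap flags are unlinked and freed, unless the UI still holds
 * a reference to them (n->used).
 */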
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

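/*
 * Insert @entry into the current input tree or, if an entry that
 * compares equal already exists, merge the period into it instead.
 * Runs under hists->lock because the input trees can be rotated
 * concurrently (see hists__get_rotate_entries_in()).
 */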
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it. Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the histogram counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}

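/*
 * Compare two entries with each configured sort key in turn; the first
 * key that yields a non-zero result decides the ordering.
 */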
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

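/*
 * Try to insert @he into @root; if an entry that is equal according to
 * hist_entry__collapse() is already there, fold @he's periods and
 * callchain into it, free @he and return false. Returns true when @he
 * was actually linked into the tree.
 */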
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->period_sys += he->period_sys;
			iter->period_us += he->period_us;
			iter->period_guest_sys += he->period_guest_sys;
			iter->period_guest_us += he->period_guest_us;
			iter->nr_events += he->nr_events;

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

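/*
 * Hand back the input tree that was being filled and swap in the other
 * element of entries_in_array, so that new samples can keep
 * accumulating while the returned tree is collapsed.
 */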
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

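/*
 * Link @he into the output tree, ordered by descending period, and sort
 * its callchain now that the total period is known.
 */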
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

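/*
 * Rebuild hists->entries from the collapsed (or input) tree, sorted by
 * period, recomputing the entry count, total period and column widths
 * from scratch.
 */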
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

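/*
 * Clear @filter for @h; if no other filter still applies, account the
 * entry back into the totals and column-width bookkeeping.
 */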
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

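/*
 * nr_events[0] accumulates the grand total across all event types; the
 * per-type counters follow it in the array.
 */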
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}