#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};
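
/*
 * These are bit positions, not masks: an entry is hidden by setting
 * (1 << HIST_FILTER__*) in hist_entry::filtered and is shown again once
 * the whole mask drops back to zero (see hists__remove_entry_filter()).
 */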

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};
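
/*
 * The defaults above render callchains as a graph weighted by relative
 * percentages (CHAIN_GRAPH_REL), pruned below 0.5%, callee first.
 * Tools typically override these fields from command-line options,
 * e.g. perf report -g.
 */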

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
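
/*
 * An address that cannot be resolved to a symbol is printed raw, so its
 * column must fit BITS_PER_LONG / 4 hex digits (16 on a 64-bit build).
 */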

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}
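
/*
 * Each decay pass above scales the period by 7/8 in integer math, so an
 * idle entry fades geometrically, e.g. 1000 -> 875 -> 765 -> 669, until
 * hists__decay_entry() reports that it reached zero and can be removed.
 */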

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
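
/*
 * Note the allocation trick above: the callchain root lives in the same
 * zalloc() block, right past the end of the hist_entry (callchain is a
 * zero-length tail member of struct hist_entry), so he->callchain is
 * only valid when symbol_conf.use_callchain made callchain_size non-zero.
 */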

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period,
				      u64 weight)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
					  struct addr_location *al,
					  struct symbol *sym_parent,
					  struct mem_info *mi,
					  u64 period,
					  u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.stat = {
			.period	= period,
			.weight = weight,
			.nr_events = 1,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists = self,
		.mem_info = mi,
		.branch_info = NULL,
	};
	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period,
					     u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period,
				      u64 weight)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
		.branch_info = NULL,
		.mem_info = NULL,
	};

	return add_hist_entry(self, &entry, al, period, weight);
}
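
/*
 * Typical call site, as a sketch (the sample-handling context around it
 * is illustrative; only __hists__add_entry() itself is defined here):
 *
 *	struct hist_entry *he;
 *
 *	he = __hists__add_entry(&evsel->hists, &al, parent,
 *				sample->period, sample->weight);
 *	if (he == NULL)
 *		return -ENOMEM;
 */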

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
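
/*
 * hist_entry__collapse() above differs from hist_entry__cmp() only in
 * that a sort entry may provide a dedicated ->se_collapse() for the
 * merge pass; when it does not, the regular ->se_cmp() is used for both.
 */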

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
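
/*
 * hists->entries_in_array[] holds two input trees; the rotation above
 * hands the filled tree to the collapse pass while new samples keep
 * landing in the other one, which is what lets e.g. perf top keep
 * adding entries while the display thread resorts.
 */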

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}
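
/*
 * With event groups (symbol_conf.event_group), the comparison above
 * tie-breaks on the other group members' periods, collected from the
 * ->pairs list; index 0 is the leader, already covered by the direct
 * period_cmp() at the top.
 */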

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}
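
/*
 * The resort pipeline is two-pass: hists__collapse_resort() merges
 * entries that compare equal under the sort keys (entries_in ->
 * entries_collapsed), then hists__output_resort() rebuilds
 * hists->entries ordered by period for display, recomputing nr_entries
 * and total_period from scratch.
 */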

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
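
/*
 * All three filters above follow the same pattern: the
 * hists__filter_entry_by_*() predicate flags entries that should stay
 * hidden, and hists__remove_entry_filter() clears the bit on the rest
 * and folds them back into the statistics and column widths.
 */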

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
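
/*
 * hists__match() above and hists__link() below pair entries across two
 * histograms, e.g. so perf diff can line up a baseline perf.data with a
 * new one, or so event-group members can be shown against their leader.
 */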

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, add a dummy entry on the leader hists with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}