Commit f8be1c8c authored by Namhyung Kim's avatar Namhyung Kim Committed by Jiri Olsa

perf hists: Add support for accumulated stat of hist entry

Maintain accumulated stat information in hist_entry->stat_acc if
symbol_conf.cumulate_callchain is set.  Fields in ->stat_acc have same
values initially, and will be updated as callchain is processed later.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Tested-by: Arun Sharma <asharma@fb.com>
Tested-by: Rodrigo Campos <rodrigo@sdfg.com.ar>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/1401335910-16832-4-git-send-email-namhyung@kernel.org
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
parent 69bcb019
...@@ -232,6 +232,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) ...@@ -232,6 +232,8 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
return true; return true;
he_stat__decay(&he->stat); he_stat__decay(&he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__decay(he->stat_acc);
diff = prev_period - he->stat.period; diff = prev_period - he->stat.period;
...@@ -279,12 +281,26 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel) ...@@ -279,12 +281,26 @@ void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
static struct hist_entry *hist_entry__new(struct hist_entry *template) static struct hist_entry *hist_entry__new(struct hist_entry *template)
{ {
size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; size_t callchain_size = 0;
struct hist_entry *he = zalloc(sizeof(*he) + callchain_size); struct hist_entry *he;
if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
callchain_size = sizeof(struct callchain_root);
he = zalloc(sizeof(*he) + callchain_size);
if (he != NULL) { if (he != NULL) {
*he = *template; *he = *template;
if (symbol_conf.cumulate_callchain) {
he->stat_acc = malloc(sizeof(he->stat));
if (he->stat_acc == NULL) {
free(he);
return NULL;
}
memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
}
if (he->ms.map) if (he->ms.map)
he->ms.map->referenced = true; he->ms.map->referenced = true;
...@@ -296,6 +312,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template) ...@@ -296,6 +312,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
*/ */
he->branch_info = malloc(sizeof(*he->branch_info)); he->branch_info = malloc(sizeof(*he->branch_info));
if (he->branch_info == NULL) { if (he->branch_info == NULL) {
free(he->stat_acc);
free(he); free(he);
return NULL; return NULL;
} }
...@@ -359,6 +376,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, ...@@ -359,6 +376,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
if (!cmp) { if (!cmp) {
he_stat__add_period(&he->stat, period, weight); he_stat__add_period(&he->stat, period, weight);
if (symbol_conf.cumulate_callchain)
he_stat__add_period(he->stat_acc, period, weight);
/* /*
* This mem info was allocated from sample__resolve_mem * This mem info was allocated from sample__resolve_mem
...@@ -394,6 +413,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists, ...@@ -394,6 +413,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
rb_insert_color(&he->rb_node_in, hists->entries_in); rb_insert_color(&he->rb_node_in, hists->entries_in);
out: out:
he_stat__add_cpumode_period(&he->stat, al->cpumode, period); he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
if (symbol_conf.cumulate_callchain)
he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
return he; return he;
} }
...@@ -768,6 +789,7 @@ void hist_entry__free(struct hist_entry *he) ...@@ -768,6 +789,7 @@ void hist_entry__free(struct hist_entry *he)
{ {
zfree(&he->branch_info); zfree(&he->branch_info);
zfree(&he->mem_info); zfree(&he->mem_info);
zfree(&he->stat_acc);
free_srcline(he->srcline); free_srcline(he->srcline);
free(he); free(he);
} }
...@@ -793,6 +815,8 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, ...@@ -793,6 +815,8 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
if (!cmp) { if (!cmp) {
he_stat__add_stat(&iter->stat, &he->stat); he_stat__add_stat(&iter->stat, &he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__add_stat(iter->stat_acc, he->stat_acc);
if (symbol_conf.use_callchain) { if (symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor); callchain_cursor_reset(&callchain_cursor);
......
...@@ -82,6 +82,7 @@ struct hist_entry { ...@@ -82,6 +82,7 @@ struct hist_entry {
struct list_head head; struct list_head head;
} pairs; } pairs;
struct he_stat stat; struct he_stat stat;
struct he_stat *stat_acc;
struct map_symbol ms; struct map_symbol ms;
struct thread *thread; struct thread *thread;
struct comm *comm; struct comm *comm;
......
...@@ -109,6 +109,7 @@ struct symbol_conf { ...@@ -109,6 +109,7 @@ struct symbol_conf {
show_nr_samples, show_nr_samples,
show_total_period, show_total_period,
use_callchain, use_callchain,
cumulate_callchain,
exclude_other, exclude_other,
show_cpu_utilization, show_cpu_utilization,
initialized, initialized,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment