Commit 6155bc14 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Mostly tooling fixes, but also an event groups fix, two PMU driver
  fixes and a CPU model variant addition"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Tighten (and fix) the grouping condition
  perf/x86/intel: Add model number for Airmont
  perf/rapl: Fix crash in rapl_scale()
  perf/x86/intel/uncore: Move uncore_box_init() out of driver initialization
  perf probe: Fix probing kretprobes
  perf symbols: Introduce 'for' method to iterate over the symbols with a given name
  perf probe: Do not rely on map__load() filter to find symbols
  perf symbols: Introduce method to iterate symbols ordered by name
  perf symbols: Return the first entry with a given name in find_by_name method
  perf annotate: Fix memory leaks in LOCK handling
  perf annotate: Handle ins parsing failures
  perf scripting perl: Force to use stdbool
  perf evlist: Remove extraneous 'was' on error message
Parents: bc208e0e e742f3dc
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
 		break;

 	case 55: /* 22nm Atom "Silvermont"                */
+	case 76: /* 14nm Atom "Airmont"                   */
 	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 			sizeof(hw_cache_event_ids));

@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
 	 * or use ldexp(count, -32).
 	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+	return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 }

 static u64 rapl_event_update(struct perf_event *event)

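The RAPL fix above is purely about where the per-cpu read happens: __this_cpu_read() must be applied to the per-cpu pointer rapl_pmu itself, not to rapl_pmu->hw_unit, otherwise the global pointer is dereferenced before the per-cpu offset is applied and rapl_scale() crashes. The scaling arithmetic itself is unchanged. As a minimal standalone sketch (ordinary userspace C, not kernel code; rapl_scale_example() and the hw_unit value are made up for illustration), the renormalization to 1/2^32 Joule units referred to by the "ldexp(count, -32)" comment looks like this:

/*
 * Standalone illustration of the fixed-point scaling rapl_scale() performs:
 * raw RAPL energy counts are in units of 1/2^hw_unit Joules, and the shift
 * renormalizes them to a common 1/2^32 Joule scale so that
 * ldexp(count, -32) yields Joules. Build with -lm.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rapl_scale_example(uint64_t v, int hw_unit)
{
	return v << (32 - hw_unit);	/* renormalize to 2^-32 J units */
}

int main(void)
{
	int hw_unit = 14;		/* example: 1 count = 2^-14 J */
	uint64_t raw = 123456;		/* example raw energy count */
	uint64_t scaled = rapl_scale_example(raw, hw_unit);

	printf("%f Joules\n", ldexp((double)scaled, -32));
	return 0;
}
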
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	box->phys_id = phys_id;
 	box->pci_dev = pdev;
 	box->pmu = pmu;
-	uncore_box_init(box);
 	pci_set_drvdata(pdev, box);

 	raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
 			pmu = &type->pmus[j];
 			box = *per_cpu_ptr(pmu->box, cpu);
 			/* called by uncore_cpu_init? */
-			if (box && box->phys_id >= 0) {
-				uncore_box_init(box);
+			if (box && box->phys_id >= 0)
 				continue;
-			}

 			for_each_online_cpu(k) {
 				exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
 				}
 			}

-			if (box) {
+			if (box)
 				box->phys_id = phys_id;
-				uncore_box_init(box);
-			}
 		}
 	}
 	return 0;

@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
 	return box->pmu->type->num_counters;
 }

+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+		if (box->pmu->type->ops->init_box)
+			box->pmu->type->ops->init_box(box);
+	}
+}
+
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
 	if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)

 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
+	uncore_box_init(box);
+
 	if (box->pmu->type->ops->enable_box)
 		box->pmu->type->ops->enable_box(box);
 }
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
 	return box->pmu->type->ops->read_counter(box, event);
 }

-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-		if (box->pmu->type->ops->init_box)
-			box->pmu->type->ops->init_box(box);
-	}
-}
-
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
 	return (box->phys_id < 0);

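Taken together, the uncore hunks defer box initialization from driver probe / CPU-starting time to the first counter enable, with test_and_set_bit() guaranteeing that init_box() runs at most once per box. A minimal standalone sketch of that lazy, run-once pattern (plain C11 atomics; struct box, box_init_once(), enable_box() and my_init() are invented names, not the kernel API):

/*
 * Lazy one-time init: the first caller to flip the flag performs the
 * initialization, later callers skip it. The kernel uses
 * test_and_set_bit() on box->flags; this sketch uses a C11 atomic_flag.
 */
#include <stdatomic.h>
#include <stdio.h>

struct box {
	atomic_flag initiated;		/* set once, never cleared */
	void (*init_box)(struct box *);	/* optional hardware init hook */
};

static void box_init_once(struct box *box)
{
	/* test-and-set returns the previous value: false only for the first caller */
	if (!atomic_flag_test_and_set(&box->initiated)) {
		if (box->init_box)
			box->init_box(box);
	}
}

static void enable_box(struct box *box)
{
	box_init_once(box);		/* defer init until first enable */
	/* ... program counter enable bits here ... */
}

static void my_init(struct box *box)
{
	(void)box;
	puts("init_box called exactly once");
}

int main(void)
{
	struct box b = { .initiated = ATOMIC_FLAG_INIT, .init_box = my_init };

	enable_box(&b);
	enable_box(&b);			/* second enable skips init */
	return 0;
}
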
@@ -450,11 +450,6 @@ struct perf_event {
 #endif /* CONFIG_PERF_EVENTS */
 };

-enum perf_event_context_type {
-	task_context,
-	cpu_context,
-};
-
 /**
  * struct perf_event_context - event context structure
  *
@@ -462,7 +457,6 @@ enum perf_event_context_type {
  */
 struct perf_event_context {
 	struct pmu			*pmu;
-	enum perf_event_context_type	type;
 	/*
 	 * Protect the states of the events in the list,
 	 * nr_active, and the list:

@@ -6776,7 +6776,6 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 		__perf_event_init_context(&cpuctx->ctx);
 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
-		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;

 		__perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7420,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
 	 * task or CPU context:
 	 */
 	if (move_group) {
-		if (group_leader->ctx->type != ctx->type)
+		/*
+		 * Make sure we're both on the same task, or both
+		 * per-cpu events.
+		 */
+		if (group_leader->ctx->task != ctx->task)
+			goto err_context;
+
+		/*
+		 * Make sure we're both events for the same CPU;
+		 * grouping events for different CPUs is broken; since
+		 * you can never concurrently schedule them anyhow.
+		 */
+		if (group_leader->cpu != event->cpu)
 			goto err_context;
 	} else {
 		if (group_leader->ctx != ctx)

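From userspace, the tightened condition simply means that an event passed with a group_fd must target the same task and the same CPU as its group leader, otherwise perf_event_open() rejects the group. A hedged sketch of a valid group, self-monitoring the calling thread on any CPU (the open_event() wrapper name and the counter choices are just for illustration; error handling is minimal):

/* Build on Linux; requires read access to perf events (perf_event_paranoid). */
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int open_event(struct perf_event_attr *attr, pid_t pid, int cpu,
		      int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling;
	uint64_t count;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;			/* start the group disabled */

	/* group leader: this task (pid 0), any CPU (cpu -1) */
	leader = open_event(&attr, 0, -1, -1, 0);

	/* sibling: must target the same task and CPU as the leader */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 0;
	sibling = open_event(&attr, 0, -1, leader, 0);

	if (leader < 0 || sibling < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload being measured ... */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	if (read(leader, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);

	close(sibling);
	close(leader);
	return 0;
}
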
@@ -5,7 +5,10 @@
  *	ANY CHANGES MADE HERE WILL BE LOST!
  *
  */
-
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
 #line 1 "Context.xs"
 /*
  * Context.xs.  XS interfaces for perf script.

@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
 		goto out_free_ops;

 	ops->locked.ins = ins__find(name);
+	free(name);
+
 	if (ops->locked.ins == NULL)
 		goto out_free_ops;

 	if (!ops->locked.ins->ops)
 		return 0;

-	if (ops->locked.ins->ops->parse)
-		ops->locked.ins->ops->parse(ops->locked.ops);
+	if (ops->locked.ins->ops->parse &&
+	    ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+		goto out_free_ops;

 	return 0;
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,

 static void lock__delete(struct ins_operands *ops)
 {
+	struct ins *ins = ops->locked.ins;
+
+	if (ins && ins->ops->free)
+		ins->ops->free(ops->locked.ops);
+	else
+		ins__delete(ops->locked.ops);
+
 	zfree(&ops->locked.ops);
 	zfree(&ops->target.raw);
 	zfree(&ops->target.name);
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
 	if (!dl->ins->ops)
 		return;

-	if (dl->ins->ops->parse)
-		dl->ins->ops->parse(&dl->ops);
+	if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+		dl->ins = NULL;
 }

 static int disasm_line__parse(char *line, char **namep, char **rawp)

@@ -1445,7 +1445,7 @@ int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
 	case ENOENT:
 		scnprintf(buf, size, "%s",
 			  "Error:\tUnable to find debugfs\n"
-			  "Hint:\tWas your kernel was compiled with debugfs support?\n"
+			  "Hint:\tWas your kernel compiled with debugfs support?\n"
 			  "Hint:\tIs the debugfs filesystem mounted?\n"
 			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
 		break;

@@ -116,6 +116,22 @@ struct thread;
 #define map__for_each_symbol(map, pos, n)	\
 	dso__for_each_symbol(map->dso, pos, n, map->type)

+/* map__for_each_symbol_by_name - iterate over the symbols in the given map
+ *                                that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter)	\
+	for (pos = map__find_symbol_by_name(map, sym_name, filter);	\
+	     pos && strcmp(pos->name, sym_name) == 0;			\
+	     pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos)		\
+	__map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);

 void map__init(struct map *map, enum map_type type,

@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
 	}

 	for (i = 0; i < ntevs; i++) {
-		if (tevs[i].point.address) {
+		if (tevs[i].point.address && !tevs[i].point.retprobe) {
 			tmp = strdup(reloc_sym->name);
 			if (!tmp)
 				return -ENOMEM;
@@ -2193,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
 	return ret;
 }

-static char *looking_function_name;
-static int num_matched_functions;
-
-static int probe_function_filter(struct map *map __maybe_unused,
-				      struct symbol *sym)
+static int find_probe_functions(struct map *map, char *name)
 {
-	if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
-	    strcmp(looking_function_name, sym->name) == 0) {
-		num_matched_functions++;
-		return 0;
+	int found = 0;
+	struct symbol *sym;
+
+	map__for_each_symbol_by_name(map, name, sym) {
+		if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
+			found++;
 	}
-	return 1;
+
+	return found;
 }

 #define strdup_or_goto(str, label)	\
@@ -2222,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 	struct kmap *kmap = NULL;
 	struct ref_reloc_sym *reloc_sym = NULL;
 	struct symbol *sym;
-	struct rb_node *nd;
 	struct probe_trace_event *tev;
 	struct perf_probe_point *pp = &pev->point;
 	struct probe_trace_point *tp;
+	int num_matched_functions;
 	int ret, i;

 	/* Init maps of given executable or kernel */
@@ -2242,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 	 * Load matched symbols: Since the different local symbols may have
 	 * same name but different addresses, this lists all the symbols.
 	 */
-	num_matched_functions = 0;
-	looking_function_name = pp->function;
-	ret = map__load(map, probe_function_filter);
-	if (ret || num_matched_functions == 0) {
+	num_matched_functions = find_probe_functions(map, pp->function);
+	if (num_matched_functions == 0) {
 		pr_err("Failed to find symbol %s in %s\n", pp->function,
 			target ? : "kernel");
 		ret = -ENOENT;
@@ -2257,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 		goto out;
 	}

-	if (!pev->uprobes) {
+	if (!pev->uprobes && !pp->retprobe) {
 		kmap = map__kmap(map);
 		reloc_sym = kmap->ref_reloc_sym;
 		if (!reloc_sym) {
@@ -2275,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 	}

 	ret = 0;
-	map__for_each_symbol(map, sym, nd) {
+
+	map__for_each_symbol_by_name(map, pp->function, sym) {
 		tev = (*tevs) + ret;
 		tp = &tev->point;
 		if (ret == num_matched_functions) {

@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 					    const char *name)
 {
 	struct rb_node *n;
+	struct symbol_name_rb_node *s;

 	if (symbols == NULL)
 		return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 	n = symbols->rb_node;

 	while (n) {
-		struct symbol_name_rb_node *s;
 		int cmp;

 		s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 		else if (cmp > 0)
 			n = n->rb_right;
 		else
-			return &s->sym;
+			break;
 	}

-	return NULL;
+	if (n == NULL)
+		return NULL;
+
+	/* return first symbol that has same name (if any) */
+	for (n = rb_prev(n); n; n = rb_prev(n)) {
+		struct symbol_name_rb_node *tmp;
+
+		tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
+		if (strcmp(tmp->sym.name, s->sym.name))
+			break;
+
+		s = tmp;
+	}
+
+	return &s->sym;
 }

 struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
 	return symbols__next(sym);
 }

+struct symbol *symbol__next_by_name(struct symbol *sym)
+{
+	struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
+	struct rb_node *n = rb_next(&s->rb_node);
+
+	return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+}
+
+/*
+ * Returns first symbol that matched with @name.
+ */
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 					const char *name)
 {

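The symbols__find_by_name() change makes the rbtree lookup land on some node carrying the requested name and then walk rb_prev() back to the first such node, so that symbol__next_by_name() can visit duplicates in order. A standalone sketch of the same idea over a sorted string array (not perf code; find_first_by_name() and the sample data are illustrative only):

/*
 * Binary search finds *some* matching entry, then we back up to the first
 * entry with that name, mirroring the rb_prev() walk above.
 */
#include <stdio.h>
#include <string.h>

static int find_first_by_name(const char **syms, int n, const char *name)
{
	int lo = 0, hi = n - 1, found = -1;

	while (lo <= hi) {			/* plain binary search */
		int mid = lo + (hi - lo) / 2;
		int cmp = strcmp(name, syms[mid]);

		if (cmp < 0)
			hi = mid - 1;
		else if (cmp > 0)
			lo = mid + 1;
		else {
			found = mid;
			break;
		}
	}

	if (found < 0)
		return -1;

	/* back up to the first entry with the same name (like rb_prev()) */
	while (found > 0 && strcmp(syms[found - 1], name) == 0)
		found--;

	return found;
}

int main(void)
{
	const char *syms[] = { "bar", "foo", "foo", "foo", "zap" };
	int i = find_first_by_name(syms, 5, "foo");

	/* iterate all entries sharing the name, like symbol__next_by_name() */
	for (; i >= 0 && i < 5 && strcmp(syms[i], "foo") == 0; i++)
		printf("match at index %d\n", i);
	return 0;
}
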
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
 				u64 addr);
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 					const char *name);
+struct symbol *symbol__next_by_name(struct symbol *sym);

 struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
 struct symbol *dso__next_symbol(struct symbol *sym);