Commit 18f5600b authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Small perf fixlets"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tracing: Don't call page_to_pfn() if page is NULL
  perf/x86: Fix Intel Ivy Bridge support
  perf/x86/ibs: Check syscall attribute flags
  perf/x86: Export Sandy Bridge uncore clockticks event in sysfs
parents 789f95b7 85f2a2ef
@@ -586,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[];
 
 extern struct event_constraint intel_snb_pebs_event_constraints[];
 
+extern struct event_constraint intel_ivb_pebs_event_constraints[];
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event);
 
 void intel_pmu_pebs_enable(struct perf_event *event);
...
@@ -209,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
        return -EOPNOTSUPP;
 }
 
+static const struct perf_event_attr ibs_notsupp = {
+       .exclude_user   = 1,
+       .exclude_kernel = 1,
+       .exclude_hv     = 1,
+       .exclude_idle   = 1,
+       .exclude_host   = 1,
+       .exclude_guest  = 1,
+};
+
 static int perf_ibs_init(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -229,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event)
        if (event->pmu != &perf_ibs->pmu)
                return -ENOENT;
 
+       if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
+               return -EINVAL;
+
        if (config & ~perf_ibs->config_mask)
                return -EINVAL;
...
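The check works by describing everything IBS cannot filter on in one template attr (ibs_notsupp) and comparing flag words: perf_flags(), added in the include/linux/perf_event.h hunk further down, reads the 64-bit word of bit-flags that follows read_format in struct perf_event_attr, so a single AND detects any overlap between what the caller asked for and what IBS cannot honour. A minimal standalone sketch of the same idea follows; fake_attr, fake_flags and notsupp are illustrative names (not kernel identifiers), and it assumes the flag bit-fields occupy the 64-bit word immediately after read_format, as they do in perf_event_attr.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the flag-carrying part of perf_event_attr:
 * a 64-bit read_format followed by one 64-bit word of flag bit-fields
 * (no padding between them on common ABIs). */
struct fake_attr {
        uint64_t read_format;
        uint64_t exclude_user   : 1,
                 exclude_kernel : 1,
                 exclude_hv     : 1,
                 exclude_idle   : 1,
                 reserved1      : 60;
};

/* Same aliasing trick as the kernel's perf_flags(): treat the word after
 * read_format as one u64 so all flag bits can be compared at once. */
#define fake_flags(attr) (*(&(attr)->read_format + 1))

/* Flags this (hypothetical) PMU cannot honour. */
static const struct fake_attr notsupp = {
        .exclude_user   = 1,
        .exclude_kernel = 1,
};

int main(void)
{
        struct fake_attr req = { .exclude_kernel = 1 };

        /* Any overlap between requested and unsupported flags -> reject. */
        if (fake_flags(&req) & fake_flags(&notsupp))
                printf("rejected: unsupported exclude_* flag requested\n");
        return 0;
}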
@@ -2048,7 +2048,6 @@ __init int intel_pmu_init(void)
        case 42: /* SandyBridge */
        case 45: /* SandyBridge, "Romely-EP" */
                x86_add_quirk(intel_sandybridge_quirk);
-       case 58: /* IvyBridge */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
@@ -2073,6 +2072,29 @@ __init int intel_pmu_init(void)
                pr_cont("SandyBridge events, ");
                break;
 
+       case 58: /* IvyBridge */
+               memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+               memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
+                      sizeof(hw_cache_extra_regs));
+
+               intel_pmu_lbr_init_snb();
+
+               x86_pmu.event_constraints = intel_snb_event_constraints;
+               x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
+               x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+               x86_pmu.extra_regs = intel_snb_extra_regs;
+               /* all extra regs are per-cpu when HT is on */
+               x86_pmu.er_flags |= ERF_HAS_RSP_1;
+               x86_pmu.er_flags |= ERF_NO_HT_SHARING;
+
+               /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
+               intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
+                       X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
+
+               pr_cont("IvyBridge events, ");
+               break;
+
        default:
                switch (x86_pmu.version) {
...
@@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
+struct event_constraint intel_ivb_pebs_event_constraints[] = {
+       INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
+       INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
+       INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */
+       INTEL_EVENT_CONSTRAINT(0xc4, 0xf),    /* BR_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc5, 0xf),    /* BR_MISP_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0x8),    /* MEM_TRANS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd3, 0xf),    /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+       EVENT_CONSTRAINT_END
+};
+
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
        struct event_constraint *c;
...
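For reference, the two-argument constraint macros pair an event encoding with a bitmap of the general-purpose counters the event may use: INTEL_UEVENT_CONSTRAINT() matches event select plus unit mask (0x01c0 is event 0xc0 with umask 0x01), INTEL_EVENT_CONSTRAINT() matches the event select alone, and the second argument is the counter mask (0x2 = counter 1 only, 0x8 = counter 3 only, 0xf = counters 0-3). A small standalone decoder of that convention; decode_constraint is an illustrative helper, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Decode the (code, counter-mask) convention used by the table above:
 * low byte of 'code' is the event select, the next byte the unit mask
 * (zero when the constraint matches the event select alone), and
 * 'cntmask' has one bit per general-purpose counter the event may use. */
static void decode_constraint(uint16_t code, uint8_t cntmask)
{
        printf("event=0x%02x umask=0x%02x counters:",
               (unsigned)(code & 0xff), (unsigned)((code >> 8) & 0xff));
        for (int i = 0; i < 8; i++)
                if (cntmask & (1u << i))
                        printf(" %d", i);
        printf("\n");
}

int main(void)
{
        decode_constraint(0x01c0, 0x2); /* INST_RETIRED.PRECDIST   -> counter 1 */
        decode_constraint(0x00cd, 0x8); /* MEM_TRANS_RETIRED.*     -> counter 3 */
        decode_constraint(0x00d1, 0xf); /* MEM_LOAD_UOPS_RETIRED.* -> counters 0-3 */
        return 0;
}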
@@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
        }
 }
 
+static struct uncore_event_desc snb_uncore_events[] = {
+       INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+       { /* end: all zeroes */ },
+};
+
 static struct attribute *snb_uncore_formats_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
@@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = {
        .constraints    = snb_uncore_cbox_constraints,
        .ops            = &snb_uncore_msr_ops,
        .format_group   = &snb_uncore_format_group,
+       .event_descs    = snb_uncore_events,
 };
 
 static struct intel_uncore_type *snb_msr_uncores[] = {
...
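Setting .event_descs makes the uncore driver publish the clockticks alias through sysfs, so tools can refer to the event by name instead of a raw config value. As a rough sketch only: assuming the cbox PMU appears under the usual /sys/bus/event_source/devices/uncore_cbox_0/ path on a SandyBridge client system (the exact directory name comes from PMU registration and is an assumption here), the alias could be read back like this:

#include <stdio.h>

int main(void)
{
        /* Assumed sysfs location of the exported alias; the directory name
         * depends on how the uncore PMU registers and may differ. */
        const char *path =
                "/sys/bus/event_source/devices/uncore_cbox_0/events/clockticks";
        char buf[64];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fgets(buf, sizeof(buf), f))
                printf("clockticks -> %s", buf); /* expect "event=0xff,umask=0x00" */
        fclose(f);
        return 0;
}

A perf tool with sysfs alias support would resolve the same file when given an event specifier along the lines of uncore_cbox_0/clockticks/.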
@@ -274,6 +274,8 @@ struct perf_event_attr {
        __u64   branch_sample_type; /* enum branch_sample_type */
 };
 
+#define perf_flags(attr)       (*(&(attr)->read_format + 1))
+
 /*
  * Ioctls that can be done on a perf event fd:
  */
...
@@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc,
        TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
                __entry->page,
-               page_to_pfn(__entry->page),
+               __entry->page ? page_to_pfn(__entry->page) : 0,
                __entry->order,
                __entry->migratetype,
                show_gfp_flags(__entry->gfp_flags))
@@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page,
        TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
                __entry->page,
-               page_to_pfn(__entry->page),
+               __entry->page ? page_to_pfn(__entry->page) : 0,
                __entry->order,
                __entry->migratetype,
                __entry->order == 0)
...