Commit dd6f29da authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Two fixes on the kernel side: fix an over-eager condition that failed
  larger perf ring-buffer sizes, plus fix crashes in the Intel BTS code
  for a corner case, found by fuzzing"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix impossible ring-buffer sizes warning
  perf/x86: Add check_period PMU callback
parents c5f1ac5e 528871b4
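
Note on the BTS fix: as the diff below shows, Intel BTS is keyed off the hardware branch-instructions event with a sample period of 1 (see intel_pmu_has_bts_period()), so lowering an existing event's period to 1 through the PERF_EVENT_IOC_PERIOD ioctl could flip an ordinarily configured event into BTS mode after the fact. The new check_period callback lets the PMU veto such a change. Below is a minimal userspace sketch of the sequence the check targets; the event setup, the chosen periods, and the expectation of EINVAL are illustrative assumptions, not text from the commit:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	__u64 period = 1;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
	attr.sample_period = 1000;	/* created as a normal sampling event */
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * Dropping the period to 1 would turn this into a BTS-style event;
	 * with the check_period callback in place the ioctl is expected to
	 * fail with EINVAL instead of silently retuning the event.
	 */
	if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period))
		perror("PERF_EVENT_IOC_PERIOD");

	close(fd);
	return 0;
}

Whether perf_event_open() succeeds at all depends on hardware support and perf_event_paranoid; only the ioctl step matters for this commit.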
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void)
 		x86_pmu.check_microcode();
 }
 
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+	if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+		return -EINVAL;
+
+	if (value && x86_pmu.limit_period) {
+		if (x86_pmu.limit_period(event, value) > value)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct pmu pmu = {
 	.pmu_enable = x86_pmu_enable,
 	.pmu_disable = x86_pmu_disable,
@@ -2302,6 +2315,7 @@ static struct pmu pmu = {
 	.event_idx = x86_pmu_event_idx,
 	.sched_task = x86_pmu_sched_task,
 	.task_ctx_size = sizeof(struct x86_perf_task_context),
+	.check_period = x86_pmu_check_period,
 };
 
 void arch_perf_update_userpage(struct perf_event *event,
...
@@ -3587,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3667,6 +3672,8 @@ static __initconst const struct x86_pmu core_pmu = {
 	.cpu_starting = intel_pmu_cpu_starting,
 	.cpu_dying = intel_pmu_cpu_dying,
 	.cpu_dead = intel_pmu_cpu_dead,
+
+	.check_period = intel_pmu_check_period,
 };
 
 static struct attribute *intel_pmu_attrs[];
@@ -3711,6 +3718,8 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.guest_get_msrs = intel_guest_get_msrs,
 	.sched_task = intel_pmu_sched_task,
+
+	.check_period = intel_pmu_check_period,
 };
 
 static __init void intel_clovertown_quirk(void)
...
@@ -646,6 +646,11 @@ struct x86_pmu {
 	 * Intel host/guest support (KVM)
 	 */
 	struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period) (struct perf_event *event, u64 period);
 };
 
 struct x86_perf_task_context {
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void)
 
 #ifdef CONFIG_CPU_SUP_INTEL
 
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-	return hw_event == bts_event && hwc->sample_period == 1;
+	return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	return intel_pmu_has_bts_period(event, hwc->sample_period);
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
...
@@ -447,6 +447,11 @@ struct pmu {
 	 * Filter events for PMU-specific reasons.
 	 */
 	int (*filter_match) (struct perf_event *event); /* optional */
+
+	/*
+	 * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+	 */
+	int (*check_period) (struct perf_event *event, u64 value); /* optional */
 };
 
 enum perf_addr_filter_action_t {
...
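
The callback is optional and generic: any PMU driver can use it to veto a period value before __perf_event_period() installs it, and a nonzero return makes the ioctl fail with -EINVAL (see the perf_event_period() hunk below). A hypothetical driver-side use, not part of this commit — the mydrv name and the 48-bit counter width are invented for illustration:

/* Hypothetical example: refuse a period that does not fit the counter. */
static int mydrv_pmu_check_period(struct perf_event *event, u64 value)
{
	if (value >> 48)	/* assumed 48-bit wide hardware counter */
		return -EINVAL;

	return 0;
}

Such a driver would point .check_period at this function in its struct pmu; drivers that leave the field NULL are unaffected, because perf_pmu_register() falls back to perf_event_nop_int() as shown further down.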
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }
 
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;
 
+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);
 
 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }
 
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
 
 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}
 
+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;
...
@@ -734,7 +734,7 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);
 
-	if (order_base_2(size) >= MAX_ORDER)
+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
 		goto fail;
 
 	rb = kzalloc(size, GFP_KERNEL);
...
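
For scale on the ring-buffer fix, assuming a common x86_64 configuration (PAGE_SHIFT = 12, MAX_ORDER = 11; both are config-dependent): size here is a byte count covering the struct ring_buffer header plus one void * per data page, while MAX_ORDER is a page-allocation order. The old test order_base_2(size) >= MAX_ORDER therefore rejected any metadata allocation above 2^10 bytes = 1 KiB, i.e. ring buffers of only on the order of a hundred pages, which is the over-eager failure the merge message refers to. The corrected test against PAGE_SHIFT + MAX_ORDER = 23 refuses only requests that kzalloc() could not satisfy anyway (roughly 2^22 bytes = 4 MiB on such a configuration).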