Commit df58ab24 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Rename perf_counter_limit sysctl

Rename perf_counter_limit to perf_counter_max_sample_rate and
prohibit the creation of counters with a sample frequency known
to exceed that limit.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0764771d
@@ -650,7 +650,7 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 
 extern int sysctl_perf_counter_paranoid;
 extern int sysctl_perf_counter_mlock;
-extern int sysctl_perf_counter_limit;
+extern int sysctl_perf_counter_sample_rate;
 
 extern void perf_counter_init(void);
......
@@ -44,11 +44,12 @@ static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
 
 /*
- * 0 - not paranoid
- * 1 - disallow cpu counters to unpriv
- * 2 - disallow kernel profiling to unpriv
+ * perf counter paranoia level:
+ *  0 - not paranoid
+ *  1 - disallow cpu counters to unpriv
+ *  2 - disallow kernel profiling to unpriv
  */
-int sysctl_perf_counter_paranoid __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_paranoid __read_mostly;
 
 static inline bool perf_paranoid_cpu(void)
 {
@@ -61,7 +62,11 @@ static inline bool perf_paranoid_kernel(void)
 }
 
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
-int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
+
+/*
+ * max perf counter sample rate
+ */
+int sysctl_perf_counter_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_counter_id;
@@ -1244,7 +1249,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(counter, 1);
 			counter->pmu->unthrottle(counter);
-			interrupts = 2*sysctl_perf_counter_limit/HZ;
+			interrupts = 2*sysctl_perf_counter_sample_rate/HZ;
 		}
 
 		if (!counter->attr.freq || !counter->attr.sample_freq)
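
A side note on the unthrottle path above: interrupts is reseeded as if the counter had fired at twice the permitted rate during the last tick, so the frequency-adjustment logic that follows sees an over-limit rate rather than an idle counter. A worked example of the arithmetic, with an assumed HZ of 1000 (illustration only, not kernel code):

#include <stdio.h>

#define HZ 1000						/* assumed tick rate */

static int sysctl_perf_counter_sample_rate = 100000;	/* default from this patch */

int main(void)
{
	/* per-tick interrupt budget at the limit: 100000 / 1000 = 100 */
	int budget = sysctl_perf_counter_sample_rate / HZ;

	/* unthrottle seed: twice that budget, i.e. 200 */
	int seed = 2 * sysctl_perf_counter_sample_rate / HZ;

	printf("budget/tick = %d, unthrottle seed = %d\n", budget, seed);
	return 0;
}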
@@ -1682,7 +1687,7 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
 
 	spin_lock_irq(&ctx->lock);
 	if (counter->attr.freq) {
-		if (value > sysctl_perf_counter_limit) {
+		if (value > sysctl_perf_counter_sample_rate) {
 			ret = -EINVAL;
 			goto unlock;
 		}
@@ -2979,7 +2984,8 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
 	} else {
 		if (hwc->interrupts != MAX_INTERRUPTS) {
 			hwc->interrupts++;
-			if (HZ * hwc->interrupts > (u64)sysctl_perf_counter_limit) {
+			if (HZ * hwc->interrupts >
+					(u64)sysctl_perf_counter_sample_rate) {
 				hwc->interrupts = MAX_INTERRUPTS;
 				perf_log_throttle(counter, 0);
 				ret = 1;
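
The throttle predicate above extrapolates the current tick's interrupt count to a full second: with HZ ticks per second, HZ * interrupts is the projected samples-per-second figure. A standalone restatement (illustration only; HZ and the rate below are assumed values):

#include <stdbool.h>
#include <stdint.h>

#define HZ 1000		/* assumed; configuration-dependent in a real kernel */

static int sysctl_perf_counter_sample_rate = 100000;

/* Throttle once this tick's interrupt count, projected over a full
 * second, would exceed the permitted sample rate. */
static bool over_sample_rate(uint64_t interrupts_this_tick)
{
	return HZ * interrupts_this_tick >
			(uint64_t)sysctl_perf_counter_sample_rate;
}

int main(void)
{
	/* 101 interrupts in one tick projects to 101000/sec: throttled */
	return over_sample_rate(101) ? 0 : 1;
}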
@@ -3639,6 +3645,11 @@ SYSCALL_DEFINE5(perf_counter_open,
 			return -EACCES;
 	}
 
+	if (attr.freq) {
+		if (attr.sample_freq > sysctl_perf_counter_sample_rate)
+			return -EINVAL;
+	}
+
 	/*
 	 * Get the target context (task or percpu):
 	 */
......
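
From userspace, the effect of the new check in perf_counter_open() is that a frequency-mode counter requesting more than the sysctl allows now fails at creation time with EINVAL instead of being throttled later. A hypothetical probe, assuming 2.6.31-era headers that provide struct perf_counter_attr and __NR_perf_counter_open:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
	struct perf_counter_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = 0;		/* hardware cycle counter */
	attr.freq = 1;			/* frequency mode ... */
	attr.sample_freq = 1000000;	/* ... well above the 100000 default */

	/* pid 0 (self), any cpu, no group leader, no flags */
	long fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_counter_open");	/* expected: Invalid argument */
	return 0;
}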
@@ -932,9 +932,9 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "perf_counter_int_limit",
-		.data		= &sysctl_perf_counter_limit,
-		.maxlen		= sizeof(sysctl_perf_counter_limit),
+		.procname	= "perf_counter_max_sample_rate",
+		.data		= &sysctl_perf_counter_sample_rate,
+		.maxlen		= sizeof(sysctl_perf_counter_sample_rate),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
......
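
Because the renamed entry sits in kern_table with mode 0644 and the plain proc_dointvec handler, it surfaces as /proc/sys/kernel/perf_counter_max_sample_rate and accepts a single integer. A minimal sketch of raising the limit (must run as root):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/perf_counter_max_sample_rate", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 200000);	/* double the 100000 default */
	return fclose(f) != 0;
}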