Commit 335bc70b authored by Linus Torvalds


Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Validate cpu early in perf_event_alloc()
  perf: Find_get_context: fix the per-cpu-counter check
  perf: Fix contexted inheritance
parents 404cbbd5 66832eb4
@@ -2228,14 +2228,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
 	unsigned long flags;
 	int ctxn, err;
 
-	if (!task && cpu != -1) {
+	if (!task) {
 		/* Must be root to operate on a CPU event: */
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return ERR_PTR(-EACCES);
 
-		if (cpu < 0 || cpu >= nr_cpumask_bits)
-			return ERR_PTR(-EINVAL);
-
 		/*
 		 * We could be clever and allow to attach a event to an
 		 * offline CPU and activate it when the CPU comes up, but
@@ -5541,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	struct hw_perf_event *hwc;
 	long err;
 
+	if ((unsigned)cpu >= nr_cpu_ids) {
+		if (!task || cpu != -1)
+			return ERR_PTR(-EINVAL);
+	}
+
 	event = kzalloc(sizeof(*event), GFP_KERNEL);
 	if (!event)
 		return ERR_PTR(-ENOMEM);
@@ -6494,7 +6496,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
 	parent_ctx->rotate_disable = 0;
-	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
 	child_ctx = child->perf_event_ctxp[ctxn];
@@ -6502,12 +6503,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 	/*
 	 * Mark the child context as a clone of the parent
 	 * context, or of whatever the parent is a clone of.
-	 * Note that if the parent is a clone, it could get
-	 * uncloned at any point, but that doesn't matter
-	 * because the list of events and the generation
-	 * count can't have changed since we took the mutex.
+	 *
+	 * Note that if the parent is a clone, the holding of
+	 * parent_ctx->lock avoids it from being uncloned.
 	 */
-	cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+	cloned_ctx = parent_ctx->parent_ctx;
 	if (cloned_ctx) {
 		child_ctx->parent_ctx = cloned_ctx;
 		child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6518,6 +6518,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 		get_ctx(child_ctx->parent_ctx);
 	}
 
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 	mutex_unlock(&parent_ctx->mutex);
 	perf_unpin_context(parent_ctx);
...
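The first two hunks move the cpu validation out of find_get_context() and to the top of perf_event_alloc(), using the (unsigned)cpu >= nr_cpu_ids idiom: casting the signed cpu argument to unsigned makes one comparison reject both negative values and values past the last possible CPU, while cpu == -1 ("any CPU") is still accepted for task-bound events. Below is a minimal user-space sketch of that check; cpu_arg_valid() and NR_CPUS_DEMO are invented names standing in for the in-kernel logic and nr_cpu_ids.

/*
 * Illustrative user-space sketch (not kernel code) of the range check
 * added to perf_event_alloc().  Casting a signed cpu to unsigned turns
 * "cpu < 0 || cpu >= limit" into a single comparison, because negative
 * values wrap to very large unsigned ones.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_DEMO 8u	/* stands in for nr_cpu_ids */

static bool cpu_arg_valid(int cpu, bool has_task)
{
	if ((unsigned int)cpu >= NR_CPUS_DEMO) {
		/* cpu == -1 means "any CPU" and is only valid for task events */
		if (!has_task || cpu != -1)
			return false;
	}
	return true;
}

int main(void)
{
	printf("%d\n", cpu_arg_valid(3, false));	/* 1: cpu in range */
	printf("%d\n", cpu_arg_valid(-1, true));	/* 1: task event, any CPU */
	printf("%d\n", cpu_arg_valid(-1, false));	/* 0: CPU event needs a real CPU */
	printf("%d\n", cpu_arg_valid(99, true));	/* 0: past the last possible CPU */
	return 0;
}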
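The three perf_event_init_context() hunks form the contexted-inheritance fix: the raw_spin_unlock_irqrestore() call is moved from right after clearing rotate_disable down to just before mutex_unlock(), so parent_ctx->lock stays held while parent_ctx->parent_ctx is read and a reference is taken, which is also why the rcu_dereference() wrapper could be dropped. As a rough analogy only (struct ctx, get_ref() and clone_source() are invented for this sketch, and POSIX threads replace the kernel primitives), widening a critical section in that way looks like:

/*
 * Rough pthreads analogy (not kernel code): keep the lock held across
 * reading and taking a reference on the parent pointer, so another
 * thread cannot clear ("unclone") it in between.
 */
#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	struct ctx *parent;	/* plays the role of parent_ctx->parent_ctx */
	int refcount;		/* the kernel uses an atomic refcount; simplified here */
};

static void get_ref(struct ctx *c)
{
	c->refcount++;
}

/* Return the context a child should clone from, with a reference already held. */
static struct ctx *clone_source(struct ctx *parent)
{
	struct ctx *src;

	pthread_mutex_lock(&parent->lock);
	src = parent->parent ? parent->parent : parent;
	get_ref(src);				/* parent->parent cannot be cleared while the lock is held */
	pthread_mutex_unlock(&parent->lock);	/* unlock only after the reference is taken */

	return src;
}

int main(void)
{
	struct ctx grandparent = { PTHREAD_MUTEX_INITIALIZER, NULL, 1 };
	struct ctx parent = { PTHREAD_MUTEX_INITIALIZER, &grandparent, 1 };

	/* The child clones from the original (uncloned) context. */
	printf("clone source refcount: %d\n", clone_source(&parent)->refcount);
	return 0;
}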