Commit 2b18593e authored by Linus Torvalds

Merge tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fix from Borislav Petkov:

 - A single data race fix on the perf event cleanup path to avoid
   endless loops due to insufficient locking

* tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
parents 59c80f05 68e3c698
...@@ -6253,10 +6253,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -6253,10 +6253,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!atomic_inc_not_zero(&event->rb->mmap_count)) { if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
/* /*
* Raced against perf_mmap_close() through * Raced against perf_mmap_close(); remove the
* perf_event_set_output(). Try again, hope for better * event and try again.
* luck.
*/ */
ring_buffer_attach(event, NULL);
mutex_unlock(&event->mmap_mutex); mutex_unlock(&event->mmap_mutex);
goto again; goto again;
} }
...@@ -11825,14 +11825,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, ...@@ -11825,14 +11825,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
goto out; goto out;
} }
/*
 * Acquire two mmap_mutex locks in a globally consistent order (by
 * ascending address) so that two tasks locking the same pair of
 * mutexes cannot deadlock. The second lock uses mutex_lock_nested()
 * with SINGLE_DEPTH_NESTING to tell lockdep this same-class nesting
 * is intentional.
 */
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
if (b < a)
swap(a, b);
mutex_lock(a);
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
static int static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event) perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{ {
struct perf_buffer *rb = NULL; struct perf_buffer *rb = NULL;
int ret = -EINVAL; int ret = -EINVAL;
if (!output_event) if (!output_event) {
mutex_lock(&event->mmap_mutex);
goto set; goto set;
}
/* don't allow circular references */ /* don't allow circular references */
if (event == output_event) if (event == output_event)
...@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) ...@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
event->pmu != output_event->pmu) event->pmu != output_event->pmu)
goto out; goto out;
/*
* Hold both mmap_mutex to serialize against perf_mmap_close(). Since
* output_event is already on rb->event_list, and the list iteration
* restarts after every removal, it is guaranteed this new event is
* observed *OR* if output_event is already removed, it's guaranteed we
* observe !rb->mmap_count.
*/
mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
set: set:
mutex_lock(&event->mmap_mutex);
/* Can't redirect output if we've got an active mmap() */ /* Can't redirect output if we've got an active mmap() */
if (atomic_read(&event->mmap_count)) if (atomic_read(&event->mmap_count))
goto unlock; goto unlock;
...@@ -11881,6 +11899,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) ...@@ -11881,6 +11899,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
rb = ring_buffer_get(output_event); rb = ring_buffer_get(output_event);
if (!rb) if (!rb)
goto unlock; goto unlock;
/* did we race against perf_mmap_close() */
if (!atomic_read(&rb->mmap_count)) {
ring_buffer_put(rb);
goto unlock;
}
} }
ring_buffer_attach(event, rb); ring_buffer_attach(event, rb);
...@@ -11888,20 +11912,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) ...@@ -11888,20 +11912,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
ret = 0; ret = 0;
unlock: unlock:
mutex_unlock(&event->mmap_mutex); mutex_unlock(&event->mmap_mutex);
if (output_event)
mutex_unlock(&output_event->mmap_mutex);
out: out:
return ret; return ret;
} }
static void mutex_lock_double(struct mutex *a, struct mutex *b)
{
if (b < a)
swap(a, b);
mutex_lock(a);
mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}
static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
{ {
bool nmi_safe = false; bool nmi_safe = false;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment