Commit f95e5a3d authored by Marco Elver, committed by Peter Zijlstra

powerpc/hw_breakpoint: Avoid relying on caller synchronization

Internal data structures (cpu_bps, task_bps) of powerpc's hw_breakpoint
implementation have relied on nr_bp_mutex serializing access to them.

Before overhauling synchronization of kernel/events/hw_breakpoint.c,
introduce 2 spinlocks to synchronize cpu_bps and task_bps respectively,
thus avoiding reliance on callers synchronizing powerpc's hw_breakpoint.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20220829124719.675715-10-elver@google.com
parent 24198ad3
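
Every hunk below applies the same pattern: take the new spinlock around any access to cpu_bps or task_bps, and turn early returns inside locked loops into a ret/break so the lock is always released before returning. For orientation, here is a minimal kernel-style sketch of that pattern; the names (example_lock, example_list, example_entry, example_add, example_contains) are hypothetical and not part of the patch.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical shared state: reachable from paths the caller does not
 * serialize, so it gets its own lock. */
static DEFINE_SPINLOCK(example_lock);		/* guards example_list */
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head list;
	int id;
};

static void example_add(struct example_entry *e)
{
	spin_lock(&example_lock);
	list_add(&e->list, &example_list);
	spin_unlock(&example_lock);
}

static bool example_contains(int id)
{
	struct example_entry *e;
	bool ret = false;

	spin_lock(&example_lock);
	list_for_each_entry(e, &example_list, list) {
		if (e->id == id) {
			/* Don't "return true" here: that would leave the
			 * lock held. Record the result and break instead. */
			ret = true;
			break;
		}
	}
	spin_unlock(&example_lock);
	return ret;
}

Note that the patch uses plain spin_lock()/spin_unlock() (no IRQ-disabling variants), and the critical sections are short walks over a list or a small per-CPU array.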
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
@@ -129,7 +130,14 @@ struct breakpoint {
 	bool ptrace_bp;
 };
 
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
 static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
 static LIST_HEAD(task_bps);
 
 static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
@@ -174,7 +182,9 @@ static int task_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&task_bps_lock);
 	list_add(&tmp->list, &task_bps);
+	spin_unlock(&task_bps_lock);
 	return 0;
 }
 
@@ -182,6 +192,7 @@ static void task_bps_remove(struct perf_event *bp)
 {
 	struct list_head *pos, *q;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_safe(pos, q, &task_bps) {
 		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
 
@@ -191,6 +202,7 @@ static void task_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&task_bps_lock);
 }
 
 /*
@@ -200,12 +212,17 @@ static void task_bps_remove(struct perf_event *bp)
 static bool all_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
-		if (!can_co_exist(tmp, bp))
-			return true;
+		if (!can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 /*
@@ -215,13 +232,18 @@ static bool all_task_bps_check(struct perf_event *bp)
 static bool same_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
 		if (tmp->bp->hw.target == bp->hw.target &&
-		    !can_co_exist(tmp, bp))
-			return true;
+		    !can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 static int cpu_bps_add(struct perf_event *bp)
@@ -234,6 +256,7 @@ static int cpu_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i]) {
@@ -241,6 +264,7 @@ static int cpu_bps_add(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 	return 0;
 }
 
@@ -249,6 +273,7 @@ static void cpu_bps_remove(struct perf_event *bp)
 	struct breakpoint **cpu_bp;
 	int i = 0;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i])
@@ -260,19 +285,25 @@ static void cpu_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 }
 
 static bool cpu_bps_check(int cpu, struct perf_event *bp)
 {
 	struct breakpoint **cpu_bp;
+	bool ret = false;
 	int i;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
-			return true;
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&cpu_bps_lock);
+	return ret;
 }
 
 static bool all_cpu_bps_check(struct perf_event *bp)
@@ -286,10 +317,6 @@ static bool all_cpu_bps_check(struct perf_event *bp)
 	return false;
 }
 
-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
 int arch_reserve_bp_slot(struct perf_event *bp)
 {
 	int ret;