On Mon, 17 Oct 2022 at 17:08, Sasha Levin <sashal@kernel.org> wrote:
From: Marco Elver <elver@google.com>
[ Upstream commit f95e5a3d59011eec1257d0e76de1e1f8969d426f ]
Internal data structures (cpu_bps, task_bps) of powerpc's hw_breakpoint implementation have relied on nr_bp_mutex serializing access to them.
Before overhauling synchronization of kernel/events/hw_breakpoint.c, introduce 2 spinlocks to synchronize cpu_bps and task_bps respectively, thus avoiding reliance on callers synchronizing powerpc's hw_breakpoint.
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Acked-by: Ian Rogers <irogers@google.com>
Link: https://lore.kernel.org/r/20220829124719.675715-10-elver@google.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
Backporting this patch seems unnecessary if we're not backporting the hw_breakpoint overhaul.
Without the overhaul, nothing breaks if this patch is absent.
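For anyone skimming the stable queue who is less familiar with this code: all the patch does is give each internal structure its own spinlock and take it around every traversal and update, turning the early-return checks into single-exit loops so the unlock is never skipped. Below is a minimal userspace sketch of that pattern, for illustration only; the names and the pthread spinlock are stand-ins, not the kernel code.

/*
 * Userspace sketch of the pattern the patch applies: one lock per
 * shared structure, taken around every reader and writer.
 * Hypothetical names; build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct bp_entry {
	int id;
	struct bp_entry *next;
};

/* Analogue of task_bps + task_bps_lock: a shared list and its spinlock. */
static struct bp_entry *bp_list;
static pthread_spinlock_t bp_list_lock;

/* Analogue of task_bps_add(): insert only while holding the lock. */
static int bp_list_add(int id)
{
	struct bp_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->id = id;

	pthread_spin_lock(&bp_list_lock);
	e->next = bp_list;
	bp_list = e;
	pthread_spin_unlock(&bp_list_lock);
	return 0;
}

/*
 * Analogue of all_task_bps_check(): the single-exit style mirrors the
 * patch, so the unlock cannot be skipped on the early-match path.
 */
static bool bp_list_contains(int id)
{
	struct bp_entry *e;
	bool ret = false;

	pthread_spin_lock(&bp_list_lock);
	for (e = bp_list; e; e = e->next) {
		if (e->id == id) {
			ret = true;
			break;
		}
	}
	pthread_spin_unlock(&bp_list_lock);
	return ret;
}

int main(void)
{
	pthread_spin_init(&bp_list_lock, PTHREAD_PROCESS_PRIVATE);
	bp_list_add(1);
	printf("contains(1) = %d\n", bp_list_contains(1));
	return 0;
}

The same reasoning applies to the kernel patch: with the hw_breakpoint overhaul these per-arch locks become necessary, while on stable kernels nr_bp_mutex still serializes every caller, so the sketch above is purely explanatory.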
Thanks,
-- Marco
 arch/powerpc/kernel/hw_breakpoint.c | 53 ++++++++++++++++++++++-------
 1 file changed, 40 insertions(+), 13 deletions(-)

diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 2669f80b3a49..8db1a15d7acb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/init.h>
 
@@ -129,7 +130,14 @@ struct breakpoint {
 	bool ptrace_bp;
 };
 
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
 static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
 static LIST_HEAD(task_bps);
 
 static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
@@ -174,7 +182,9 @@ static int task_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&task_bps_lock);
 	list_add(&tmp->list, &task_bps);
+	spin_unlock(&task_bps_lock);
 	return 0;
 }
 
@@ -182,6 +192,7 @@ static void task_bps_remove(struct perf_event *bp)
 {
 	struct list_head *pos, *q;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_safe(pos, q, &task_bps) {
 		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
 
@@ -191,6 +202,7 @@ static void task_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&task_bps_lock);
 }
 
 /*
@@ -200,12 +212,17 @@ static void task_bps_remove(struct perf_event *bp)
 static bool all_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
-		if (!can_co_exist(tmp, bp))
-			return true;
+		if (!can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 /*
@@ -215,13 +232,18 @@ static bool all_task_bps_check(struct perf_event *bp)
 static bool same_task_bps_check(struct perf_event *bp)
 {
 	struct breakpoint *tmp;
+	bool ret = false;
 
+	spin_lock(&task_bps_lock);
 	list_for_each_entry(tmp, &task_bps, list) {
 		if (tmp->bp->hw.target == bp->hw.target &&
-		    !can_co_exist(tmp, bp))
-			return true;
+		    !can_co_exist(tmp, bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&task_bps_lock);
+	return ret;
 }
 
 static int cpu_bps_add(struct perf_event *bp)
@@ -234,6 +256,7 @@ static int cpu_bps_add(struct perf_event *bp)
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i]) {
@@ -241,6 +264,7 @@ static int cpu_bps_add(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 	return 0;
 }
 
@@ -249,6 +273,7 @@ static void cpu_bps_remove(struct perf_event *bp)
 	struct breakpoint **cpu_bp;
 	int i = 0;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
 		if (!cpu_bp[i])
@@ -260,19 +285,25 @@ static void cpu_bps_remove(struct perf_event *bp)
 			break;
 		}
 	}
+	spin_unlock(&cpu_bps_lock);
 }
 
 static bool cpu_bps_check(int cpu, struct perf_event *bp)
 {
 	struct breakpoint **cpu_bp;
+	bool ret = false;
 	int i;
 
+	spin_lock(&cpu_bps_lock);
 	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
 	for (i = 0; i < nr_wp_slots(); i++) {
-		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
-			return true;
+		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+			ret = true;
+			break;
+		}
 	}
-	return false;
+	spin_unlock(&cpu_bps_lock);
+	return ret;
 }
 
 static bool all_cpu_bps_check(struct perf_event *bp)
@@ -286,10 +317,6 @@ static bool all_cpu_bps_check(struct perf_event *bp)
 	return false;
 }
 
-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
 int arch_reserve_bp_slot(struct perf_event *bp)
 {
 	int ret;
-- 
2.35.1