#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>
bool ptrace_bp;
};
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
static LIST_HEAD(task_bps);
static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
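As a reading aid, the locking pattern the patch introduces is sketched below. It is illustrative only: the entry type, the list name and check_list() are stand-ins invented for the sketch, not identifiers from arch/powerpc/kernel/hw_breakpoint.c. Each shared structure gets its own spinlock (here one lock for one list, mirroring task_bps_lock protecting task_bps and cpu_bps_lock protecting cpu_bps), the whole scan happens inside the critical section, and the result is carried out of the loop so the lock is dropped on a single exit path. Plain spin_lock() is used, as in the patch, on the assumption that these constraint-check paths are never entered from hardirq context.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical entry type standing in for struct breakpoint. */
struct bp_entry {
	struct list_head list;
	u64 addr;
};

/* One lock per shared structure, mirroring task_bps_lock -> task_bps. */
static DEFINE_SPINLOCK(entries_lock);
static LIST_HEAD(entries);

/* Scan under the lock; carry the verdict out instead of returning early. */
static bool check_list(u64 addr)
{
	struct bp_entry *tmp;
	bool ret = false;

	spin_lock(&entries_lock);
	list_for_each_entry(tmp, &entries, list) {
		if (tmp->addr == addr) {
			ret = true;
			break;
		}
	}
	spin_unlock(&entries_lock);
	return ret;
}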
if (IS_ERR(tmp))
return PTR_ERR(tmp);
+ spin_lock(&task_bps_lock);
list_add(&tmp->list, &task_bps);
+ spin_unlock(&task_bps_lock);
return 0;
}
{
struct list_head *pos, *q;
+ spin_lock(&task_bps_lock);
list_for_each_safe(pos, q, &task_bps) {
struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
break;
}
}
+ spin_unlock(&task_bps_lock);
}
/*
static bool all_task_bps_check(struct perf_event *bp)
{
struct breakpoint *tmp;
+ bool ret = false;
+ spin_lock(&task_bps_lock);
list_for_each_entry(tmp, &task_bps, list) {
- if (!can_co_exist(tmp, bp))
- return true;
+ if (!can_co_exist(tmp, bp)) {
+ ret = true;
+ break;
+ }
}
- return false;
+ spin_unlock(&task_bps_lock);
+ return ret;
}
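Note the shape of the conversion in all_task_bps_check() above: the early "return true" inside the loop becomes "ret = true; break;", so every path leaves the function through the single spin_unlock(&task_bps_lock) before "return ret", and the lock cannot be leaked on the match path. The same rewrite is applied to same_task_bps_check() and cpu_bps_check() below.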
/*
static bool same_task_bps_check(struct perf_event *bp)
{
struct breakpoint *tmp;
+ bool ret = false;
+ spin_lock(&task_bps_lock);
list_for_each_entry(tmp, &task_bps, list) {
if (tmp->bp->hw.target == bp->hw.target &&
- !can_co_exist(tmp, bp))
- return true;
+ !can_co_exist(tmp, bp)) {
+ ret = true;
+ break;
+ }
}
- return false;
+ spin_unlock(&task_bps_lock);
+ return ret;
}
static int cpu_bps_add(struct perf_event *bp)
if (IS_ERR(tmp))
return PTR_ERR(tmp);
+ spin_lock(&cpu_bps_lock);
cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
for (i = 0; i < nr_wp_slots(); i++) {
if (!cpu_bp[i]) {
break;
}
}
+ spin_unlock(&cpu_bps_lock);
return 0;
}
struct breakpoint **cpu_bp;
int i = 0;
+ spin_lock(&cpu_bps_lock);
cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
for (i = 0; i < nr_wp_slots(); i++) {
if (!cpu_bp[i])
break;
}
}
+ spin_unlock(&cpu_bps_lock);
}
static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
struct breakpoint **cpu_bp;
+ bool ret = false;
int i;
+ spin_lock(&cpu_bps_lock);
cpu_bp = per_cpu_ptr(cpu_bps, cpu);
for (i = 0; i < nr_wp_slots(); i++) {
- if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
- return true;
+ if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+ ret = true;
+ break;
+ }
}
- return false;
+ spin_unlock(&cpu_bps_lock);
+ return ret;
}
static bool all_cpu_bps_check(struct perf_event *bp)
return false;
}
-/*
- * We don't use any locks to serialize accesses to cpu_bps or task_bps
- * because are already inside nr_bp_mutex.
- */
int arch_reserve_bp_slot(struct perf_event *bp)
{
int ret;
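The removed comment documented the old assumption that every access to cpu_bps and task_bps already happened under the core's nr_bp_mutex. The new comment added next to the lock definitions states that this file can no longer rely on kernel/events/hw_breakpoint.c to synchronize these internals, so cpu_bps_lock and task_bps_lock now provide that serialization locally.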