return bp->overflow_handler == ptrace_triggered;
 }
 
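+/*
+ * Breakpoint bookkeeping: each installed event gets a struct breakpoint
+ * tagged with the infrastructure (ptrace or perf) that created it, so
+ * that conflicting requests from the two can be detected.
+ */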
+struct breakpoint {
+       struct list_head list;
+       struct perf_event *bp;
+       bool ptrace_bp;
+};
+
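+/* Breakpoints bound to a CPU: one slot per hardware watchpoint register */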
+static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
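+/* Breakpoints bound to a task, from ptrace or per-task perf events */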
+static LIST_HEAD(task_bps);
+
+static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
+{
+       struct breakpoint *tmp;
+
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return ERR_PTR(-ENOMEM);
+       tmp->bp = bp;
+       tmp->ptrace_bp = is_ptrace_bp(bp);
+       return tmp;
+}
+
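+/*
+ * The hardware matches watchpoints at HW_BREAKPOINT_SIZE granularity,
+ * so widen both ranges to that boundary before testing for overlap.
+ */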
+static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
+{
+       __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
+
+       bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
+       bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
+       bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
+       bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);
+
+       return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
+}
+
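+/* True if @b and @bp come from different infrastructures (ptrace vs. perf) */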
+static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
+{
+       return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
+}
+
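+/* ptrace and perf breakpoints must not watch overlapping address ranges */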
+static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
+{
+       return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
+}
+
+static int task_bps_add(struct perf_event *bp)
+{
+       struct breakpoint *tmp;
+
+       tmp = alloc_breakpoint(bp);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
+
+       list_add(&tmp->list, &task_bps);
+       return 0;
+}
+
+static void task_bps_remove(struct perf_event *bp)
+{
+       struct list_head *pos, *q;
+
+       list_for_each_safe(pos, q, &task_bps) {
+               struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
+
+               if (tmp->bp == bp) {
+                       list_del(&tmp->list);
+                       kfree(tmp);
+                       break;
+               }
+       }
+}
+
+/*
+ * If any task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool all_task_bps_check(struct perf_event *bp)
+{
+       struct breakpoint *tmp;
+
+       list_for_each_entry(tmp, &task_bps, list) {
+               if (!can_co_exist(tmp, bp))
+                       return true;
+       }
+       return false;
+}
+
+/*
+ * If the same task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool same_task_bps_check(struct perf_event *bp)
+{
+       struct breakpoint *tmp;
+
+       list_for_each_entry(tmp, &task_bps, list) {
+               if (tmp->bp->hw.target == bp->hw.target &&
+                   !can_co_exist(tmp, bp))
+                       return true;
+       }
+       return false;
+}
+
+static int cpu_bps_add(struct perf_event *bp)
+{
+       struct breakpoint **cpu_bp;
+       struct breakpoint *tmp;
+       int i;
+
+       tmp = alloc_breakpoint(bp);
+       if (IS_ERR(tmp))
+               return PTR_ERR(tmp);
+
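+       /*
+        * Claim the first free slot. The core hw_breakpoint code has
+        * already accounted for slot availability, so one should exist.
+        */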
+       cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+       for (i = 0; i < nr_wp_slots(); i++) {
+               if (!cpu_bp[i]) {
+                       cpu_bp[i] = tmp;
+                       break;
+               }
+       }
+       return 0;
+}
+
+static void cpu_bps_remove(struct perf_event *bp)
+{
+       struct breakpoint **cpu_bp;
+       int i;
+
+       cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+       for (i = 0; i < nr_wp_slots(); i++) {
+               if (!cpu_bp[i])
+                       continue;
+
+               if (cpu_bp[i]->bp == bp) {
+                       kfree(cpu_bp[i]);
+                       cpu_bp[i] = NULL;
+                       break;
+               }
+       }
+}
+
+static bool cpu_bps_check(int cpu, struct perf_event *bp)
+{
+       struct breakpoint **cpu_bp;
+       int i;
+
+       cpu_bp = per_cpu_ptr(cpu_bps, cpu);
+       for (i = 0; i < nr_wp_slots(); i++) {
+               if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
+                       return true;
+       }
+       return false;
+}
+
+static bool all_cpu_bps_check(struct perf_event *bp)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu) {
+               if (cpu_bps_check(cpu, bp))
+                       return true;
+       }
+       return false;
+}
+
+/*
+ * We don't use any locks to serialize accesses to cpu_bps or task_bps
+ * because we are already inside nr_bp_mutex.
+ */
+int arch_reserve_bp_slot(struct perf_event *bp)
+{
+       int ret;
+
+       /* ptrace breakpoint */
+       if (is_ptrace_bp(bp)) {
+               if (all_cpu_bps_check(bp))
+                       return -ENOSPC;
+
+               if (same_task_bps_check(bp))
+                       return -ENOSPC;
+
+               return task_bps_add(bp);
+       }
+
+       /* perf breakpoint */
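+       /* Kernel-address events can never clash with user-space ptrace bps */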
+       if (is_kernel_addr(bp->attr.bp_addr))
+               return 0;
+
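+       /*
+        * Per-task events are checked against that task's breakpoints,
+        * per-CPU events against every task's breakpoints. An event that
+        * is both task-bound and CPU-bound is tracked in both lists.
+        */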
+       if (bp->hw.target && bp->cpu == -1) {
+               if (same_task_bps_check(bp))
+                       return -ENOSPC;
+
+               return task_bps_add(bp);
+       } else if (!bp->hw.target && bp->cpu != -1) {
+               if (all_task_bps_check(bp))
+                       return -ENOSPC;
+
+               return cpu_bps_add(bp);
+       }
+
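+       /* Task-bound and CPU-bound: reserve in both lists, undo on failure */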
+       if (same_task_bps_check(bp))
+               return -ENOSPC;
+
+       ret = cpu_bps_add(bp);
+       if (ret)
+               return ret;
+       ret = task_bps_add(bp);
+       if (ret)
+               cpu_bps_remove(bp);
+
+       return ret;
+}
+
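+/* Undo the bookkeeping done by arch_reserve_bp_slot() */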
+void arch_release_bp_slot(struct perf_event *bp)
+{
+       if (!is_kernel_addr(bp->attr.bp_addr)) {
+               if (bp->hw.target)
+                       task_bps_remove(bp);
+               if (bp->cpu != -1)
+                       cpu_bps_remove(bp);
+       }
+}
+
 /*
  * Perform cleanup of arch-specific counters during unregistration
  * of the perf-event