// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/hw_breakpoint.h>

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-rwsem.h>
#include <linux/percpu.h>
#include <linux/rhashtable.h>
#include <linux/sched.h>
#include <linux/slab.h>
/*
 * Datastructure to track the total uses of N slots across tasks or CPUs;
 * bp_slots_histogram::count[N] is the number of assigned N+1 breakpoint slots.
 */
struct bp_slots_histogram {
#ifdef hw_breakpoint_slots
	atomic_t count[hw_breakpoint_slots(0)];
#else
	atomic_t *count;
#endif
};
/*
 * Per-CPU constraints data.
 */
struct bp_cpuinfo {
	/* Number of pinned CPU breakpoints in a CPU. */
	unsigned int			cpu_pinned;
	/* Histogram of pinned task breakpoints in a CPU. */
	struct bp_slots_histogram	tsk_pinned;
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
	return per_cpu_ptr(bp_cpuinfo + type, cpu);
}
/* Number of pinned CPU breakpoints globally. */
static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
/* Number of pinned CPU-independent task breakpoints. */
static struct bp_slots_histogram tsk_pinned_all[TYPE_MAX];
/* Keep track of the breakpoints attached to tasks */
static struct rhltable task_bps_ht;
static const struct rhashtable_params task_bps_ht_params = {
	.head_offset = offsetof(struct hw_perf_event, bp_list),
	.key_offset = offsetof(struct hw_perf_event, target),
	.key_len = sizeof_field(struct hw_perf_event, target),
	.automatic_shrinking = true,
};
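/*
 * Illustrative note (exposition only): task_bps_ht is keyed by the raw
 * task_struct pointer value (hw_perf_event::target), so all breakpoints of
 * one task land in a single rhlist chain. task_bp_pinned() below walks that
 * chain as:
 *
 *	head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
 *	rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) { ... }
 */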
static bool constraints_initialized __ro_after_init;

/*
 * Synchronizes accesses to the per-CPU constraints; the locking rules are:
 *
 *  1. Atomic updates to bp_cpuinfo::tsk_pinned only require a held read-lock
 *     (due to bp_slots_histogram::count being atomic, no updates are lost).
 *
 *  2. Holding a write-lock is required for computations that require a
 *     stable snapshot of all bp_cpuinfo::tsk_pinned.
 *
 *  3. In all other cases, non-atomic accesses require the appropriately held
 *     lock (read-lock for read-only accesses; write-lock for reads/writes).
 */
DEFINE_STATIC_PERCPU_RWSEM(bp_cpuinfo_sem);
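/*
 * Sketch of the rules above using this file's helpers (for exposition only,
 * not a real call sequence): a histogram update per rule 1 only needs the
 * read-lock, while a stable max computation per rule 2 needs the write-lock:
 *
 *	percpu_down_read(&bp_cpuinfo_sem);
 *	bp_slots_histogram_add(&info->tsk_pinned, old, weight);  // rule 1
 *	percpu_up_read(&bp_cpuinfo_sem);
 *
 *	percpu_down_write(&bp_cpuinfo_sem);
 *	max = bp_slots_histogram_max_merge(&info->tsk_pinned,
 *					   &tsk_pinned_all[type], type); // rule 2
 *	percpu_up_write(&bp_cpuinfo_sem);
 */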
/*
 * Return mutex to serialize accesses to per-task lists in task_bps_ht. Since
 * rhltable synchronizes concurrent insertions/deletions, independent tasks may
 * insert/delete concurrently; therefore, a mutex per task is sufficient.
 *
 * Uses task_struct::perf_event_mutex, to avoid extending task_struct with a
 * hw_breakpoint-only mutex, which may be infrequently used. The caveat here is
 * that hw_breakpoint may contend with per-task perf event list management. The
 * assumption is that perf usecases involving hw_breakpoints are very unlikely
 * to result in unnecessary contention.
 */
static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	return tsk ? &tsk->perf_event_mutex : NULL;
}
static struct mutex *bp_constraints_lock(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx) {
		mutex_lock(tsk_mtx);
		percpu_down_read(&bp_cpuinfo_sem);
	} else {
		percpu_down_write(&bp_cpuinfo_sem);
	}

	return tsk_mtx;
}

static void bp_constraints_unlock(struct mutex *tsk_mtx)
{
	if (tsk_mtx) {
		percpu_up_read(&bp_cpuinfo_sem);
		mutex_unlock(tsk_mtx);
	} else {
		percpu_up_write(&bp_cpuinfo_sem);
	}
}
static bool bp_constraints_is_locked(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	return percpu_is_write_locked(&bp_cpuinfo_sem) ||
	       (tsk_mtx ? mutex_is_locked(tsk_mtx) :
			  percpu_is_read_locked(&bp_cpuinfo_sem));
}

static inline void assert_bp_constraints_lock_held(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx)
		lockdep_assert_held(tsk_mtx);
	lockdep_assert_held(&bp_cpuinfo_sem);
}
#ifdef hw_breakpoint_slots
/*
 * Number of breakpoint slots is constant, and the same for all types.
 */
static_assert(hw_breakpoint_slots(TYPE_INST) == hw_breakpoint_slots(TYPE_DATA));
static inline int hw_breakpoint_slots_cached(int type)	{ return hw_breakpoint_slots(type); }
static inline int init_breakpoint_slots(void)		{ return 0; }
#else
/*
 * Dynamic number of breakpoint slots.
 */
static int __nr_bp_slots[TYPE_MAX] __ro_after_init;

static inline int hw_breakpoint_slots_cached(int type)
{
	return __nr_bp_slots[type];
}
static __init bool
bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
	return hist->count;
}

static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
{
	kfree(hist->count);
}
static __init int init_breakpoint_slots(void)
{
	int i, cpu, err_cpu;

	for (i = 0; i < TYPE_MAX; i++)
		__nr_bp_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
				goto err;
		}
	}
	for (i = 0; i < TYPE_MAX; i++) {
		if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
			goto err;
		if (!bp_slots_histogram_alloc(&tsk_pinned_all[i], i))
			goto err;
	}

	return 0;
err:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}
	for (i = 0; i < TYPE_MAX; i++) {
		bp_slots_histogram_free(&cpu_pinned[i]);
		bp_slots_histogram_free(&tsk_pinned_all[i]);
	}

	return -ENOMEM;
}
#endif
static inline void
bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
{
	const int old_idx = old - 1;
	const int new_idx = old_idx + val;

	if (old_idx >= 0)
		WARN_ON(atomic_dec_return_relaxed(&hist->count[old_idx]) < 0);
	if (new_idx >= 0)
		WARN_ON(atomic_inc_return_relaxed(&hist->count[new_idx]) < 0);
}
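/*
 * Example of the histogram encoding (numbers are illustrative): if a task had
 * 1 pinned breakpoint and gains another (old == 1, val == 1), count[0] is
 * decremented and count[1] incremented, i.e. one fewer "uses 1 slot" entry
 * and one more "uses 2 slots" entry:
 *
 *	bp_slots_histogram_add(hist, 1, 1);	// count[0]--, count[1]++
 *	bp_slots_histogram_add(hist, 2, -1);	// undo: count[1]--, count[0]++
 */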
static int
bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		const int count = atomic_read(&hist->count[i]);

		/* Catch unexpected writers; we want a stable snapshot. */
		ASSERT_EXCLUSIVE_WRITER(hist->count[i]);
		if (count > 0)
			return i + 1;
		WARN(count < 0, "inconsistent breakpoint slots histogram");
	}

	return 0;
}
static int
bp_slots_histogram_max_merge(struct bp_slots_histogram *hist1, struct bp_slots_histogram *hist2,
			     enum bp_type_idx type)
{
	for (int i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		const int count1 = atomic_read(&hist1->count[i]);
		const int count2 = atomic_read(&hist2->count[i]);

		/* Catch unexpected writers; we want a stable snapshot. */
		ASSERT_EXCLUSIVE_WRITER(hist1->count[i]);
		ASSERT_EXCLUSIVE_WRITER(hist2->count[i]);
		if (count1 + count2 > 0)
			return i + 1;
		WARN(count1 < 0, "inconsistent breakpoint slots histogram");
		WARN(count2 < 0, "inconsistent breakpoint slots histogram");
	}

	return 0;
}
#ifndef hw_breakpoint_weight
static inline int hw_breakpoint_weight(struct perf_event *bp)
{
	return 1;
}
#endif

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{
	if (bp_type & HW_BREAKPOINT_RW)
		return TYPE_DATA;

	return TYPE_INST;
}
/*
 * Return the maximum number of pinned breakpoints a task has in this CPU.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
	struct bp_slots_histogram *tsk_pinned = &get_bp_info(cpu, type)->tsk_pinned;

	/*
	 * At this point we want to have acquired the bp_cpuinfo_sem as a
	 * writer to ensure that there are no concurrent writers in
	 * toggle_bp_slot() to tsk_pinned, and we get a stable snapshot.
	 */
	lockdep_assert_held_write(&bp_cpuinfo_sem);
	return bp_slots_histogram_max_merge(tsk_pinned, &tsk_pinned_all[type], type);
}
/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 *
 * If @cpu is -1, but the result of task_bp_pinned() is not CPU-independent,
 * returns a negative value.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
	struct rhlist_head *head, *pos;
	struct perf_event *iter;
	int count = 0;

	/*
	 * We need a stable snapshot of the per-task breakpoint list.
	 */
	assert_bp_constraints_lock_held(bp);

	rcu_read_lock();
	head = rhltable_lookup(&task_bps_ht, &bp->hw.target, task_bps_ht_params);
	if (!head)
		goto out;

	rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list) {
		if (find_slot_idx(iter->attr.bp_type) != type)
			continue;

		if (iter->cpu >= 0) {
			if (cpu == -1) {
				count = -1;
				goto out;
			} else if (cpu != iter->cpu)
				continue;
		}

		count += hw_breakpoint_weight(iter);
	}

out:
	rcu_read_unlock();
	return count;
}
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
	if (bp->cpu >= 0)
		return cpumask_of(bp->cpu);
	return cpu_possible_mask;
}
/*
 * Returns the max pinned breakpoint slots in a given
 * CPU (cpu > -1) or across all of them (cpu = -1).
 */
static int
max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
{
	const struct cpumask *cpumask = cpumask_of_bp(bp);
	int pinned_slots = 0;
	int cpu;

	if (bp->hw.target && bp->cpu < 0) {
		int max_pinned = task_bp_pinned(-1, bp, type);

		if (max_pinned >= 0) {
			/*
			 * Fast path: task_bp_pinned() is CPU-independent and
			 * returns the same value for any CPU.
			 */
			max_pinned += bp_slots_histogram_max(&cpu_pinned[type], type);
			return max_pinned;
		}
	}

	for_each_cpu(cpu, cpumask) {
		struct bp_cpuinfo *info = get_bp_info(cpu, type);
		int nr;

		nr = info->cpu_pinned;
		if (!bp->hw.target)
			nr += max_task_bp_pinned(cpu, type);
		else
			nr += task_bp_pinned(cpu, bp, type);

		pinned_slots = max(nr, pinned_slots);
	}

	return pinned_slots;
}
/*
 * Add/remove the given breakpoint in our constraint table
 */
static int
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight)
{
	int cpu, next_tsk_pinned;

	if (!enable)
		weight = -weight;

	if (!bp->hw.target) {
		/*
		 * Update the pinned CPU slots, in per-CPU bp_cpuinfo and in the
		 * global histogram.
		 */
		struct bp_cpuinfo *info = get_bp_info(bp->cpu, type);

		lockdep_assert_held_write(&bp_cpuinfo_sem);
		bp_slots_histogram_add(&cpu_pinned[type], info->cpu_pinned, weight);
		info->cpu_pinned += weight;
		return 0;
	}

	/*
	 * If bp->hw.target, tsk_pinned is only modified, but not used
	 * otherwise. We can permit concurrent updates as long as there are no
	 * other uses: having acquired bp_cpuinfo_sem as a reader allows
	 * concurrent updates here. Uses of tsk_pinned will require acquiring
	 * bp_cpuinfo_sem as a writer to stabilize tsk_pinned's value.
	 */
	lockdep_assert_held_read(&bp_cpuinfo_sem);

	/*
	 * Update the pinned task slots, in per-CPU bp_cpuinfo and in the global
	 * histogram. We need to take care of 4 cases:
	 *
	 *  1. This breakpoint targets all CPUs (cpu < 0), and there may only
	 *     exist other task breakpoints targeting all CPUs. In this case we
	 *     can simply update the global slots histogram.
	 *
	 *  2. This breakpoint targets a specific CPU (cpu >= 0), but there may
	 *     only exist other task breakpoints targeting all CPUs.
	 *
	 *	a. On enable: remove the existing breakpoints from the global
	 *	   slots histogram and use the per-CPU histogram.
	 *
	 *	b. On disable: re-insert the existing breakpoints into the global
	 *	   slots histogram and remove from per-CPU histogram.
	 *
	 *  3. Some other existing task breakpoints target specific CPUs. Only
	 *     update the per-CPU slots histogram.
	 */
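	/*
	 * Illustrative sketch of case 2.a (numbers are hypothetical): the task
	 * has one all-CPU breakpoint (weight 1) recorded only in
	 * tsk_pinned_all, and now enables a breakpoint pinned to CPU 2. The
	 * existing weight is first mirrored into every CPU's tsk_pinned, the
	 * new weight is added on CPU 2 only, and tsk_pinned_all is rebalanced
	 * so no slot is counted twice:
	 *
	 *	for_each_possible_cpu(cpu)
	 *		bp_slots_histogram_add(..->tsk_pinned, 0, 1);
	 *	bp_slots_histogram_add(&get_bp_info(2, type)->tsk_pinned, 1, 1);
	 *	bp_slots_histogram_add(&tsk_pinned_all[type], 1, -1);
	 */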
	if (!enable) {
		/*
		 * Remove before updating histograms so we can determine if this
		 * was the last task breakpoint for a specific CPU.
		 */
		int ret = rhltable_remove(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);

		if (ret)
			return ret;
	}

	/*
	 * Note: If !enable, next_tsk_pinned will not count the to-be-removed breakpoint.
	 */
	next_tsk_pinned = task_bp_pinned(-1, bp, type);

	if (next_tsk_pinned >= 0) {
		if (bp->cpu < 0) { /* Case 1: fast path */
			if (!enable)
				next_tsk_pinned += hw_breakpoint_weight(bp);
			bp_slots_histogram_add(&tsk_pinned_all[type], next_tsk_pinned, weight);
		} else if (enable) { /* Case 2.a: slow path */
			/* Add existing to per-CPU histograms. */
			for_each_possible_cpu(cpu) {
				bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
						       0, next_tsk_pinned);
			}
			/* Add this first CPU-pinned task breakpoint. */
			bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
					       next_tsk_pinned, weight);
			/* Rebalance global task pinned histogram. */
			bp_slots_histogram_add(&tsk_pinned_all[type], next_tsk_pinned,
					       -next_tsk_pinned);
		} else { /* Case 2.b: slow path */
			/* Remove this last CPU-pinned task breakpoint. */
			bp_slots_histogram_add(&get_bp_info(bp->cpu, type)->tsk_pinned,
					       next_tsk_pinned + hw_breakpoint_weight(bp), weight);
			/* Remove all from per-CPU histograms. */
			for_each_possible_cpu(cpu) {
				bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
						       next_tsk_pinned, -next_tsk_pinned);
			}
			/* Rebalance global task pinned histogram. */
			bp_slots_histogram_add(&tsk_pinned_all[type], 0, next_tsk_pinned);
		}
	} else { /* Case 3: slow path */
		const struct cpumask *cpumask = cpumask_of_bp(bp);

		for_each_cpu(cpu, cpumask) {
			next_tsk_pinned = task_bp_pinned(cpu, bp, type);
			if (!enable)
				next_tsk_pinned += hw_breakpoint_weight(bp);
			bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
					       next_tsk_pinned, weight);
		}
	}

	/*
	 * Readers want a stable snapshot of the per-task breakpoint list.
	 */
	assert_bp_constraints_lock_held(bp);

	if (enable)
		return rhltable_insert(&task_bps_ht, &bp->hw.bp_list, task_bps_ht_params);

	return 0;
}
__weak int arch_reserve_bp_slot(struct perf_event *bp)
{
	return 0;
}

__weak void arch_release_bp_slot(struct perf_event *bp)
{
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
}
/*
 * Constraints to check before allowing this new breakpoint counter.
 *
 * Note: Flexible breakpoints are currently unimplemented, but outlined in the
 * below algorithm for completeness. The implementation treats flexible as
 * pinned due to no guarantee that we currently always schedule flexible events
 * before a pinned event on the same CPU.
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register free (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
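/*
 * Worked example of the pinned check as implemented (numbers hypothetical):
 * with 4 slots of this type, a CPU that already has 1 CPU-pinned breakpoint
 * and a task maximum of 2 pinned task breakpoints yields
 * max_bp_pinned_slots() == 3. Reserving a new weight-1 breakpoint gives
 * 3 + 1 <= 4, so it is admitted; a further one would need 5 slots and fails
 * with -ENOSPC in __reserve_bp_slot() below.
 */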
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int max_pinned_slots;
	int weight;
	int ret;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)
		return -ENOMEM;

	/* Basic checks */
	if (bp_type == HW_BREAKPOINT_EMPTY ||
	    bp_type == HW_BREAKPOINT_INVALID)
		return -EINVAL;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	/* Check if this new breakpoint can be satisfied across all CPUs. */
	max_pinned_slots = max_bp_pinned_slots(bp, type) + weight;
	if (max_pinned_slots > hw_breakpoint_slots_cached(type))
		return -ENOSPC;

	ret = arch_reserve_bp_slot(bp);
	if (ret)
		return ret;

	return toggle_bp_slot(bp, true, type, weight);
}
int reserve_bp_slot(struct perf_event *bp)
{
	struct mutex *mtx = bp_constraints_lock(bp);
	int ret = __reserve_bp_slot(bp, bp->attr.bp_type);

	bp_constraints_unlock(mtx);
	return ret;
}
static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	arch_release_bp_slot(bp);

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);
	WARN_ON(toggle_bp_slot(bp, false, type, weight));
}

void release_bp_slot(struct perf_event *bp)
{
	struct mutex *mtx = bp_constraints_lock(bp);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp, bp->attr.bp_type);
	bp_constraints_unlock(mtx);
}
static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	int err;

	__release_bp_slot(bp, old_type);

	err = __reserve_bp_slot(bp, new_type);
	if (err) {
		/*
		 * Reserve the old_type slot back in case
		 * there's no space for the new type.
		 *
		 * This must succeed, because we just released
		 * the old_type slot in the __release_bp_slot
		 * call above. If not, something is broken.
		 */
		WARN_ON(__reserve_bp_slot(bp, old_type));
	}

	return err;
}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{
	struct mutex *mtx = bp_constraints_lock(bp);
	int ret = __modify_bp_slot(bp, old_type, new_type);

	bp_constraints_unlock(mtx);
	return ret;
}
/*
 * Allow the kernel debugger to reserve breakpoint slots without taking a lock,
 * using the dbg_* variants of the reserve and release breakpoint slot
 * functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
	int ret;

	if (bp_constraints_is_locked(bp))
		return -1;

	/* Locks aren't held; disable lockdep assert checking. */
	lockdep_off();
	ret = __reserve_bp_slot(bp, bp->attr.bp_type);
	lockdep_on();

	return ret;
}

int dbg_release_bp_slot(struct perf_event *bp)
{
	if (bp_constraints_is_locked(bp))
		return -1;

	/* Locks aren't held; disable lockdep assert checking. */
	lockdep_off();
	__release_bp_slot(bp, bp->attr.bp_type);
	lockdep_on();

	return 0;
}
static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{
	int err;

	err = hw_breakpoint_arch_parse(bp, attr, hw);
	if (err)
		return err;

	if (arch_check_bp_in_kernelspace(hw)) {
		if (attr->exclude_kernel)
			return -EINVAL;

		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
	}

	return 0;
}
int register_perf_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = reserve_bp_slot(bp);
	if (err)
		return err;

	err = hw_breakpoint_parse(bp, &bp->attr, &hw);
	if (err) {
		release_bp_slot(bp);
		return err;
	}

	bp->hw.info = hw;

	return 0;
}
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
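/*
 * Example (illustrative sketch only): set a 4-byte write watchpoint on a
 * task's address. The attribute is seeded with hw_breakpoint_init() from
 * <linux/hw_breakpoint.h>; "my_handler" and "addr" are hypothetical names.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	bp = register_user_hw_breakpoint(&attr, my_handler, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */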
static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{
	to->bp_addr = from->bp_addr;
	to->bp_type = from->bp_type;
	to->bp_len  = from->bp_len;
	to->disabled = from->disabled;
}
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
				bool check)
{
	struct arch_hw_breakpoint hw = { };
	int err;

	err = hw_breakpoint_parse(bp, attr, &hw);
	if (err)
		return err;

	if (check) {
		struct perf_event_attr old_attr;

		old_attr = bp->attr;
		hw_breakpoint_copy_attr(&old_attr, attr);
		if (memcmp(&old_attr, attr, sizeof(*attr)))
			return -EINVAL;
	}

	if (bp->attr.bp_type != attr->bp_type) {
		err = modify_bp_slot(bp, bp->attr.bp_type, attr->bp_type);
		if (err)
			return err;
	}

	hw_breakpoint_copy_attr(&bp->attr, attr);
	bp->hw.info = hw;

	return 0;
}
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	int err;

	/*
	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
	 * will not be possible to raise IPIs that invoke __perf_event_disable.
	 * So call the function directly after making sure we are targeting the
	 * current task.
	 */
	if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
		perf_event_disable_local(bp);
	else
		perf_event_disable(bp);

	err = modify_user_hw_breakpoint_check(bp, attr, false);

	if (!bp->attr.disabled)
		perf_event_enable(bp);

	return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
	if (!bp)
		return;
	perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data could be used in the triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{
	struct perf_event * __percpu *cpu_events, *bp;
	long err = 0;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);
		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			break;
		}

		per_cpu(*cpu_events, cpu) = bp;
	}
	cpus_read_unlock();

	if (likely(!err))
		return cpu_events;

	unregister_wide_hw_breakpoint(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
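/*
 * Example (illustrative sketch, loosely modeled on
 * samples/hw_breakpoint/data_breakpoint.c): watch write accesses to a kernel
 * address on every CPU. "wp_handler" and "addr" are hypothetical names.
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 */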
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
	int cpu;

	for_each_possible_cpu(cpu)
		unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

	free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
/**
 * hw_breakpoint_is_used - check if breakpoints are currently used
 *
 * Returns: true if breakpoints are used, false otherwise.
 */
bool hw_breakpoint_is_used(void)
{
	int cpu;

	if (!constraints_initialized)
		return false;

	for_each_possible_cpu(cpu) {
		for (int type = 0; type < TYPE_MAX; ++type) {
			struct bp_cpuinfo *info = get_bp_info(cpu, type);

			if (info->cpu_pinned)
				return true;

			for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
				if (atomic_read(&info->tsk_pinned.count[slot]))
					return true;
			}
		}
	}

	for (int type = 0; type < TYPE_MAX; ++type) {
		for (int slot = 0; slot < hw_breakpoint_slots_cached(type); ++slot) {
			/*
			 * Warn, because if there are CPU pinned counters, we
			 * should never get here; bp_cpuinfo::cpu_pinned should
			 * be consistent with the global cpu_pinned histogram.
			 */
			if (WARN_ON(atomic_read(&cpu_pinned[type].count[slot])))
				return true;

			if (atomic_read(&tsk_pinned_all[type].count[slot]))
				return true;
		}
	}

	return false;
}
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
	release_bp_slot(event);
}
static int hw_breakpoint_event_init(struct perf_event *bp)
{
	int err;

	if (bp->attr.type != PERF_TYPE_BREAKPOINT)
		return -ENOENT;

	/*
	 * no branch sampling for breakpoint events
	 */
	if (has_branch_stack(bp))
		return -EOPNOTSUPP;

	err = register_perf_hw_breakpoint(bp);
	if (err)
		return err;

	bp->destroy = bp_perf_event_destroy;

	return 0;
}
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	if (is_sampling_event(bp)) {
		bp->hw.last_period = bp->hw.sample_period;
		perf_swevent_set_period(bp);
	}

	return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
	arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
	bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
	bp->hw.state = PERF_HES_STOPPED;
}
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
};
int __init init_hw_breakpoint(void)
{
	int ret;

	ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
	if (ret)
		return ret;

	ret = init_breakpoint_slots();
	if (ret)
		return ret;

	constraints_initialized = true;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);
}