/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/compat.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>

#include <asm/compat.h>
#include <asm/current.h>
#include <asm/debug-monitors.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
#include <asm/uaccess.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Currently stepping a per-CPU kernel breakpoint. */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break
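
/*
 * Sketch of one expansion (the real asm comes from AARCH64_DBG_READ in
 * <asm/debug-monitors.h>): READ_WB_REG_CASE(AARCH64_DBG_REG_BVR, 2,
 * AARCH64_DBG_REG_NAME_BVR, val) becomes roughly
 *
 *	case (AARCH64_DBG_REG_BVR + 2):
 *		asm volatile("mrs %0, dbgbvr2_el1" : "=r" (val));
 *		break;
 *
 * The GEN_*_CASES blocks below unroll into one case per numbered debug
 * register, since mrs/msr cannot take a runtime register index.
 */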

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)

static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}
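
/*
 * Callers index a register file as base + slot, e.g.
 * read_wb_reg(AARCH64_DBG_REG_WVR, 2) reads DBGWVR2_EL1 and
 * write_wb_reg(AARCH64_DBG_REG_BCR, 0, ctrl) programs DBGBCR0_EL1.
 */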

/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warning("invalid breakpoint privilege level %d\n", privilege);
		return -EINVAL;
	}
}

enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
	HW_BREAKPOINT_UNINSTALL,
	HW_BREAKPOINT_RESTORE
};

static int is_compat_bp(struct perf_event *bp)
{
	struct task_struct *tsk = bp->hw.target;

	/*
	 * tsk can be NULL for per-cpu (non-ptrace) breakpoints.
	 * In this case, use the native interface, since we don't have
	 * the notion of a "compat CPU" and could end up relying on
	 * deprecated behaviour if we use unaligned watchpoints in
	 * AArch64 state.
	 */
	return tsk && is_compat_thread(task_thread_info(tsk));
}

/**
 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
 *			      operations
 *
 * @slots: pointer to array of slots
 * @max_slots: max number of slots
 * @bp: perf_event to setup
 * @ops: operation to be carried out on the slot
 *
 * Return:
 *	slot index on success
 *	-ENOSPC if no slot is available/matches
 *	-EINVAL on wrong operations parameter
 */
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
				    struct perf_event *bp,
				    enum hw_breakpoint_ops ops)
{
	int i;
	struct perf_event **slot;

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];
		switch (ops) {
		case HW_BREAKPOINT_INSTALL:
			if (!*slot) {
				*slot = bp;
				return i;
			}
			break;
		case HW_BREAKPOINT_UNINSTALL:
			if (*slot == bp) {
				*slot = NULL;
				return i;
			}
			break;
		case HW_BREAKPOINT_RESTORE:
			if (*slot == bp)
				return i;
			break;
		default:
			pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
			return -EINVAL;
		}
	}

	return -ENOSPC;
}
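
/*
 * For example, two INSTALL calls on an empty array claim slots[0] and
 * slots[1] in turn; UNINSTALL of the first frees slots[0] for reuse,
 * while RESTORE only reports the index where @bp already lives and
 * never modifies the array.
 */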

static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		/* Fall through */
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
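
/*
 * Usage sketch (mirrors samples/hw_breakpoint/data_breakpoint.c; the
 * handler name is illustrative): kernel clients normally arrive here
 * via the generic perf layer rather than calling these hooks directly:
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)&some_kernel_symbol;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_8;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *	bp = register_wide_hw_breakpoint(&attr, wp_handler, NULL);
 *
 * which validates the attribute and then installs a slot on each CPU
 * through arch_install_hw_breakpoint().
 */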

static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
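
/*
 * Example: when a debugger reads hardware debug state through the
 * NT_ARM_HW_WATCH regset, the ptrace code pushes the control word
 * through here, so a 4-byte load/store watchpoint surfaces as
 * HW_BREAKPOINT_RW with HW_BREAKPOINT_LEN_4 in the perf_event_attr.
 */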

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_bp(bp)) {
			if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 *	  that breakpoints should be sizeof(long). This
			 *	  is nonsense. For now, we fix up the parameter
			 *	  but we should probably return -EINVAL instead.
			 */
			info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	info->address = bp->attr.bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		info->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_bp(bp)) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
		case 3:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
		default:
			return -EINVAL;
		}

		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}
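
/*
 * Worked example of the compat fixup above: a 1-byte AArch32 watchpoint
 * on address 0x1003 has offset == 3, so the address is rounded down to
 * 0x1000 and the length bits become ARM_BREAKPOINT_LEN_1 << 3 == 0b1000,
 * i.e. the byte-address-select still fires only for the byte at 0x1003.
 */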

/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}
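
/*
 * Note that only bit 0 (the enable bit) of each control register is
 * flipped here; the address match, byte-address-select and privilege
 * fields programmed at install time survive the toggle.
 */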

/*
 * Debug exception handlers.
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;
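
		/*
		 * Worked example: a Thumb breakpoint on PC 0x8002 is
		 * programmed with BVR = 0x8000 and byte-address-select
		 * 0b1100, so for addr == 0x8002 the test
		 * (1 << 2) & 0b1100 is non-zero and the slot matches,
		 * whereas a breakpoint on 0x8000 (select 0b0011) is
		 * correctly skipped.
		 */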

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
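
/*
 * The single-step dance above exists because, without a custom overflow
 * handler, returning to the triggering instruction would simply raise
 * the same debug exception again. The matching registers are therefore
 * disabled at the triggering exception level and re-armed once the step
 * completes, in reinstall_suspended_bps().
 */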

static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (untagged_addr(addr) & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}

/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = this_cpu_ptr(&stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}
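
/*
 * The per-CPU kernel_step flag is a small state machine:
 * ARM_KERNEL_STEP_ACTIVE means this file enabled the step itself and
 * must tear it down here, while ARM_KERNEL_STEP_SUSPEND means another
 * agent (e.g. a kernel debugger) was already single-stepping, so the
 * step is left enabled for its owner to handle.
 */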

/*
 * Context-switcher for restoring suspended breakpoints.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}

/*
 * CPU initialisation.
 */
static void hw_breakpoint_reset(void *unused)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}
}

static int hw_breakpoint_reset_notify(struct notifier_block *self,
						unsigned long action,
						void *hcpu)
{
	int cpu = (long)hcpu;
	if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
		smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};

#ifdef CONFIG_CPU_PM
extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
#else
static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
}
#endif

/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
arch_initcall(arch_hw_breakpoint_init);

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}