// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/kfence.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

struct fault_info {
	int	(*fn)(unsigned long far, unsigned long esr,
		      struct pt_regs *regs);
	int	sig;
	int	code;
	const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
	return fault_info + (esr & ESR_ELx_FSC);
}

static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
	return debug_fault_info + DBG_ESR_EVT(esr);
}
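
/*
 * Illustrative sketch (not part of the original file): the FSC field is
 * the low six bits of the ESR, so the lookup above amounts to
 *
 *	const struct fault_info *inf = &fault_info[esr & 0x3f];
 *
 * i.e. a direct index into the 64-entry table defined later in this file.
 */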

static void data_abort_decode(unsigned long esr)
{
	unsigned long iss2 = ESR_ELx_ISS2(esr);

	pr_alert("Data abort info:\n");

	if (esr & ESR_ELx_ISV) {
		pr_alert("  Access size = %u byte(s)\n",
			 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
		pr_alert("  SSE = %lu, SRT = %lu\n",
			 (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
			 (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
		pr_alert("  SF = %lu, AR = %lu\n",
			 (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
			 (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
	} else {
		pr_alert("  ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n",
			 esr & ESR_ELx_ISS_MASK, iss2);
	}

	pr_alert("  CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n",
		 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
		 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT,
		 (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT,
		 (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT);

	pr_alert("  GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n",
		 (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT,
		 (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT,
		 (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT,
		 (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT);
}

static void mem_abort_decode(unsigned long esr)
{
	pr_alert("Mem abort info:\n");

	pr_alert("  ESR = 0x%016lx\n", esr);
	pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
		 ESR_ELx_EC(esr), esr_get_class_string(esr),
		 (esr & ESR_ELx_IL) ? 32 : 16);
	pr_alert("  SET = %lu, FnV = %lu\n",
		 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
		 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
	pr_alert("  EA = %lu, S1PTW = %lu\n",
		 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
		 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
	pr_alert("  FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
		 esr_to_fault_info(esr)->name);

	if (esr_is_data_abort(esr))
		data_abort_decode(esr);
}
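
/*
 * Worked example (illustrative value, not taken from a real log): an ESR
 * of 0x96000045 decodes above as EC = 0x25 (data abort from a lower EL),
 * IL = 32 bits, WnR = 1 and FSC = 0x05, i.e. a write that took a level 1
 * translation fault.
 */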

static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{
	/* Either init_pg_dir or swapper_pg_dir */
	if (mm == &init_mm)
		return __pa_symbol(mm->pgd);

	return (unsigned long)virt_to_phys(mm->pgd);
}

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
	struct mm_struct *mm;
	pgd_t *pgdp;
	pgd_t pgd;

	if (is_ttbr0_addr(addr)) {
		/* TTBR0 */
		mm = current->active_mm;
		if (mm == &init_mm) {
			pr_alert("[%016lx] user address but active_mm is swapper\n",
				 addr);
			return;
		}
	} else if (is_ttbr1_addr(addr)) {
		/* TTBR1 */
		mm = &init_mm;
	} else {
		pr_alert("[%016lx] address between user and kernel address ranges\n",
			 addr);
		return;
	}

	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 vabits_actual, mm_to_pgd_phys(mm));
	pgdp = pgd_offset(mm, addr);
	pgd = READ_ONCE(*pgdp);
	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

	do {
		p4d_t *p4dp, p4d;
		pud_t *pudp, pud;
		pmd_t *pmdp, pmd;
		pte_t *ptep, pte;

		if (pgd_none(pgd) || pgd_bad(pgd))
			break;

		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		pr_cont(", p4d=%016llx", p4d_val(p4d));
		if (p4d_none(p4d) || p4d_bad(p4d))
			break;

		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		pr_cont(", pud=%016llx", pud_val(pud));
		if (pud_none(pud) || pud_bad(pud))
			break;

		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		pr_cont(", pmd=%016llx", pmd_val(pmd));
		if (pmd_none(pmd) || pmd_bad(pmd))
			break;

		ptep = pte_offset_map(pmdp, addr);
		if (!ptep)
			break;

		pte = READ_ONCE(*ptep);
		pr_cont(", pte=%016llx", pte_val(pte));
		pte_unmap(ptep);
	} while(0);

	pr_cont("\n");
}

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	pteval_t old_pteval, pteval;
	pte_t pte = READ_ONCE(*ptep);

	if (pte_same(pte, entry))
		return 0;

	/* only preserve the access flags and write permission */
	pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

	/*
	 * Setting the flags must be done atomically to avoid racing with the
	 * hardware update of the access/dirty state. The PTE_RDONLY bit must
	 * be set to the most permissive (lowest value) of *ptep and entry
	 * (calculated as: a & b == ~(~a | ~b)).
	 */
	pte_val(entry) ^= PTE_RDONLY;
	pteval = pte_val(pte);
	do {
		old_pteval = pteval;
		pteval ^= PTE_RDONLY;
		pteval |= pte_val(entry);
		pteval ^= PTE_RDONLY;
		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
	} while (pteval != old_pteval);

	/* Invalidate a stale read-only entry */
	if (dirty)
		flush_tlb_page(vma, address);
	return 1;
}
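
/*
 * A minimal sketch of the PTE_RDONLY merge above (illustrative, not part
 * of the original file): with the bit inverted on the way in and out,
 *
 *	merged = ((old ^ PTE_RDONLY) | (new ^ PTE_RDONLY)) ^ PTE_RDONLY;
 *
 * yields old & new on the PTE_RDONLY bit (the more permissive, clear
 * value wins) while every other flag bit is simply OR'ed together.
 */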

static bool is_el1_instruction_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static bool is_el1_data_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
					   struct pt_regs *regs)
{
	unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;

	if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
		return false;

	if (fsc_type == ESR_ELx_FSC_PERM)
		return true;

	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
		return fsc_type == ESR_ELx_FSC_FAULT &&
			(regs->pstate & PSR_PAN_BIT);

	return false;
}

static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
							unsigned long esr,
							struct pt_regs *regs)
{
	unsigned long flags;
	u64 par, dfsc;

	if (!is_el1_data_abort(esr) ||
	    (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
		return false;

	local_irq_save(flags);
	asm volatile("at s1e1r, %0" :: "r" (addr));
	isb();
	par = read_sysreg_par();
	local_irq_restore(flags);

	/*
	 * If we now have a valid translation, treat the translation fault as
	 * spurious.
	 */
	if (!(par & SYS_PAR_EL1_F))
		return true;

	/*
	 * If we got a different type of fault from the AT instruction,
	 * treat the translation fault as spurious.
	 */
	dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
	return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
}

static void die_kernel_fault(const char *msg, unsigned long addr,
			     unsigned long esr, struct pt_regs *regs)
{
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
		 addr);

	kasan_non_canonical_hook(addr);

	mem_abort_decode(esr);

	show_pte(addr);
	die("Oops", regs, esr);
	bust_spinlocks(0);
	make_task_dead(SIGKILL);
}

#ifdef CONFIG_KASAN_HW_TAGS
static void report_tag_fault(unsigned long addr, unsigned long esr,
			     struct pt_regs *regs)
{
	/*
	 * SAS bits aren't set for all faults reported in EL1, so we can't
	 * find out access size.
	 */
	bool is_write = !!(esr & ESR_ELx_WNR);
	kasan_report((void *)addr, 0, is_write, regs->pc);
}
#else
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
static inline void report_tag_fault(unsigned long addr, unsigned long esr,
				    struct pt_regs *regs) { }
#endif

static void do_tag_recovery(unsigned long addr, unsigned long esr,
			   struct pt_regs *regs)
{
	report_tag_fault(addr, esr, regs);

	/*
	 * Disable MTE Tag Checking on the local CPU for the current EL.
	 * It will be done lazily on the other CPUs when they hit a
	 * tag fault.
	 */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
			 SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
	isb();
}

static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
	unsigned long fsc = esr & ESR_ELx_FSC;

	if (!is_el1_data_abort(esr))
		return false;

	if (fsc == ESR_ELx_FSC_MTE)
		return true;

	return false;
}

static bool is_translation_fault(unsigned long esr)
{
	return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}

static void __do_kernel_fault(unsigned long addr, unsigned long esr,
			      struct pt_regs *regs)
{
	const char *msg;

	/*
	 * Are we prepared to handle this kernel fault?
	 * We are almost certainly not prepared to handle instruction faults.
	 */
	if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
		return;

	if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
	    "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
		return;

	if (is_el1_mte_sync_tag_check_fault(esr)) {
		do_tag_recovery(addr, esr, regs);

		return;
	}

	if (is_el1_permission_fault(addr, esr, regs)) {
		if (esr & ESR_ELx_WNR)
			msg = "write to read-only memory";
		else if (is_el1_instruction_abort(esr))
			msg = "execute from non-executable memory";
		else
			msg = "read from unreadable memory";
	} else if (addr < PAGE_SIZE) {
		msg = "NULL pointer dereference";
	} else {
		if (is_translation_fault(esr) &&
		    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
			return;

		msg = "paging request";
	}

	if (efi_runtime_fixup_exception(regs, msg))
		return;

	die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned long esr)
{
	current->thread.fault_address = address;

	/*
	 * If the faulting address is in the kernel, we must sanitize the ESR.
	 * From userspace's point of view, kernel-only mappings don't exist
	 * at all, so we report them as level 0 translation faults.
	 * (This is not quite the way that "no mapping there at all" behaves:
	 * an alignment fault not caused by the memory type would take
	 * precedence over translation fault for a real access to empty
	 * space. Unfortunately we can't easily distinguish "alignment fault
	 * not caused by memory type" from "alignment fault caused by memory
	 * type", so we ignore this wrinkle and just return the translation
	 * fault.)
	 */
	if (!is_ttbr0_addr(current->thread.fault_address)) {
		switch (ESR_ELx_EC(esr)) {
		case ESR_ELx_EC_DABT_LOW:
			/*
			 * These bits provide only information about the
			 * faulting instruction, which userspace knows already.
			 * We explicitly clear bits which are architecturally
			 * RES0 in case they are given meanings in future.
			 * We always report the ESR as if the fault was taken
			 * to EL1 and so ISV and the bits in ISS[23:14] are
			 * clear. (In fact it always will be a fault to EL1.)
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
				ESR_ELx_CM | ESR_ELx_WNR;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		case ESR_ELx_EC_IABT_LOW:
			/*
			 * Claim a level 0 translation fault.
			 * All other bits are architecturally RES0 for faults
			 * reported with that DFSC value, so we clear them.
			 */
			esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
			esr |= ESR_ELx_FSC_FAULT;
			break;
		default:
			/*
			 * This should never happen (entry.S only brings us
			 * into this code for insn and data aborts from a lower
			 * exception level). Fail safe by not providing an ESR
			 * context record at all.
			 */
			WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
			esr = 0;
			break;
		}
	}

	current->thread.fault_code = esr;
}

static void do_bad_area(unsigned long far, unsigned long esr,
			struct pt_regs *regs)
{
	unsigned long addr = untagged_addr(far);

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (user_mode(regs)) {
		const struct fault_info *inf = esr_to_fault_info(esr);

		set_thread_esr(addr, esr);
		arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
	} else {
		__do_kernel_fault(addr, esr, regs);
	}
}

#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

static vm_fault_t __do_page_fault(struct mm_struct *mm,
				  struct vm_area_struct *vma, unsigned long addr,
				  unsigned int mm_flags, unsigned long vm_flags,
				  struct pt_regs *regs)
{
	/*
	 * Ok, we have a good vm_area for this memory access, so we can handle
	 * it.
	 * Check that the permissions on the VMA allow for the fault which
	 * occurred.
	 */
	if (!(vma->vm_flags & vm_flags))
		return VM_FAULT_BADACCESS;
	return handle_mm_fault(vma, addr, mm_flags, regs);
}

static bool is_el0_instruction_abort(unsigned long esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

/*
 * Note: not valid for EL1 DC IVAC, but we never use that in a way that
 * should fault. EL0 cannot issue DC IVAC (undef).
 */
static bool is_write_abort(unsigned long esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
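
/*
 * For instance (illustrative): an EL0 "dc civac" cache maintenance
 * operation reports WnR = 1 together with CM = 1 in the ESR, so the
 * check above deliberately classifies it as a read rather than a write
 * abort.
 */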

static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
				   struct pt_regs *regs)
{
	const struct fault_info *inf;
	struct mm_struct *mm = current->mm;
	vm_fault_t fault;
	unsigned long vm_flags;
	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
	unsigned long addr = untagged_addr(far);
	struct vm_area_struct *vma;

	if (kprobe_page_fault(regs, esr))
		return 0;

	/*
	 * If we're in an interrupt or have no user context, we must not take
	 * the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	/*
	 * vm_flags tells us what bits we must have in vma->vm_flags
	 * for the fault to be benign, __do_page_fault() would check
	 * vma->vm_flags & vm_flags and returns an error if the
	 * intersection is empty
	 */
	if (is_el0_instruction_abort(esr)) {
		/* It was exec fault */
		vm_flags = VM_EXEC;
		mm_flags |= FAULT_FLAG_INSTRUCTION;
	} else if (is_write_abort(esr)) {
		/* It was write fault */
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	} else {
		/* It was read fault */
		vm_flags = VM_READ;
		/* Write implies read */
		vm_flags |= VM_WRITE;
		/* If EPAN is absent then exec implies read */
		if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
			vm_flags |= VM_EXEC;
	}

	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
		if (is_el1_instruction_abort(esr))
			die_kernel_fault("execution of user memory",
					 addr, esr, regs);

		if (!search_exception_tables(regs->pc))
			die_kernel_fault("access to user memory outside uaccess routines",
					 addr, esr, regs);
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (!(mm_flags & FAULT_FLAG_USER))
		goto lock_mmap;

	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	if (!(vma->vm_flags & vm_flags)) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}
lock_mmap:

retry:
	vma = lock_mm_and_find_vma(mm, addr, regs);
	if (unlikely(!vma)) {
		fault = VM_FAULT_BADMAP;
		goto done;
	}

	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return 0;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		goto done;

	if (fault & VM_FAULT_RETRY) {
		mm_flags |= FAULT_FLAG_TRIED;
		goto retry;
	}
	mmap_read_unlock(mm);

done:
	/*
	 * Handle the "normal" (no error) case first.
	 */
	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
			      VM_FAULT_BADACCESS))))
		return 0;

	/*
	 * If we are in kernel mode at this point, we have no context to
	 * handle this fault with.
	 */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed).
		 */
		pagefault_out_of_memory();
		return 0;
	}

	inf = esr_to_fault_info(esr);
	set_thread_esr(addr, esr);
	if (fault & VM_FAULT_SIGBUS) {
		/*
		 * We had some memory, but were unable to successfully fix up
		 * this page fault.
		 */
		arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
	} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
		unsigned int lsb;

		lsb = PAGE_SHIFT;
		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

		arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
	} else {
		/*
		 * Something tried to access memory that isn't in our memory
		 * map.
		 */
		arm64_force_sig_fault(SIGSEGV,
				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
				      far, inf->name);
	}

	return 0;

no_context:
	__do_kernel_fault(addr, esr, regs);
	return 0;
}

static int __kprobes do_translation_fault(unsigned long far,
					  unsigned long esr,
					  struct pt_regs *regs)
{
	unsigned long addr = untagged_addr(far);

	if (is_ttbr0_addr(addr))
		return do_page_fault(far, esr, regs);

	do_bad_area(far, esr, regs);
	return 0;
}

static int do_alignment_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
	    compat_user_mode(regs))
		return do_compat_alignment_fixup(far, regs);
	do_bad_area(far, esr, regs);
	return 0;
}

static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	return 1; /* "fault" */
}

static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	const struct fault_info *inf;
	unsigned long siaddr;

	inf = esr_to_fault_info(esr);

	if (user_mode(regs) && apei_claim_sea(regs) == 0) {
		/*
		 * APEI claimed this as a firmware-first notification.
		 * Some processing deferred to task_work before ret_to_user().
		 */
		return 0;
	}

	if (esr & ESR_ELx_FnV) {
		siaddr = 0;
	} else {
		/*
		 * The architecture specifies that the tag bits of FAR_EL1 are
		 * UNKNOWN for synchronous external aborts. Mask them out now
		 * so that userspace doesn't see them.
		 */
		siaddr = untagged_addr(far);
	}
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

	return 0;
}

static int do_tag_check_fault(unsigned long far, unsigned long esr,
			      struct pt_regs *regs)
{
	/*
	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
	 * for tag check faults. Set them to corresponding bits in the untagged
	 * address.
	 */
	far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
	do_bad_area(far, esr, regs);
	return 0;
}
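
/*
 * Worked example (hypothetical FAR value): for a TTBR0 address carrying
 * MTE tag 0xa, a reported FAR of 0xfa00ffff12345678 becomes
 * 0x0a00ffff12345678 above: bits 63:60 are rebuilt from the untagged
 * (sign-extended) address, which is zero for a user address, while the
 * tag in bits 59:56 is preserved.
 */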

static const struct fault_info fault_info[] = {
	{ do_bad,		SIGKILL, SI_KERNEL,	"ttbr address size fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 1 address size fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 2 address size fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"level 3 address size fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 0 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 1 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 2 translation fault"	},
	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 8"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 12"			},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous external abort"	},
	{ do_tag_check_fault,	SIGSEGV, SEGV_MTESERR,	"synchronous tag check fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 18"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 19"			},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 (translation table walk)"	},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 (translation table walk)"	},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 (translation table walk)"	},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 (translation table walk)"	},
	{ do_sea,		SIGBUS,  BUS_OBJERR,	"synchronous parity or ECC error" },	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 25"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 26"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 27"			},
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 0 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 1 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 2 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_sea,		SIGKILL, SI_KERNEL,	"level 3 synchronous parity error (translation table walk)"	},	// Reserved when RAS is implemented
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 32"			},
	{ do_alignment_fault,	SIGBUS,  BUS_ADRALN,	"alignment fault"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 34"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 35"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 36"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 37"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 38"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 39"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 40"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 41"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 42"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 43"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 44"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 45"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 46"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 47"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"TLB conflict abort"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"Unsupported atomic hardware update fault"	},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 50"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 51"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"implementation fault (lockdown abort)" },
	{ do_bad,		SIGBUS,  BUS_OBJERR,	"implementation fault (unsupported exclusive)" },
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 54"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 55"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 56"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 57"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 58"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 59"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 60"			},
	{ do_bad,		SIGKILL, SI_KERNEL,	"section domain fault"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"page domain fault"		},
	{ do_bad,		SIGKILL, SI_KERNEL,	"unknown 63"			},
};

void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_fault_info(esr);
	unsigned long addr = untagged_addr(far);

	if (!inf->fn(far, esr, regs))
		return;

	if (!user_mode(regs))
		die_kernel_fault(inf->name, addr, esr, regs);

	/*
	 * At this point we have an unrecognized fault type whose tag bits may
	 * have been defined as UNKNOWN. Therefore we only expose the untagged
	 * address to the signal handler.
	 */
	arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
}
NOKPROBE_SYMBOL(do_mem_abort);

void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
	arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
			 addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware breakpoint"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware single-step"	},
	{ do_bad,	SIGTRAP,	TRAP_HWBKPT,	"hardware watchpoint"	},
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 3"		},
	{ do_bad,	SIGTRAP,	TRAP_BRKPT,	"aarch32 BKPT"		},
	{ do_bad,	SIGKILL,	SI_KERNEL,	"aarch32 vector catch"	},
	{ early_brk64,	SIGTRAP,	TRAP_BRKPT,	"aarch64 BRK"		},
	{ do_bad,	SIGKILL,	SI_KERNEL,	"unknown 7"		},
};

void __init hook_debug_fault_code(int nr,
				  int (*fn)(unsigned long, unsigned long, struct pt_regs *),
				  int sig, int code, const char *name)
{
	BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

	debug_fault_info[nr].fn		= fn;
	debug_fault_info[nr].sig	= sig;
	debug_fault_info[nr].code	= code;
	debug_fault_info[nr].name	= name;
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
			struct pt_regs *regs)
{
	const struct fault_info *inf = esr_to_debug_fault_info(esr);
	unsigned long pc = instruction_pointer(regs);

	debug_exception_enter(regs);

	if (user_mode(regs) && !is_ttbr0_addr(pc))
		arm64_apply_bp_hardening();

	if (inf->fn(addr_if_watchpoint, esr, regs)) {
		arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
	}

	debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);

/*
 * Used during anonymous page fault handling.
 */
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
						unsigned long vaddr)
{
	gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

	/*
	 * If the page is mapped with PROT_MTE, initialise the tags at the
	 * point of allocation and page zeroing as this is usually faster than
	 * separate DC ZVA and STGM.
	 */
	if (vma->vm_flags & VM_MTE)
		flags |= __GFP_ZEROTAGS;

	return vma_alloc_folio(flags, 0, vma, vaddr, false);
}

void tag_clear_highpage(struct page *page)
{
	/* Newly allocated page, shouldn't have been tagged yet */
	WARN_ON_ONCE(!try_page_mte_tagging(page));
	mte_zero_clear_page_tags(page_address(page));
	set_page_mte_tagged(page);
}