/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>

#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
struct fault_info {
        int     (*fn)(unsigned long addr, unsigned int esr,
                      struct pt_regs *regs);
        int     sig;
        int     code;
        const char *name;
};

static const struct fault_info fault_info[];
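/*
 * The fault status code in the low six bits of the ESR indexes the
 * 64-entry fault_info[] table defined at the bottom of this file.
 */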
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
        return fault_info + (esr & 63);
}
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
        int ret = 0;

        /* kprobe_running() needs smp_processor_id() */
        if (!user_mode(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, esr))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
        return 0;
}
#endif
/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
void show_pte(unsigned long addr)
{
        struct mm_struct *mm;
        pgd_t *pgd;

        if (addr < TASK_SIZE) {
                /* TTBR0 */
                mm = current->active_mm;
                if (mm == &init_mm) {
                        pr_alert("[%016lx] user address but active_mm is swapper\n",
                                 addr);
                        return;
                }
        } else if (addr >= VA_START) {
                /* TTBR1 */
                mm = &init_mm;
        } else {
                pr_alert("[%016lx] address between user and kernel address ranges\n",
                         addr);
                return;
        }
106 pr_alert("pgd = %p\n", mm->pgd);
107 pgd = pgd_offset(mm, addr);
108 pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
115 if (pgd_none(*pgd) || pgd_bad(*pgd))
118 pud = pud_offset(pgd, addr);
119 pr_cont(", *pud=%016llx", pud_val(*pud));
120 if (pud_none(*pud) || pud_bad(*pud))
123 pmd = pmd_offset(pud, addr);
124 pr_cont(", *pmd=%016llx", pmd_val(*pmd));
125 if (pmd_none(*pmd) || pmd_bad(*pmd))
128 pte = pte_offset_map(pmd, addr);
129 pr_cont(", *pte=%016llx", pte_val(*pte));
#ifdef CONFIG_ARM64_HW_AFDBM
/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        pteval_t old_pteval;
        unsigned int tmp;

        if (pte_same(*ptep, entry))
                return 0;
        /* only preserve the access flags and write permission */
        pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;

        /*
         * PTE_RDONLY is cleared by default in the asm below, so set it
         * back if necessary (read-only or clean PTE).
         */
        if (!pte_write(entry) || !pte_sw_dirty(entry))
                pte_val(entry) |= PTE_RDONLY;
        /*
         * Setting the flags must be done atomically to avoid racing with the
         * hardware update of the access/dirty state.
         */
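        /*
         * Load-exclusive/store-exclusive loop: if another agent updates the
         * PTE between the ldxr and the stxr, the store fails and the
         * sequence is retried.
         */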
        asm volatile("//        ptep_set_access_flags\n"
        "       prfm    pstl1strm, %2\n"
        "1:     ldxr    %0, %2\n"
        "       and     %0, %0, %3              // clear PTE_RDONLY\n"
        "       orr     %0, %0, %4              // set flags\n"
        "       stxr    %w1, %0, %2\n"
        "       cbnz    %w1, 1b\n"
        : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
        : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
        flush_tlb_fix_spurious_fault(vma, address);
        return 1;
}
#endif
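/*
 * An instruction abort taken at EL1 means the kernel branched to a bogus
 * address; there is no exception-table fixup for that, so it is always
 * fatal.
 */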
static bool is_el1_instruction_abort(unsigned int esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
/*
 * The kernel tried to access some page that wasn't present.
 */
static void __do_kernel_fault(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        /*
         * Are we prepared to handle this kernel fault?
         * We are almost certainly not prepared to handle instruction faults.
         */
        if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;

        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
        pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
                 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                 "paging request", addr);

        die("Oops", regs, esr);
}
/*
 * Something tried to access memory that isn't in our memory map. User mode
 * accesses just cause a SIGSEGV.
 */
static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
                            unsigned int esr, unsigned int sig, int code,
                            struct pt_regs *regs)
{
        struct siginfo si;
        const struct fault_info *inf;

        if (unhandled_signal(tsk, sig) && show_unhandled_signals_ratelimited()) {
                inf = esr_to_fault_info(esr);
                pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
                        tsk->comm, task_pid_nr(tsk), inf->name, sig,
                        addr, esr);
        }

        tsk->thread.fault_address = addr;
        tsk->thread.fault_code = esr;
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        const struct fault_info *inf;

        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
        if (user_mode(regs)) {
                inf = esr_to_fault_info(esr);
                __do_user_fault(tsk, addr, esr, inf->sig, inf->code, regs);
        } else
                __do_kernel_fault(addr, esr, regs);
}
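/*
 * Fault codes private to this file; the values sit outside the generic
 * VM_FAULT_* flags so that __do_page_fault() can return them alongside
 * the generic codes.
 */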
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
                           unsigned int mm_flags, unsigned long vm_flags,
                           struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault;

        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > addr))
                goto check_stack;

        /*
         * Ok, we have a good vm_area for this memory access, so we can handle
         * it.
         */
good_area:
        /*
         * Check that the permissions on the VMA allow for the fault which
         * occurred.
         */
        if (!(vma->vm_flags & vm_flags)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }

        return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);

check_stack:
        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}
static inline bool is_permission_fault(unsigned int esr)
{
        unsigned int ec       = ESR_ELx_EC(esr);
        unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

        return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
               (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}
static bool is_el0_instruction_abort(unsigned int esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}
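/*
 * Top-level handler for the faults the mm layer may be able to resolve:
 * translation, access flag and permission faults on user addresses.
 */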
static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                                   struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
        unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
        unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (notify_page_fault(regs, esr))
                return 0;

        tsk = current;
        mm  = tsk->mm;
        /*
         * If we're in an interrupt or have no user context, we must not take
         * the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                mm_flags |= FAULT_FLAG_USER;
        if (is_el0_instruction_abort(esr)) {
                /* EL0 instruction fetch: only VM_EXEC is required */
                vm_flags = VM_EXEC;
        } else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
                /* write that is not a cache maintenance operation */
                vm_flags = VM_WRITE;
                mm_flags |= FAULT_FLAG_WRITE;
        }
        if (is_permission_fault(esr) && (addr < TASK_SIZE)) {
                /* regs->orig_addr_limit may be 0 if we entered from EL0 */
                if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

                if (is_el1_instruction_abort(esr))
                        die("Attempting to execute userspace memory", regs, esr);

                if (!search_exception_tables(regs->pc))
                        die("Accessing user space memory outside uaccess.h routines", regs, esr);
        }
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->pc))
                        goto no_context;
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in which
                 * case, we'll have missed the might_sleep() from down_read().
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) && !search_exception_tables(regs->pc))
                        goto no_context;
#endif
        }
        fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);

        /*
         * If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because it
         * would already be released in __lock_page_or_retry in mm/filemap.c.
         */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                if (!user_mode(regs))
                        goto no_context;
                return 0;
        }
        /*
         * Major/minor page fault accounting is only done on the initial
         * attempt. If we go through a retry, it is extremely likely that the
         * page will be found in page cache at that point.
         */
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
                                      addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
                                      addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
                         * starvation.
                         */
                        mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        mm_flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
                              VM_FAULT_BADACCESS))))
                return 0;

        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;
439 if (fault & VM_FAULT_OOM) {
441 * We ran out of memory, call the OOM killer, and return to
442 * userspace (which will retry the fault, or kill us if we got
445 pagefault_out_of_memory();
449 if (fault & VM_FAULT_SIGBUS) {
451 * We had some memory, but were unable to successfully fix up
458 * Something tried to access memory that isn't in our memory
462 code = fault == VM_FAULT_BADACCESS ?
463 SEGV_ACCERR : SEGV_MAPERR;
466 __do_user_fault(tsk, addr, esr, sig, code, regs);
470 __do_kernel_fault(addr, esr, regs);
/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain a valid
 * entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are probably
 * faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant entry, we
 * copy it to this task. If not, we send the process a signal, fixup the
 * exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
 * or a critical region, and should only copy the information from the master
 * page table, nothing more.
 */
static int __kprobes do_translation_fault(unsigned long addr,
                                          unsigned int esr,
                                          struct pt_regs *regs)
{
        if (addr < TASK_SIZE)
                return do_page_fault(addr, esr, regs);

        do_bad_area(addr, esr, regs);
        return 0;
}
static int do_alignment_fault(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        do_bad_area(addr, esr, regs);
        return 0;
}
/*
 * This abort handler always returns "fault".
 */
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        return 1;
}
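/*
 * Indexed by the fault status code taken from the ESR; codes without a
 * dedicated handler fall back to do_bad() and an "unknown n" name.
 */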
static const struct fault_info fault_info[] = {
        { do_bad,               SIGBUS,  0,             "ttbr address size fault" },
        { do_bad,               SIGBUS,  0,             "level 1 address size fault" },
        { do_bad,               SIGBUS,  0,             "level 2 address size fault" },
        { do_bad,               SIGBUS,  0,             "level 3 address size fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 0 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 1 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 2 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,   "level 3 translation fault" },
        { do_bad,               SIGBUS,  0,             "unknown 8" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 access flag fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 access flag fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 access flag fault" },
        { do_bad,               SIGBUS,  0,             "unknown 12" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 1 permission fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 2 permission fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,   "level 3 permission fault" },
        { do_bad,               SIGBUS,  0,             "synchronous external abort" },
        { do_bad,               SIGBUS,  0,             "unknown 17" },
        { do_bad,               SIGBUS,  0,             "unknown 18" },
        { do_bad,               SIGBUS,  0,             "unknown 19" },
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous abort (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error" },
        { do_bad,               SIGBUS,  0,             "unknown 25" },
        { do_bad,               SIGBUS,  0,             "unknown 26" },
        { do_bad,               SIGBUS,  0,             "unknown 27" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "synchronous parity error (translation table walk)" },
        { do_bad,               SIGBUS,  0,             "unknown 32" },
        { do_alignment_fault,   SIGBUS,  BUS_ADRALN,    "alignment fault" },
        { do_bad,               SIGBUS,  0,             "unknown 34" },
        { do_bad,               SIGBUS,  0,             "unknown 35" },
        { do_bad,               SIGBUS,  0,             "unknown 36" },
        { do_bad,               SIGBUS,  0,             "unknown 37" },
        { do_bad,               SIGBUS,  0,             "unknown 38" },
        { do_bad,               SIGBUS,  0,             "unknown 39" },
        { do_bad,               SIGBUS,  0,             "unknown 40" },
        { do_bad,               SIGBUS,  0,             "unknown 41" },
        { do_bad,               SIGBUS,  0,             "unknown 42" },
        { do_bad,               SIGBUS,  0,             "unknown 43" },
        { do_bad,               SIGBUS,  0,             "unknown 44" },
        { do_bad,               SIGBUS,  0,             "unknown 45" },
        { do_bad,               SIGBUS,  0,             "unknown 46" },
        { do_bad,               SIGBUS,  0,             "unknown 47" },
        { do_bad,               SIGBUS,  0,             "TLB conflict abort" },
        { do_bad,               SIGBUS,  0,             "unknown 49" },
        { do_bad,               SIGBUS,  0,             "unknown 50" },
        { do_bad,               SIGBUS,  0,             "unknown 51" },
        { do_bad,               SIGBUS,  0,             "implementation fault (lockdown abort)" },
        { do_bad,               SIGBUS,  0,             "implementation fault (unsupported exclusive)" },
        { do_bad,               SIGBUS,  0,             "unknown 54" },
        { do_bad,               SIGBUS,  0,             "unknown 55" },
        { do_bad,               SIGBUS,  0,             "unknown 56" },
        { do_bad,               SIGBUS,  0,             "unknown 57" },
        { do_bad,               SIGBUS,  0,             "unknown 58" },
        { do_bad,               SIGBUS,  0,             "unknown 59" },
        { do_bad,               SIGBUS,  0,             "unknown 60" },
        { do_bad,               SIGBUS,  0,             "section domain fault" },
        { do_bad,               SIGBUS,  0,             "page domain fault" },
        { do_bad,               SIGBUS,  0,             "unknown 63" },
};
/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
                                         struct pt_regs *regs)
{
        const struct fault_info *inf = esr_to_fault_info(esr);
        struct siginfo info;

        if (!inf->fn(addr, esr, regs))
                return;

        pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
                 inf->name, esr, addr);

        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm64_notify_die("", regs, &info, esr);
}
asmlinkage void __exception do_el0_irq_bp_hardening(void)
{
        /* PC has already been checked in entry.S */
        arm64_apply_bp_hardening();
}
asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
                                                   unsigned int esr,
                                                   struct pt_regs *regs)
{
        /*
         * We've taken an instruction abort from userspace and not yet
         * re-enabled IRQs. If the address is a kernel address, apply
         * BP hardening prior to enabling IRQs and pre-emption.
         */
        if (addr > TASK_SIZE)
                arm64_apply_bp_hardening();

        local_irq_enable();
        do_mem_abort(addr, esr, regs);
}
/*
 * Handle stack alignment exceptions.
 */
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
                                           unsigned int esr,
                                           struct pt_regs *regs)
{
        struct siginfo info;
        struct task_struct *tsk = current;

        if (user_mode(regs)) {
                if (instruction_pointer(regs) > TASK_SIZE)
                        arm64_apply_bp_hardening();
        }

        if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
                pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
                                    tsk->comm, task_pid_nr(tsk),
                                    esr_get_class_string(esr), (void *)regs->pc,
                                    (void *)regs->sp);

        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code  = BUS_ADRALN;
        info.si_addr  = (void __user *)addr;
        arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
int __init early_brk64(unsigned long addr, unsigned int esr,
                       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
        { do_bad,       SIGTRAP,        TRAP_HWBKPT,    "hardware breakpoint" },
        { do_bad,       SIGTRAP,        TRAP_HWBKPT,    "hardware single-step" },
        { do_bad,       SIGTRAP,        TRAP_HWBKPT,    "hardware watchpoint" },
        { do_bad,       SIGBUS,         0,              "unknown 3" },
        { do_bad,       SIGTRAP,        TRAP_BRKPT,     "aarch32 BKPT" },
        { do_bad,       SIGTRAP,        0,              "aarch32 vector catch" },
        { early_brk64,  SIGTRAP,        TRAP_BRKPT,     "aarch64 BRK" },
        { do_bad,       SIGBUS,         0,              "unknown 7" },
};
void __init hook_debug_fault_code(int nr,
                                  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                                  int sig, int code, const char *name)
{
        BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

        debug_fault_info[nr].fn         = fn;
        debug_fault_info[nr].sig        = sig;
        debug_fault_info[nr].code       = code;
        debug_fault_info[nr].name       = name;
}
asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
                                              unsigned int esr,
                                              struct pt_regs *regs)
{
        const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
        unsigned long pc = instruction_pointer(regs);
        struct siginfo info;
        int rv;

        /*
         * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
         * already disabled to preserve the last enabled/disabled addresses.
         */
        if (interrupts_enabled(regs))
                trace_hardirqs_off();

        if (user_mode(regs) && pc > TASK_SIZE)
                arm64_apply_bp_hardening();

        if (!inf->fn(addr_if_watchpoint, esr, regs)) {
                rv = 1;
        } else {
                pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
                         inf->name, esr, pc);

                info.si_signo = inf->sig;
                info.si_errno = 0;
                info.si_code  = inf->code;
                info.si_addr  = (void __user *)pc;
                arm64_notify_die("", regs, &info, 0);
                rv = 0;
        }

        if (interrupts_enabled(regs))
                trace_hardirqs_on();

        return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
int cpu_enable_pan(void *__unused)
{
        /*
         * We modify PSTATE. This won't work from irq context as the PSTATE
         * is discarded once we return from the exception.
         */
        WARN_ON_ONCE(in_interrupt());

        config_sctlr_el1(SCTLR_EL1_SPAN, 0);
        asm(SET_PSTATE_PAN(1));
        return 0;
}
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_UAO
/*
 * Kernel threads have fs=KERNEL_DS by default, and don't need to call
 * set_fs(), devtmpfs in particular relies on this behaviour.
 * We need to enable the feature at runtime (instead of adding it to
 * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
 */
int cpu_enable_uao(void *__unused)
{
        asm(SET_PSTATE_UAO(1));
        return 0;
}
#endif /* CONFIG_ARM64_UAO */