/*
 *  linux/arch/arm/mm/fault.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
#include <asm/tlbflush.h>

#include "fault.h"

#ifdef CONFIG_MMU

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        int ret = 0;

        if (!user_mode(regs)) {
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, fsr))
                        ret = 1;
                preempt_enable();
        }

        return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
{
        return 0;
}
#endif

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;

        if (!mm)
                mm = &init_mm;

        pr_alert("pgd = %p\n", mm->pgd);
        pgd = pgd_offset(mm, addr);
        pr_alert("[%08lx] *pgd=%08llx",
                        addr, (long long)pgd_val(*pgd));

        do {
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                if (pgd_none(*pgd))
                        break;

                if (pgd_bad(*pgd)) {
                        pr_cont("(bad)");
                        break;
                }

                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
                        pr_cont(", *pud=%08llx", (long long)pud_val(*pud));

                if (pud_none(*pud))
                        break;

                if (pud_bad(*pud)) {
                        pr_cont("(bad)");
                        break;
                }

                pmd = pmd_offset(pud, addr);
                if (PTRS_PER_PMD != 1)
                        pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));

                if (pmd_none(*pmd))
                        break;

                if (pmd_bad(*pmd)) {
                        pr_cont("(bad)");
                        break;
                }

                /* We must not map this if we have highmem enabled */
                if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
                        break;

                pte = pte_offset_map(pmd, addr);
                pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
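                /*
                 * With the classic 2-level tables each Linux PTE table is
                 * followed, PTE_HWTABLE_PTRS entries later, by the hardware
                 * copy of the same entries; dump that entry as well so both
                 * views of the mapping are visible.
                 */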
                pr_cont(", *ppte=%08llx",
                       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
                pte_unmap(pte);
        } while(0);

        pr_cont("\n");
}
#else                                   /* CONFIG_MMU */
void show_pte(struct mm_struct *mm, unsigned long addr)
{ }
#endif                                  /* CONFIG_MMU */

/*
 * Oops.  The kernel tried to access some page that wasn't present.
 */
static void
__do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                  struct pt_regs *regs)
{
        /*
         * Are we prepared to handle this kernel fault?
         */
        if (fixup_exception(regs))
                return;

        /*
         * No handler, we'll have to terminate things with extreme prejudice.
         */
        bust_spinlocks(1);
        pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
                 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
                 "paging request", addr);

        show_pte(mm, addr);
        die("Oops", regs, fsr);
        bust_spinlocks(0);
        make_task_dead(SIGKILL);
}

/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
                unsigned int fsr, unsigned int sig, int code,
                struct pt_regs *regs)
{
        struct siginfo si;

        if (addr > TASK_SIZE)
                harden_branch_predictor();

        clear_siginfo(&si);

#ifdef CONFIG_DEBUG_USER
        if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
            ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
                printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
                       tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
#endif

        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        tsk->thread.trap_no = 14;
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}

void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->active_mm;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (user_mode(regs))
                __do_user_fault(tsk, addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
        else
                __do_kernel_fault(mm, addr, fsr, regs);
}

#ifdef CONFIG_MMU
#define VM_FAULT_BADMAP         0x010000
#define VM_FAULT_BADACCESS      0x020000
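
/*
 * The two codes above are local to this file: they live in bits above the
 * generic VM_FAULT_* flags, so __do_page_fault() can hand them back through
 * the same return value as handle_mm_fault() without any collision.
 */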

/*
 * Check that the permissions on the VMA allow for the fault which occurred.
 * If we encountered a write fault, we must have write permission, otherwise
 * we allow any permission.
 */
static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
{
        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;

        if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
                mask = VM_WRITE;
        if (fsr & FSR_LNX_PF)
                mask = VM_EXEC;

        return vma->vm_flags & mask ? false : true;
}
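
/*
 * In short, the required permission is derived from the fault status:
 *
 *      fault type                              required vm_flags
 *      -------------------------------         ----------------------------
 *      write, not cache maintenance            VM_WRITE
 *      (FSR_WRITE && !FSR_CM)
 *      instruction fetch (FSR_LNX_PF)          VM_EXEC
 *      anything else                           VM_READ | VM_WRITE | VM_EXEC
 */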

static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                unsigned int flags, struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault;

        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;
        if (unlikely(!vma))
                goto out;
        if (unlikely(vma->vm_start > addr))
                goto check_stack;

        /*
         * Ok, we have a good vm_area for this
         * memory access, so we can handle it.
         */
good_area:
        if (access_error(fsr, vma)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }

        return handle_mm_fault(vma, addr & PAGE_MASK, flags);

check_stack:
        /* Don't allow expansion below FIRST_USER_ADDRESS */
        if (vma->vm_flags & VM_GROWSDOWN &&
            addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}

static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        if (notify_page_fault(regs, fsr))
                return 0;

        tsk = current;
        mm  = tsk->mm;

        /* Enable interrupts if they were enabled in the parent context. */
        if (interrupts_enabled(regs))
                local_irq_enable();

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if ((fsr & FSR_WRITE) && !(fsr & FSR_CM))
                flags |= FAULT_FLAG_WRITE;

        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case, we'll have missed the might_sleep() from
                 * down_read()
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) &&
                    !search_exception_tables(regs->ARM_pc))
                        goto no_context;
#endif
        }

        fault = __do_page_fault(mm, addr, fsr, flags, tsk);

        /* If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because
         * it would already be released in __lock_page_or_retry in
         * mm/filemap.c. */
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
                if (!user_mode(regs))
                        goto no_context;
                return 0;
        }

        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                        regs, addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                        regs, addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /*
                         * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                         * of starvation.
                         */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);

        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;

        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed)
                 */
                pagefault_out_of_memory();
                return 0;
        }

        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
                 * successfully fix up this page fault.
                 */
                sig = SIGBUS;
                code = BUS_ADRERR;
        } else {
                /*
                 * Something tried to access memory that
                 * isn't in our memory map..
                 */
                sig = SIGSEGV;
                code = fault == VM_FAULT_BADACCESS ?
                        SEGV_ACCERR : SEGV_MAPERR;
        }

        __do_user_fault(tsk, addr, fsr, sig, code, regs);
        return 0;

no_context:
        __do_kernel_fault(mm, addr, fsr, regs);
        return 0;
}
#else                                   /* CONFIG_MMU */
static int
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 0;
}
#endif                                  /* CONFIG_MMU */

/*
 * First Level Translation Fault Handler
 *
 * We enter here because the first level page table doesn't contain
 * a valid entry for the address.
 *
 * If the address is in kernel space (>= TASK_SIZE), then we are
 * probably faulting in the vmalloc() area.
 *
 * If the init_task's first level page tables contain the relevant
 * entry, we copy it to this task.  If not, we send the process
 * a signal, fixup the exception, or oops the kernel.
 *
 * NOTE! We MUST NOT take any locks for this case. We may be in an
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
#ifdef CONFIG_MMU
static int __kprobes
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        unsigned int index;
        pgd_t *pgd, *pgd_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        if (addr < TASK_SIZE)
                return do_page_fault(addr, fsr, regs);

        if (user_mode(regs))
                goto bad_area;

        index = pgd_index(addr);

        pgd = cpu_get_pgd() + index;
        pgd_k = init_mm.pgd + index;

        if (pgd_none(*pgd_k))
                goto bad_area;
        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        pud = pud_offset(pgd, addr);
        pud_k = pud_offset(pgd_k, addr);

        if (pud_none(*pud_k))
                goto bad_area;
        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, addr);
        pmd_k = pmd_offset(pud_k, addr);

#ifdef CONFIG_ARM_LPAE
        /*
         * Only one hardware entry per PMD with LPAE.
         */
        index = 0;
#else
        /*
         * On ARM one Linux PGD entry contains two hardware entries (see page
         * tables layout in pgtable.h). We normally guarantee that we always
         * fill both L1 entries. But create_mapping() doesn't follow the rule.
         * It can create individual L1 entries, so here we have to do the
         * pmd_none() check on the entry that really corresponds to the
         * address, not on the first entry of the pair.
         */
        index = (addr >> SECTION_SHIFT) & 1;
#endif
        if (pmd_none(pmd_k[index]))
                goto bad_area;

        copy_pmd(pmd, pmd_k);
        return 0;

bad_area:
        do_bad_area(addr, fsr, regs);
        return 0;
}
#else                                   /* CONFIG_MMU */
static int
do_translation_fault(unsigned long addr, unsigned int fsr,
                     struct pt_regs *regs)
{
        return 0;
}
#endif                                  /* CONFIG_MMU */

/*
 * Some section permission faults need to be handled gracefully.
 * They can happen due to a __{get,put}_user during an oops.
 */
#ifndef CONFIG_ARM_LPAE
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        do_bad_area(addr, fsr, regs);
        return 0;
}
#endif /* CONFIG_ARM_LPAE */

/*
 * This abort handler always returns "fault".
 */
static int
do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        return 1;
}
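
/*
 * A handler's return value is the contract with the dispatchers below:
 * zero means the abort was handled, anything else makes do_DataAbort()
 * or do_PrefetchAbort() report an unhandled fault and raise a signal.
 */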

struct fsr_info {
        int     (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
        int     sig;
        int     code;
        const char *name;
};

/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif
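
/*
 * The included file provides the fsr_info[] and ifsr_info[] tables mapping
 * each fault status value to its handler, signal, si_code and name.
 */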

void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                int sig, int code, const char *name)
{
        if (nr < 0 || nr >= ARRAY_SIZE(fsr_info))
                BUG();

        fsr_info[nr].fn   = fn;
        fsr_info[nr].sig  = sig;
        fsr_info[nr].code = code;
        fsr_info[nr].name = name;
}
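
/*
 * As an illustration (hypothetical handler name), platform code can claim
 * a fault status slot at boot time, e.g.:
 *
 *      hook_fault_code(16 + 6, my_abort_handler, SIGBUS, BUS_OBJERR,
 *                      "imprecise external abort");
 *
 * my_abort_handler() then returns 0 to mark the abort as handled.
 */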

/*
 * Dispatch a data abort to the relevant handler.
 */
asmlinkage void __exception
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
        struct siginfo info;

        if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
                return;

        pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
                inf->name, fsr, addr);
        show_pte(current->mm, addr);

        clear_siginfo(&info);
        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm_notify_die("", regs, &info, fsr, 0);
}
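
/*
 * Note the FSR_LNX_PF convention: the bit is Linux-private and never set
 * by hardware. do_DataAbort() clears it and do_PrefetchAbort() sets it
 * before dispatching, so shared handlers such as do_page_fault() can tell
 * an instruction fetch apart from a data access (see access_error()).
 */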

void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                 int sig, int code, const char *name)
{
        if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
                BUG();

        ifsr_info[nr].fn   = fn;
        ifsr_info[nr].sig  = sig;
        ifsr_info[nr].code = code;
        ifsr_info[nr].name = name;
}

asmlinkage void __exception
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
        const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
        struct siginfo info;

        if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
                return;

        pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
                inf->name, ifsr, addr);

        clear_siginfo(&info);
        info.si_signo = inf->sig;
        info.si_errno = 0;
        info.si_code  = inf->code;
        info.si_addr  = (void __user *)addr;
        arm_notify_die("", regs, &info, ifsr, 0);
}

/*
 * Abort handler to be used only during first unmasking of asynchronous aborts
 * on the boot CPU. This makes sure that the machine will not die if the
 * firmware/bootloader left an imprecise abort pending for us to trip over.
 */
static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
                                      struct pt_regs *regs)
{
        pr_warn("Hit pending asynchronous external abort (FSR=0x%08x) during "
                "first unmask, this is most likely caused by a "
                "firmware/bootloader bug.\n", fsr);

        return 0;
}
void __init early_abt_enable(void)
{
        fsr_info[FSR_FS_AEA].fn = early_abort_handler;
        local_abt_enable();
        fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                hook_fault_code(4, do_translation_fault, SIGSEGV, SEGV_MAPERR,
                                "I-cache maintenance fault");
        }

        if (cpu_architecture() >= CPU_ARCH_ARMv7) {
                /*
                 * TODO: Access flag faults introduced in ARMv6K.
                 * Runtime check for 'K' extension is needed.
                 */
                hook_fault_code(3, do_bad, SIGSEGV, SEGV_MAPERR,
                                "section access flag fault");
                hook_fault_code(6, do_bad, SIGSEGV, SEGV_MAPERR,
                                "page access flag fault");
        }

        return 0;
}

arch_initcall(exceptions_init);
#endif