/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2012 Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address)
{
	force_sig_fault(si_signo, si_code, (void __user *)address);
}
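
/*
 * Note: force_sig_fault() always delivers the signal to the current
 * task, so the helper above is only valid from the context of the
 * faulting process itself.
 */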

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	pr_alert("pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
		 (u64)pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;
		if (pgd_bad(*pgd)) {
			pr_cont("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		if (PTRS_PER_P4D != 1)
			pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
				(u64)p4d_val(*p4d));
		if (p4d_none(*p4d))
			break;
		if (p4d_bad(*p4d)) {
			pr_cont("(bad)");
			break;
		}

		pud = pud_offset(p4d, addr);
		if (PTRS_PER_PUD != 1)
			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
				(u64)pud_val(*pud));
		if (pud_none(*pud))
			break;
		if (pud_bad(*pud)) {
			pr_cont("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
				(u64)pmd_val(*pmd));
		if (pmd_none(*pmd))
			break;
		if (pmd_bad(*pmd)) {
			pr_cont("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
			(u64)pte_val(*pte));
	} while (0);

	pr_cont("\n");
}
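
/*
 * Note: calling show_pte(NULL, addr) walks the live kernel page table
 * (via get_TTB(), falling back to swapper_pg_dir), which is what the
 * oops path below relies on when no mm is available.
 */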

static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		return NULL;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}
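
/*
 * Note: vmalloc and module mappings are created only in the reference
 * page table, init_mm.pgd. Each process pgd picks them up lazily: the
 * first access faults, and vmalloc_sync_one() copies the shared kernel
 * entries into the faulting pgd. Only the top-level entries are copied;
 * the lower-level tables themselves remain shared with init_mm.
 */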

#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
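
/*
 * Note: vmalloc_fault() returns 0 once it has repaired the fault and -1
 * when the address is out of range or the reference entry is missing;
 * do_page_fault() treats any value >= 0 as handled. Because no locks
 * are taken here, this path is safe to run from interrupt context.
 */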

static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
		 address < PAGE_SIZE ? "NULL pointer dereference"
				     : "paging request",
		 address);
	pr_alert("PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	mmap_read_unlock(mm);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	/* Release mmap_lock first if necessary */
	if (!(fault & VM_FAULT_RETRY))
		mmap_read_unlock(current->mm);

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			no_context(regs, error_code, address);
			return 1;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}
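
/*
 * Note: mm_fault_error() returns nonzero once the fault has been fully
 * dealt with (fatal signal pending, OOM handled, or SIGBUS/SIGSEGV
 * queued) and 0 when the caller should continue with its retry logic.
 */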

static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;

		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!vma_is_accessible(vma)))
		return 1;

	return 0;
}
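
/*
 * Note: error_code carries the SH fault code recorded by the TLB miss
 * handlers: FAULT_CODE_WRITE marks a store access and FAULT_CODE_ITLB
 * an instruction fetch, so the three checks above map write, execute
 * and read faults onto VM_WRITE, VM_EXEC and vma_is_accessible().
 */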

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}
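
/*
 * Note: everything at or above TASK_SIZE is treated as kernel space.
 * On sh32 that covers the TLB-mapped P3 area used for vmalloc (and,
 * with CONFIG_SH_STORE_QUEUES, the store queue window that extends
 * __FAULT_ADDR_LIMIT to P3_ADDR_MAX), which is why such faults go
 * through vmalloc_fault() instead of the mm lookup below.
 */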

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (kprobe_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(kprobe_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;
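
	/*
	 * Note: passing regs to handle_mm_fault() lets the core mm code
	 * do the PERF_COUNT_SW_PAGE_FAULTS_MAJ/MIN accounting itself, so
	 * only the overall PERF_COUNT_SW_PAGE_FAULTS event is counted
	 * above.
	 */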

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
}
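
/*
 * Note: FAULT_FLAG_TRIED tells the core fault code that this is a
 * repeat attempt, so on the common page-lock path it waits for the
 * page instead of dropping mmap_lock and returning VM_FAULT_RETRY
 * again.
 */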