/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>

static inline bool notify_page_fault(struct pt_regs *regs)
{
	bool ret = false;

#ifdef CONFIG_KPROBES
	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = true;
		preempt_enable();
	}
#endif /* CONFIG_KPROBES */

	if (unlikely(debugger_fault_handler(regs)))
		ret = true;

	return ret;
}

/*
 * Check whether the instruction inst is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(unsigned int inst)
{
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case OP_STWU:
	case OP_STBU:
	case OP_STHU:
	case OP_STFSU:
	case OP_STFDU:
		return true;
	case OP_STD:	/* std or stdu */
		return (inst & 3) == 1;
	case OP_31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case OP_31_XOP_STDUX:
		case OP_31_XOP_STWUX:
		case OP_31_XOP_STBUX:
		case OP_31_XOP_STHUX:
		case OP_31_XOP_STFSUX:
		case OP_31_XOP_STFDUX:
			return true;
		}
	}
	return false;
}
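
/*
 * For example, "stwu r3,-32(r1)" encodes primary opcode 37 (OP_STWU)
 * with rA = 1, so store_updates_sp() returns true for it; the
 * non-update form "stw r3,-32(r1)" (opcode 36) leaves r1 unchanged,
 * so it returns false.
 */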

/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code,
		int pkey)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(SIGSEGV, regs, si_code, address, pkey);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR, 0);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code,
			int pkey)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code, pkey);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR, 0);
}

static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
				   int pkey)
{
	return __bad_area_nosemaphore(regs, address, SEGV_PKUERR, pkey);
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR, 0);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	siginfo_t info;
	unsigned int lsb = 0;

	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);
		info.si_code = BUS_MCEERR_AR;
	}

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
#endif
	info.si_addr_lsb = lsb;
	force_sig_info(SIGBUS, &info, current);
	return 0;
}

static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}
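
/*
 * Note that mm_fault_error() runs only after __do_page_fault() has
 * dropped mmap_sem, which is why it reports failures through
 * do_sigbus() and bad_area_nosemaphore() rather than the helpers
 * that release the semaphore themselves.
 */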

/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
			     unsigned long address)
{
	/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
				      DSISR_PROTFAULT))) {
		printk_ratelimited(KERN_CRIT "kernel tried to execute"
				   " exec-protected page (%lx) -"
				   "exploit attempt? (uid: %d)\n",
				   address, from_kuid(&init_user_ns,
						      current_uid()));
	}
	return is_exec || (address >= TASK_SIZE);
}
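
/*
 * In other words, any instruction fetch fault taken in kernel mode is
 * fatal (e.g. a branch through a corrupted function pointer), as is
 * any kernel-mode data fault on an address at or above TASK_SIZE.
 */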

// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
#define SIGFRAME_MAX_SIZE	(4096 + 128)

static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma, unsigned int flags,
				bool *must_retry)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes a bit over 4KB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		unsigned int __user *nip = (unsigned int __user *)regs->nip;
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
			return false;

		if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
		    access_ok(VERIFY_READ, nip, sizeof(*nip))) {
			unsigned int inst;
			int res;

			pagefault_disable();
			res = __get_user_inatomic(inst, nip);
			pagefault_enable();
			if (!res)
				return !store_updates_sp(inst);
			*must_retry = true;
		}
		return true;
	}
	return false;
}
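
/*
 * Summary of the contract: returning false lets the caller expand the
 * stack VMA down to this address; returning true makes it a SEGV,
 * except that when *must_retry has been set the caller instead drops
 * mmap_sem, faults the instruction page in, and retries the lookup.
 */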

static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;
	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}

#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */

#ifdef CONFIG_PPC_STD_MMU
static void sanity_check_fault(bool is_write, unsigned long error_code)
{
	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up so that the D/I cache
	 * sync happens via a fault. But that is handled by the low level
	 * hash fault code (hash_page_do_lazy_icache()) and we should not
	 * reach here in that case.
	 *
	 * For wrong accesses that can result in PROTFAULT, the vma->vm_flags
	 * check above should handle those and hence we should fall to the
	 * bad_area handling correctly.
	 *
	 * For embedded platforms with per-page exec support but without a
	 * coherent icache we do get PROTFAULT, and we handle that D/I cache
	 * sync in set_pte_at while taking the noexec/prot fault. Hence this
	 * WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because
	 * the radix page table will have the pages marked noaccess for user.
	 */
	if (!radix_enabled() && !is_write)
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, unsigned long error_code) { }
#endif /* CONFIG_PPC_STD_MMU */

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif
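
/*
 * For example, on Book3S a store to an unmapped user page raises a
 * data storage interrupt with DSISR_ISSTORE set, so
 * page_fault_is_write() evaluates true and the handler then demands
 * a writable VMA rather than merely a readable one.
 */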

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 for any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool must_retry = false;

	if (notify_page_fault(regs))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, error_code);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address.
	 */
	if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & DSISR_KEYFAULT)
		return bad_key_fault_exception(regs, address,
					       get_mm_addr_key(mm, address));

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, flags,
					 &must_retry))) {
		if (!must_retry)
			return bad_area(regs, address);

		up_read(&mm->mmap_sem);
		if (fault_in_pages_readable((const char __user *)regs->nip,
					    sizeof(unsigned int)))
			return bad_area_nosemaphore(regs, address);
		goto retry;
	}

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * we skipped checking for access error due to key earlier.
	 * Check that using handle_mm_fault error return.
	 */
	if (unlikely(fault & VM_FAULT_SIGSEGV) &&
	    !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {

		int pkey = vma_pkey(vma);

		up_read(&mm->mmap_sem);
		return bad_key_fault_exception(regs, address, pkey);
	}
#endif /* CONFIG_PPC_MEM_KEYS */

	major |= fault & VM_FAULT_MAJOR;

	/*
	 * Handle the retry right now, the mmap_sem has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception otherwise
		 * return to bad_page_fault
		 */
		return is_user ? 0 : SIGBUS;
	}

	up_read(&current->mm->mmap_sem);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);
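
/*
 * The exception_enter()/exception_exit() pair below keeps the context
 * tracking code (used by NO_HZ_FULL) informed that this CPU has taken
 * an exception, so RCU keeps watching the CPU for the duration of the
 * fault.
 */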

int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault?  */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */

	switch (TRAP(regs)) {
	case 0x300:
	case 0x380:
		pr_alert("BUG: %s at 0x%08lx\n",
			 regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
			 "Unable to handle kernel data access", regs->dar);
		break;
	case 0x400:
	case 0x480:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case 0x600:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}