// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <linux/pkeys.h>

#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
#include <asm/inst.h>

/*
 * do_page_fault error handling helpers
 */
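
/*
 * Shared convention: each helper returns 0 once the fault has been fully
 * handled (a signal was queued for the user task), or the signal number
 * for a kernel-mode fault that the caller must hand to bad_page_fault().
 */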

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}

static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
				    struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	int pkey;

	/*
	 * We don't try to fetch the pkey from the page table because reading
	 * the page table without locking doesn't guarantee a stable pte value.
	 * Hence the pkey value that we return to userspace can be different
	 * from the pkey that actually caused the access error.
	 *
	 * It does *not* guarantee that the VMA we find here
	 * was the one that we faulted on.
	 *
	 * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
	 * 2. T1 : set AMR to deny access to pkey=4, touches page
	 * 3. T1 : faults...
	 * 4. T2 : mprotect_key(foo, PAGE_SIZE, pkey=5);
	 * 5. T1 : enters fault handler, takes mmap_lock, etc...
	 * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
	 *	   faulted on a pte with its pkey=4.
	 */
	pkey = vma_pkey(vma);

	mmap_read_unlock(mm);

	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception_pkey(regs, address, pkey);

	return 0;
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}

static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     vm_fault_t fault)
{
	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		unsigned int lsb = 0; /* shut up gcc */

		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);

		if (fault & VM_FAULT_HWPOISON_LARGE)
			lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
		if (fault & VM_FAULT_HWPOISON)
			lsb = PAGE_SHIFT;

		force_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb);
		return 0;
	}

#endif
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
	return 0;
}
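
/*
 * mm_fault_error() maps the VM_FAULT_* error bits returned by
 * handle_mm_fault() onto an action: OOM handling, SIGBUS (including the
 * hwpoison cases via do_sigbus() above), or SIGSEGV.
 */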
static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
			  vm_fault_t fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}
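
/*
 * KUAP (Kernel Userspace Access Prevention) background, as a sketch:
 * stray kernel loads/stores to user memory fault unless the accessor
 * opened a user-access window first, roughly:
 *
 *	allow_read_write_user(to, from, size);		// unlock user access
 *	... copy ...
 *	prevent_read_write_user(to, from, size);	// lock it again
 *
 * Such accessors are also covered by the exception table, which is why a
 * KUAP-blocked fault with no extable entry for regs->nip is treated as
 * fatal below.
 */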
/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
			     unsigned long address, bool is_write)
{
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;

	if (is_exec) {
		pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
				    address >= TASK_SIZE ? "exec-protected" : "user",
				    address,
				    from_kuid(&init_user_ns, current_uid()));

		// Kernel exec fault is always bad
		return true;
	}

	// Kernel fault on kernel address is bad
	if (address >= TASK_SIZE)
		return true;

	// Read/write fault blocked by KUAP is bad, it can never succeed.
	if (bad_kuap_fault(regs, address, is_write)) {
		pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
				    is_write ? "write" : "read", address,
				    from_kuid(&init_user_ns, current_uid()));

		// Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
		if (!search_exception_tables(regs->nip))
			return true;

		// Read/write fault in a valid region (the exception table search passed
		// above), but blocked by KUAP is bad, it can never succeed.
		return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
	}

	// What's left? Kernel fault on user and allowed by KUAP in the faulting context.
	return false;
}

static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
			      struct vm_area_struct *vma)
{
	/*
	 * Make sure to check the VMA so that we do not perform
	 * faults just to hit a pkey fault as soon as we fill in a
	 * page. Only called for current mm, hence foreign == 0
	 */
	if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
		return true;

	return false;
}
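
/*
 * access_error() validates the attempted access against vma->vm_flags.
 * Protection-key checks are deliberately left to handle_mm_fault() so
 * they are not done twice (see the comment at the end of the function).
 */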
static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	/*
	 * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as
	 * defined in protection_map[]. Read faults can only be caused by
	 * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix.
	 */
	if (unlikely(!vma_is_accessible(vma)))
		return true;

	if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)))
		return true;

	/*
	 * We should ideally do the vma pkey access check here. But in the
	 * fault path, handle_mm_fault() also does the same check. To avoid
	 * these multiple checks, we skip it here and handle access error due
	 * to pkeys later.
	 */
	return false;
}
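
/*
 * CMO (Cooperative Memory Overcommit) accounting on shared-processor
 * LPARs: major page-ins are reported to the hypervisor through a
 * big-endian counter in the lppaca; PAGE_FACTOR converts one kernel page
 * into the number of 4K hardware pages the hypervisor counts in.
 */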
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */
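
/*
 * sanity_check_fault() is purely diagnostic: it rate-limits a warning
 * about suspicious user accesses to kernel addresses and warns once about
 * unexpected DSISR_PROTFAULTs on hash MMUs. It never changes how the
 * fault is handled.
 */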
static void sanity_check_fault(bool is_write, bool is_user,
			       unsigned long error_code, unsigned long address)
{
	/*
	 * Userspace trying to access kernel address, we get PROTFAULT for that.
	 */
	if (is_user && address >= TASK_SIZE) {
		if ((long)address == -1)
			return;

		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
				    current->comm, current->pid, address,
				    from_kuid(&init_user_ns, current_uid()));
		return;
	}

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
		return;

	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to the pte that reduces access will result in
	 * us removing the hash page table entry, thus resulting in a
	 * DSISR_NOHPTE fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update that relaxes access will not invalidate the hash page
	 * table entry and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write case in the conditional below.
	 *
	 * For platforms that don't support a coherent icache but do support a
	 * per-page noexec bit, we set things up so that the D/I cache sync is
	 * done via a fault. But that is handled by the low level hash fault
	 * code (hash_page_do_lazy_icache()) and we should not reach here in
	 * that case.
	 *
	 * For a wrong access that can result in PROTFAULT, the vma->vm_flags
	 * check in access_error() should handle it and we should fall through
	 * to the bad_area handling correctly.
	 *
	 * For embedded processors with per-page exec support but no coherent
	 * icache we do get PROTFAULT and handle that D/I cache sync in
	 * set_pte_at() while taking the noexec/prot fault. Hence the WARN_ON
	 * is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because the
	 * radix page table will have the pages marked noaccess for user.
	 */
	if (radix_enabled() || is_write)
		return;

	WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}

/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#endif
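
/*
 * page_fault_is_bad() selects the status bits that flag a fault the MM
 * layer can never satisfy (e.g. no-execute/guarded violations on 8xx, the
 * DSISR_BAD_FAULT_64S set on 64-bit), so such faults are turned into
 * signals immediately rather than looked up in the page tables.
 */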
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
#define page_fault_is_bad(__err)	(0)
#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
static int page_fault_is_bad(unsigned long err)
{
	unsigned long flag = DSISR_BAD_FAULT_64S;

	/*
	 * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
	 * If byte 0, bit 3 of pi-attribute-specifier-type in
	 * ibm,pi-features property is defined, ignore the DSI error
	 * which is caused by the paste instruction on the
	 * suspended NX window.
	 */
	if (mmu_has_feature(MMU_FTR_NX_DSI))
		flag &= ~DSISR_BAD_COPYPASTE;

	return err & flag;
}
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif

/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault.
 * For 400-family processors the error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 * For 64-bit processors, the error_code parameter is DSISR for a data access
 * fault, SRR1 & 0x08000000 for an instruction access fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
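
/*
 * Overall flow: weed out faults that can never be fixed (bad status bits,
 * bad kernel faults, no usable mm), validate the access against the VMA,
 * then let handle_mm_fault() do the real work, retrying when it dropped
 * the mmap_lock and returned VM_FAULT_RETRY.
 */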
static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
			    unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	vm_fault_t fault, major = 0;
	bool kprobe_fault = kprobe_page_fault(regs, 11);

	if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}

	/* Additional sanity check(s) */
	sanity_check_fault(is_write, is_user, error_code, address);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address or a page fault to a user
	 * address outside of dedicated places.
	 */
	if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
		if (kfence_handle_page_fault(address, is_write, regs))
			return 0;

		return SIGSEGV;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}

	interrupt_cond_local_irq_enable(regs);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_lock, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_lock held.
	 */
	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table. lock_mm_and_find_vma() handles that logic.
	 */
retry:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (unlikely(!vma))
		return bad_area_nosemaphore(regs, address);

	if (unlikely(access_pkey_error(is_write, is_exec,
				       (error_code & DSISR_KEYFAULT), vma)))
		return bad_access_pkey(regs, address, vma);

	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

	major |= fault & VM_FAULT_MAJOR;

	if (fault_signal_pending(fault, regs))
		return user_mode(regs) ? 0 : SIGBUS;
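
	/*
	 * When fault_signal_pending() is true, handle_mm_fault() has already
	 * released the mmap_lock: a user task simply takes the signal and
	 * retries the faulting instruction afterwards, while an interrupted
	 * kernel access is reported as SIGBUS above.
	 */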
	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		goto out;

	/*
	 * Handle the retry right now, the mmap_lock has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	mmap_read_unlock(current->mm);

	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

out:
	/*
	 * Major/minor page fault accounting.
	 */
	if (major)
		cmo_account_page_fault();

	return 0;
}
NOKPROBE_SYMBOL(___do_page_fault);

static __always_inline void __do_page_fault(struct pt_regs *regs)
{
	long err;

	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
	if (unlikely(err))
		bad_page_fault(regs, err);
}

DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
	__do_page_fault(regs);
}
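
/*
 * regs->dar/regs->dsisr hold the faulting address and fault status. For
 * instruction faults the interrupt entry code is expected to have stashed
 * NIP and the relevant SRR1 bits there, which is what lets one entry
 * point serve both data and instruction storage interrupts.
 */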
#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
void hash__do_page_fault(struct pt_regs *regs)
{
	__do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
static void __bad_page_fault(struct pt_regs *regs, int sig)
{
	int is_write = page_fault_is_write(regs->dsisr);
	const char *msg;

	/* kernel has accessed a bad area */

	if (regs->dar < PAGE_SIZE)
		msg = "Kernel NULL pointer dereference";
	else
		msg = "Unable to handle kernel data access";

	switch (TRAP(regs)) {
	case INTERRUPT_DATA_STORAGE:
	case INTERRUPT_H_DATA_STORAGE:
		pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
			 is_write ? "write" : "read", regs->dar);
		break;
	case INTERRUPT_DATA_SEGMENT:
		pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
		break;
	case INTERRUPT_INST_STORAGE:
	case INTERRUPT_INST_SEGMENT:
		pr_alert("BUG: Unable to handle kernel instruction fetch%s",
			 regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
		break;
	case INTERRUPT_ALIGNMENT:
		pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
			 regs->dar);
		break;
	default:
		pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
			 regs->dar);
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}
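
/*
 * If the faulting instruction has an exception table entry (i.e. it is a
 * marked user accessor such as copy_tofrom_user()), resume at its fixup
 * handler; otherwise report the bad kernel access and die.
 */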
void bad_page_fault(struct pt_regs *regs, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault? */
	entry = search_exception_tables(instruction_pointer(regs));
	if (entry)
		instruction_pointer_set(regs, extable_fixup(entry));
	else
		__bad_page_fault(regs, sig);
}

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
{
	bad_page_fault(regs, SIGSEGV);
}

/*
 * In radix, segment interrupts indicate the EA is not addressable by the
 * page table geometry, so they are always sent here.
 *
 * In hash, this is called if do_slb_fault returns error. Typically it is
 * because the EA was outside the region allowed by software.
 */
DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
{
	int err = regs->result;

	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
		else
			bad_page_fault(regs, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}
#endif