// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/pgtable.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1

/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	/*
	 * If we're in an interrupt or have no user context,
	 * then must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
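
	/*
	 * find_vma() returned the first VMA ending above the address; it
	 * either covers the address already, or may be a stack mapping
	 * that can be grown down to cover it.
	 */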
	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;
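
	/* "cause" encodes the access type (ifetch/load/store) that faulted. */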
	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	fault = handle_mm_fault(vma, address, flags);
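
	/*
	 * If the fault was interrupted by a fatal signal while waiting,
	 * the process is being killed; don't bother finishing the fault.
	 */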
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_MAJOR)
				current->maj_flt++;
			else
				current->min_flt++;
			if (fault & VM_FAULT_RETRY) {
				flags &= ~FAULT_FLAG_ALLOW_RETRY;
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		up_read(&mm->mmap_sem);
		return;
	}
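
	/* handle_mm_fault() reported an error; drop the lock and work out how to fail. */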
	up_read(&mm->mmap_sem);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}
	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code  = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;
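
	/*
	 * Either no VMA covers the faulting address or the access violated
	 * the VMA's permissions: SIGSEGV for user mode, exception fixup or
	 * die() for kernel mode.
	 */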
bad_area:
	up_read(&mm->mmap_sem);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */
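
	/*
	 * No user context or kernel-mode fault: if the faulting instruction
	 * has an exception-table fixup (e.g. a user-copy routine), branch to
	 * it; otherwise the kernel has accessed a bad address.
	 */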
no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}
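
/*
 * Entry points reached from the hardware exception decode; each supplies
 * the canonical cause code that do_page_fault() expects.
 */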
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}