// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	make_task_dead(SIGKILL);
}
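
/*
 * Handle the terminal vm_fault_t codes from handle_mm_fault(): OOM from
 * a user fault is passed to the OOM killer, SIGBUS is delivered to the
 * task, and either one in kernel mode falls back to no_context().
 */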
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}
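
/*
 * Vmalloc mappings are created only in the 'reference' page table,
 * init_mm.pgd; each task's root page table picks them up lazily, the
 * first time the mapping is touched. This handler performs that
 * synchronization, so it must not sleep or take locks.
 */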
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}
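
/*
 * Check the faulting access against the permissions of the vma that
 * contains it: an instruction fetch needs VM_EXEC, a load needs VM_READ
 * (or VM_WRITE, since a writable mapping is also readable here), and a
 * store needs VM_WRITE. Returns true if the access is not permitted.
 */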
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC)) {
			return true;
		}
		break;
	case EXC_LOAD_PAGE_FAULT:
		/* Write implies read */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE))) {
			return true;
		}
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE)) {
			return true;
		}
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	/* scause and stval were saved into pt_regs by the trap entry code. */
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
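
	/*
	 * Take mmap_lock for reading and look up the faulting vma. After a
	 * VM_FAULT_RETRY the lock has been dropped, so the retry path jumps
	 * back here to re-take it.
	 */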
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;
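
	/*
	 * VM_FAULT_RETRY means the fault handler dropped mmap_lock, e.g.
	 * to wait for page I/O; retry with FAULT_FLAG_TRIED set.
	 */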
	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
}