// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/perf_event.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		regs->pc = fixup->fixup;

		return 1;
	}

	return 0;
}
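
/*
 * How an entry gets into the exception table (a minimal pseudo-asm
 * sketch, not csky's exact uaccess expansion): each potentially
 * faulting uaccess instruction is paired with a local error label.
 *
 *	1:	ldw	a0, (a1)	- may fault on a bad user pointer
 *		...
 *	.section __ex_table, "a"
 *	.long	1b, 2f			- a fault at 1b resumes at 2f
 *	.previous
 *	2:	- error path, e.g. return -EFAULT
 *
 * fixup_exception() above performs exactly that 1b -> 2f redirection
 * by rewriting regs->pc.
 */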

static inline bool is_write(struct pt_regs *regs)
{
	switch (trap_no(regs)) {
	case VEC_TLBINVALIDS:	/* store with no valid TLB entry */
		return true;
	case VEC_TLBMODIFIED:	/* store to a page not yet marked dirty */
		return true;
	}

	return false;
}

#ifdef CONFIG_CPU_HAS_LDSTEX
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	/* With hardware ldex/stex, cmpxchg needs no software fixup. */
	return;
}
#else
extern unsigned long csky_cmpxchg_ldw;
extern unsigned long csky_cmpxchg_stw;
static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
{
	if (trap_no(regs) != VEC_TLBMODIFIED)
		return;

	/* Restart the software cmpxchg sequence from its load. */
	if (instruction_pointer(regs) == csky_cmpxchg_stw)
		instruction_pointer_set(regs, csky_cmpxchg_ldw);
	return;
}
#endif
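
/*
 * Why the PC rewind above is needed (a sketch, not the exact asm):
 * without ldex/stex the cmpxchg trampoline is an ordinary
 * load/compare/store sequence, roughly
 *
 *	csky_cmpxchg_ldw:	ldw	t0, (addr)	- read current value
 *				cmpne	t0, old		- compare with expected
 *				...
 *	csky_cmpxchg_stw:	stw	new, (addr)	- may fault (TLBMODIFIED)
 *
 * If the stw faults, the value at addr may change while the fault is
 * serviced, so resuming at the stw could store 'new' over a value
 * that no longer matches 'old'. Restarting at the ldw redoes the
 * whole read-compare-store, keeping the emulated cmpxchg atomic.
 */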

static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	current->thread.trap_no = trap_no(regs);

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel paging request at virtual "
		 "addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
	die(regs, "Oops");
	make_task_dead(SIGKILL);
}
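
/*
 * Example of the two outcomes above: a copy_to_user() faulting on an
 * unmapped user address has a fixup entry and silently resumes on its
 * -EFAULT path, while a stray kernel pointer dereference has none and
 * oopses.
 */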

static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	current->thread.trap_no = trap_no(regs);

	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}
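
/*
 * Note on the two error classes above: VM_FAULT_OOM means reclaim
 * failed and the OOM killer is the last resort, while VM_FAULT_SIGBUS
 * typically means the access fell outside the object backing the
 * mapping, e.g. a load past the end of a file-backed mmap().
 */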

static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int offset;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	offset = pgd_index(addr);

	pgd = get_pgd() + offset;
	pgd_k = init_mm.pgd + offset;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	pud = (pud_t *)pgd;
	pud_k = (pud_t *)pgd_k;
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}
}
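
/*
 * Typical trigger for vmalloc_fault(): vmalloc()/ioremap() installs
 * new mappings only in the reference table, init_mm.pgd. A task whose
 * top-level table was populated earlier faults on first access and
 * lazily picks up the shared kernel entries here. Copying from the
 * master table needs no locking because entries for the vmalloc
 * region are only ever added at this level, never removed.
 */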

static inline bool access_error(struct pt_regs *regs, struct vm_area_struct *vma)
{
	if (is_write(regs)) {
		if (!(vma->vm_flags & VM_WRITE))
			return true;
	} else {
		if (unlikely(!vma_is_accessible(vma)))
			return true;
	}

	return false;
}
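
/*
 * Example: a store to a PROT_READ mapping arrives with is_write()
 * true and VM_WRITE clear, so access_error() returns true and the
 * fault is reported as SEGV_ACCERR rather than SEGV_MAPERR.
 */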

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr = read_mmu_entryhi() & PAGE_MASK;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

	tsk = current;
	mm = tsk->mm;

	csky_cmpxchg_fixup(regs);

	if (kprobe_page_fault(regs, tsk->thread.trap_no))
		return;
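
	/*
	 * Past the kprobes hand-off: kprobe_page_fault() claims faults
	 * raised while single-stepping the copied instruction of an
	 * active probe, so they never reach the VM paths below.
	 */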

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}
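
	/*
	 * From here on the faulting address is outside the vmalloc
	 * range: either a user-space access or a kernel bug that the
	 * checks below will catch.
	 */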

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->sr & BIT(6)))	/* PSR.IE: bit 6 of the status register */
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (is_write(regs))
		flags |= FAULT_FLAG_WRITE;
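
	/*
	 * FAULT_FLAG_WRITE lets handle_mm_fault() break copy-on-write
	 * and mark the PTE dirty/writable; a read fault on the same
	 * page could instead be satisfied with a shared, read-only
	 * mapping.
	 */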
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(regs, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			no_context(regs, addr);
		return;
	}

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}