/*
 * Page fault handler for SH with an MMU.
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2003 - 2012  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/fault.c:
 *   Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

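/*
 * Give any registered kprobe fault handler first crack at kernel-mode
 * faults. Returns 1 if a kprobe consumed the fault, 0 otherwise.
 * Preemption is disabled around the check so the per-CPU kprobe state
 * cannot change underneath us.
 */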
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

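/*
 * Thin wrapper around force_sig_fault(): deliver si_signo (SIGSEGV or
 * SIGBUS) to 'tsk' with the faulting address reported in si_addr.
 */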
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	force_sig_fault(si_signo, si_code, (void __user *)address, tsk);
}

/*
 * Dump out the page table hierarchy mapping 'addr' in mm 'mm',
 * walking from the PGD down to the PTE.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm) {
		pgd = mm->pgd;
	} else {
		pgd = get_TTB();

		if (unlikely(!pgd))
			pgd = swapper_pg_dir;
	}

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
		       (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

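/*
 * Copy the kernel (init_mm) mapping for 'address' into the page table
 * rooted at 'pgd'. Returns the kernel PMD on success, or NULL if the
 * kernel side has no mapping or the tables are already in sync (in
 * which case the fault must have some other cause).
 */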
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

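/*
 * With store queues enabled, faults on the store queue mappings (which
 * live above the regular vmalloc area) must be handled here as well,
 * so the upper bound of the fault window is widened accordingly.
 */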
#ifdef CONFIG_SH_STORE_QUEUES
#define __FAULT_ADDR_LIMIT	P3_ADDR_MAX
#else
#define __FAULT_ADDR_LIMIT	VMALLOC_END
#endif

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= VMALLOC_START && address < __FAULT_ADDR_LIMIT))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch.
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

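/*
 * Print the oops banner for an unhandled kernel-mode fault,
 * distinguishing NULL pointer dereferences from other bad paging
 * requests, then dump the page table entries for the address.
 */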
static void
show_fault_oops(struct pt_regs *regs, unsigned long address)
{
	if (!oops_may_print())
		return;

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %08lx\n", address);
	printk(KERN_ALERT "PC:");
	printk_address(regs->pc, 1);

	show_pte(NULL, address);
}

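/*
 * A fault in kernel mode with no user context to blame: try the
 * exception fixup tables and trapped I/O emulation first, and oops
 * only if neither can resolve the access.
 */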
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

	if (handle_trapped_io(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	show_fault_oops(regs, address);

	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
}

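/*
 * Report a bad access without mmap_sem held: SIGSEGV for user mode,
 * otherwise fall through to the kernel no_context() path.
 */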
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

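/*
 * The fault resolved to a bus error: drop mmap_sem, then either punt
 * to the kernel exception path or deliver SIGBUS to the current task.
 */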
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!user_mode(regs))
		no_context(regs, error_code, address);

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}

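/*
 * Sort out the error returns from handle_mm_fault(): fatal signals,
 * OOM, SIGBUS and SIGSEGV. Returns 1 if the fault has been fully
 * dealt with here, 0 if the caller should continue as normal.
 */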
static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, vm_fault_t fault)
{
	/*
	 * The pagefault was interrupted by SIGKILL; there is no reason
	 * to continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!user_mode(regs))
			no_context(regs, error_code, address);
		return 1;
	}

	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!user_mode(regs)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address);
			return 1;
		}
		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory: call the OOM killer and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & VM_FAULT_SIGBUS)
			do_sigbus(regs, error_code, address);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area(regs, error_code, address);
		else
			BUG();
	}

	return 1;
}

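/*
 * Check the access type encoded in error_code against the protection
 * bits of the VMA. Returns 1 if the access is not permitted.
 */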
static inline int access_error(int error_code, struct vm_area_struct *vma)
{
	if (error_code & FAULT_CODE_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* ITLB miss on NX page */
	if (unlikely((error_code & FAULT_CODE_ITLB) &&
		     !(vma->vm_flags & VM_EXEC)))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

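/*
 * User space ends at TASK_SIZE; anything at or above it is a kernel
 * address as far as fault handling is concerned.
 */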
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the faulting address
 * and the nature of the problem, then hands the fault off to the
 * appropriate helper routine.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long error_code,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * with pagefaults disabled then we must not take the fault:
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

retry:
	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	set_thread_fault_code(error_code);

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (error_code & FAULT_CODE_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
		if (mm_fault_error(regs, error_code, address, fault))
			return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
				      regs, address);
		} else {
			tsk->min_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
				      regs, address);
		}
		if (fault & VM_FAULT_RETRY) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to up_read(&mm->mmap_sem) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
}