arch/powerpc/mm/copro_fault.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CoProcessor (SPU/AFU) mm fault handler
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2007
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <asm/reg.h>
#include <asm/copro.h>
#include <asm/spu.h>
#include <misc/cxl-base.h>

/*
 * This ought to be kept in sync with the powerpc specific do_page_fault
 * function. Currently, there are a few corner cases that, fortunately, we
 * have not had to handle.
 */
int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
		unsigned long dsisr, vm_fault_t *flt)
{
	struct vm_area_struct *vma;
	unsigned long is_write;
	int ret;

	if (mm == NULL)
		return -EFAULT;

	if (mm->pgd == NULL)
		return -EFAULT;

	vma = lock_mm_and_find_vma(mm, ea, NULL);
	if (!vma)
		return -EFAULT;

	ret = -EFAULT;
	is_write = dsisr & DSISR_ISSTORE;
	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto out_unlock;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto out_unlock;
		/*
		 * PROT_NONE is covered by the VMA check above, and hash
		 * should get a NOHPTE fault instead of a PROTFAULT in case
		 * fixup is needed for things like autonuma.
		 */
		if (!radix_enabled())
			WARN_ON_ONCE(dsisr & DSISR_PROTFAULT);
	}

	ret = 0;
	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);

	/* The fault is fully completed (including releasing mmap lock) */
	if (*flt & VM_FAULT_COMPLETED)
		return 0;

	if (unlikely(*flt & VM_FAULT_ERROR)) {
		if (*flt & VM_FAULT_OOM) {
			ret = -ENOMEM;
			goto out_unlock;
		} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
			ret = -EFAULT;
			goto out_unlock;
		}
		BUG();
	}

out_unlock:
	mmap_read_unlock(mm);
	return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
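
/*
 * Illustrative sketch (not part of this file): how a coprocessor driver's
 * fault path might use copro_handle_mm_fault(). In-tree callers such as
 * spufs and cxl follow roughly this shape, but the function name and the
 * restart step below are hypothetical placeholders.
 *
 *	static int example_copro_fault(struct mm_struct *mm,
 *				       unsigned long ea, unsigned long dsisr)
 *	{
 *		vm_fault_t flt = 0;
 *		int ret;
 *
 *		ret = copro_handle_mm_fault(mm, ea, dsisr, &flt);
 *		if (ret)
 *			return ret;	// -EFAULT or -ENOMEM
 *
 *		// On success the page tables are populated; the driver
 *		// would now restart the stalled coprocessor translation.
 *		return 0;
 *	}
 */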

#ifdef CONFIG_PPC_64S_HASH_MMU
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
	u64 vsid, vsidkey;
	int psize, ssize;

	switch (get_region_id(ea)) {
	case USER_REGION_ID:
		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
		if (mm == NULL)
			return 1;
		psize = get_slice_psize(mm, ea);
		ssize = user_segment_size(ea);
		vsid = get_user_vsid(&mm->context, ea, ssize);
		vsidkey = SLB_VSID_USER;
		break;
	case VMALLOC_REGION_ID:
		pr_devel("%s: 0x%llx -- VMALLOC_REGION_ID\n", __func__, ea);
		psize = mmu_vmalloc_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case IO_REGION_ID:
		pr_devel("%s: 0x%llx -- IO_REGION_ID\n", __func__, ea);
		psize = mmu_io_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	case LINEAR_MAP_REGION_ID:
		pr_devel("%s: 0x%llx -- LINEAR_MAP_REGION_ID\n", __func__, ea);
		psize = mmu_linear_psize;
		ssize = mmu_kernel_ssize;
		vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
		vsidkey = SLB_VSID_KERNEL;
		break;
	default:
		pr_debug("%s: invalid region access at %016llx\n", __func__, ea);
		return 1;
	}
	/* Bad address */
	if (!vsid)
		return 1;

	vsid = (vsid << slb_vsid_shift(ssize)) | vsidkey;

	vsid |= mmu_psize_defs[psize].sllp |
		((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);

	slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
	slb->vsid = vsid;

	return 0;
}
EXPORT_SYMBOL_GPL(copro_calculate_slb);
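
/*
 * Illustrative sketch (not part of this file): on a segment miss a driver
 * would typically call copro_calculate_slb() and program the resulting
 * ESID/VSID pair into its unit's private SLB. The MMIO register layout
 * below is a hypothetical placeholder; real users (e.g. the SPU MFC or
 * the cxl PSL) each have their own way of loading segment entries.
 *
 *	static int example_load_segment(struct mm_struct *mm, u64 ea,
 *					void __iomem *slb_regs)
 *	{
 *		struct copro_slb slb;
 *
 *		if (copro_calculate_slb(mm, ea, &slb))
 *			return -EFAULT;	// invalid region or bad VSID
 *
 *		out_be64(slb_regs + 0, slb.esid);	// hypothetical layout
 *		out_be64(slb_regs + 8, slb.vsid);
 *		return 0;
 *	}
 */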

void copro_flush_all_slbs(struct mm_struct *mm)
{
#ifdef CONFIG_SPU_BASE
	spu_flush_all_slbs(mm);
#endif
	cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
#endif