GNU Linux-libre 6.7.9-gnu
[releases.git] arch/riscv/mm/tlbflush.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>

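/*
 * Flush every TLB entry tagged with @asid on the local hart, or the
 * whole local TLB when no ASID is in use.
 */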
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
        if (asid != FLUSH_TLB_NO_ASID)
                __asm__ __volatile__ ("sfence.vma x0, %0"
                                :
                                : "r" (asid)
                                : "memory");
        else
                local_flush_tlb_all();
}

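/*
 * Flush the local TLB entry for a single page, restricted to @asid
 * when one is in use.
 */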
static inline void local_flush_tlb_page_asid(unsigned long addr,
                unsigned long asid)
{
        if (asid != FLUSH_TLB_NO_ASID)
                __asm__ __volatile__ ("sfence.vma %0, %1"
                                :
                                : "r" (addr), "r" (asid)
                                : "memory");
        else
                local_flush_tlb_page(addr);
}

/*
 * Flush the entire TLB if the number of entries to be flushed
 * exceeds the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;

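/*
 * Flush a range one stride at a time; fall back to a full per-ASID
 * flush once the number of entries crosses the threshold above.
 */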
static void local_flush_tlb_range_threshold_asid(unsigned long start,
                                                 unsigned long size,
                                                 unsigned long stride,
                                                 unsigned long asid)
{
        unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
        int i;

        if (nr_ptes_in_range > tlb_flush_all_threshold) {
                local_flush_tlb_all_asid(asid);
                return;
        }

        for (i = 0; i < nr_ptes_in_range; ++i) {
                local_flush_tlb_page_asid(start, asid);
                start += stride;
        }
}

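/*
 * Pick the cheapest local flush for the range: a single-page flush,
 * a full flush for FLUSH_TLB_MAX_SIZE, or the threshold-based loop.
 */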
static inline void local_flush_tlb_range_asid(unsigned long start,
                unsigned long size, unsigned long stride, unsigned long asid)
{
        if (size <= stride)
                local_flush_tlb_page_asid(start, asid);
        else if (size == FLUSH_TLB_MAX_SIZE)
                local_flush_tlb_all_asid(asid);
        else
                local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

static void __ipi_flush_tlb_all(void *info)
{
        local_flush_tlb_all();
}

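/*
 * Flush the TLBs of all online harts, by IPI when the platform uses
 * IPIs for remote fences, or through the SBI otherwise.
 */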
void flush_tlb_all(void)
{
        if (riscv_use_ipi_for_rfence())
                on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
        else
                sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

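/* Arguments handed to remote harts by the range-flush IPI. */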
struct flush_tlb_range_data {
        unsigned long asid;
        unsigned long start;
        unsigned long size;
        unsigned long stride;
};

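/* IPI callback: perform the requested range flush on this hart. */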
static void __ipi_flush_tlb_range_asid(void *info)
{
        struct flush_tlb_range_data *d = info;

        local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}

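/*
 * Common range-flush path. For a user mm, flush only the harts in
 * mm_cpumask() and carry the mm's ASID when the allocator is active;
 * a NULL mm denotes a kernel range and is flushed on all online harts.
 * Remote harts are reached by IPI or through the SBI.
 */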
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
                              unsigned long size, unsigned long stride)
{
        struct flush_tlb_range_data ftd;
        const struct cpumask *cmask;
        unsigned long asid = FLUSH_TLB_NO_ASID;
        bool broadcast;

        if (mm) {
                unsigned int cpuid;

                cmask = mm_cpumask(mm);
                if (cpumask_empty(cmask))
                        return;

                cpuid = get_cpu();
                /* check if the tlbflush needs to be sent to other CPUs */
                broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

                if (static_branch_unlikely(&use_asid_allocator))
                        asid = atomic_long_read(&mm->context.id) & asid_mask;
        } else {
                cmask = cpu_online_mask;
                broadcast = true;
        }

        if (broadcast) {
                if (riscv_use_ipi_for_rfence()) {
                        ftd.asid = asid;
                        ftd.start = start;
                        ftd.size = size;
                        ftd.stride = stride;
                        on_each_cpu_mask(cmask,
                                         __ipi_flush_tlb_range_asid,
                                         &ftd, 1);
                } else {
                        sbi_remote_sfence_vma_asid(cmask,
                                                   start, size, asid);
                }
        } else {
                local_flush_tlb_range_asid(start, size, stride, asid);
        }

        if (mm)
                put_cpu();
}

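/* Flush the whole user address space of @mm on every hart it has run on. */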
void flush_tlb_mm(struct mm_struct *mm)
{
        __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
                        unsigned long start, unsigned long end,
                        unsigned int page_size)
{
        __flush_tlb_range(mm, start, end - start, page_size);
}

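/* Flush the mapping of a single user page. */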
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}

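/*
 * Flush a user address range, using the VMA's page size as the stride
 * so hugetlb mappings take one flush per huge page (NAPOT aside, below).
 */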
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        unsigned long stride_size;

        if (!is_vm_hugetlb_page(vma)) {
                stride_size = PAGE_SIZE;
        } else {
                stride_size = huge_page_size(hstate_vma(vma));

                /*
                 * As stated in the privileged specification, every PTE in a
                 * NAPOT region must be invalidated, so reset the stride in that
                 * case.
                 */
                if (has_svnapot()) {
                        if (stride_size >= PGDIR_SIZE)
                                stride_size = PGDIR_SIZE;
                        else if (stride_size >= P4D_SIZE)
                                stride_size = P4D_SIZE;
                        else if (stride_size >= PUD_SIZE)
                                stride_size = PUD_SIZE;
                        else if (stride_size >= PMD_SIZE)
                                stride_size = PMD_SIZE;
                        else
                                stride_size = PAGE_SIZE;
                }
        }

        __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}

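/* Flush a range of PMD-level (huge page) mappings. */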
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif