GNU Linux-libre 5.19-rc6-gnu
arch/parisc/kernel/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (e.g., ones with the Merced bus), only a single
 * PxTLB broadcast can be in flight at a time, and the hardware does
 * not enforce this; it must be guaranteed by software. We serialize
 * all TLB flushes with this spinlock to ensure it.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

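/*
 * A minimal sketch of the serialized-purge pattern used throughout
 * this file (see flush_kernel_dcache_page_addr() below); the
 * purge_tlb_start()/purge_tlb_end() helpers from <asm/tlbflush.h>
 * take pa_tlb_flush_lock when TLB flushes must be serialized:
 *
 *      unsigned long flags;
 *
 *      purge_tlb_start(flags);
 *      pdtlb(SR_KERNEL, addr);    broadcast data-TLB purge
 *      purge_tlb_end(flags);
 */
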
/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

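/*
 * These keys default to true. A hypothetical init-time path for a
 * machine whose PDC reports no data cache could disable the relevant
 * key once at boot, e.g.:
 *
 *      if (cache_info.dc_size == 0)
 *              static_branch_disable(&parisc_has_dcache);
 *
 * after which static_branch_likely(&parisc_has_dcache) in the flush
 * paths below compiles to a patched-out jump.
 */
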
static void cache_flush_local_cpu(void *dummy)
{
        if (static_branch_likely(&parisc_has_icache))
                flush_instruction_cache_local();
        if (static_branch_likely(&parisc_has_dcache))
                flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
        cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
        if (static_branch_likely(&parisc_has_cache))
                on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
        if (static_branch_likely(&parisc_has_dcache))
                on_each_cpu(flush_data_cache_local, NULL, 1);
}

/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

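/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT == 12), pfn_va(0x1234)
 * expands to __va(0x1234 << 12) == __va(0x1234000), the kernel-mapped
 * virtual address of physical page frame 0x1234.
 */
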
void
__update_cache(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        /* We don't have pte special.  As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with the FireGL card in the C8000.  */
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping_file(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page_addr(pfn_va(pfn));
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(pfn_va(pfn));
}

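/*
 * The PG_dcache_dirty handshake: when flush_dcache_page() (below) finds
 * a page-cache page with no user mappings yet, it only sets
 * PG_dcache_dirty. The expensive flush is deferred until the first PTE
 * for the page is installed, at which point __update_cache() (above)
 * flushes the kernel alias and clears the bit.
 */
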
void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size / 1024);
        if (cache_info.dc_loop != 1)
                snprintf(buf, sizeof(buf), "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
                cache_info.dc_size / 1024,
                (cache_info.dc_conf.cc_wt ? "WT" : "WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D" : ""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
                cache_info.dc_conf.cc_alias
        );
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB" : ""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size == 0) {
                seq_printf(m, "BTLB\t\t: not supported\n");
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size >> 8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.it_sp_base,
                cache_info.it_sp_stride,
                cache_info.it_sp_count,
                cache_info.it_loop,
                cache_info.it_off_base,
                cache_info.it_off_stride,
                cache_info.it_off_count);

        printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.dt_sp_base,
                cache_info.dt_sp_stride,
                cache_info.dt_sp_count,
                cache_info.dt_loop,
                cache_info.dt_off_base,
                cache_info.dt_off_stride,
                cache_info.dt_off_count);

        printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_sr);

        printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_sr);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                                "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

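        /*
         * Worked example with made-up PDC values: cc_line = 4,
         * cc_block = 1 and cc_shift = 2 give a stride of
         * 4 << (3 + 1 + 2) = 256 bytes, so the flush loops in
         * pacache.S advance 256 bytes per iteration rather than one
         * cache line at a time.
         */
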
#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof(btlb_info));
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits = 0;

        switch (boot_cpu_data.cpu_type) {
        case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2: /* pcxl2 doesn't support space register hashing */
                return;

        default: /* Currently all PA2.0 machines use the same instruction sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        /* If this procedure isn't implemented, don't panic; space_bits
           is pre-initialized to 0 so the check below still passes. */
        retval = pdc_spaceid_bits(&space_bits);
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        if (!static_branch_likely(&parisc_has_cache))
                return;
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
        unsigned long pgd_lock;
#endif

        vmaddr &= PAGE_MASK;

        preempt_disable();

        /* Set context for flush */
        local_irq_save(flags);
        prot = mfctl(8);        /* cr8: protection ID */
        space = mfsp(SR_USER);  /* user space ID */
        pgd = mfctl(25);        /* cr25: page directory base */
#ifdef CONFIG_TLB_PTLOCK
        pgd_lock = mfctl(28);
#endif
        switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
        local_irq_restore(flags);

        flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
        flush_tlb_page(vma, vmaddr);

        /* Restore previous context */
        local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
        mtctl(pgd_lock, 28);
#endif
        mtctl(pgd, 25);
        mtsp(space, SR_USER);
        mtctl(prot, 8);
        local_irq_restore(flags);

        preempt_enable();
}

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
        pte_t *ptep = NULL;
        pgd_t *pgd = mm->pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd))
                                        ptep = pte_offset_map(pmd, addr);
                        }
                }
        }
        return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
                == (_PAGE_PRESENT | _PAGE_ACCESSED);
}

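/*
 * Examples: a present, referenced, cacheable PTE (_PAGE_PRESENT and
 * _PAGE_ACCESSED set, _PAGE_NO_CACHE clear) needs a flush. A
 * not-present PTE, a present but never-accessed PTE, or an uncached
 * I/O mapping (_PAGE_NO_CACHE set) cannot have valid cache lines, so
 * none of those do.
 */
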
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping_file(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        unsigned long count = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page_addr(page_address(page));

        if (!mapping)
                return;

        pgoff = page->index;

        /*
         * We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent
         * on machines that support equivalent aliasing.
         */
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;
                if (parisc_requires_coherency()) {
                        pte_t *ptep;

                        ptep = get_ptep(mpnt->vm_mm, addr);
                        if (ptep && pte_needs_flush(*ptep))
                                flush_user_cache_page(mpnt, addr);
                } else {
                        /*
                         * The TLB is the engine of coherence on parisc:
                         * the CPU is entitled to speculate any page
                         * with a TLB mapping, so here we kill the
                         * mapping then flush the page along a special
                         * flush-only alias mapping. This guarantees that
                         * the page is no longer in the cache for any
                         * process, nor may it be speculatively read
                         * back in (until the user or kernel specifically
                         * accesses it, of course).
                         */
                        flush_tlb_page(mpnt, addr);
                        if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                        != (addr & (SHM_COLOUR - 1))) {
                                __flush_cache_page(mpnt, addr, page_to_phys(page));
                                /*
                                 * Software is allowed to have any number
                                 * of private mappings to a page.
                                 */
                                if (!(mpnt->vm_flags & VM_SHARED))
                                        continue;
                                if (old_addr)
                                        pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
                                                old_addr, addr, mpnt->vm_file);
                                old_addr = addr;
                        }
                }
                WARN_ON(++count == 4096);
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

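/*
 * Congruence example: SHM_COLOUR is 4 MB on parisc, so mappings of the
 * same file page at user addresses 0x41000 and 0x441000 have the same
 * offset modulo SHM_COLOUR, alias to the same cache colour, and are
 * covered by a single __flush_cache_page() above. A shared mapping at
 * 0x42000 would not be congruent and triggers the INEQUIVALENT ALIASES
 * error.
 */
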
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;
        unsigned long threshold, threshold2;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
        pr_info("Calculated flush threshold is %lu KiB\n",
                threshold/1024);

        /*
         * The threshold computed above isn't very reliable. The following
         * heuristic works reasonably well on c8000/rp3440.
         */
        threshold2 = cache_info.dc_size * num_online_cpus();
        parisc_cache_flush_threshold = threshold2;
        printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
                parisc_cache_flush_threshold/1024);

        /* calculate TLB flush threshold */

        /* On SMP machines, skip the TLB measurement of kernel text,
         * which has been mapped with huge pages. */
        if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
                threshold = max(cache_info.it_size, cache_info.dt_size);
                threshold *= PAGE_SIZE;
                threshold /= num_online_cpus();
                goto set_tlb_threshold;
        }

        size = (unsigned long)_end - (unsigned long)_text;
        rangetime = mfctl(16);
        flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
        rangetime = mfctl(16) - rangetime;

        alltime = mfctl(16);
        flush_tlb_all();
        alltime = mfctl(16) - alltime;

        printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
        printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
                threshold/1024);

set_tlb_threshold:
        if (threshold > FLUSH_TLB_THRESHOLD)
                parisc_tlb_flush_threshold = threshold;
        else
                parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

        printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                parisc_tlb_flush_threshold/1024);
}

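/*
 * Example with made-up numbers: a 4-CPU machine whose PDC reports a
 * 768 KiB L1 data cache ends up with parisc_cache_flush_threshold =
 * 768 KiB * 4 = 3 MiB, so flush_cache_mm() on an mm larger than 3 MiB
 * falls back to a full cache flush instead of flushing page by page.
 */
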
extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb(SR_KERNEL, addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
        unsigned long vmaddr, unsigned long pfn)
{
        pte_t *ptep = get_ptep(vma->vm_mm, vmaddr);

        /*
         * The pte check is racy and sometimes the flush will trigger
         * a non-access TLB miss. Hopefully, the page has already been
         * flushed.
         */
        if (ptep && pte_needs_flush(*ptep))
                flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_local_page(from);
        kto = kmap_local_page(to);
        flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
        copy_page_asm(kto, kfrom);
        kunmap_local(kto);
        kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * Returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
{
        unsigned long flags;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }

        /* Purge TLB entries for small ranges using the pdtlb and
           pitlb instructions.  These instructions execute locally
           but cause a purge request to be broadcast to other TLBs.  */
        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, SR_TEMP1);
                pdtlb(SR_TEMP1, start);
                pitlb(SR_TEMP1, start);
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;
}

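/*
 * Example: with parisc_tlb_flush_threshold at its 16 KiB minimum, a
 * 64 KiB range is handled with one global flush_tlb_all() and the
 * function returns 1, while an 8 KiB range (two 4 KiB pages) takes two
 * pdtlb/pitlb iterations and returns 0.
 */
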
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        unsigned long addr, pfn;
        pte_t *ptep;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                /*
                 * The vma can contain pages that aren't present. Although
                 * the pte search is expensive, we need the pte to find the
                 * page pfn and to check whether the page should be flushed.
                 */
                ptep = get_ptep(vma->vm_mm, addr);
                if (ptep && pte_needs_flush(*ptep)) {
                        if (parisc_requires_coherency()) {
                                flush_user_cache_page(vma, addr);
                        } else {
                                pfn = pte_pfn(*ptep);
                                if (WARN_ON(!pfn_valid(pfn)))
                                        return;
                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                        }
                }
        }
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        /* Stop summing once the total exceeds the flush threshold. */
        for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        /*
         * Flushing the whole cache on each cpu takes forever on
         * rp3440, etc. So, avoid it if the mm isn't too big.
         *
         * Note that we must flush the entire cache on machines
         * with aliasing caches to prevent random segmentation
         * faults.
         */
        if (!parisc_requires_coherency()
            || mm_total_size(mm) >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_all();
                flush_cache_all();
                return;
        }

        /* Flush mm */
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (!parisc_requires_coherency()
            || end - start >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_range(vma, start, end);
                flush_cache_all();
                return;
        }

        flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        if (WARN_ON(!pfn_valid(pfn)))
                return;
        if (parisc_requires_coherency())
                flush_user_cache_page(vma, vmaddr);
        else
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        if (!PageAnon(page))
                return;

        if (parisc_requires_coherency()) {
                if (vma->vm_flags & VM_SHARED)
                        flush_data_cache();
                else
                        flush_user_cache_page(vma, vmaddr);
                return;
        }

        flush_tlb_page(vma, vmaddr);
        preempt_disable();
        flush_dcache_page_asm(page_to_phys(page), vmaddr);
        preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        flush_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        /* Ensure DMA is complete */
        asm_syncdma();

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        purge_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
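
/*
 * The two exports above follow the usual streaming-DMA convention (a
 * minimal sketch, assuming a driver-private vmap'ed buffer "buf" of
 * "len" bytes):
 *
 *      flush_kernel_vmap_range(buf, len);       write back dirty lines
 *                                               before DMA *to* the device
 *      ... start DMA, device accesses the buffer ...
 *      invalidate_kernel_vmap_range(buf, len);  discard stale lines after
 *                                               DMA *from* the device
 */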