/* arch/parisc/kernel/cache.c (GNU Linux-libre 6.5.10-gnu) */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif
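
/*
 * Illustrative sketch (not compiled; it mirrors flush_kernel_dcache_page_addr()
 * further down): purge_tlb_start()/purge_tlb_end() from <asm/tlbflush.h>
 * bracket each purge so that only one PxTLB broadcast is in flight at a time
 * when pa_serialize_tlb_flushes is set.
 */
#if 0
static void example_purge_one_kernel_page(const void *addr)
{
        unsigned long flags;

        purge_tlb_start(flags);         /* may take pa_tlb_flush_lock */
        pdtlb(SR_KERNEL, addr);         /* purge the data TLB entry */
        purge_tlb_end(flags);
}
#endif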

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

static void cache_flush_local_cpu(void *dummy)
{
        if (static_branch_likely(&parisc_has_icache))
                flush_instruction_cache_local();
        if (static_branch_likely(&parisc_has_dcache))
                flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
        cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
        if (static_branch_likely(&parisc_has_cache))
                on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
        if (static_branch_likely(&parisc_has_dcache))
                on_each_cpu(flush_data_cache_local, NULL, 1);
}

/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

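/*
 * Called when a translation is installed (see update_mmu_cache() in
 * <asm/pgtable.h>): perform the dcache flush that flush_dcache_page()
 * deferred by setting PG_dcache_dirty while the page had no user mapping.
 */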
void
__update_cache(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct page *page;

        /* We don't have pte special.  As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with FireGL card in C8000.  */
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping_file(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page_addr(pfn_va(pfn));
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size / 1024);
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
                cache_info.dc_size / 1024,
                (cache_info.dc_conf.cc_wt ? "WT" : "WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D" : ""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
                cache_info.dc_conf.cc_alias
        );
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB" : ""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size == 0) {
                seq_printf(m, "BTLB\t\t: not supported\n");
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size >> 8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.it_sp_base,
                cache_info.it_sp_stride,
                cache_info.it_sp_count,
                cache_info.it_loop,
                cache_info.it_off_base,
                cache_info.it_off_stride,
                cache_info.it_off_count);

        printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.dt_sp_base,
                cache_info.dt_sp_stride,
                cache_info.dt_sp_count,
                cache_info.dt_loop,
                cache_info.dt_off_base,
                cache_info.dt_off_stride,
                cache_info.dt_off_count);

        printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_sr);

        printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_sr);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
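        /*
         * Worked example with hypothetical PDC values: cc_line == 4,
         * cc_block == 1 and cc_shift == 2 give a flush stride of
         * 4 << (3 + 1 + 2) == 256 bytes per fdc/fic step.
         */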

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits = 0;   /* stays 0 if PDC lacks the procedure */

        switch (boot_cpu_data.cpu_type) {
        case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2: /* pcxl2 doesn't support space register hashing */
                return;

        default: /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        if (!static_branch_likely(&parisc_has_cache))
                return;
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
        unsigned long pgd_lock;
#endif

        vmaddr &= PAGE_MASK;

        preempt_disable();

        /* Set context for flush */
        local_irq_save(flags);
        prot = mfctl(8);        /* protection IDs (cr8) */
        space = mfsp(SR_USER);
        pgd = mfctl(25);        /* user page directory (cr25) */
#ifdef CONFIG_TLB_PTLOCK
        pgd_lock = mfctl(28);   /* page-table lock pointer (cr28) */
#endif
        switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
        local_irq_restore(flags);

        flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
        flush_tlb_page(vma, vmaddr);

        /* Restore previous context */
        local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
        mtctl(pgd_lock, 28);
#endif
        mtctl(pgd, 25);
        mtsp(space, SR_USER);
        mtctl(prot, 8);
        local_irq_restore(flags);

        preempt_enable();
}

/*
 * Walk the page tables of @mm for @addr.  Returns the mapped PTE (which
 * the caller must release with pte_unmap()) or NULL if no translation
 * is present.
 */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
        pte_t *ptep = NULL;
        pgd_t *pgd = mm->pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd))
                                        ptep = pte_offset_map(pmd, addr);
                        }
                }
        }
        return ptep;
}

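/*
 * A user mapping needs a cache flush only if its PTE is present and has
 * been accessed, and the page is not mapped uncacheable (_PAGE_NO_CACHE
 * clear); the single masked compare below checks all three at once.
 */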
static inline bool pte_needs_flush(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
                == (_PAGE_PRESENT | _PAGE_ACCESSED);
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping_file(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        unsigned long count = 0;
        unsigned long flags;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page_addr(page_address(page));

        if (!mapping)
                return;

        pgoff = page->index;

        /*
         * We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent
         * on machines that support equivalent aliasing.
         */
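        /*
         * For example (hypothetical addresses), with SHM_COLOUR == 0x00400000
         * user mappings at 0x41000000 and 0x52400000 are congruent (equal
         * modulo SHM_COLOUR), so the single __flush_cache_page() below covers
         * both; 0x52500000 would not be, and would trip the old_addr check.
         */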
        flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;
                if (parisc_requires_coherency()) {
                        bool needs_flush = false;
                        pte_t *ptep;

                        ptep = get_ptep(mpnt->vm_mm, addr);
                        if (ptep) {
                                needs_flush = pte_needs_flush(*ptep);
                                pte_unmap(ptep);
                        }
                        if (needs_flush)
                                flush_user_cache_page(mpnt, addr);
                } else {
                        /*
                         * The TLB is the engine of coherence on parisc:
                         * the CPU is entitled to speculate any page
                         * with a TLB mapping, so here we kill the
                         * mapping then flush the page along a special
                         * flush-only alias mapping. This guarantees that
                         * the page is no longer in the cache for any
                         * process, nor may it be speculatively read
                         * in (until the user or kernel specifically
                         * accesses it, of course).
                         */
                        flush_tlb_page(mpnt, addr);
                        if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                        != (addr & (SHM_COLOUR - 1))) {
                                __flush_cache_page(mpnt, addr, page_to_phys(page));
                                /*
                                 * Software is allowed to have any number
                                 * of private mappings to a page.
                                 */
                                if (!(mpnt->vm_flags & VM_SHARED))
                                        continue;
                                if (old_addr)
                                        pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
                                                old_addr, addr, mpnt->vm_file);
                                old_addr = addr;
                        }
                }
                WARN_ON(++count == 4096);
        }
        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5 MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;
        unsigned long threshold, threshold2;

        alltime = mfctl(16);    /* cr16: interval timer / cycle counter */
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
        pr_info("Calculated flush threshold is %lu KiB\n",
                threshold/1024);

        /*
         * The threshold computed above isn't very reliable. The following
         * heuristic works reasonably well on c8000/rp3440.
         */
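        /*
         * For example (hypothetical sizes): a 768 KB per-CPU data cache and
         * four online CPUs give threshold2 == 3 MB, so any flush covering
         * less than 3 MB walks the range instead of nuking the whole cache.
         */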
        threshold2 = cache_info.dc_size * num_online_cpus();
        parisc_cache_flush_threshold = threshold2;
        printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
                parisc_cache_flush_threshold/1024);

        /* calculate TLB flush threshold */

        /* On SMP machines, skip the TLB measurement of the kernel text,
         * which has been mapped with huge pages. */
        if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
                threshold = max(cache_info.it_size, cache_info.dt_size);
                threshold *= PAGE_SIZE;
                threshold /= num_online_cpus();
                goto set_tlb_threshold;
        }

        size = (unsigned long)_end - (unsigned long)_text;
        rangetime = mfctl(16);
        flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
        rangetime = mfctl(16) - rangetime;

        alltime = mfctl(16);
        flush_tlb_all();
        alltime = mfctl(16) - alltime;

        printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
        printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
                threshold/1024);

set_tlb_threshold:
        if (threshold > FLUSH_TLB_THRESHOLD)
                parisc_tlb_flush_threshold = threshold;
        else
                parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

        printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb(SR_KERNEL, addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
        unsigned long vmaddr, unsigned long pfn)
{
        bool needs_flush = false;
        pte_t *ptep;

        /*
         * The pte check is racy and sometimes the flush will trigger
         * a non-access TLB miss. Hopefully, the page has already been
         * flushed.
         */
        ptep = get_ptep(vma->vm_mm, vmaddr);
        if (ptep) {
                needs_flush = pte_needs_flush(*ptep);
                pte_unmap(ptep);
        }
        if (needs_flush)
                flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

        kfrom = kmap_local_page(from);
        kto = kmap_local_page(to);
        flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
        copy_page_asm(kto, kfrom);
        kunmap_local(kto);
        kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
{
        unsigned long flags;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }

        /* Purge TLB entries for small ranges using the pdtlb and
           pitlb instructions.  These instructions execute locally
           but cause a purge request to be broadcast to other TLBs.  */
        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, SR_TEMP1);
                pdtlb(SR_TEMP1, start);
                pitlb(SR_TEMP1, start);
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;
}
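
/*
 * For scale (hypothetical numbers): a 32 KB range purges eight 4 KB pages
 * one at a time, each under the PxTLB lock, while any range at or above
 * parisc_tlb_flush_threshold is handled by a single flush_tlb_all().
 * Callers such as the flush_tlb_range() macro in <asm/tlbflush.h> pass the
 * mm's space id as sid.
 */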

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        unsigned long addr, pfn;
        pte_t *ptep;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                bool needs_flush = false;
                /*
                 * The vma can contain pages that aren't present. Although
                 * the pte search is expensive, we need the pte to find the
                 * page pfn and to check whether the page should be flushed.
                 */
                ptep = get_ptep(vma->vm_mm, addr);
                if (ptep) {
                        needs_flush = pte_needs_flush(*ptep);
                        pfn = pte_pfn(*ptep);
                        pte_unmap(ptep);
                }
                if (needs_flush) {
                        if (parisc_requires_coherency()) {
                                flush_user_cache_page(vma, addr);
                        } else {
                                if (WARN_ON(!pfn_valid(pfn)))
                                        return;
                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                        }
                }
        }
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;
        VMA_ITERATOR(vmi, mm, 0);

        for_each_vma(vmi, vma) {
                if (usize >= parisc_cache_flush_threshold)
                        break;
                usize += vma->vm_end - vma->vm_start;
        }
        return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        /*
         * Flushing the whole cache on each cpu takes forever on
         * rp3440, etc. So, avoid it if the mm isn't too big.
         *
         * Note that we must flush the entire cache on machines
         * with aliasing caches to prevent random segmentation
         * faults.
         */
        if (!parisc_requires_coherency()
            || mm_total_size(mm) >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_all();
                flush_cache_all();
                return;
        }

        /* Flush mm */
        for_each_vma(vmi, vma)
                flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (!parisc_requires_coherency()
            || end - start >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_range(vma, start, end);
                flush_cache_all();
                return;
        }

        flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        if (WARN_ON(!pfn_valid(pfn)))
                return;
        if (parisc_requires_coherency())
                flush_user_cache_page(vma, vmaddr);
        else
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        if (!PageAnon(page))
                return;

        if (parisc_requires_coherency()) {
                if (vma->vm_flags & VM_SHARED)
                        flush_data_cache();
                else
                        flush_user_cache_page(vma, vmaddr);
                return;
        }

        flush_tlb_page(vma, vmaddr);
        preempt_disable();
        flush_dcache_page_asm(page_to_phys(page), vmaddr);
        preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        flush_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        /* Ensure DMA is complete */
        asm_syncdma();

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        purge_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        unsigned long start, end;
        ASM_EXCEPTIONTABLE_VAR(error);

        if (bytes == 0)
                return 0;
        if (!access_ok((void __user *) addr, bytes))
                return -EFAULT;

        end = addr + bytes;

        if (cache & DCACHE) {
                start = addr;
                /*
                 * Step through the range one dcache_stride at a time,
                 * flushing the data cache with fdc (the original listing
                 * had fdc/fic swapped between the two branches); a fault
                 * inside the loop is turned into -EFAULT by the exception
                 * table entry.
                 */
                __asm__ __volatile__ (
#ifdef CONFIG_64BIT
                        "1: cmpb,*<<,n  %0,%2,1b\n"
#else
                        "1: cmpb,<<,n   %0,%2,1b\n"
#endif
                        "   fdc,m       %3(%4,%0)\n"
                        "2: sync\n"
                        ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (dcache_stride), "i" (SR_USER));
        }

        if (cache & ICACHE && error == 0) {
                start = addr;
                /* Same loop for the instruction cache, using fic. */
                __asm__ __volatile__ (
#ifdef CONFIG_64BIT
                        "1: cmpb,*<<,n  %0,%2,1b\n"
#else
                        "1: cmpb,<<,n   %0,%2,1b\n"
#endif
                        "   fic,m       %3(%4,%0)\n"
                        "2: sync\n"
                        ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b)
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (icache_stride), "i" (SR_USER));
        }

        return error;
}
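
/*
 * Userspace sketch (hypothetical JIT buffer): after writing code to memory,
 * make it visible to instruction fetch via the syscall above.
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <asm/cachectl.h>
 *
 *	syscall(__NR_cacheflush, (unsigned long)code, code_len,
 *		DCACHE | ICACHE);
 */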