/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);  /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
/* On some machines (e.g., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);

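/*
 * Flush this CPU's caches. The static keys record which caches exist
 * on this machine, so machines without a given cache skip the
 * corresponding flush entirely.
 */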
static void cache_flush_local_cpu(void *dummy)
{
        if (static_branch_likely(&parisc_has_icache))
                flush_instruction_cache_local();
        if (static_branch_likely(&parisc_has_dcache))
                flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
        cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
        if (static_branch_likely(&parisc_has_cache))
                on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
        if (static_branch_likely(&parisc_has_dcache))
                on_each_cpu(flush_data_cache_local, NULL, 1);
}

/* Kernel virtual address of pfn.  */
#define pfn_va(pfn)     __va(PFN_PHYS(pfn))

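/*
 * Called when a PTE is installed or updated. If the folio has a dirty
 * kernel d-cache mapping (PG_dcache_dirty), write it back through the
 * kernel mapping so the new user mapping sees current data.
 */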
void __update_cache(pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct folio *folio;
        unsigned int nr;

        /* We don't have pte special.  As a result, we can be called with
           an invalid pfn and we don't need to flush the kernel dcache page.
           This occurs with FireGL card in C8000.  */
        if (!pfn_valid(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));
        pfn = folio_pfn(folio);
        nr = folio_nr_pages(folio);
        if (folio_flush_mapping(folio) &&
            test_bit(PG_dcache_dirty, &folio->flags)) {
                while (nr--)
                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
                clear_bit(PG_dcache_dirty, &folio->flags);
        } else if (parisc_requires_coherency())
                while (nr--)
                        flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024 );
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf),
                cache_info.dc_conf.cc_alias
        );
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size==0) {
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.it_sp_base,
                cache_info.it_sp_stride,
                cache_info.it_sp_count,
                cache_info.it_loop,
                cache_info.it_off_base,
                cache_info.it_off_stride,
                cache_info.it_off_count);

        printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
                cache_info.dt_sp_base,
                cache_info.dt_sp_stride,
                cache_info.dt_sp_count,
                cache_info.dt_loop,
                cache_info.dt_off_base,
                cache_info.dt_off_stride,
                cache_info.dt_off_count);

        printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk("        wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_sr);

        printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_sr);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                        "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

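/*
 * Select the CPU-specific instruction sequence that disables
 * space-register hashing, run it, and then use PDC to verify that
 * SpaceID hashing is really off.
 */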
void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2: /* pcxl2 doesn't support space register hashing */
                return;

        default: /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        if (!static_branch_likely(&parisc_has_cache))
                return;
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

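/*
 * Flush one user page through the user's own mapping. The protection
 * and space registers and the page-table pointers are temporarily
 * switched to the target mm so the flush sees the user's view of the
 * page, then the previous context is restored.
 */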
static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
        unsigned long pgd_lock;
#endif

        vmaddr &= PAGE_MASK;

        preempt_disable();

        /* Set context for flush */
        local_irq_save(flags);
        prot = mfctl(8);
        space = mfsp(SR_USER);
        pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
        pgd_lock = mfctl(28);
#endif
        switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
        local_irq_restore(flags);

        flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
        flush_tlb_page(vma, vmaddr);

        /* Restore previous context */
        local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
        mtctl(pgd_lock, 28);
#endif
        mtctl(pgd, 25);
        mtsp(space, SR_USER);
        mtctl(prot, 8);
        local_irq_restore(flags);

        preempt_enable();
}

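/*
 * Keep the I-cache coherent for @nr pages: write back the kernel
 * d-cache mapping of each page, then flush the corresponding I-cache
 * lines.
 */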
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
                unsigned int nr)
{
        void *kaddr = page_address(page);

        for (;;) {
                flush_kernel_dcache_page_addr(kaddr);
                flush_kernel_icache_page(kaddr);
                if (--nr == 0)
                        break;
                kaddr += PAGE_SIZE;
        }
}

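/*
 * Walk the page table of @mm and return a mapped pointer to the PTE
 * for @addr, or NULL if any intermediate level is empty. The caller
 * must release the mapping with pte_unmap().
 */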
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
        pte_t *ptep = NULL;
        pgd_t *pgd = mm->pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (!pgd_none(*pgd)) {
                p4d = p4d_offset(pgd, addr);
                if (!p4d_none(*p4d)) {
                        pud = pud_offset(p4d, addr);
                        if (!pud_none(*pud)) {
                                pmd = pmd_offset(pud, addr);
                                if (!pmd_none(*pmd))
                                        ptep = pte_offset_map(pmd, addr);
                        }
                }
        }
        return ptep;
}

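/*
 * A page needs a cache flush only if it is present and accessed, and
 * is not mapped uncached (_PAGE_NO_CACHE).
 */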
static inline bool pte_needs_flush(pte_t pte)
{
        return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
                == (_PAGE_PRESENT | _PAGE_ACCESSED);
}

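/*
 * Write back a folio's kernel mapping and bring its user mappings into
 * coherence, walking the address_space's interval tree to find every
 * VMA that maps the folio.
 */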
void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping = folio_flush_mapping(folio);
        struct vm_area_struct *vma;
        unsigned long addr, old_addr = 0;
        void *kaddr;
        unsigned long count = 0;
        unsigned long i, nr, flags;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &folio->flags);
                return;
        }

        nr = folio_nr_pages(folio);
        kaddr = folio_address(folio);
        for (i = 0; i < nr; i++)
                flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

        if (!mapping)
                return;

        pgoff = folio->index;

        /*
         * We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent
         * on machines that support equivalent aliasing
         */
        flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
                unsigned long offset = pgoff - vma->vm_pgoff;
                unsigned long pfn = folio_pfn(folio);

                addr = vma->vm_start;
                nr = folio_nr_pages(folio);
                if (offset > -nr) {
                        pfn -= offset;
                        nr += offset;
                } else {
                        addr += offset * PAGE_SIZE;
                }
                if (addr + nr * PAGE_SIZE > vma->vm_end)
                        nr = (vma->vm_end - addr) / PAGE_SIZE;

                if (parisc_requires_coherency()) {
                        for (i = 0; i < nr; i++) {
                                pte_t *ptep = get_ptep(vma->vm_mm,
                                                        addr + i * PAGE_SIZE);
                                if (!ptep)
                                        continue;
                                if (pte_needs_flush(*ptep))
                                        flush_user_cache_page(vma,
                                                        addr + i * PAGE_SIZE);
                                /* Optimise accesses to the same table? */
                                pte_unmap(ptep);
                        }
                } else {
                        /*
                         * The TLB is the engine of coherence on parisc:
                         * The CPU is entitled to speculate any page
                         * with a TLB mapping, so here we kill the
                         * mapping then flush the page along a special
                         * flush only alias mapping. This guarantees that
                         * the page is no-longer in the cache for any
                         * process and nor may it be speculatively read
                         * in (until the user or kernel specifically
                         * accesses it, of course)
                         */
                        for (i = 0; i < nr; i++)
                                flush_tlb_page(vma, addr + i * PAGE_SIZE);
                        if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
                                        != (addr & (SHM_COLOUR - 1))) {
                                for (i = 0; i < nr; i++)
                                        __flush_cache_page(vma,
                                                addr + i * PAGE_SIZE,
                                                (pfn + i) * PAGE_SIZE);
                                /*
                                 * Software is allowed to have any number
                                 * of private mappings to a page.
                                 */
                                if (!(vma->vm_flags & VM_SHARED))
                                        continue;
                                if (old_addr)
                                        pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
                                                old_addr, addr, vma->vm_file);
                                if (nr == folio_nr_pages(folio))
                                        old_addr = addr;
                        }
                }
                WARN_ON(++count == 4096);
        }
        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

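/*
 * At boot, time a whole-cache flush against a range flush over the
 * kernel image and use the ratio to choose the thresholds above which
 * flushing everything is cheaper than flushing a range.
 */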
void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;
        unsigned long threshold, threshold2;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
        pr_info("Calculated flush threshold is %lu KiB\n",
                threshold/1024);

        /*
         * The threshold computed above isn't very reliable. The following
         * heuristic works reasonably well on c8000/rp3440.
         */
        threshold2 = cache_info.dc_size * num_online_cpus();
        parisc_cache_flush_threshold = threshold2;
        printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
                parisc_cache_flush_threshold/1024);

        /* calculate TLB flush threshold */

        /* On SMP machines, skip the TLB measure of kernel text which
         * has been mapped as huge pages. */
        if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
                threshold = max(cache_info.it_size, cache_info.dt_size);
                threshold *= PAGE_SIZE;
                threshold /= num_online_cpus();
                goto set_tlb_threshold;
        }

        size = (unsigned long)_end - (unsigned long)_text;
        rangetime = mfctl(16);
        flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
        rangetime = mfctl(16) - rangetime;

        alltime = mfctl(16);
        flush_tlb_all();
        alltime = mfctl(16) - alltime;

        printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
        printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
                threshold/1024);

set_tlb_threshold:
        if (threshold > FLUSH_TLB_THRESHOLD)
                parisc_tlb_flush_threshold = threshold;
        else
                parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

        printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
                parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

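/*
 * Flush one page through its kernel mapping, then purge the kernel
 * TLB entry so the CPU cannot speculatively pull the lines back into
 * the cache.
 */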
void flush_kernel_dcache_page_addr(const void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb(SR_KERNEL, addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
        unsigned long vmaddr, unsigned long pfn)
{
        bool needs_flush = false;
        pte_t *ptep;

        /*
         * The pte check is racy and sometimes the flush will trigger
         * a non-access TLB miss. Hopefully, the page has already been
         * flushed.
         */
        ptep = get_ptep(vma->vm_mm, vmaddr);
        if (ptep) {
                needs_flush = pte_needs_flush(*ptep);
                pte_unmap(ptep);
        }
        if (needs_flush)
                flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *kto, *kfrom;

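        /* Copy using kernel mapping.  No coherency is needed (all in
           kmap) for the `to' page.  However, the `from' page needs to
           be flushed through a mapping equivalent to the user mapping
           before it can be accessed through the kernel mapping. */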
        kfrom = kmap_local_page(from);
        kto = kmap_local_page(to);
        flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
        copy_page_asm(kto, kfrom);
        kunmap_local(kto);
        kunmap_local(kfrom);
}

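/*
 * Write into a user page via its kernel mapping (used e.g. by ptrace):
 * flush the user view first, copy, then write back the kernel range so
 * the user mapping sees the new data.
 */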
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                unsigned long user_vaddr, void *dst, void *src, int len)
{
        flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
                      unsigned long end)
{
        unsigned long flags;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            end - start >= parisc_tlb_flush_threshold) {
                flush_tlb_all();
                return 1;
        }

        /* Purge TLB entries for small ranges using the pdtlb and
           pitlb instructions.  These instructions execute locally
           but cause a purge request to be broadcast to other TLBs.  */
        while (start < end) {
                purge_tlb_start(flags);
                mtsp(sid, SR_TEMP1);
                pdtlb(SR_TEMP1, start);
                pitlb(SR_TEMP1, start);
                purge_tlb_end(flags);
                start += PAGE_SIZE;
        }
        return 0;
}

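/*
 * Flush a user address range page by page. The PTE is looked up for
 * each page both to find the pfn and to skip pages that are absent or
 * do not need flushing.
 */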
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        unsigned long addr, pfn;
        pte_t *ptep;

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                bool needs_flush = false;
                /*
                 * The vma can contain pages that aren't present. Although
                 * the pte search is expensive, we need the pte to find the
                 * page pfn and to check whether the page should be flushed.
                 */
                ptep = get_ptep(vma->vm_mm, addr);
                if (ptep) {
                        needs_flush = pte_needs_flush(*ptep);
                        pfn = pte_pfn(*ptep);
                        pte_unmap(ptep);
                }
                if (needs_flush) {
                        if (parisc_requires_coherency()) {
                                flush_user_cache_page(vma, addr);
                        } else {
                                if (WARN_ON(!pfn_valid(pfn)))
                                        return;
                                __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                        }
                }
        }
}

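/*
 * Sum the sizes of an mm's VMAs, stopping early once the total already
 * exceeds the cache flush threshold.
 */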
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;
        VMA_ITERATOR(vmi, mm, 0);

        for_each_vma(vmi, vma) {
                if (usize >= parisc_cache_flush_threshold)
                        break;
                usize += vma->vm_end - vma->vm_start;
        }
        return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        VMA_ITERATOR(vmi, mm, 0);

        /*
         * Flushing the whole cache on each cpu takes forever on
         * rp3440, etc. So, avoid it if the mm isn't too big.
         *
         * Note that we must flush the entire cache on machines
         * with aliasing caches to prevent random segmentation
         * faults.
         */
        if (!parisc_requires_coherency()
            || mm_total_size(mm) >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_all();
                flush_cache_all();
                return;
        }

        /* Flush mm */
        for_each_vma(vmi, vma)
                flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (!parisc_requires_coherency()
            || end - start >= parisc_cache_flush_threshold) {
                if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
                        return;
                flush_tlb_range(vma, start, end);
                flush_cache_all();
                return;
        }

        flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        if (WARN_ON(!pfn_valid(pfn)))
                return;
        if (parisc_requires_coherency())
                flush_user_cache_page(vma, vmaddr);
        else
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

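/*
 * Anonymous pages have no backing address_space, so flush_dcache_folio
 * cannot reach them; flush them here via the user mapping (or the
 * whole data cache for shared mappings on machines that require
 * coherency).
 */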
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        if (!PageAnon(page))
                return;

        if (parisc_requires_coherency()) {
                if (vma->vm_flags & VM_SHARED)
                        flush_data_cache();
                else
                        flush_user_cache_page(vma, vmaddr);
                return;
        }

        flush_tlb_page(vma, vmaddr);
        preempt_disable();
        flush_dcache_page_asm(page_to_phys(page), vmaddr);
        preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        flush_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        /* Ensure DMA is complete */
        asm_syncdma();

        if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
            (unsigned long)size >= parisc_cache_flush_threshold) {
                flush_tlb_kernel_range(start, end);
                flush_data_cache();
                return;
        }

        purge_kernel_dcache_range_asm(start, end);
        flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

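/*
 * cacheflush(2): flush the user D- and/or I-cache over the range
 * [addr, addr + bytes). The flush loops run on the user's address
 * space (SR_USER); faults are caught via the exception table and
 * reported as -EFAULT.
 */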
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
        unsigned int, cache)
{
        unsigned long start, end;
        ASM_EXCEPTIONTABLE_VAR(error);

        if (bytes == 0)
                return 0;
        if (!access_ok((void __user *) addr, bytes))
                return -EFAULT;

        end = addr + bytes;

        if (cache & DCACHE) {
                start = addr;
                __asm__ __volatile__ (
#ifdef CONFIG_64BIT
                        "1: cmpb,*<<,n  %0,%2,1b\n"
#else
                        "1: cmpb,<<,n   %0,%2,1b\n"
#endif
                        "   fdc,m       %3(%4,%0)\n"
                        "2: sync\n"
                        ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (dcache_stride), "i" (SR_USER));
        }

        if (cache & ICACHE && error == 0) {
                start = addr;
                __asm__ __volatile__ (
#ifdef CONFIG_64BIT
                        "1: cmpb,*<<,n  %0,%2,1b\n"
#else
                        "1: cmpb,<<,n   %0,%2,1b\n"
#endif
                        "   fic,m       %3(%4,%0)\n"
                        "2: sync\n"
                        ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
                        : "+r" (start), "+r" (error)
                        : "r" (end), "r" (icache_stride), "i" (SR_USER));
        }

        return error;
}