// SPDX-License-Identifier: GPL-2.0
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>

#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
#include <asm/mmu_context.h>
#include <asm/cachectl.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
EXPORT_SYMBOL(icache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */

/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
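
/*
 * Sketch of how the purge_tlb_start()/purge_tlb_end() helpers in
 * asm/tlbflush.h are expected to take this lock -- not the
 * authoritative definition:
 *
 *	if (pa_serialize_tlb_flushes)
 *		spin_lock_irqsave(&pa_tlb_flush_lock, flags);
 *	else
 *		local_irq_save(flags);
 */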

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
struct pdc_btlb_info btlb_info;
#endif

DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
DEFINE_STATIC_KEY_TRUE(parisc_has_dcache);
DEFINE_STATIC_KEY_TRUE(parisc_has_icache);
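
/*
 * The keys default to true; boot code is expected to turn one off when
 * firmware reports the corresponding cache is absent, e.g. (sketch of
 * the intended use) static_branch_disable(&parisc_has_dcache).
 */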

static void cache_flush_local_cpu(void *dummy)
{
	if (static_branch_likely(&parisc_has_icache))
		flush_instruction_cache_local();
	if (static_branch_likely(&parisc_has_dcache))
		flush_data_cache_local(NULL);
}

void flush_cache_all_local(void)
{
	cache_flush_local_cpu(NULL);
}

void flush_cache_all(void)
{
	if (static_branch_likely(&parisc_has_cache))
		on_each_cpu(cache_flush_local_cpu, NULL, 1);
}

static inline void flush_data_cache(void)
{
	if (static_branch_likely(&parisc_has_dcache))
		on_each_cpu(flush_data_cache_local, NULL, 1);
}

/* Kernel virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
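
/*
 * Presumably invoked when a translation is installed (it backs
 * update_mmu_cache() in asm/pgtable.h): perform the flush that
 * flush_dcache_folio() deferred by setting PG_dcache_dirty.
 */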
void __update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	unsigned int nr;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	pfn = folio_pfn(folio);
	nr = folio_nr_pages(folio);
	if (folio_flush_mapping(folio) &&
	    test_bit(PG_dcache_dirty, &folio->flags)) {
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
		clear_bit(PG_dcache_dirty, &folio->flags);
	} else if (parisc_requires_coherency())
		while (nr--)
			flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s, alias=%d)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf),
		cache_info.dc_conf.cc_alias
	);
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
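		/* max_size counts 4 KiB pages: 4096/2^20 = 1/256, so MB = max_size >> 8 */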
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
173 printk("ic_size %lx dc_size %lx it_size %lx\n",
178 printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
180 cache_info.dc_stride,
184 printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
185 *(unsigned long *) (&cache_info.dc_conf),
186 cache_info.dc_conf.cc_alias,
187 cache_info.dc_conf.cc_block,
188 cache_info.dc_conf.cc_line,
189 cache_info.dc_conf.cc_shift);
190 printk(" wt %d sh %d cst %d hv %d\n",
191 cache_info.dc_conf.cc_wt,
192 cache_info.dc_conf.cc_sh,
193 cache_info.dc_conf.cc_cst,
194 cache_info.dc_conf.cc_hv);
196 printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
198 cache_info.ic_stride,
202 printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
203 cache_info.it_sp_base,
204 cache_info.it_sp_stride,
205 cache_info.it_sp_count,
207 cache_info.it_off_base,
208 cache_info.it_off_stride,
209 cache_info.it_off_count);
211 printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
212 cache_info.dt_sp_base,
213 cache_info.dt_sp_stride,
214 cache_info.dt_sp_count,
216 cache_info.dt_off_base,
217 cache_info.dt_off_stride,
218 cache_info.dt_off_count);
220 printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
221 *(unsigned long *) (&cache_info.ic_conf),
222 cache_info.ic_conf.cc_alias,
223 cache_info.ic_conf.cc_block,
224 cache_info.ic_conf.cc_line,
225 cache_info.ic_conf.cc_shift);
226 printk(" wt %d sh %d cst %d hv %d\n",
227 cache_info.ic_conf.cc_wt,
228 cache_info.ic_conf.cc_sh,
229 cache_info.ic_conf.cc_cst,
230 cache_info.ic_conf.cc_hv);
232 printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
233 cache_info.dt_conf.tc_sh,
234 cache_info.dt_conf.tc_page,
235 cache_info.dt_conf.tc_cst,
236 cache_info.dt_conf.tc_aid,
237 cache_info.dt_conf.tc_sr);
239 printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
240 cache_info.it_conf.tc_sh,
241 cache_info.it_conf.tc_page,
242 cache_info.it_conf.tc_cst,
243 cache_info.it_conf.tc_aid,
244 cache_info.it_conf.tc_sr);

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}
256 /* "New and Improved" version from Jim Hull
257 * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
258 * The following CAFL_STRIDE is an optimized version, see
259 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
260 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
262 #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
263 dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
264 icache_stride = CAFL_STRIDE(cache_info.ic_conf);
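
	/*
	 * Both forms are algebraically identical:
	 *	(1 << (cc_block - 1)) * (cc_line << (4 + cc_shift))
	 *		= cc_line << (4 + cc_shift + cc_block - 1)
	 *		= cc_line << (3 + cc_block + cc_shift)
	 */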

	/* stride needs to be non-zero, otherwise cache flushes will not work */
	WARN_ON(cache_info.dc_size && dcache_stride == 0);
	WARN_ON(cache_info.ic_size && icache_stride == 0);

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx:	/* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static void __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	if (!static_branch_likely(&parisc_has_cache))
		return;
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	unsigned long flags, space, pgd, prot;
#ifdef CONFIG_TLB_PTLOCK
	unsigned long pgd_lock;
#endif

	vmaddr &= PAGE_MASK;

	preempt_disable();

	/* Set context for flush */
	local_irq_save(flags);
	prot = mfctl(8);
	space = mfsp(SR_USER);
	pgd = mfctl(25);
#ifdef CONFIG_TLB_PTLOCK
	pgd_lock = mfctl(28);
#endif
	switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
	local_irq_restore(flags);

	flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
	flush_tlb_page(vma, vmaddr);

	/* Restore previous context */
	local_irq_save(flags);
#ifdef CONFIG_TLB_PTLOCK
	mtctl(pgd_lock, 28);
#endif
	mtctl(pgd, 25);
	mtsp(space, SR_USER);
	mtctl(prot, 8);
	local_irq_restore(flags);

	preempt_enable();
}

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
		unsigned int nr)
{
	void *kaddr = page_address(page);

	for (;;) {
		flush_kernel_dcache_page_addr(kaddr);
		flush_kernel_icache_page(kaddr);
		if (--nr == 0)
			break;
		kaddr += PAGE_SIZE;
	}
}
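
/*
 * Note the ordering above: the PA-RISC I-cache does not snoop D-cache
 * stores, so each page is written back from the D-cache before stale
 * lines are purged from the I-cache.
 */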

static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
	pte_t *ptep = NULL;
	pgd_t *pgd = mm->pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (!pgd_none(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

static inline bool pte_needs_flush(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE))
		== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
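
/*
 * In other words: only pages that are present, have been accessed and
 * are cacheable need a flush; a set _PAGE_NO_CACHE bit makes the masked
 * value differ from the expected pattern, so uncached mappings are
 * skipped.
 */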

void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping = folio_flush_mapping(folio);
	struct vm_area_struct *vma;
	unsigned long addr, old_addr = 0;
	void *kaddr;
	unsigned long count = 0;
	unsigned long i, nr, flags;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &folio->flags);
		return;
	}

	nr = folio_nr_pages(folio);
	kaddr = folio_address(folio);
	for (i = 0; i < nr; i++)
		flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);

	if (!mapping)
		return;

	pgoff = folio->index;

	/*
	 * We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent
	 * on machines that support equivalent aliasing
	 */
	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long offset = pgoff - vma->vm_pgoff;
		unsigned long pfn = folio_pfn(folio);

		addr = vma->vm_start;
		nr = folio_nr_pages(folio);
		if (offset > -nr) {
			pfn -= offset;
			nr += offset;
		} else {
			addr += offset * PAGE_SIZE;
		}
		if (addr + nr * PAGE_SIZE > vma->vm_end)
			nr = (vma->vm_end - addr) / PAGE_SIZE;

		if (parisc_requires_coherency()) {
			for (i = 0; i < nr; i++) {
				pte_t *ptep = get_ptep(vma->vm_mm,
							addr + i * PAGE_SIZE);
				if (!ptep)
					continue;
				if (pte_needs_flush(*ptep))
					flush_user_cache_page(vma,
							addr + i * PAGE_SIZE);
				/* Optimise accesses to the same table? */
				pte_unmap(ptep);
			}
		} else {
			/*
			 * The TLB is the engine of coherence on parisc:
			 * The CPU is entitled to speculate any page
			 * with a TLB mapping, so here we kill the
			 * mapping then flush the page along a special
			 * flush only alias mapping. This guarantees that
			 * the page is no-longer in the cache for any
			 * process and nor may it be speculatively read
			 * in (until the user or kernel specifically
			 * accesses it, of course)
			 */
			for (i = 0; i < nr; i++)
				flush_tlb_page(vma, addr + i * PAGE_SIZE);
			if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
					!= (addr & (SHM_COLOUR - 1))) {
				for (i = 0; i < nr; i++)
					__flush_cache_page(vma,
						addr + i * PAGE_SIZE,
						(pfn + i) * PAGE_SIZE);
				/*
				 * Software is allowed to have any number
				 * of private mappings to a page.
				 */
				if (!(vma->vm_flags & VM_SHARED))
					continue;
				if (old_addr)
					pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
						old_addr, addr, vma->vm_file);
				if (nr == folio_nr_pages(folio))
					old_addr = addr;
			}
		}
		WARN_ON(++count == 4096);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}
EXPORT_SYMBOL(flush_dcache_folio);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
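
/*
 * Both thresholds are compared against a range size by the flush
 * routines below: a range at least this large is handled by flushing
 * the whole cache or TLB rather than looping line by line.
 */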

void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold, threshold2;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);
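
	/*
	 * Break-even estimate: a range flush costs about rangetime/size
	 * cycles per byte, so a range of size * alltime / rangetime bytes
	 * costs roughly as much as flushing the whole cache (alltime).
	 */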
	threshold = L1_CACHE_ALIGN((unsigned long)((uint64_t)size * alltime / rangetime));
	pr_info("Calculated flush threshold is %lu KiB\n",
		threshold/1024);

	/*
	 * The threshold computed above isn't very reliable. The following
	 * heuristic works reasonably well on c8000/rp3440.
	 */
	threshold2 = cache_info.dc_size * num_online_cpus();
	parisc_cache_flush_threshold = threshold2;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(const void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb(SR_KERNEL, addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

static void flush_cache_page_if_present(struct vm_area_struct *vma,
	unsigned long vmaddr, unsigned long pfn)
{
	bool needs_flush = false;
	pte_t *ptep;

	/*
	 * The pte check is racy and sometimes the flush will trigger
	 * a non-access TLB miss. Hopefully, the page has already been
	 * flushed.
	 */
	ptep = get_ptep(vma->vm_mm, vmaddr);
	if (ptep) {
		needs_flush = pte_needs_flush(*ptep);
		pte_unmap(ptep);
	}
	if (needs_flush)
		flush_cache_page(vma, vmaddr, pfn);
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_local_page(from);
	kto = kmap_local_page(to);
	flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
	copy_page_asm(kto, kfrom);
	kunmap_local(kto);
	kunmap_local(kfrom);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
	unsigned long user_vaddr, void *dst, void *src, int len)
{
	flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}

/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, SR_TEMP1);
		pdtlb(SR_TEMP1, start);
		pitlb(SR_TEMP1, start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		bool needs_flush = false;
		/*
		 * The vma can contain pages that aren't present. Although
		 * the pte search is expensive, we need the pte to find the
		 * page pfn and to check whether the page should be flushed.
		 */
		ptep = get_ptep(vma->vm_mm, addr);
		if (ptep) {
			needs_flush = pte_needs_flush(*ptep);
			pfn = pte_pfn(*ptep);
			pte_unmap(ptep);
		}
		if (needs_flush) {
			if (parisc_requires_coherency()) {
				flush_user_cache_page(vma, addr);
			} else {
				if (WARN_ON(!pfn_valid(pfn)))
					return;
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;
	VMA_ITERATOR(vmi, mm, 0);

	for_each_vma(vmi, vma) {
		if (usize >= parisc_cache_flush_threshold)
			break;
		usize += vma->vm_end - vma->vm_start;
	}
	return usize;
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	/*
	 * Flushing the whole cache on each cpu takes forever on
	 * rp3440, etc. So, avoid it if the mm isn't too big.
	 *
	 * Note that we must flush the entire cache on machines
	 * with aliasing caches to prevent random segmentation
	 * faults.
	 */
	if (!parisc_requires_coherency()
	    ||  mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_all();
		flush_cache_all();
		return;
	}

	/* Flush mm */
	for_each_vma(vmi, vma)
		flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (!parisc_requires_coherency()
	    || end - start >= parisc_cache_flush_threshold) {
		if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
			return;
		flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	flush_cache_pages(vma, start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (WARN_ON(!pfn_valid(pfn)))
		return;
	if (parisc_requires_coherency())
		flush_user_cache_page(vma, vmaddr);
	else
		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	if (parisc_requires_coherency()) {
		if (vma->vm_flags & VM_SHARED)
			flush_data_cache();
		else
			flush_user_cache_page(vma, vmaddr);
		return;
	}

	flush_tlb_page(vma, vmaddr);
	preempt_disable();
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
	preempt_enable();
}

void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	/* Ensure DMA is complete */
	asm_syncdma();

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
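
/*
 * cacheflush(2): lets userspace flush a range of its own address space,
 * e.g. after generating code at runtime. A sketch of a call (flag values
 * are defined in asm/cachectl.h):
 *
 *	syscall(__NR_cacheflush, addr, len, DCACHE | ICACHE);
 */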
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	unsigned long start, end;
	ASM_EXCEPTIONTABLE_VAR(error);

	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	end = addr + bytes;

	if (cache & DCACHE) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fdc,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (dcache_stride), "i" (SR_USER));
	}

	if (cache & ICACHE && error == 0) {
		start = addr;
		__asm__ __volatile__ (
#ifdef CONFIG_64BIT
			"1: cmpb,*<<,n	%0,%2,1b\n"
#else
			"1: cmpb,<<,n	%0,%2,1b\n"
#endif
			"   fic,m	%3(%4,%0)\n"
			"2: sync\n"
			ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
			: "+r" (start), "+r" (error)
			: "r" (end), "r" (icache_stride), "i" (SR_USER));
	}

	return error;
}