/*
 * PARISC TLB and cache flushing support
 * Copyright (C) 2000-2001 Hewlett-Packard (John Marvin)
 * Copyright (C) 2001 Matthew Wilcox (willy at parisc-linux.org)
 * Copyright (C) 2002 Richard Hirst (rhirst with parisc-linux.org)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE: fdc, fic, and pdc instructions that use base register modification
 *       should only use index and base registers that are not shadowed,
 *       so that the fast path emulation in the non-access miss handler
 *       can be used.
 */
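/*
 * (On PA-RISC the registers shadowed across an interruption are, per
 * the architecture manuals, %r1, %r8, %r9, %r16, %r17, %r24 and %r25;
 * hence the flush loops below keep their index/base values in
 * registers outside that set, such as %r23, %r26, %r28 and %r31.)
 */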
#include <asm/assembly.h>
#include <asm/pgtable.h>
#include <asm/cache.h>

#include <linux/linkage.h>
#include <linux/init.h>
ENTRY_CFI(flush_tlb_all_local)
	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire TLB. Also, there needs to be no intervening
	 * TLB operations, e.g. TLB misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */
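	/*
	 * Roughly, in C (a sketch only; the geometry comes from the PDC
	 * cache info loaded below, and "loop" is the number of purges
	 * some implementations require per entry):
	 *
	 *	for (s = 0; s < sid_count; s++, space += sid_stride) {
	 *		mtsp(space, sr1);
	 *		for (o = 0, off = off_base; o < off_count;
	 *		     o++, off += off_stride)
	 *			for (l = 0; l < loop; l++)
	 *				pitlbe(sr1, off);
	 *	}
	 */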
	/* pcxt_ssm_bug - relied upon translation! PA 2.0 Arch. F-4 and F-5 */
	rsm	PSW_SM_I, %r19		/* save I-bit state */
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	load32	PA(cache_info), %r1
	/* Flush Instruction TLB */

	LDREG	ITLB_SID_BASE(%r1), %r20
	LDREG	ITLB_SID_STRIDE(%r1), %r21
	LDREG	ITLB_SID_COUNT(%r1), %r22
	LDREG	ITLB_OFF_BASE(%r1), %arg0
	LDREG	ITLB_OFF_STRIDE(%r1), %arg1
	LDREG	ITLB_OFF_COUNT(%r1), %arg2
	LDREG	ITLB_LOOP(%r1), %arg3
	addib,COND(=)	-1, %arg3, fitoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fitdone	/* If loop < 0, skip */
	copy	%arg0, %r28		/* Init base addr */

fitmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20	/* increment space */
	copy	%arg2, %r29		/* Init middle loop count */

fitmanymiddle:				/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe	%r0(%sr1, %r28)
	pitlbe,m	%arg1(%sr1, %r28)	/* Last pitlbe and addr adjust */
	addib,COND(>)	-1, %r29, fitmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31		/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fitmanyloop	/* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fitdone	/* Outer loop count decr */
fitoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28		/* init base addr */
	copy	%arg2, %r29		/* init middle loop count */

fitonemiddle:				/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fitonemiddle	/* Middle loop count decr */
	pitlbe,m	%arg1(%sr1, %r28)	/* pitlbe for one loop */

	addib,COND(>)	-1, %r22, fitoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20	/* increment space */

fitdone:
	/* Flush Data TLB */

	LDREG	DTLB_SID_BASE(%r1), %r20
	LDREG	DTLB_SID_STRIDE(%r1), %r21
	LDREG	DTLB_SID_COUNT(%r1), %r22
	LDREG	DTLB_OFF_BASE(%r1), %arg0
	LDREG	DTLB_OFF_STRIDE(%r1), %arg1
	LDREG	DTLB_OFF_COUNT(%r1), %arg2
	LDREG	DTLB_LOOP(%r1), %arg3

	addib,COND(=)	-1, %arg3, fdtoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdtdone	/* If loop < 0, skip */
	copy	%arg0, %r28		/* Init base addr */
fdtmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20, %sr1
	add	%r21, %r20, %r20	/* increment space */
	copy	%arg2, %r29		/* Init middle loop count */

fdtmanymiddle:				/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe	%r0(%sr1, %r28)
	pdtlbe,m	%arg1(%sr1, %r28)	/* Last pdtlbe and addr adjust */
	addib,COND(>)	-1, %r29, fdtmanymiddle	/* Middle loop decr */
	copy	%arg3, %r31		/* Re-init inner loop count */

	movb,tr	%arg0, %r28, fdtmanyloop	/* Re-init base addr */
	addib,COND(<=),n	-1, %r22, fdtdone	/* Outer loop count decr */
fdtoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20, %sr1
	copy	%arg0, %r28		/* init base addr */
	copy	%arg2, %r29		/* init middle loop count */

fdtonemiddle:				/* Loop if LOOP = 1 */
	addib,COND(>)	-1, %r29, fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m	%arg1(%sr1, %r28)	/* pdtlbe for one loop */

	addib,COND(>)	-1, %r22, fdtoneloop	/* Outer loop count decr */
	add	%r21, %r20, %r20	/* increment space */

fdtdone:

	/*
	 * Switch back to virtual mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	or	%r1, %r19, %r1		/* I-bit to state on entry */
	mtctl	%r1, %ipsw		/* restore I-bit (entire PSW) */
	rfi
	nop

2:	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_tlb_all_local)
	.import cache_info, data

ENTRY_CFI(flush_instruction_cache_local)
	load32	cache_info, %r1

	/* Flush Instruction Cache */

	LDREG	ICACHE_BASE(%r1), %arg0
	LDREG	ICACHE_STRIDE(%r1), %arg1
	LDREG	ICACHE_COUNT(%r1), %arg2
	LDREG	ICACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22		/* No mmgt ops during loop */
	addib,COND(=)	-1, %arg3, fioneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fisync	/* If loop < 0, do sync */

fimanyloop:				/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fimanyloop	/* Adjusted inner loop decr */
	fice	%r0(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)	/* Last fice and addr adjust */
	movb,tr	%arg3, %r31, fimanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fisync	/* Outer loop decr */

fioneloop:				/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fice instruction */
	cmpib,COND(>>=),n	15, %arg2, fioneloop2

fioneloop1:
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	fice,m	%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fioneloop1
	fice,m	%arg1(%sr1, %arg0)
	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fisync	/* Predict branch taken */

fioneloop2:
	addib,COND(>)	-1, %arg2, fioneloop2	/* Outer loop count decr */
	fice,m	%arg1(%sr1, %arg0)	/* Fice for one loop */

fisync:
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_instruction_cache_local)
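/*
 * The same control flow in C, as a sketch (names illustrative; the
 * real base/stride/count/loop values come from the PDC cache info
 * loaded above):
 *
 *	for (i = 0; i < count; i++, addr += stride)
 *		for (l = 0; l < loop; l++)
 *			fice(addr);	// flush one I-cache entry
 *	sync();
 *
 * The loop == 1 fast path above is just this, unrolled sixteen
 * entries per iteration.
 */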
	.import cache_info, data

ENTRY_CFI(flush_data_cache_local)
	load32	cache_info, %r1

	/* Flush Data Cache */

	LDREG	DCACHE_BASE(%r1), %arg0
	LDREG	DCACHE_STRIDE(%r1), %arg1
	LDREG	DCACHE_COUNT(%r1), %arg2
	LDREG	DCACHE_LOOP(%r1), %arg3
	rsm	PSW_SM_I, %r22		/* No mmgt ops during loop */
	addib,COND(=)	-1, %arg3, fdoneloop	/* Preadjust and test */
	movb,<,n	%arg3, %r31, fdsync	/* If loop < 0, do sync */

fdmanyloop:				/* Loop if LOOP >= 2 */
	addib,COND(>)	-1, %r31, fdmanyloop	/* Adjusted inner loop decr */
	fdce	%r0(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)	/* Last fdce and addr adjust */
	movb,tr	%arg3, %r31, fdmanyloop	/* Re-init inner loop count */
	addib,COND(<=),n	-1, %arg2, fdsync	/* Outer loop decr */

fdoneloop:				/* Loop if LOOP = 1 */
	/* Some implementations may flush with a single fdce instruction */
	cmpib,COND(>>=),n	15, %arg2, fdoneloop2

fdoneloop1:
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	fdce,m	%arg1(%sr1, %arg0)
	addib,COND(>)	-16, %arg2, fdoneloop1
	fdce,m	%arg1(%sr1, %arg0)
	/* Check if done */
	cmpb,COND(=),n	%arg2, %r0, fdsync	/* Predict branch taken */

fdoneloop2:
	addib,COND(>)	-1, %arg2, fdoneloop2	/* Outer loop count decr */
	fdce,m	%arg1(%sr1, %arg0)	/* Fdce for one loop */

fdsync:
	syncdma
	sync
	mtsm	%r22			/* restore I-bit */
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_data_cache_local)
	/* Macros to serialize TLB purge operations on SMP.  */

	.macro	tlb_lock	la,flags,tmp
#ifdef CONFIG_SMP
#if __PA_LDCW_ALIGNMENT > 4
	load32	pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
	depi	0,31,__PA_LDCW_ALIGN_ORDER, \la
#else
	load32	pa_tlb_lock, \la
#endif
	rsm	PSW_SM_I,\flags
1:	LDCW	0(\la),\tmp
	cmpib,<>,n	0,\tmp,3f
2:	ldw	0(\la),\tmp
	cmpib,<>	0,\tmp,2b
	nop
	b,n	1b
3:
#endif
	.endm

	.macro	tlb_unlock	la,flags,tmp
#ifdef CONFIG_SMP
	ldi	1,\tmp
	sync
	stw	\tmp,0(\la)
	mtsm	\flags
#endif
	.endm
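/*
 * In C terms this is the classic parisc ldcw spinlock, roughly
 * (assuming the __ldcw() helper from asm/ldcw.h and the aligned
 * pa_tlb_lock word; ldcw reads the word and zeroes it, and reading
 * zero means the lock was already held):
 *
 *	local_irq_save(flags);
 *	while (__ldcw(&pa_tlb_lock) == 0)
 *		while (pa_tlb_lock == 0)
 *			cpu_relax();
 *	// ... TLB purges ...
 *	pa_tlb_lock = 1;		// release: make nonzero again
 *	local_irq_restore(flags);
 */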
	/* Clear page using kernel mapping.  */

ENTRY_CFI(clear_page_asm)
#ifdef CONFIG_64BIT

	/* Unroll the loop.  */
	ldi	(PAGE_SIZE / 128), %r1

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b

#else

	/*
	 * Note that until (if) we start saving the full 64-bit register
	 * values on interrupt, we can't use std on a 32 bit kernel.
	 */
	ldi	(PAGE_SIZE / 64), %r1

	addib,COND(>),n	-1, %r1, 1b

#endif
	bv	%r0(%r2)
	nop
ENDPROC_CFI(clear_page_asm)
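/*
 * In sketch form (64-bit case; the 32-bit variant is the same with
 * word stores):
 *
 *	void clear_page_asm(void *page)		// page in %r26
 *	{
 *		u64 *p = page;
 *		for (int n = PAGE_SIZE / 128; n > 0; n--, p += 16) {
 *			p[0] = 0; p[1] = 0; ...; p[15] = 0;  // 16 x std
 *		}
 *	}
 */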
	/* Copy page using kernel mapping.  */

ENTRY_CFI(copy_page_asm)
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * Prefetch doesn't improve performance on rp3440.
	 * GCC probably can do this just as well...
	 */
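	/*
	 * Sketch of the intended schedule (hypothetical C, sixteen
	 * ldd/std pairs per iteration): loads are issued in pairs well
	 * before the stores that consume them, so the two-loads-per-cycle
	 * and one-store-per-cycle limits overlap instead of serializing:
	 *
	 *	for (n = PAGE_SIZE / 128; n > 0; n--, src += 16, dst += 16) {
	 *		u64 t0 = src[0], t1 = src[1];	// 2 loads/cycle
	 *		u64 t2 = src[2], t3 = src[3];
	 *		dst[0] = t0; dst[1] = t1;	// 1 store/cycle
	 *		dst[2] = t2; dst[3] = t3;
	 *		// ... and so on through src[15]/dst[15]
	 *	}
	 */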
	ldi	(PAGE_SIZE / 128), %r1

	/* Note reverse branch hint for addib is taken.  */
	addib,COND(>),n	-1, %r1, 1b

#else

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling).
	 * Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */
	ldi	(PAGE_SIZE / 64), %r1

	addib,COND(>),n	-1, %r1, 1b

#endif
	bv	%r0(%r2)
	nop
ENDPROC_CFI(copy_page_asm)
/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 * maximum alias boundary being 4 MB. We've been assured by the
 * parisc chip designers that there will not ever be a parisc
 * chip with a larger alias boundary (Never say never :-) ).
 *
 * Subtle: the dtlb miss handlers support the temp alias region by
 * "knowing" that if a dtlb miss happens within the temp alias
 * region it must have occurred while in clear_user_page. Since
 * this routine makes use of processor local translations, we
 * don't want to insert them into the kernel page table. Instead,
 * we load up some general registers (they need to be registers
 * which aren't shadowed) with the physical page numbers (preshifted
 * for tlb insertion) needed to insert the translations. When we
 * miss on the translation, the dtlb miss handler inserts the
 * translation into the tlb using these values:
 *
 * %r26 physical page (shifted for tlb insert) of "to" translation
 * %r23 physical page (shifted for tlb insert) of "from" translation
 */
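/*
 * In pseudo-C, the miss handler's special case is therefore roughly
 * (helper names illustrative):
 *
 *	if (in_tmpalias_region(va)) {
 *		pfn = is_from_alias(va) ? r23 : r26;  // preloaded above
 *		insert_dtlb(va, pfn);	// no page-table walk, nothing
 *	}				// entered in the kernel tables
 *
 * where the "from" alias is told apart from the "to" alias by the
 * single virtual-address bit set with depdi/depwi below.
 */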
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
#define PAGE_ADD_SHIFT	(PAGE_SHIFT-12)
	.macro	convert_phys_for_tlb_insert20	phys
	extrd,u	\phys, 56-PAGE_ADD_SHIFT, 32-PAGE_ADD_SHIFT, \phys
#if _PAGE_SIZE_ENCODING_DEFAULT
	depdi	_PAGE_SIZE_ENCODING_DEFAULT, 63, (63-58), \phys
#endif
	.endm
/*
 * copy_user_page_asm() performs a page copy using mappings
 * equivalent to the user page mappings. It can be used to
 * implement copy_user_page(), but unfortunately both the `from'
 * and `to' pages need to be flushed through mappings equivalent
 * to the user mappings after the copy, because the kernel accesses
 * the `from' page through the kmap kernel mapping and the `to'
 * page needs to be flushed since code can be copied. As a
 * result, this implementation is less efficient than the simpler
 * copy using the kernel mapping, which only needs the `from' page
 * to be flushed via the user mapping. The kunmap routines handle
 * the flushes needed for the kernel mapping.
 *
 * I'm still keeping this around because it may be possible to
 * use it if more information is passed into copy_user_page().
 * Have to do some measurements to see if it is worthwhile to
 * lobby for such a change.
 */
ENTRY_CFI(copy_user_page_asm)
	/* Convert virtual `to' and `from' addresses to physical addresses.
	   Move `from' physical address to non-shadowed register.  */
	ldil	L%(__PAGE_OFFSET), %r1
	sub	%r26, %r1, %r26
	sub	%r25, %r1, %r23

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	convert_phys_for_tlb_insert20 %r23	/* convert phys addr to tlb insert format */
	depd	%r24,63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy	%r28, %r29
	depdi	1, 41,1, %r29		/* Form aliased virtual address 'from' */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	extrw,u	%r23, 24,25, %r23	/* convert phys addr to tlb insert format */
	depw	%r24, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
	copy	%r28, %r29
	depwi	1, 9,1, %r29		/* Form aliased virtual address 'from' */
#endif
	/* Purge any old translations */

	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	pdtlb	%r0(%r29)
	tlb_unlock	%r20,%r21,%r22
#ifdef CONFIG_64BIT
	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
	 * Unroll the loop by hand and arrange insn appropriately.
	 * GCC probably can do this just as well.
	 */

	ldi	(PAGE_SIZE / 128), %r1

	/* conditional branches nullify on forward taken branch, and on
	 * non-taken backward branch. Note that .+4 is a backwards branch.
	 * The ldd should only get executed if the branch is taken.
	 */
	addib,COND(>),n	-1, %r1, 1b	/* bundle 10 */
	ldd	0(%r29), %r19		/* start next loads */
#else
	ldi	(PAGE_SIZE / 64), %r1

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

	addib,COND(>)	-1, %r1, 1b

#endif
	bv	%r0(%r2)
	nop
ENDPROC_CFI(copy_user_page_asm)
ENTRY_CFI(clear_user_page_asm)
	tophys_r1	%r26

	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif
	/* Purge any old translation */

	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
#ifdef CONFIG_64BIT
	ldi	(PAGE_SIZE / 128), %r1

	/* PREFETCH (Write) has not (yet) been proven to help here */
	/* #define PREFETCHW_OP	ldd 256(%0), %r0 */

	addib,COND(>)	-1, %r1, 1b

#else	/* ! CONFIG_64BIT */
	ldi	(PAGE_SIZE / 64), %r1

	addib,COND(>)	-1, %r1, 1b

#endif	/* CONFIG_64BIT */

	bv	%r0(%r2)
	nop
ENDPROC_CFI(clear_user_page_asm)
ENTRY_CFI(flush_dcache_page_asm)
	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif
	/* Purge any old translation */

	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	tlb_unlock	%r20,%r21,%r22
	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r31
#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r28, %r25, %r25
	sub	%r25, %r31, %r25
	cmpb,COND(<<)	%r28, %r25, 1b
	fdc,m	%r31(%r28)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_dcache_page_asm)
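/*
 * The tmpalias routines above and below share one shape, sketched
 * here in C (helper names illustrative):
 *
 *	va = tmpalias_va(phys_pfn);	// fixed alias window address
 *	purge_tlb_entry(va);		// under tlb_lock on SMP
 *	for (p = va; p < va + PAGE_SIZE; p += cache_stride)
 *		fdc(p);			// or fic for the I-cache case
 *	sync();
 */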
ENTRY_CFI(flush_icache_page_asm)
	ldil	L%(TMPALIAS_MAP_START), %r28
#ifdef CONFIG_64BIT
#if (TMPALIAS_MAP_START >= 0x80000000)
	depdi	0, 31,32, %r28		/* clear any sign extension */
#endif
	convert_phys_for_tlb_insert20 %r26	/* convert phys addr to tlb insert format */
	depd	%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
	depdi	0, 63,PAGE_SHIFT, %r28	/* Clear any offset bits */
#else
	extrw,u	%r26, 24,25, %r26	/* convert phys addr to tlb insert format */
	depw	%r25, 31,22, %r28	/* Form aliased virtual address 'to' */
	depwi	0, 31,PAGE_SHIFT, %r28	/* Clear any offset bits */
#endif
	/* Purge any old translation. Note that the FIC instruction
	 * may use either the instruction or data TLB. Given that we
	 * have a flat address space, it's not clear which TLB will be
	 * used. So, we purge both entries. */

#ifdef CONFIG_PA20
	pdtlb,l	%r0(%r28)
	pitlb,l	%r0(%sr4,%r28)
#else
	tlb_lock	%r20,%r21,%r22
	pdtlb	%r0(%r28)
	pitlb	%r0(%sr4,%r28)
	tlb_unlock	%r20,%r21,%r22
#endif
	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r31
#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r28, %r25, %r25
	sub	%r25, %r31, %r25
	/* fic only has the type 26 form on PA1.1, requiring an
	 * explicit space specification, so use %sr4 */
1:	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	fic,m	%r31(%sr4,%r28)
	cmpb,COND(<<)	%r28, %r25, 1b
	fic,m	%r31(%sr4,%r28)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_icache_page_asm)
ENTRY_CFI(flush_kernel_dcache_page_asm)
	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

	cmpb,COND(<<)	%r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_dcache_page_asm)
ENTRY_CFI(purge_kernel_dcache_page_asm)
	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25

	cmpb,COND(<<)	%r26, %r25, 1b
	pdc,m	%r23(%r26)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(purge_kernel_dcache_page_asm)
ENTRY_CFI(flush_user_dcache_range_asm)
	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_user_dcache_range_asm)
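/*
 * Every range routine below is this loop with a different flush or
 * purge instruction and space register; in C, roughly (%sr3 = user
 * space, %sr4 = kernel space):
 *
 *	start &= ~(stride - 1);		// the ANDCM alignment above
 *	while (start < end) {
 *		fdc(sr, start);		// or fic / pdc
 *		start += stride;
 *	}
 *	sync();
 */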
ENTRY_CFI(flush_kernel_dcache_range_asm)
	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fdc,m	%r23(%r26)

	sync
	syncdma
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_dcache_range_asm)
ENTRY_CFI(purge_kernel_dcache_range_asm)
	ldil	L%dcache_stride, %r1
	ldw	R%dcache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	pdc,m	%r23(%r26)

	sync
	syncdma
	bv	%r0(%r2)
	nop
ENDPROC_CFI(purge_kernel_dcache_range_asm)
ENTRY_CFI(flush_user_icache_range_asm)
	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m	%r23(%sr3, %r26)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_user_icache_range_asm)
ENTRY_CFI(flush_kernel_icache_page)
	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
#ifdef CONFIG_64BIT
	depdi,z	1, 63-PAGE_SHIFT,1, %r25
#else
	depwi,z	1, 31-PAGE_SHIFT,1, %r25
#endif
	add	%r26, %r25, %r25
	sub	%r25, %r23, %r25
1:	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	fic,m	%r23(%sr4, %r26)
	cmpb,COND(<<)	%r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_icache_page)
ENTRY_CFI(flush_kernel_icache_range_asm)
	ldil	L%icache_stride, %r1
	ldw	R%icache_stride(%r1), %r23
	ldo	-1(%r23), %r21
	ANDCM	%r26, %r21, %r26

1:	cmpb,COND(<<),n	%r26, %r25, 1b
	fic,m	%r23(%sr4, %r26)

	sync
	bv	%r0(%r2)
	nop
ENDPROC_CFI(flush_kernel_icache_range_asm)
	/* align should cover use of rfi in disable_sr_hashing_asm and
	 * srdis_done.
	 */
	.align	256
ENTRY_CFI(disable_sr_hashing_asm)
	/*
	 * Switch to real mode
	 */
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0
	load32	PA(1f), %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	REAL_MODE_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

1:	cmpib,=,n	SRHASH_PCXST, %r26, srdis_pcxs
	cmpib,=,n	SRHASH_PCXL, %r26, srdis_pcxl
	cmpib,=,n	SRHASH_PA20, %r26, srdis_pa20
	b,n	srdis_done
srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word	0x141c1a00		/* mfdiag %dr0, %r28 */
	.word	0x141c1a00		/* must issue twice */
	depwi	0,18,1, %r28		/* Clear DHE (dcache hash enable) */
	depwi	0,20,1, %r28		/* Clear IHE (icache hash enable) */
	.word	0x141c1600		/* mtdiag %r28, %dr0 */
	.word	0x141c1600		/* must issue twice */
	b,n	srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word	0x141c0600		/* mfdiag %dr0, %r28 */
	depwi	0,28,2, %r28		/* Clear DHASH_EN & IHASH_EN */
	.word	0x141c0240		/* mtdiag %r28, %dr0 */
	b,n	srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+,PCXW2 */

	.word	0x144008bc		/* mfdiag %dr2, %r28 */
	depdi	0, 54,1, %r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word	0x145c1840		/* mtdiag %r28, %dr2 */
srdis_done:
	/* Switch back to virtual mode */
	rsm	PSW_SM_I, %r0		/* prep to load iia queue */
	load32	2f, %r1
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q, %r0		/* prep to load iia queue */
	mtctl	%r0, %cr17		/* Clear IIASQ tail */
	mtctl	%r0, %cr17		/* Clear IIASQ head */
	mtctl	%r1, %cr18		/* IIAOQ head */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail */
	load32	KERNEL_PSW, %r1
	mtctl	%r1, %ipsw
	rfi
	nop

2:	bv	%r0(%r2)
	nop
ENDPROC_CFI(disable_sr_hashing_asm)
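/*
 * For context, the C caller is expected to pick the SRHASH_* variant
 * from the CPU type before calling in; a minimal sketch (the switch
 * shown is illustrative, only the SRHASH_* constants and boot_cpu_data
 * are assumed):
 *
 *	void disable_sr_hashing(void)
 *	{
 *		int type;
 *
 *		switch (boot_cpu_data.cpu_type) {
 *		case pcxs ... pcxt_:	type = SRHASH_PCXST; break;
 *		case pcxl:		type = SRHASH_PCXL; break;
 *		default:		type = SRHASH_PA20; break;
 *		}
 *		disable_sr_hashing_asm(type);
 *	}
 */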