/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>		/* for STR() and PTR */
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <linux/uaccess.h>	/* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
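
/*
 * Issue a single CACHE instruction of type 'op' on the line containing
 * 'addr'.  The "R" memory constraint lets the assembler emit a legal
 * base/offset operand and tells the compiler the addressed line is
 * referenced; the CACHE instruction itself loads nothing into a register.
 */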
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
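
/*
 * On CONFIG_MIPS_MT kernels the I- and D-cache flush helpers below are
 * wrapped in loops that repeat the operation mt_n_iflushes / mt_n_dflushes
 * times.  On non-MT kernels the prologue/epilogue pairs reduce to plain
 * braces and each flush is performed exactly once.
 */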
#ifdef CONFIG_MIPS_MT

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue						\
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
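
/*
 * Single cache line maintenance.  The *_indexed variants interpret the
 * address as a (way, index) selector relative to INDEX_BASE; the others
 * are hit-type operations acting on whichever line currently holds the
 * given address.
 */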
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
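
/*
 * protected_cache_op() performs a cache operation that may fault; the
 * exception table entry redirects a faulting CACHE instruction to the
 * fixup code, which makes the macro evaluate to -EFAULT instead of taking
 * the caller down.  protected_cachee_op() is the EVA variant and uses the
 * CACHEE instruction so that 'addr' is interpreted as a user virtual
 * address.
 */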
#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
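
/*
 * Each cacheNN_unroll32(base, op) macro below issues 32 CACHE instructions
 * covering 32 consecutive lines of NN bytes starting at 'base', i.e.
 * NN * 32 bytes per invocation.  The blast_* loops generated further down
 * therefore advance their address by lsize * 32 between invocations.
 */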
#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we flush
 * more cache lines.
 */
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100\n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
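
/*
 * As an illustration, the 32-byte D-cache instantiation above expands to:
 *
 *	blast_dcache32()		  - writeback & invalidate the whole
 *					    primary D-cache, by index
 *	blast_dcache32_page(page)	  - one page, by virtual address (hit ops)
 *	blast_dcache32_page_indexed(page) - one page worth of indexes, all ways
 */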

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							 unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
									\
	__##pfx##flush_epilogue						\
}
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
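
/*
 * Typical (illustrative) use of the range helpers generated above, e.g.
 * when making freshly written instructions visible to the I-cache:
 *
 *	blast_dcache_range(start, end);		writeback the D-cache lines
 *	blast_icache_range(start, end);		then invalidate the I-cache
 *
 * The protected_ variants are intended for user addresses that may fault.
 */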

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);	\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
}
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */